From 40f46bf2d6e0d26da19cd7d7bb260a7198abbcf7 Mon Sep 17 00:00:00 2001
From: Christopher Goes
Date: Wed, 10 Feb 2021 16:22:12 +0100
Subject: [PATCH 001/393] Initial commit
---
LICENSE | 21 +++++++++++++++++++++
README.md | 2 ++
2 files changed, 23 insertions(+)
create mode 100644 LICENSE
create mode 100644 README.md
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 00000000..46dea96c
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2021 COSMOS
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/README.md b/README.md
new file mode 100644
index 00000000..15644729
--- /dev/null
+++ b/README.md
@@ -0,0 +1,2 @@
+# ibc-go
+Interblockchain communication protocol (IBC) implementation in Golang.
From 09646a012aba42575ed8ea66c91811f04aebcae6 Mon Sep 17 00:00:00 2001
From: colin axnér <25233464+colin-axner@users.noreply.github.com>
Date: Mon, 22 Feb 2021 15:34:34 +0100
Subject: [PATCH 002/393] Initial Setup + Protobuf files (#2)
* initial setup
Add CODEOWNERS, go.mod file, copy Makefile from the SDK, add proto files for the client with updated import/package names, modify the API route to drop v1beta1 in favor of v1, get third_party vendor files
* generate proto files
* add more proto files
* add connection and commitment proto files
* finish adding proto files
* make proto-all, remove unnecessary script, add buf file
* bump sdk to master, copy code directly
* make proto-all
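
For local development the new Makefile targets can be chained together; a minimal
sketch (the dev-check aggregate target here is illustrative only and not added by
this patch, while proto-all, lint, test and build are all defined in the Makefile):

    dev-check: proto-all lint test build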
---
.github/CODEOWNERS | 3 +
Makefile | 475 +
applications/transfer/client/cli/cli.go | 42 +
applications/transfer/client/cli/query.go | 108 +
applications/transfer/client/cli/tx.go | 117 +
applications/transfer/handler.go | 23 +
applications/transfer/handler_test.go | 123 +
applications/transfer/keeper/MBT_README.md | 51 +
applications/transfer/keeper/encoding.go | 35 +
applications/transfer/keeper/genesis.go | 45 +
applications/transfer/keeper/genesis_test.go | 39 +
applications/transfer/keeper/grpc_query.go | 83 +
.../transfer/keeper/grpc_query_test.go | 142 +
applications/transfer/keeper/keeper.go | 169 +
applications/transfer/keeper/keeper_test.go | 51 +
.../transfer/keeper/mbt_relay_test.go | 378 +
.../model_based_tests/Test5Packets.json | 492 +
.../keeper/model_based_tests/Test5Packets.tla | 1056 +++
.../Test5PacketsAllDifferentPass.json | 612 ++
.../Test5PacketsAllDifferentPass.tla | 1188 +++
.../TestOnRecvAcknowledgementErrorFail.json | 58 +
.../TestOnRecvAcknowledgementErrorFail.tla | 159 +
.../TestOnRecvAcknowledgementErrorPass.json | 159 +
.../TestOnRecvAcknowledgementErrorPass.tla | 310 +
.../TestOnRecvAcknowledgementResultFail.json | 58 +
.../TestOnRecvAcknowledgementResultFail.tla | 159 +
.../TestOnRecvAcknowledgementResultPass.json | 58 +
.../TestOnRecvAcknowledgementResultPass.tla | 159 +
.../TestOnRecvPacketFail.json | 58 +
.../TestOnRecvPacketFail.tla | 159 +
.../TestOnRecvPacketPass.json | 73 +
.../TestOnRecvPacketPass.tla | 174 +
.../model_based_tests/TestOnTimeoutFail.json | 58 +
.../model_based_tests/TestOnTimeoutFail.tla | 159 +
.../model_based_tests/TestOnTimeoutPass.json | 159 +
.../model_based_tests/TestOnTimeoutPass.tla | 310 +
.../TestSendTransferFail.json | 58 +
.../TestSendTransferFail.tla | 159 +
.../TestSendTransferPass.json | 174 +
.../TestSendTransferPass.tla | 323 +
.../model_based_tests/TestUnescrowTokens.json | 305 +
.../model_based_tests/TestUnescrowTokens.tla | 563 ++
applications/transfer/keeper/msg_server.go | 43 +
applications/transfer/keeper/params.go | 30 +
applications/transfer/keeper/params_test.go | 15 +
applications/transfer/keeper/relay.go | 406 +
.../transfer/keeper/relay_model/account.tla | 36 +
.../keeper/relay_model/account_record.tla | 46 +
.../relay_model/apalache-to-relay-test.json | 100 +
.../relay_model/apalache-to-relay-test2.json | 104 +
.../transfer/keeper/relay_model/denom.tla | 50 +
.../keeper/relay_model/denom_record.tla | 53 +
.../keeper/relay_model/denom_record2.tla | 114 +
.../keeper/relay_model/denom_sequence.tla | 47 +
.../keeper/relay_model/identifiers.tla | 10 +
.../transfer/keeper/relay_model/relay.tla | 278 +
.../keeper/relay_model/relay_tests.tla | 96 +
applications/transfer/keeper/relay_test.go | 392 +
applications/transfer/module.go | 438 +
applications/transfer/module_test.go | 246 +
applications/transfer/simulation/decoder.go | 33 +
.../transfer/simulation/decoder_test.go | 59 +
applications/transfer/simulation/genesis.go | 54 +
.../transfer/simulation/genesis_test.go | 74 +
applications/transfer/simulation/params.go | 32 +
.../transfer/simulation/params_test.go | 36 +
applications/transfer/spec/01_concepts.md | 117 +
applications/transfer/spec/02_state.md | 10 +
.../transfer/spec/03_state_transitions.md | 36 +
applications/transfer/spec/04_messages.md | 40 +
applications/transfer/spec/05_events.md | 44 +
applications/transfer/spec/06_metrics.md | 14 +
applications/transfer/spec/07_params.md | 30 +
applications/transfer/spec/README.md | 24 +
applications/transfer/types/codec.go | 41 +
applications/transfer/types/coin.go | 48 +
applications/transfer/types/errors.go | 17 +
applications/transfer/types/events.go | 21 +
.../transfer/types/expected_keepers.go | 48 +
applications/transfer/types/genesis.go | 35 +
applications/transfer/types/genesis.pb.go | 443 +
applications/transfer/types/genesis_test.go | 47 +
applications/transfer/types/keys.go | 55 +
applications/transfer/types/keys_test.go | 24 +
applications/transfer/types/msgs.go | 85 +
applications/transfer/types/msgs_test.go | 103 +
applications/transfer/types/packet.go | 56 +
applications/transfer/types/packet_test.go | 36 +
applications/transfer/types/params.go | 65 +
applications/transfer/types/params_test.go | 12 +
applications/transfer/types/query.pb.go | 1418 +++
applications/transfer/types/query.pb.gw.go | 326 +
applications/transfer/types/trace.go | 203 +
applications/transfer/types/trace_test.go | 150 +
applications/transfer/types/transfer.pb.go | 909 ++
applications/transfer/types/tx.pb.go | 804 ++
apps/transfer/types/genesis.pb.go | 443 +
apps/transfer/types/query.pb.go | 1418 +++
apps/transfer/types/query.pb.gw.go | 326 +
apps/transfer/types/transfer.pb.go | 908 ++
apps/transfer/types/tx.pb.go | 801 ++
buf.yaml | 34 +
core/02-client/abci.go | 20 +
core/02-client/abci_test.go | 60 +
core/02-client/client/cli/cli.go | 51 +
core/02-client/client/cli/query.go | 260 +
core/02-client/client/cli/tx.go | 328 +
core/02-client/client/proposal_handler.go | 8 +
core/02-client/client/utils/utils.go | 199 +
core/02-client/doc.go | 10 +
core/02-client/genesis.go | 69 +
core/02-client/keeper/client.go | 192 +
core/02-client/keeper/client_test.go | 603 ++
core/02-client/keeper/encoding.go | 42 +
core/02-client/keeper/grpc_query.go | 199 +
core/02-client/keeper/grpc_query_test.go | 381 +
core/02-client/keeper/keeper.go | 367 +
core/02-client/keeper/keeper_test.go | 389 +
core/02-client/keeper/params.go | 23 +
core/02-client/keeper/params_test.go | 17 +
core/02-client/keeper/proposal.go | 72 +
core/02-client/keeper/proposal_test.go | 130 +
core/02-client/module.go | 29 +
core/02-client/proposal_handler.go | 22 +
core/02-client/proposal_handler_test.go | 84 +
core/02-client/simulation/decoder.go | 38 +
core/02-client/simulation/decoder_test.go | 70 +
core/02-client/simulation/genesis.go | 13 +
core/02-client/types/client.go | 111 +
core/02-client/types/client.pb.go | 1598 ++++
core/02-client/types/client_test.go | 87 +
core/02-client/types/codec.go | 188 +
core/02-client/types/codec_test.go | 210 +
core/02-client/types/encoding.go | 86 +
core/02-client/types/errors.go | 35 +
core/02-client/types/events.go | 26 +
core/02-client/types/expected_keepers.go | 14 +
core/02-client/types/genesis.go | 250 +
core/02-client/types/genesis.pb.go | 1060 +++
core/02-client/types/genesis_test.go | 549 ++
core/02-client/types/height.go | 188 +
core/02-client/types/height_test.go | 155 +
core/02-client/types/keys.go | 65 +
core/02-client/types/keys_test.go | 54 +
core/02-client/types/msgs.go | 343 +
core/02-client/types/msgs_test.go | 619 ++
core/02-client/types/params.go | 71 +
core/02-client/types/params_test.go | 30 +
core/02-client/types/proposal.go | 64 +
core/02-client/types/proposal_test.go | 86 +
core/02-client/types/query.go | 65 +
core/02-client/types/query.pb.go | 2685 ++++++
core/02-client/types/query.pb.gw.go | 602 ++
core/02-client/types/tx.pb.go | 2074 +++++
core/03-connection/client/cli/cli.go | 46 +
core/03-connection/client/cli/query.go | 118 +
core/03-connection/client/cli/tx.go | 348 +
core/03-connection/client/utils/utils.go | 219 +
core/03-connection/genesis.go | 28 +
core/03-connection/keeper/grpc_query.go | 179 +
core/03-connection/keeper/grpc_query_test.go | 420 +
core/03-connection/keeper/handshake.go | 342 +
core/03-connection/keeper/handshake_test.go | 701 ++
core/03-connection/keeper/keeper.go | 198 +
core/03-connection/keeper/keeper_test.go | 133 +
core/03-connection/keeper/verify.go | 225 +
core/03-connection/keeper/verify_test.go | 514 ++
core/03-connection/module.go | 29 +
core/03-connection/simulation/decoder.go | 32 +
core/03-connection/simulation/decoder_test.go | 69 +
core/03-connection/simulation/genesis.go | 13 +
core/03-connection/types/codec.go | 47 +
core/03-connection/types/connection.go | 127 +
core/03-connection/types/connection.pb.go | 1801 ++++
core/03-connection/types/connection_test.go | 121 +
core/03-connection/types/errors.go | 19 +
core/03-connection/types/events.go | 25 +
core/03-connection/types/expected_keepers.go | 16 +
core/03-connection/types/genesis.go | 76 +
core/03-connection/types/genesis.pb.go | 438 +
core/03-connection/types/genesis_test.go | 114 +
core/03-connection/types/keys.go | 61 +
core/03-connection/types/keys_test.go | 49 +
core/03-connection/types/msgs.go | 354 +
core/03-connection/types/msgs_test.go | 243 +
core/03-connection/types/query.go | 70 +
core/03-connection/types/query.pb.go | 2892 ++++++
core/03-connection/types/query.pb.gw.go | 602 ++
core/03-connection/types/tx.pb.go | 2782 ++++++
core/03-connection/types/version.go | 220 +
core/03-connection/types/version_test.go | 167 +
core/04-channel/client/cli/cli.go | 58 +
core/04-channel/client/cli/query.go | 457 +
core/04-channel/client/cli/tx.go | 288 +
core/04-channel/client/utils/utils.go | 301 +
core/04-channel/genesis.go | 48 +
core/04-channel/handler.go | 186 +
core/04-channel/keeper/grpc_query.go | 486 +
core/04-channel/keeper/grpc_query_test.go | 1376 +++
core/04-channel/keeper/handshake.go | 496 +
core/04-channel/keeper/handshake_test.go | 773 ++
core/04-channel/keeper/keeper.go | 432 +
core/04-channel/keeper/keeper_test.go | 329 +
core/04-channel/keeper/packet.go | 528 ++
core/04-channel/keeper/packet_test.go | 665 ++
core/04-channel/keeper/timeout.go | 276 +
core/04-channel/keeper/timeout_test.go | 351 +
core/04-channel/module.go | 29 +
core/04-channel/simulation/decoder.go | 48 +
core/04-channel/simulation/decoder_test.go | 89 +
core/04-channel/simulation/genesis.go | 13 +
core/04-channel/types/channel.go | 172 +
core/04-channel/types/channel.pb.go | 2270 +++++
core/04-channel/types/channel_test.go | 119 +
core/04-channel/types/codec.go | 60 +
core/04-channel/types/errors.go | 28 +
core/04-channel/types/events.go | 46 +
core/04-channel/types/expected_keepers.go | 76 +
core/04-channel/types/genesis.go | 156 +
core/04-channel/types/genesis.pb.go | 1017 +++
core/04-channel/types/genesis_test.go | 225 +
core/04-channel/types/keys.go | 61 +
core/04-channel/types/keys_test.go | 47 +
core/04-channel/types/msgs.go | 652 ++
core/04-channel/types/msgs_test.go | 446 +
core/04-channel/types/packet.go | 112 +
core/04-channel/types/packet_test.go | 53 +
core/04-channel/types/query.go | 94 +
core/04-channel/types/query.pb.go | 7993 +++++++++++++++++
core/04-channel/types/query.pb.gw.go | 1792 ++++
core/04-channel/types/tx.pb.go | 5264 +++++++++++
core/05-port/keeper/keeper.go | 80 +
core/05-port/keeper/keeper_test.go | 70 +
core/05-port/types/errors.go | 13 +
core/05-port/types/keys.go | 15 +
core/05-port/types/module.go | 78 +
core/05-port/types/router.go | 65 +
core/05-port/types/utils.go | 17 +
core/23-commitment/types/bench_test.go | 15 +
core/23-commitment/types/codec.go | 43 +
core/23-commitment/types/commitment.pb.go | 863 ++
core/23-commitment/types/commitment_test.go | 37 +
core/23-commitment/types/errors.go | 15 +
core/23-commitment/types/merkle.go | 312 +
core/23-commitment/types/merkle_test.go | 172 +
core/23-commitment/types/utils.go | 28 +
core/23-commitment/types/utils_test.go | 98 +
core/24-host/errors.go | 15 +
core/24-host/keys.go | 235 +
core/24-host/parse.go | 79 +
core/24-host/parse_test.go | 48 +
core/24-host/validate.go | 114 +
core/24-host/validate_test.go | 119 +
core/client/cli/cli.go | 50 +
core/client/query.go | 67 +
core/exported/channel.go | 32 +
core/exported/client.go | 223 +
core/exported/commitment.go | 45 +
core/exported/connection.go | 26 +
core/genesis.go | 27 +
core/genesis_test.go | 370 +
core/handler.go | 98 +
core/keeper/grpc_query.go | 124 +
core/keeper/keeper.go | 65 +
core/keeper/msg_server.go | 616 ++
core/keeper/msg_server_test.go | 714 ++
core/module.go | 200 +
core/simulation/decoder.go | 32 +
core/simulation/decoder_test.go | 80 +
core/simulation/genesis.go | 63 +
core/simulation/genesis_test.go | 49 +
core/spec/01_concepts.md | 405 +
core/spec/02_state.md | 28 +
core/spec/03_state_transitions.md | 106 +
core/spec/04_messages.md | 497 +
core/spec/05_callbacks.md | 80 +
core/spec/06_events.md | 241 +
core/spec/07_params.md | 21 +
core/spec/README.md | 26 +
core/types/codec.go | 23 +
core/types/genesis.go | 38 +
core/types/genesis.pb.go | 440 +
core/types/query.go | 26 +
docs/README.md | 114 +
docs/ibc/proto-docs.md | 7521 ++++++++++++++++
docs/protodoc-markdown.tmpl | 0
go.mod | 23 +
go.sum | 1035 +++
light-clients/06-solomachine/doc.go | 7 +
light-clients/06-solomachine/module.go | 10 +
.../06-solomachine/spec/01_concepts.md | 160 +
light-clients/06-solomachine/spec/02_state.md | 12 +
.../spec/03_state_transitions.md | 39 +
.../06-solomachine/spec/04_messages.md | 8 +
light-clients/06-solomachine/spec/README.md | 26 +
.../06-solomachine/types/client_state.go | 491 +
.../06-solomachine/types/client_state_test.go | 912 ++
light-clients/06-solomachine/types/codec.go | 130 +
.../06-solomachine/types/codec_test.go | 190 +
.../06-solomachine/types/consensus_state.go | 60 +
.../types/consensus_state_test.go | 75 +
light-clients/06-solomachine/types/errors.go | 18 +
light-clients/06-solomachine/types/header.go | 67 +
.../06-solomachine/types/header_test.go | 98 +
.../06-solomachine/types/misbehaviour.go | 83 +
.../types/misbehaviour_handle.go | 92 +
.../types/misbehaviour_handle_test.go | 275 +
.../06-solomachine/types/misbehaviour_test.go | 132 +
light-clients/06-solomachine/types/proof.go | 475 +
.../06-solomachine/types/proof_test.go | 102 +
.../06-solomachine/types/proposal_handle.go | 64 +
.../types/proposal_handle_test.go | 88 +
.../06-solomachine/types/solomachine.go | 43 +
.../06-solomachine/types/solomachine.pb.go | 4121 +++++++++
.../06-solomachine/types/solomachine_test.go | 113 +
light-clients/06-solomachine/types/update.go | 89 +
.../06-solomachine/types/update_test.go | 181 +
light-clients/07-tendermint/doc.go | 5 +
light-clients/07-tendermint/module.go | 10 +
.../07-tendermint/types/client_state.go | 532 ++
.../07-tendermint/types/client_state_test.go | 779 ++
light-clients/07-tendermint/types/codec.go | 27 +
.../07-tendermint/types/consensus_state.go | 55 +
.../types/consensus_state_test.go | 69 +
light-clients/07-tendermint/types/errors.go | 25 +
light-clients/07-tendermint/types/fraction.go | 25 +
light-clients/07-tendermint/types/genesis.go | 21 +
.../07-tendermint/types/genesis_test.go | 38 +
light-clients/07-tendermint/types/header.go | 83 +
.../07-tendermint/types/header_test.go | 82 +
.../07-tendermint/types/misbehaviour.go | 141 +
.../types/misbehaviour_handle.go | 119 +
.../types/misbehaviour_handle_test.go | 372 +
.../07-tendermint/types/misbehaviour_test.go | 244 +
.../07-tendermint/types/proposal_handle.go | 134 +
.../types/proposal_handle_test.go | 387 +
light-clients/07-tendermint/types/store.go | 96 +
.../07-tendermint/types/store_test.go | 113 +
.../07-tendermint/types/tendermint.pb.go | 1917 ++++
.../07-tendermint/types/tendermint_test.go | 95 +
light-clients/07-tendermint/types/update.go | 186 +
.../07-tendermint/types/update_test.go | 281 +
light-clients/07-tendermint/types/upgrade.go | 156 +
.../07-tendermint/types/upgrade_test.go | 512 ++
light-clients/09-localhost/doc.go | 5 +
light-clients/09-localhost/module.go | 10 +
.../09-localhost/types/client_state.go | 346 +
.../09-localhost/types/client_state_test.go | 520 ++
light-clients/09-localhost/types/codec.go | 15 +
light-clients/09-localhost/types/errors.go | 10 +
light-clients/09-localhost/types/keys.go | 6 +
.../09-localhost/types/localhost.pb.go | 369 +
.../09-localhost/types/localhost_test.go | 43 +
proto/ibcgo/apps/transfer/v1/genesis.proto | 19 +
proto/ibcgo/apps/transfer/v1/query.proto | 68 +
proto/ibcgo/apps/transfer/v1/transfer.proto | 45 +
proto/ibcgo/apps/transfer/v1/tx.proto | 48 +
proto/ibcgo/core/channel/v1/channel.proto | 157 +
proto/ibcgo/core/channel/v1/genesis.proto | 42 +
proto/ibcgo/core/channel/v1/query.proto | 389 +
proto/ibcgo/core/channel/v1/tx.proto | 239 +
proto/ibcgo/core/client/v1/client.proto | 96 +
proto/ibcgo/core/client/v1/genesis.proto | 56 +
proto/ibcgo/core/client/v1/query.proto | 143 +
proto/ibcgo/core/client/v1/tx.proto | 107 +
.../ibcgo/core/commitment/v1/commitment.proto | 39 +
.../ibcgo/core/connection/v1/connection.proto | 108 +
proto/ibcgo/core/connection/v1/genesis.proto | 21 +
proto/ibcgo/core/connection/v1/query.proto | 145 +
proto/ibcgo/core/connection/v1/tx.proto | 140 +
proto/ibcgo/core/types/v1/genesis.proto | 29 +
.../lightclients/localhost/v1/localhost.proto | 18 +
.../solomachine/v1/solomachine.proto | 206 +
.../tendermint/v1/tendermint.proto | 146 +
scripts/README.md | 3 +
scripts/linkify_changelog.py | 15 +
scripts/protoc-swagger-gen.sh | 27 +
scripts/protocgen.sh | 40 +
testing/chain.go | 910 ++
testing/chain_test.go | 47 +
testing/coordinator.go | 700 ++
testing/mock/README.md | 6 +
testing/mock/doc.go | 9 +
testing/mock/mock.go | 188 +
testing/mock/privval.go | 50 +
testing/mock/privval_test.go | 44 +
testing/solomachine.go | 321 +
testing/types.go | 44 +
third_party/proto/confio/proofs.proto | 234 +
.../base/query/v1beta1/pagination.proto | 50 +
.../proto/cosmos/base/v1beta1/coin.proto | 40 +
third_party/proto/gogoproto/gogo.proto | 145 +
.../proto/google/api/annotations.proto | 31 +
third_party/proto/google/api/http.proto | 318 +
third_party/proto/google/protobuf/any.proto | 161 +
.../proto/tendermint/crypto/keys.proto | 17 +
.../proto/tendermint/crypto/proof.proto | 41 +
.../proto/tendermint/libs/bits/types.proto | 9 +
.../proto/tendermint/types/types.proto | 157 +
.../proto/tendermint/types/validator.proto | 25 +
.../proto/tendermint/version/types.proto | 24 +
401 files changed, 115750 insertions(+)
create mode 100644 .github/CODEOWNERS
create mode 100644 Makefile
create mode 100644 applications/transfer/client/cli/cli.go
create mode 100644 applications/transfer/client/cli/query.go
create mode 100644 applications/transfer/client/cli/tx.go
create mode 100644 applications/transfer/handler.go
create mode 100644 applications/transfer/handler_test.go
create mode 100644 applications/transfer/keeper/MBT_README.md
create mode 100644 applications/transfer/keeper/encoding.go
create mode 100644 applications/transfer/keeper/genesis.go
create mode 100644 applications/transfer/keeper/genesis_test.go
create mode 100644 applications/transfer/keeper/grpc_query.go
create mode 100644 applications/transfer/keeper/grpc_query_test.go
create mode 100644 applications/transfer/keeper/keeper.go
create mode 100644 applications/transfer/keeper/keeper_test.go
create mode 100644 applications/transfer/keeper/mbt_relay_test.go
create mode 100644 applications/transfer/keeper/model_based_tests/Test5Packets.json
create mode 100644 applications/transfer/keeper/model_based_tests/Test5Packets.tla
create mode 100644 applications/transfer/keeper/model_based_tests/Test5PacketsAllDifferentPass.json
create mode 100644 applications/transfer/keeper/model_based_tests/Test5PacketsAllDifferentPass.tla
create mode 100644 applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementErrorFail.json
create mode 100644 applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementErrorFail.tla
create mode 100644 applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementErrorPass.json
create mode 100644 applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementErrorPass.tla
create mode 100644 applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementResultFail.json
create mode 100644 applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementResultFail.tla
create mode 100644 applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementResultPass.json
create mode 100644 applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementResultPass.tla
create mode 100644 applications/transfer/keeper/model_based_tests/TestOnRecvPacketFail.json
create mode 100644 applications/transfer/keeper/model_based_tests/TestOnRecvPacketFail.tla
create mode 100644 applications/transfer/keeper/model_based_tests/TestOnRecvPacketPass.json
create mode 100644 applications/transfer/keeper/model_based_tests/TestOnRecvPacketPass.tla
create mode 100644 applications/transfer/keeper/model_based_tests/TestOnTimeoutFail.json
create mode 100644 applications/transfer/keeper/model_based_tests/TestOnTimeoutFail.tla
create mode 100644 applications/transfer/keeper/model_based_tests/TestOnTimeoutPass.json
create mode 100644 applications/transfer/keeper/model_based_tests/TestOnTimeoutPass.tla
create mode 100644 applications/transfer/keeper/model_based_tests/TestSendTransferFail.json
create mode 100644 applications/transfer/keeper/model_based_tests/TestSendTransferFail.tla
create mode 100644 applications/transfer/keeper/model_based_tests/TestSendTransferPass.json
create mode 100644 applications/transfer/keeper/model_based_tests/TestSendTransferPass.tla
create mode 100644 applications/transfer/keeper/model_based_tests/TestUnescrowTokens.json
create mode 100644 applications/transfer/keeper/model_based_tests/TestUnescrowTokens.tla
create mode 100644 applications/transfer/keeper/msg_server.go
create mode 100644 applications/transfer/keeper/params.go
create mode 100644 applications/transfer/keeper/params_test.go
create mode 100644 applications/transfer/keeper/relay.go
create mode 100644 applications/transfer/keeper/relay_model/account.tla
create mode 100644 applications/transfer/keeper/relay_model/account_record.tla
create mode 100644 applications/transfer/keeper/relay_model/apalache-to-relay-test.json
create mode 100644 applications/transfer/keeper/relay_model/apalache-to-relay-test2.json
create mode 100644 applications/transfer/keeper/relay_model/denom.tla
create mode 100644 applications/transfer/keeper/relay_model/denom_record.tla
create mode 100644 applications/transfer/keeper/relay_model/denom_record2.tla
create mode 100644 applications/transfer/keeper/relay_model/denom_sequence.tla
create mode 100644 applications/transfer/keeper/relay_model/identifiers.tla
create mode 100644 applications/transfer/keeper/relay_model/relay.tla
create mode 100644 applications/transfer/keeper/relay_model/relay_tests.tla
create mode 100644 applications/transfer/keeper/relay_test.go
create mode 100644 applications/transfer/module.go
create mode 100644 applications/transfer/module_test.go
create mode 100644 applications/transfer/simulation/decoder.go
create mode 100644 applications/transfer/simulation/decoder_test.go
create mode 100644 applications/transfer/simulation/genesis.go
create mode 100644 applications/transfer/simulation/genesis_test.go
create mode 100644 applications/transfer/simulation/params.go
create mode 100644 applications/transfer/simulation/params_test.go
create mode 100644 applications/transfer/spec/01_concepts.md
create mode 100644 applications/transfer/spec/02_state.md
create mode 100644 applications/transfer/spec/03_state_transitions.md
create mode 100644 applications/transfer/spec/04_messages.md
create mode 100644 applications/transfer/spec/05_events.md
create mode 100644 applications/transfer/spec/06_metrics.md
create mode 100644 applications/transfer/spec/07_params.md
create mode 100644 applications/transfer/spec/README.md
create mode 100644 applications/transfer/types/codec.go
create mode 100644 applications/transfer/types/coin.go
create mode 100644 applications/transfer/types/errors.go
create mode 100644 applications/transfer/types/events.go
create mode 100644 applications/transfer/types/expected_keepers.go
create mode 100644 applications/transfer/types/genesis.go
create mode 100644 applications/transfer/types/genesis.pb.go
create mode 100644 applications/transfer/types/genesis_test.go
create mode 100644 applications/transfer/types/keys.go
create mode 100644 applications/transfer/types/keys_test.go
create mode 100644 applications/transfer/types/msgs.go
create mode 100644 applications/transfer/types/msgs_test.go
create mode 100644 applications/transfer/types/packet.go
create mode 100644 applications/transfer/types/packet_test.go
create mode 100644 applications/transfer/types/params.go
create mode 100644 applications/transfer/types/params_test.go
create mode 100644 applications/transfer/types/query.pb.go
create mode 100644 applications/transfer/types/query.pb.gw.go
create mode 100644 applications/transfer/types/trace.go
create mode 100644 applications/transfer/types/trace_test.go
create mode 100644 applications/transfer/types/transfer.pb.go
create mode 100644 applications/transfer/types/tx.pb.go
create mode 100644 apps/transfer/types/genesis.pb.go
create mode 100644 apps/transfer/types/query.pb.go
create mode 100644 apps/transfer/types/query.pb.gw.go
create mode 100644 apps/transfer/types/transfer.pb.go
create mode 100644 apps/transfer/types/tx.pb.go
create mode 100644 buf.yaml
create mode 100644 core/02-client/abci.go
create mode 100644 core/02-client/abci_test.go
create mode 100644 core/02-client/client/cli/cli.go
create mode 100644 core/02-client/client/cli/query.go
create mode 100644 core/02-client/client/cli/tx.go
create mode 100644 core/02-client/client/proposal_handler.go
create mode 100644 core/02-client/client/utils/utils.go
create mode 100644 core/02-client/doc.go
create mode 100644 core/02-client/genesis.go
create mode 100644 core/02-client/keeper/client.go
create mode 100644 core/02-client/keeper/client_test.go
create mode 100644 core/02-client/keeper/encoding.go
create mode 100644 core/02-client/keeper/grpc_query.go
create mode 100644 core/02-client/keeper/grpc_query_test.go
create mode 100644 core/02-client/keeper/keeper.go
create mode 100644 core/02-client/keeper/keeper_test.go
create mode 100644 core/02-client/keeper/params.go
create mode 100644 core/02-client/keeper/params_test.go
create mode 100644 core/02-client/keeper/proposal.go
create mode 100644 core/02-client/keeper/proposal_test.go
create mode 100644 core/02-client/module.go
create mode 100644 core/02-client/proposal_handler.go
create mode 100644 core/02-client/proposal_handler_test.go
create mode 100644 core/02-client/simulation/decoder.go
create mode 100644 core/02-client/simulation/decoder_test.go
create mode 100644 core/02-client/simulation/genesis.go
create mode 100644 core/02-client/types/client.go
create mode 100644 core/02-client/types/client.pb.go
create mode 100644 core/02-client/types/client_test.go
create mode 100644 core/02-client/types/codec.go
create mode 100644 core/02-client/types/codec_test.go
create mode 100644 core/02-client/types/encoding.go
create mode 100644 core/02-client/types/errors.go
create mode 100644 core/02-client/types/events.go
create mode 100644 core/02-client/types/expected_keepers.go
create mode 100644 core/02-client/types/genesis.go
create mode 100644 core/02-client/types/genesis.pb.go
create mode 100644 core/02-client/types/genesis_test.go
create mode 100644 core/02-client/types/height.go
create mode 100644 core/02-client/types/height_test.go
create mode 100644 core/02-client/types/keys.go
create mode 100644 core/02-client/types/keys_test.go
create mode 100644 core/02-client/types/msgs.go
create mode 100644 core/02-client/types/msgs_test.go
create mode 100644 core/02-client/types/params.go
create mode 100644 core/02-client/types/params_test.go
create mode 100644 core/02-client/types/proposal.go
create mode 100644 core/02-client/types/proposal_test.go
create mode 100644 core/02-client/types/query.go
create mode 100644 core/02-client/types/query.pb.go
create mode 100644 core/02-client/types/query.pb.gw.go
create mode 100644 core/02-client/types/tx.pb.go
create mode 100644 core/03-connection/client/cli/cli.go
create mode 100644 core/03-connection/client/cli/query.go
create mode 100644 core/03-connection/client/cli/tx.go
create mode 100644 core/03-connection/client/utils/utils.go
create mode 100644 core/03-connection/genesis.go
create mode 100644 core/03-connection/keeper/grpc_query.go
create mode 100644 core/03-connection/keeper/grpc_query_test.go
create mode 100644 core/03-connection/keeper/handshake.go
create mode 100644 core/03-connection/keeper/handshake_test.go
create mode 100644 core/03-connection/keeper/keeper.go
create mode 100644 core/03-connection/keeper/keeper_test.go
create mode 100644 core/03-connection/keeper/verify.go
create mode 100644 core/03-connection/keeper/verify_test.go
create mode 100644 core/03-connection/module.go
create mode 100644 core/03-connection/simulation/decoder.go
create mode 100644 core/03-connection/simulation/decoder_test.go
create mode 100644 core/03-connection/simulation/genesis.go
create mode 100644 core/03-connection/types/codec.go
create mode 100644 core/03-connection/types/connection.go
create mode 100644 core/03-connection/types/connection.pb.go
create mode 100644 core/03-connection/types/connection_test.go
create mode 100644 core/03-connection/types/errors.go
create mode 100644 core/03-connection/types/events.go
create mode 100644 core/03-connection/types/expected_keepers.go
create mode 100644 core/03-connection/types/genesis.go
create mode 100644 core/03-connection/types/genesis.pb.go
create mode 100644 core/03-connection/types/genesis_test.go
create mode 100644 core/03-connection/types/keys.go
create mode 100644 core/03-connection/types/keys_test.go
create mode 100644 core/03-connection/types/msgs.go
create mode 100644 core/03-connection/types/msgs_test.go
create mode 100644 core/03-connection/types/query.go
create mode 100644 core/03-connection/types/query.pb.go
create mode 100644 core/03-connection/types/query.pb.gw.go
create mode 100644 core/03-connection/types/tx.pb.go
create mode 100644 core/03-connection/types/version.go
create mode 100644 core/03-connection/types/version_test.go
create mode 100644 core/04-channel/client/cli/cli.go
create mode 100644 core/04-channel/client/cli/query.go
create mode 100644 core/04-channel/client/cli/tx.go
create mode 100644 core/04-channel/client/utils/utils.go
create mode 100644 core/04-channel/genesis.go
create mode 100644 core/04-channel/handler.go
create mode 100644 core/04-channel/keeper/grpc_query.go
create mode 100644 core/04-channel/keeper/grpc_query_test.go
create mode 100644 core/04-channel/keeper/handshake.go
create mode 100644 core/04-channel/keeper/handshake_test.go
create mode 100644 core/04-channel/keeper/keeper.go
create mode 100644 core/04-channel/keeper/keeper_test.go
create mode 100644 core/04-channel/keeper/packet.go
create mode 100644 core/04-channel/keeper/packet_test.go
create mode 100644 core/04-channel/keeper/timeout.go
create mode 100644 core/04-channel/keeper/timeout_test.go
create mode 100644 core/04-channel/module.go
create mode 100644 core/04-channel/simulation/decoder.go
create mode 100644 core/04-channel/simulation/decoder_test.go
create mode 100644 core/04-channel/simulation/genesis.go
create mode 100644 core/04-channel/types/channel.go
create mode 100644 core/04-channel/types/channel.pb.go
create mode 100644 core/04-channel/types/channel_test.go
create mode 100644 core/04-channel/types/codec.go
create mode 100644 core/04-channel/types/errors.go
create mode 100644 core/04-channel/types/events.go
create mode 100644 core/04-channel/types/expected_keepers.go
create mode 100644 core/04-channel/types/genesis.go
create mode 100644 core/04-channel/types/genesis.pb.go
create mode 100644 core/04-channel/types/genesis_test.go
create mode 100644 core/04-channel/types/keys.go
create mode 100644 core/04-channel/types/keys_test.go
create mode 100644 core/04-channel/types/msgs.go
create mode 100644 core/04-channel/types/msgs_test.go
create mode 100644 core/04-channel/types/packet.go
create mode 100644 core/04-channel/types/packet_test.go
create mode 100644 core/04-channel/types/query.go
create mode 100644 core/04-channel/types/query.pb.go
create mode 100644 core/04-channel/types/query.pb.gw.go
create mode 100644 core/04-channel/types/tx.pb.go
create mode 100644 core/05-port/keeper/keeper.go
create mode 100644 core/05-port/keeper/keeper_test.go
create mode 100644 core/05-port/types/errors.go
create mode 100644 core/05-port/types/keys.go
create mode 100644 core/05-port/types/module.go
create mode 100644 core/05-port/types/router.go
create mode 100644 core/05-port/types/utils.go
create mode 100644 core/23-commitment/types/bench_test.go
create mode 100644 core/23-commitment/types/codec.go
create mode 100644 core/23-commitment/types/commitment.pb.go
create mode 100644 core/23-commitment/types/commitment_test.go
create mode 100644 core/23-commitment/types/errors.go
create mode 100644 core/23-commitment/types/merkle.go
create mode 100644 core/23-commitment/types/merkle_test.go
create mode 100644 core/23-commitment/types/utils.go
create mode 100644 core/23-commitment/types/utils_test.go
create mode 100644 core/24-host/errors.go
create mode 100644 core/24-host/keys.go
create mode 100644 core/24-host/parse.go
create mode 100644 core/24-host/parse_test.go
create mode 100644 core/24-host/validate.go
create mode 100644 core/24-host/validate_test.go
create mode 100644 core/client/cli/cli.go
create mode 100644 core/client/query.go
create mode 100644 core/exported/channel.go
create mode 100644 core/exported/client.go
create mode 100644 core/exported/commitment.go
create mode 100644 core/exported/connection.go
create mode 100644 core/genesis.go
create mode 100644 core/genesis_test.go
create mode 100644 core/handler.go
create mode 100644 core/keeper/grpc_query.go
create mode 100644 core/keeper/keeper.go
create mode 100644 core/keeper/msg_server.go
create mode 100644 core/keeper/msg_server_test.go
create mode 100644 core/module.go
create mode 100644 core/simulation/decoder.go
create mode 100644 core/simulation/decoder_test.go
create mode 100644 core/simulation/genesis.go
create mode 100644 core/simulation/genesis_test.go
create mode 100644 core/spec/01_concepts.md
create mode 100644 core/spec/02_state.md
create mode 100644 core/spec/03_state_transitions.md
create mode 100644 core/spec/04_messages.md
create mode 100644 core/spec/05_callbacks.md
create mode 100644 core/spec/06_events.md
create mode 100644 core/spec/07_params.md
create mode 100644 core/spec/README.md
create mode 100644 core/types/codec.go
create mode 100644 core/types/genesis.go
create mode 100644 core/types/genesis.pb.go
create mode 100644 core/types/query.go
create mode 100644 docs/README.md
create mode 100644 docs/ibc/proto-docs.md
create mode 100644 docs/protodoc-markdown.tmpl
create mode 100644 go.mod
create mode 100644 go.sum
create mode 100644 light-clients/06-solomachine/doc.go
create mode 100644 light-clients/06-solomachine/module.go
create mode 100644 light-clients/06-solomachine/spec/01_concepts.md
create mode 100644 light-clients/06-solomachine/spec/02_state.md
create mode 100644 light-clients/06-solomachine/spec/03_state_transitions.md
create mode 100644 light-clients/06-solomachine/spec/04_messages.md
create mode 100644 light-clients/06-solomachine/spec/README.md
create mode 100644 light-clients/06-solomachine/types/client_state.go
create mode 100644 light-clients/06-solomachine/types/client_state_test.go
create mode 100644 light-clients/06-solomachine/types/codec.go
create mode 100644 light-clients/06-solomachine/types/codec_test.go
create mode 100644 light-clients/06-solomachine/types/consensus_state.go
create mode 100644 light-clients/06-solomachine/types/consensus_state_test.go
create mode 100644 light-clients/06-solomachine/types/errors.go
create mode 100644 light-clients/06-solomachine/types/header.go
create mode 100644 light-clients/06-solomachine/types/header_test.go
create mode 100644 light-clients/06-solomachine/types/misbehaviour.go
create mode 100644 light-clients/06-solomachine/types/misbehaviour_handle.go
create mode 100644 light-clients/06-solomachine/types/misbehaviour_handle_test.go
create mode 100644 light-clients/06-solomachine/types/misbehaviour_test.go
create mode 100644 light-clients/06-solomachine/types/proof.go
create mode 100644 light-clients/06-solomachine/types/proof_test.go
create mode 100644 light-clients/06-solomachine/types/proposal_handle.go
create mode 100644 light-clients/06-solomachine/types/proposal_handle_test.go
create mode 100644 light-clients/06-solomachine/types/solomachine.go
create mode 100644 light-clients/06-solomachine/types/solomachine.pb.go
create mode 100644 light-clients/06-solomachine/types/solomachine_test.go
create mode 100644 light-clients/06-solomachine/types/update.go
create mode 100644 light-clients/06-solomachine/types/update_test.go
create mode 100644 light-clients/07-tendermint/doc.go
create mode 100644 light-clients/07-tendermint/module.go
create mode 100644 light-clients/07-tendermint/types/client_state.go
create mode 100644 light-clients/07-tendermint/types/client_state_test.go
create mode 100644 light-clients/07-tendermint/types/codec.go
create mode 100644 light-clients/07-tendermint/types/consensus_state.go
create mode 100644 light-clients/07-tendermint/types/consensus_state_test.go
create mode 100644 light-clients/07-tendermint/types/errors.go
create mode 100644 light-clients/07-tendermint/types/fraction.go
create mode 100644 light-clients/07-tendermint/types/genesis.go
create mode 100644 light-clients/07-tendermint/types/genesis_test.go
create mode 100644 light-clients/07-tendermint/types/header.go
create mode 100644 light-clients/07-tendermint/types/header_test.go
create mode 100644 light-clients/07-tendermint/types/misbehaviour.go
create mode 100644 light-clients/07-tendermint/types/misbehaviour_handle.go
create mode 100644 light-clients/07-tendermint/types/misbehaviour_handle_test.go
create mode 100644 light-clients/07-tendermint/types/misbehaviour_test.go
create mode 100644 light-clients/07-tendermint/types/proposal_handle.go
create mode 100644 light-clients/07-tendermint/types/proposal_handle_test.go
create mode 100644 light-clients/07-tendermint/types/store.go
create mode 100644 light-clients/07-tendermint/types/store_test.go
create mode 100644 light-clients/07-tendermint/types/tendermint.pb.go
create mode 100644 light-clients/07-tendermint/types/tendermint_test.go
create mode 100644 light-clients/07-tendermint/types/update.go
create mode 100644 light-clients/07-tendermint/types/update_test.go
create mode 100644 light-clients/07-tendermint/types/upgrade.go
create mode 100644 light-clients/07-tendermint/types/upgrade_test.go
create mode 100644 light-clients/09-localhost/doc.go
create mode 100644 light-clients/09-localhost/module.go
create mode 100644 light-clients/09-localhost/types/client_state.go
create mode 100644 light-clients/09-localhost/types/client_state_test.go
create mode 100644 light-clients/09-localhost/types/codec.go
create mode 100644 light-clients/09-localhost/types/errors.go
create mode 100644 light-clients/09-localhost/types/keys.go
create mode 100644 light-clients/09-localhost/types/localhost.pb.go
create mode 100644 light-clients/09-localhost/types/localhost_test.go
create mode 100644 proto/ibcgo/apps/transfer/v1/genesis.proto
create mode 100644 proto/ibcgo/apps/transfer/v1/query.proto
create mode 100644 proto/ibcgo/apps/transfer/v1/transfer.proto
create mode 100644 proto/ibcgo/apps/transfer/v1/tx.proto
create mode 100644 proto/ibcgo/core/channel/v1/channel.proto
create mode 100644 proto/ibcgo/core/channel/v1/genesis.proto
create mode 100644 proto/ibcgo/core/channel/v1/query.proto
create mode 100644 proto/ibcgo/core/channel/v1/tx.proto
create mode 100644 proto/ibcgo/core/client/v1/client.proto
create mode 100644 proto/ibcgo/core/client/v1/genesis.proto
create mode 100644 proto/ibcgo/core/client/v1/query.proto
create mode 100644 proto/ibcgo/core/client/v1/tx.proto
create mode 100644 proto/ibcgo/core/commitment/v1/commitment.proto
create mode 100644 proto/ibcgo/core/connection/v1/connection.proto
create mode 100644 proto/ibcgo/core/connection/v1/genesis.proto
create mode 100644 proto/ibcgo/core/connection/v1/query.proto
create mode 100644 proto/ibcgo/core/connection/v1/tx.proto
create mode 100644 proto/ibcgo/core/types/v1/genesis.proto
create mode 100644 proto/ibcgo/lightclients/localhost/v1/localhost.proto
create mode 100644 proto/ibcgo/lightclients/solomachine/v1/solomachine.proto
create mode 100644 proto/ibcgo/lightclients/tendermint/v1/tendermint.proto
create mode 100644 scripts/README.md
create mode 100644 scripts/linkify_changelog.py
create mode 100755 scripts/protoc-swagger-gen.sh
create mode 100755 scripts/protocgen.sh
create mode 100644 testing/chain.go
create mode 100644 testing/chain_test.go
create mode 100644 testing/coordinator.go
create mode 100644 testing/mock/README.md
create mode 100644 testing/mock/doc.go
create mode 100644 testing/mock/mock.go
create mode 100644 testing/mock/privval.go
create mode 100644 testing/mock/privval_test.go
create mode 100644 testing/solomachine.go
create mode 100644 testing/types.go
create mode 100644 third_party/proto/confio/proofs.proto
create mode 100644 third_party/proto/cosmos/base/query/v1beta1/pagination.proto
create mode 100644 third_party/proto/cosmos/base/v1beta1/coin.proto
create mode 100644 third_party/proto/gogoproto/gogo.proto
create mode 100644 third_party/proto/google/api/annotations.proto
create mode 100644 third_party/proto/google/api/http.proto
create mode 100644 third_party/proto/google/protobuf/any.proto
create mode 100644 third_party/proto/tendermint/crypto/keys.proto
create mode 100644 third_party/proto/tendermint/crypto/proof.proto
create mode 100644 third_party/proto/tendermint/libs/bits/types.proto
create mode 100644 third_party/proto/tendermint/types/types.proto
create mode 100644 third_party/proto/tendermint/types/validator.proto
create mode 100644 third_party/proto/tendermint/version/types.proto
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
new file mode 100644
index 00000000..2e5239c6
--- /dev/null
+++ b/.github/CODEOWNERS
@@ -0,0 +1,3 @@
+# CODEOWNERS: https://help.github.com/articles/about-codeowners/
+
+* @colin-axner @fedekunze @AdityaSripal
diff --git a/Makefile b/Makefile
new file mode 100644
index 00000000..7ed1d5ab
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,475 @@
+#!/usr/bin/make -f
+
+PACKAGES_NOSIMULATION=$(shell go list ./... | grep -v '/simulation')
+PACKAGES_SIMTEST=$(shell go list ./... | grep '/simulation')
+VERSION := $(shell echo $(shell git describe --always) | sed 's/^v//')
+COMMIT := $(shell git log -1 --format='%H')
+LEDGER_ENABLED ?= true
+BINDIR ?= $(GOPATH)/bin
+BUILDDIR ?= $(CURDIR)/build
+SIMAPP = ./simapp
+MOCKS_DIR = $(CURDIR)/tests/mocks
+HTTPS_GIT := https://github.com/cosmos/ibc-go.git
+DOCKER := $(shell which docker)
+DOCKER_BUF := $(DOCKER) run --rm -v $(CURDIR):/workspace --workdir /workspace bufbuild/buf
+
+export GO111MODULE = on
+
+# process build tags
+
+build_tags = netgo
+ifeq ($(LEDGER_ENABLED),true)
+ ifeq ($(OS),Windows_NT)
+ GCCEXE = $(shell where gcc.exe 2> NUL)
+ ifeq ($(GCCEXE),)
+ $(error gcc.exe not installed for ledger support, please install or set LEDGER_ENABLED=false)
+ else
+ build_tags += ledger
+ endif
+ else
+ UNAME_S = $(shell uname -s)
+ ifeq ($(UNAME_S),OpenBSD)
+ $(warning OpenBSD detected, disabling ledger support (https://github.com/cosmos/cosmos-sdk/issues/1988))
+ else
+ GCC = $(shell command -v gcc 2> /dev/null)
+ ifeq ($(GCC),)
+ $(error gcc not installed for ledger support, please install or set LEDGER_ENABLED=false)
+ else
+ build_tags += ledger
+ endif
+ endif
+ endif
+endif
+
+ifeq (cleveldb,$(findstring cleveldb,$(COSMOS_BUILD_OPTIONS)))
+ build_tags += gcc
+endif
+build_tags += $(BUILD_TAGS)
+build_tags := $(strip $(build_tags))
+
+whitespace :=
+whitespace += $(whitespace)
+comma := ,
+build_tags_comma_sep := $(subst $(whitespace),$(comma),$(build_tags))
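+# (the two whitespace assignments above turn `whitespace` into a single literal
+# space, because `+=` joins with a space; build_tags_comma_sep then swaps each
+# space in build_tags for a comma so the list can be embedded in the BuildTags
+# ldflag below)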
+
+# process linker flags
+
+ldflags = -X github.com/cosmos/cosmos-sdk/version.Name=sim \
+ -X github.com/cosmos/cosmos-sdk/version.AppName=simd \
+ -X github.com/cosmos/cosmos-sdk/version.Version=$(VERSION) \
+ -X github.com/cosmos/cosmos-sdk/version.Commit=$(COMMIT) \
+ -X "github.com/cosmos/cosmos-sdk/version.BuildTags=$(build_tags_comma_sep)"
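+# (each -X flag overwrites a string variable in the SDK's version package at link
+# time, e.g. version.Name becomes "sim" and version.Version the stripped output of
+# `git describe` computed above)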
+
+# DB backend selection
+ifeq (cleveldb,$(findstring cleveldb,$(COSMOS_BUILD_OPTIONS)))
+ ldflags += -X github.com/cosmos/cosmos-sdk/types.DBBackend=cleveldb
+endif
+ifeq (badgerdb,$(findstring badgerdb,$(COSMOS_BUILD_OPTIONS)))
+ ldflags += -X github.com/cosmos/cosmos-sdk/types.DBBackend=badgerdb
+endif
+# handle rocksdb
+ifeq (rocksdb,$(findstring rocksdb,$(COSMOS_BUILD_OPTIONS)))
+ CGO_ENABLED=1
+ BUILD_TAGS += rocksdb
+ ldflags += -X github.com/cosmos/cosmos-sdk/types.DBBackend=rocksdb
+endif
+# handle boltdb
+ifeq (boltdb,$(findstring boltdb,$(COSMOS_BUILD_OPTIONS)))
+ BUILD_TAGS += boltdb
+ ldflags += -X github.com/cosmos/cosmos-sdk/types.DBBackend=boltdb
+endif
+
+ifeq (,$(findstring nostrip,$(COSMOS_BUILD_OPTIONS)))
+ ldflags += -w -s
+endif
+ldflags += $(LDFLAGS)
+ldflags := $(strip $(ldflags))
+
+BUILD_FLAGS := -tags "$(build_tags)" -ldflags '$(ldflags)'
+# check for nostrip option
+ifeq (,$(findstring nostrip,$(COSMOS_BUILD_OPTIONS)))
+ BUILD_FLAGS += -trimpath
+endif
+
+all: tools build lint test
+
+# The below include contains the tools and runsim targets.
+#include contrib/devtools/Makefile
+
+###############################################################################
+### Build ###
+###############################################################################
+
+BUILD_TARGETS := build install
+
+build: BUILD_ARGS=-o $(BUILDDIR)/
+build-linux:
+ GOOS=linux GOARCH=amd64 LEDGER_ENABLED=false $(MAKE) build
+
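+# $@ in the rule below expands to the invoked target name, so `make build` runs
+# `go build` (with -o $(BUILDDIR)/ via BUILD_ARGS) and `make install` runs
+# `go install`, both with the same BUILD_FLAGS.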
+$(BUILD_TARGETS): go.sum $(BUILDDIR)/
+ go $@ -mod=readonly $(BUILD_FLAGS) $(BUILD_ARGS) ./...
+
+$(BUILDDIR)/:
+ mkdir -p $(BUILDDIR)/
+
+build-simd-all: go.sum
+ $(DOCKER) rm latest-build || true
+ $(DOCKER) run --volume=$(CURDIR):/sources:ro \
+ --env TARGET_PLATFORMS='linux/amd64 darwin/amd64 linux/arm64 windows/amd64' \
+ --env APP=simd \
+ --env VERSION=$(VERSION) \
+ --env COMMIT=$(COMMIT) \
+ --env LEDGER_ENABLED=$(LEDGER_ENABLED) \
+ --name latest-build cosmossdk/rbuilder:latest
+ $(DOCKER) cp -a latest-build:/home/builder/artifacts/ $(CURDIR)/
+
+build-simd-linux: go.sum $(BUILDDIR)/
+ $(DOCKER) rm latest-build || true
+ $(DOCKER) run --volume=$(CURDIR):/sources:ro \
+ --env TARGET_PLATFORMS='linux/amd64' \
+ --env APP=simd \
+ --env VERSION=$(VERSION) \
+ --env COMMIT=$(COMMIT) \
+ --env LEDGER_ENABLED=false \
+ --name latest-build cosmossdk/rbuilder:latest
+ $(DOCKER) cp -a latest-build:/home/builder/artifacts/ $(CURDIR)/
+ cp artifacts/simd-*-linux-amd64 $(BUILDDIR)/simd
+
+cosmovisor:
+ $(MAKE) -C cosmovisor cosmovisor
+
+.PHONY: build build-linux build-simd-all build-simd-linux cosmovisor
+
+mocks: $(MOCKS_DIR)
+ mockgen -source=client/account_retriever.go -package mocks -destination tests/mocks/account_retriever.go
+ mockgen -package mocks -destination tests/mocks/tendermint_tm_db_DB.go github.com/tendermint/tm-db DB
+ mockgen -source=types/module/module.go -package mocks -destination tests/mocks/types_module_module.go
+ mockgen -source=types/invariant.go -package mocks -destination tests/mocks/types_invariant.go
+ mockgen -source=types/router.go -package mocks -destination tests/mocks/types_router.go
+ mockgen -source=types/handler.go -package mocks -destination tests/mocks/types_handler.go
+ mockgen -package mocks -destination tests/mocks/grpc_server.go github.com/gogo/protobuf/grpc Server
+ mockgen -package mocks -destination tests/mocks/tendermint_tendermint_libs_log_DB.go github.com/tendermint/tendermint/libs/log Logger
+.PHONY: mocks
+
+$(MOCKS_DIR):
+ mkdir -p $(MOCKS_DIR)
+
+distclean: clean tools-clean
+clean:
+ rm -rf \
+ $(BUILDDIR)/ \
+ artifacts/ \
+ tmp-swagger-gen/
+
+.PHONY: distclean clean
+
+###############################################################################
+### Tools & Dependencies ###
+###############################################################################
+
+go.sum: go.mod
+ echo "Ensure dependencies have not been modified ..." >&2
+ go mod verify
+ go mod tidy
+
+###############################################################################
+### Documentation ###
+###############################################################################
+
+update-swagger-docs: statik
+ $(BINDIR)/statik -src=client/docs/swagger-ui -dest=client/docs -f -m
+ @if [ -n "$$(git status --porcelain)" ]; then \
+ echo "\033[91mSwagger docs are out of sync!!!\033[0m";\
+ exit 1;\
+ else \
+ echo "\033[92mSwagger docs are in sync\033[0m";\
+ fi
+.PHONY: update-swagger-docs
+
+godocs:
+ @echo "--> Wait a few seconds and visit http://localhost:6060/pkg/github.com/cosmos/cosmos-sdk/types"
+ godoc -http=:6060
+
+# This builds a docs site for each branch/tag in `./docs/versions`
+# and copies each site to a version prefixed path. The last entry inside
+# the `versions` file will be the default root index.html.
+build-docs:
+ @cd docs && \
+ while read -r branch path_prefix; do \
+ (git checkout $${branch} && npm install && VUEPRESS_BASE="/$${path_prefix}/" npm run build) ; \
+ mkdir -p ~/output/$${path_prefix} ; \
+ cp -r .vuepress/dist/* ~/output/$${path_prefix}/ ; \
+ cp ~/output/$${path_prefix}/index.html ~/output ; \
+ done < versions ;
+.PHONY: build-docs
+
+###############################################################################
+### Tests & Simulation ###
+###############################################################################
+
+test: test-unit
+test-all: test-unit test-ledger-mock test-race test-cover
+
+TEST_PACKAGES=./...
+TEST_TARGETS := test-unit test-unit-amino test-unit-proto test-ledger-mock test-race test-ledger
+
+# Test runs-specific rules. To add a new test target, just add
+# a new rule, customise ARGS or TEST_PACKAGES ad libitum, and
+# append the new rule to the TEST_TARGETS list (see the commented-out example below).
+test-unit: ARGS=-tags='cgo ledger test_ledger_mock norace'
+test-unit-amino: ARGS=-tags='ledger test_ledger_mock test_amino norace'
+test-ledger: ARGS=-tags='cgo ledger norace'
+test-ledger-mock: ARGS=-tags='ledger test_ledger_mock norace'
+test-race: ARGS=-race -tags='cgo ledger test_ledger_mock'
+test-race: TEST_PACKAGES=$(PACKAGES_NOSIMULATION)
+$(TEST_TARGETS): run-tests
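+# Example of adding a new target as described above (hypothetical, shown commented
+# out): customise ARGS / TEST_PACKAGES and append the name to TEST_TARGETS.
+# test-channel: ARGS=-tags='cgo ledger test_ledger_mock norace'
+# test-channel: TEST_PACKAGES=./core/04-channel/...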
+
+# check-* compiles and collects tests without running them
+# note: go test -c doesn't support multiple packages yet (https://github.com/golang/go/issues/15513)
+CHECK_TEST_TARGETS := check-test-unit check-test-unit-amino
+check-test-unit: ARGS=-tags='cgo ledger test_ledger_mock norace'
+check-test-unit-amino: ARGS=-tags='ledger test_ledger_mock test_amino norace'
+$(CHECK_TEST_TARGETS): EXTRA_ARGS=-run=none
+$(CHECK_TEST_TARGETS): run-tests
+
+run-tests:
+ifneq (,$(shell which tparse 2>/dev/null))
+ go test -mod=readonly -json $(ARGS) $(EXTRA_ARGS) $(TEST_PACKAGES) | tparse
+else
+ go test -mod=readonly $(ARGS) $(EXTRA_ARGS) $(TEST_PACKAGES)
+endif
+
+.PHONY: run-tests test test-all $(TEST_TARGETS)
+
+test-sim-nondeterminism:
+ @echo "Running non-determinism test..."
+ @go test -mod=readonly $(SIMAPP) -run TestAppStateDeterminism -Enabled=true \
+ -NumBlocks=100 -BlockSize=200 -Commit=true -Period=0 -v -timeout 24h
+
+test-sim-custom-genesis-fast:
+ @echo "Running custom genesis simulation..."
+ @echo "By default, ${HOME}/.gaiad/config/genesis.json will be used."
+ @go test -mod=readonly $(SIMAPP) -run TestFullAppSimulation -Genesis=${HOME}/.gaiad/config/genesis.json \
+ -Enabled=true -NumBlocks=100 -BlockSize=200 -Commit=true -Seed=99 -Period=5 -v -timeout 24h
+
+test-sim-import-export: runsim
+ @echo "Running application import/export simulation. This may take several minutes..."
+ @$(BINDIR)/runsim -Jobs=4 -SimAppPkg=$(SIMAPP) -ExitOnFail 50 5 TestAppImportExport
+
+test-sim-after-import: runsim
+ @echo "Running application simulation-after-import. This may take several minutes..."
+ @$(BINDIR)/runsim -Jobs=4 -SimAppPkg=$(SIMAPP) -ExitOnFail 50 5 TestAppSimulationAfterImport
+
+test-sim-custom-genesis-multi-seed: runsim
+ @echo "Running multi-seed custom genesis simulation..."
+ @echo "By default, ${HOME}/.gaiad/config/genesis.json will be used."
+ @$(BINDIR)/runsim -Genesis=${HOME}/.gaiad/config/genesis.json -SimAppPkg=$(SIMAPP) -ExitOnFail 400 5 TestFullAppSimulation
+
+test-sim-multi-seed-long: runsim
+ @echo "Running long multi-seed application simulation. This may take awhile!"
+ @$(BINDIR)/runsim -Jobs=4 -SimAppPkg=$(SIMAPP) -ExitOnFail 500 50 TestFullAppSimulation
+
+test-sim-multi-seed-short: runsim
+ @echo "Running short multi-seed application simulation. This may take awhile!"
+ @$(BINDIR)/runsim -Jobs=4 -SimAppPkg=$(SIMAPP) -ExitOnFail 50 10 TestFullAppSimulation
+
+test-sim-benchmark-invariants:
+ @echo "Running simulation invariant benchmarks..."
+ @go test -mod=readonly $(SIMAPP) -benchmem -bench=BenchmarkInvariants -run=^$ \
+ -Enabled=true -NumBlocks=1000 -BlockSize=200 \
+ -Period=1 -Commit=true -Seed=57 -v -timeout 24h
+
+.PHONY: \
+test-sim-nondeterminism \
+test-sim-custom-genesis-fast \
+test-sim-import-export \
+test-sim-after-import \
+test-sim-custom-genesis-multi-seed \
+test-sim-multi-seed-short \
+test-sim-multi-seed-long \
+test-sim-benchmark-invariants
+
+SIM_NUM_BLOCKS ?= 500
+SIM_BLOCK_SIZE ?= 200
+SIM_COMMIT ?= true
+
+test-sim-benchmark:
+ @echo "Running application benchmark for numBlocks=$(SIM_NUM_BLOCKS), blockSize=$(SIM_BLOCK_SIZE). This may take awhile!"
+ @go test -mod=readonly -benchmem -run=^$$ $(SIMAPP) -bench ^BenchmarkFullAppSimulation$$ \
+ -Enabled=true -NumBlocks=$(SIM_NUM_BLOCKS) -BlockSize=$(SIM_BLOCK_SIZE) -Commit=$(SIM_COMMIT) -timeout 24h
+
+test-sim-profile:
+ @echo "Running application benchmark for numBlocks=$(SIM_NUM_BLOCKS), blockSize=$(SIM_BLOCK_SIZE). This may take awhile!"
+ @go test -mod=readonly -benchmem -run=^$$ $(SIMAPP) -bench ^BenchmarkFullAppSimulation$$ \
+ -Enabled=true -NumBlocks=$(SIM_NUM_BLOCKS) -BlockSize=$(SIM_BLOCK_SIZE) -Commit=$(SIM_COMMIT) -timeout 24h -cpuprofile cpu.out -memprofile mem.out
+
+.PHONY: test-sim-profile test-sim-benchmark
+
+test-cover:
+ @export VERSION=$(VERSION); bash -x contrib/test_cover.sh
+.PHONY: test-cover
+
+test-rosetta:
+ docker build -t rosetta-ci:latest -f contrib/rosetta/node/Dockerfile .
+ docker-compose -f contrib/rosetta/docker-compose.yaml up --abort-on-container-exit --exit-code-from test_rosetta --build
+.PHONY: test-rosetta
+
+benchmark:
+ @go test -mod=readonly -bench=. $(PACKAGES_NOSIMULATION)
+.PHONY: benchmark
+
+###############################################################################
+### Linting ###
+###############################################################################
+
+lint:
+ golangci-lint run --out-format=tab
+
+lint-fix:
+ golangci-lint run --fix --out-format=tab --issues-exit-code=0
+.PHONY: lint lint-fix
+
+format:
+ find . -name '*.go' -type f -not -path "./vendor*" -not -path "*.git*" -not -path "./client/docs/statik/statik.go" -not -path "./tests/mocks/*" -not -name '*.pb.go' | xargs gofmt -w -s
+ find . -name '*.go' -type f -not -path "./vendor*" -not -path "*.git*" -not -path "./client/docs/statik/statik.go" -not -path "./tests/mocks/*" -not -name '*.pb.go' | xargs misspell -w
+ find . -name '*.go' -type f -not -path "./vendor*" -not -path "*.git*" -not -path "./client/docs/statik/statik.go" -not -path "./tests/mocks/*" -not -name '*.pb.go' | xargs goimports -w -local github.com/cosmos/cosmos-sdk
+.PHONY: format
+
+###############################################################################
+### Devdoc ###
+###############################################################################
+
+DEVDOC_SAVE = docker commit `docker ps -a -n 1 -q` devdoc:local
+
+devdoc-init:
+ $(DOCKER) run -it -v "$(CURDIR):/go/src/github.com/cosmos/cosmos-sdk" -w "/go/src/github.com/cosmos/cosmos-sdk" tendermint/devdoc echo
+ # TODO make this safer
+ $(call DEVDOC_SAVE)
+
+devdoc:
+ $(DOCKER) run -it -v "$(CURDIR):/go/src/github.com/cosmos/cosmos-sdk" -w "/go/src/github.com/cosmos/cosmos-sdk" devdoc:local bash
+
+devdoc-save:
+ # TODO make this safer
+ $(call DEVDOC_SAVE)
+
+devdoc-clean:
+ docker rmi -f $$(docker images -f "dangling=true" -q)
+
+devdoc-update:
+ docker pull tendermint/devdoc
+
+.PHONY: devdoc devdoc-clean devdoc-init devdoc-save devdoc-update
+
+###############################################################################
+### Protobuf ###
+###############################################################################
+
+proto-all: proto-format proto-lint proto-gen
+
+proto-gen:
+ @echo "Generating Protobuf files"
+ $(DOCKER) run --rm -v $(CURDIR):/workspace --workdir /workspace tendermintdev/sdk-proto-gen sh ./scripts/protocgen.sh
+
+proto-format:
+ @echo "Formatting Protobuf files"
+ $(DOCKER) run --rm -v $(CURDIR):/workspace \
+ --workdir /workspace tendermintdev/docker-build-proto \
+	find ./ -not -path "./third_party/*" -name "*.proto" -exec clang-format -i {} \;
+
+proto-swagger-gen:
+ @./scripts/protoc-swagger-gen.sh
+
+proto-lint:
+ @$(DOCKER_BUF) check lint --error-format=json
+
+proto-check-breaking:
+ @$(DOCKER_BUF) check breaking --against-input $(HTTPS_GIT)#branch=main
+
+TM_URL = https://raw.githubusercontent.com/tendermint/tendermint/v0.34.0-rc6/proto/tendermint
+GOGO_PROTO_URL = https://raw.githubusercontent.com/regen-network/protobuf/cosmos
+CONFIO_URL = https://raw.githubusercontent.com/confio/ics23/v0.6.3
+SDK_PROTO_URL = https://raw.githubusercontent.com/cosmos/cosmos-sdk/v0.41.0/proto/cosmos
+
+TM_CRYPTO_TYPES = third_party/proto/tendermint/crypto
+TM_ABCI_TYPES = third_party/proto/tendermint/abci
+TM_TYPES = third_party/proto/tendermint/types
+TM_VERSION = third_party/proto/tendermint/version
+TM_LIBS = third_party/proto/tendermint/libs/bits
+TM_P2P = third_party/proto/tendermint/p2p
+
+SDK_QUERY = third_party/proto/cosmos/base/query/v1beta1
+SDK_BASE = third_party/proto/cosmos/base/v1beta1
+
+GOGO_PROTO_TYPES = third_party/proto/gogoproto
+CONFIO_TYPES = third_party/proto/confio
+
+proto-update-deps:
+ @mkdir -p $(GOGO_PROTO_TYPES)
+ @curl -sSL $(GOGO_PROTO_URL)/gogoproto/gogo.proto > $(GOGO_PROTO_TYPES)/gogo.proto
+
+ @mkdir -p $(SDK_QUERY)
+ @curl -sSL $(SDK_PROTO_URL)/base/query/v1beta1/pagination.proto > $(SDK_QUERY)/pagination.proto
+
+ @mkdir -p $(SDK_BASE)
+ @curl -sSL $(SDK_PROTO_URL)/base/v1beta1/coin.proto > $(SDK_BASE)/coin.proto
+
+## Importing of tendermint protobuf definitions currently requires the
+## use of `sed` in order to build properly with cosmos-sdk's proto file layout
+## (which is the standard Buf.build FILE_LAYOUT)
+## Issue link: https://github.com/tendermint/tendermint/issues/5021
+ @mkdir -p $(TM_TYPES)
+ @curl -sSL $(TM_URL)/types/types.proto > $(TM_TYPES)/types.proto
+ @curl -sSL $(TM_URL)/types/validator.proto > $(TM_TYPES)/validator.proto
+
+ @mkdir -p $(TM_VERSION)
+ @curl -sSL $(TM_URL)/version/types.proto > $(TM_VERSION)/types.proto
+
+ @mkdir -p $(TM_LIBS)
+ @curl -sSL $(TM_URL)/libs/bits/types.proto > $(TM_LIBS)/types.proto
+
+ @mkdir -p $(TM_CRYPTO_TYPES)
+ @curl -sSL $(TM_URL)/crypto/proof.proto > $(TM_CRYPTO_TYPES)/proof.proto
+ @curl -sSL $(TM_URL)/crypto/keys.proto > $(TM_CRYPTO_TYPES)/keys.proto
+
+ @mkdir -p $(CONFIO_TYPES)
+ @curl -sSL $(CONFIO_URL)/proofs.proto > $(CONFIO_TYPES)/proofs.proto
+
+## insert go package option into proofs.proto file
+## Issue link: https://github.com/confio/ics23/issues/32
+ @sed -i '4ioption go_package = "github.com/confio/ics23/go";' $(CONFIO_TYPES)/proofs.proto
+
+.PHONY: proto-all proto-gen proto-gen-any proto-swagger-gen proto-format proto-lint proto-check-breaking proto-update-deps
+
+###############################################################################
+### Localnet ###
+###############################################################################
+
+# Run a 4-node testnet locally
+localnet-start: build-linux localnet-stop
+ $(if $(shell $(DOCKER) inspect -f '{{ .Id }}' cosmossdk/simd-env 2>/dev/null),$(info found image cosmossdk/simd-env),$(MAKE) -C contrib/images simd-env)
+ if ! [ -f build/node0/simd/config/genesis.json ]; then $(DOCKER) run --rm \
+ --user $(shell id -u):$(shell id -g) \
+ -v $(BUILDDIR):/simd:Z \
+ -v /etc/group:/etc/group:ro \
+ -v /etc/passwd:/etc/passwd:ro \
+ -v /etc/shadow:/etc/shadow:ro \
+ cosmossdk/simd-env testnet --v 4 -o . --starting-ip-address 192.168.10.2 --keyring-backend=test ; fi
+ docker-compose up -d
+
+localnet-stop:
+ docker-compose down
+
+.PHONY: localnet-start localnet-stop
+
+###############################################################################
+### rosetta ###
+###############################################################################
+# builds rosetta test data dir
+rosetta-data:
+ -docker container rm data_dir_build
+ docker build -t rosetta-ci:latest -f contrib/rosetta/node/Dockerfile .
+ docker run --name data_dir_build -t rosetta-ci:latest sh /rosetta/data.sh
+ docker cp data_dir_build:/tmp/data.tar.gz "$(CURDIR)/contrib/rosetta/node/data.tar.gz"
+ docker container rm data_dir_build
+.PHONY: rosetta-data
diff --git a/applications/transfer/client/cli/cli.go b/applications/transfer/client/cli/cli.go
new file mode 100644
index 00000000..d3ca8341
--- /dev/null
+++ b/applications/transfer/client/cli/cli.go
@@ -0,0 +1,42 @@
+package cli
+
+import (
+ "github.com/spf13/cobra"
+
+ "github.com/cosmos/cosmos-sdk/client"
+)
+
+// GetQueryCmd returns the query commands for IBC fungible token transfer
+func GetQueryCmd() *cobra.Command {
+ queryCmd := &cobra.Command{
+ Use: "ibc-transfer",
+ Short: "IBC fungible token transfer query subcommands",
+ DisableFlagParsing: true,
+ SuggestionsMinimumDistance: 2,
+ }
+
+ queryCmd.AddCommand(
+ GetCmdQueryDenomTrace(),
+ GetCmdQueryDenomTraces(),
+ GetCmdParams(),
+ )
+
+ return queryCmd
+}
+
+// NewTxCmd returns the transaction commands for IBC fungible token transfer
+func NewTxCmd() *cobra.Command {
+ txCmd := &cobra.Command{
+ Use: "ibc-transfer",
+ Short: "IBC fungible token transfer transaction subcommands",
+ DisableFlagParsing: true,
+ SuggestionsMinimumDistance: 2,
+ RunE: client.ValidateCmd,
+ }
+
+ txCmd.AddCommand(
+ NewTransferTxCmd(),
+ )
+
+ return txCmd
+}
diff --git a/applications/transfer/client/cli/query.go b/applications/transfer/client/cli/query.go
new file mode 100644
index 00000000..b9658e05
--- /dev/null
+++ b/applications/transfer/client/cli/query.go
@@ -0,0 +1,108 @@
+package cli
+
+import (
+ "fmt"
+
+ "github.com/spf13/cobra"
+
+ "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/client/flags"
+ "github.com/cosmos/cosmos-sdk/version"
+ "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types"
+)
+
+// GetCmdQueryDenomTrace defines the command to query a denomination trace from a given hash.
+func GetCmdQueryDenomTrace() *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "denom-trace [hash]",
+ Short: "Query the denom trace info from a given trace hash",
+ Long: "Query the denom trace info from a given trace hash",
+ Example: fmt.Sprintf("%s query ibc-transfer denom-trace [hash]", version.AppName),
+ Args: cobra.ExactArgs(1),
+ RunE: func(cmd *cobra.Command, args []string) error {
+ clientCtx, err := client.GetClientQueryContext(cmd)
+ if err != nil {
+ return err
+ }
+ queryClient := types.NewQueryClient(clientCtx)
+
+ req := &types.QueryDenomTraceRequest{
+ Hash: args[0],
+ }
+
+ res, err := queryClient.DenomTrace(cmd.Context(), req)
+ if err != nil {
+ return err
+ }
+
+ return clientCtx.PrintProto(res)
+ },
+ }
+
+ flags.AddQueryFlagsToCmd(cmd)
+ return cmd
+}
+
+// GetCmdQueryDenomTraces defines the command to query all the denomination trace infos
+// that this chain maintains.
+func GetCmdQueryDenomTraces() *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "denom-traces",
+ Short: "Query the trace info for all token denominations",
+ Long: "Query the trace info for all token denominations",
+ Example: fmt.Sprintf("%s query ibc-transfer denom-traces", version.AppName),
+ Args: cobra.NoArgs,
+ RunE: func(cmd *cobra.Command, _ []string) error {
+ clientCtx, err := client.GetClientQueryContext(cmd)
+ if err != nil {
+ return err
+ }
+ queryClient := types.NewQueryClient(clientCtx)
+
+ pageReq, err := client.ReadPageRequest(cmd.Flags())
+ if err != nil {
+ return err
+ }
+
+ req := &types.QueryDenomTracesRequest{
+ Pagination: pageReq,
+ }
+
+ res, err := queryClient.DenomTraces(cmd.Context(), req)
+ if err != nil {
+ return err
+ }
+
+ return clientCtx.PrintProto(res)
+ },
+ }
+ flags.AddQueryFlagsToCmd(cmd)
+ flags.AddPaginationFlagsToCmd(cmd, "denominations trace")
+
+ return cmd
+}
+
+// GetCmdParams returns the command handler for ibc-transfer parameter querying.
+func GetCmdParams() *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "params",
+ Short: "Query the current ibc-transfer parameters",
+ Long: "Query the current ibc-transfer parameters",
+ Args: cobra.NoArgs,
+ Example: fmt.Sprintf("%s query ibc-transfer params", version.AppName),
+ RunE: func(cmd *cobra.Command, _ []string) error {
+ clientCtx, err := client.GetClientQueryContext(cmd)
+ if err != nil {
+ return err
+ }
+ queryClient := types.NewQueryClient(clientCtx)
+
+			res, err := queryClient.Params(cmd.Context(), &types.QueryParamsRequest{})
+			if err != nil {
+				return err
+			}
+			return clientCtx.PrintProto(res.Params)
+ },
+ }
+
+ flags.AddQueryFlagsToCmd(cmd)
+
+ return cmd
+}
diff --git a/applications/transfer/client/cli/tx.go b/applications/transfer/client/cli/tx.go
new file mode 100644
index 00000000..1f9e92f6
--- /dev/null
+++ b/applications/transfer/client/cli/tx.go
@@ -0,0 +1,117 @@
+package cli
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/spf13/cobra"
+
+ "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/client/flags"
+ "github.com/cosmos/cosmos-sdk/client/tx"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/types/msgservice"
+ "github.com/cosmos/cosmos-sdk/version"
+ "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types"
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ channelutils "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/client/utils"
+)
+
+const (
+ flagPacketTimeoutHeight = "packet-timeout-height"
+ flagPacketTimeoutTimestamp = "packet-timeout-timestamp"
+ flagAbsoluteTimeouts = "absolute-timeouts"
+)
+
+// NewTransferTxCmd returns the command to create a NewMsgTransfer transaction
+func NewTransferTxCmd() *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "transfer [src-port] [src-channel] [receiver] [amount]",
+ Short: "Transfer a fungible token through IBC",
+ Long: strings.TrimSpace(`Transfer a fungible token through IBC. Timeouts can be specified
+as absolute or relative using the "absolute-timeouts" flag. Timeout height can be set by passing in the height string
+in the form {revision}-{height} using the "packet-timeout-height" flag. Relative timeouts are added to
+the block height and block timestamp queried from the latest consensus state corresponding
+to the counterparty channel. Any timeout set to 0 is disabled.`),
+ Example: fmt.Sprintf("%s tx ibc-transfer transfer [src-port] [src-channel] [receiver] [amount]", version.AppName),
+ Args: cobra.ExactArgs(4),
+ RunE: func(cmd *cobra.Command, args []string) error {
+ clientCtx, err := client.GetClientTxContext(cmd)
+ if err != nil {
+ return err
+ }
+ sender := clientCtx.GetFromAddress()
+ srcPort := args[0]
+ srcChannel := args[1]
+ receiver := args[2]
+
+ coin, err := sdk.ParseCoinNormalized(args[3])
+ if err != nil {
+ return err
+ }
+
+ if !strings.HasPrefix(coin.Denom, "ibc/") {
+ denomTrace := types.ParseDenomTrace(coin.Denom)
+ coin.Denom = denomTrace.IBCDenom()
+ }
+
+ timeoutHeightStr, err := cmd.Flags().GetString(flagPacketTimeoutHeight)
+ if err != nil {
+ return err
+ }
+ timeoutHeight, err := clienttypes.ParseHeight(timeoutHeightStr)
+ if err != nil {
+ return err
+ }
+
+ timeoutTimestamp, err := cmd.Flags().GetUint64(flagPacketTimeoutTimestamp)
+ if err != nil {
+ return err
+ }
+
+ absoluteTimeouts, err := cmd.Flags().GetBool(flagAbsoluteTimeouts)
+ if err != nil {
+ return err
+ }
+
+ // if the timeouts are not absolute, retrieve latest block height and block timestamp
+ // for the consensus state connected to the destination port/channel
+ if !absoluteTimeouts {
+ consensusState, height, _, err := channelutils.QueryLatestConsensusState(clientCtx, srcPort, srcChannel)
+ if err != nil {
+ return err
+ }
+
+ if !timeoutHeight.IsZero() {
+ absoluteHeight := height
+ absoluteHeight.RevisionNumber += timeoutHeight.RevisionNumber
+ absoluteHeight.RevisionHeight += timeoutHeight.RevisionHeight
+ timeoutHeight = absoluteHeight
+ }
+
+ if timeoutTimestamp != 0 {
+ timeoutTimestamp = consensusState.GetTimestamp() + timeoutTimestamp
+ }
+ }
+
+ msg := types.NewMsgTransfer(
+ srcPort, srcChannel, coin, sender, receiver, timeoutHeight, timeoutTimestamp,
+ )
+ svcMsgClientConn := &msgservice.ServiceMsgClientConn{}
+ msgClient := types.NewMsgClient(svcMsgClientConn)
+ _, err = msgClient.Transfer(cmd.Context(), msg)
+ if err != nil {
+ return err
+ }
+
+ return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), svcMsgClientConn.GetMsgs()...)
+ },
+ }
+
+ cmd.Flags().String(flagPacketTimeoutHeight, types.DefaultRelativePacketTimeoutHeight, "Packet timeout block height. The timeout is disabled when set to 0-0.")
+ cmd.Flags().Uint64(flagPacketTimeoutTimestamp, types.DefaultRelativePacketTimeoutTimestamp, "Packet timeout timestamp in nanoseconds. Default is 10 minutes. The timeout is disabled when set to 0.")
+ cmd.Flags().Bool(flagAbsoluteTimeouts, false, "Timeout flags are used as absolute timeouts.")
+ flags.AddTxFlagsToCmd(cmd)
+
+ return cmd
+}
diff --git a/applications/transfer/handler.go b/applications/transfer/handler.go
new file mode 100644
index 00000000..7c992c92
--- /dev/null
+++ b/applications/transfer/handler.go
@@ -0,0 +1,23 @@
+package transfer
+
+import (
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types"
+)
+
+// NewHandler returns sdk.Handler for IBC token transfer module messages
+func NewHandler(k types.MsgServer) sdk.Handler {
+ return func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) {
+ ctx = ctx.WithEventManager(sdk.NewEventManager())
+
+ switch msg := msg.(type) {
+ case *types.MsgTransfer:
+ res, err := k.Transfer(sdk.WrapSDKContext(ctx), msg)
+ return sdk.WrapServiceResult(ctx, res, err)
+
+ default:
+ return nil, sdkerrors.Wrapf(sdkerrors.ErrUnknownRequest, "unrecognized ICS-20 transfer message type: %T", msg)
+ }
+ }
+}
diff --git a/applications/transfer/handler_test.go b/applications/transfer/handler_test.go
new file mode 100644
index 00000000..92a04210
--- /dev/null
+++ b/applications/transfer/handler_test.go
@@ -0,0 +1,123 @@
+package transfer_test
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/suite"
+
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types"
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+)
+
+type TransferTestSuite struct {
+ suite.Suite
+
+ coordinator *ibctesting.Coordinator
+
+ // testing chains used for convenience and readability
+ chainA *ibctesting.TestChain
+ chainB *ibctesting.TestChain
+ chainC *ibctesting.TestChain
+}
+
+func (suite *TransferTestSuite) SetupTest() {
+ suite.coordinator = ibctesting.NewCoordinator(suite.T(), 3)
+ suite.chainA = suite.coordinator.GetChain(ibctesting.GetChainID(0))
+ suite.chainB = suite.coordinator.GetChain(ibctesting.GetChainID(1))
+ suite.chainC = suite.coordinator.GetChain(ibctesting.GetChainID(2))
+}
+
+// constructs a send from chainA to chainB on the established channel/connection,
+// forwards the resulting voucher from chainB to chainC, and finally sends it back
+// from chainC to chainB.
+func (suite *TransferTestSuite) TestHandleMsgTransfer() {
+ // setup between chainA and chainB
+ clientA, clientB, connA, connB := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
+ channelA, channelB := suite.coordinator.CreateTransferChannels(suite.chainA, suite.chainB, connA, connB, channeltypes.UNORDERED)
+ // originalBalance := suite.chainA.App.BankKeeper.GetBalance(suite.chainA.GetContext(), suite.chainA.SenderAccount.GetAddress(), sdk.DefaultBondDenom)
+ timeoutHeight := clienttypes.NewHeight(0, 110)
+
+ coinToSendToB := sdk.NewCoin(sdk.DefaultBondDenom, sdk.NewInt(100))
+
+ // send from chainA to chainB
+ msg := types.NewMsgTransfer(channelA.PortID, channelA.ID, coinToSendToB, suite.chainA.SenderAccount.GetAddress(), suite.chainB.SenderAccount.GetAddress().String(), timeoutHeight, 0)
+
+ err := suite.coordinator.SendMsg(suite.chainA, suite.chainB, clientB, msg)
+ suite.Require().NoError(err) // message committed
+
+ // relay send
+ fungibleTokenPacket := types.NewFungibleTokenPacketData(coinToSendToB.Denom, coinToSendToB.Amount.Uint64(), suite.chainA.SenderAccount.GetAddress().String(), suite.chainB.SenderAccount.GetAddress().String())
+ packet := channeltypes.NewPacket(fungibleTokenPacket.GetBytes(), 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
+ ack := channeltypes.NewResultAcknowledgement([]byte{byte(1)})
+ err = suite.coordinator.RelayPacket(suite.chainA, suite.chainB, clientA, clientB, packet, ack.GetBytes())
+ suite.Require().NoError(err) // relay committed
+
+ // check that voucher exists on chain B
+ voucherDenomTrace := types.ParseDenomTrace(types.GetPrefixedDenom(packet.GetDestPort(), packet.GetDestChannel(), sdk.DefaultBondDenom))
+ balance := suite.chainB.App.BankKeeper.GetBalance(suite.chainB.GetContext(), suite.chainB.SenderAccount.GetAddress(), voucherDenomTrace.IBCDenom())
+
+ coinSentFromAToB := types.GetTransferCoin(channelB.PortID, channelB.ID, sdk.DefaultBondDenom, 100)
+ suite.Require().Equal(coinSentFromAToB, balance)
+
+ // setup between chainB to chainC
+ clientOnBForC, clientOnCForB, connOnBForC, connOnCForB := suite.coordinator.SetupClientConnections(suite.chainB, suite.chainC, exported.Tendermint)
+ channelOnBForC, channelOnCForB := suite.coordinator.CreateTransferChannels(suite.chainB, suite.chainC, connOnBForC, connOnCForB, channeltypes.UNORDERED)
+
+ // send from chainB to chainC
+ msg = types.NewMsgTransfer(channelOnBForC.PortID, channelOnBForC.ID, coinSentFromAToB, suite.chainB.SenderAccount.GetAddress(), suite.chainC.SenderAccount.GetAddress().String(), timeoutHeight, 0)
+
+ err = suite.coordinator.SendMsg(suite.chainB, suite.chainC, clientOnCForB, msg)
+ suite.Require().NoError(err) // message committed
+
+ // relay send
+ // NOTE: fungible token is prefixed with the full trace in order to verify the packet commitment
+ fullDenomPath := types.GetPrefixedDenom(channelOnCForB.PortID, channelOnCForB.ID, voucherDenomTrace.GetFullDenomPath())
+ fungibleTokenPacket = types.NewFungibleTokenPacketData(voucherDenomTrace.GetFullDenomPath(), coinSentFromAToB.Amount.Uint64(), suite.chainB.SenderAccount.GetAddress().String(), suite.chainC.SenderAccount.GetAddress().String())
+ packet = channeltypes.NewPacket(fungibleTokenPacket.GetBytes(), 1, channelOnBForC.PortID, channelOnBForC.ID, channelOnCForB.PortID, channelOnCForB.ID, timeoutHeight, 0)
+ err = suite.coordinator.RelayPacket(suite.chainB, suite.chainC, clientOnBForC, clientOnCForB, packet, ack.GetBytes())
+ suite.Require().NoError(err) // relay committed
+
+ coinSentFromBToC := sdk.NewInt64Coin(types.ParseDenomTrace(fullDenomPath).IBCDenom(), 100)
+ balance = suite.chainC.App.BankKeeper.GetBalance(suite.chainC.GetContext(), suite.chainC.SenderAccount.GetAddress(), coinSentFromBToC.Denom)
+
+ // check that the balance is updated on chainC
+ suite.Require().Equal(coinSentFromBToC, balance)
+
+ // check that balance on chain B is empty
+ balance = suite.chainB.App.BankKeeper.GetBalance(suite.chainB.GetContext(), suite.chainB.SenderAccount.GetAddress(), coinSentFromBToC.Denom)
+ suite.Require().Zero(balance.Amount.Int64())
+
+ // send from chainC back to chainB
+ msg = types.NewMsgTransfer(channelOnCForB.PortID, channelOnCForB.ID, coinSentFromBToC, suite.chainC.SenderAccount.GetAddress(), suite.chainB.SenderAccount.GetAddress().String(), timeoutHeight, 0)
+
+ err = suite.coordinator.SendMsg(suite.chainC, suite.chainB, clientOnBForC, msg)
+ suite.Require().NoError(err) // message committed
+
+ // relay send
+ // NOTE: fungible token is prefixed with the full trace in order to verify the packet commitment
+ fungibleTokenPacket = types.NewFungibleTokenPacketData(fullDenomPath, coinSentFromBToC.Amount.Uint64(), suite.chainC.SenderAccount.GetAddress().String(), suite.chainB.SenderAccount.GetAddress().String())
+ packet = channeltypes.NewPacket(fungibleTokenPacket.GetBytes(), 1, channelOnCForB.PortID, channelOnCForB.ID, channelOnBForC.PortID, channelOnBForC.ID, timeoutHeight, 0)
+ err = suite.coordinator.RelayPacket(suite.chainC, suite.chainB, clientOnCForB, clientOnBForC, packet, ack.GetBytes())
+ suite.Require().NoError(err) // relay committed
+
+ balance = suite.chainB.App.BankKeeper.GetBalance(suite.chainB.GetContext(), suite.chainB.SenderAccount.GetAddress(), coinSentFromAToB.Denom)
+
+	// check that the voucher balance on chainB has returned to its original state
+ suite.Require().Equal(coinSentFromAToB, balance)
+
+ // check that module account escrow address is empty
+ escrowAddress := types.GetEscrowAddress(packet.GetDestPort(), packet.GetDestChannel())
+ balance = suite.chainB.App.BankKeeper.GetBalance(suite.chainB.GetContext(), escrowAddress, sdk.DefaultBondDenom)
+ suite.Require().Equal(sdk.NewCoin(sdk.DefaultBondDenom, sdk.ZeroInt()), balance)
+
+	// check that the balance on chainC is empty
+ balance = suite.chainC.App.BankKeeper.GetBalance(suite.chainC.GetContext(), suite.chainC.SenderAccount.GetAddress(), voucherDenomTrace.IBCDenom())
+ suite.Require().Zero(balance.Amount.Int64())
+}
+
+func TestTransferTestSuite(t *testing.T) {
+ suite.Run(t, new(TransferTestSuite))
+}
diff --git a/applications/transfer/keeper/MBT_README.md b/applications/transfer/keeper/MBT_README.md
new file mode 100644
index 00000000..8a5930f6
--- /dev/null
+++ b/applications/transfer/keeper/MBT_README.md
@@ -0,0 +1,51 @@
+## Token Transfer Model-based Testing Guide
+
+In the process of IBC Audit performed by Informal Systems, we have implemented
+a preliminary set of model-based tests for the ICS-20 Token Transfer implementation.
+
+Model-based tests are based on the formal `TLA+` model of the Token transfer relay functions: see [relay.tla](relay_model/relay.tla).
+The tests themselves are simple `TLA+` assertions that describe the desired shape of executions that send or receive tokens;
+see [relay_tests.tla](relay_model/relay_tests.tla) for some examples.
+To make test assertions possible, the TLA+ model contains a `history` variable,
+which records the whole execution history.
+By referring to `history`, you declaratively specify which execution history you want to see.
+
+After you have specified your `TLA+` test, you can run it using the [Apalache model checker](https://github.com/informalsystems/apalache).
+For example, for the test `TestUnescrowTokens`, run
+
+```bash
+apalache-mc check --inv=TestUnescrowTokensInv relay_tests.tla
+```
+
+If there are no errors in the TLA+ model or in the test assertions, this will produce a couple of so-called _counterexamples_.
+This is terminology from the model-checking community; for testing purposes they can be considered simply as model executions.
+See the file `counterexample.tla` for a human-readable representation, and `counterexample.json` for a machine-readable one.
+
+In order to execute the produced test, you need to translate it into another format.
+For that translation you need the tool [Jsonatr (JSON Artifact Translator)](https://github.com/informalsystems/jsonatr).
+It performs the translation using this [transformation spec](relay_model/apalache-to-relay-test2.json).
+
+To transform a counterexample into a test, run
+
+```bash
+jsonatr --use apalache-to-relay-test2.json --in counterexample.json --out model_based_tests/YourTestName.json
+```
+
+Now, if you run `go test` in this directory, the file you have produced above should be picked up by the [model-based test driver](mbt_relay_test.go),
+and executed automatically.
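+
+Putting these steps together, an end-to-end run for the `TestUnescrowTokens` example above might look as follows (a sketch only: the fixture file name is illustrative, and the exact paths depend on the directory you run each command from):
+
+```bash
+# model-check the test assertion and produce counterexample.json
+apalache-mc check --inv=TestUnescrowTokensInv relay_tests.tla
+# translate the counterexample into a test fixture for the Go driver
+jsonatr --use apalache-to-relay-test2.json --in counterexample.json --out model_based_tests/TestUnescrowTokens.json
+# run the model-based test driver from this directory; it picks up all *.json fixtures
+go test -run TestKeeperTestSuite -v
+```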
+
+
+The easiest way to run Apalache is by
+[using a Docker image](https://github.com/informalsystems/apalache/blob/master/docs/manual.md#useDocker);
+to run Jsonatr you need to clone the repository locally and, after building it,
+add the `target/debug` directory to your `PATH`.
+
+To wrap the Apalache Docker image into an executable, you can create the following bash script named `apalache-mc`:
+
+```bash
+#!/bin/bash
+docker run --rm -v "$(pwd)":/var/apalache apalache/mc "$@"
+```
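+
+Remember to make the script executable (`chmod +x apalache-mc`) and to place it somewhere on your `PATH`.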
+
+
+In case of any questions please don't hesitate to contact Andrey Kuprianov (andrey@informal.systems).
\ No newline at end of file
diff --git a/applications/transfer/keeper/encoding.go b/applications/transfer/keeper/encoding.go
new file mode 100644
index 00000000..ddb1bc4b
--- /dev/null
+++ b/applications/transfer/keeper/encoding.go
@@ -0,0 +1,35 @@
+package keeper
+
+import (
+ "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types"
+)
+
+// UnmarshalDenomTrace attempts to decode and return a DenomTrace object from
+// raw encoded bytes.
+func (k Keeper) UnmarshalDenomTrace(bz []byte) (types.DenomTrace, error) {
+ var denomTrace types.DenomTrace
+ if err := k.cdc.UnmarshalBinaryBare(bz, &denomTrace); err != nil {
+ return types.DenomTrace{}, err
+ }
+ return denomTrace, nil
+}
+
+// MustUnmarshalDenomTrace attempts to decode and return a DenomTrace object from
+// raw encoded bytes. It panics on error.
+func (k Keeper) MustUnmarshalDenomTrace(bz []byte) types.DenomTrace {
+ var denomTrace types.DenomTrace
+ k.cdc.MustUnmarshalBinaryBare(bz, &denomTrace)
+ return denomTrace
+}
+
+// MarshalDenomTrace attempts to encode a DenomTrace object and returns the
+// raw encoded bytes.
+func (k Keeper) MarshalDenomTrace(denomTrace types.DenomTrace) ([]byte, error) {
+ return k.cdc.MarshalBinaryBare(&denomTrace)
+}
+
+// MustMarshalDenomTrace attempts to encode a DenomTrace object and returns the
+// raw encoded bytes. It panics on error.
+func (k Keeper) MustMarshalDenomTrace(denomTrace types.DenomTrace) []byte {
+ return k.cdc.MustMarshalBinaryBare(&denomTrace)
+}
diff --git a/applications/transfer/keeper/genesis.go b/applications/transfer/keeper/genesis.go
new file mode 100644
index 00000000..58a0c081
--- /dev/null
+++ b/applications/transfer/keeper/genesis.go
@@ -0,0 +1,45 @@
+package keeper
+
+import (
+ "fmt"
+
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types"
+)
+
+// InitGenesis initializes the ibc-transfer state and binds to PortID.
+func (k Keeper) InitGenesis(ctx sdk.Context, state types.GenesisState) {
+ k.SetPort(ctx, state.PortId)
+
+ for _, trace := range state.DenomTraces {
+ k.SetDenomTrace(ctx, trace)
+ }
+
+ // Only try to bind to port if it is not already bound, since we may already own
+ // port capability from capability InitGenesis
+ if !k.IsBound(ctx, state.PortId) {
+ // transfer module binds to the transfer port on InitChain
+ // and claims the returned capability
+ err := k.BindPort(ctx, state.PortId)
+ if err != nil {
+ panic(fmt.Sprintf("could not claim port capability: %v", err))
+ }
+ }
+
+ k.SetParams(ctx, state.Params)
+
+ // check if the module account exists
+ moduleAcc := k.GetTransferAccount(ctx)
+ if moduleAcc == nil {
+ panic(fmt.Sprintf("%s module account has not been set", types.ModuleName))
+ }
+}
+
+// ExportGenesis exports ibc-transfer module's portID and denom trace info into its genesis state.
+func (k Keeper) ExportGenesis(ctx sdk.Context) *types.GenesisState {
+ return &types.GenesisState{
+ PortId: k.GetPort(ctx),
+ DenomTraces: k.GetAllDenomTraces(ctx),
+ Params: k.GetParams(ctx),
+ }
+}
diff --git a/applications/transfer/keeper/genesis_test.go b/applications/transfer/keeper/genesis_test.go
new file mode 100644
index 00000000..a8543491
--- /dev/null
+++ b/applications/transfer/keeper/genesis_test.go
@@ -0,0 +1,39 @@
+package keeper_test
+
+import (
+ "fmt"
+
+ "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types"
+)
+
+func (suite *KeeperTestSuite) TestGenesis() {
+ var (
+ path string
+ traces types.Traces
+ )
+
+ for i := 0; i < 5; i++ {
+ prefix := fmt.Sprintf("transfer/channelToChain%d", i)
+ if i == 0 {
+ path = prefix
+ } else {
+ path = prefix + "/" + path
+ }
+
+ denomTrace := types.DenomTrace{
+ BaseDenom: "uatom",
+ Path: path,
+ }
+ traces = append(types.Traces{denomTrace}, traces...)
+ suite.chainA.App.TransferKeeper.SetDenomTrace(suite.chainA.GetContext(), denomTrace)
+ }
+
+ genesis := suite.chainA.App.TransferKeeper.ExportGenesis(suite.chainA.GetContext())
+
+ suite.Require().Equal(types.PortID, genesis.PortId)
+ suite.Require().Equal(traces.Sort(), genesis.DenomTraces)
+
+ suite.Require().NotPanics(func() {
+ suite.chainA.App.TransferKeeper.InitGenesis(suite.chainA.GetContext(), *genesis)
+ })
+}
diff --git a/applications/transfer/keeper/grpc_query.go b/applications/transfer/keeper/grpc_query.go
new file mode 100644
index 00000000..b6347895
--- /dev/null
+++ b/applications/transfer/keeper/grpc_query.go
@@ -0,0 +1,83 @@
+package keeper
+
+import (
+ "context"
+ "fmt"
+
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+
+ "github.com/cosmos/cosmos-sdk/store/prefix"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ "github.com/cosmos/cosmos-sdk/types/query"
+ "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types"
+)
+
+var _ types.QueryServer = Keeper{}
+
+// DenomTrace implements the Query/DenomTrace gRPC method
+func (q Keeper) DenomTrace(c context.Context, req *types.QueryDenomTraceRequest) (*types.QueryDenomTraceResponse, error) {
+ if req == nil {
+ return nil, status.Error(codes.InvalidArgument, "empty request")
+ }
+
+ hash, err := types.ParseHexHash(req.Hash)
+ if err != nil {
+ return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("invalid denom trace hash %s, %s", req.Hash, err))
+ }
+
+ ctx := sdk.UnwrapSDKContext(c)
+ denomTrace, found := q.GetDenomTrace(ctx, hash)
+ if !found {
+ return nil, status.Error(
+ codes.NotFound,
+ sdkerrors.Wrap(types.ErrTraceNotFound, req.Hash).Error(),
+ )
+ }
+
+ return &types.QueryDenomTraceResponse{
+ DenomTrace: &denomTrace,
+ }, nil
+}
+
+// DenomTraces implements the Query/DenomTraces gRPC method
+func (q Keeper) DenomTraces(c context.Context, req *types.QueryDenomTracesRequest) (*types.QueryDenomTracesResponse, error) {
+ if req == nil {
+ return nil, status.Error(codes.InvalidArgument, "empty request")
+ }
+
+ ctx := sdk.UnwrapSDKContext(c)
+
+ traces := types.Traces{}
+ store := prefix.NewStore(ctx.KVStore(q.storeKey), types.DenomTraceKey)
+
+ pageRes, err := query.Paginate(store, req.Pagination, func(_, value []byte) error {
+ result, err := q.UnmarshalDenomTrace(value)
+ if err != nil {
+ return err
+ }
+
+ traces = append(traces, result)
+ return nil
+ })
+
+ if err != nil {
+ return nil, err
+ }
+
+ return &types.QueryDenomTracesResponse{
+ DenomTraces: traces.Sort(),
+ Pagination: pageRes,
+ }, nil
+}
+
+// Params implements the Query/Params gRPC method
+func (q Keeper) Params(c context.Context, _ *types.QueryParamsRequest) (*types.QueryParamsResponse, error) {
+ ctx := sdk.UnwrapSDKContext(c)
+ params := q.GetParams(ctx)
+
+ return &types.QueryParamsResponse{
+		Params: &params,
+ }, nil
+}
diff --git a/applications/transfer/keeper/grpc_query_test.go b/applications/transfer/keeper/grpc_query_test.go
new file mode 100644
index 00000000..0b16e072
--- /dev/null
+++ b/applications/transfer/keeper/grpc_query_test.go
@@ -0,0 +1,142 @@
+package keeper_test
+
+import (
+ "fmt"
+
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/types/query"
+ "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types"
+)
+
+func (suite *KeeperTestSuite) TestQueryDenomTrace() {
+ var (
+ req *types.QueryDenomTraceRequest
+ expTrace types.DenomTrace
+ )
+
+ testCases := []struct {
+ msg string
+ malleate func()
+ expPass bool
+ }{
+ {
+ "invalid hex hash",
+ func() {
+ req = &types.QueryDenomTraceRequest{
+ Hash: "!@#!@#!",
+ }
+ },
+ false,
+ },
+ {
+ "not found denom trace",
+ func() {
+ expTrace.Path = "transfer/channelToA/transfer/channelToB"
+ expTrace.BaseDenom = "uatom"
+ req = &types.QueryDenomTraceRequest{
+ Hash: expTrace.Hash().String(),
+ }
+ },
+ false,
+ },
+ {
+ "success",
+ func() {
+ expTrace.Path = "transfer/channelToA/transfer/channelToB"
+ expTrace.BaseDenom = "uatom"
+ suite.chainA.App.TransferKeeper.SetDenomTrace(suite.chainA.GetContext(), expTrace)
+
+ req = &types.QueryDenomTraceRequest{
+ Hash: expTrace.Hash().String(),
+ }
+ },
+ true,
+ },
+ }
+
+ for _, tc := range testCases {
+ suite.Run(fmt.Sprintf("Case %s", tc.msg), func() {
+ suite.SetupTest() // reset
+
+ tc.malleate()
+ ctx := sdk.WrapSDKContext(suite.chainA.GetContext())
+
+ res, err := suite.queryClient.DenomTrace(ctx, req)
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ suite.Require().NotNil(res)
+ suite.Require().Equal(&expTrace, res.DenomTrace)
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+}
+
+func (suite *KeeperTestSuite) TestQueryDenomTraces() {
+ var (
+ req *types.QueryDenomTracesRequest
+ expTraces = types.Traces(nil)
+ )
+
+ testCases := []struct {
+ msg string
+ malleate func()
+ expPass bool
+ }{
+ {
+ "empty pagination",
+ func() {
+ req = &types.QueryDenomTracesRequest{}
+ },
+ true,
+ },
+ {
+ "success",
+ func() {
+ expTraces = append(expTraces, types.DenomTrace{Path: "", BaseDenom: "uatom"})
+ expTraces = append(expTraces, types.DenomTrace{Path: "transfer/channelToB", BaseDenom: "uatom"})
+ expTraces = append(expTraces, types.DenomTrace{Path: "transfer/channelToA/transfer/channelToB", BaseDenom: "uatom"})
+
+ for _, trace := range expTraces {
+ suite.chainA.App.TransferKeeper.SetDenomTrace(suite.chainA.GetContext(), trace)
+ }
+
+ req = &types.QueryDenomTracesRequest{
+ Pagination: &query.PageRequest{
+ Limit: 5,
+ CountTotal: false,
+ },
+ }
+ },
+ true,
+ },
+ }
+
+ for _, tc := range testCases {
+ suite.Run(fmt.Sprintf("Case %s", tc.msg), func() {
+ suite.SetupTest() // reset
+
+ tc.malleate()
+ ctx := sdk.WrapSDKContext(suite.chainA.GetContext())
+
+ res, err := suite.queryClient.DenomTraces(ctx, req)
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ suite.Require().NotNil(res)
+ suite.Require().Equal(expTraces.Sort(), res.DenomTraces)
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+}
+
+func (suite *KeeperTestSuite) TestQueryParams() {
+ ctx := sdk.WrapSDKContext(suite.chainA.GetContext())
+ expParams := types.DefaultParams()
+	res, err := suite.queryClient.Params(ctx, &types.QueryParamsRequest{})
+	suite.Require().NoError(err)
+	suite.Require().Equal(&expParams, res.Params)
+}
diff --git a/applications/transfer/keeper/keeper.go b/applications/transfer/keeper/keeper.go
new file mode 100644
index 00000000..a2eebb55
--- /dev/null
+++ b/applications/transfer/keeper/keeper.go
@@ -0,0 +1,169 @@
+package keeper
+
+import (
+ tmbytes "github.com/tendermint/tendermint/libs/bytes"
+ "github.com/tendermint/tendermint/libs/log"
+
+ "github.com/cosmos/cosmos-sdk/codec"
+ "github.com/cosmos/cosmos-sdk/store/prefix"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
+ capabilitykeeper "github.com/cosmos/cosmos-sdk/x/capability/keeper"
+ capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types"
+ channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+ host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+ paramtypes "github.com/cosmos/cosmos-sdk/x/params/types"
+)
+
+// Keeper defines the IBC fungible transfer keeper
+type Keeper struct {
+ storeKey sdk.StoreKey
+ cdc codec.BinaryMarshaler
+ paramSpace paramtypes.Subspace
+
+ channelKeeper types.ChannelKeeper
+ portKeeper types.PortKeeper
+ authKeeper types.AccountKeeper
+ bankKeeper types.BankKeeper
+ scopedKeeper capabilitykeeper.ScopedKeeper
+}
+
+// NewKeeper creates a new IBC transfer Keeper instance
+func NewKeeper(
+ cdc codec.BinaryMarshaler, key sdk.StoreKey, paramSpace paramtypes.Subspace,
+ channelKeeper types.ChannelKeeper, portKeeper types.PortKeeper,
+ authKeeper types.AccountKeeper, bankKeeper types.BankKeeper, scopedKeeper capabilitykeeper.ScopedKeeper,
+) Keeper {
+
+ // ensure ibc transfer module account is set
+ if addr := authKeeper.GetModuleAddress(types.ModuleName); addr == nil {
+ panic("the IBC transfer module account has not been set")
+ }
+
+ // set KeyTable if it has not already been set
+ if !paramSpace.HasKeyTable() {
+ paramSpace = paramSpace.WithKeyTable(types.ParamKeyTable())
+ }
+
+ return Keeper{
+ cdc: cdc,
+ storeKey: key,
+ paramSpace: paramSpace,
+ channelKeeper: channelKeeper,
+ portKeeper: portKeeper,
+ authKeeper: authKeeper,
+ bankKeeper: bankKeeper,
+ scopedKeeper: scopedKeeper,
+ }
+}
+
+// Logger returns a module-specific logger.
+func (k Keeper) Logger(ctx sdk.Context) log.Logger {
+ return ctx.Logger().With("module", "x/"+host.ModuleName+"-"+types.ModuleName)
+}
+
+// GetTransferAccount returns the ICS-20 transfer ModuleAccount
+func (k Keeper) GetTransferAccount(ctx sdk.Context) authtypes.ModuleAccountI {
+ return k.authKeeper.GetModuleAccount(ctx, types.ModuleName)
+}
+
+// ChanCloseInit defines a wrapper function for the channel Keeper's function
+// in order to expose it to the ICS20 transfer handler.
+func (k Keeper) ChanCloseInit(ctx sdk.Context, portID, channelID string) error {
+ capName := host.ChannelCapabilityPath(portID, channelID)
+ chanCap, ok := k.scopedKeeper.GetCapability(ctx, capName)
+ if !ok {
+ return sdkerrors.Wrapf(channeltypes.ErrChannelCapabilityNotFound, "could not retrieve channel capability at: %s", capName)
+ }
+ return k.channelKeeper.ChanCloseInit(ctx, portID, channelID, chanCap)
+}
+
+// IsBound checks if the transfer module is already bound to the desired port
+func (k Keeper) IsBound(ctx sdk.Context, portID string) bool {
+ _, ok := k.scopedKeeper.GetCapability(ctx, host.PortPath(portID))
+ return ok
+}
+
+// BindPort defines a wrapper function for the port Keeper's function in
+// order to expose it to the module's InitGenesis function
+func (k Keeper) BindPort(ctx sdk.Context, portID string) error {
+ cap := k.portKeeper.BindPort(ctx, portID)
+ return k.ClaimCapability(ctx, cap, host.PortPath(portID))
+}
+
+// GetPort returns the portID for the transfer module. Used in ExportGenesis
+func (k Keeper) GetPort(ctx sdk.Context) string {
+ store := ctx.KVStore(k.storeKey)
+ return string(store.Get(types.PortKey))
+}
+
+// SetPort sets the portID for the transfer module. Used in InitGenesis
+func (k Keeper) SetPort(ctx sdk.Context, portID string) {
+ store := ctx.KVStore(k.storeKey)
+ store.Set(types.PortKey, []byte(portID))
+}
+
+// GetDenomTrace retrieves the full identifiers trace and base denomination from the store.
+func (k Keeper) GetDenomTrace(ctx sdk.Context, denomTraceHash tmbytes.HexBytes) (types.DenomTrace, bool) {
+ store := prefix.NewStore(ctx.KVStore(k.storeKey), types.DenomTraceKey)
+ bz := store.Get(denomTraceHash)
+ if bz == nil {
+ return types.DenomTrace{}, false
+ }
+
+ denomTrace := k.MustUnmarshalDenomTrace(bz)
+ return denomTrace, true
+}
+
+// HasDenomTrace checks if the key with the given denomination trace hash exists on the store.
+func (k Keeper) HasDenomTrace(ctx sdk.Context, denomTraceHash tmbytes.HexBytes) bool {
+ store := prefix.NewStore(ctx.KVStore(k.storeKey), types.DenomTraceKey)
+ return store.Has(denomTraceHash)
+}
+
+// SetDenomTrace sets a new {trace hash -> denom trace} pair to the store.
+func (k Keeper) SetDenomTrace(ctx sdk.Context, denomTrace types.DenomTrace) {
+ store := prefix.NewStore(ctx.KVStore(k.storeKey), types.DenomTraceKey)
+ bz := k.MustMarshalDenomTrace(denomTrace)
+ store.Set(denomTrace.Hash(), bz)
+}
+
+// GetAllDenomTraces returns the trace information for all the denominations.
+func (k Keeper) GetAllDenomTraces(ctx sdk.Context) types.Traces {
+ traces := types.Traces{}
+ k.IterateDenomTraces(ctx, func(denomTrace types.DenomTrace) bool {
+ traces = append(traces, denomTrace)
+ return false
+ })
+
+ return traces.Sort()
+}
+
+// IterateDenomTraces iterates over the denomination traces in the store
+// and performs a callback function.
+func (k Keeper) IterateDenomTraces(ctx sdk.Context, cb func(denomTrace types.DenomTrace) bool) {
+ store := ctx.KVStore(k.storeKey)
+ iterator := sdk.KVStorePrefixIterator(store, types.DenomTraceKey)
+
+ defer iterator.Close()
+ for ; iterator.Valid(); iterator.Next() {
+
+ denomTrace := k.MustUnmarshalDenomTrace(iterator.Value())
+ if cb(denomTrace) {
+ break
+ }
+ }
+}
+
+// AuthenticateCapability wraps the scopedKeeper's AuthenticateCapability function
+func (k Keeper) AuthenticateCapability(ctx sdk.Context, cap *capabilitytypes.Capability, name string) bool {
+ return k.scopedKeeper.AuthenticateCapability(ctx, cap, name)
+}
+
+// ClaimCapability allows the transfer module to claim a capability that the IBC module
+// passes to it
+func (k Keeper) ClaimCapability(ctx sdk.Context, cap *capabilitytypes.Capability, name string) error {
+ return k.scopedKeeper.ClaimCapability(ctx, cap, name)
+}
diff --git a/applications/transfer/keeper/keeper_test.go b/applications/transfer/keeper/keeper_test.go
new file mode 100644
index 00000000..cce9cbcc
--- /dev/null
+++ b/applications/transfer/keeper/keeper_test.go
@@ -0,0 +1,51 @@
+package keeper_test
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/suite"
+ "github.com/tendermint/tendermint/crypto"
+
+ "github.com/cosmos/cosmos-sdk/baseapp"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types"
+ ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+)
+
+type KeeperTestSuite struct {
+ suite.Suite
+
+ coordinator *ibctesting.Coordinator
+
+ // testing chains used for convenience and readability
+ chainA *ibctesting.TestChain
+ chainB *ibctesting.TestChain
+ chainC *ibctesting.TestChain
+
+ queryClient types.QueryClient
+}
+
+func (suite *KeeperTestSuite) SetupTest() {
+ suite.coordinator = ibctesting.NewCoordinator(suite.T(), 3)
+ suite.chainA = suite.coordinator.GetChain(ibctesting.GetChainID(0))
+ suite.chainB = suite.coordinator.GetChain(ibctesting.GetChainID(1))
+ suite.chainC = suite.coordinator.GetChain(ibctesting.GetChainID(2))
+
+ queryHelper := baseapp.NewQueryServerTestHelper(suite.chainA.GetContext(), suite.chainA.App.InterfaceRegistry())
+ types.RegisterQueryServer(queryHelper, suite.chainA.App.TransferKeeper)
+ suite.queryClient = types.NewQueryClient(queryHelper)
+}
+
+func (suite *KeeperTestSuite) TestGetTransferAccount() {
+ expectedMaccAddr := sdk.AccAddress(crypto.AddressHash([]byte(types.ModuleName)))
+
+ macc := suite.chainA.App.TransferKeeper.GetTransferAccount(suite.chainA.GetContext())
+
+ suite.Require().NotNil(macc)
+ suite.Require().Equal(types.ModuleName, macc.GetName())
+ suite.Require().Equal(expectedMaccAddr, macc.GetAddress())
+}
+
+func TestKeeperTestSuite(t *testing.T) {
+ suite.Run(t, new(KeeperTestSuite))
+}
diff --git a/applications/transfer/keeper/mbt_relay_test.go b/applications/transfer/keeper/mbt_relay_test.go
new file mode 100644
index 00000000..cd64fbab
--- /dev/null
+++ b/applications/transfer/keeper/mbt_relay_test.go
@@ -0,0 +1,378 @@
+package keeper_test
+
+/// This file is a test driver for model-based tests generated from the TLA+ model of token transfer
+/// Written by Andrey Kuprianov within the scope of IBC Audit performed by Informal Systems.
+/// In case of any questions please don't hesitate to contact andrey@informal.systems.
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "strconv"
+ "strings"
+
+ "github.com/tendermint/tendermint/crypto"
+
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types"
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+)
+
+type TlaBalance struct {
+ Address []string `json:"address"`
+ Denom []string `json:"denom"`
+ Amount int64 `json:"amount"`
+}
+
+type TlaFungibleTokenPacketData struct {
+ Sender string `json:"sender"`
+ Receiver string `json:"receiver"`
+ Amount int `json:"amount"`
+ Denom []string `json:"denom"`
+}
+
+type TlaFungibleTokenPacket struct {
+ SourceChannel string `json:"sourceChannel"`
+ SourcePort string `json:"sourcePort"`
+ DestChannel string `json:"destChannel"`
+ DestPort string `json:"destPort"`
+ Data TlaFungibleTokenPacketData `json:"data"`
+}
+
+type TlaOnRecvPacketTestCase = struct {
+ // The required subset of bank balances
+ BankBefore []TlaBalance `json:"bankBefore"`
+ // The packet to process
+ Packet TlaFungibleTokenPacket `json:"packet"`
+ // The handler to call
+ Handler string `json:"handler"`
+ // The expected changes in the bank
+ BankAfter []TlaBalance `json:"bankAfter"`
+ // Whether OnRecvPacket should fail or not
+ Error bool `json:"error"`
+}
+
+type FungibleTokenPacket struct {
+ SourceChannel string
+ SourcePort string
+ DestChannel string
+ DestPort string
+ Data types.FungibleTokenPacketData
+}
+
+type OnRecvPacketTestCase = struct {
+ description string
+ // The required subset of bank balances
+ bankBefore []Balance
+ // The packet to process
+ packet FungibleTokenPacket
+ // The handler to call
+ handler string
+ // The expected bank state after processing (wrt. bankBefore)
+ bankAfter []Balance
+ // Whether OnRecvPacket should pass or fail
+ pass bool
+}
+
+type OwnedCoin struct {
+ Address string
+ Denom string
+}
+
+type Balance struct {
+ Id string
+ Address string
+ Denom string
+ Amount sdk.Int
+}
+
+func AddressFromString(address string) string {
+ return sdk.AccAddress(crypto.AddressHash([]byte(address))).String()
+}
+
+func AddressFromTla(addr []string) string {
+ if len(addr) != 3 {
+ panic("failed to convert from TLA+ address: wrong number of address components")
+ }
+ s := ""
+ if len(addr[0]) == 0 && len(addr[1]) == 0 {
+ // simple address: id
+ s = addr[2]
+ } else if len(addr[2]) == 0 {
+ // escrow address: ics20-1\x00port/channel
+ s = fmt.Sprintf("%s\x00%s/%s", types.Version, addr[0], addr[1])
+ } else {
+ panic("failed to convert from TLA+ address: neither simple nor escrow address")
+ }
+ return s
+}
+
+func DenomFromTla(denom []string) string {
+ var i int
+ for i = 0; i+1 < len(denom) && len(denom[i]) == 0 && len(denom[i+1]) == 0; i += 2 {
+ // skip empty prefixes
+ }
+ return strings.Join(denom[i:], "/")
+}
+
+func BalanceFromTla(balance TlaBalance) Balance {
+ return Balance{
+ Id: AddressFromTla(balance.Address),
+ Address: AddressFromString(AddressFromTla(balance.Address)),
+ Denom: DenomFromTla(balance.Denom),
+ Amount: sdk.NewInt(balance.Amount),
+ }
+}
+
+func BalancesFromTla(tla []TlaBalance) []Balance {
+ balances := make([]Balance, 0)
+ for _, b := range tla {
+ balances = append(balances, BalanceFromTla(b))
+ }
+ return balances
+}
+
+func FungibleTokenPacketFromTla(packet TlaFungibleTokenPacket) FungibleTokenPacket {
+ return FungibleTokenPacket{
+ SourceChannel: packet.SourceChannel,
+ SourcePort: packet.SourcePort,
+ DestChannel: packet.DestChannel,
+ DestPort: packet.DestPort,
+ Data: types.NewFungibleTokenPacketData(
+ DenomFromTla(packet.Data.Denom),
+ uint64(packet.Data.Amount),
+ AddressFromString(packet.Data.Sender),
+ AddressFromString(packet.Data.Receiver)),
+ }
+}
+
+func OnRecvPacketTestCaseFromTla(tc TlaOnRecvPacketTestCase) OnRecvPacketTestCase {
+ return OnRecvPacketTestCase{
+ description: "auto-generated",
+ bankBefore: BalancesFromTla(tc.BankBefore),
+ packet: FungibleTokenPacketFromTla(tc.Packet),
+ handler: tc.Handler,
+ bankAfter: BalancesFromTla(tc.BankAfter), // TODO different semantics
+ pass: !tc.Error,
+ }
+}
+
+var addressMap = make(map[string]string)
+
+type Bank struct {
+ balances map[OwnedCoin]sdk.Int
+}
+
+// Make an empty bank
+func MakeBank() Bank {
+ return Bank{balances: make(map[OwnedCoin]sdk.Int)}
+}
+
+// Subtract other bank from this bank
+func (bank *Bank) Sub(other *Bank) Bank {
+ diff := MakeBank()
+ for coin, amount := range bank.balances {
+ otherAmount, exists := other.balances[coin]
+ if exists {
+ diff.balances[coin] = amount.Sub(otherAmount)
+ } else {
+ diff.balances[coin] = amount
+ }
+ }
+ for coin, amount := range other.balances {
+ if _, exists := bank.balances[coin]; !exists {
+ diff.balances[coin] = amount.Neg()
+ }
+ }
+ return diff
+}
+
+// Set specific bank balance
+func (bank *Bank) SetBalance(address string, denom string, amount sdk.Int) {
+ bank.balances[OwnedCoin{address, denom}] = amount
+}
+
+// Set several balances at once
+func (bank *Bank) SetBalances(balances []Balance) {
+ for _, balance := range balances {
+ bank.balances[OwnedCoin{balance.Address, balance.Denom}] = balance.Amount
+ addressMap[balance.Address] = balance.Id
+ }
+}
+
+func NullCoin() OwnedCoin {
+ return OwnedCoin{
+ Address: AddressFromString(""),
+ Denom: "",
+ }
+}
+
+// Set several balances at once
+func BankFromBalances(balances []Balance) Bank {
+ bank := MakeBank()
+ for _, balance := range balances {
+ coin := OwnedCoin{balance.Address, balance.Denom}
+ if coin != NullCoin() { // ignore null coin
+ bank.balances[coin] = balance.Amount
+ addressMap[balance.Address] = balance.Id
+ }
+ }
+ return bank
+}
+
+// String representation of all bank balances
+func (bank *Bank) String() string {
+ str := ""
+ for coin, amount := range bank.balances {
+ str += coin.Address
+ if addressMap[coin.Address] != "" {
+ str += "(" + addressMap[coin.Address] + ")"
+ }
+ str += " : " + coin.Denom + " = " + amount.String() + "\n"
+ }
+ return str
+}
+
+// String representation of non-zero bank balances
+func (bank *Bank) NonZeroString() string {
+ str := ""
+ for coin, amount := range bank.balances {
+ if !amount.IsZero() {
+ str += coin.Address + " : " + coin.Denom + " = " + amount.String() + "\n"
+ }
+ }
+ return str
+}
+
+// Construct a bank out of the chain bank
+func BankOfChain(chain *ibctesting.TestChain) Bank {
+ bank := MakeBank()
+ chain.App.BankKeeper.IterateAllBalances(chain.GetContext(), func(address sdk.AccAddress, coin sdk.Coin) (stop bool) {
+ fullDenom := coin.Denom
+ if strings.HasPrefix(coin.Denom, "ibc/") {
+ fullDenom, _ = chain.App.TransferKeeper.DenomPathFromHash(chain.GetContext(), coin.Denom)
+ }
+ bank.SetBalance(address.String(), fullDenom, coin.Amount)
+ return false
+ })
+ return bank
+}
+
+// Check that the state of the bank is the bankBefore + expectedBankChange
+func (suite *KeeperTestSuite) CheckBankBalances(chain *ibctesting.TestChain, bankBefore *Bank, expectedBankChange *Bank) error {
+ bankAfter := BankOfChain(chain)
+ bankChange := bankAfter.Sub(bankBefore)
+ diff := bankChange.Sub(expectedBankChange)
+ NonZeroString := diff.NonZeroString()
+ if len(NonZeroString) != 0 {
+ return sdkerrors.Wrap(sdkerrors.ErrInvalidCoins, "Unexpected changes in the bank: \n"+NonZeroString)
+ }
+ return nil
+}
+
+func (suite *KeeperTestSuite) TestModelBasedRelay() {
+ dirname := "model_based_tests/"
+ files, err := ioutil.ReadDir(dirname)
+ if err != nil {
+ panic(fmt.Errorf("Failed to read model-based test files: %w", err))
+ }
+ for _, file_info := range files {
+ var tlaTestCases = []TlaOnRecvPacketTestCase{}
+ if !strings.HasSuffix(file_info.Name(), ".json") {
+ continue
+ }
+ jsonBlob, err := ioutil.ReadFile(dirname + file_info.Name())
+ if err != nil {
+ panic(fmt.Errorf("Failed to read JSON test fixture: %w", err))
+ }
+ err = json.Unmarshal([]byte(jsonBlob), &tlaTestCases)
+ if err != nil {
+ panic(fmt.Errorf("Failed to parse JSON test fixture: %w", err))
+ }
+
+ suite.SetupTest()
+ _, _, connAB, connBA := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
+ _, _, connBC, connCB := suite.coordinator.SetupClientConnections(suite.chainB, suite.chainC, exported.Tendermint)
+ suite.coordinator.CreateTransferChannels(suite.chainA, suite.chainB, connAB, connBA, channeltypes.UNORDERED)
+ suite.coordinator.CreateTransferChannels(suite.chainB, suite.chainC, connBC, connCB, channeltypes.UNORDERED)
+
+ for i, tlaTc := range tlaTestCases {
+ tc := OnRecvPacketTestCaseFromTla(tlaTc)
+ registerDenom := func() {
+ denomTrace := types.ParseDenomTrace(tc.packet.Data.Denom)
+ traceHash := denomTrace.Hash()
+ if !suite.chainB.App.TransferKeeper.HasDenomTrace(suite.chainB.GetContext(), traceHash) {
+ suite.chainB.App.TransferKeeper.SetDenomTrace(suite.chainB.GetContext(), denomTrace)
+ }
+ }
+
+ description := file_info.Name() + " # " + strconv.Itoa(i+1)
+ suite.Run(fmt.Sprintf("Case %s", description), func() {
+ seq := uint64(1)
+ packet := channeltypes.NewPacket(tc.packet.Data.GetBytes(), seq, tc.packet.SourcePort, tc.packet.SourceChannel, tc.packet.DestPort, tc.packet.DestChannel, clienttypes.NewHeight(0, 100), 0)
+ bankBefore := BankFromBalances(tc.bankBefore)
+ realBankBefore := BankOfChain(suite.chainB)
+ // First validate the packet itself (mimics what happens when the packet is being sent and/or received)
+ err := packet.ValidateBasic()
+ if err != nil {
+ suite.Require().False(tc.pass, err.Error())
+ return
+ }
+ switch tc.handler {
+ case "SendTransfer":
+ var sender sdk.AccAddress
+ sender, err = sdk.AccAddressFromBech32(tc.packet.Data.Sender)
+ if err != nil {
+ panic("MBT failed to convert sender address")
+ }
+ registerDenom()
+ denomTrace := types.ParseDenomTrace(tc.packet.Data.Denom)
+ denom := denomTrace.IBCDenom()
+ err = sdk.ValidateDenom(denom)
+ if err == nil {
+ err = suite.chainB.App.TransferKeeper.SendTransfer(
+ suite.chainB.GetContext(),
+ tc.packet.SourcePort,
+ tc.packet.SourceChannel,
+ sdk.NewCoin(denom, sdk.NewIntFromUint64(tc.packet.Data.Amount)),
+ sender,
+ tc.packet.Data.Receiver,
+ clienttypes.NewHeight(0, 110),
+ 0)
+ }
+ case "OnRecvPacket":
+ err = suite.chainB.App.TransferKeeper.OnRecvPacket(suite.chainB.GetContext(), packet, tc.packet.Data)
+ case "OnTimeoutPacket":
+ registerDenom()
+ err = suite.chainB.App.TransferKeeper.OnTimeoutPacket(suite.chainB.GetContext(), packet, tc.packet.Data)
+ case "OnRecvAcknowledgementResult":
+ err = suite.chainB.App.TransferKeeper.OnAcknowledgementPacket(
+ suite.chainB.GetContext(), packet, tc.packet.Data,
+ channeltypes.NewResultAcknowledgement(nil))
+ case "OnRecvAcknowledgementError":
+ registerDenom()
+ err = suite.chainB.App.TransferKeeper.OnAcknowledgementPacket(
+ suite.chainB.GetContext(), packet, tc.packet.Data,
+ channeltypes.NewErrorAcknowledgement("MBT Error Acknowledgement"))
+ default:
+ err = fmt.Errorf("Unknown handler: %s", tc.handler)
+ }
+ if err != nil {
+ suite.Require().False(tc.pass, err.Error())
+ return
+ }
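+ // The handler succeeded: verify that chain B's balances changed by exactly
+ // the delta between the model's bankBefore and bankAfter states.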
+ bankAfter := BankFromBalances(tc.bankAfter)
+ expectedBankChange := bankAfter.Sub(&bankBefore)
+ if err := suite.CheckBankBalances(suite.chainB, &realBankBefore, &expectedBankChange); err != nil {
+ suite.Require().False(tc.pass, err.Error())
+ return
+ }
+ suite.Require().True(tc.pass)
+ })
+ }
+ }
+}
diff --git a/applications/transfer/keeper/model_based_tests/Test5Packets.json b/applications/transfer/keeper/model_based_tests/Test5Packets.json
new file mode 100644
index 00000000..6ccdccc8
--- /dev/null
+++ b/applications/transfer/keeper/model_based_tests/Test5Packets.json
@@ -0,0 +1,492 @@
+[
+ {
+ "packet": {
+ "sourceChannel": "channel-0",
+ "sourcePort": "transfer",
+ "destChannel": "channel-1",
+ "destPort": "transfer",
+ "data": {
+ "sender": "a3",
+ "receiver": "a3",
+ "amount": 2,
+ "denom": [
+ "",
+ "",
+ "",
+ "",
+ "btc"
+ ]
+ }
+ },
+ "handler": "OnRecvPacket",
+ "bankBefore": [
+ {
+ "address": [
+ "",
+ "",
+ ""
+ ],
+ "denom": [
+ "",
+ "",
+ "",
+ "",
+ ""
+ ],
+ "amount": 0
+ }
+ ],
+ "bankAfter": [
+ {
+ "address": [
+ "",
+ "",
+ ""
+ ],
+ "denom": [
+ "",
+ "",
+ "",
+ "",
+ ""
+ ],
+ "amount": 0
+ },
+ {
+ "address": [
+ "",
+ "",
+ "a3"
+ ],
+ "denom": [
+ "",
+ "",
+ "transfer",
+ "channel-1",
+ "btc"
+ ],
+ "amount": 2
+ }
+ ],
+ "error": false
+ },
+ {
+ "packet": {
+ "sourceChannel": "ethereum-hub",
+ "sourcePort": "channel-0",
+ "destChannel": "channel-1",
+ "destPort": "transfer",
+ "data": {
+ "sender": "a1",
+ "receiver": "a3",
+ "amount": 1,
+ "denom": [
+ "cosmos-hub",
+ "",
+ "",
+ "",
+ "btc"
+ ]
+ }
+ },
+ "handler": "SendTransfer",
+ "bankBefore": [
+ {
+ "address": [
+ "",
+ "",
+ ""
+ ],
+ "denom": [
+ "",
+ "",
+ "",
+ "",
+ ""
+ ],
+ "amount": 0
+ },
+ {
+ "address": [
+ "",
+ "",
+ "a3"
+ ],
+ "denom": [
+ "",
+ "",
+ "transfer",
+ "channel-1",
+ "btc"
+ ],
+ "amount": 2
+ }
+ ],
+ "bankAfter": [
+ {
+ "address": [
+ "",
+ "",
+ ""
+ ],
+ "denom": [
+ "",
+ "",
+ "",
+ "",
+ ""
+ ],
+ "amount": 0
+ },
+ {
+ "address": [
+ "",
+ "",
+ "a3"
+ ],
+ "denom": [
+ "",
+ "",
+ "transfer",
+ "channel-1",
+ "btc"
+ ],
+ "amount": 2
+ }
+ ],
+ "error": true
+ },
+ {
+ "packet": {
+ "sourceChannel": "channel-0",
+ "sourcePort": "transfer",
+ "destChannel": "channel-1",
+ "destPort": "transfer",
+ "data": {
+ "sender": "a2",
+ "receiver": "a2",
+ "amount": 4,
+ "denom": [
+ "",
+ "",
+ "ethereum-hub",
+ "cosmos-hub",
+ "atom"
+ ]
+ }
+ },
+ "handler": "OnRecvPacket",
+ "bankBefore": [
+ {
+ "address": [
+ "",
+ "",
+ ""
+ ],
+ "denom": [
+ "",
+ "",
+ "",
+ "",
+ ""
+ ],
+ "amount": 0
+ },
+ {
+ "address": [
+ "",
+ "",
+ "a3"
+ ],
+ "denom": [
+ "",
+ "",
+ "transfer",
+ "channel-1",
+ "btc"
+ ],
+ "amount": 2
+ }
+ ],
+ "bankAfter": [
+ {
+ "address": [
+ "",
+ "",
+ ""
+ ],
+ "denom": [
+ "",
+ "",
+ "",
+ "",
+ ""
+ ],
+ "amount": 0
+ },
+ {
+ "address": [
+ "",
+ "",
+ "a2"
+ ],
+ "denom": [
+ "transfer",
+ "channel-1",
+ "ethereum-hub",
+ "cosmos-hub",
+ "atom"
+ ],
+ "amount": 4
+ },
+ {
+ "address": [
+ "",
+ "",
+ "a3"
+ ],
+ "denom": [
+ "",
+ "",
+ "transfer",
+ "channel-1",
+ "btc"
+ ],
+ "amount": 2
+ }
+ ],
+ "error": false
+ },
+ {
+ "packet": {
+ "sourceChannel": "channel-0",
+ "sourcePort": "transfer",
+ "destChannel": "channel-1",
+ "destPort": "transfer",
+ "data": {
+ "sender": "",
+ "receiver": "a2",
+ "amount": 4,
+ "denom": [
+ "",
+ "",
+ "ethereum-hub",
+ "cosmos-hub",
+ "atom"
+ ]
+ }
+ },
+ "handler": "OnRecvPacket",
+ "bankBefore": [
+ {
+ "address": [
+ "",
+ "",
+ ""
+ ],
+ "denom": [
+ "",
+ "",
+ "",
+ "",
+ ""
+ ],
+ "amount": 0
+ },
+ {
+ "address": [
+ "",
+ "",
+ "a2"
+ ],
+ "denom": [
+ "transfer",
+ "channel-1",
+ "ethereum-hub",
+ "cosmos-hub",
+ "atom"
+ ],
+ "amount": 4
+ },
+ {
+ "address": [
+ "",
+ "",
+ "a3"
+ ],
+ "denom": [
+ "",
+ "",
+ "transfer",
+ "channel-1",
+ "btc"
+ ],
+ "amount": 2
+ }
+ ],
+ "bankAfter": [
+ {
+ "address": [
+ "",
+ "",
+ ""
+ ],
+ "denom": [
+ "",
+ "",
+ "",
+ "",
+ ""
+ ],
+ "amount": 0
+ },
+ {
+ "address": [
+ "",
+ "",
+ "a2"
+ ],
+ "denom": [
+ "transfer",
+ "channel-1",
+ "ethereum-hub",
+ "cosmos-hub",
+ "atom"
+ ],
+ "amount": 8
+ },
+ {
+ "address": [
+ "",
+ "",
+ "a3"
+ ],
+ "denom": [
+ "",
+ "",
+ "transfer",
+ "channel-1",
+ "btc"
+ ],
+ "amount": 2
+ }
+ ],
+ "error": false
+ },
+ {
+ "packet": {
+ "sourceChannel": "cosmos-hub",
+ "sourcePort": "bitcoin-hub",
+ "destChannel": "channel-0",
+ "destPort": "channel-1",
+ "data": {
+ "sender": "a1",
+ "receiver": "",
+ "amount": 1,
+ "denom": [
+ "transfer",
+ "channel-0",
+ "transfer",
+ "channel-0",
+ "atom"
+ ]
+ }
+ },
+ "handler": "SendTransfer",
+ "bankBefore": [
+ {
+ "address": [
+ "",
+ "",
+ ""
+ ],
+ "denom": [
+ "",
+ "",
+ "",
+ "",
+ ""
+ ],
+ "amount": 0
+ },
+ {
+ "address": [
+ "",
+ "",
+ "a2"
+ ],
+ "denom": [
+ "transfer",
+ "channel-1",
+ "ethereum-hub",
+ "cosmos-hub",
+ "atom"
+ ],
+ "amount": 8
+ },
+ {
+ "address": [
+ "",
+ "",
+ "a3"
+ ],
+ "denom": [
+ "",
+ "",
+ "transfer",
+ "channel-1",
+ "btc"
+ ],
+ "amount": 2
+ }
+ ],
+ "bankAfter": [
+ {
+ "address": [
+ "",
+ "",
+ ""
+ ],
+ "denom": [
+ "",
+ "",
+ "",
+ "",
+ ""
+ ],
+ "amount": 0
+ },
+ {
+ "address": [
+ "",
+ "",
+ "a2"
+ ],
+ "denom": [
+ "transfer",
+ "channel-1",
+ "ethereum-hub",
+ "cosmos-hub",
+ "atom"
+ ],
+ "amount": 8
+ },
+ {
+ "address": [
+ "",
+ "",
+ "a3"
+ ],
+ "denom": [
+ "",
+ "",
+ "transfer",
+ "channel-1",
+ "btc"
+ ],
+ "amount": 2
+ }
+ ],
+ "error": true
+ }
+]
\ No newline at end of file
diff --git a/applications/transfer/keeper/model_based_tests/Test5Packets.tla b/applications/transfer/keeper/model_based_tests/Test5Packets.tla
new file mode 100644
index 00000000..9691eec2
--- /dev/null
+++ b/applications/transfer/keeper/model_based_tests/Test5Packets.tla
@@ -0,0 +1,1056 @@
+------------------------- MODULE counterexample -------------------------
+
+EXTENDS relay_tests
+
+(* Initial state *)
+
+State1 ==
+TRUE
+(* Transition 0 to State2 *)
+
+State2 ==
+/\ bank = <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+/\ count = 0
+/\ error = FALSE
+/\ handler = ""
+/\ history = 0
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ error |-> FALSE,
+ handler |-> "",
+ packet |->
+ [data |->
+ [amount |-> 2,
+ denomTrace |->
+ [denom |-> "btc",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "a3",
+ sender |-> "a3"],
+ destChannel |-> "channel-1",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-0",
+ sourcePort |-> "transfer"]]
+/\ p = [data |->
+ [amount |-> 2,
+ denomTrace |->
+ [denom |-> "btc",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "a3",
+ sender |-> "a3"],
+ destChannel |-> "channel-1",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-0",
+ sourcePort |-> "transfer"]
+
+(* Transition 3 to State3 *)
+
+State3 ==
+/\ bank = <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "btc",
+ prefix0 |-> [channel |-> "channel-1", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 2
+/\ count = 1
+/\ error = FALSE
+/\ handler = "OnRecvPacket"
+/\ history = 0
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ error |-> FALSE,
+ handler |-> "",
+ packet |->
+ [data |->
+ [amount |-> 2,
+ denomTrace |->
+ [denom |-> "btc",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "a3",
+ sender |-> "a3"],
+ destChannel |-> "channel-1",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-0",
+ sourcePort |-> "transfer"]]
+ @@ 1
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "btc",
+ prefix0 |-> [channel |-> "channel-1", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 2,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ error |-> FALSE,
+ handler |-> "OnRecvPacket",
+ packet |->
+ [data |->
+ [amount |-> 2,
+ denomTrace |->
+ [denom |-> "btc",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "a3",
+ sender |-> "a3"],
+ destChannel |-> "channel-1",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-0",
+ sourcePort |-> "transfer"]]
+/\ p = [data |->
+ [amount |-> 1,
+ denomTrace |->
+ [denom |-> "btc",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> "cosmos-hub"]],
+ receiver |-> "a3",
+ sender |-> "a1"],
+ destChannel |-> "channel-1",
+ destPort |-> "transfer",
+ sourceChannel |-> "ethereum-hub",
+ sourcePort |-> "channel-0"]
+
+(* Transition 0 to State4 *)
+
+State4 ==
+/\ bank = <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "btc",
+ prefix0 |-> [channel |-> "channel-1", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 2
+/\ count = 2
+/\ error = TRUE
+/\ handler = "SendTransfer"
+/\ history = 0
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ error |-> FALSE,
+ handler |-> "",
+ packet |->
+ [data |->
+ [amount |-> 2,
+ denomTrace |->
+ [denom |-> "btc",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "a3",
+ sender |-> "a3"],
+ destChannel |-> "channel-1",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-0",
+ sourcePort |-> "transfer"]]
+ @@ 1
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "btc",
+ prefix0 |-> [channel |-> "channel-1", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 2,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ error |-> FALSE,
+ handler |-> "OnRecvPacket",
+ packet |->
+ [data |->
+ [amount |-> 2,
+ denomTrace |->
+ [denom |-> "btc",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "a3",
+ sender |-> "a3"],
+ destChannel |-> "channel-1",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-0",
+ sourcePort |-> "transfer"]]
+ @@ 2
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "btc",
+ prefix0 |-> [channel |-> "channel-1", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 2,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "btc",
+ prefix0 |-> [channel |-> "channel-1", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 2,
+ error |-> TRUE,
+ handler |-> "SendTransfer",
+ packet |->
+ [data |->
+ [amount |-> 1,
+ denomTrace |->
+ [denom |-> "btc",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> "cosmos-hub"]],
+ receiver |-> "a3",
+ sender |-> "a1"],
+ destChannel |-> "channel-1",
+ destPort |-> "transfer",
+ sourceChannel |-> "ethereum-hub",
+ sourcePort |-> "channel-0"]]
+/\ p = [data |->
+ [amount |-> 4,
+ denomTrace |->
+ [denom |-> "atom",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "ethereum-hub"],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "a2",
+ sender |-> "a2"],
+ destChannel |-> "channel-1",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-0",
+ sourcePort |-> "transfer"]
+
+(* Transition 5 to State5 *)
+
+State5 ==
+/\ bank = <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "atom",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "ethereum-hub"],
+ prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]]
+ >>
+ :> 4
+ @@ <<
+ [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "btc",
+ prefix0 |-> [channel |-> "channel-1", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 2
+/\ count = 3
+/\ error = FALSE
+/\ handler = "OnRecvPacket"
+/\ history = 0
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ error |-> FALSE,
+ handler |-> "",
+ packet |->
+ [data |->
+ [amount |-> 2,
+ denomTrace |->
+ [denom |-> "btc",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "a3",
+ sender |-> "a3"],
+ destChannel |-> "channel-1",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-0",
+ sourcePort |-> "transfer"]]
+ @@ 1
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "btc",
+ prefix0 |-> [channel |-> "channel-1", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 2,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ error |-> FALSE,
+ handler |-> "OnRecvPacket",
+ packet |->
+ [data |->
+ [amount |-> 2,
+ denomTrace |->
+ [denom |-> "btc",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "a3",
+ sender |-> "a3"],
+ destChannel |-> "channel-1",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-0",
+ sourcePort |-> "transfer"]]
+ @@ 2
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "btc",
+ prefix0 |-> [channel |-> "channel-1", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 2,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "btc",
+ prefix0 |-> [channel |-> "channel-1", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 2,
+ error |-> TRUE,
+ handler |-> "SendTransfer",
+ packet |->
+ [data |->
+ [amount |-> 1,
+ denomTrace |->
+ [denom |-> "btc",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> "cosmos-hub"]],
+ receiver |-> "a3",
+ sender |-> "a1"],
+ destChannel |-> "channel-1",
+ destPort |-> "transfer",
+ sourceChannel |-> "ethereum-hub",
+ sourcePort |-> "channel-0"]]
+ @@ 3
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "atom",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "ethereum-hub"],
+ prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]]
+ >>
+ :> 4
+ @@ <<
+ [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "btc",
+ prefix0 |-> [channel |-> "channel-1", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 2,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "btc",
+ prefix0 |-> [channel |-> "channel-1", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 2,
+ error |-> FALSE,
+ handler |-> "OnRecvPacket",
+ packet |->
+ [data |->
+ [amount |-> 4,
+ denomTrace |->
+ [denom |-> "atom",
+ prefix0 |->
+ [channel |-> "cosmos-hub", port |-> "ethereum-hub"],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "a2",
+ sender |-> "a2"],
+ destChannel |-> "channel-1",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-0",
+ sourcePort |-> "transfer"]]
+/\ p = [data |->
+ [amount |-> 4,
+ denomTrace |->
+ [denom |-> "atom",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "ethereum-hub"],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "a2",
+ sender |-> ""],
+ destChannel |-> "channel-1",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-0",
+ sourcePort |-> "transfer"]
+
+(* Transition 5 to State6 *)
+
+State6 ==
+/\ bank = <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "atom",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "ethereum-hub"],
+ prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]]
+ >>
+ :> 8
+ @@ <<
+ [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "btc",
+ prefix0 |-> [channel |-> "channel-1", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 2
+/\ count = 4
+/\ error = FALSE
+/\ handler = "OnRecvPacket"
+/\ history = 0
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ error |-> FALSE,
+ handler |-> "",
+ packet |->
+ [data |->
+ [amount |-> 2,
+ denomTrace |->
+ [denom |-> "btc",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "a3",
+ sender |-> "a3"],
+ destChannel |-> "channel-1",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-0",
+ sourcePort |-> "transfer"]]
+ @@ 1
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "btc",
+ prefix0 |-> [channel |-> "channel-1", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 2,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ error |-> FALSE,
+ handler |-> "OnRecvPacket",
+ packet |->
+ [data |->
+ [amount |-> 2,
+ denomTrace |->
+ [denom |-> "btc",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "a3",
+ sender |-> "a3"],
+ destChannel |-> "channel-1",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-0",
+ sourcePort |-> "transfer"]]
+ @@ 2
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "btc",
+ prefix0 |-> [channel |-> "channel-1", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 2,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "btc",
+ prefix0 |-> [channel |-> "channel-1", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 2,
+ error |-> TRUE,
+ handler |-> "SendTransfer",
+ packet |->
+ [data |->
+ [amount |-> 1,
+ denomTrace |->
+ [denom |-> "btc",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> "cosmos-hub"]],
+ receiver |-> "a3",
+ sender |-> "a1"],
+ destChannel |-> "channel-1",
+ destPort |-> "transfer",
+ sourceChannel |-> "ethereum-hub",
+ sourcePort |-> "channel-0"]]
+ @@ 3
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "atom",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "ethereum-hub"],
+ prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]]
+ >>
+ :> 4
+ @@ <<
+ [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "btc",
+ prefix0 |-> [channel |-> "channel-1", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 2,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "btc",
+ prefix0 |-> [channel |-> "channel-1", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 2,
+ error |-> FALSE,
+ handler |-> "OnRecvPacket",
+ packet |->
+ [data |->
+ [amount |-> 4,
+ denomTrace |->
+ [denom |-> "atom",
+ prefix0 |->
+ [channel |-> "cosmos-hub", port |-> "ethereum-hub"],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "a2",
+ sender |-> "a2"],
+ destChannel |-> "channel-1",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-0",
+ sourcePort |-> "transfer"]]
+ @@ 4
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "atom",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "ethereum-hub"],
+ prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]]
+ >>
+ :> 8
+ @@ <<
+ [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "btc",
+ prefix0 |-> [channel |-> "channel-1", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 2,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "atom",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "ethereum-hub"],
+ prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]]
+ >>
+ :> 4
+ @@ <<
+ [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "btc",
+ prefix0 |-> [channel |-> "channel-1", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 2,
+ error |-> FALSE,
+ handler |-> "OnRecvPacket",
+ packet |->
+ [data |->
+ [amount |-> 4,
+ denomTrace |->
+ [denom |-> "atom",
+ prefix0 |->
+ [channel |-> "cosmos-hub", port |-> "ethereum-hub"],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "a2",
+ sender |-> ""],
+ destChannel |-> "channel-1",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-0",
+ sourcePort |-> "transfer"]]
+/\ p = [data |->
+ [amount |-> 1,
+ denomTrace |->
+ [denom |-> "atom",
+ prefix0 |-> [channel |-> "channel-0", port |-> "transfer"],
+ prefix1 |-> [channel |-> "channel-0", port |-> "transfer"]],
+ receiver |-> "",
+ sender |-> "a1"],
+ destChannel |-> "channel-0",
+ destPort |-> "channel-1",
+ sourceChannel |-> "cosmos-hub",
+ sourcePort |-> "bitcoin-hub"]
+
+(* Transition 0 to State7 *)
+
+State7 ==
+/\ bank = <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "atom",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "ethereum-hub"],
+ prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]]
+ >>
+ :> 8
+ @@ <<
+ [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "btc",
+ prefix0 |-> [channel |-> "channel-1", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 2
+/\ count = 5
+/\ error = TRUE
+/\ handler = "SendTransfer"
+/\ history = 0
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ error |-> FALSE,
+ handler |-> "",
+ packet |->
+ [data |->
+ [amount |-> 2,
+ denomTrace |->
+ [denom |-> "btc",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "a3",
+ sender |-> "a3"],
+ destChannel |-> "channel-1",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-0",
+ sourcePort |-> "transfer"]]
+ @@ 1
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "btc",
+ prefix0 |-> [channel |-> "channel-1", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 2,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ error |-> FALSE,
+ handler |-> "OnRecvPacket",
+ packet |->
+ [data |->
+ [amount |-> 2,
+ denomTrace |->
+ [denom |-> "btc",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "a3",
+ sender |-> "a3"],
+ destChannel |-> "channel-1",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-0",
+ sourcePort |-> "transfer"]]
+ @@ 2
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "btc",
+ prefix0 |-> [channel |-> "channel-1", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 2,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "btc",
+ prefix0 |-> [channel |-> "channel-1", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 2,
+ error |-> TRUE,
+ handler |-> "SendTransfer",
+ packet |->
+ [data |->
+ [amount |-> 1,
+ denomTrace |->
+ [denom |-> "btc",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> "cosmos-hub"]],
+ receiver |-> "a3",
+ sender |-> "a1"],
+ destChannel |-> "channel-1",
+ destPort |-> "transfer",
+ sourceChannel |-> "ethereum-hub",
+ sourcePort |-> "channel-0"]]
+ @@ 3
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "atom",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "ethereum-hub"],
+ prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]]
+ >>
+ :> 4
+ @@ <<
+ [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "btc",
+ prefix0 |-> [channel |-> "channel-1", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 2,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "btc",
+ prefix0 |-> [channel |-> "channel-1", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 2,
+ error |-> FALSE,
+ handler |-> "OnRecvPacket",
+ packet |->
+ [data |->
+ [amount |-> 4,
+ denomTrace |->
+ [denom |-> "atom",
+ prefix0 |->
+ [channel |-> "cosmos-hub", port |-> "ethereum-hub"],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "a2",
+ sender |-> "a2"],
+ destChannel |-> "channel-1",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-0",
+ sourcePort |-> "transfer"]]
+ @@ 4
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "atom",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "ethereum-hub"],
+ prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]]
+ >>
+ :> 8
+ @@ <<
+ [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "btc",
+ prefix0 |-> [channel |-> "channel-1", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 2,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "atom",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "ethereum-hub"],
+ prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]]
+ >>
+ :> 4
+ @@ <<
+ [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "btc",
+ prefix0 |-> [channel |-> "channel-1", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 2,
+ error |-> FALSE,
+ handler |-> "OnRecvPacket",
+ packet |->
+ [data |->
+ [amount |-> 4,
+ denomTrace |->
+ [denom |-> "atom",
+ prefix0 |->
+ [channel |-> "cosmos-hub", port |-> "ethereum-hub"],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "a2",
+ sender |-> ""],
+ destChannel |-> "channel-1",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-0",
+ sourcePort |-> "transfer"]]
+ @@ 5
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "atom",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "ethereum-hub"],
+ prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]]
+ >>
+ :> 8
+ @@ <<
+ [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "btc",
+ prefix0 |-> [channel |-> "channel-1", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 2,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "atom",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "ethereum-hub"],
+ prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]]
+ >>
+ :> 8
+ @@ <<
+ [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "btc",
+ prefix0 |-> [channel |-> "channel-1", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 2,
+ error |-> TRUE,
+ handler |-> "SendTransfer",
+ packet |->
+ [data |->
+ [amount |-> 1,
+ denomTrace |->
+ [denom |-> "atom",
+ prefix0 |-> [channel |-> "channel-0", port |-> "transfer"],
+ prefix1 |-> [channel |-> "channel-0", port |-> "transfer"]],
+ receiver |-> "",
+ sender |-> "a1"],
+ destChannel |-> "channel-0",
+ destPort |-> "channel-1",
+ sourceChannel |-> "cosmos-hub",
+ sourcePort |-> "bitcoin-hub"]]
+/\ p = [data |->
+ [amount |-> 0,
+ denomTrace |->
+ [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "",
+ sender |-> ""],
+ destChannel |-> "",
+ destPort |-> "",
+ sourceChannel |-> "",
+ sourcePort |-> ""]
+
+(* The following formula holds true in the last state and violates the invariant *)
+
+InvariantViolation ==
+ count >= 5
+ /\ BMC!Skolem((\E s1$2 \in DOMAIN history:
+ BMC!Skolem((\E s2$2 \in DOMAIN history:
+ ~(history[s1$2]["handler"] = history[s2$2]["handler"])))))
+
+================================================================================
+\* Created by Apalache on Thu Dec 10 11:52:41 CET 2020
+\* https://github.com/informalsystems/apalache
diff --git a/applications/transfer/keeper/model_based_tests/Test5PacketsAllDifferentPass.json b/applications/transfer/keeper/model_based_tests/Test5PacketsAllDifferentPass.json
new file mode 100644
index 00000000..6a039f3e
--- /dev/null
+++ b/applications/transfer/keeper/model_based_tests/Test5PacketsAllDifferentPass.json
@@ -0,0 +1,612 @@
+[
+ {
+ "packet": {
+ "sourceChannel": "channel-0",
+ "sourcePort": "transfer",
+ "destChannel": "channel-0",
+ "destPort": "transfer",
+ "data": {
+ "sender": "a3",
+ "receiver": "a2",
+ "amount": 3,
+ "denom": [
+ "",
+ "",
+ "transfer",
+ "channel-0",
+ "eth"
+ ]
+ }
+ },
+ "handler": "OnTimeoutPacket",
+ "bankBefore": [
+ {
+ "address": [
+ "",
+ "",
+ ""
+ ],
+ "denom": [
+ "",
+ "",
+ "",
+ "",
+ ""
+ ],
+ "amount": 0
+ }
+ ],
+ "bankAfter": [
+ {
+ "address": [
+ "",
+ "",
+ ""
+ ],
+ "denom": [
+ "",
+ "",
+ "",
+ "",
+ ""
+ ],
+ "amount": 0
+ },
+ {
+ "address": [
+ "",
+ "",
+ "a3"
+ ],
+ "denom": [
+ "",
+ "",
+ "transfer",
+ "channel-0",
+ "eth"
+ ],
+ "amount": 3
+ }
+ ],
+ "error": false
+ },
+ {
+ "packet": {
+ "sourceChannel": "channel-1",
+ "sourcePort": "transfer",
+ "destChannel": "channel-0",
+ "destPort": "transfer",
+ "data": {
+ "sender": "a2",
+ "receiver": "a1",
+ "amount": 3,
+ "denom": [
+ "transfer",
+ "channel-1",
+ "cosmos-hub",
+ "cosmos-hub",
+ "btc"
+ ]
+ }
+ },
+ "handler": "OnRecvAcknowledgementError",
+ "bankBefore": [
+ {
+ "address": [
+ "",
+ "",
+ ""
+ ],
+ "denom": [
+ "",
+ "",
+ "",
+ "",
+ ""
+ ],
+ "amount": 0
+ },
+ {
+ "address": [
+ "",
+ "",
+ "a3"
+ ],
+ "denom": [
+ "",
+ "",
+ "transfer",
+ "channel-0",
+ "eth"
+ ],
+ "amount": 3
+ }
+ ],
+ "bankAfter": [
+ {
+ "address": [
+ "",
+ "",
+ ""
+ ],
+ "denom": [
+ "",
+ "",
+ "",
+ "",
+ ""
+ ],
+ "amount": 0
+ },
+ {
+ "address": [
+ "",
+ "",
+ "a2"
+ ],
+ "denom": [
+ "transfer",
+ "channel-1",
+ "cosmos-hub",
+ "cosmos-hub",
+ "btc"
+ ],
+ "amount": 3
+ },
+ {
+ "address": [
+ "",
+ "",
+ "a3"
+ ],
+ "denom": [
+ "",
+ "",
+ "transfer",
+ "channel-0",
+ "eth"
+ ],
+ "amount": 3
+ }
+ ],
+ "error": false
+ },
+ {
+ "packet": {
+ "sourceChannel": "channel-0",
+ "sourcePort": "transfer",
+ "destChannel": "channel-1",
+ "destPort": "transfer",
+ "data": {
+ "sender": "a1",
+ "receiver": "a2",
+ "amount": 3,
+ "denom": [
+ "",
+ "",
+ "cosmos-hub",
+ "cosmos-hub",
+ "atom"
+ ]
+ }
+ },
+ "handler": "OnRecvPacket",
+ "bankBefore": [
+ {
+ "address": [
+ "",
+ "",
+ ""
+ ],
+ "denom": [
+ "",
+ "",
+ "",
+ "",
+ ""
+ ],
+ "amount": 0
+ },
+ {
+ "address": [
+ "",
+ "",
+ "a2"
+ ],
+ "denom": [
+ "transfer",
+ "channel-1",
+ "cosmos-hub",
+ "cosmos-hub",
+ "btc"
+ ],
+ "amount": 3
+ },
+ {
+ "address": [
+ "",
+ "",
+ "a3"
+ ],
+ "denom": [
+ "",
+ "",
+ "transfer",
+ "channel-0",
+ "eth"
+ ],
+ "amount": 3
+ }
+ ],
+ "bankAfter": [
+ {
+ "address": [
+ "",
+ "",
+ ""
+ ],
+ "denom": [
+ "",
+ "",
+ "",
+ "",
+ ""
+ ],
+ "amount": 0
+ },
+ {
+ "address": [
+ "",
+ "",
+ "a2"
+ ],
+ "denom": [
+ "transfer",
+ "channel-1",
+ "cosmos-hub",
+ "cosmos-hub",
+ "atom"
+ ],
+ "amount": 3
+ },
+ {
+ "address": [
+ "",
+ "",
+ "a2"
+ ],
+ "denom": [
+ "transfer",
+ "channel-1",
+ "cosmos-hub",
+ "cosmos-hub",
+ "btc"
+ ],
+ "amount": 3
+ },
+ {
+ "address": [
+ "",
+ "",
+ "a3"
+ ],
+ "denom": [
+ "",
+ "",
+ "transfer",
+ "channel-0",
+ "eth"
+ ],
+ "amount": 3
+ }
+ ],
+ "error": false
+ },
+ {
+ "packet": {
+ "sourceChannel": "cosmos-hub",
+ "sourcePort": "bitcoin-hub",
+ "destChannel": "transfer",
+ "destPort": "cosmos-hub",
+ "data": {
+ "sender": "a1",
+ "receiver": "",
+ "amount": 2,
+ "denom": [
+ "",
+ "channel-0",
+ "channel-1",
+ "channel-1",
+ ""
+ ]
+ }
+ },
+ "handler": "OnRecvAcknowledgementResult",
+ "bankBefore": [
+ {
+ "address": [
+ "",
+ "",
+ ""
+ ],
+ "denom": [
+ "",
+ "",
+ "",
+ "",
+ ""
+ ],
+ "amount": 0
+ },
+ {
+ "address": [
+ "",
+ "",
+ "a2"
+ ],
+ "denom": [
+ "transfer",
+ "channel-1",
+ "cosmos-hub",
+ "cosmos-hub",
+ "atom"
+ ],
+ "amount": 3
+ },
+ {
+ "address": [
+ "",
+ "",
+ "a2"
+ ],
+ "denom": [
+ "transfer",
+ "channel-1",
+ "cosmos-hub",
+ "cosmos-hub",
+ "btc"
+ ],
+ "amount": 3
+ },
+ {
+ "address": [
+ "",
+ "",
+ "a3"
+ ],
+ "denom": [
+ "",
+ "",
+ "transfer",
+ "channel-0",
+ "eth"
+ ],
+ "amount": 3
+ }
+ ],
+ "bankAfter": [
+ {
+ "address": [
+ "",
+ "",
+ ""
+ ],
+ "denom": [
+ "",
+ "",
+ "",
+ "",
+ ""
+ ],
+ "amount": 0
+ },
+ {
+ "address": [
+ "",
+ "",
+ "a2"
+ ],
+ "denom": [
+ "transfer",
+ "channel-1",
+ "cosmos-hub",
+ "cosmos-hub",
+ "atom"
+ ],
+ "amount": 3
+ },
+ {
+ "address": [
+ "",
+ "",
+ "a2"
+ ],
+ "denom": [
+ "transfer",
+ "channel-1",
+ "cosmos-hub",
+ "cosmos-hub",
+ "btc"
+ ],
+ "amount": 3
+ },
+ {
+ "address": [
+ "",
+ "",
+ "a3"
+ ],
+ "denom": [
+ "",
+ "",
+ "transfer",
+ "channel-0",
+ "eth"
+ ],
+ "amount": 3
+ }
+ ],
+ "error": false
+ },
+ {
+ "packet": {
+ "sourceChannel": "channel-1",
+ "sourcePort": "transfer",
+ "destChannel": "channel-0",
+ "destPort": "transfer",
+ "data": {
+ "sender": "a3",
+ "receiver": "a3",
+ "amount": 1,
+ "denom": [
+ "",
+ "",
+ "transfer",
+ "channel-0",
+ "eth"
+ ]
+ }
+ },
+ "handler": "SendTransfer",
+ "bankBefore": [
+ {
+ "address": [
+ "",
+ "",
+ ""
+ ],
+ "denom": [
+ "",
+ "",
+ "",
+ "",
+ ""
+ ],
+ "amount": 0
+ },
+ {
+ "address": [
+ "",
+ "",
+ "a2"
+ ],
+ "denom": [
+ "transfer",
+ "channel-1",
+ "cosmos-hub",
+ "cosmos-hub",
+ "atom"
+ ],
+ "amount": 3
+ },
+ {
+ "address": [
+ "",
+ "",
+ "a2"
+ ],
+ "denom": [
+ "transfer",
+ "channel-1",
+ "cosmos-hub",
+ "cosmos-hub",
+ "btc"
+ ],
+ "amount": 3
+ },
+ {
+ "address": [
+ "",
+ "",
+ "a3"
+ ],
+ "denom": [
+ "",
+ "",
+ "transfer",
+ "channel-0",
+ "eth"
+ ],
+ "amount": 3
+ }
+ ],
+ "bankAfter": [
+ {
+ "address": [
+ "",
+ "",
+ ""
+ ],
+ "denom": [
+ "",
+ "",
+ "",
+ "",
+ ""
+ ],
+ "amount": 0
+ },
+ {
+ "address": [
+ "",
+ "",
+ "a2"
+ ],
+ "denom": [
+ "transfer",
+ "channel-1",
+ "cosmos-hub",
+ "cosmos-hub",
+ "atom"
+ ],
+ "amount": 3
+ },
+ {
+ "address": [
+ "",
+ "",
+ "a2"
+ ],
+ "denom": [
+ "transfer",
+ "channel-1",
+ "cosmos-hub",
+ "cosmos-hub",
+ "btc"
+ ],
+ "amount": 3
+ },
+ {
+ "address": [
+ "",
+ "",
+ "a3"
+ ],
+ "denom": [
+ "",
+ "",
+ "transfer",
+ "channel-0",
+ "eth"
+ ],
+ "amount": 2
+ },
+ {
+ "address": [
+ "transfer",
+ "channel-1",
+ ""
+ ],
+ "denom": [
+ "",
+ "",
+ "transfer",
+ "channel-0",
+ "eth"
+ ],
+ "amount": 1
+ }
+ ],
+ "error": false
+ }
+]
\ No newline at end of file
diff --git a/applications/transfer/keeper/model_based_tests/Test5PacketsAllDifferentPass.tla b/applications/transfer/keeper/model_based_tests/Test5PacketsAllDifferentPass.tla
new file mode 100644
index 00000000..89e6d87b
--- /dev/null
+++ b/applications/transfer/keeper/model_based_tests/Test5PacketsAllDifferentPass.tla
@@ -0,0 +1,1188 @@
+------------------------- MODULE counterexample -------------------------
+
+EXTENDS relay_tests
+
+(* Initial state *)
+
+State1 ==
+TRUE
+(* Transition 0 to State2 *)
+
+State2 ==
+/\ bank = <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+/\ count = 0
+/\ error = FALSE
+/\ handler = ""
+/\ history = 0
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ error |-> FALSE,
+ handler |-> "",
+ packet |->
+ [data |->
+ [amount |-> 3,
+ denomTrace |->
+ [denom |-> "eth",
+ prefix0 |-> [channel |-> "channel-0", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "a2",
+ sender |-> "a3"],
+ destChannel |-> "channel-0",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-0",
+ sourcePort |-> "transfer"]]
+/\ p = [data |->
+ [amount |-> 3,
+ denomTrace |->
+ [denom |-> "eth",
+ prefix0 |-> [channel |-> "channel-0", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "a2",
+ sender |-> "a3"],
+ destChannel |-> "channel-0",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-0",
+ sourcePort |-> "transfer"]
+
+(* Transition 6 to State3 *)
+
+State3 ==
+/\ bank = <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "eth",
+ prefix0 |-> [channel |-> "channel-0", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 3
+/\ count = 1
+/\ error = FALSE
+/\ handler = "OnTimeoutPacket"
+/\ history = 0
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ error |-> FALSE,
+ handler |-> "",
+ packet |->
+ [data |->
+ [amount |-> 3,
+ denomTrace |->
+ [denom |-> "eth",
+ prefix0 |-> [channel |-> "channel-0", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "a2",
+ sender |-> "a3"],
+ destChannel |-> "channel-0",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-0",
+ sourcePort |-> "transfer"]]
+ @@ 1
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "eth",
+ prefix0 |-> [channel |-> "channel-0", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 3,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ error |-> FALSE,
+ handler |-> "OnTimeoutPacket",
+ packet |->
+ [data |->
+ [amount |-> 3,
+ denomTrace |->
+ [denom |-> "eth",
+ prefix0 |-> [channel |-> "channel-0", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "a2",
+ sender |-> "a3"],
+ destChannel |-> "channel-0",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-0",
+ sourcePort |-> "transfer"]]
+/\ p = [data |->
+ [amount |-> 3,
+ denomTrace |->
+ [denom |-> "btc",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"],
+ prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]],
+ receiver |-> "a1",
+ sender |-> "a2"],
+ destChannel |-> "channel-0",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-1",
+ sourcePort |-> "transfer"]
+
+(* Transition 10 to State4 *)
+
+State4 ==
+/\ bank = <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "btc",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"],
+ prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]]
+ >>
+ :> 3
+ @@ <<
+ [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "eth",
+ prefix0 |-> [channel |-> "channel-0", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 3
+/\ count = 2
+/\ error = FALSE
+/\ handler = "OnRecvAcknowledgementError"
+/\ history = 0
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ error |-> FALSE,
+ handler |-> "",
+ packet |->
+ [data |->
+ [amount |-> 3,
+ denomTrace |->
+ [denom |-> "eth",
+ prefix0 |-> [channel |-> "channel-0", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "a2",
+ sender |-> "a3"],
+ destChannel |-> "channel-0",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-0",
+ sourcePort |-> "transfer"]]
+ @@ 1
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "eth",
+ prefix0 |-> [channel |-> "channel-0", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 3,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ error |-> FALSE,
+ handler |-> "OnTimeoutPacket",
+ packet |->
+ [data |->
+ [amount |-> 3,
+ denomTrace |->
+ [denom |-> "eth",
+ prefix0 |-> [channel |-> "channel-0", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "a2",
+ sender |-> "a3"],
+ destChannel |-> "channel-0",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-0",
+ sourcePort |-> "transfer"]]
+ @@ 2
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "btc",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"],
+ prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]]
+ >>
+ :> 3
+ @@ <<
+ [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "eth",
+ prefix0 |-> [channel |-> "channel-0", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 3,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "eth",
+ prefix0 |-> [channel |-> "channel-0", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 3,
+ error |-> FALSE,
+ handler |-> "OnRecvAcknowledgementError",
+ packet |->
+ [data |->
+ [amount |-> 3,
+ denomTrace |->
+ [denom |-> "btc",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"],
+ prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]],
+ receiver |-> "a1",
+ sender |-> "a2"],
+ destChannel |-> "channel-0",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-1",
+ sourcePort |-> "transfer"]]
+/\ p = [data |->
+ [amount |-> 3,
+ denomTrace |->
+ [denom |-> "atom",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "a2",
+ sender |-> "a1"],
+ destChannel |-> "channel-1",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-0",
+ sourcePort |-> "transfer"]
+
+(* Transition 5 to State5 *)
+
+State5 ==
+/\ bank = <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "atom",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"],
+ prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]]
+ >>
+ :> 3
+ @@ <<
+ [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "btc",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"],
+ prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]]
+ >>
+ :> 3
+ @@ <<
+ [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "eth",
+ prefix0 |-> [channel |-> "channel-0", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 3
+/\ count = 3
+/\ error = FALSE
+/\ handler = "OnRecvPacket"
+/\ history = 0
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ error |-> FALSE,
+ handler |-> "",
+ packet |->
+ [data |->
+ [amount |-> 3,
+ denomTrace |->
+ [denom |-> "eth",
+ prefix0 |-> [channel |-> "channel-0", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "a2",
+ sender |-> "a3"],
+ destChannel |-> "channel-0",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-0",
+ sourcePort |-> "transfer"]]
+ @@ 1
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "eth",
+ prefix0 |-> [channel |-> "channel-0", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 3,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ error |-> FALSE,
+ handler |-> "OnTimeoutPacket",
+ packet |->
+ [data |->
+ [amount |-> 3,
+ denomTrace |->
+ [denom |-> "eth",
+ prefix0 |-> [channel |-> "channel-0", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "a2",
+ sender |-> "a3"],
+ destChannel |-> "channel-0",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-0",
+ sourcePort |-> "transfer"]]
+ @@ 2
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "btc",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"],
+ prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]]
+ >>
+ :> 3
+ @@ <<
+ [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "eth",
+ prefix0 |-> [channel |-> "channel-0", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 3,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "eth",
+ prefix0 |-> [channel |-> "channel-0", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 3,
+ error |-> FALSE,
+ handler |-> "OnRecvAcknowledgementError",
+ packet |->
+ [data |->
+ [amount |-> 3,
+ denomTrace |->
+ [denom |-> "btc",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"],
+ prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]],
+ receiver |-> "a1",
+ sender |-> "a2"],
+ destChannel |-> "channel-0",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-1",
+ sourcePort |-> "transfer"]]
+ @@ 3
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "atom",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"],
+ prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]]
+ >>
+ :> 3
+ @@ <<
+ [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "btc",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"],
+ prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]]
+ >>
+ :> 3
+ @@ <<
+ [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "eth",
+ prefix0 |-> [channel |-> "channel-0", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 3,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "btc",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"],
+ prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]]
+ >>
+ :> 3
+ @@ <<
+ [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "eth",
+ prefix0 |-> [channel |-> "channel-0", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 3,
+ error |-> FALSE,
+ handler |-> "OnRecvPacket",
+ packet |->
+ [data |->
+ [amount |-> 3,
+ denomTrace |->
+ [denom |-> "atom",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "a2",
+ sender |-> "a1"],
+ destChannel |-> "channel-1",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-0",
+ sourcePort |-> "transfer"]]
+/\ p = [data |->
+ [amount |-> 2,
+ denomTrace |->
+ [denom |-> "",
+ prefix0 |-> [channel |-> "channel-1", port |-> "channel-1"],
+ prefix1 |-> [channel |-> "channel-0", port |-> ""]],
+ receiver |-> "",
+ sender |-> "a1"],
+ destChannel |-> "transfer",
+ destPort |-> "cosmos-hub",
+ sourceChannel |-> "cosmos-hub",
+ sourcePort |-> "bitcoin-hub"]
+
+(* Transition 12 to State6 *)
+
+State6 ==
+/\ bank = <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "atom",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"],
+ prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]]
+ >>
+ :> 3
+ @@ <<
+ [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "btc",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"],
+ prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]]
+ >>
+ :> 3
+ @@ <<
+ [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "eth",
+ prefix0 |-> [channel |-> "channel-0", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 3
+/\ count = 4
+/\ error = FALSE
+/\ handler = "OnRecvAcknowledgementResult"
+/\ history = 0
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ error |-> FALSE,
+ handler |-> "",
+ packet |->
+ [data |->
+ [amount |-> 3,
+ denomTrace |->
+ [denom |-> "eth",
+ prefix0 |-> [channel |-> "channel-0", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "a2",
+ sender |-> "a3"],
+ destChannel |-> "channel-0",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-0",
+ sourcePort |-> "transfer"]]
+ @@ 1
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "eth",
+ prefix0 |-> [channel |-> "channel-0", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 3,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ error |-> FALSE,
+ handler |-> "OnTimeoutPacket",
+ packet |->
+ [data |->
+ [amount |-> 3,
+ denomTrace |->
+ [denom |-> "eth",
+ prefix0 |-> [channel |-> "channel-0", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "a2",
+ sender |-> "a3"],
+ destChannel |-> "channel-0",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-0",
+ sourcePort |-> "transfer"]]
+ @@ 2
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "btc",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"],
+ prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]]
+ >>
+ :> 3
+ @@ <<
+ [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "eth",
+ prefix0 |-> [channel |-> "channel-0", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 3,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "eth",
+ prefix0 |-> [channel |-> "channel-0", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 3,
+ error |-> FALSE,
+ handler |-> "OnRecvAcknowledgementError",
+ packet |->
+ [data |->
+ [amount |-> 3,
+ denomTrace |->
+ [denom |-> "btc",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"],
+ prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]],
+ receiver |-> "a1",
+ sender |-> "a2"],
+ destChannel |-> "channel-0",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-1",
+ sourcePort |-> "transfer"]]
+ @@ 3
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "atom",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"],
+ prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]]
+ >>
+ :> 3
+ @@ <<
+ [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "btc",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"],
+ prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]]
+ >>
+ :> 3
+ @@ <<
+ [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "eth",
+ prefix0 |-> [channel |-> "channel-0", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 3,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "btc",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"],
+ prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]]
+ >>
+ :> 3
+ @@ <<
+ [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "eth",
+ prefix0 |-> [channel |-> "channel-0", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 3,
+ error |-> FALSE,
+ handler |-> "OnRecvPacket",
+ packet |->
+ [data |->
+ [amount |-> 3,
+ denomTrace |->
+ [denom |-> "atom",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "a2",
+ sender |-> "a1"],
+ destChannel |-> "channel-1",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-0",
+ sourcePort |-> "transfer"]]
+ @@ 4
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "atom",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"],
+ prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]]
+ >>
+ :> 3
+ @@ <<
+ [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "btc",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"],
+ prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]]
+ >>
+ :> 3
+ @@ <<
+ [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "eth",
+ prefix0 |-> [channel |-> "channel-0", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 3,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "atom",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"],
+ prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]]
+ >>
+ :> 3
+ @@ <<
+ [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "btc",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"],
+ prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]]
+ >>
+ :> 3
+ @@ <<
+ [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "eth",
+ prefix0 |-> [channel |-> "channel-0", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 3,
+ error |-> FALSE,
+ handler |-> "OnRecvAcknowledgementResult",
+ packet |->
+ [data |->
+ [amount |-> 2,
+ denomTrace |->
+ [denom |-> "",
+ prefix0 |-> [channel |-> "channel-1", port |-> "channel-1"],
+ prefix1 |-> [channel |-> "channel-0", port |-> ""]],
+ receiver |-> "",
+ sender |-> "a1"],
+ destChannel |-> "transfer",
+ destPort |-> "cosmos-hub",
+ sourceChannel |-> "cosmos-hub",
+ sourcePort |-> "bitcoin-hub"]]
+/\ p = [data |->
+ [amount |-> 1,
+ denomTrace |->
+ [denom |-> "eth",
+ prefix0 |-> [channel |-> "channel-0", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "a3",
+ sender |-> "a3"],
+ destChannel |-> "channel-0",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-1",
+ sourcePort |-> "transfer"]
+
+(* Transition 1 to State7 *)
+
+State7 ==
+/\ bank = <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "atom",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"],
+ prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]]
+ >>
+ :> 3
+ @@ <<
+ [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "btc",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"],
+ prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]]
+ >>
+ :> 3
+ @@ <<
+ [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "eth",
+ prefix0 |-> [channel |-> "channel-0", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 2
+ @@ <<
+ [channel |-> "channel-1", id |-> "", port |-> "transfer"], [denom |-> "eth",
+ prefix0 |-> [channel |-> "channel-0", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 1
+/\ count = 5
+/\ error = FALSE
+/\ handler = "SendTransfer"
+/\ history = 0
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ error |-> FALSE,
+ handler |-> "",
+ packet |->
+ [data |->
+ [amount |-> 3,
+ denomTrace |->
+ [denom |-> "eth",
+ prefix0 |-> [channel |-> "channel-0", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "a2",
+ sender |-> "a3"],
+ destChannel |-> "channel-0",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-0",
+ sourcePort |-> "transfer"]]
+ @@ 1
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "eth",
+ prefix0 |-> [channel |-> "channel-0", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 3,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ error |-> FALSE,
+ handler |-> "OnTimeoutPacket",
+ packet |->
+ [data |->
+ [amount |-> 3,
+ denomTrace |->
+ [denom |-> "eth",
+ prefix0 |-> [channel |-> "channel-0", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "a2",
+ sender |-> "a3"],
+ destChannel |-> "channel-0",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-0",
+ sourcePort |-> "transfer"]]
+ @@ 2
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "btc",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"],
+ prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]]
+ >>
+ :> 3
+ @@ <<
+ [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "eth",
+ prefix0 |-> [channel |-> "channel-0", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 3,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "eth",
+ prefix0 |-> [channel |-> "channel-0", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 3,
+ error |-> FALSE,
+ handler |-> "OnRecvAcknowledgementError",
+ packet |->
+ [data |->
+ [amount |-> 3,
+ denomTrace |->
+ [denom |-> "btc",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"],
+ prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]],
+ receiver |-> "a1",
+ sender |-> "a2"],
+ destChannel |-> "channel-0",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-1",
+ sourcePort |-> "transfer"]]
+ @@ 3
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "atom",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"],
+ prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]]
+ >>
+ :> 3
+ @@ <<
+ [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "btc",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"],
+ prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]]
+ >>
+ :> 3
+ @@ <<
+ [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "eth",
+ prefix0 |-> [channel |-> "channel-0", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 3,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "btc",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"],
+ prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]]
+ >>
+ :> 3
+ @@ <<
+ [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "eth",
+ prefix0 |-> [channel |-> "channel-0", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 3,
+ error |-> FALSE,
+ handler |-> "OnRecvPacket",
+ packet |->
+ [data |->
+ [amount |-> 3,
+ denomTrace |->
+ [denom |-> "atom",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "a2",
+ sender |-> "a1"],
+ destChannel |-> "channel-1",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-0",
+ sourcePort |-> "transfer"]]
+ @@ 4
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "atom",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"],
+ prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]]
+ >>
+ :> 3
+ @@ <<
+ [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "btc",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"],
+ prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]]
+ >>
+ :> 3
+ @@ <<
+ [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "eth",
+ prefix0 |-> [channel |-> "channel-0", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 3,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "atom",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"],
+ prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]]
+ >>
+ :> 3
+ @@ <<
+ [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "btc",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"],
+ prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]]
+ >>
+ :> 3
+ @@ <<
+ [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "eth",
+ prefix0 |-> [channel |-> "channel-0", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 3,
+ error |-> FALSE,
+ handler |-> "OnRecvAcknowledgementResult",
+ packet |->
+ [data |->
+ [amount |-> 2,
+ denomTrace |->
+ [denom |-> "",
+ prefix0 |-> [channel |-> "channel-1", port |-> "channel-1"],
+ prefix1 |-> [channel |-> "channel-0", port |-> ""]],
+ receiver |-> "",
+ sender |-> "a1"],
+ destChannel |-> "transfer",
+ destPort |-> "cosmos-hub",
+ sourceChannel |-> "cosmos-hub",
+ sourcePort |-> "bitcoin-hub"]]
+ @@ 5
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "atom",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"],
+ prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]]
+ >>
+ :> 3
+ @@ <<
+ [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "btc",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"],
+ prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]]
+ >>
+ :> 3
+ @@ <<
+ [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "eth",
+ prefix0 |-> [channel |-> "channel-0", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 2
+ @@ <<
+ [channel |-> "channel-1", id |-> "", port |-> "transfer"], [denom |->
+ "eth",
+ prefix0 |-> [channel |-> "channel-0", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 1,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "atom",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"],
+ prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]]
+ >>
+ :> 3
+ @@ <<
+ [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "btc",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"],
+ prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]]
+ >>
+ :> 3
+ @@ <<
+ [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "eth",
+ prefix0 |-> [channel |-> "channel-0", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 3,
+ error |-> FALSE,
+ handler |-> "SendTransfer",
+ packet |->
+ [data |->
+ [amount |-> 1,
+ denomTrace |->
+ [denom |-> "eth",
+ prefix0 |-> [channel |-> "channel-0", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "a3",
+ sender |-> "a3"],
+ destChannel |-> "channel-0",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-1",
+ sourcePort |-> "transfer"]]
+/\ p = [data |->
+ [amount |-> 0,
+ denomTrace |->
+ [denom |-> "btc",
+ prefix0 |-> [channel |-> "transfer", port |-> "cosmos-hub"],
+ prefix1 |-> [channel |-> "cosmos-hub", port |-> "transfer"]],
+ receiver |-> "",
+ sender |-> ""],
+ destChannel |-> "bitcoin-hub",
+ destPort |-> "ethereum-hub",
+ sourceChannel |-> "transfer",
+ sourcePort |-> "channel-1"]
+
+(* The following formula holds true in the last state and violates the invariant *)
+
+InvariantViolation ==
+ (count >= 5
+ /\ (\A s1$2 \in DOMAIN history:
+ \A s2$2 \in DOMAIN history:
+ s1$2 = s2$2 \/ ~(history[s1$2]["handler"] = history[s2$2]["handler"])))
+ /\ (\A s$2 \in DOMAIN history:
+ s$2 <= 0
+ \/ (history[s$2]["error"] = FALSE
+ /\ history[s$2]["packet"]["data"]["amount"] > 0))
+
+================================================================================
+\* Created by Apalache on Thu Dec 10 12:49:42 CET 2020
+\* https://github.com/informalsystems/apalache
diff --git a/applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementErrorFail.json b/applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementErrorFail.json
new file mode 100644
index 00000000..f1f55321
--- /dev/null
+++ b/applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementErrorFail.json
@@ -0,0 +1,58 @@
+[
+ {
+ "packet": {
+ "sourceChannel": "",
+ "sourcePort": "",
+ "destChannel": "",
+ "destPort": "",
+ "data": {
+ "sender": "a1",
+ "receiver": "a2",
+ "amount": 1,
+ "denom": [
+ "cosmos-hub",
+ "transfer",
+ "channel-0",
+ "cosmos-hub",
+ "btc"
+ ]
+ }
+ },
+ "handler": "OnRecvAcknowledgementError",
+ "bankBefore": [
+ {
+ "address": [
+ "",
+ "",
+ ""
+ ],
+ "denom": [
+ "",
+ "",
+ "",
+ "",
+ ""
+ ],
+ "amount": 0
+ }
+ ],
+ "bankAfter": [
+ {
+ "address": [
+ "",
+ "",
+ ""
+ ],
+ "denom": [
+ "",
+ "",
+ "",
+ "",
+ ""
+ ],
+ "amount": 0
+ }
+ ],
+ "error": true
+ }
+]
\ No newline at end of file
diff --git a/applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementErrorFail.tla b/applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementErrorFail.tla
new file mode 100644
index 00000000..583b3211
--- /dev/null
+++ b/applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementErrorFail.tla
@@ -0,0 +1,159 @@
+------------------------- MODULE counterexample -------------------------
+
+EXTENDS relay_tests
+
+(* Initial state *)
+
+State1 ==
+TRUE
+(* Transition 0 to State2 *)
+
+State2 ==
+/\ bank = <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+/\ count = 0
+/\ error = FALSE
+/\ handler = ""
+/\ history = 0
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ error |-> FALSE,
+ handler |-> "",
+ packet |->
+ [data |->
+ [amount |-> 1,
+ denomTrace |->
+ [denom |-> "btc",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "channel-0"],
+ prefix1 |-> [channel |-> "transfer", port |-> "cosmos-hub"]],
+ receiver |-> "a2",
+ sender |-> "a1"],
+ destChannel |-> "",
+ destPort |-> "",
+ sourceChannel |-> "",
+ sourcePort |-> ""]]
+/\ p = [data |->
+ [amount |-> 1,
+ denomTrace |->
+ [denom |-> "btc",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "channel-0"],
+ prefix1 |-> [channel |-> "transfer", port |-> "cosmos-hub"]],
+ receiver |-> "a2",
+ sender |-> "a1"],
+ destChannel |-> "",
+ destPort |-> "",
+ sourceChannel |-> "",
+ sourcePort |-> ""]
+
+(* Transition 7 to State3 *)
+
+State3 ==
+/\ bank = <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+/\ count = 1
+/\ error = TRUE
+/\ handler = "OnRecvAcknowledgementError"
+/\ history = 0
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ error |-> FALSE,
+ handler |-> "",
+ packet |->
+ [data |->
+ [amount |-> 1,
+ denomTrace |->
+ [denom |-> "btc",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "channel-0"],
+ prefix1 |-> [channel |-> "transfer", port |-> "cosmos-hub"]],
+ receiver |-> "a2",
+ sender |-> "a1"],
+ destChannel |-> "",
+ destPort |-> "",
+ sourceChannel |-> "",
+ sourcePort |-> ""]]
+ @@ 1
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ error |-> TRUE,
+ handler |-> "OnRecvAcknowledgementError",
+ packet |->
+ [data |->
+ [amount |-> 1,
+ denomTrace |->
+ [denom |-> "btc",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "channel-0"],
+ prefix1 |-> [channel |-> "transfer", port |-> "cosmos-hub"]],
+ receiver |-> "a2",
+ sender |-> "a1"],
+ destChannel |-> "",
+ destPort |-> "",
+ sourceChannel |-> "",
+ sourcePort |-> ""]]
+/\ p = [data |->
+ [amount |-> 0,
+ denomTrace |->
+ [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "",
+ sender |-> ""],
+ destChannel |-> "",
+ destPort |-> "",
+ sourceChannel |-> "",
+ sourcePort |-> ""]
+
+(* The following formula holds true in the last state and violates the invariant *)
+
+InvariantViolation ==
+ BMC!Skolem((\E s$2 \in DOMAIN history:
+ history[s$2]["handler"] = "OnRecvAcknowledgementError"
+ /\ history[s$2]["error"] = TRUE
+ /\ history[s$2]["packet"]["data"]["amount"] > 0))
+
+================================================================================
+\* Created by Apalache on Thu Dec 10 11:15:18 CET 2020
+\* https://github.com/informalsystems/apalache
diff --git a/applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementErrorPass.json b/applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementErrorPass.json
new file mode 100644
index 00000000..3fbfe7fd
--- /dev/null
+++ b/applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementErrorPass.json
@@ -0,0 +1,159 @@
+[
+ {
+ "packet": {
+ "sourceChannel": "channel-0",
+ "sourcePort": "transfer",
+ "destChannel": "channel-1",
+ "destPort": "transfer",
+ "data": {
+ "sender": "",
+ "receiver": "a1",
+ "amount": 1,
+ "denom": [
+ "",
+ "",
+ "channel-0",
+ "ethereum-hub",
+ "btc"
+ ]
+ }
+ },
+ "handler": "OnRecvPacket",
+ "bankBefore": [
+ {
+ "address": [
+ "",
+ "",
+ ""
+ ],
+ "denom": [
+ "",
+ "",
+ "",
+ "",
+ ""
+ ],
+ "amount": 0
+ }
+ ],
+ "bankAfter": [
+ {
+ "address": [
+ "",
+ "",
+ ""
+ ],
+ "denom": [
+ "",
+ "",
+ "",
+ "",
+ ""
+ ],
+ "amount": 0
+ },
+ {
+ "address": [
+ "",
+ "",
+ "a1"
+ ],
+ "denom": [
+ "transfer",
+ "channel-1",
+ "channel-0",
+ "ethereum-hub",
+ "btc"
+ ],
+ "amount": 1
+ }
+ ],
+ "error": false
+ },
+ {
+ "packet": {
+ "sourceChannel": "channel-1",
+ "sourcePort": "transfer",
+ "destChannel": "channel-0",
+ "destPort": "transfer",
+ "data": {
+ "sender": "a1",
+ "receiver": "a2",
+ "amount": 1,
+ "denom": [
+ "transfer",
+ "channel-1",
+ "channel-0",
+ "ethereum-hub",
+ "btc"
+ ]
+ }
+ },
+ "handler": "OnRecvAcknowledgementError",
+ "bankBefore": [
+ {
+ "address": [
+ "",
+ "",
+ ""
+ ],
+ "denom": [
+ "",
+ "",
+ "",
+ "",
+ ""
+ ],
+ "amount": 0
+ },
+ {
+ "address": [
+ "",
+ "",
+ "a1"
+ ],
+ "denom": [
+ "transfer",
+ "channel-1",
+ "channel-0",
+ "ethereum-hub",
+ "btc"
+ ],
+ "amount": 1
+ }
+ ],
+ "bankAfter": [
+ {
+ "address": [
+ "",
+ "",
+ ""
+ ],
+ "denom": [
+ "",
+ "",
+ "",
+ "",
+ ""
+ ],
+ "amount": 0
+ },
+ {
+ "address": [
+ "",
+ "",
+ "a1"
+ ],
+ "denom": [
+ "transfer",
+ "channel-1",
+ "channel-0",
+ "ethereum-hub",
+ "btc"
+ ],
+ "amount": 2
+ }
+ ],
+ "error": false
+ }
+]
\ No newline at end of file
diff --git a/applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementErrorPass.tla b/applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementErrorPass.tla
new file mode 100644
index 00000000..cd43eb26
--- /dev/null
+++ b/applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementErrorPass.tla
@@ -0,0 +1,310 @@
+------------------------- MODULE counterexample -------------------------
+
+EXTENDS relay_tests
+
+(* Initial state *)
+
+State1 ==
+TRUE
+(* Transition 0 to State2 *)
+
+State2 ==
+/\ bank = <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+/\ count = 0
+/\ error = FALSE
+/\ handler = ""
+/\ history = 0
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ error |-> FALSE,
+ handler |-> "",
+ packet |->
+ [data |->
+ [amount |-> 1,
+ denomTrace |->
+ [denom |-> "btc",
+ prefix0 |-> [channel |-> "ethereum-hub", port |-> "channel-0"],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "a1",
+ sender |-> ""],
+ destChannel |-> "channel-1",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-0",
+ sourcePort |-> "transfer"]]
+/\ p = [data |->
+ [amount |-> 1,
+ denomTrace |->
+ [denom |-> "btc",
+ prefix0 |-> [channel |-> "ethereum-hub", port |-> "channel-0"],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "a1",
+ sender |-> ""],
+ destChannel |-> "channel-1",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-0",
+ sourcePort |-> "transfer"]
+
+(* Transition 2 to State3 *)
+
+State3 ==
+/\ bank = <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a1", port |-> ""], [denom |-> "btc",
+ prefix0 |-> [channel |-> "ethereum-hub", port |-> "channel-0"],
+ prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]]
+ >>
+ :> 1
+/\ count = 1
+/\ error = FALSE
+/\ handler = "OnRecvPacket"
+/\ history = 0
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ error |-> FALSE,
+ handler |-> "",
+ packet |->
+ [data |->
+ [amount |-> 1,
+ denomTrace |->
+ [denom |-> "btc",
+ prefix0 |-> [channel |-> "ethereum-hub", port |-> "channel-0"],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "a1",
+ sender |-> ""],
+ destChannel |-> "channel-1",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-0",
+ sourcePort |-> "transfer"]]
+ @@ 1
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a1", port |-> ""], [denom |-> "btc",
+ prefix0 |-> [channel |-> "ethereum-hub", port |-> "channel-0"],
+ prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]]
+ >>
+ :> 1,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ error |-> FALSE,
+ handler |-> "OnRecvPacket",
+ packet |->
+ [data |->
+ [amount |-> 1,
+ denomTrace |->
+ [denom |-> "btc",
+ prefix0 |-> [channel |-> "ethereum-hub", port |-> "channel-0"],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "a1",
+ sender |-> ""],
+ destChannel |-> "channel-1",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-0",
+ sourcePort |-> "transfer"]]
+/\ p = [data |->
+ [amount |-> 1,
+ denomTrace |->
+ [denom |-> "btc",
+ prefix0 |-> [channel |-> "ethereum-hub", port |-> "channel-0"],
+ prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]],
+ receiver |-> "a2",
+ sender |-> "a1"],
+ destChannel |-> "channel-0",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-1",
+ sourcePort |-> "transfer"]
+
+(* Transition 11 to State4 *)
+
+State4 ==
+/\ bank = <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a1", port |-> ""], [denom |-> "btc",
+ prefix0 |-> [channel |-> "ethereum-hub", port |-> "channel-0"],
+ prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]]
+ >>
+ :> 2
+/\ count = 2
+/\ error = FALSE
+/\ handler = "OnRecvAcknowledgementError"
+/\ history = 0
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ error |-> FALSE,
+ handler |-> "",
+ packet |->
+ [data |->
+ [amount |-> 1,
+ denomTrace |->
+ [denom |-> "btc",
+ prefix0 |-> [channel |-> "ethereum-hub", port |-> "channel-0"],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "a1",
+ sender |-> ""],
+ destChannel |-> "channel-1",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-0",
+ sourcePort |-> "transfer"]]
+ @@ 1
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a1", port |-> ""], [denom |-> "btc",
+ prefix0 |-> [channel |-> "ethereum-hub", port |-> "channel-0"],
+ prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]]
+ >>
+ :> 1,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ error |-> FALSE,
+ handler |-> "OnRecvPacket",
+ packet |->
+ [data |->
+ [amount |-> 1,
+ denomTrace |->
+ [denom |-> "btc",
+ prefix0 |-> [channel |-> "ethereum-hub", port |-> "channel-0"],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "a1",
+ sender |-> ""],
+ destChannel |-> "channel-1",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-0",
+ sourcePort |-> "transfer"]]
+ @@ 2
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a1", port |-> ""], [denom |-> "btc",
+ prefix0 |-> [channel |-> "ethereum-hub", port |-> "channel-0"],
+ prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]]
+ >>
+ :> 2,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a1", port |-> ""], [denom |-> "btc",
+ prefix0 |-> [channel |-> "ethereum-hub", port |-> "channel-0"],
+ prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]]
+ >>
+ :> 1,
+ error |-> FALSE,
+ handler |-> "OnRecvAcknowledgementError",
+ packet |->
+ [data |->
+ [amount |-> 1,
+ denomTrace |->
+ [denom |-> "btc",
+ prefix0 |-> [channel |-> "ethereum-hub", port |-> "channel-0"],
+ prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]],
+ receiver |-> "a2",
+ sender |-> "a1"],
+ destChannel |-> "channel-0",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-1",
+ sourcePort |-> "transfer"]]
+/\ p = [data |->
+ [amount |-> 0,
+ denomTrace |->
+ [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "",
+ sender |-> ""],
+ destChannel |-> "",
+ destPort |-> "",
+ sourceChannel |-> "",
+ sourcePort |-> ""]
+
+(* The following formula holds true in the last state and violates the invariant *)
+
+InvariantViolation ==
+ BMC!Skolem((\E s$2 \in DOMAIN history:
+ history[s$2]["handler"] = "OnRecvAcknowledgementError"
+ /\ history[s$2]["error"] = FALSE
+ /\ history[s$2]["packet"]["data"]["amount"] > 0))
+
+================================================================================
+\* Created by Apalache on Thu Dec 10 11:14:33 CET 2020
+\* https://github.com/informalsystems/apalache
diff --git a/applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementResultFail.json b/applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementResultFail.json
new file mode 100644
index 00000000..9110a38a
--- /dev/null
+++ b/applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementResultFail.json
@@ -0,0 +1,58 @@
+[
+ {
+ "packet": {
+ "sourceChannel": "",
+ "sourcePort": "",
+ "destChannel": "",
+ "destPort": "",
+ "data": {
+ "sender": "a1",
+ "receiver": "a2",
+ "amount": 1,
+ "denom": [
+ "cosmos-hub",
+ "transfer",
+ "channel-0",
+ "cosmos-hub",
+ "btc"
+ ]
+ }
+ },
+ "handler": "OnRecvAcknowledgementResult",
+ "bankBefore": [
+ {
+ "address": [
+ "",
+ "",
+ ""
+ ],
+ "denom": [
+ "",
+ "",
+ "",
+ "",
+ ""
+ ],
+ "amount": 0
+ }
+ ],
+ "bankAfter": [
+ {
+ "address": [
+ "",
+ "",
+ ""
+ ],
+ "denom": [
+ "",
+ "",
+ "",
+ "",
+ ""
+ ],
+ "amount": 0
+ }
+ ],
+ "error": true
+ }
+]
\ No newline at end of file
diff --git a/applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementResultFail.tla b/applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementResultFail.tla
new file mode 100644
index 00000000..b97ec73a
--- /dev/null
+++ b/applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementResultFail.tla
@@ -0,0 +1,159 @@
+------------------------- MODULE counterexample -------------------------
+
+EXTENDS relay_tests
+
+(* Initial state *)
+
+State1 ==
+TRUE
+(* Transition 0 to State2 *)
+
+State2 ==
+/\ bank = <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+/\ count = 0
+/\ error = FALSE
+/\ handler = ""
+/\ history = 0
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ error |-> FALSE,
+ handler |-> "",
+ packet |->
+ [data |->
+ [amount |-> 1,
+ denomTrace |->
+ [denom |-> "btc",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "channel-0"],
+ prefix1 |-> [channel |-> "transfer", port |-> "cosmos-hub"]],
+ receiver |-> "a2",
+ sender |-> "a1"],
+ destChannel |-> "",
+ destPort |-> "",
+ sourceChannel |-> "",
+ sourcePort |-> ""]]
+/\ p = [data |->
+ [amount |-> 1,
+ denomTrace |->
+ [denom |-> "btc",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "channel-0"],
+ prefix1 |-> [channel |-> "transfer", port |-> "cosmos-hub"]],
+ receiver |-> "a2",
+ sender |-> "a1"],
+ destChannel |-> "",
+ destPort |-> "",
+ sourceChannel |-> "",
+ sourcePort |-> ""]
+
+(* Transition 13 to State3 *)
+
+State3 ==
+/\ bank = <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+/\ count = 1
+/\ error = TRUE
+/\ handler = "OnRecvAcknowledgementResult"
+/\ history = 0
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ error |-> FALSE,
+ handler |-> "",
+ packet |->
+ [data |->
+ [amount |-> 1,
+ denomTrace |->
+ [denom |-> "btc",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "channel-0"],
+ prefix1 |-> [channel |-> "transfer", port |-> "cosmos-hub"]],
+ receiver |-> "a2",
+ sender |-> "a1"],
+ destChannel |-> "",
+ destPort |-> "",
+ sourceChannel |-> "",
+ sourcePort |-> ""]]
+ @@ 1
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ error |-> TRUE,
+ handler |-> "OnRecvAcknowledgementResult",
+ packet |->
+ [data |->
+ [amount |-> 1,
+ denomTrace |->
+ [denom |-> "btc",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "channel-0"],
+ prefix1 |-> [channel |-> "transfer", port |-> "cosmos-hub"]],
+ receiver |-> "a2",
+ sender |-> "a1"],
+ destChannel |-> "",
+ destPort |-> "",
+ sourceChannel |-> "",
+ sourcePort |-> ""]]
+/\ p = [data |->
+ [amount |-> 0,
+ denomTrace |->
+ [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "",
+ sender |-> ""],
+ destChannel |-> "",
+ destPort |-> "",
+ sourceChannel |-> "",
+ sourcePort |-> ""]
+
+(* The following formula holds true in the last state and violates the invariant *)
+
+InvariantViolation ==
+ BMC!Skolem((\E s$2 \in DOMAIN history:
+ history[s$2]["handler"] = "OnRecvAcknowledgementResult"
+ /\ history[s$2]["error"] = TRUE
+ /\ history[s$2]["packet"]["data"]["amount"] > 0))
+
+================================================================================
+\* Created by Apalache on Thu Dec 10 11:13:42 CET 2020
+\* https://github.com/informalsystems/apalache
diff --git a/applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementResultPass.json b/applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementResultPass.json
new file mode 100644
index 00000000..5215df7d
--- /dev/null
+++ b/applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementResultPass.json
@@ -0,0 +1,58 @@
+[
+ {
+ "packet": {
+ "sourceChannel": "ethereum-hub",
+ "sourcePort": "transfer",
+ "destChannel": "channel-0",
+ "destPort": "ethereum-hub",
+ "data": {
+ "sender": "a1",
+ "receiver": "a2",
+ "amount": 1,
+ "denom": [
+ "cosmos-hub",
+ "transfer",
+ "channel-0",
+ "cosmos-hub",
+ "btc"
+ ]
+ }
+ },
+ "handler": "OnRecvAcknowledgementResult",
+ "bankBefore": [
+ {
+ "address": [
+ "",
+ "",
+ ""
+ ],
+ "denom": [
+ "",
+ "",
+ "",
+ "",
+ ""
+ ],
+ "amount": 0
+ }
+ ],
+ "bankAfter": [
+ {
+ "address": [
+ "",
+ "",
+ ""
+ ],
+ "denom": [
+ "",
+ "",
+ "",
+ "",
+ ""
+ ],
+ "amount": 0
+ }
+ ],
+ "error": false
+ }
+]
\ No newline at end of file
diff --git a/applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementResultPass.tla b/applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementResultPass.tla
new file mode 100644
index 00000000..f9d049c5
--- /dev/null
+++ b/applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementResultPass.tla
@@ -0,0 +1,159 @@
+------------------------- MODULE counterexample -------------------------
+
+EXTENDS relay_tests
+
+(* Initial state *)
+
+State1 ==
+TRUE
+(* Transition 0 to State2 *)
+
+State2 ==
+/\ bank = <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+/\ count = 0
+/\ error = FALSE
+/\ handler = ""
+/\ history = 0
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ error |-> FALSE,
+ handler |-> "",
+ packet |->
+ [data |->
+ [amount |-> 1,
+ denomTrace |->
+ [denom |-> "btc",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "channel-0"],
+ prefix1 |-> [channel |-> "transfer", port |-> "cosmos-hub"]],
+ receiver |-> "a2",
+ sender |-> "a1"],
+ destChannel |-> "channel-0",
+ destPort |-> "ethereum-hub",
+ sourceChannel |-> "ethereum-hub",
+ sourcePort |-> "transfer"]]
+/\ p = [data |->
+ [amount |-> 1,
+ denomTrace |->
+ [denom |-> "btc",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "channel-0"],
+ prefix1 |-> [channel |-> "transfer", port |-> "cosmos-hub"]],
+ receiver |-> "a2",
+ sender |-> "a1"],
+ destChannel |-> "channel-0",
+ destPort |-> "ethereum-hub",
+ sourceChannel |-> "ethereum-hub",
+ sourcePort |-> "transfer"]
+
+(* Transition 12 to State3 *)
+
+State3 ==
+/\ bank = <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+/\ count = 1
+/\ error = FALSE
+/\ handler = "OnRecvAcknowledgementResult"
+/\ history = 0
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ error |-> FALSE,
+ handler |-> "",
+ packet |->
+ [data |->
+ [amount |-> 1,
+ denomTrace |->
+ [denom |-> "btc",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "channel-0"],
+ prefix1 |-> [channel |-> "transfer", port |-> "cosmos-hub"]],
+ receiver |-> "a2",
+ sender |-> "a1"],
+ destChannel |-> "channel-0",
+ destPort |-> "ethereum-hub",
+ sourceChannel |-> "ethereum-hub",
+ sourcePort |-> "transfer"]]
+ @@ 1
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ error |-> FALSE,
+ handler |-> "OnRecvAcknowledgementResult",
+ packet |->
+ [data |->
+ [amount |-> 1,
+ denomTrace |->
+ [denom |-> "btc",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "channel-0"],
+ prefix1 |-> [channel |-> "transfer", port |-> "cosmos-hub"]],
+ receiver |-> "a2",
+ sender |-> "a1"],
+ destChannel |-> "channel-0",
+ destPort |-> "ethereum-hub",
+ sourceChannel |-> "ethereum-hub",
+ sourcePort |-> "transfer"]]
+/\ p = [data |->
+ [amount |-> 0,
+ denomTrace |->
+ [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "",
+ sender |-> ""],
+ destChannel |-> "",
+ destPort |-> "",
+ sourceChannel |-> "",
+ sourcePort |-> ""]
+
+(* The following formula holds true in the last state and violates the invariant *)
+
+InvariantViolation ==
+ BMC!Skolem((\E s$2 \in DOMAIN history:
+ history[s$2]["handler"] = "OnRecvAcknowledgementResult"
+ /\ history[s$2]["error"] = FALSE
+ /\ history[s$2]["packet"]["data"]["amount"] > 0))
+
+================================================================================
+\* Created by Apalache on Thu Dec 10 11:12:59 CET 2020
+\* https://github.com/informalsystems/apalache
diff --git a/applications/transfer/keeper/model_based_tests/TestOnRecvPacketFail.json b/applications/transfer/keeper/model_based_tests/TestOnRecvPacketFail.json
new file mode 100644
index 00000000..9a7e8c40
--- /dev/null
+++ b/applications/transfer/keeper/model_based_tests/TestOnRecvPacketFail.json
@@ -0,0 +1,58 @@
+[
+ {
+ "packet": {
+ "sourceChannel": "channel-0",
+ "sourcePort": "transfer",
+ "destChannel": "channel-0",
+ "destPort": "transfer",
+ "data": {
+ "sender": "",
+ "receiver": "",
+ "amount": 1,
+ "denom": [
+ "",
+ "",
+ "transfer",
+ "channel-0",
+ ""
+ ]
+ }
+ },
+ "handler": "OnRecvPacket",
+ "bankBefore": [
+ {
+ "address": [
+ "",
+ "",
+ ""
+ ],
+ "denom": [
+ "",
+ "",
+ "",
+ "",
+ ""
+ ],
+ "amount": 0
+ }
+ ],
+ "bankAfter": [
+ {
+ "address": [
+ "",
+ "",
+ ""
+ ],
+ "denom": [
+ "",
+ "",
+ "",
+ "",
+ ""
+ ],
+ "amount": 0
+ }
+ ],
+ "error": true
+ }
+]
\ No newline at end of file
diff --git a/applications/transfer/keeper/model_based_tests/TestOnRecvPacketFail.tla b/applications/transfer/keeper/model_based_tests/TestOnRecvPacketFail.tla
new file mode 100644
index 00000000..980be28a
--- /dev/null
+++ b/applications/transfer/keeper/model_based_tests/TestOnRecvPacketFail.tla
@@ -0,0 +1,159 @@
+------------------------- MODULE counterexample -------------------------
+
+EXTENDS relay_tests
+
+(* Initial state *)
+
+State1 ==
+TRUE
+(* Transition 0 to State2 *)
+
+State2 ==
+/\ bank = <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+/\ count = 0
+/\ error = FALSE
+/\ handler = ""
+/\ history = 0
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ error |-> FALSE,
+ handler |-> "",
+ packet |->
+ [data |->
+ [amount |-> 1,
+ denomTrace |->
+ [denom |-> "",
+ prefix0 |-> [channel |-> "channel-0", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "",
+ sender |-> ""],
+ destChannel |-> "channel-0",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-0",
+ sourcePort |-> "transfer"]]
+/\ p = [data |->
+ [amount |-> 1,
+ denomTrace |->
+ [denom |-> "",
+ prefix0 |-> [channel |-> "channel-0", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "",
+ sender |-> ""],
+ destChannel |-> "channel-0",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-0",
+ sourcePort |-> "transfer"]
+
+(* Transition 3 to State3 *)
+
+State3 ==
+/\ bank = <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+/\ count = 1
+/\ error = TRUE
+/\ handler = "OnRecvPacket"
+/\ history = 0
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ error |-> FALSE,
+ handler |-> "",
+ packet |->
+ [data |->
+ [amount |-> 1,
+ denomTrace |->
+ [denom |-> "",
+ prefix0 |-> [channel |-> "channel-0", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "",
+ sender |-> ""],
+ destChannel |-> "channel-0",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-0",
+ sourcePort |-> "transfer"]]
+ @@ 1
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ error |-> TRUE,
+ handler |-> "OnRecvPacket",
+ packet |->
+ [data |->
+ [amount |-> 1,
+ denomTrace |->
+ [denom |-> "",
+ prefix0 |-> [channel |-> "channel-0", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "",
+ sender |-> ""],
+ destChannel |-> "channel-0",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-0",
+ sourcePort |-> "transfer"]]
+/\ p = [data |->
+ [amount |-> 0,
+ denomTrace |->
+ [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "",
+ sender |-> ""],
+ destChannel |-> "",
+ destPort |-> "",
+ sourceChannel |-> "",
+ sourcePort |-> ""]
+
+(* The following formula holds true in the last state and violates the invariant *)
+
+InvariantViolation ==
+ BMC!Skolem((\E s$2 \in DOMAIN history:
+ history[s$2]["handler"] = "OnRecvPacket"
+ /\ history[s$2]["error"] = TRUE
+ /\ history[s$2]["packet"]["data"]["amount"] > 0))
+
+================================================================================
+\* Created by Apalache on Thu Dec 10 11:02:31 CET 2020
+\* https://github.com/informalsystems/apalache
diff --git a/applications/transfer/keeper/model_based_tests/TestOnRecvPacketPass.json b/applications/transfer/keeper/model_based_tests/TestOnRecvPacketPass.json
new file mode 100644
index 00000000..35f94c57
--- /dev/null
+++ b/applications/transfer/keeper/model_based_tests/TestOnRecvPacketPass.json
@@ -0,0 +1,73 @@
+[
+ {
+ "packet": {
+ "sourceChannel": "channel-0",
+ "sourcePort": "transfer",
+ "destChannel": "channel-0",
+ "destPort": "transfer",
+ "data": {
+ "sender": "",
+ "receiver": "a2",
+ "amount": 1,
+ "denom": [
+ "",
+ "",
+ "ethereum-hub",
+ "cosmos-hub",
+ "btc"
+ ]
+ }
+ },
+ "handler": "OnRecvPacket",
+ "bankBefore": [
+ {
+ "address": [
+ "",
+ "",
+ ""
+ ],
+ "denom": [
+ "",
+ "",
+ "",
+ "",
+ ""
+ ],
+ "amount": 0
+ }
+ ],
+ "bankAfter": [
+ {
+ "address": [
+ "",
+ "",
+ ""
+ ],
+ "denom": [
+ "",
+ "",
+ "",
+ "",
+ ""
+ ],
+ "amount": 0
+ },
+ {
+ "address": [
+ "",
+ "",
+ "a2"
+ ],
+ "denom": [
+ "transfer",
+ "channel-0",
+ "ethereum-hub",
+ "cosmos-hub",
+ "btc"
+ ],
+ "amount": 1
+ }
+ ],
+ "error": false
+ }
+]
\ No newline at end of file
diff --git a/applications/transfer/keeper/model_based_tests/TestOnRecvPacketPass.tla b/applications/transfer/keeper/model_based_tests/TestOnRecvPacketPass.tla
new file mode 100644
index 00000000..342b097f
--- /dev/null
+++ b/applications/transfer/keeper/model_based_tests/TestOnRecvPacketPass.tla
@@ -0,0 +1,174 @@
+------------------------- MODULE counterexample -------------------------
+
+EXTENDS relay_tests
+
+(* Initial state *)
+
+State1 ==
+TRUE
+(* Transition 0 to State2 *)
+
+State2 ==
+/\ bank = <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+/\ count = 0
+/\ error = FALSE
+/\ handler = ""
+/\ history = 0
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ error |-> FALSE,
+ handler |-> "",
+ packet |->
+ [data |->
+ [amount |-> 1,
+ denomTrace |->
+ [denom |-> "btc",
+ prefix0 |->
+ [channel |-> "cosmos-hub", port |-> "ethereum-hub"],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "a2",
+ sender |-> ""],
+ destChannel |-> "channel-0",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-0",
+ sourcePort |-> "transfer"]]
+/\ p = [data |->
+ [amount |-> 1,
+ denomTrace |->
+ [denom |-> "btc",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "ethereum-hub"],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "a2",
+ sender |-> ""],
+ destChannel |-> "channel-0",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-0",
+ sourcePort |-> "transfer"]
+
+(* Transition 5 to State3 *)
+
+State3 ==
+/\ bank = <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "btc",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "ethereum-hub"],
+ prefix1 |-> [channel |-> "channel-0", port |-> "transfer"]]
+ >>
+ :> 1
+/\ count = 1
+/\ error = FALSE
+/\ handler = "OnRecvPacket"
+/\ history = 0
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ error |-> FALSE,
+ handler |-> "",
+ packet |->
+ [data |->
+ [amount |-> 1,
+ denomTrace |->
+ [denom |-> "btc",
+ prefix0 |->
+ [channel |-> "cosmos-hub", port |-> "ethereum-hub"],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "a2",
+ sender |-> ""],
+ destChannel |-> "channel-0",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-0",
+ sourcePort |-> "transfer"]]
+ @@ 1
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "btc",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "ethereum-hub"],
+ prefix1 |-> [channel |-> "channel-0", port |-> "transfer"]]
+ >>
+ :> 1,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ error |-> FALSE,
+ handler |-> "OnRecvPacket",
+ packet |->
+ [data |->
+ [amount |-> 1,
+ denomTrace |->
+ [denom |-> "btc",
+ prefix0 |->
+ [channel |-> "cosmos-hub", port |-> "ethereum-hub"],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "a2",
+ sender |-> ""],
+ destChannel |-> "channel-0",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-0",
+ sourcePort |-> "transfer"]]
+/\ p = [data |->
+ [amount |-> 0,
+ denomTrace |->
+ [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "",
+ sender |-> ""],
+ destChannel |-> "",
+ destPort |-> "",
+ sourceChannel |-> "",
+ sourcePort |-> ""]
+
+(* The following formula holds true in the last state and violates the invariant *)
+
+InvariantViolation ==
+ BMC!Skolem((\E s$2 \in DOMAIN history:
+ history[s$2]["handler"] = "OnRecvPacket"
+ /\ history[s$2]["error"] = FALSE
+ /\ history[s$2]["packet"]["data"]["amount"] > 0))
+
+================================================================================
+\* Created by Apalache on Thu Dec 10 11:01:28 CET 2020
+\* https://github.com/informalsystems/apalache
diff --git a/applications/transfer/keeper/model_based_tests/TestOnTimeoutFail.json b/applications/transfer/keeper/model_based_tests/TestOnTimeoutFail.json
new file mode 100644
index 00000000..a78ed85c
--- /dev/null
+++ b/applications/transfer/keeper/model_based_tests/TestOnTimeoutFail.json
@@ -0,0 +1,58 @@
+[
+ {
+ "packet": {
+ "sourceChannel": "",
+ "sourcePort": "",
+ "destChannel": "",
+ "destPort": "",
+ "data": {
+ "sender": "a1",
+ "receiver": "a2",
+ "amount": 1,
+ "denom": [
+ "cosmos-hub",
+ "transfer",
+ "channel-0",
+ "cosmos-hub",
+ "btc"
+ ]
+ }
+ },
+ "handler": "OnTimeoutPacket",
+ "bankBefore": [
+ {
+ "address": [
+ "",
+ "",
+ ""
+ ],
+ "denom": [
+ "",
+ "",
+ "",
+ "",
+ ""
+ ],
+ "amount": 0
+ }
+ ],
+ "bankAfter": [
+ {
+ "address": [
+ "",
+ "",
+ ""
+ ],
+ "denom": [
+ "",
+ "",
+ "",
+ "",
+ ""
+ ],
+ "amount": 0
+ }
+ ],
+ "error": true
+ }
+]
\ No newline at end of file
diff --git a/applications/transfer/keeper/model_based_tests/TestOnTimeoutFail.tla b/applications/transfer/keeper/model_based_tests/TestOnTimeoutFail.tla
new file mode 100644
index 00000000..1bc209d9
--- /dev/null
+++ b/applications/transfer/keeper/model_based_tests/TestOnTimeoutFail.tla
@@ -0,0 +1,159 @@
+------------------------- MODULE counterexample -------------------------
+
+EXTENDS relay_tests
+
+(* Initial state *)
+
+State1 ==
+TRUE
+(* Transition 0 to State2 *)
+
+State2 ==
+/\ bank = <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+/\ count = 0
+/\ error = FALSE
+/\ handler = ""
+/\ history = 0
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ error |-> FALSE,
+ handler |-> "",
+ packet |->
+ [data |->
+ [amount |-> 1,
+ denomTrace |->
+ [denom |-> "btc",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "channel-0"],
+ prefix1 |-> [channel |-> "transfer", port |-> "cosmos-hub"]],
+ receiver |-> "a2",
+ sender |-> "a1"],
+ destChannel |-> "",
+ destPort |-> "",
+ sourceChannel |-> "",
+ sourcePort |-> ""]]
+/\ p = [data |->
+ [amount |-> 1,
+ denomTrace |->
+ [denom |-> "btc",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "channel-0"],
+ prefix1 |-> [channel |-> "transfer", port |-> "cosmos-hub"]],
+ receiver |-> "a2",
+ sender |-> "a1"],
+ destChannel |-> "",
+ destPort |-> "",
+ sourceChannel |-> "",
+ sourcePort |-> ""]
+
+(* Transition 6 to State3 *)
+
+State3 ==
+/\ bank = <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+/\ count = 1
+/\ error = TRUE
+/\ handler = "OnTimeoutPacket"
+/\ history = 0
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ error |-> FALSE,
+ handler |-> "",
+ packet |->
+ [data |->
+ [amount |-> 1,
+ denomTrace |->
+ [denom |-> "btc",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "channel-0"],
+ prefix1 |-> [channel |-> "transfer", port |-> "cosmos-hub"]],
+ receiver |-> "a2",
+ sender |-> "a1"],
+ destChannel |-> "",
+ destPort |-> "",
+ sourceChannel |-> "",
+ sourcePort |-> ""]]
+ @@ 1
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ error |-> TRUE,
+ handler |-> "OnTimeoutPacket",
+ packet |->
+ [data |->
+ [amount |-> 1,
+ denomTrace |->
+ [denom |-> "btc",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "channel-0"],
+ prefix1 |-> [channel |-> "transfer", port |-> "cosmos-hub"]],
+ receiver |-> "a2",
+ sender |-> "a1"],
+ destChannel |-> "",
+ destPort |-> "",
+ sourceChannel |-> "",
+ sourcePort |-> ""]]
+/\ p = [data |->
+ [amount |-> 0,
+ denomTrace |->
+ [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "",
+ sender |-> ""],
+ destChannel |-> "",
+ destPort |-> "",
+ sourceChannel |-> "",
+ sourcePort |-> ""]
+
+(* The following formula holds true in the last state and violates the invariant *)
+
+InvariantViolation ==
+ BMC!Skolem((\E s$2 \in DOMAIN history:
+ history[s$2]["handler"] = "OnTimeoutPacket"
+ /\ history[s$2]["error"] = TRUE
+ /\ history[s$2]["packet"]["data"]["amount"] > 0))
+
+================================================================================
+\* Created by Apalache on Thu Dec 10 11:09:25 CET 2020
+\* https://github.com/informalsystems/apalache
diff --git a/applications/transfer/keeper/model_based_tests/TestOnTimeoutPass.json b/applications/transfer/keeper/model_based_tests/TestOnTimeoutPass.json
new file mode 100644
index 00000000..3136aace
--- /dev/null
+++ b/applications/transfer/keeper/model_based_tests/TestOnTimeoutPass.json
@@ -0,0 +1,159 @@
+[
+ {
+ "packet": {
+ "sourceChannel": "channel-0",
+ "sourcePort": "transfer",
+ "destChannel": "channel-1",
+ "destPort": "transfer",
+ "data": {
+ "sender": "a3",
+ "receiver": "a1",
+ "amount": 1,
+ "denom": [
+ "",
+ "",
+ "bitcoin-hub",
+ "transfer",
+ "btc"
+ ]
+ }
+ },
+ "handler": "OnRecvPacket",
+ "bankBefore": [
+ {
+ "address": [
+ "",
+ "",
+ ""
+ ],
+ "denom": [
+ "",
+ "",
+ "",
+ "",
+ ""
+ ],
+ "amount": 0
+ }
+ ],
+ "bankAfter": [
+ {
+ "address": [
+ "",
+ "",
+ ""
+ ],
+ "denom": [
+ "",
+ "",
+ "",
+ "",
+ ""
+ ],
+ "amount": 0
+ },
+ {
+ "address": [
+ "",
+ "",
+ "a1"
+ ],
+ "denom": [
+ "transfer",
+ "channel-1",
+ "bitcoin-hub",
+ "transfer",
+ "btc"
+ ],
+ "amount": 1
+ }
+ ],
+ "error": false
+ },
+ {
+ "packet": {
+ "sourceChannel": "channel-1",
+ "sourcePort": "transfer",
+ "destChannel": "channel-0",
+ "destPort": "transfer",
+ "data": {
+ "sender": "a1",
+ "receiver": "",
+ "amount": 1,
+ "denom": [
+ "transfer",
+ "channel-1",
+ "bitcoin-hub",
+ "transfer",
+ "btc"
+ ]
+ }
+ },
+ "handler": "OnTimeoutPacket",
+ "bankBefore": [
+ {
+ "address": [
+ "",
+ "",
+ ""
+ ],
+ "denom": [
+ "",
+ "",
+ "",
+ "",
+ ""
+ ],
+ "amount": 0
+ },
+ {
+ "address": [
+ "",
+ "",
+ "a1"
+ ],
+ "denom": [
+ "transfer",
+ "channel-1",
+ "bitcoin-hub",
+ "transfer",
+ "btc"
+ ],
+ "amount": 1
+ }
+ ],
+ "bankAfter": [
+ {
+ "address": [
+ "",
+ "",
+ ""
+ ],
+ "denom": [
+ "",
+ "",
+ "",
+ "",
+ ""
+ ],
+ "amount": 0
+ },
+ {
+ "address": [
+ "",
+ "",
+ "a1"
+ ],
+ "denom": [
+ "transfer",
+ "channel-1",
+ "bitcoin-hub",
+ "transfer",
+ "btc"
+ ],
+ "amount": 2
+ }
+ ],
+ "error": false
+ }
+]
\ No newline at end of file
diff --git a/applications/transfer/keeper/model_based_tests/TestOnTimeoutPass.tla b/applications/transfer/keeper/model_based_tests/TestOnTimeoutPass.tla
new file mode 100644
index 00000000..5dc5a994
--- /dev/null
+++ b/applications/transfer/keeper/model_based_tests/TestOnTimeoutPass.tla
@@ -0,0 +1,310 @@
+------------------------- MODULE counterexample -------------------------
+
+EXTENDS relay_tests
+
+(* Initial state *)
+
+State1 ==
+TRUE
+(* Transition 0 to State2 *)
+
+State2 ==
+/\ bank = <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+/\ count = 0
+/\ error = FALSE
+/\ handler = ""
+/\ history = 0
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ error |-> FALSE,
+ handler |-> "",
+ packet |->
+ [data |->
+ [amount |-> 1,
+ denomTrace |->
+ [denom |-> "btc",
+ prefix0 |-> [channel |-> "transfer", port |-> "bitcoin-hub"],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "a1",
+ sender |-> "a3"],
+ destChannel |-> "channel-1",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-0",
+ sourcePort |-> "transfer"]]
+/\ p = [data |->
+ [amount |-> 1,
+ denomTrace |->
+ [denom |-> "btc",
+ prefix0 |-> [channel |-> "transfer", port |-> "bitcoin-hub"],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "a1",
+ sender |-> "a3"],
+ destChannel |-> "channel-1",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-0",
+ sourcePort |-> "transfer"]
+
+(* Transition 2 to State3 *)
+
+State3 ==
+/\ bank = <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a1", port |-> ""], [denom |-> "btc",
+ prefix0 |-> [channel |-> "transfer", port |-> "bitcoin-hub"],
+ prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]]
+ >>
+ :> 1
+/\ count = 1
+/\ error = FALSE
+/\ handler = "OnRecvPacket"
+/\ history = 0
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ error |-> FALSE,
+ handler |-> "",
+ packet |->
+ [data |->
+ [amount |-> 1,
+ denomTrace |->
+ [denom |-> "btc",
+ prefix0 |-> [channel |-> "transfer", port |-> "bitcoin-hub"],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "a1",
+ sender |-> "a3"],
+ destChannel |-> "channel-1",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-0",
+ sourcePort |-> "transfer"]]
+ @@ 1
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a1", port |-> ""], [denom |-> "btc",
+ prefix0 |-> [channel |-> "transfer", port |-> "bitcoin-hub"],
+ prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]]
+ >>
+ :> 1,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ error |-> FALSE,
+ handler |-> "OnRecvPacket",
+ packet |->
+ [data |->
+ [amount |-> 1,
+ denomTrace |->
+ [denom |-> "btc",
+ prefix0 |-> [channel |-> "transfer", port |-> "bitcoin-hub"],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "a1",
+ sender |-> "a3"],
+ destChannel |-> "channel-1",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-0",
+ sourcePort |-> "transfer"]]
+/\ p = [data |->
+ [amount |-> 1,
+ denomTrace |->
+ [denom |-> "btc",
+ prefix0 |-> [channel |-> "transfer", port |-> "bitcoin-hub"],
+ prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]],
+ receiver |-> "",
+ sender |-> "a1"],
+ destChannel |-> "channel-0",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-1",
+ sourcePort |-> "transfer"]
+
+(* Transition 10 to State4 *)
+
+State4 ==
+/\ bank = <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a1", port |-> ""], [denom |-> "btc",
+ prefix0 |-> [channel |-> "transfer", port |-> "bitcoin-hub"],
+ prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]]
+ >>
+ :> 2
+/\ count = 2
+/\ error = FALSE
+/\ handler = "OnTimeoutPacket"
+/\ history = 0
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ error |-> FALSE,
+ handler |-> "",
+ packet |->
+ [data |->
+ [amount |-> 1,
+ denomTrace |->
+ [denom |-> "btc",
+ prefix0 |-> [channel |-> "transfer", port |-> "bitcoin-hub"],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "a1",
+ sender |-> "a3"],
+ destChannel |-> "channel-1",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-0",
+ sourcePort |-> "transfer"]]
+ @@ 1
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a1", port |-> ""], [denom |-> "btc",
+ prefix0 |-> [channel |-> "transfer", port |-> "bitcoin-hub"],
+ prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]]
+ >>
+ :> 1,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ error |-> FALSE,
+ handler |-> "OnRecvPacket",
+ packet |->
+ [data |->
+ [amount |-> 1,
+ denomTrace |->
+ [denom |-> "btc",
+ prefix0 |-> [channel |-> "transfer", port |-> "bitcoin-hub"],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "a1",
+ sender |-> "a3"],
+ destChannel |-> "channel-1",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-0",
+ sourcePort |-> "transfer"]]
+ @@ 2
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a1", port |-> ""], [denom |-> "btc",
+ prefix0 |-> [channel |-> "transfer", port |-> "bitcoin-hub"],
+ prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]]
+ >>
+ :> 2,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a1", port |-> ""], [denom |-> "btc",
+ prefix0 |-> [channel |-> "transfer", port |-> "bitcoin-hub"],
+ prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]]
+ >>
+ :> 1,
+ error |-> FALSE,
+ handler |-> "OnTimeoutPacket",
+ packet |->
+ [data |->
+ [amount |-> 1,
+ denomTrace |->
+ [denom |-> "btc",
+ prefix0 |-> [channel |-> "transfer", port |-> "bitcoin-hub"],
+ prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]],
+ receiver |-> "",
+ sender |-> "a1"],
+ destChannel |-> "channel-0",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-1",
+ sourcePort |-> "transfer"]]
+/\ p = [data |->
+ [amount |-> 0,
+ denomTrace |->
+ [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "",
+ sender |-> ""],
+ destChannel |-> "",
+ destPort |-> "",
+ sourceChannel |-> "",
+ sourcePort |-> ""]
+
+(* The following formula holds true in the last state and violates the invariant *)
+
+InvariantViolation ==
+ BMC!Skolem((\E s$2 \in DOMAIN history:
+ history[s$2]["handler"] = "OnTimeoutPacket"
+ /\ history[s$2]["error"] = FALSE
+ /\ history[s$2]["packet"]["data"]["amount"] > 0))
+
+================================================================================
+\* Created by Apalache on Thu Dec 10 11:07:37 CET 2020
+\* https://github.com/informalsystems/apalache
diff --git a/applications/transfer/keeper/model_based_tests/TestSendTransferFail.json b/applications/transfer/keeper/model_based_tests/TestSendTransferFail.json
new file mode 100644
index 00000000..01d589d8
--- /dev/null
+++ b/applications/transfer/keeper/model_based_tests/TestSendTransferFail.json
@@ -0,0 +1,58 @@
+[
+ {
+ "packet": {
+ "sourceChannel": "channel-0",
+ "sourcePort": "transfer",
+ "destChannel": "channel-0",
+ "destPort": "transfer",
+ "data": {
+ "sender": "",
+ "receiver": "",
+ "amount": 1,
+ "denom": [
+ "",
+ "",
+ "",
+ "",
+ ""
+ ]
+ }
+ },
+ "handler": "SendTransfer",
+ "bankBefore": [
+ {
+ "address": [
+ "",
+ "",
+ ""
+ ],
+ "denom": [
+ "",
+ "",
+ "",
+ "",
+ ""
+ ],
+ "amount": 0
+ }
+ ],
+ "bankAfter": [
+ {
+ "address": [
+ "",
+ "",
+ ""
+ ],
+ "denom": [
+ "",
+ "",
+ "",
+ "",
+ ""
+ ],
+ "amount": 0
+ }
+ ],
+ "error": true
+ }
+]
\ No newline at end of file
diff --git a/applications/transfer/keeper/model_based_tests/TestSendTransferFail.tla b/applications/transfer/keeper/model_based_tests/TestSendTransferFail.tla
new file mode 100644
index 00000000..dc3a1c00
--- /dev/null
+++ b/applications/transfer/keeper/model_based_tests/TestSendTransferFail.tla
@@ -0,0 +1,159 @@
+------------------------- MODULE counterexample -------------------------
+
+EXTENDS relay_tests
+
+(* Initial state *)
+
+State1 ==
+TRUE
+(* Transition 0 to State2 *)
+
+State2 ==
+/\ bank = <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+/\ count = 0
+/\ error = FALSE
+/\ handler = ""
+/\ history = 0
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ error |-> FALSE,
+ handler |-> "",
+ packet |->
+ [data |->
+ [amount |-> 1,
+ denomTrace |->
+ [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "",
+ sender |-> ""],
+ destChannel |-> "channel-0",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-0",
+ sourcePort |-> "transfer"]]
+/\ p = [data |->
+ [amount |-> 1,
+ denomTrace |->
+ [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "",
+ sender |-> ""],
+ destChannel |-> "channel-0",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-0",
+ sourcePort |-> "transfer"]
+
+(* Transition 0 to State3 *)
+
+State3 ==
+/\ bank = <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+/\ count = 1
+/\ error = TRUE
+/\ handler = "SendTransfer"
+/\ history = 0
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ error |-> FALSE,
+ handler |-> "",
+ packet |->
+ [data |->
+ [amount |-> 1,
+ denomTrace |->
+ [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "",
+ sender |-> ""],
+ destChannel |-> "channel-0",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-0",
+ sourcePort |-> "transfer"]]
+ @@ 1
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ error |-> TRUE,
+ handler |-> "SendTransfer",
+ packet |->
+ [data |->
+ [amount |-> 1,
+ denomTrace |->
+ [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "",
+ sender |-> ""],
+ destChannel |-> "channel-0",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-0",
+ sourcePort |-> "transfer"]]
+/\ p = [data |->
+ [amount |-> 0,
+ denomTrace |->
+ [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "",
+ sender |-> ""],
+ destChannel |-> "",
+ destPort |-> "",
+ sourceChannel |-> "",
+ sourcePort |-> ""]
+
+(* The following formula holds true in the last state and violates the invariant *)
+
+InvariantViolation ==
+ BMC!Skolem((\E s$2 \in DOMAIN history:
+ history[s$2]["handler"] = "SendTransfer"
+ /\ history[s$2]["error"] = TRUE
+ /\ history[s$2]["packet"]["data"]["amount"] > 0))
+
+================================================================================
+\* Created by Apalache on Thu Dec 10 11:00:34 CET 2020
+\* https://github.com/informalsystems/apalache
diff --git a/applications/transfer/keeper/model_based_tests/TestSendTransferPass.json b/applications/transfer/keeper/model_based_tests/TestSendTransferPass.json
new file mode 100644
index 00000000..452d2b3a
--- /dev/null
+++ b/applications/transfer/keeper/model_based_tests/TestSendTransferPass.json
@@ -0,0 +1,174 @@
+[
+ {
+ "packet": {
+ "sourceChannel": "channel-0",
+ "sourcePort": "transfer",
+ "destChannel": "channel-0",
+ "destPort": "transfer",
+ "data": {
+ "sender": "a3",
+ "receiver": "a2",
+ "amount": 1,
+ "denom": [
+ "",
+ "",
+ "cosmos-hub",
+ "cosmos-hub",
+ "eth"
+ ]
+ }
+ },
+ "handler": "OnRecvPacket",
+ "bankBefore": [
+ {
+ "address": [
+ "",
+ "",
+ ""
+ ],
+ "denom": [
+ "",
+ "",
+ "",
+ "",
+ ""
+ ],
+ "amount": 0
+ }
+ ],
+ "bankAfter": [
+ {
+ "address": [
+ "",
+ "",
+ ""
+ ],
+ "denom": [
+ "",
+ "",
+ "",
+ "",
+ ""
+ ],
+ "amount": 0
+ },
+ {
+ "address": [
+ "",
+ "",
+ "a2"
+ ],
+ "denom": [
+ "transfer",
+ "channel-0",
+ "cosmos-hub",
+ "cosmos-hub",
+ "eth"
+ ],
+ "amount": 1
+ }
+ ],
+ "error": false
+ },
+ {
+ "packet": {
+ "sourceChannel": "channel-1",
+ "sourcePort": "transfer",
+ "destChannel": "channel-0",
+ "destPort": "transfer",
+ "data": {
+ "sender": "a2",
+ "receiver": "a1",
+ "amount": 1,
+ "denom": [
+ "transfer",
+ "channel-0",
+ "cosmos-hub",
+ "cosmos-hub",
+ "eth"
+ ]
+ }
+ },
+ "handler": "SendTransfer",
+ "bankBefore": [
+ {
+ "address": [
+ "",
+ "",
+ ""
+ ],
+ "denom": [
+ "",
+ "",
+ "",
+ "",
+ ""
+ ],
+ "amount": 0
+ },
+ {
+ "address": [
+ "",
+ "",
+ "a2"
+ ],
+ "denom": [
+ "transfer",
+ "channel-0",
+ "cosmos-hub",
+ "cosmos-hub",
+ "eth"
+ ],
+ "amount": 1
+ }
+ ],
+ "bankAfter": [
+ {
+ "address": [
+ "",
+ "",
+ ""
+ ],
+ "denom": [
+ "",
+ "",
+ "",
+ "",
+ ""
+ ],
+ "amount": 0
+ },
+ {
+ "address": [
+ "",
+ "",
+ "a2"
+ ],
+ "denom": [
+ "transfer",
+ "channel-0",
+ "cosmos-hub",
+ "cosmos-hub",
+ "eth"
+ ],
+ "amount": 0
+ },
+ {
+ "address": [
+ "transfer",
+ "channel-1",
+ ""
+ ],
+ "denom": [
+ "transfer",
+ "channel-0",
+ "cosmos-hub",
+ "cosmos-hub",
+ "eth"
+ ],
+ "amount": 1
+ }
+ ],
+ "error": false
+ }
+]
\ No newline at end of file
diff --git a/applications/transfer/keeper/model_based_tests/TestSendTransferPass.tla b/applications/transfer/keeper/model_based_tests/TestSendTransferPass.tla
new file mode 100644
index 00000000..23c45c67
--- /dev/null
+++ b/applications/transfer/keeper/model_based_tests/TestSendTransferPass.tla
@@ -0,0 +1,323 @@
+------------------------- MODULE counterexample -------------------------
+
+EXTENDS relay_tests
+
+(* Initial state *)
+
+State1 ==
+TRUE
+(* Transition 0 to State2 *)
+
+State2 ==
+/\ bank = <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+/\ count = 0
+/\ error = FALSE
+/\ handler = ""
+/\ history = 0
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ error |-> FALSE,
+ handler |-> "",
+ packet |->
+ [data |->
+ [amount |-> 1,
+ denomTrace |->
+ [denom |-> "eth",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "a2",
+ sender |-> "a3"],
+ destChannel |-> "channel-0",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-0",
+ sourcePort |-> "transfer"]]
+/\ p = [data |->
+ [amount |-> 1,
+ denomTrace |->
+ [denom |-> "eth",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "a2",
+ sender |-> "a3"],
+ destChannel |-> "channel-0",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-0",
+ sourcePort |-> "transfer"]
+
+(* Transition 2 to State3 *)
+
+State3 ==
+/\ bank = <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "eth",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"],
+ prefix1 |-> [channel |-> "channel-0", port |-> "transfer"]]
+ >>
+ :> 1
+/\ count = 1
+/\ error = FALSE
+/\ handler = "OnRecvPacket"
+/\ history = 0
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ error |-> FALSE,
+ handler |-> "",
+ packet |->
+ [data |->
+ [amount |-> 1,
+ denomTrace |->
+ [denom |-> "eth",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "a2",
+ sender |-> "a3"],
+ destChannel |-> "channel-0",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-0",
+ sourcePort |-> "transfer"]]
+ @@ 1
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "eth",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"],
+ prefix1 |-> [channel |-> "channel-0", port |-> "transfer"]]
+ >>
+ :> 1,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ error |-> FALSE,
+ handler |-> "OnRecvPacket",
+ packet |->
+ [data |->
+ [amount |-> 1,
+ denomTrace |->
+ [denom |-> "eth",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "a2",
+ sender |-> "a3"],
+ destChannel |-> "channel-0",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-0",
+ sourcePort |-> "transfer"]]
+/\ p = [data |->
+ [amount |-> 1,
+ denomTrace |->
+ [denom |-> "eth",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"],
+ prefix1 |-> [channel |-> "channel-0", port |-> "transfer"]],
+ receiver |-> "a1",
+ sender |-> "a2"],
+ destChannel |-> "channel-0",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-1",
+ sourcePort |-> "transfer"]
+
+(* Transition 1 to State4 *)
+
+State4 ==
+/\ bank = <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "eth",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"],
+ prefix1 |-> [channel |-> "channel-0", port |-> "transfer"]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "channel-1", id |-> "", port |-> "transfer"], [denom |-> "eth",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"],
+ prefix1 |-> [channel |-> "channel-0", port |-> "transfer"]]
+ >>
+ :> 1
+/\ count = 2
+/\ error = FALSE
+/\ handler = "SendTransfer"
+/\ history = 0
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ error |-> FALSE,
+ handler |-> "",
+ packet |->
+ [data |->
+ [amount |-> 1,
+ denomTrace |->
+ [denom |-> "eth",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "a2",
+ sender |-> "a3"],
+ destChannel |-> "channel-0",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-0",
+ sourcePort |-> "transfer"]]
+ @@ 1
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "eth",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"],
+ prefix1 |-> [channel |-> "channel-0", port |-> "transfer"]]
+ >>
+ :> 1,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ error |-> FALSE,
+ handler |-> "OnRecvPacket",
+ packet |->
+ [data |->
+ [amount |-> 1,
+ denomTrace |->
+ [denom |-> "eth",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "a2",
+ sender |-> "a3"],
+ destChannel |-> "channel-0",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-0",
+ sourcePort |-> "transfer"]]
+ @@ 2
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "eth",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"],
+ prefix1 |-> [channel |-> "channel-0", port |-> "transfer"]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "channel-1", id |-> "", port |-> "transfer"], [denom |->
+ "eth",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"],
+ prefix1 |-> [channel |-> "channel-0", port |-> "transfer"]]
+ >>
+ :> 1,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "eth",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"],
+ prefix1 |-> [channel |-> "channel-0", port |-> "transfer"]]
+ >>
+ :> 1,
+ error |-> FALSE,
+ handler |-> "SendTransfer",
+ packet |->
+ [data |->
+ [amount |-> 1,
+ denomTrace |->
+ [denom |-> "eth",
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"],
+ prefix1 |-> [channel |-> "channel-0", port |-> "transfer"]],
+ receiver |-> "a1",
+ sender |-> "a2"],
+ destChannel |-> "channel-0",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-1",
+ sourcePort |-> "transfer"]]
+/\ p = [data |->
+ [amount |-> 0,
+ denomTrace |->
+ [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "",
+ sender |-> ""],
+ destChannel |-> "",
+ destPort |-> "",
+ sourceChannel |-> "",
+ sourcePort |-> ""]
+
+(* The following formula holds true in the last state and violates the invariant *)
+
+InvariantViolation ==
+ BMC!Skolem((\E s$2 \in DOMAIN history:
+ history[s$2]["handler"] = "SendTransfer"
+ /\ history[s$2]["error"] = FALSE
+ /\ history[s$2]["packet"]["data"]["amount"] > 0))
+
+================================================================================
+\* Created by Apalache on Thu Dec 10 10:58:54 CET 2020
+\* https://github.com/informalsystems/apalache
diff --git a/applications/transfer/keeper/model_based_tests/TestUnescrowTokens.json b/applications/transfer/keeper/model_based_tests/TestUnescrowTokens.json
new file mode 100644
index 00000000..98552207
--- /dev/null
+++ b/applications/transfer/keeper/model_based_tests/TestUnescrowTokens.json
@@ -0,0 +1,305 @@
+[
+ {
+ "packet": {
+ "sourceChannel": "channel-0",
+ "sourcePort": "transfer",
+ "destChannel": "channel-0",
+ "destPort": "transfer",
+ "data": {
+ "sender": "a1",
+ "receiver": "a3",
+ "amount": 5,
+ "denom": [
+ "",
+ "",
+ "",
+ "",
+ "atom"
+ ]
+ }
+ },
+ "handler": "OnRecvPacket",
+ "bankBefore": [
+ {
+ "address": [
+ "",
+ "",
+ ""
+ ],
+ "denom": [
+ "",
+ "",
+ "",
+ "",
+ ""
+ ],
+ "amount": 0
+ }
+ ],
+ "bankAfter": [
+ {
+ "address": [
+ "",
+ "",
+ ""
+ ],
+ "denom": [
+ "",
+ "",
+ "",
+ "",
+ ""
+ ],
+ "amount": 0
+ },
+ {
+ "address": [
+ "",
+ "",
+ "a3"
+ ],
+ "denom": [
+ "",
+ "",
+ "transfer",
+ "channel-0",
+ "atom"
+ ],
+ "amount": 5
+ }
+ ],
+ "error": false
+ },
+ {
+ "packet": {
+ "sourceChannel": "channel-1",
+ "sourcePort": "transfer",
+ "destChannel": "channel-0",
+ "destPort": "transfer",
+ "data": {
+ "sender": "a3",
+ "receiver": "a1",
+ "amount": 3,
+ "denom": [
+ "",
+ "",
+ "transfer",
+ "channel-0",
+ "atom"
+ ]
+ }
+ },
+ "handler": "SendTransfer",
+ "bankBefore": [
+ {
+ "address": [
+ "",
+ "",
+ ""
+ ],
+ "denom": [
+ "",
+ "",
+ "",
+ "",
+ ""
+ ],
+ "amount": 0
+ },
+ {
+ "address": [
+ "",
+ "",
+ "a3"
+ ],
+ "denom": [
+ "",
+ "",
+ "transfer",
+ "channel-0",
+ "atom"
+ ],
+ "amount": 5
+ }
+ ],
+ "bankAfter": [
+ {
+ "address": [
+ "",
+ "",
+ ""
+ ],
+ "denom": [
+ "",
+ "",
+ "",
+ "",
+ ""
+ ],
+ "amount": 0
+ },
+ {
+ "address": [
+ "",
+ "",
+ "a3"
+ ],
+ "denom": [
+ "",
+ "",
+ "transfer",
+ "channel-0",
+ "atom"
+ ],
+ "amount": 2
+ },
+ {
+ "address": [
+ "transfer",
+ "channel-1",
+ ""
+ ],
+ "denom": [
+ "",
+ "",
+ "transfer",
+ "channel-0",
+ "atom"
+ ],
+ "amount": 3
+ }
+ ],
+ "error": false
+ },
+ {
+ "packet": {
+ "sourceChannel": "channel-0",
+ "sourcePort": "transfer",
+ "destChannel": "channel-1",
+ "destPort": "transfer",
+ "data": {
+ "sender": "a1",
+ "receiver": "a1",
+ "amount": 1,
+ "denom": [
+ "transfer",
+ "channel-0",
+ "transfer",
+ "channel-0",
+ "atom"
+ ]
+ }
+ },
+ "handler": "OnRecvPacket",
+ "bankBefore": [
+ {
+ "address": [
+ "",
+ "",
+ ""
+ ],
+ "denom": [
+ "",
+ "",
+ "",
+ "",
+ ""
+ ],
+ "amount": 0
+ },
+ {
+ "address": [
+ "",
+ "",
+ "a3"
+ ],
+ "denom": [
+ "",
+ "",
+ "transfer",
+ "channel-0",
+ "atom"
+ ],
+ "amount": 2
+ },
+ {
+ "address": [
+ "transfer",
+ "channel-1",
+ ""
+ ],
+ "denom": [
+ "",
+ "",
+ "transfer",
+ "channel-0",
+ "atom"
+ ],
+ "amount": 3
+ }
+ ],
+ "bankAfter": [
+ {
+ "address": [
+ "",
+ "",
+ ""
+ ],
+ "denom": [
+ "",
+ "",
+ "",
+ "",
+ ""
+ ],
+ "amount": 0
+ },
+ {
+ "address": [
+ "",
+ "",
+ "a1"
+ ],
+ "denom": [
+ "",
+ "",
+ "transfer",
+ "channel-0",
+ "atom"
+ ],
+ "amount": 1
+ },
+ {
+ "address": [
+ "",
+ "",
+ "a3"
+ ],
+ "denom": [
+ "",
+ "",
+ "transfer",
+ "channel-0",
+ "atom"
+ ],
+ "amount": 2
+ },
+ {
+ "address": [
+ "transfer",
+ "channel-1",
+ ""
+ ],
+ "denom": [
+ "",
+ "",
+ "transfer",
+ "channel-0",
+ "atom"
+ ],
+ "amount": 2
+ }
+ ],
+ "error": false
+ }
+]
\ No newline at end of file
diff --git a/applications/transfer/keeper/model_based_tests/TestUnescrowTokens.tla b/applications/transfer/keeper/model_based_tests/TestUnescrowTokens.tla
new file mode 100644
index 00000000..e99081c1
--- /dev/null
+++ b/applications/transfer/keeper/model_based_tests/TestUnescrowTokens.tla
@@ -0,0 +1,563 @@
+------------------------- MODULE counterexample -------------------------
+
+EXTENDS relay_tests
+
+(* Initial state *)
+
+State1 ==
+TRUE
+(* Transition 0 to State2 *)
+
+State2 ==
+/\ bank = <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+/\ count = 0
+/\ error = FALSE
+/\ handler = ""
+/\ history = 0
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ error |-> FALSE,
+ handler |-> "",
+ packet |->
+ [data |->
+ [amount |-> 5,
+ denomTrace |->
+ [denom |-> "atom",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "a3",
+ sender |-> "a1"],
+ destChannel |-> "channel-0",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-0",
+ sourcePort |-> "transfer"]]
+/\ p = [data |->
+ [amount |-> 5,
+ denomTrace |->
+ [denom |-> "atom",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "a3",
+ sender |-> "a1"],
+ destChannel |-> "channel-0",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-0",
+ sourcePort |-> "transfer"]
+
+(* Transition 3 to State3 *)
+
+State3 ==
+/\ bank = <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "atom",
+ prefix0 |-> [channel |-> "channel-0", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 5
+/\ count = 1
+/\ error = FALSE
+/\ handler = "OnRecvPacket"
+/\ history = 0
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ error |-> FALSE,
+ handler |-> "",
+ packet |->
+ [data |->
+ [amount |-> 5,
+ denomTrace |->
+ [denom |-> "atom",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "a3",
+ sender |-> "a1"],
+ destChannel |-> "channel-0",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-0",
+ sourcePort |-> "transfer"]]
+ @@ 1
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "atom",
+ prefix0 |-> [channel |-> "channel-0", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 5,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ error |-> FALSE,
+ handler |-> "OnRecvPacket",
+ packet |->
+ [data |->
+ [amount |-> 5,
+ denomTrace |->
+ [denom |-> "atom",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "a3",
+ sender |-> "a1"],
+ destChannel |-> "channel-0",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-0",
+ sourcePort |-> "transfer"]]
+/\ p = [data |->
+ [amount |-> 3,
+ denomTrace |->
+ [denom |-> "atom",
+ prefix0 |-> [channel |-> "channel-0", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "a1",
+ sender |-> "a3"],
+ destChannel |-> "channel-0",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-1",
+ sourcePort |-> "transfer"]
+
+(* Transition 1 to State4 *)
+
+State4 ==
+/\ bank = <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "atom",
+ prefix0 |-> [channel |-> "channel-0", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 2
+ @@ <<
+ [channel |-> "channel-1", id |-> "", port |-> "transfer"], [denom |-> "atom",
+ prefix0 |-> [channel |-> "channel-0", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 3
+/\ count = 2
+/\ error = FALSE
+/\ handler = "SendTransfer"
+/\ history = 0
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ error |-> FALSE,
+ handler |-> "",
+ packet |->
+ [data |->
+ [amount |-> 5,
+ denomTrace |->
+ [denom |-> "atom",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "a3",
+ sender |-> "a1"],
+ destChannel |-> "channel-0",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-0",
+ sourcePort |-> "transfer"]]
+ @@ 1
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "atom",
+ prefix0 |-> [channel |-> "channel-0", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 5,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ error |-> FALSE,
+ handler |-> "OnRecvPacket",
+ packet |->
+ [data |->
+ [amount |-> 5,
+ denomTrace |->
+ [denom |-> "atom",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "a3",
+ sender |-> "a1"],
+ destChannel |-> "channel-0",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-0",
+ sourcePort |-> "transfer"]]
+ @@ 2
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "atom",
+ prefix0 |-> [channel |-> "channel-0", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 2
+ @@ <<
+ [channel |-> "channel-1", id |-> "", port |-> "transfer"], [denom |->
+ "atom",
+ prefix0 |-> [channel |-> "channel-0", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 3,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "atom",
+ prefix0 |-> [channel |-> "channel-0", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 5,
+ error |-> FALSE,
+ handler |-> "SendTransfer",
+ packet |->
+ [data |->
+ [amount |-> 3,
+ denomTrace |->
+ [denom |-> "atom",
+ prefix0 |-> [channel |-> "channel-0", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "a1",
+ sender |-> "a3"],
+ destChannel |-> "channel-0",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-1",
+ sourcePort |-> "transfer"]]
+/\ p = [data |->
+ [amount |-> 1,
+ denomTrace |->
+ [denom |-> "atom",
+ prefix0 |-> [channel |-> "channel-0", port |-> "transfer"],
+ prefix1 |-> [channel |-> "channel-0", port |-> "transfer"]],
+ receiver |-> "a1",
+ sender |-> "a1"],
+ destChannel |-> "channel-1",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-0",
+ sourcePort |-> "transfer"]
+
+(* Transition 4 to State5 *)
+
+State5 ==
+/\ bank = <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a1", port |-> ""], [denom |-> "atom",
+ prefix0 |-> [channel |-> "channel-0", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 1
+ @@ <<
+ [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "atom",
+ prefix0 |-> [channel |-> "channel-0", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 2
+ @@ <<
+ [channel |-> "channel-1", id |-> "", port |-> "transfer"], [denom |-> "atom",
+ prefix0 |-> [channel |-> "channel-0", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 2
+/\ count = 3
+/\ error = FALSE
+/\ handler = "OnRecvPacket"
+/\ history = 0
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ error |-> FALSE,
+ handler |-> "",
+ packet |->
+ [data |->
+ [amount |-> 5,
+ denomTrace |->
+ [denom |-> "atom",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "a3",
+ sender |-> "a1"],
+ destChannel |-> "channel-0",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-0",
+ sourcePort |-> "transfer"]]
+ @@ 1
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "atom",
+ prefix0 |-> [channel |-> "channel-0", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 5,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0,
+ error |-> FALSE,
+ handler |-> "OnRecvPacket",
+ packet |->
+ [data |->
+ [amount |-> 5,
+ denomTrace |->
+ [denom |-> "atom",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "a3",
+ sender |-> "a1"],
+ destChannel |-> "channel-0",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-0",
+ sourcePort |-> "transfer"]]
+ @@ 2
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "atom",
+ prefix0 |-> [channel |-> "channel-0", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 2
+ @@ <<
+ [channel |-> "channel-1", id |-> "", port |-> "transfer"], [denom |->
+ "atom",
+ prefix0 |-> [channel |-> "channel-0", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 3,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "atom",
+ prefix0 |-> [channel |-> "channel-0", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 5,
+ error |-> FALSE,
+ handler |-> "SendTransfer",
+ packet |->
+ [data |->
+ [amount |-> 3,
+ denomTrace |->
+ [denom |-> "atom",
+ prefix0 |-> [channel |-> "channel-0", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]],
+ receiver |-> "a1",
+ sender |-> "a3"],
+ destChannel |-> "channel-0",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-1",
+ sourcePort |-> "transfer"]]
+ @@ 3
+ :> [bankAfter |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a1", port |-> ""], [denom |-> "atom",
+ prefix0 |-> [channel |-> "channel-0", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 1
+ @@ <<
+ [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "atom",
+ prefix0 |-> [channel |-> "channel-0", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 2
+ @@ <<
+ [channel |-> "channel-1", id |-> "", port |-> "transfer"], [denom |->
+ "atom",
+ prefix0 |-> [channel |-> "channel-0", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 2,
+ bankBefore |->
+ <<
+ [channel |-> "", id |-> "", port |-> ""], [denom |-> "",
+ prefix0 |-> [channel |-> "", port |-> ""],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 0
+ @@ <<
+ [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "atom",
+ prefix0 |-> [channel |-> "channel-0", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 2
+ @@ <<
+ [channel |-> "channel-1", id |-> "", port |-> "transfer"], [denom |->
+ "atom",
+ prefix0 |-> [channel |-> "channel-0", port |-> "transfer"],
+ prefix1 |-> [channel |-> "", port |-> ""]]
+ >>
+ :> 3,
+ error |-> FALSE,
+ handler |-> "OnRecvPacket",
+ packet |->
+ [data |->
+ [amount |-> 1,
+ denomTrace |->
+ [denom |-> "atom",
+ prefix0 |-> [channel |-> "channel-0", port |-> "transfer"],
+ prefix1 |-> [channel |-> "channel-0", port |-> "transfer"]],
+ receiver |-> "a1",
+ sender |-> "a1"],
+ destChannel |-> "channel-1",
+ destPort |-> "transfer",
+ sourceChannel |-> "channel-0",
+ sourcePort |-> "transfer"]]
+/\ p = [data |->
+ [amount |-> 0,
+ denomTrace |->
+ [denom |-> "btc",
+ prefix0 |-> [channel |-> "transfer", port |-> "bitcoin-hub"],
+ prefix1 |-> [channel |-> "channel-0", port |-> "channel-1"]],
+ receiver |-> "a1",
+ sender |-> ""],
+ destChannel |-> "ethereum-hub",
+ destPort |-> "cosmos-hub",
+ sourceChannel |-> "channel-0",
+ sourcePort |-> "transfer"]
+
+(* The following formula holds true in the last state and violates the invariant *)
+
+InvariantViolation ==
+ history[1]["handler"] = "OnRecvPacket"
+ /\ BMC!Skolem((\E s$2 \in DOMAIN history:
+ ((IF history[s$2]["packet"]["data"]["denomTrace"]["prefix0"]
+ = [port |-> "", channel |-> ""]
+ THEN [port |-> "", channel |-> ""]
+ ELSE IF history[s$2]["packet"]["data"]["denomTrace"]["prefix1"]
+ = [port |-> "", channel |-> ""]
+ THEN history[s$2]["packet"]["data"]["denomTrace"]["prefix0"]
+ ELSE history[s$2]["packet"]["data"]["denomTrace"]["prefix1"])[
+ "port"
+ ]
+ = history[s$2]["packet"]["sourcePort"]
+ /\ (IF history[s$2]["packet"]["data"]["denomTrace"]["prefix0"]
+ = [port |-> "", channel |-> ""]
+ THEN [port |-> "", channel |-> ""]
+ ELSE IF history[s$2]["packet"]["data"]["denomTrace"]["prefix1"]
+ = [port |-> "", channel |-> ""]
+ THEN history[s$2]["packet"]["data"]["denomTrace"]["prefix0"]
+ ELSE history[s$2]["packet"]["data"]["denomTrace"]["prefix1"])[
+ "channel"
+ ]
+ = history[s$2]["packet"]["sourceChannel"])
+ /\ history[s$2]["handler"] = "OnRecvPacket"
+ /\ history[s$2]["error"] = FALSE))
+
+================================================================================
+\* Created by Apalache on Thu Dec 10 13:38:11 CET 2020
+\* https://github.com/informalsystems/apalache
diff --git a/applications/transfer/keeper/msg_server.go b/applications/transfer/keeper/msg_server.go
new file mode 100644
index 00000000..dd2999af
--- /dev/null
+++ b/applications/transfer/keeper/msg_server.go
@@ -0,0 +1,43 @@
+package keeper
+
+import (
+ "context"
+
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types"
+)
+
+var _ types.MsgServer = Keeper{}
+
+// See createOutgoingPacket in spec: https://github.com/cosmos/ics/tree/master/spec/ics-020-fungible-token-transfer#packet-relay
+
+// Transfer defines an rpc handler method for MsgTransfer.
+func (k Keeper) Transfer(goCtx context.Context, msg *types.MsgTransfer) (*types.MsgTransferResponse, error) {
+ ctx := sdk.UnwrapSDKContext(goCtx)
+
+ sender, err := sdk.AccAddressFromBech32(msg.Sender)
+ if err != nil {
+ return nil, err
+ }
+ if err := k.SendTransfer(
+ ctx, msg.SourcePort, msg.SourceChannel, msg.Token, sender, msg.Receiver, msg.TimeoutHeight, msg.TimeoutTimestamp,
+ ); err != nil {
+ return nil, err
+ }
+
+ k.Logger(ctx).Info("IBC fungible token transfer", "token", msg.Token.Denom, "amount", msg.Token.Amount.String(), "sender", msg.Sender, "receiver", msg.Receiver)
+
+ ctx.EventManager().EmitEvents(sdk.Events{
+ sdk.NewEvent(
+ types.EventTypeTransfer,
+ sdk.NewAttribute(sdk.AttributeKeySender, msg.Sender),
+ sdk.NewAttribute(types.AttributeKeyReceiver, msg.Receiver),
+ ),
+ sdk.NewEvent(
+ sdk.EventTypeMessage,
+ sdk.NewAttribute(sdk.AttributeKeyModule, types.ModuleName),
+ ),
+ })
+
+ return &types.MsgTransferResponse{}, nil
+}
diff --git a/applications/transfer/keeper/params.go b/applications/transfer/keeper/params.go
new file mode 100644
index 00000000..39a6c5d5
--- /dev/null
+++ b/applications/transfer/keeper/params.go
@@ -0,0 +1,30 @@
+package keeper
+
+import (
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types"
+)
+
+// GetSendEnabled retrieves the send enabled boolean from the paramstore
+func (k Keeper) GetSendEnabled(ctx sdk.Context) bool {
+ var res bool
+ k.paramSpace.Get(ctx, types.KeySendEnabled, &res)
+ return res
+}
+
+// GetReceiveEnabled retrieves the receive enabled boolean from the paramstore
+func (k Keeper) GetReceiveEnabled(ctx sdk.Context) bool {
+ var res bool
+ k.paramSpace.Get(ctx, types.KeyReceiveEnabled, &res)
+ return res
+}
+
+// GetParams returns the total set of ibc-transfer parameters.
+func (k Keeper) GetParams(ctx sdk.Context) types.Params {
+ return types.NewParams(k.GetSendEnabled(ctx), k.GetReceiveEnabled(ctx))
+}
+
+// SetParams sets the total set of ibc-transfer parameters.
+func (k Keeper) SetParams(ctx sdk.Context, params types.Params) {
+ k.paramSpace.SetParamSet(ctx, ¶ms)
+ k.paramSpace.SetParamSet(ctx, &params)
diff --git a/applications/transfer/keeper/params_test.go b/applications/transfer/keeper/params_test.go
new file mode 100644
index 00000000..96f17ff7
--- /dev/null
+++ b/applications/transfer/keeper/params_test.go
@@ -0,0 +1,15 @@
+package keeper_test
+
+import "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types"
+
+func (suite *KeeperTestSuite) TestParams() {
+ expParams := types.DefaultParams()
+
+ params := suite.chainA.App.TransferKeeper.GetParams(suite.chainA.GetContext())
+ suite.Require().Equal(expParams, params)
+
+ expParams.SendEnabled = false
+ suite.chainA.App.TransferKeeper.SetParams(suite.chainA.GetContext(), expParams)
+ params = suite.chainA.App.TransferKeeper.GetParams(suite.chainA.GetContext())
+ suite.Require().Equal(expParams, params)
+}
diff --git a/applications/transfer/keeper/relay.go b/applications/transfer/keeper/relay.go
new file mode 100644
index 00000000..4889014a
--- /dev/null
+++ b/applications/transfer/keeper/relay.go
@@ -0,0 +1,406 @@
+package keeper
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/armon/go-metrics"
+
+ "github.com/cosmos/cosmos-sdk/telemetry"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types"
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+ host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+)
+
+// SendTransfer handles transfer sending logic. There are 2 possible cases:
+//
+// 1. Sender chain is acting as the source zone. The coins are transferred
+// to an escrow address (i.e locked) on the sender chain and then transferred
+// to the receiving chain through IBC TAO logic. It is expected that the
+// receiving chain will mint vouchers to the receiving address.
+//
+// 2. Sender chain is acting as the sink zone. The coins (vouchers) are burned
+// on the sender chain and then transferred to the receiving chain through IBC
+// TAO logic. It is expected that the receiving chain, which had previously
+// sent the original denomination, will unescrow the fungible token and send
+// it to the receiving address.
+//
+// Another way of thinking of source and sink zones is through the token's
+// timeline. Each send to any chain other than the one it was previously
+// received from is a movement forwards in the token's timeline. This causes
+// trace to be added to the token's history and the destination port and
+// destination channel to be prefixed to the denomination. In these instances
+// the sender chain is acting as the source zone. When the token is sent back
+// to the chain it previously received from, the prefix is removed. This is
+// a backwards movement in the token's timeline and the sender chain
+// is acting as the sink zone.
+//
+// Example:
+// These steps of transfer occur: A -> B -> C -> A -> C -> B -> A
+//
+// 1. A -> B : sender chain is source zone. Denom upon receiving: 'B/denom'
+// 2. B -> C : sender chain is source zone. Denom upon receiving: 'C/B/denom'
+// 3. C -> A : sender chain is source zone. Denom upon receiving: 'A/C/B/denom'
+// 4. A -> C : sender chain is sink zone. Denom upon receiving: 'C/B/denom'
+// 5. C -> B : sender chain is sink zone. Denom upon receiving: 'B/denom'
+// 6. B -> A : sender chain is sink zone. Denom upon receiving: 'denom'
+func (k Keeper) SendTransfer(
+ ctx sdk.Context,
+ sourcePort,
+ sourceChannel string,
+ token sdk.Coin,
+ sender sdk.AccAddress,
+ receiver string,
+ timeoutHeight clienttypes.Height,
+ timeoutTimestamp uint64,
+) error {
+
+ if !k.GetSendEnabled(ctx) {
+ return types.ErrSendDisabled
+ }
+
+ sourceChannelEnd, found := k.channelKeeper.GetChannel(ctx, sourcePort, sourceChannel)
+ if !found {
+ return sdkerrors.Wrapf(channeltypes.ErrChannelNotFound, "port ID (%s) channel ID (%s)", sourcePort, sourceChannel)
+ }
+
+ destinationPort := sourceChannelEnd.GetCounterparty().GetPortID()
+ destinationChannel := sourceChannelEnd.GetCounterparty().GetChannelID()
+
+ // get the next sequence
+ sequence, found := k.channelKeeper.GetNextSequenceSend(ctx, sourcePort, sourceChannel)
+ if !found {
+ return sdkerrors.Wrapf(
+ channeltypes.ErrSequenceSendNotFound,
+ "source port: %s, source channel: %s", sourcePort, sourceChannel,
+ )
+ }
+
+ // begin createOutgoingPacket logic
+ // See spec for this logic: https://github.com/cosmos/ics/tree/master/spec/ics-020-fungible-token-transfer#packet-relay
+ channelCap, ok := k.scopedKeeper.GetCapability(ctx, host.ChannelCapabilityPath(sourcePort, sourceChannel))
+ if !ok {
+ return sdkerrors.Wrap(channeltypes.ErrChannelCapabilityNotFound, "module does not own channel capability")
+ }
+
+ // NOTE: denomination and hex hash correctness checked during msg.ValidateBasic
+ fullDenomPath := token.Denom
+
+ var err error
+
+ // deconstruct the token denomination into the denomination trace info
+ // to determine if the sender is the source chain
+ if strings.HasPrefix(token.Denom, "ibc/") {
+ fullDenomPath, err = k.DenomPathFromHash(ctx, token.Denom)
+ if err != nil {
+ return err
+ }
+ }
+
+ labels := []metrics.Label{
+ telemetry.NewLabel("destination-port", destinationPort),
+ telemetry.NewLabel("destination-channel", destinationChannel),
+ }
+
+ // NOTE: SendTransfer simply sends the denomination as it exists on its own
+ // chain inside the packet data. The receiving chain will perform denom
+ // prefixing as necessary.
+
+ if types.SenderChainIsSource(sourcePort, sourceChannel, fullDenomPath) {
+ labels = append(labels, telemetry.NewLabel("source", "true"))
+
+ // create the escrow address for the tokens
+ escrowAddress := types.GetEscrowAddress(sourcePort, sourceChannel)
+
+ // escrow source tokens. It fails if the balance is insufficient.
+ if err := k.bankKeeper.SendCoins(
+ ctx, sender, escrowAddress, sdk.NewCoins(token),
+ ); err != nil {
+ return err
+ }
+
+ } else {
+ labels = append(labels, telemetry.NewLabel("source", "false"))
+
+ // transfer the coins to the module account and burn them
+ if err := k.bankKeeper.SendCoinsFromAccountToModule(
+ ctx, sender, types.ModuleName, sdk.NewCoins(token),
+ ); err != nil {
+ return err
+ }
+
+ if err := k.bankKeeper.BurnCoins(
+ ctx, types.ModuleName, sdk.NewCoins(token),
+ ); err != nil {
+ // NOTE: should not happen as the module account was
+ // retrieved in the step above and it has enough balance
+ // to burn.
+ panic(fmt.Sprintf("cannot burn coins after a successful send to a module account: %v", err))
+ }
+ }
+
+ packetData := types.NewFungibleTokenPacketData(
+ fullDenomPath, token.Amount.Uint64(), sender.String(), receiver,
+ )
+
+ packet := channeltypes.NewPacket(
+ packetData.GetBytes(),
+ sequence,
+ sourcePort,
+ sourceChannel,
+ destinationPort,
+ destinationChannel,
+ timeoutHeight,
+ timeoutTimestamp,
+ )
+
+ if err := k.channelKeeper.SendPacket(ctx, channelCap, packet); err != nil {
+ return err
+ }
+
+ defer func() {
+ telemetry.SetGaugeWithLabels(
+ []string{"tx", "msg", "ibc", "transfer"},
+ float32(token.Amount.Int64()),
+ []metrics.Label{telemetry.NewLabel("denom", fullDenomPath)},
+ )
+
+ telemetry.IncrCounterWithLabels(
+ []string{"ibc", types.ModuleName, "send"},
+ 1,
+ labels,
+ )
+ }()
+
+ return nil
+}
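+
+// NOTE: illustrative sketch only; it is not used by this keeper and the helper
+// name is hypothetical. It restates the source/sink rule documented above: the
+// sender chain is the source zone exactly when the token's denom path does NOT
+// already carry the prefix of the port/channel it is about to be sent over.
+// The real check is performed by types.SenderChainIsSource.
+func senderChainIsSourceSketch(sourcePort, sourceChannel, fullDenomPath string) bool {
+ prefix := sourcePort + "/" + sourceChannel + "/"
+ // e.g. sending "uatom" over transfer/channel-0 -> true (source zone),
+ // sending "transfer/channel-0/uatom" back over the same channel -> false (sink zone)
+ return !strings.HasPrefix(fullDenomPath, prefix)
+}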
+
+// OnRecvPacket processes a cross-chain fungible token transfer. If the
+// sender chain is the source of the tokens, vouchers are minted
+// and sent to the receiving address. Otherwise, if the sender chain is sending
+// back tokens this chain originally transferred to it, the tokens are
+// unescrowed and sent to the receiving address.
+func (k Keeper) OnRecvPacket(ctx sdk.Context, packet channeltypes.Packet, data types.FungibleTokenPacketData) error {
+ // validate packet data upon receiving
+ if err := data.ValidateBasic(); err != nil {
+ return err
+ }
+
+ if !k.GetReceiveEnabled(ctx) {
+ return types.ErrReceiveDisabled
+ }
+
+ // decode the receiver address
+ receiver, err := sdk.AccAddressFromBech32(data.Receiver)
+ if err != nil {
+ return err
+ }
+
+ labels := []metrics.Label{
+ telemetry.NewLabel("source-port", packet.GetSourcePort()),
+ telemetry.NewLabel("source-channel", packet.GetSourceChannel()),
+ }
+
+ // This is the prefix that would have been prepended to the denomination
+ // on the sender chain if and only if the token originally came from the
+ // receiving chain.
+ //
+ // NOTE: We use SourcePort and SourceChannel here, because the counterparty
+ // chain would have prefixed with DestPort and DestChannel when originally
+ // receiving this coin as seen in the "sender chain is the source" condition.
+
+ if types.ReceiverChainIsSource(packet.GetSourcePort(), packet.GetSourceChannel(), data.Denom) {
+ // sender chain is not the source, unescrow tokens
+
+ // remove prefix added by sender chain
+ voucherPrefix := types.GetDenomPrefix(packet.GetSourcePort(), packet.GetSourceChannel())
+ unprefixedDenom := data.Denom[len(voucherPrefix):]
+
+ // coin denomination used in sending from the escrow address
+ denom := unprefixedDenom
+
+ // The denomination used to send the coins is either the native denom or the hash of the path
+ // if the denomination is not native.
+ denomTrace := types.ParseDenomTrace(unprefixedDenom)
+ if denomTrace.Path != "" {
+ denom = denomTrace.IBCDenom()
+ }
+ token := sdk.NewCoin(denom, sdk.NewIntFromUint64(data.Amount))
+
+ // unescrow tokens
+ escrowAddress := types.GetEscrowAddress(packet.GetDestPort(), packet.GetDestChannel())
+ if err := k.bankKeeper.SendCoins(ctx, escrowAddress, receiver, sdk.NewCoins(token)); err != nil {
+ // NOTE: this error is only expected to occur given an unexpected bug or a malicious
+ // counterparty module. The bug may occur in bank or any part of the code that allows
+ // the escrow address to be drained. A malicious counterparty module could drain the
+ // escrow address by allowing more tokens to be sent back than were escrowed.
+ return sdkerrors.Wrap(err, "unable to unescrow tokens, this may be caused by a malicious counterparty module or a bug: please open an issue on counterparty module")
+ }
+
+ defer func() {
+ telemetry.SetGaugeWithLabels(
+ []string{"ibc", types.ModuleName, "packet", "receive"},
+ float32(data.Amount),
+ []metrics.Label{telemetry.NewLabel("denom", unprefixedDenom)},
+ )
+
+ telemetry.IncrCounterWithLabels(
+ []string{"ibc", types.ModuleName, "receive"},
+ 1,
+ append(
+ labels, telemetry.NewLabel("source", "true"),
+ ),
+ )
+ }()
+
+ return nil
+ }
+
+ // sender chain is the source, mint vouchers
+
+ // since SendPacket did not prefix the denomination, we must prefix denomination here
+ sourcePrefix := types.GetDenomPrefix(packet.GetDestPort(), packet.GetDestChannel())
+ // NOTE: sourcePrefix contains the trailing "/"
+ prefixedDenom := sourcePrefix + data.Denom
+
+ // construct the denomination trace from the full raw denomination
+ denomTrace := types.ParseDenomTrace(prefixedDenom)
+
+ traceHash := denomTrace.Hash()
+ if !k.HasDenomTrace(ctx, traceHash) {
+ k.SetDenomTrace(ctx, denomTrace)
+ }
+
+ voucherDenom := denomTrace.IBCDenom()
+ ctx.EventManager().EmitEvent(
+ sdk.NewEvent(
+ types.EventTypeDenomTrace,
+ sdk.NewAttribute(types.AttributeKeyTraceHash, traceHash.String()),
+ sdk.NewAttribute(types.AttributeKeyDenom, voucherDenom),
+ ),
+ )
+
+ voucher := sdk.NewCoin(voucherDenom, sdk.NewIntFromUint64(data.Amount))
+
+ // mint new tokens if the source of the transfer is the same chain
+ if err := k.bankKeeper.MintCoins(
+ ctx, types.ModuleName, sdk.NewCoins(voucher),
+ ); err != nil {
+ return err
+ }
+
+ // send to receiver
+ if err := k.bankKeeper.SendCoinsFromModuleToAccount(
+ ctx, types.ModuleName, receiver, sdk.NewCoins(voucher),
+ ); err != nil {
+ panic(fmt.Sprintf("unable to send coins from module to account despite previously minting coins to module account: %v", err))
+ }
+
+ defer func() {
+ telemetry.SetGaugeWithLabels(
+ []string{"ibc", types.ModuleName, "packet", "receive"},
+ float32(data.Amount),
+ []metrics.Label{telemetry.NewLabel("denom", data.Denom)},
+ )
+
+ telemetry.IncrCounterWithLabels(
+ []string{"ibc", types.ModuleName, "receive"},
+ 1,
+ append(
+ labels, telemetry.NewLabel("source", "false"),
+ ),
+ )
+ }()
+
+ return nil
+}
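+
+// Illustrative sketch of how the voucher denomination minted above is derived
+// from the prefixed trace path. The helper name is hypothetical and this
+// keeper delegates the real computation to types.DenomTrace and IBCDenom();
+// the sketch assumes crypto/sha256 and encoding/hex imports:
+//
+//	func voucherDenomSketch(destPort, destChannel, baseDenom string) string {
+//		prefixed := destPort + "/" + destChannel + "/" + baseDenom // e.g. "transfer/channel-0/uatom"
+//		sum := sha256.Sum256([]byte(prefixed))
+//		return "ibc/" + strings.ToUpper(hex.EncodeToString(sum[:]))
+//	}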
+
+// OnAcknowledgementPacket responds to the success or failure of a packet
+// acknowledgement written on the receiving chain. If the acknowledgement
+// was a success then nothing occurs. If the acknowledgement failed, then
+// the sender is refunded their tokens using the refundPacketToken function.
+func (k Keeper) OnAcknowledgementPacket(ctx sdk.Context, packet channeltypes.Packet, data types.FungibleTokenPacketData, ack channeltypes.Acknowledgement) error {
+ switch ack.Response.(type) {
+ case *channeltypes.Acknowledgement_Error:
+ return k.refundPacketToken(ctx, packet, data)
+ default:
+ // the acknowledgement succeeded on the receiving chain so nothing
+ // needs to be executed and no error needs to be returned
+ return nil
+ }
+}
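+
+// For illustration, the two acknowledgement shapes handled above are the ones
+// constructed via channeltypes (as also exercised by the tests in this package):
+//
+//	channeltypes.NewResultAcknowledgement([]byte{byte(1)})         // success: no-op here
+//	channeltypes.NewErrorAcknowledgement("failed packet transfer") // error: triggers refundPacketToken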
+
+// OnTimeoutPacket refunds the sender since the original packet sent was
+// never received and has been timed out.
+func (k Keeper) OnTimeoutPacket(ctx sdk.Context, packet channeltypes.Packet, data types.FungibleTokenPacketData) error {
+ return k.refundPacketToken(ctx, packet, data)
+}
+
+// refundPacketToken will unescrow the tokens and send them back to the sender
+// if the sending chain was the source chain. Otherwise, the sent tokens
+// were burnt in the original send, so new tokens are minted and sent to
+// the sending address.
+func (k Keeper) refundPacketToken(ctx sdk.Context, packet channeltypes.Packet, data types.FungibleTokenPacketData) error {
+ // NOTE: packet data type already checked in handler.go
+
+ // parse the denomination from the full denom path
+ trace := types.ParseDenomTrace(data.Denom)
+
+ token := sdk.NewCoin(trace.IBCDenom(), sdk.NewIntFromUint64(data.Amount))
+
+ // decode the sender address
+ sender, err := sdk.AccAddressFromBech32(data.Sender)
+ if err != nil {
+ return err
+ }
+
+ if types.SenderChainIsSource(packet.GetSourcePort(), packet.GetSourceChannel(), data.Denom) {
+ // unescrow tokens back to sender
+ escrowAddress := types.GetEscrowAddress(packet.GetSourcePort(), packet.GetSourceChannel())
+ if err := k.bankKeeper.SendCoins(ctx, escrowAddress, sender, sdk.NewCoins(token)); err != nil {
+ // NOTE: this error is only expected to occur given an unexpected bug or a malicious
+ // counterparty module. The bug may occur in bank or any part of the code that allows
+ // the escrow address to be drained. A malicious counterparty module could drain the
+ // escrow address by allowing more tokens to be sent back than were escrowed.
+ return sdkerrors.Wrap(err, "unable to unescrow tokens, this may be caused by a malicious counterparty module or a bug: please open an issue on counterparty module")
+ }
+
+ return nil
+ }
+
+ // mint vouchers back to sender
+ if err := k.bankKeeper.MintCoins(
+ ctx, types.ModuleName, sdk.NewCoins(token),
+ ); err != nil {
+ return err
+ }
+
+ if err := k.bankKeeper.SendCoinsFromModuleToAccount(ctx, types.ModuleName, sender, sdk.NewCoins(token)); err != nil {
+ panic(fmt.Sprintf("unable to send coins from module to account despite previously minting coins to module account: %v", err))
+ }
+
+ return nil
+}
+
+// DenomPathFromHash returns the full denomination path prefix from an ibc denom with a hash
+// component.
+func (k Keeper) DenomPathFromHash(ctx sdk.Context, denom string) (string, error) {
+ // trim the denomination prefix, by default "ibc/"
+ hexHash := denom[len(types.DenomPrefix+"/"):]
+
+ hash, err := types.ParseHexHash(hexHash)
+ if err != nil {
+ return "", sdkerrors.Wrap(types.ErrInvalidDenomForTransfer, err.Error())
+ }
+
+ denomTrace, found := k.GetDenomTrace(ctx, hash)
+ if !found {
+ return "", sdkerrors.Wrap(types.ErrTraceNotFound, hexHash)
+ }
+
+ fullDenomPath := denomTrace.GetFullDenomPath()
+ return fullDenomPath, nil
+}
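+
+// Illustrative usage of DenomPathFromHash (the hash placeholder below is
+// hypothetical and stands for the hex hash embedded in an "ibc/..." denom):
+//
+//	path, err := k.DenomPathFromHash(ctx, "ibc/<hex-hash>")
+//	// path == "transfer/channel-0/uatom" if that trace was previously stored,
+//	// e.g. by OnRecvPacket via SetDenomTrace; otherwise ErrTraceNotFound.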
diff --git a/applications/transfer/keeper/relay_model/account.tla b/applications/transfer/keeper/relay_model/account.tla
new file mode 100644
index 00000000..84d743f6
--- /dev/null
+++ b/applications/transfer/keeper/relay_model/account.tla
@@ -0,0 +1,36 @@
+-------------------------- MODULE account ----------------------------
+
+(**
+ The accounts interface; please ignore the definition bodies.
+*)
+
+EXTENDS identifiers
+
+CONSTANT
+ AccountIds
+
+\* a non-account
+NullAccount == "NullAccount"
+
+\* All accounts
+Accounts == { NullAccount }
+
+\* Make an escrow account for the given port and channel
+MakeEscrowAccount(port, channel) == NullAccount
+
+\* Make an account from the account id
+MakeAccount(accountId) == NullAccount
+
+\* Type constraints for accounts
+AccountTypeOK ==
+ /\ NullAccount \in Accounts
+ /\ \A p \in Identifiers, c \in Identifiers:
+ MakeEscrowAccount(p, c) \in Accounts
+ /\ \A a \in Identifiers:
+ MakeAccount(a) \in Accounts
+
+=============================================================================
+\* Modification History
+\* Last modified Thu Nov 19 18:21:10 CET 2020 by c
+\* Last modified Thu Nov 05 14:44:18 CET 2020 by andrey
+\* Created Thu Nov 05 13:22:40 CET 2020 by andrey
diff --git a/applications/transfer/keeper/relay_model/account_record.tla b/applications/transfer/keeper/relay_model/account_record.tla
new file mode 100644
index 00000000..c7eed27a
--- /dev/null
+++ b/applications/transfer/keeper/relay_model/account_record.tla
@@ -0,0 +1,46 @@
+-------------------------- MODULE account_record ----------------------------
+
+(**
+ The most basic implementation of accounts, which is a union of normal and escrow accounts
+ Represented via records.
+*)
+
+EXTENDS identifiers
+
+CONSTANT
+ AccountIds
+
+NullAccount == [
+ port |-> NullId,
+ channel |-> NullId,
+ id |-> NullId
+]
+
+Accounts == [
+ port: Identifiers,
+ channel: Identifiers,
+ id: AccountIds
+]
+
+MakeEscrowAccount(port, channel) == [
+ port |-> port,
+ channel |-> channel,
+ id |-> NullId
+]
+
+MakeAccount(accountId) == [
+ port |-> NullId,
+ channel |-> NullId,
+ id |-> accountId
+]
+
+
+ACCOUNT == INSTANCE account
+AccountTypeOK == ACCOUNT!AccountTypeOK
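+
+\* Illustrative values (examples only):
+\*   MakeEscrowAccount("transfer", "channel-0")
+\*     = [ port |-> "transfer", channel |-> "channel-0", id |-> NullId ]
+\*   MakeAccount("a1") = [ port |-> NullId, channel |-> NullId, id |-> "a1" ]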
+
+
+=============================================================================
+\* Modification History
+\* Last modified Thu Nov 19 18:21:46 CET 2020 by c
+\* Last modified Thu Nov 05 14:49:10 CET 2020 by andrey
+\* Created Thu Nov 05 13:22:40 CET 2020 by andrey
diff --git a/applications/transfer/keeper/relay_model/apalache-to-relay-test.json b/applications/transfer/keeper/relay_model/apalache-to-relay-test.json
new file mode 100644
index 00000000..c8d70a33
--- /dev/null
+++ b/applications/transfer/keeper/relay_model/apalache-to-relay-test.json
@@ -0,0 +1,100 @@
+{
+ "description": "Transforms an Apalache counterexample into the test for ICS20 Token Transfer OnRecvPacket",
+ "usage": "jsonatr --use apalache-to-recv-test.json --in counterexample.json --out recv-test.json",
+ "input": [
+ {
+ "name": "history",
+ "description": "extract history from the last state of Apalache CE",
+ "kind": "INLINE",
+ "source": "$.declarations[-2].body.and..[?(@.eq == 'history')].arg.atat..arg.record"
+ },
+ {
+ "name": "bankRecordToBalance",
+ "description": "",
+ "kind": "INLINE",
+ "source": {
+ "address": [
+ "$.colonGreater.tuple[0]..[?(@.key.str == 'port')].value.str | unwrap",
+ "$.colonGreater.tuple[0]..[?(@.key.str == 'channel')].value.str | unwrap",
+ "$.colonGreater.tuple[0]..[?(@.key.str == 'id')].value.str | unwrap"
+ ],
+ "denom": [
+ "$.colonGreater.tuple[1]..[?(@.key.str == 'port')].value.str | unwrap",
+ "$.colonGreater.tuple[1]..[?(@.key.str == 'channel')].value.str | unwrap",
+ "$.colonGreater.tuple[1]..[?(@.key.str == 'denom')].value.str | unwrap"
+ ],
+ "amount": "$.arg | unwrap"
+ }
+ },
+ {
+ "name": "bankBefore",
+ "description": "extract bankBefore from the history state",
+ "kind": "INLINE",
+ "source": "$..[?(@.key.str == 'bankBefore')].value.atat | unwrap | map(bankRecordToBalance)"
+ },
+ {
+ "name": "bankAfter",
+ "description": "extract bankAfter from the history state",
+ "kind": "INLINE",
+ "source": "$..[?(@.key.str == 'bankAfter')].value.atat | unwrap | map(bankRecordToBalance)"
+ },
+ {
+ "name": "packet",
+ "description": "extract packet from the history state",
+ "kind": "INLINE",
+ "source": "$..[?(@.key.str == 'packet')].value.record"
+ },
+ {
+ "name": "packetData",
+ "description": "extract bankAfter from the history state",
+ "kind": "INLINE",
+ "source": "$..[?(@.key.str == 'data')].value.record"
+ },
+ {
+ "name": "packetDataDenom",
+ "description": "extract bankAfter from the history state",
+ "kind": "INLINE",
+ "source": "$..[?(@.key.str == 'data')].value.record.[?(@.key.str == 'denomTrace')].value.record"
+ },
+ {
+ "name": "packetRecord",
+ "description": "decompose packet",
+ "kind": "INLINE",
+ "source": {
+ "sourceChannel" : "$.[?(@.key.str == 'sourceChannel')].value.str | unwrap",
+ "sourcePort" : "$.[?(@.key.str == 'sourcePort')].value.str | unwrap",
+ "destChannel" : "$.[?(@.key.str == 'destChannel')].value.str | unwrap",
+ "destPort" : "$.[?(@.key.str == 'destPort')].value.str | unwrap",
+ "data": {
+ "sender": "$packetData.[?(@.key.str == 'sender')].value.str | unwrap",
+ "receiver": "$packetData.[?(@.key.str == 'receiver')].value.str | unwrap",
+ "amount": "$packetData.[?(@.key.str == 'amount')].value | unwrap",
+ "denom": [
+ "$packetDataDenom.[?(@.key.str == 'port')].value.str | unwrap",
+ "$packetDataDenom.[?(@.key.str == 'channel')].value.str | unwrap",
+ "$packetDataDenom.[?(@.key.str == 'denom')].value.str | unwrap"
+ ]
+ }
+ }
+ },
+ {
+ "name": "handler",
+ "description": "extract handler from the history state",
+ "kind": "INLINE",
+ "source": "$..[?(@.key.str == 'handler')].value.str"
+ },
+ {
+ "name": "historyState",
+ "description": "decompose single history state",
+ "kind": "INLINE",
+ "source": {
+ "packet": "$packet | unwrap | packetRecord",
+ "handler": "$handler | unwrap",
+ "bankBefore": "$bankBefore",
+ "bankAfter": "$bankAfter",
+ "error": "$..[?(@.key.str == 'error')].value | unwrap"
+ }
+ }
+ ],
+ "output": "$history[1:] | map(historyState)"
+}
\ No newline at end of file
diff --git a/applications/transfer/keeper/relay_model/apalache-to-relay-test2.json b/applications/transfer/keeper/relay_model/apalache-to-relay-test2.json
new file mode 100644
index 00000000..a2c821c4
--- /dev/null
+++ b/applications/transfer/keeper/relay_model/apalache-to-relay-test2.json
@@ -0,0 +1,104 @@
+{
+ "description": "Transforms an Apalache counterexample into the test for ICS20 Token Transfer OnRecvPacket",
+ "usage": "jsonatr --use apalache-to-recv-test.json --in counterexample.json --out recv-test.json",
+ "input": [
+ {
+ "name": "history",
+ "description": "extract history from the last state of Apalache CE",
+ "kind": "INLINE",
+ "source": "$.declarations[-2].body.and..[?(@.eq == 'history')].arg.atat..arg.record"
+ },
+ {
+ "name": "bankRecordToBalance",
+ "description": "",
+ "kind": "INLINE",
+ "source": {
+ "address": [
+ "$.colonGreater.tuple[0]..[?(@.key.str == 'port')].value.str | unwrap",
+ "$.colonGreater.tuple[0]..[?(@.key.str == 'channel')].value.str | unwrap",
+ "$.colonGreater.tuple[0]..[?(@.key.str == 'id')].value.str | unwrap"
+ ],
+ "denom": [
+ "$.colonGreater.tuple[1]..[?(@.key.str == 'prefix1')].value..[?(@.key.str == 'port')].value.str | unwrap",
+ "$.colonGreater.tuple[1]..[?(@.key.str == 'prefix1')].value..[?(@.key.str == 'channel')].value.str | unwrap",
+ "$.colonGreater.tuple[1]..[?(@.key.str == 'prefix0')].value..[?(@.key.str == 'port')].value.str | unwrap",
+ "$.colonGreater.tuple[1]..[?(@.key.str == 'prefix0')].value..[?(@.key.str == 'channel')].value.str | unwrap",
+ "$.colonGreater.tuple[1]..[?(@.key.str == 'denom')].value.str | unwrap"
+ ],
+ "amount": "$.arg | unwrap"
+ }
+ },
+ {
+ "name": "bankBefore",
+ "description": "extract bankBefore from the history state",
+ "kind": "INLINE",
+ "source": "$..[?(@.key.str == 'bankBefore')].value.atat | unwrap | map(bankRecordToBalance)"
+ },
+ {
+ "name": "bankAfter",
+ "description": "extract bankAfter from the history state",
+ "kind": "INLINE",
+ "source": "$..[?(@.key.str == 'bankAfter')].value.atat | unwrap | map(bankRecordToBalance)"
+ },
+ {
+ "name": "packet",
+ "description": "extract packet from the history state",
+ "kind": "INLINE",
+ "source": "$..[?(@.key.str == 'packet')].value.record"
+ },
+ {
+ "name": "packetData",
+ "description": "extract bankAfter from the history state",
+ "kind": "INLINE",
+ "source": "$..[?(@.key.str == 'data')].value.record"
+ },
+ {
+ "name": "packetDataDenom",
+ "description": "extract bankAfter from the history state",
+ "kind": "INLINE",
+ "source": "$..[?(@.key.str == 'data')].value.record.[?(@.key.str == 'denomTrace')].value.record"
+ },
+ {
+ "name": "packetRecord",
+ "description": "decompose packet",
+ "kind": "INLINE",
+ "source": {
+ "sourceChannel" : "$.[?(@.key.str == 'sourceChannel')].value.str | unwrap",
+ "sourcePort" : "$.[?(@.key.str == 'sourcePort')].value.str | unwrap",
+ "destChannel" : "$.[?(@.key.str == 'destChannel')].value.str | unwrap",
+ "destPort" : "$.[?(@.key.str == 'destPort')].value.str | unwrap",
+ "data": {
+ "sender": "$packetData.[?(@.key.str == 'sender')].value.str | unwrap",
+ "receiver": "$packetData.[?(@.key.str == 'receiver')].value.str | unwrap",
+ "amount": "$packetData.[?(@.key.str == 'amount')].value | unwrap",
+ "denom": [
+ "$packetDataDenom.[?(@.key.str == 'prefix1')].value..[?(@.key.str == 'port')].value.str | unwrap",
+ "$packetDataDenom.[?(@.key.str == 'prefix1')].value..[?(@.key.str == 'channel')].value.str | unwrap",
+ "$packetDataDenom.[?(@.key.str == 'prefix0')].value..[?(@.key.str == 'port')].value.str | unwrap",
+ "$packetDataDenom.[?(@.key.str == 'prefix0')].value..[?(@.key.str == 'channel')].value.str | unwrap",
+ "$packetDataDenom.[?(@.key.str == 'denom')].value.str | unwrap"
+ ]
+ }
+ }
+ },
+ {
+ "name": "handler",
+ "description": "extract handler from the history state",
+ "kind": "INLINE",
+ "source": "$..[?(@.key.str == 'handler')].value.str"
+ },
+ {
+ "name": "historyState",
+ "description": "decompose single history state",
+ "kind": "INLINE",
+ "source": {
+ "packet": "$packet | unwrap | packetRecord",
+ "handler": "$handler | unwrap",
+ "bankBefore": "$bankBefore",
+ "bankAfter": "$bankAfter",
+ "error": "$..[?(@.key.str == 'error')].value | unwrap"
+ }
+ }
+ ],
+ "output": "$history[1:] | map(historyState)"
+}
\ No newline at end of file
diff --git a/applications/transfer/keeper/relay_model/denom.tla b/applications/transfer/keeper/relay_model/denom.tla
new file mode 100644
index 00000000..f729e7e1
--- /dev/null
+++ b/applications/transfer/keeper/relay_model/denom.tla
@@ -0,0 +1,50 @@
+-------------------------- MODULE denom ----------------------------
+
+(**
+ The denomination traces interface; please ignore the definition bodies.
+*)
+
+EXTENDS identifiers
+
+CONSTANT
+ Denoms
+
+\* A non-denomination-trace
+NullDenomTrace == "NullDenomTrace"
+
+\* All denomination traces
+DenomTraces == {NullDenomTrace}
+
+\* Make a new denomination trace from the port/channel prefix and the basic denom
+MakeDenomTrace(port, channel, denom) == NullDenomTrace
+
+\* Get the denomination trace port
+GetPort(trace) == NullId
+
+\* Get the denomination trace channel
+GetChannel(trace) == NullId
+
+\* Get the denomination trace basic denomination
+GetDenom(trace) == NullDenomTrace
+
+\* Is this denomination trace a native denomination, or is it a prefixed trace
+\* Note that those cases are exclusive, but not exhaustive
+IsNativeDenomTrace(trace) == GetPort(trace) = NullId /\ GetChannel(trace) = NullId
+IsPrefixedDenomTrace(trace) == GetPort(trace) /= NullId /\ GetChannel(trace) /= NullId
+
+DenomTypeOK ==
+ /\ NullDenomTrace \in DenomTraces
+ /\ \A p \in Identifiers, c \in Identifiers, d \in Denoms:
+ MakeDenomTrace(p, c, d) \in DenomTraces
+ /\ \A t \in DenomTraces:
+ /\ GetPort(t) \in Identifiers
+ /\ GetChannel(t) \in Identifiers
+ /\ GetDenom(t) \in DenomTraces
+
+
+
+
+=============================================================================
+\* Modification History
+\* Last modified Thu Nov 05 15:49:23 CET 2020 by andrey
+\* Created Thu Nov 05 13:22:40 CET 2020 by andrey
diff --git a/applications/transfer/keeper/relay_model/denom_record.tla b/applications/transfer/keeper/relay_model/denom_record.tla
new file mode 100644
index 00000000..2eb0d06f
--- /dev/null
+++ b/applications/transfer/keeper/relay_model/denom_record.tla
@@ -0,0 +1,53 @@
+-------------------------- MODULE denom_record ----------------------------
+
+(**
+ The most basic implementation of denomination traces that allows only one-step sequences
+ Represented via records
+*)
+
+EXTENDS identifiers
+
+CONSTANT
+ Denoms
+
+MaxDenomLength == 3
+
+DenomTraces == [
+ port: Identifiers,
+ channel: Identifiers,
+ denom: Denoms
+]
+
+NullDenomTrace == [
+ port |-> NullId,
+ channel |-> NullId,
+ denom |-> NullId
+]
+
+GetPort(trace) == trace.port
+GetChannel(trace) == trace.channel
+GetDenom(trace) == trace.denom
+
+IsNativeDenomTrace(trace) == GetPort(trace) = NullId /\ GetChannel(trace) = NullId /\ GetDenom(trace) /= NullId
+IsPrefixedDenomTrace(trace) == GetPort(trace) /= NullId /\ GetChannel(trace) /= NullId /\ GetDenom(trace) /= NullId
+
+ExtendDenomTrace(port, channel, trace) ==
+ IF GetPort(trace) = NullId /\ GetChannel(trace) = NullId
+ THEN
+ [
+ port |-> port,
+ channel |-> channel,
+ denom |-> trace.denom
+ ]
+ ELSE
+ NullDenomTrace
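+
+\* For example (values only):
+\*   ExtendDenomTrace("transfer", "channel-0",
+\*                    [ port |-> NullId, channel |-> NullId, denom |-> "atom" ])
+\*     = [ port |-> "transfer", channel |-> "channel-0", denom |-> "atom" ]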
+
+
+DENOM == INSTANCE denom
+DenomTypeOK == DENOM!DenomTypeOK
+
+
+=============================================================================
+\* Modification History
+\* Last modified Thu Nov 05 16:41:47 CET 2020 by andrey
+\* Created Thu Nov 05 13:22:40 CET 2020 by andrey
diff --git a/applications/transfer/keeper/relay_model/denom_record2.tla b/applications/transfer/keeper/relay_model/denom_record2.tla
new file mode 100644
index 00000000..a49d6c98
--- /dev/null
+++ b/applications/transfer/keeper/relay_model/denom_record2.tla
@@ -0,0 +1,114 @@
+-------------------------- MODULE denom_record2 ----------------------------
+
+(**
+ The implementation of denomination traces that allows one- or two-step sequences
+ Represented via records
+*)
+
+EXTENDS identifiers
+
+CONSTANT
+ Denoms
+
+MaxDenomLength == 5
+
+DenomPrefixes == [
+ port: Identifiers,
+ channel: Identifiers
+]
+
+NullDenomPrefix == [
+ port |-> NullId,
+ channel |-> NullId
+]
+
+MakeDenomPrefix(port, channel) == [
+ port |-> port,
+ channel |-> channel
+]
+
+IsValidDenomPrefix(prefix) ==
+ /\ prefix.port /= NullId
+ /\ prefix.channel /= NullId
+
+DenomTraces == [
+ prefix1: DenomPrefixes, \* the most recent prefix
+ prefix0: DenomPrefixes, \* the deepest prefix
+ denom: Denoms
+]
+
+NullDenomTrace == [
+ prefix1 |-> NullDenomPrefix,
+ prefix0 |-> NullDenomPrefix,
+ denom |-> NullId
+]
+
+
+TraceLen(trace) ==
+ IF trace.prefix0 = NullDenomPrefix
+ THEN 1
+ ELSE IF trace.prefix1 = NullDenomPrefix
+ THEN 3
+ ELSE 5
+
+LatestPrefix(trace) ==
+ IF trace.prefix0 = NullDenomPrefix
+ THEN NullDenomPrefix
+ ELSE IF trace.prefix1 = NullDenomPrefix
+ THEN trace.prefix0
+ ELSE trace.prefix1
+
+
+ExtendDenomTrace(port, channel, trace) ==
+ IF trace.prefix0 = NullDenomPrefix
+ THEN [
+ prefix1 |-> NullDenomPrefix,
+ prefix0 |-> MakeDenomPrefix(port, channel),
+ denom |-> trace.denom
+ ]
+ ELSE IF trace.prefix1 = NullDenomPrefix
+ THEN [
+ prefix1 |-> MakeDenomPrefix(port, channel),
+ prefix0 |-> trace.prefix0,
+ denom |-> trace.denom
+ ]
+ ELSE NullDenomTrace \* can extend only for two steps
+
+ReduceDenomTrace(trace) ==
+ IF trace.prefix1 /= NullDenomPrefix
+ THEN [
+ prefix1 |-> NullDenomPrefix,
+ prefix0 |-> trace.prefix0,
+ denom |-> trace.denom
+ ]
+ ELSE IF trace.prefix0 /= NullDenomPrefix
+ THEN [
+ prefix1 |-> NullDenomPrefix,
+ prefix0 |-> NullDenomPrefix,
+ denom |-> trace.denom
+ ]
+ ELSE NullDenomTrace \* cannot reduce further
+
+GetPort(trace) == LatestPrefix(trace).port
+GetChannel(trace) == LatestPrefix(trace).channel
+GetDenom(trace) == trace.denom
+
+IsValidDenomTrace(trace) ==
+ /\ GetDenom(trace) /= NullId
+ /\ IF IsValidDenomPrefix(trace.prefix1)
+ THEN IsValidDenomPrefix(trace.prefix0)
+ ELSE
+ /\ trace.prefix1 = NullDenomPrefix
+ /\ (IsValidDenomPrefix(trace.prefix0) \/ trace.prefix0 = NullDenomPrefix)
+
+IsNativeDenomTrace(trace) == LatestPrefix(trace) = NullDenomPrefix /\ GetDenom(trace) /= NullId
+IsPrefixedDenomTrace(trace) == LatestPrefix(trace) /= NullDenomPrefix /\ GetDenom(trace) /= NullId
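+
+\* Illustrative two-step trace and its reduction (values only):
+\*   LET t == [ prefix1 |-> MakeDenomPrefix("transfer", "channel-1"),
+\*              prefix0 |-> MakeDenomPrefix("transfer", "channel-0"),
+\*              denom   |-> "atom" ]
+\*   IN TraceLen(t) = 5 /\ ReduceDenomTrace(t).prefix1 = NullDenomPrefix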
+
+DENOM == INSTANCE denom
+DenomTypeOK == DENOM!DenomTypeOK
+
+
+=============================================================================
+\* Modification History
+\* Last modified Fri Dec 04 10:38:10 CET 2020 by andrey
+\* Created Fri Dec 04 10:22:10 CET 2020 by andrey
diff --git a/applications/transfer/keeper/relay_model/denom_sequence.tla b/applications/transfer/keeper/relay_model/denom_sequence.tla
new file mode 100644
index 00000000..29b5f4ed
--- /dev/null
+++ b/applications/transfer/keeper/relay_model/denom_sequence.tla
@@ -0,0 +1,47 @@
+-------------------------- MODULE denom_sequence ----------------------------
+
+(**
+ The implementation of denomination traces via sequences
+*)
+
+EXTENDS Integers, Sequences, identifiers
+
+CONSTANT
+ Denoms,
+ MaxDenomLength
+
+
+a <: b == a
+AsAddress(seq) == seq <: Seq(STRING)
+
+UNROLL_DEFAULT_GenSeq == { AsAddress(<< >>) }
+UNROLL_TIMES_GenSeq == 5
+
+\* This produces denomination sequences up to the given bound
+RECURSIVE GenSeq(_)
+GenSeq(n) ==
+ IF n = 0 THEN { AsAddress(<< >>) }
+ ELSE LET Shorter == GenSeq(n-1) IN
+ { Append(s,x): x \in Identifiers, s \in Shorter } \union Shorter
+
+DenomTraces == GenSeq(MaxDenomLength)
+
+ExtendDenomTrace(port, channel, denom) == AsAddress(<<port, channel>>) \o denom
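+
+\* For example: ExtendDenomTrace("transfer", "channel-0", AsAddress(<<"atom">>))
+\*   = AsAddress(<<"transfer", "channel-0", "atom">>)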
+
+GetPort(trace) == trace[1]
+GetChannel(trace) == trace[2]
+GetDenom(trace) == SubSeq(trace, 3, Len(trace))
+
+NullDenomTrace == AsAddress(<< >>)
+
+IsNativeDenomTrace(trace) == GetPort(trace) = NullId /\ GetChannel(trace) = NullId /\ GetDenom(trace) /= NullDenomTrace
+IsPrefixedDenomTrace(trace) == GetPort(trace) /= NullId /\ GetChannel(trace) /= NullId /\ GetDenom(trace) /= NullDenomTrace
+
+DENOM == INSTANCE denom
+DenomTypeOK == DENOM!DenomTypeOK
+
+
+=============================================================================
+\* Modification History
+\* Last modified Thu Nov 05 15:29:21 CET 2020 by andrey
+\* Created Thu Nov 05 13:22:40 CET 2020 by andrey
diff --git a/applications/transfer/keeper/relay_model/identifiers.tla b/applications/transfer/keeper/relay_model/identifiers.tla
new file mode 100644
index 00000000..089f276d
--- /dev/null
+++ b/applications/transfer/keeper/relay_model/identifiers.tla
@@ -0,0 +1,10 @@
+-------------------------- MODULE identifiers ----------------------------
+
+CONSTANT
+ Identifiers,
+ NullId
+
+=============================================================================
+\* Modification History
+\* Last modified Thu Nov 05 13:23:12 CET 2020 by andrey
+\* Created Thu Nov 05 13:22:40 CET 2020 by andrey
diff --git a/applications/transfer/keeper/relay_model/relay.tla b/applications/transfer/keeper/relay_model/relay.tla
new file mode 100644
index 00000000..029df3d7
--- /dev/null
+++ b/applications/transfer/keeper/relay_model/relay.tla
@@ -0,0 +1,278 @@
+-------------------------- MODULE relay ----------------------------
+(**
+ * A primitive model for account arithmetic and token movement
+ * of the Cosmos SDK ICS20 Token Transfer
+ * We completely abstract away many details,
+ * and want to focus on a minimal spec useful for testing
+ *
+ * We also try to make the model modular in that it uses
+ * denomination traces and accounts via abstract interfaces,
+ * outlined in denom.tla and account.tla
+ *)
+
+EXTENDS Integers, FiniteSets, Sequences, identifiers, denom_record2, account_record
+
+CONSTANT
+ MaxAmount
+
+VARIABLE
+ error,
+ bank,
+ p, \* we want to start with generating single packets,
+ handler,
+ history,
+ count
+
+Amounts == 0..MaxAmount
+
+GetSourceEscrowAccount(packet) == MakeEscrowAccount(packet.sourcePort, packet.sourceChannel)
+GetDestEscrowAccount(packet) == MakeEscrowAccount(packet.destPort, packet.destChannel)
+
+FungibleTokenPacketData == [
+ sender: AccountIds,
+ receiver: AccountIds,
+ denomTrace: DenomTraces,
+ amount: Amounts
+]
+
+Packets == [
+ \* We abstract those packet fields away
+ \* sequence: uint64
+ \* timeoutHeight: Height
+ \* timeoutTimestamp: uint64
+ sourcePort: Identifiers,
+ sourceChannel: Identifiers,
+ destPort: Identifiers,
+ destChannel: Identifiers,
+ data: FungibleTokenPacketData
+]
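+
+\* Illustrative element of Packets (values only, drawn from the constants
+\* instantiated in relay_tests.tla):
+\*   [ sourcePort |-> "transfer", sourceChannel |-> "channel-0",
+\*     destPort |-> "transfer", destChannel |-> "channel-1",
+\*     data |-> [ sender |-> "a1", receiver |-> "a2", amount |-> 2,
+\*                denomTrace |-> [ prefix1 |-> NullDenomPrefix,
+\*                                 prefix0 |-> NullDenomPrefix,
+\*                                 denom |-> "atom" ] ] ]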
+
+
+IsSource(packet) ==
+ /\ GetPort(packet.data.denomTrace) = packet.sourcePort
+ /\ GetChannel(packet.data.denomTrace) = packet.sourceChannel
+
+\* This function models the port and channel checks that happen when the packet is sent
+IsValidSendChannel(packet) ==
+ /\ packet.sourcePort = "transfer"
+ /\ (packet.sourceChannel = "channel-0" \/ packet.sourceChannel = "channel-1")
+ /\ packet.destPort = "transfer"
+ /\ packet.destChannel = "channel-0"
+
+\* This function models the port and channel checks that happen when relay gets the packet
+IsValidRecvChannel(packet) ==
+ /\ packet.sourcePort = "transfer"
+ /\ packet.sourceChannel = "channel-0"
+ /\ packet.destPort = "transfer"
+ /\ (packet.destChannel = "channel-0" \/ packet.destChannel = "channel-1")
+
+
+WellFormedPacket(packet) ==
+ /\ packet.sourcePort /= NullId
+ /\ packet.sourceChannel /= NullId
+ /\ packet.destPort /= NullId
+ /\ packet.destChannel /= NullId
+
+BankWithAccount(abank, account, denom) ==
+ IF <<account, denom>> \in DOMAIN abank
+ THEN abank
+ ELSE [x \in DOMAIN bank \union { <<account, denom>> }
+ |-> IF x = <<account, denom>>
+ THEN 0
+ ELSE bank[x] ]
+
+IsKnownDenomTrace(trace) ==
+ \E account \in Accounts :
+ <<account, trace>> \in DOMAIN bank
+
+
+SendTransferPre(packet, pbank) ==
+ LET data == packet.data
+ trace == data.denomTrace
+ sender == data.sender
+ amount == data.amount
+ escrow == GetSourceEscrowAccount(packet)
+ IN
+ /\ WellFormedPacket(packet)
+ /\ IsValidSendChannel(packet)
+ /\ IsNativeDenomTrace(trace) \/ (IsValidDenomTrace(trace) /\ IsKnownDenomTrace(trace))
+ /\ data.sender /= NullId
+ /\ <<escrow, trace>> \in DOMAIN pbank
+ /\ \/ amount = 0 \* SendTransfer actually allows for 0 amount
+ \/ <<MakeAccount(sender), trace>> \in DOMAIN pbank /\ bank[MakeAccount(sender), trace] >= amount
+
+SendTransferNext(packet) ==
+ LET data == packet.data IN
+ LET denom == GetDenom(data.denomTrace) IN
+ LET amount == data.amount IN
+ LET sender == data.sender IN
+ LET escrow == GetSourceEscrowAccount(packet) IN
+ LET bankwithescrow == BankWithAccount(bank, escrow, data.denomTrace) IN
+ IF SendTransferPre(packet,bankwithescrow)
+ THEN
+ /\ error' = FALSE
+ \*/\ IBCsend(chain, packet)
+ /\ IF ~IsSource(packet)
+ \* This is how the check is encoded in ICS20 and the implementation.
+ \* The meaning is "IF denom = AsAddress(NativeDenom)" because of the following argument:
+ \* observe that due to the disjunction in SendTransferPre(packet), we have
+ \* ~IsSource(packet) /\ SendTransferPre(packet) => denom = AsAddress(NativeDenom)
+ THEN
+ \* tokens are from this chain
+ \* transfer tokens from sender into escrow account
+ bank' = [bankwithescrow EXCEPT ![MakeAccount(sender), data.denomTrace] = @ - amount,
+ ![escrow, data.denomTrace] = @ + amount]
+ ELSE
+ \* tokens are from other chain. We forward them.
+ \* burn sender's money
+ bank' = [bankwithescrow EXCEPT ![MakeAccount(sender), data.denomTrace] = @ - amount]
+ ELSE
+ /\ error' = TRUE
+ /\ UNCHANGED bank
+
+
+OnRecvPacketPre(packet) ==
+ LET data == packet.data
+ trace == data.denomTrace
+ denom == GetDenom(trace)
+ amount == data.amount
+ IN
+ /\ WellFormedPacket(packet)
+ /\ IsValidRecvChannel(packet)
+ /\ IsValidDenomTrace(trace)
+ /\ amount > 0
+ \* if there is no receiver account, it is created by the bank
+ /\ data.receiver /= NullId
+ /\ IsSource(packet) =>
+ LET escrow == GetDestEscrowAccount(packet) IN
+ LET denomTrace == ReduceDenomTrace(trace) IN
+ /\ <<escrow, denomTrace>> \in DOMAIN bank
+ /\ bank[escrow, denomTrace] >= amount
+
+
+OnRecvPacketNext(packet) ==
+ LET data == packet.data IN
+ LET trace == data.denomTrace IN
+ LET denom == GetDenom(trace) IN
+ LET amount == data.amount IN
+ LET receiver == data.receiver IN
+ /\ IF OnRecvPacketPre(packet)
+ THEN
+ \* This condition is necessary so that denomination traces do not exceed the maximum length
+ /\ (IsSource(packet) \/ TraceLen(trace) < MaxDenomLength)
+ /\ error' = FALSE
+ /\ IF IsSource(packet)
+ THEN
+ \* transfer from the escrow account to the receiver account
+ LET denomTrace == ReduceDenomTrace(trace) IN
+ LET escrow == GetDestEscrowAccount(packet) IN
+ LET bankwithreceiver == BankWithAccount(bank, MakeAccount(receiver), denomTrace) IN
+ bank' = [bankwithreceiver
+ EXCEPT ![MakeAccount(receiver), denomTrace] = @ + amount,
+ ![escrow, denomTrace] = @ - amount]
+ ELSE
+ \* create new tokens with new denomination and transfer it to the receiver account
+ LET denomTrace == ExtendDenomTrace(packet.destPort, packet.destChannel, trace) IN
+ LET bankwithreceiver ==
+ BankWithAccount(bank, MakeAccount(receiver), denomTrace) IN
+ bank' = [bankwithreceiver
+ EXCEPT ![MakeAccount(receiver), denomTrace] = @ + amount]
+ ELSE
+ /\ error' = TRUE
+ /\ UNCHANGED bank
+
+
+OnTimeoutPacketPre(packet) ==
+ LET data == packet.data
+ trace == data.denomTrace
+ denom == GetDenom(trace)
+ amount == data.amount
+ IN
+ /\ WellFormedPacket(packet)
+ /\ IsValidSendChannel(packet)
+ /\ IsValidDenomTrace(trace)
+ /\ data.sender /= NullId
+ /\ ~IsSource(packet) =>
+ LET escrow == GetSourceEscrowAccount(packet)
+ IN /\ <<escrow, trace>> \in DOMAIN bank
+ /\ bank[escrow, trace] >= amount
+
+
+OnTimeoutPacketNext(packet) ==
+ LET data == packet.data IN
+ LET trace == data.denomTrace IN
+ LET denom == GetDenom(data.denomTrace) IN
+ LET amount == data.amount IN
+ LET sender == data.sender IN
+ LET bankwithsender == BankWithAccount(bank, MakeAccount(sender), trace) IN
+ IF OnTimeoutPacketPre(packet)
+ THEN
+ /\ error' = FALSE
+ /\ IF ~IsSource(packet)
+ THEN
+ \* transfer from the escrow account to the sender account
+ \* LET denomsuffix == SubSeq(denom, 3, Len(denom)) IN
+ LET escrow == GetSourceEscrowAccount(packet) IN
+ bank' = [bankwithsender
+ EXCEPT ![MakeAccount(sender), trace] = @ + amount,
+ ![escrow, trace] = @ - amount]
+ ELSE
+ \* mint back the money
+ bank' = [bankwithsender EXCEPT ![MakeAccount(sender), trace] = @ + amount]
+
+ ELSE
+ /\ error' = TRUE
+ /\ UNCHANGED bank
+
+
+OnAcknowledgementPacketResultNext(packet) ==
+ IF WellFormedPacket(packet)
+ THEN
+ /\ error' = FALSE
+ /\ UNCHANGED bank
+ ELSE
+ /\ error' = TRUE
+ /\ UNCHANGED bank
+
+
+OnAcknowledgementPacketErrorNext(packet) ==
+ OnTimeoutPacketNext(packet)
+
+Init ==
+ /\ p \in Packets
+ /\ bank = [ x \in {<<NullAccount, NullDenomTrace>>} |-> 0 ]
+ /\ count = 0
+ /\ history = [
+ n \in {0} |-> [
+ error |-> FALSE,
+ packet |-> p,
+ handler |-> "",
+ bankBefore |-> bank,
+ bankAfter |-> bank
+ ]
+ ]
+ /\ error = FALSE
+ /\ handler = ""
+
+Next ==
+ /\ p' \in Packets
+ /\ count'= count + 1
+ /\
+ \/ (SendTransferNext(p) /\ handler' = "SendTransfer")
+ \/ (OnRecvPacketNext(p) /\ handler' = "OnRecvPacket")
+ \/ (OnTimeoutPacketNext(p) /\ handler' = "OnTimeoutPacket")
+ \/ (OnAcknowledgementPacketResultNext(p) /\ handler' = "OnRecvAcknowledgementResult")
+ \/ (OnAcknowledgementPacketErrorNext(p) /\ handler' = "OnRecvAcknowledgementError")
+ /\ history' = [ n \in DOMAIN history \union {count'} |->
+ IF n = count' THEN
+ [ packet |-> p, handler |-> handler', error |-> error', bankBefore |-> bank, bankAfter |-> bank' ]
+ ELSE history[n]
+ ]
+
+=============================================================================
+\* Modification History
+\* Last modified Wed Dec 2 10:15:45 CET 2020 by andrey
+\* Last modified Fri Nov 20 12:37:38 CET 2020 by c
+\* Last modified Thu Nov 05 20:56:37 CET 2020 by andrey
+\* Last modified Fri Oct 30 21:52:38 CET 2020 by widder
+\* Created Thu Oct 29 20:45:55 CET 2020 by andrey
diff --git a/applications/transfer/keeper/relay_model/relay_tests.tla b/applications/transfer/keeper/relay_model/relay_tests.tla
new file mode 100644
index 00000000..7e757752
--- /dev/null
+++ b/applications/transfer/keeper/relay_model/relay_tests.tla
@@ -0,0 +1,96 @@
+-------------------------- MODULE relay_tests ----------------------------
+
+EXTENDS Integers, FiniteSets
+
+Identifiers == {"", "transfer", "channel-0", "channel-1", "cosmos-hub", "ethereum-hub", "bitcoin-hub"}
+NullId == ""
+MaxAmount == 5
+Denoms == {"", "atom", "eth", "btc" }
+AccountIds == {"", "a1", "a2", "a3" }
+
+VARIABLES error, bank, p, count, history, handler
+
+INSTANCE relay
+
+\************************** Tests ******************************
+
+\* Generic test for handler pass
+TestHandlerPass(handlerName) ==
+ \E s \in DOMAIN history :
+ /\ history[s].handler = handlerName
+ /\ history[s].error = FALSE
+ /\ history[s].packet.data.amount > 0
+
+\* Generic test for handler fail
+TestHandlerFail(handlerName) ==
+ \E s \in DOMAIN history :
+ /\ history[s].handler = handlerName
+ /\ history[s].error = TRUE
+ /\ history[s].packet.data.amount > 0
+
+TestSendTransferPass == TestHandlerPass("SendTransfer")
+TestSendTransferPassInv == ~TestSendTransferPass
+
+TestSendTransferFail == TestHandlerFail("SendTransfer")
+TestSendTransferFailInv == ~TestSendTransferFail
+
+TestOnRecvPacketPass == TestHandlerPass("OnRecvPacket")
+TestOnRecvPacketPassInv == ~TestOnRecvPacketPass
+
+TestOnRecvPacketFail == TestHandlerFail("OnRecvPacket")
+TestOnRecvPacketFailInv == ~TestOnRecvPacketFail
+
+TestOnTimeoutPass == TestHandlerPass("OnTimeoutPacket")
+TestOnTimeoutPassInv == ~TestOnTimeoutPass
+
+TestOnTimeoutFail == TestHandlerFail("OnTimeoutPacket")
+TestOnTimeoutFailInv == ~TestOnTimeoutFail
+
+TestOnRecvAcknowledgementResultPass == TestHandlerPass("OnRecvAcknowledgementResult")
+TestOnRecvAcknowledgementResultPassInv == ~TestOnRecvAcknowledgementResultPass
+
+TestOnRecvAcknowledgementResultFail == TestHandlerFail("OnRecvAcknowledgementResult")
+TestOnRecvAcknowledgementResultFailInv == ~TestOnRecvAcknowledgementResultFail
+
+TestOnRecvAcknowledgementErrorPass == TestHandlerPass("OnRecvAcknowledgementError")
+TestOnRecvAcknowledgementErrorPassInv == ~TestOnRecvAcknowledgementErrorPass
+
+TestOnRecvAcknowledgementErrorFail == TestHandlerFail("OnRecvAcknowledgementError")
+TestOnRecvAcknowledgementErrorFailInv == ~TestOnRecvAcknowledgementErrorFail
+
+Test5Packets ==
+ count >= 5
+
+Test5PacketsInv == ~Test5Packets
+
+Test5Packets2Different ==
+ /\ count >= 5
+ /\ \E s1, s2 \in DOMAIN history :
+ history[s1].handler /= history[s2].handler
+
+Test5Packets2DifferentInv == ~Test5Packets2Different
+
+Test5PacketsAllDifferent ==
+ /\ count >= 5
+ /\ \A s1, s2 \in DOMAIN history :
+ s1 /= s2 => history[s1].handler /= history[s2].handler
+
+Test5PacketsAllDifferentInv == ~Test5PacketsAllDifferent
+
+Test5PacketsAllDifferentPass ==
+ /\ Test5PacketsAllDifferent
+ /\ \A s \in DOMAIN history :
+ s > 0 =>
+ /\ history[s].error = FALSE
+ /\ history[s].packet.data.amount > 0
+
+Test5PacketsAllDifferentPassInv == ~Test5PacketsAllDifferentPass
+
+TestUnescrowTokens ==
+ \E s \in DOMAIN history :
+ /\ IsSource(history[s].packet)
+ /\ history[s].handler = "OnRecvPacket"
+ /\ history[s].error = FALSE
+TestUnescrowTokensInv == ~TestUnescrowTokens
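+
+\* The negated *Inv operators above are intended to be checked as invariants by
+\* a model checker such as Apalache; a counterexample trace violating an
+\* invariant is precisely a test run exercising the corresponding handler.
+\* For example (exact invocation may differ):
+\*   apalache-mc check --inv=TestOnRecvPacketPassInv relay_tests.tla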
+
+=============================================================================
diff --git a/applications/transfer/keeper/relay_test.go b/applications/transfer/keeper/relay_test.go
new file mode 100644
index 00000000..89058ac2
--- /dev/null
+++ b/applications/transfer/keeper/relay_test.go
@@ -0,0 +1,392 @@
+package keeper_test
+
+import (
+ "fmt"
+
+ "github.com/cosmos/cosmos-sdk/simapp"
+
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types"
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+ host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+)
+
+// test sending from chainA to chainB using both coins that originate on
+// chainA and coins that originate on chainB
+func (suite *KeeperTestSuite) TestSendTransfer() {
+ var (
+ amount sdk.Coin
+ channelA, channelB ibctesting.TestChannel
+ err error
+ )
+
+ testCases := []struct {
+ msg string
+ malleate func()
+ sendFromSource bool
+ expPass bool
+ }{
+ {"successful transfer from source chain",
+ func() {
+ _, _, connA, connB := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
+ channelA, channelB = suite.coordinator.CreateTransferChannels(suite.chainA, suite.chainB, connA, connB, channeltypes.UNORDERED)
+ amount = sdk.NewCoin(sdk.DefaultBondDenom, sdk.NewInt(100))
+ }, true, true},
+ {"successful transfer with coin from counterparty chain",
+ func() {
+ // send coin from chainA back to chainB
+ _, _, connA, connB := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
+ channelA, channelB = suite.coordinator.CreateTransferChannels(suite.chainA, suite.chainB, connA, connB, channeltypes.UNORDERED)
+ amount = types.GetTransferCoin(channelA.PortID, channelA.ID, sdk.DefaultBondDenom, 100)
+ }, false, true},
+ {"source channel not found",
+ func() {
+ // channel references wrong ID
+ _, _, connA, connB := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
+ channelA, channelB = suite.coordinator.CreateTransferChannels(suite.chainA, suite.chainB, connA, connB, channeltypes.UNORDERED)
+ channelA.ID = ibctesting.InvalidID
+ amount = sdk.NewCoin(sdk.DefaultBondDenom, sdk.NewInt(100))
+ }, true, false},
+ {"next seq send not found",
+ func() {
+ _, _, connA, connB := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
+ channelA = suite.chainA.NextTestChannel(connA, ibctesting.TransferPort)
+ channelB = suite.chainB.NextTestChannel(connB, ibctesting.TransferPort)
+ // manually create channel so next seq send is never set
+ suite.chainA.App.IBCKeeper.ChannelKeeper.SetChannel(
+ suite.chainA.GetContext(),
+ channelA.PortID, channelA.ID,
+ channeltypes.NewChannel(channeltypes.OPEN, channeltypes.ORDERED, channeltypes.NewCounterparty(channelB.PortID, channelB.ID), []string{connA.ID}, ibctesting.DefaultChannelVersion),
+ )
+ suite.chainA.CreateChannelCapability(channelA.PortID, channelA.ID)
+ amount = sdk.NewCoin(sdk.DefaultBondDenom, sdk.NewInt(100))
+ }, true, false},
+
+ // createOutgoingPacket tests
+ // - source chain
+ {"send coin failed",
+ func() {
+ _, _, connA, connB := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
+ channelA, channelB = suite.coordinator.CreateTransferChannels(suite.chainA, suite.chainB, connA, connB, channeltypes.UNORDERED)
+ amount = sdk.NewCoin("randomdenom", sdk.NewInt(100))
+ }, true, false},
+ // - receiving chain
+ {"send from module account failed",
+ func() {
+ _, _, connA, connB := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
+ channelA, channelB = suite.coordinator.CreateTransferChannels(suite.chainA, suite.chainB, connA, connB, channeltypes.UNORDERED)
+ amount = types.GetTransferCoin(channelA.PortID, channelA.ID, " randomdenom", 100)
+ }, false, false},
+ {"channel capability not found",
+ func() {
+ _, _, connA, connB := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
+ channelA, channelB = suite.coordinator.CreateTransferChannels(suite.chainA, suite.chainB, connA, connB, channeltypes.UNORDERED)
+ cap := suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+
+ // Release channel capability
+ suite.chainA.App.ScopedTransferKeeper.ReleaseCapability(suite.chainA.GetContext(), cap)
+ amount = sdk.NewCoin(sdk.DefaultBondDenom, sdk.NewInt(100))
+ }, true, false},
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+
+ suite.Run(fmt.Sprintf("Case %s", tc.msg), func() {
+ suite.SetupTest() // reset
+
+ tc.malleate()
+
+ if !tc.sendFromSource {
+ // send coin from chainB to chainA
+ coinFromBToA := sdk.NewCoin(sdk.DefaultBondDenom, sdk.NewInt(100))
+ transferMsg := types.NewMsgTransfer(channelB.PortID, channelB.ID, coinFromBToA, suite.chainB.SenderAccount.GetAddress(), suite.chainA.SenderAccount.GetAddress().String(), clienttypes.NewHeight(0, 110), 0)
+ err = suite.coordinator.SendMsg(suite.chainB, suite.chainA, channelA.ClientID, transferMsg)
+ suite.Require().NoError(err) // message committed
+
+ // receive coin on chainA from chainB
+ fungibleTokenPacket := types.NewFungibleTokenPacketData(coinFromBToA.Denom, coinFromBToA.Amount.Uint64(), suite.chainB.SenderAccount.GetAddress().String(), suite.chainA.SenderAccount.GetAddress().String())
+ packet := channeltypes.NewPacket(fungibleTokenPacket.GetBytes(), 1, channelB.PortID, channelB.ID, channelA.PortID, channelA.ID, clienttypes.NewHeight(0, 110), 0)
+
+ // get proof of packet commitment from chainB
+ packetKey := host.PacketCommitmentKey(packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence())
+ proof, proofHeight := suite.chainB.QueryProof(packetKey)
+
+ recvMsg := channeltypes.NewMsgRecvPacket(packet, proof, proofHeight, suite.chainA.SenderAccount.GetAddress())
+ err = suite.coordinator.SendMsg(suite.chainA, suite.chainB, channelB.ClientID, recvMsg)
+ suite.Require().NoError(err) // message committed
+ }
+
+ err = suite.chainA.App.TransferKeeper.SendTransfer(
+ suite.chainA.GetContext(), channelA.PortID, channelA.ID, amount,
+ suite.chainA.SenderAccount.GetAddress(), suite.chainB.SenderAccount.GetAddress().String(), clienttypes.NewHeight(0, 110), 0,
+ )
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+}
+
+// test receiving coins on chainB, both coins that originate on chainA and
+// coins that originated on chainB (source). The bulk of the testing occurs
+// in the test case for loop since setup is intensive for all cases. The
+// malleate function allows for testing invalid cases.
+func (suite *KeeperTestSuite) TestOnRecvPacket() {
+ var (
+ channelA, channelB ibctesting.TestChannel
+ trace types.DenomTrace
+ amount sdk.Int
+ receiver string
+ )
+
+ testCases := []struct {
+ msg string
+ malleate func()
+ recvIsSource bool // the receiving chain is the source of the coin originally
+ expPass bool
+ }{
+ {"success receive on source chain", func() {}, true, true},
+ {"success receive with coin from another chain as source", func() {}, false, true},
+ {"empty coin", func() {
+ trace = types.DenomTrace{}
+ amount = sdk.ZeroInt()
+ }, true, false},
+ {"invalid receiver address", func() {
+ receiver = "gaia1scqhwpgsmr6vmztaa7suurfl52my6nd2kmrudl"
+ }, true, false},
+
+ // onRecvPacket
+ // - coin from chainA
+ {"failure: mint zero coin", func() {
+ amount = sdk.ZeroInt()
+ }, false, false},
+
+ // - coin being sent back to original chain (chainB)
+ {"tries to unescrow more tokens than allowed", func() {
+ amount = sdk.NewInt(1000000)
+ }, true, false},
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+
+ suite.Run(fmt.Sprintf("Case %s", tc.msg), func() {
+ suite.SetupTest() // reset
+
+ clientA, clientB, connA, connB := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
+ channelA, channelB = suite.coordinator.CreateTransferChannels(suite.chainA, suite.chainB, connA, connB, channeltypes.UNORDERED)
+ receiver = suite.chainB.SenderAccount.GetAddress().String() // must be explicitly changed in malleate
+
+ amount = sdk.NewInt(100) // must be explicitly changed in malleate
+ seq := uint64(1)
+
+ if tc.recvIsSource {
+ // send coins from chainB to chainA, receive them, acknowledge them, and send them back to chainB
+ coinFromBToA := sdk.NewCoin(sdk.DefaultBondDenom, sdk.NewInt(100))
+ transferMsg := types.NewMsgTransfer(channelB.PortID, channelB.ID, coinFromBToA, suite.chainB.SenderAccount.GetAddress(), suite.chainA.SenderAccount.GetAddress().String(), clienttypes.NewHeight(0, 110), 0)
+ err := suite.coordinator.SendMsg(suite.chainB, suite.chainA, channelA.ClientID, transferMsg)
+ suite.Require().NoError(err) // message committed
+
+ // relay send packet
+ fungibleTokenPacket := types.NewFungibleTokenPacketData(coinFromBToA.Denom, coinFromBToA.Amount.Uint64(), suite.chainB.SenderAccount.GetAddress().String(), suite.chainA.SenderAccount.GetAddress().String())
+ packet := channeltypes.NewPacket(fungibleTokenPacket.GetBytes(), 1, channelB.PortID, channelB.ID, channelA.PortID, channelA.ID, clienttypes.NewHeight(0, 110), 0)
+ ack := channeltypes.NewResultAcknowledgement([]byte{byte(1)})
+ err = suite.coordinator.RelayPacket(suite.chainB, suite.chainA, clientB, clientA, packet, ack.GetBytes())
+ suite.Require().NoError(err) // relay committed
+
+ seq++
+
+ // NOTE: trace must be explicitly changed in malleate to test invalid cases
+ trace = types.ParseDenomTrace(types.GetPrefixedDenom(channelA.PortID, channelA.ID, sdk.DefaultBondDenom))
+ } else {
+ trace = types.ParseDenomTrace(sdk.DefaultBondDenom)
+ }
+
+ // send coin from chainA to chainB
+ transferMsg := types.NewMsgTransfer(channelA.PortID, channelA.ID, sdk.NewCoin(trace.IBCDenom(), amount), suite.chainA.SenderAccount.GetAddress(), receiver, clienttypes.NewHeight(0, 110), 0)
+ err := suite.coordinator.SendMsg(suite.chainA, suite.chainB, channelB.ClientID, transferMsg)
+ suite.Require().NoError(err) // message committed
+
+ tc.malleate()
+
+ data := types.NewFungibleTokenPacketData(trace.GetFullDenomPath(), amount.Uint64(), suite.chainA.SenderAccount.GetAddress().String(), receiver)
+ packet := channeltypes.NewPacket(data.GetBytes(), seq, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.NewHeight(0, 100), 0)
+
+ err = suite.chainB.App.TransferKeeper.OnRecvPacket(suite.chainB.GetContext(), packet, data)
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+}
+
+// TestOnAcknowledgementPacket tests that successful acknowledgement is a no-op
+// and a failure acknowledgement leads to a refund when attempting to send from chainA
+// to chainB. If the sender is the source then the denomination being refunded has no
+// trace.
+func (suite *KeeperTestSuite) TestOnAcknowledgementPacket() {
+ var (
+ successAck = channeltypes.NewResultAcknowledgement([]byte{byte(1)})
+ failedAck = channeltypes.NewErrorAcknowledgement("failed packet transfer")
+
+ channelA, channelB ibctesting.TestChannel
+ trace types.DenomTrace
+ amount sdk.Int
+ )
+
+ testCases := []struct {
+ msg string
+ ack channeltypes.Acknowledgement
+ malleate func()
+ success bool // success of ack
+ expPass bool
+ }{
+ {"success ack causes no-op", successAck, func() {
+ trace = types.ParseDenomTrace(types.GetPrefixedDenom(channelB.PortID, channelB.ID, sdk.DefaultBondDenom))
+ }, true, true},
+ {"successful refund from source chain", failedAck, func() {
+ escrow := types.GetEscrowAddress(channelA.PortID, channelA.ID)
+ trace = types.ParseDenomTrace(sdk.DefaultBondDenom)
+ coin := sdk.NewCoin(sdk.DefaultBondDenom, amount)
+
+ suite.Require().NoError(simapp.FundAccount(suite.chainA.App, suite.chainA.GetContext(), escrow, sdk.NewCoins(coin)))
+ }, false, true},
+ {"unsuccessful refund from source", failedAck,
+ func() {
+ trace = types.ParseDenomTrace(sdk.DefaultBondDenom)
+ }, false, false},
+		{"successful refund with coin from external chain", failedAck,
+ func() {
+ escrow := types.GetEscrowAddress(channelA.PortID, channelA.ID)
+ trace = types.ParseDenomTrace(types.GetPrefixedDenom(channelA.PortID, channelA.ID, sdk.DefaultBondDenom))
+ coin := sdk.NewCoin(trace.IBCDenom(), amount)
+
+ suite.Require().NoError(simapp.FundAccount(suite.chainA.App, suite.chainA.GetContext(), escrow, sdk.NewCoins(coin)))
+ }, false, true},
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+
+ suite.Run(fmt.Sprintf("Case %s", tc.msg), func() {
+ suite.SetupTest() // reset
+ _, _, _, _, channelA, channelB = suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
+ amount = sdk.NewInt(100) // must be explicitly changed
+
+ tc.malleate()
+
+ data := types.NewFungibleTokenPacketData(trace.GetFullDenomPath(), amount.Uint64(), suite.chainA.SenderAccount.GetAddress().String(), suite.chainB.SenderAccount.GetAddress().String())
+ packet := channeltypes.NewPacket(data.GetBytes(), 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.NewHeight(0, 100), 0)
+
+ preCoin := suite.chainA.App.BankKeeper.GetBalance(suite.chainA.GetContext(), suite.chainA.SenderAccount.GetAddress(), trace.IBCDenom())
+
+ err := suite.chainA.App.TransferKeeper.OnAcknowledgementPacket(suite.chainA.GetContext(), packet, data, tc.ack)
+ if tc.expPass {
+ suite.Require().NoError(err)
+ postCoin := suite.chainA.App.BankKeeper.GetBalance(suite.chainA.GetContext(), suite.chainA.SenderAccount.GetAddress(), trace.IBCDenom())
+ deltaAmount := postCoin.Amount.Sub(preCoin.Amount)
+
+ if tc.success {
+ suite.Require().Equal(int64(0), deltaAmount.Int64(), "successful ack changed balance")
+ } else {
+ suite.Require().Equal(amount, deltaAmount, "failed ack did not trigger refund")
+ }
+
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+}
+
+// TestOnTimeoutPacket tests the private refundPacket function since OnTimeoutPacket
+// is a simple wrapper over it. The actual timeout does not matter since IBC core logic
+// is not being tested. The test times out a send from chainA to chainB
+// so the refunds occur on chainA.
+func (suite *KeeperTestSuite) TestOnTimeoutPacket() {
+ var (
+ channelA, channelB ibctesting.TestChannel
+ trace types.DenomTrace
+ amount sdk.Int
+ sender string
+ )
+
+ testCases := []struct {
+ msg string
+ malleate func()
+ expPass bool
+ }{
+ {"successful timeout from sender as source chain",
+ func() {
+ escrow := types.GetEscrowAddress(channelA.PortID, channelA.ID)
+ trace = types.ParseDenomTrace(sdk.DefaultBondDenom)
+ coin := sdk.NewCoin(trace.IBCDenom(), amount)
+
+ suite.Require().NoError(simapp.FundAccount(suite.chainA.App, suite.chainA.GetContext(), escrow, sdk.NewCoins(coin)))
+ }, true},
+ {"successful timeout from external chain",
+ func() {
+ escrow := types.GetEscrowAddress(channelA.PortID, channelA.ID)
+ trace = types.ParseDenomTrace(types.GetPrefixedDenom(channelA.PortID, channelA.ID, sdk.DefaultBondDenom))
+ coin := sdk.NewCoin(trace.IBCDenom(), amount)
+
+ suite.Require().NoError(simapp.FundAccount(suite.chainA.App, suite.chainA.GetContext(), escrow, sdk.NewCoins(coin)))
+ }, true},
+ {"no balance for coin denom",
+ func() {
+ trace = types.ParseDenomTrace("bitcoin")
+ }, false},
+ {"unescrow failed",
+ func() {
+ trace = types.ParseDenomTrace(sdk.DefaultBondDenom)
+ }, false},
+ {"mint failed",
+ func() {
+ trace = types.ParseDenomTrace(types.GetPrefixedDenom(channelA.PortID, channelA.ID, sdk.DefaultBondDenom))
+ amount = sdk.OneInt()
+ sender = "invalid address"
+ }, false},
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+
+ suite.Run(fmt.Sprintf("Case %s", tc.msg), func() {
+ suite.SetupTest() // reset
+
+ _, _, connA, connB := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
+ channelA, channelB = suite.coordinator.CreateTransferChannels(suite.chainA, suite.chainB, connA, connB, channeltypes.UNORDERED)
+ amount = sdk.NewInt(100) // must be explicitly changed
+ sender = suite.chainA.SenderAccount.GetAddress().String()
+
+ tc.malleate()
+
+ data := types.NewFungibleTokenPacketData(trace.GetFullDenomPath(), amount.Uint64(), sender, suite.chainB.SenderAccount.GetAddress().String())
+ packet := channeltypes.NewPacket(data.GetBytes(), 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.NewHeight(0, 100), 0)
+
+ preCoin := suite.chainA.App.BankKeeper.GetBalance(suite.chainA.GetContext(), suite.chainA.SenderAccount.GetAddress(), trace.IBCDenom())
+
+ err := suite.chainA.App.TransferKeeper.OnTimeoutPacket(suite.chainA.GetContext(), packet, data)
+
+ postCoin := suite.chainA.App.BankKeeper.GetBalance(suite.chainA.GetContext(), suite.chainA.SenderAccount.GetAddress(), trace.IBCDenom())
+ deltaAmount := postCoin.Amount.Sub(preCoin.Amount)
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ suite.Require().Equal(amount.Int64(), deltaAmount.Int64(), "successful timeout did not trigger refund")
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+}
diff --git a/applications/transfer/module.go b/applications/transfer/module.go
new file mode 100644
index 00000000..25290d69
--- /dev/null
+++ b/applications/transfer/module.go
@@ -0,0 +1,438 @@
+package transfer
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "math"
+ "math/rand"
+
+ "github.com/grpc-ecosystem/grpc-gateway/runtime"
+
+ "github.com/gorilla/mux"
+ "github.com/spf13/cobra"
+
+ abci "github.com/tendermint/tendermint/abci/types"
+
+ "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/codec"
+ codectypes "github.com/cosmos/cosmos-sdk/codec/types"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ "github.com/cosmos/cosmos-sdk/types/module"
+ simtypes "github.com/cosmos/cosmos-sdk/types/simulation"
+ capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/client/cli"
+ "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/keeper"
+ "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/simulation"
+ "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types"
+ channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+ porttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/05-port/types"
+ host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+)
+
+var (
+ _ module.AppModule = AppModule{}
+ _ porttypes.IBCModule = AppModule{}
+ _ module.AppModuleBasic = AppModuleBasic{}
+)
+
+// AppModuleBasic is the IBC Transfer AppModuleBasic
+type AppModuleBasic struct{}
+
+// Name implements AppModuleBasic interface
+func (AppModuleBasic) Name() string {
+ return types.ModuleName
+}
+
+// RegisterLegacyAminoCodec implements AppModuleBasic interface
+func (AppModuleBasic) RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) {
+ types.RegisterLegacyAminoCodec(cdc)
+}
+
+// RegisterInterfaces registers module concrete types into protobuf Any.
+func (AppModuleBasic) RegisterInterfaces(registry codectypes.InterfaceRegistry) {
+ types.RegisterInterfaces(registry)
+}
+
+// DefaultGenesis returns default genesis state as raw bytes for the ibc
+// transfer module.
+func (AppModuleBasic) DefaultGenesis(cdc codec.JSONMarshaler) json.RawMessage {
+ return cdc.MustMarshalJSON(types.DefaultGenesisState())
+}
+
+// ValidateGenesis performs genesis state validation for the ibc transfer module.
+func (AppModuleBasic) ValidateGenesis(cdc codec.JSONMarshaler, config client.TxEncodingConfig, bz json.RawMessage) error {
+ var gs types.GenesisState
+ if err := cdc.UnmarshalJSON(bz, &gs); err != nil {
+ return fmt.Errorf("failed to unmarshal %s genesis state: %w", types.ModuleName, err)
+ }
+
+ return gs.Validate()
+}
+
+// RegisterRESTRoutes implements AppModuleBasic interface
+func (AppModuleBasic) RegisterRESTRoutes(clientCtx client.Context, rtr *mux.Router) {
+}
+
+// RegisterGRPCGatewayRoutes registers the gRPC Gateway routes for the ibc-transfer module.
+func (AppModuleBasic) RegisterGRPCGatewayRoutes(clientCtx client.Context, mux *runtime.ServeMux) {
+ types.RegisterQueryHandlerClient(context.Background(), mux, types.NewQueryClient(clientCtx))
+}
+
+// GetTxCmd implements AppModuleBasic interface
+func (AppModuleBasic) GetTxCmd() *cobra.Command {
+ return cli.NewTxCmd()
+}
+
+// GetQueryCmd implements AppModuleBasic interface
+func (AppModuleBasic) GetQueryCmd() *cobra.Command {
+ return cli.GetQueryCmd()
+}
+
+// AppModule represents the AppModule for this module
+type AppModule struct {
+ AppModuleBasic
+ keeper keeper.Keeper
+}
+
+// NewAppModule creates a new ICS20 transfer AppModule
+func NewAppModule(k keeper.Keeper) AppModule {
+ return AppModule{
+ keeper: k,
+ }
+}
+
+// RegisterInvariants implements the AppModule interface
+func (AppModule) RegisterInvariants(ir sdk.InvariantRegistry) {
+ // TODO
+}
+
+// Route implements the AppModule interface
+func (am AppModule) Route() sdk.Route {
+ return sdk.NewRoute(types.RouterKey, NewHandler(am.keeper))
+}
+
+// QuerierRoute implements the AppModule interface
+func (AppModule) QuerierRoute() string {
+ return types.QuerierRoute
+}
+
+// LegacyQuerierHandler implements the AppModule interface
+func (am AppModule) LegacyQuerierHandler(*codec.LegacyAmino) sdk.Querier {
+ return nil
+}
+
+// RegisterServices registers module services.
+func (am AppModule) RegisterServices(cfg module.Configurator) {
+ types.RegisterMsgServer(cfg.MsgServer(), am.keeper)
+ types.RegisterQueryServer(cfg.QueryServer(), am.keeper)
+}
+
+// InitGenesis performs genesis initialization for the ibc-transfer module. It returns
+// no validator updates.
+func (am AppModule) InitGenesis(ctx sdk.Context, cdc codec.JSONMarshaler, data json.RawMessage) []abci.ValidatorUpdate {
+ var genesisState types.GenesisState
+ cdc.MustUnmarshalJSON(data, &genesisState)
+ am.keeper.InitGenesis(ctx, genesisState)
+ return []abci.ValidatorUpdate{}
+}
+
+// ExportGenesis returns the exported genesis state as raw bytes for the ibc-transfer
+// module.
+func (am AppModule) ExportGenesis(ctx sdk.Context, cdc codec.JSONMarshaler) json.RawMessage {
+ gs := am.keeper.ExportGenesis(ctx)
+ return cdc.MustMarshalJSON(gs)
+}
+
+// ConsensusVersion implements AppModule/ConsensusVersion.
+func (AppModule) ConsensusVersion() uint64 { return 1 }
+
+// BeginBlock implements the AppModule interface
+func (am AppModule) BeginBlock(ctx sdk.Context, req abci.RequestBeginBlock) {
+}
+
+// EndBlock implements the AppModule interface
+func (am AppModule) EndBlock(ctx sdk.Context, req abci.RequestEndBlock) []abci.ValidatorUpdate {
+ return []abci.ValidatorUpdate{}
+}
+
+//____________________________________________________________________________
+
+// AppModuleSimulation functions
+
+// GenerateGenesisState creates a randomized GenState of the transfer module.
+func (AppModule) GenerateGenesisState(simState *module.SimulationState) {
+ simulation.RandomizedGenState(simState)
+}
+
+// ProposalContents doesn't return any content functions for governance proposals.
+func (AppModule) ProposalContents(_ module.SimulationState) []simtypes.WeightedProposalContent {
+ return nil
+}
+
+// RandomizedParams creates randomized ibc-transfer param changes for the simulator.
+func (AppModule) RandomizedParams(r *rand.Rand) []simtypes.ParamChange {
+ return simulation.ParamChanges(r)
+}
+
+// RegisterStoreDecoder registers a decoder for transfer module's types
+func (am AppModule) RegisterStoreDecoder(sdr sdk.StoreDecoderRegistry) {
+ sdr[types.StoreKey] = simulation.NewDecodeStore(am.keeper)
+}
+
+// WeightedOperations returns the all the transfer module operations with their respective weights.
+func (am AppModule) WeightedOperations(_ module.SimulationState) []simtypes.WeightedOperation {
+ return nil
+}
+
+//____________________________________________________________________________
+
+// ValidateTransferChannelParams does validation of a newly created transfer channel. A transfer
+// channel must be UNORDERED, use the correct port (by default 'transfer'), and use the current
+// supported version. Only 2^32 channels are allowed to be created.
+func ValidateTransferChannelParams(
+ ctx sdk.Context,
+ keeper keeper.Keeper,
+ order channeltypes.Order,
+ portID string,
+ channelID string,
+ version string,
+) error {
+ // NOTE: for escrow address security only 2^32 channels are allowed to be created
+ // Issue: https://github.com/cosmos/cosmos-sdk/issues/7737
+ channelSequence, err := channeltypes.ParseChannelSequence(channelID)
+ if err != nil {
+ return err
+ }
+ if channelSequence > uint64(math.MaxUint32) {
+ return sdkerrors.Wrapf(types.ErrMaxTransferChannels, "channel sequence %d is greater than max allowed transfer channels %d", channelSequence, uint64(math.MaxUint32))
+ }
+ if order != channeltypes.UNORDERED {
+ return sdkerrors.Wrapf(channeltypes.ErrInvalidChannelOrdering, "expected %s channel, got %s ", channeltypes.UNORDERED, order)
+ }
+
+	// Require that portID is the portID the transfer module is bound to
+ boundPort := keeper.GetPort(ctx)
+ if boundPort != portID {
+ return sdkerrors.Wrapf(porttypes.ErrInvalidPort, "invalid port: %s, expected %s", portID, boundPort)
+ }
+
+ if version != types.Version {
+ return sdkerrors.Wrapf(types.ErrInvalidVersion, "got %s, expected %s", version, types.Version)
+ }
+ return nil
+}
+
+// OnChanOpenInit implements the IBCModule interface
+func (am AppModule) OnChanOpenInit(
+ ctx sdk.Context,
+ order channeltypes.Order,
+ connectionHops []string,
+ portID string,
+ channelID string,
+ chanCap *capabilitytypes.Capability,
+ counterparty channeltypes.Counterparty,
+ version string,
+) error {
+ if err := ValidateTransferChannelParams(ctx, am.keeper, order, portID, channelID, version); err != nil {
+ return err
+ }
+
+ // Claim channel capability passed back by IBC module
+ if err := am.keeper.ClaimCapability(ctx, chanCap, host.ChannelCapabilityPath(portID, channelID)); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// OnChanOpenTry implements the IBCModule interface
+func (am AppModule) OnChanOpenTry(
+ ctx sdk.Context,
+ order channeltypes.Order,
+ connectionHops []string,
+ portID,
+ channelID string,
+ chanCap *capabilitytypes.Capability,
+ counterparty channeltypes.Counterparty,
+ version,
+ counterpartyVersion string,
+) error {
+ if err := ValidateTransferChannelParams(ctx, am.keeper, order, portID, channelID, version); err != nil {
+ return err
+ }
+
+ if counterpartyVersion != types.Version {
+ return sdkerrors.Wrapf(types.ErrInvalidVersion, "invalid counterparty version: got: %s, expected %s", counterpartyVersion, types.Version)
+ }
+
+ // Module may have already claimed capability in OnChanOpenInit in the case of crossing hellos
+ // (ie chainA and chainB both call ChanOpenInit before one of them calls ChanOpenTry)
+ // If module can already authenticate the capability then module already owns it so we don't need to claim
+ // Otherwise, module does not have channel capability and we must claim it from IBC
+ if !am.keeper.AuthenticateCapability(ctx, chanCap, host.ChannelCapabilityPath(portID, channelID)) {
+ // Only claim channel capability passed back by IBC module if we do not already own it
+ if err := am.keeper.ClaimCapability(ctx, chanCap, host.ChannelCapabilityPath(portID, channelID)); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// OnChanOpenAck implements the IBCModule interface
+func (am AppModule) OnChanOpenAck(
+ ctx sdk.Context,
+ portID,
+ channelID string,
+ counterpartyVersion string,
+) error {
+ if counterpartyVersion != types.Version {
+ return sdkerrors.Wrapf(types.ErrInvalidVersion, "invalid counterparty version: %s, expected %s", counterpartyVersion, types.Version)
+ }
+ return nil
+}
+
+// OnChanOpenConfirm implements the IBCModule interface
+func (am AppModule) OnChanOpenConfirm(
+ ctx sdk.Context,
+ portID,
+ channelID string,
+) error {
+ return nil
+}
+
+// OnChanCloseInit implements the IBCModule interface
+func (am AppModule) OnChanCloseInit(
+ ctx sdk.Context,
+ portID,
+ channelID string,
+) error {
+ // Disallow user-initiated channel closing for transfer channels
+ return sdkerrors.Wrap(sdkerrors.ErrInvalidRequest, "user cannot close channel")
+}
+
+// OnChanCloseConfirm implements the IBCModule interface
+func (am AppModule) OnChanCloseConfirm(
+ ctx sdk.Context,
+ portID,
+ channelID string,
+) error {
+ return nil
+}
+
+// OnRecvPacket implements the IBCModule interface
+func (am AppModule) OnRecvPacket(
+ ctx sdk.Context,
+ packet channeltypes.Packet,
+) (*sdk.Result, []byte, error) {
+ var data types.FungibleTokenPacketData
+ if err := types.ModuleCdc.UnmarshalJSON(packet.GetData(), &data); err != nil {
+ return nil, nil, sdkerrors.Wrapf(sdkerrors.ErrUnknownRequest, "cannot unmarshal ICS-20 transfer packet data: %s", err.Error())
+ }
+
+ acknowledgement := channeltypes.NewResultAcknowledgement([]byte{byte(1)})
+
+ err := am.keeper.OnRecvPacket(ctx, packet, data)
+ if err != nil {
+ acknowledgement = channeltypes.NewErrorAcknowledgement(err.Error())
+ }
+
+ ctx.EventManager().EmitEvent(
+ sdk.NewEvent(
+ types.EventTypePacket,
+ sdk.NewAttribute(sdk.AttributeKeyModule, types.ModuleName),
+ sdk.NewAttribute(types.AttributeKeyReceiver, data.Receiver),
+ sdk.NewAttribute(types.AttributeKeyDenom, data.Denom),
+ sdk.NewAttribute(types.AttributeKeyAmount, fmt.Sprintf("%d", data.Amount)),
+			sdk.NewAttribute(types.AttributeKeyAckSuccess, fmt.Sprintf("%t", err == nil)),
+ ),
+ )
+
+ // NOTE: acknowledgement will be written synchronously during IBC handler execution.
+ return &sdk.Result{
+ Events: ctx.EventManager().Events().ToABCIEvents(),
+ }, acknowledgement.GetBytes(), nil
+}
+
+// OnAcknowledgementPacket implements the IBCModule interface
+func (am AppModule) OnAcknowledgementPacket(
+ ctx sdk.Context,
+ packet channeltypes.Packet,
+ acknowledgement []byte,
+) (*sdk.Result, error) {
+ var ack channeltypes.Acknowledgement
+ if err := types.ModuleCdc.UnmarshalJSON(acknowledgement, &ack); err != nil {
+ return nil, sdkerrors.Wrapf(sdkerrors.ErrUnknownRequest, "cannot unmarshal ICS-20 transfer packet acknowledgement: %v", err)
+ }
+ var data types.FungibleTokenPacketData
+ if err := types.ModuleCdc.UnmarshalJSON(packet.GetData(), &data); err != nil {
+ return nil, sdkerrors.Wrapf(sdkerrors.ErrUnknownRequest, "cannot unmarshal ICS-20 transfer packet data: %s", err.Error())
+ }
+
+ if err := am.keeper.OnAcknowledgementPacket(ctx, packet, data, ack); err != nil {
+ return nil, err
+ }
+
+ ctx.EventManager().EmitEvent(
+ sdk.NewEvent(
+ types.EventTypePacket,
+ sdk.NewAttribute(sdk.AttributeKeyModule, types.ModuleName),
+ sdk.NewAttribute(types.AttributeKeyReceiver, data.Receiver),
+ sdk.NewAttribute(types.AttributeKeyDenom, data.Denom),
+ sdk.NewAttribute(types.AttributeKeyAmount, fmt.Sprintf("%d", data.Amount)),
+ sdk.NewAttribute(types.AttributeKeyAck, ack.String()),
+ ),
+ )
+
+ switch resp := ack.Response.(type) {
+ case *channeltypes.Acknowledgement_Result:
+ ctx.EventManager().EmitEvent(
+ sdk.NewEvent(
+ types.EventTypePacket,
+ sdk.NewAttribute(types.AttributeKeyAckSuccess, string(resp.Result)),
+ ),
+ )
+ case *channeltypes.Acknowledgement_Error:
+ ctx.EventManager().EmitEvent(
+ sdk.NewEvent(
+ types.EventTypePacket,
+ sdk.NewAttribute(types.AttributeKeyAckError, resp.Error),
+ ),
+ )
+ }
+
+ return &sdk.Result{
+ Events: ctx.EventManager().Events().ToABCIEvents(),
+ }, nil
+}
+
+// OnTimeoutPacket implements the IBCModule interface
+func (am AppModule) OnTimeoutPacket(
+ ctx sdk.Context,
+ packet channeltypes.Packet,
+) (*sdk.Result, error) {
+ var data types.FungibleTokenPacketData
+ if err := types.ModuleCdc.UnmarshalJSON(packet.GetData(), &data); err != nil {
+ return nil, sdkerrors.Wrapf(sdkerrors.ErrUnknownRequest, "cannot unmarshal ICS-20 transfer packet data: %s", err.Error())
+ }
+ // refund tokens
+ if err := am.keeper.OnTimeoutPacket(ctx, packet, data); err != nil {
+ return nil, err
+ }
+
+ ctx.EventManager().EmitEvent(
+ sdk.NewEvent(
+ types.EventTypeTimeout,
+ sdk.NewAttribute(sdk.AttributeKeyModule, types.ModuleName),
+ sdk.NewAttribute(types.AttributeKeyRefundReceiver, data.Sender),
+ sdk.NewAttribute(types.AttributeKeyRefundDenom, data.Denom),
+ sdk.NewAttribute(types.AttributeKeyRefundAmount, fmt.Sprintf("%d", data.Amount)),
+ ),
+ )
+
+ return &sdk.Result{
+ Events: ctx.EventManager().Events().ToABCIEvents(),
+ }, nil
+}
diff --git a/applications/transfer/module_test.go b/applications/transfer/module_test.go
new file mode 100644
index 00000000..d2acfb40
--- /dev/null
+++ b/applications/transfer/module_test.go
@@ -0,0 +1,246 @@
+package transfer_test
+
+import (
+ "math"
+
+ capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types"
+ channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+ host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+)
+
+func (suite *TransferTestSuite) TestOnChanOpenInit() {
+ var (
+ channel *channeltypes.Channel
+ testChannel ibctesting.TestChannel
+ connA *ibctesting.TestConnection
+ chanCap *capabilitytypes.Capability
+ )
+
+ testCases := []struct {
+ name string
+ malleate func()
+ expPass bool
+ }{
+
+ {
+ "success", func() {}, true,
+ },
+ {
+ "max channels reached", func() {
+ testChannel.ID = channeltypes.FormatChannelIdentifier(math.MaxUint32 + 1)
+ }, false,
+ },
+ {
+ "invalid order - ORDERED", func() {
+ channel.Ordering = channeltypes.ORDERED
+ }, false,
+ },
+ {
+ "invalid port ID", func() {
+ testChannel = suite.chainA.NextTestChannel(connA, ibctesting.MockPort)
+ }, false,
+ },
+ {
+ "invalid version", func() {
+ channel.Version = "version"
+ }, false,
+ },
+ {
+ "capability already claimed", func() {
+ err := suite.chainA.App.ScopedTransferKeeper.ClaimCapability(suite.chainA.GetContext(), chanCap, host.ChannelCapabilityPath(testChannel.PortID, testChannel.ID))
+ suite.Require().NoError(err)
+ }, false,
+ },
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+
+ suite.Run(tc.name, func() {
+ suite.SetupTest() // reset
+
+ _, _, connA, _ = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
+ testChannel = suite.chainA.NextTestChannel(connA, ibctesting.TransferPort)
+ counterparty := channeltypes.NewCounterparty(testChannel.PortID, testChannel.ID)
+ channel = &channeltypes.Channel{
+ State: channeltypes.INIT,
+ Ordering: channeltypes.UNORDERED,
+ Counterparty: counterparty,
+ ConnectionHops: []string{connA.ID},
+ Version: types.Version,
+ }
+
+ module, _, err := suite.chainA.App.IBCKeeper.PortKeeper.LookupModuleByPort(suite.chainA.GetContext(), ibctesting.TransferPort)
+ suite.Require().NoError(err)
+
+ chanCap, err = suite.chainA.App.ScopedIBCKeeper.NewCapability(suite.chainA.GetContext(), host.ChannelCapabilityPath(ibctesting.TransferPort, testChannel.ID))
+ suite.Require().NoError(err)
+
+ cbs, ok := suite.chainA.App.IBCKeeper.Router.GetRoute(module)
+ suite.Require().True(ok)
+
+ tc.malleate() // explicitly change fields in channel and testChannel
+
+ err = cbs.OnChanOpenInit(suite.chainA.GetContext(), channel.Ordering, channel.GetConnectionHops(),
+ testChannel.PortID, testChannel.ID, chanCap, channel.Counterparty, channel.GetVersion(),
+ )
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ } else {
+ suite.Require().Error(err)
+ }
+
+ })
+ }
+}
+
+func (suite *TransferTestSuite) TestOnChanOpenTry() {
+ var (
+ channel *channeltypes.Channel
+ testChannel ibctesting.TestChannel
+ connA *ibctesting.TestConnection
+ chanCap *capabilitytypes.Capability
+ counterpartyVersion string
+ )
+
+ testCases := []struct {
+ name string
+ malleate func()
+ expPass bool
+ }{
+
+ {
+ "success", func() {}, true,
+ },
+ {
+ "max channels reached", func() {
+ testChannel.ID = channeltypes.FormatChannelIdentifier(math.MaxUint32 + 1)
+ }, false,
+ },
+ {
+ "capability already claimed in INIT should pass", func() {
+ err := suite.chainA.App.ScopedTransferKeeper.ClaimCapability(suite.chainA.GetContext(), chanCap, host.ChannelCapabilityPath(testChannel.PortID, testChannel.ID))
+ suite.Require().NoError(err)
+ }, true,
+ },
+ {
+ "invalid order - ORDERED", func() {
+ channel.Ordering = channeltypes.ORDERED
+ }, false,
+ },
+ {
+ "invalid port ID", func() {
+ testChannel = suite.chainA.NextTestChannel(connA, ibctesting.MockPort)
+ }, false,
+ },
+ {
+ "invalid version", func() {
+ channel.Version = "version"
+ }, false,
+ },
+ {
+ "invalid counterparty version", func() {
+ counterpartyVersion = "version"
+ }, false,
+ },
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+
+ suite.Run(tc.name, func() {
+ suite.SetupTest() // reset
+
+ _, _, connA, _ = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
+ testChannel = suite.chainA.NextTestChannel(connA, ibctesting.TransferPort)
+ counterparty := channeltypes.NewCounterparty(testChannel.PortID, testChannel.ID)
+ channel = &channeltypes.Channel{
+ State: channeltypes.TRYOPEN,
+ Ordering: channeltypes.UNORDERED,
+ Counterparty: counterparty,
+ ConnectionHops: []string{connA.ID},
+ Version: types.Version,
+ }
+ counterpartyVersion = types.Version
+
+ module, _, err := suite.chainA.App.IBCKeeper.PortKeeper.LookupModuleByPort(suite.chainA.GetContext(), ibctesting.TransferPort)
+ suite.Require().NoError(err)
+
+ chanCap, err = suite.chainA.App.ScopedIBCKeeper.NewCapability(suite.chainA.GetContext(), host.ChannelCapabilityPath(ibctesting.TransferPort, testChannel.ID))
+ suite.Require().NoError(err)
+
+ cbs, ok := suite.chainA.App.IBCKeeper.Router.GetRoute(module)
+ suite.Require().True(ok)
+
+ tc.malleate() // explicitly change fields in channel and testChannel
+
+ err = cbs.OnChanOpenTry(suite.chainA.GetContext(), channel.Ordering, channel.GetConnectionHops(),
+ testChannel.PortID, testChannel.ID, chanCap, channel.Counterparty, channel.GetVersion(), counterpartyVersion,
+ )
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ } else {
+ suite.Require().Error(err)
+ }
+
+ })
+ }
+}
+
+func (suite *TransferTestSuite) TestOnChanOpenAck() {
+ var (
+ testChannel ibctesting.TestChannel
+ connA *ibctesting.TestConnection
+ counterpartyVersion string
+ )
+
+ testCases := []struct {
+ name string
+ malleate func()
+ expPass bool
+ }{
+
+ {
+ "success", func() {}, true,
+ },
+ {
+ "invalid counterparty version", func() {
+ counterpartyVersion = "version"
+ }, false,
+ },
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+
+ suite.Run(tc.name, func() {
+ suite.SetupTest() // reset
+
+ _, _, connA, _ = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
+ testChannel = suite.chainA.NextTestChannel(connA, ibctesting.TransferPort)
+ counterpartyVersion = types.Version
+
+ module, _, err := suite.chainA.App.IBCKeeper.PortKeeper.LookupModuleByPort(suite.chainA.GetContext(), ibctesting.TransferPort)
+ suite.Require().NoError(err)
+
+ cbs, ok := suite.chainA.App.IBCKeeper.Router.GetRoute(module)
+ suite.Require().True(ok)
+
+ tc.malleate() // explicitly change fields in channel and testChannel
+
+ err = cbs.OnChanOpenAck(suite.chainA.GetContext(), testChannel.PortID, testChannel.ID, counterpartyVersion)
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ } else {
+ suite.Require().Error(err)
+ }
+
+ })
+ }
+}
diff --git a/applications/transfer/simulation/decoder.go b/applications/transfer/simulation/decoder.go
new file mode 100644
index 00000000..df783450
--- /dev/null
+++ b/applications/transfer/simulation/decoder.go
@@ -0,0 +1,33 @@
+package simulation
+
+import (
+ "bytes"
+ "fmt"
+
+ "github.com/cosmos/cosmos-sdk/types/kv"
+ "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types"
+)
+
+// TransferUnmarshaler defines the expected encoding store functions.
+type TransferUnmarshaler interface {
+ MustUnmarshalDenomTrace([]byte) types.DenomTrace
+}
+
+// NewDecodeStore returns a decoder function closure that unmarshals the KVPair's
+// Value to the corresponding DenomTrace type.
+func NewDecodeStore(cdc TransferUnmarshaler) func(kvA, kvB kv.Pair) string {
+ return func(kvA, kvB kv.Pair) string {
+ switch {
+ case bytes.Equal(kvA.Key[:1], types.PortKey):
+ return fmt.Sprintf("Port A: %s\nPort B: %s", string(kvA.Value), string(kvB.Value))
+
+ case bytes.Equal(kvA.Key[:1], types.DenomTraceKey):
+ denomTraceA := cdc.MustUnmarshalDenomTrace(kvA.Value)
+ denomTraceB := cdc.MustUnmarshalDenomTrace(kvB.Value)
+ return fmt.Sprintf("DenomTrace A: %s\nDenomTrace B: %s", denomTraceA.IBCDenom(), denomTraceB.IBCDenom())
+
+ default:
+ panic(fmt.Sprintf("invalid %s key prefix %X", types.ModuleName, kvA.Key[:1]))
+ }
+ }
+}
diff --git a/applications/transfer/simulation/decoder_test.go b/applications/transfer/simulation/decoder_test.go
new file mode 100644
index 00000000..729a067e
--- /dev/null
+++ b/applications/transfer/simulation/decoder_test.go
@@ -0,0 +1,59 @@
+package simulation_test
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/cosmos/cosmos-sdk/simapp"
+ "github.com/cosmos/cosmos-sdk/types/kv"
+ "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/simulation"
+ "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types"
+)
+
+func TestDecodeStore(t *testing.T) {
+ app := simapp.Setup(false)
+ dec := simulation.NewDecodeStore(app.TransferKeeper)
+
+ trace := types.DenomTrace{
+ BaseDenom: "uatom",
+ Path: "transfer/channelToA",
+ }
+
+ kvPairs := kv.Pairs{
+ Pairs: []kv.Pair{
+ {
+ Key: types.PortKey,
+ Value: []byte(types.PortID),
+ },
+ {
+ Key: types.DenomTraceKey,
+ Value: app.TransferKeeper.MustMarshalDenomTrace(trace),
+ },
+ {
+ Key: []byte{0x99},
+ Value: []byte{0x99},
+ },
+ },
+ }
+ tests := []struct {
+ name string
+ expectedLog string
+ }{
+ {"PortID", fmt.Sprintf("Port A: %s\nPort B: %s", types.PortID, types.PortID)},
+ {"DenomTrace", fmt.Sprintf("DenomTrace A: %s\nDenomTrace B: %s", trace.IBCDenom(), trace.IBCDenom())},
+ {"other", ""},
+ }
+
+ for i, tt := range tests {
+ i, tt := i, tt
+ t.Run(tt.name, func(t *testing.T) {
+ if i == len(tests)-1 {
+ require.Panics(t, func() { dec(kvPairs.Pairs[i], kvPairs.Pairs[i]) }, tt.name)
+ } else {
+ require.Equal(t, tt.expectedLog, dec(kvPairs.Pairs[i], kvPairs.Pairs[i]), tt.name)
+ }
+ })
+ }
+}
diff --git a/applications/transfer/simulation/genesis.go b/applications/transfer/simulation/genesis.go
new file mode 100644
index 00000000..a51bce9f
--- /dev/null
+++ b/applications/transfer/simulation/genesis.go
@@ -0,0 +1,54 @@
+package simulation
+
+import (
+ "encoding/json"
+ "fmt"
+ "math/rand"
+ "strings"
+
+ "github.com/cosmos/cosmos-sdk/types/module"
+ simtypes "github.com/cosmos/cosmos-sdk/types/simulation"
+ "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types"
+)
+
+// Simulation parameter constants
+const port = "port_id"
+
+// RadomEnabled randomly generates the send or receive enabled param, with a 75% probability of being true.
+func RadomEnabled(r *rand.Rand) bool {
+ return r.Int63n(101) <= 75
+}
+
+// RandomizedGenState generates a random GenesisState for transfer.
+func RandomizedGenState(simState *module.SimulationState) {
+ var portID string
+ simState.AppParams.GetOrGenerate(
+ simState.Cdc, port, &portID, simState.Rand,
+ func(r *rand.Rand) { portID = strings.ToLower(simtypes.RandStringOfLength(r, 20)) },
+ )
+
+ var sendEnabled bool
+ simState.AppParams.GetOrGenerate(
+ simState.Cdc, string(types.KeySendEnabled), &sendEnabled, simState.Rand,
+ func(r *rand.Rand) { sendEnabled = RadomEnabled(r) },
+ )
+
+ var receiveEnabled bool
+ simState.AppParams.GetOrGenerate(
+ simState.Cdc, string(types.KeyReceiveEnabled), &receiveEnabled, simState.Rand,
+ func(r *rand.Rand) { receiveEnabled = RadomEnabled(r) },
+ )
+
+ transferGenesis := types.GenesisState{
+ PortId: portID,
+ DenomTraces: types.Traces{},
+ Params: types.NewParams(sendEnabled, receiveEnabled),
+ }
+
+ bz, err := json.MarshalIndent(&transferGenesis, "", " ")
+ if err != nil {
+ panic(err)
+ }
+ fmt.Printf("Selected randomly generated %s parameters:\n%s\n", types.ModuleName, bz)
+ simState.GenState[types.ModuleName] = simState.Cdc.MustMarshalJSON(&transferGenesis)
+}
diff --git a/applications/transfer/simulation/genesis_test.go b/applications/transfer/simulation/genesis_test.go
new file mode 100644
index 00000000..12791d74
--- /dev/null
+++ b/applications/transfer/simulation/genesis_test.go
@@ -0,0 +1,74 @@
+package simulation_test
+
+import (
+ "encoding/json"
+ "math/rand"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/cosmos/cosmos-sdk/codec"
+ codectypes "github.com/cosmos/cosmos-sdk/codec/types"
+ "github.com/cosmos/cosmos-sdk/types/module"
+ simtypes "github.com/cosmos/cosmos-sdk/types/simulation"
+ "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/simulation"
+ "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types"
+)
+
+// TestRandomizedGenState tests the normal scenario of applying RandomizedGenState.
+// Abnormal scenarios are not tested here.
+func TestRandomizedGenState(t *testing.T) {
+ interfaceRegistry := codectypes.NewInterfaceRegistry()
+ cdc := codec.NewProtoCodec(interfaceRegistry)
+
+ s := rand.NewSource(1)
+ r := rand.New(s)
+
+ simState := module.SimulationState{
+ AppParams: make(simtypes.AppParams),
+ Cdc: cdc,
+ Rand: r,
+ NumBonded: 3,
+ Accounts: simtypes.RandomAccounts(r, 3),
+ InitialStake: 1000,
+ GenState: make(map[string]json.RawMessage),
+ }
+
+ simulation.RandomizedGenState(&simState)
+
+ var ibcTransferGenesis types.GenesisState
+ simState.Cdc.MustUnmarshalJSON(simState.GenState[types.ModuleName], &ibcTransferGenesis)
+
+ require.Equal(t, "euzxpfgkqegqiqwixnku", ibcTransferGenesis.PortId)
+ require.True(t, ibcTransferGenesis.Params.SendEnabled)
+ require.True(t, ibcTransferGenesis.Params.ReceiveEnabled)
+ require.Len(t, ibcTransferGenesis.DenomTraces, 0)
+
+}
+
+// TestRandomizedGenState1 tests abnormal scenarios of applying RandomizedGenState.
+func TestRandomizedGenState1(t *testing.T) {
+ interfaceRegistry := codectypes.NewInterfaceRegistry()
+ cdc := codec.NewProtoCodec(interfaceRegistry)
+
+ s := rand.NewSource(1)
+ r := rand.New(s)
+ // all these tests will panic
+ tests := []struct {
+ simState module.SimulationState
+ panicMsg string
+ }{
+ { // panic => reason: incomplete initialization of the simState
+ module.SimulationState{}, "invalid memory address or nil pointer dereference"},
+ { // panic => reason: incomplete initialization of the simState
+ module.SimulationState{
+ AppParams: make(simtypes.AppParams),
+ Cdc: cdc,
+ Rand: r,
+ }, "assignment to entry in nil map"},
+ }
+
+ for _, tt := range tests {
+ require.Panicsf(t, func() { simulation.RandomizedGenState(&tt.simState) }, tt.panicMsg)
+ }
+}
diff --git a/applications/transfer/simulation/params.go b/applications/transfer/simulation/params.go
new file mode 100644
index 00000000..67c61f51
--- /dev/null
+++ b/applications/transfer/simulation/params.go
@@ -0,0 +1,32 @@
+package simulation
+
+import (
+ "fmt"
+ "math/rand"
+
+ gogotypes "github.com/gogo/protobuf/types"
+
+ "github.com/cosmos/cosmos-sdk/x/simulation"
+
+ simtypes "github.com/cosmos/cosmos-sdk/types/simulation"
+ "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types"
+)
+
+// ParamChanges defines the parameters that can be modified by param change proposals
+// on the simulation
+func ParamChanges(r *rand.Rand) []simtypes.ParamChange {
+ return []simtypes.ParamChange{
+ simulation.NewSimParamChange(types.ModuleName, string(types.KeySendEnabled),
+ func(r *rand.Rand) string {
+ sendEnabled := RadomEnabled(r)
+ return fmt.Sprintf("%s", types.ModuleCdc.MustMarshalJSON(&gogotypes.BoolValue{Value: sendEnabled}))
+ },
+ ),
+ simulation.NewSimParamChange(types.ModuleName, string(types.KeyReceiveEnabled),
+ func(r *rand.Rand) string {
+ receiveEnabled := RadomEnabled(r)
+ return fmt.Sprintf("%s", types.ModuleCdc.MustMarshalJSON(&gogotypes.BoolValue{Value: receiveEnabled}))
+ },
+ ),
+ }
+}
diff --git a/applications/transfer/simulation/params_test.go b/applications/transfer/simulation/params_test.go
new file mode 100644
index 00000000..a692d432
--- /dev/null
+++ b/applications/transfer/simulation/params_test.go
@@ -0,0 +1,36 @@
+package simulation_test
+
+import (
+ "math/rand"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/simulation"
+)
+
+func TestParamChanges(t *testing.T) {
+ s := rand.NewSource(1)
+ r := rand.New(s)
+
+ expected := []struct {
+ composedKey string
+ key string
+ simValue string
+ subspace string
+ }{
+ {"transfer/SendEnabled", "SendEnabled", "false", "transfer"},
+ {"transfer/ReceiveEnabled", "ReceiveEnabled", "true", "transfer"},
+ }
+
+ paramChanges := simulation.ParamChanges(r)
+
+ require.Len(t, paramChanges, 2)
+
+ for i, p := range paramChanges {
+ require.Equal(t, expected[i].composedKey, p.ComposedKey())
+ require.Equal(t, expected[i].key, p.Key())
+ require.Equal(t, expected[i].simValue, p.SimValue()(r), p.Key())
+ require.Equal(t, expected[i].subspace, p.Subspace())
+ }
+}
diff --git a/applications/transfer/spec/01_concepts.md b/applications/transfer/spec/01_concepts.md
new file mode 100644
index 00000000..96f05f12
--- /dev/null
+++ b/applications/transfer/spec/01_concepts.md
@@ -0,0 +1,117 @@
+
+
+# Concepts
+
+## Acknowledgements
+
+ICS20 uses the recommended acknowledgement format as specified by [ICS 04](https://github.com/cosmos/ics/tree/master/spec/ics-004-channel-and-packet-semantics#acknowledgement-envelope).
+
+A successful receive of a transfer packet will result in a Result Acknowledgement being written
+with the value `[]byte{byte(1)}` in the `Response` field.
+
+An unsuccessful receive of a transfer packet will result in an Error Acknowledgement being written
+with the error message in the `Response` field.
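+
+A condensed sketch of how the `OnRecvPacket` callback in this module (see `module.go` in this
+patch) selects between the two acknowledgement types; `k` below stands for the transfer keeper:
+
+```go
+// Successful receives write a result acknowledgement; failed receives write
+// the error message into an error acknowledgement.
+ack := channeltypes.NewResultAcknowledgement([]byte{byte(1)})
+if err := k.OnRecvPacket(ctx, packet, data); err != nil {
+	ack = channeltypes.NewErrorAcknowledgement(err.Error())
+}
+return ack.GetBytes() // written synchronously during IBC handler execution
+```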
+
+## Denomination Trace
+
+The denomination trace corresponds to the information that allows a token to be traced back to its
+origin chain. It contains a sequence of port and channel identifiers ordered from the most recent to
+the oldest in the timeline of transfers.
+
+This information is included on the token denomination field in the form of a hash to prevent an
+unbounded denomination length. For example, the token `transfer/channelToA/uatom` will be displayed
+as `ibc/7F1D3FCF4AE79E1554D670D1AD949A9BA4E4A3C76C63093E17E446A46061A7A2`.
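+
+A hedged sketch of how that hashed representation can be derived, assuming (per ADR 001) a plain
+SHA-256 over the full denomination path; the standalone helper below is illustrative and not part
+of the module API:
+
+```go
+import (
+	"crypto/sha256"
+	"fmt"
+)
+
+// ibcDenom is an illustrative helper: hash the full trace path + base denom and
+// prefix the uppercase hex digest with "ibc/".
+func ibcDenom(fullDenomPath string) string {
+	hash := sha256.Sum256([]byte(fullDenomPath))
+	return fmt.Sprintf("ibc/%X", hash[:])
+}
+
+// ibcDenom("transfer/channelToA/uatom") should reproduce the `ibc/7F1D…` value shown above.
+```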
+
+Each send to any chain other than the one it was previously received from is a movement forwards in
+the token's timeline. This causes a trace to be added to the token's history and the destination port
+and destination channel to be prefixed to the denomination. In these instances the sender chain is
+acting as the "source zone". When the token is sent back to the chain it previously received from, the
+prefix is removed. This is a backwards movement in the token's timeline and the sender chain is
+acting as the "sink zone".
+
+It is strongly recommended to read the full details of [ADR 001: Coin Source Tracing](./../../../../../docs/architecture/adr-001-coin-source-tracing.md) to understand the implications and context of the IBC token representations.
+
+### UX suggestions for clients
+
+For clients (wallets, exchanges, applications, block explorers, etc) that want to display the source of the token, it is recommended to use the following
+alternatives for each of the cases below:
+
+#### Direct connection
+
+If the denomination trace contains a single identifier prefix pair (as in the example above), then
+the easiest way to retrieve the chain and light client identifier is to map the trace information
+directly. In summary, this requires querying the channel from the denomination trace identifiers,
+and then the counterparty client state using the counterparty port and channel identifiers from the
+retrieved channel.
+
+A general pseudo algorithm would look like the following:
+
+1. Query the full denomination trace.
+2. Query the channel with the `portID/channelID` pair, which corresponds to the first destination of the
+ token.
+3. Query the client state using the identifiers pair. Note that this query will return a `"Not
+ Found"` response if the current chain is not connected to this channel.
+4. Retrieve the client identifier or chain identifier from the client state (eg: on
+ Tendermint clients) and store it locally.
+
+Using the gRPC gateway client service the steps above would be, with a given IBC token `ibc/7F1D3FCF4AE79E1554D670D1AD949A9BA4E4A3C76C63093E17E446A46061A7A2` stored on `chainB`:
+
+1. `GET /ibc_transfer/v1beta1/denom_traces/7F1D3FCF4AE79E1554D670D1AD949A9BA4E4A3C76C63093E17E446A46061A7A2` -> `{"path": "transfer/channelToA", "base_denom": "uatom"}`
+2. `GET /ibc/channel/v1beta1/channels/channelToA/ports/transfer/client_state"` -> `{"client_id": "clientA", "chain-id": "chainA", ...}`
+3. `GET /ibc/channel/v1beta1/channels/channelToA/ports/transfer"` -> `{"channel_id": "channelToA", port_id": "transfer", counterparty: {"channel_id": "channelToB", port_id": "transfer"}, ...}`
+4. `GET /ibc/channel/v1beta1/channels/channelToB/ports/transfer/client_state" -> {"client_id": "clientB", "chain-id": "chainB", ...}`
+
+Then, the token transfer chain path for the `uatom` denomination would be: `chainA` -> `chainB`.
+
+### Multiple hops
+
+The multiple channel hops case applies when the token has passed through multiple chains between the original source and final destination chains.
+
+The IBC protocol doesn't know the topology of the overall network (i.e. connections between chains and identifier names between them). For this reason, in the multiple hops case, a particular chain in the timeline of the individual transfers can't query the chain and client identifiers of the other chains.
+
+Take for example the following sequence of transfers `A -> B -> C` for an IBC token, with a final prefix path (trace info) of `transfer/channelChainC/transfer/channelChainB`. What the paragraph above means is that even if chain `C` is directly connected to chain `A`, the port and channel identifiers that chain `B` uses to connect to chain `A` (eg: `transfer/channelChainA`) can be completely different from the ones that chain `C` uses to connect to chain `A` (eg: `transfer/channelToChainA`).
+
+Thus, the solutions that the IBC team recommends for clients are the following:
+
+- **Connect to all chains**: Connecting to all the chains in the timeline would allow clients to
+ perform the queries outlined in the [direct connection](#direct-connection) section to each
+ relevant chain. By repeatedly following the port and channel denomination trace transfer timeline,
+ clients should always be able to find all the relevant identifiers. This comes at the tradeoff
+ that the client must connect to nodes on each of the chains in order to perform the queries.
+- **Relayer as a Service (RaaS)**: A longer term solution is to use/create a relayer service that
+ could map the denomination trace to the chain path timeline for each token (i.e. `origin chain ->
+ chain #1 -> ... -> chain #(n-1) -> final chain`). These services could provide merkle proofs in
+ order to allow clients to optionally verify the path timeline correctness for themselves by
+ running light clients. If the proofs are not verified, the services should be considered trusted
+ third-party services. Additionally, clients would be advised in the future to use RaaS that support the
+ largest number of connections between chains in the ecosystem. Unfortunately, none of the existing
+ public relayers (in [Golang](https://github.com/cosmos/relayer) and
+ [Rust](https://github.com/informalsystems/ibc-rs)), provide this service to clients.
+
+::: tip
+At the time of writing, the only viable alternative for clients tracing tokens with multiple connection hops is to connect to all chains directly and perform the relevant queries to each of them in sequence.
+:::
+
+## Locked Funds
+
+In some [exceptional cases](./../../../../../docs/architecture/adr-026-ibc-client-recovery-mechanisms.md#exceptional-cases), a client state associated with a given channel cannot be updated. This means that funds from fungible tokens in that channel will be permanently locked and thus can no longer be transferred.
+
+To mitigate this, a client update governance proposal can be submitted to update the frozen client
+with a new valid header. Once the proposal passes the client state will be unfrozen and the funds
+from the associated channels will then be unlocked. This mechanism only applies to clients that
+allow updates via governance, such as Tendermint clients.
+
+In addition to this, it's important to mention that a token must be sent back along the exact route
+that it took originally in order to return it to its original form on the source chain (eg: the
+Cosmos Hub for the `uatom`). Sending a token back to the same chain across a different channel will
+**not** move the token back across its timeline. If a channel in the chain history closes before the
+token can be sent back across that channel, then the token will not be returnable to its original
+form.
+
+
+## Security Considerations
+
+For safety, no other module must be capable of minting tokens with the `ibc/` prefix. The IBC
+transfer module needs a subset of the denomination space that only it can create tokens in.
diff --git a/applications/transfer/spec/02_state.md b/applications/transfer/spec/02_state.md
new file mode 100644
index 00000000..9cab8d67
--- /dev/null
+++ b/applications/transfer/spec/02_state.md
@@ -0,0 +1,10 @@
+
+
+# State
+
+The transfer IBC application module keeps state of the port to which the module is bound and the denomination trace information as outlined in [ADR 01](./../../../../../docs/architecture/adr-001-coin-source-tracing.md).
+
+- `Port`: `0x01 -> ProtocolBuffer(string)`
+- `DenomTrace`: `0x02 | []bytes(traceHash) -> ProtocolBuffer(DenomTrace)`
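+
+A hedged sketch of the corresponding key prefixes (the constant names match `types.PortKey` and
+`types.DenomTraceKey` used by the simulation store decoder in this patch; the helper assembling the
+full denomination trace key is illustrative):
+
+```go
+var (
+	PortKey       = []byte{0x01} // key under which the bound port identifier is stored
+	DenomTraceKey = []byte{0x02} // prefix for denomination trace entries
+)
+
+// denomTraceStoreKey is an illustrative helper: full key = 0x02 | traceHash
+func denomTraceStoreKey(traceHash []byte) []byte {
+	return append(DenomTraceKey, traceHash...)
+}
+```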
diff --git a/applications/transfer/spec/03_state_transitions.md b/applications/transfer/spec/03_state_transitions.md
new file mode 100644
index 00000000..9090da54
--- /dev/null
+++ b/applications/transfer/spec/03_state_transitions.md
@@ -0,0 +1,36 @@
+
+
+# State Transitions
+
+## Send Fungible Tokens
+
+A successful fungible token send has two state transitions, depending on whether the
+transfer is a movement forwards or backwards in the token's timeline:
+
+1. Sender chain is the source chain, *i.e.* a transfer to any chain other than the one it was previously received from is a movement forwards in the token's timeline. This results in the following state transitions:
+
+- The coins are transferred to an escrow address (i.e. locked) on the sender chain
+- The coins are transferred to the receiving chain through IBC TAO logic.
+
+2. Sender chain is the sink chain, *i.e.* the token is sent back to the chain it was previously received from. This is a backwards movement in the token's timeline. This results in the following state transitions:
+
+- The coins (vouchers) are burned on the sender chain
+- The coins are transferred to the receiving chain through IBC TAO logic.
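+
+A simplified sketch of this branch as it appears in the keeper's send logic (the bank keeper calls
+shown are the operations the transfer keeper delegates to, not the exact call sites):
+
+```go
+if types.SenderChainIsSource(sourcePort, sourceChannel, token.Denom) {
+	// movement forwards in the token's timeline: lock the coins in the channel escrow account
+	escrowAddress := types.GetEscrowAddress(sourcePort, sourceChannel)
+	err = bankKeeper.SendCoins(ctx, sender, escrowAddress, sdk.NewCoins(token))
+} else {
+	// movement backwards: the vouchers are moved to the module account and burned
+	err = bankKeeper.SendCoinsFromAccountToModule(ctx, sender, types.ModuleName, sdk.NewCoins(token))
+	if err == nil {
+		err = bankKeeper.BurnCoins(ctx, types.ModuleName, sdk.NewCoins(token))
+	}
+}
+```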
+
+## Receive Fungible Tokens
+
+A successful fungible token receive has two state transitions, depending on whether the
+transfer is a movement forwards or backwards in the token's timeline:
+
+1. Receiver chain is the source chain. This is a backwards movement in the token's timeline. This results in the following state transitions:
+
+- The leftmost port and channel identifier pair is removed from the token denomination prefix.
+- The tokens are unescrowed and sent to the receiving address.
+
+2. Receiver chain is the sink chain. This is a movement forwards in the token's timeline. This results in the following state transitions:
+
+- Token vouchers are minted by prefixing the destination port and channel identifiers to the trace information.
+- The receiving chain stores the new trace information in the store (if not set already).
+- The vouchers are sent to the receiving address.
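+
+A simplified, illustrative sketch of the receive-side branch as handled by the keeper's
+`OnRecvPacket` (condensed; the keeper additionally stores the new trace if it is not set already):
+
+```go
+if types.ReceiverChainIsSource(packet.GetSourcePort(), packet.GetSourceChannel(), data.Denom) {
+	// backwards movement: strip the leftmost port/channel prefix and unescrow the tokens
+	unprefixedDenom := data.Denom[len(types.GetDenomPrefix(packet.GetSourcePort(), packet.GetSourceChannel())):]
+	token := sdk.NewCoin(types.ParseDenomTrace(unprefixedDenom).IBCDenom(), sdk.NewIntFromUint64(data.Amount))
+	escrowAddress := types.GetEscrowAddress(packet.GetDestPort(), packet.GetDestChannel())
+	return bankKeeper.SendCoins(ctx, escrowAddress, receiver, sdk.NewCoins(token))
+}
+
+// forwards movement: prefix with the destination port/channel, then mint and send vouchers
+denomTrace := types.ParseDenomTrace(types.GetPrefixedDenom(packet.GetDestPort(), packet.GetDestChannel(), data.Denom))
+voucher := sdk.NewCoin(denomTrace.IBCDenom(), sdk.NewIntFromUint64(data.Amount))
+if err := bankKeeper.MintCoins(ctx, types.ModuleName, sdk.NewCoins(voucher)); err != nil {
+	return err
+}
+return bankKeeper.SendCoinsFromModuleToAccount(ctx, types.ModuleName, receiver, sdk.NewCoins(voucher))
+```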
diff --git a/applications/transfer/spec/04_messages.md b/applications/transfer/spec/04_messages.md
new file mode 100644
index 00000000..9da7673e
--- /dev/null
+++ b/applications/transfer/spec/04_messages.md
@@ -0,0 +1,40 @@
+
+
+# Messages
+
+## MsgTransfer
+
+A fungible token cross chain transfer is achieved by using the `MsgTransfer`:
+
+```go
+type MsgTransfer struct {
+ SourcePort string
+ SourceChannel string
+ Token sdk.Coin
+ Sender string
+ Receiver string
+ TimeoutHeight ibcexported.Height
+ TimeoutTimestamp uint64
+}
+```
+
+This message is expected to fail if:
+
+- `SourcePort` is invalid (see 24-host naming requirements)
+- `SourceChannel` is invalid (see 24-host naming requirements)
+- `Token` is invalid (denom is invalid or amount is negative)
+- `Token.Amount` is not positive
+- `Sender` is empty
+- `Receiver` is empty
+- `TimeoutHeight` and `TimeoutTimestamp` are both zero
+- `Token.Denom` is not a valid IBC denomination as per [ADR 001 - Coin Source Tracing](./../../../../../docs/architecture/adr-001-coin-source-tracing.md).
+
+This message will send a fungible token to the counterparty chain represented
+by the counterparty Channel End connected to the Channel End with the identifiers
+`SourcePort` and `SourceChannel`.
+
+The denomination provided for transfer should correspond to the same denomination
+represented on this chain. The prefixes will be added as necessary by the
+receiving chain.
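+
+A minimal usage example (it mirrors the `NewMsgTransfer` calls in this patch's keeper tests; the
+port, channel, denomination and addresses below are placeholders):
+
+```go
+token := sdk.NewCoin("uatom", sdk.NewInt(100))
+timeoutHeight := clienttypes.NewHeight(0, 110) // revision 0, height 110
+
+msg := types.NewMsgTransfer(
+	"transfer",     // source port (placeholder)
+	"channel-0",    // source channel (placeholder)
+	token,
+	senderAddress,  // sdk.AccAddress of the sender on this chain (placeholder)
+	receiverBech32, // bech32 string of the receiver on the counterparty chain (placeholder)
+	timeoutHeight,
+	0,              // timeout timestamp disabled
+)
+if err := msg.ValidateBasic(); err != nil {
+	// one of the failure conditions listed above was hit
+}
+```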
diff --git a/applications/transfer/spec/05_events.md b/applications/transfer/spec/05_events.md
new file mode 100644
index 00000000..51b49da4
--- /dev/null
+++ b/applications/transfer/spec/05_events.md
@@ -0,0 +1,44 @@
+
+
+# Events
+
+## MsgTransfer
+
+| Type | Attribute Key | Attribute Value |
+|--------------|---------------|-----------------|
+| ibc_transfer | sender | {sender} |
+| ibc_transfer | receiver | {receiver} |
+| message | action | transfer |
+| message | module | transfer |
+
+## OnRecvPacket callback
+
+| Type | Attribute Key | Attribute Value |
+|-----------------------|---------------|-----------------|
+| fungible_token_packet | module | transfer |
+| fungible_token_packet | receiver | {receiver} |
+| fungible_token_packet | denom | {denom} |
+| fungible_token_packet | amount | {amount} |
+| fungible_token_packet | success | {ackSuccess} |
+| denomination_trace | trace_hash | {hex_hash} |
+
+## OnAcknowledgementPacket callback
+
+| Type | Attribute Key | Attribute Value |
+|-----------------------|-----------------|-------------------|
+| fungible_token_packet | module | transfer |
+| fungible_token_packet | receiver | {receiver} |
+| fungible_token_packet | denom | {denom} |
+| fungible_token_packet | amount | {amount} |
+| fungible_token_packet | success \| error | {ack.Response}  |
+
+## OnTimeoutPacket callback
+
+| Type | Attribute Key | Attribute Value |
+|-----------------------|-----------------|-----------------|
+| fungible_token_packet | module | transfer |
+| fungible_token_packet | refund_receiver | {receiver} |
+| fungible_token_packet | denom | {denom} |
+| fungible_token_packet | amount | {amount} |
diff --git a/applications/transfer/spec/06_metrics.md b/applications/transfer/spec/06_metrics.md
new file mode 100644
index 00000000..21bb51c0
--- /dev/null
+++ b/applications/transfer/spec/06_metrics.md
@@ -0,0 +1,14 @@
+
+
+# Metrics
+
+The transfer IBC application module exposes the following set of [metrics](./../../../../../docs/core/telemetry.md).
+
+| Metric | Description | Unit | Type |
+|:--------------------------------|:------------------------------------------------------------------------------------------|:----------------|:--------|
+| `tx_msg_ibc_transfer` | The total amount of tokens transferred via IBC in a `MsgTransfer` (source or sink chain) | token | gauge |
+| `ibc_transfer_packet_receive` | The total amount of tokens received in a `FungibleTokenPacketData` (source or sink chain) | token | gauge |
+| `ibc_transfer_send` | Total number of IBC transfers sent from a chain (source or sink) | transfer | counter |
+| `ibc_transfer_receive` | Total number of IBC transfers received to a chain (source or sink) | transfer | counter |
diff --git a/applications/transfer/spec/07_params.md b/applications/transfer/spec/07_params.md
new file mode 100644
index 00000000..8d2b97c5
--- /dev/null
+++ b/applications/transfer/spec/07_params.md
@@ -0,0 +1,30 @@
+
+
+# Parameters
+
+The ibc-transfer module contains the following parameters:
+
+| Key | Type | Default Value |
+|------------------|------|---------------|
+| `SendEnabled` | bool | `true` |
+| `ReceiveEnabled` | bool | `true` |
+
+## SendEnabled
+
+The `SendEnabled` parameter controls send cross-chain transfer capabilities for all fungible
+tokens.
+
+To prevent a single token from being transferred from the chain, set the `SendEnabled` parameter to `true` and
+then set the bank module's [`SendEnabled` parameter](./../../../../bank/spec/05_params.md#sendenabled) for
+the denomination to `false`.
+
+## ReceiveEnabled
+
+The `ReceiveEnabled` parameter controls receive cross-chain transfer capabilities for all fungible
+tokens.
+
+To prevent a single token from being transferred to the chain, set the `ReceiveEnabled` parameter to `true` and
+then set the bank module's [`SendEnabled` parameter](./../../../../bank/spec/05_params.md#sendenabled) for
+the denomination to `false`.
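+
+A hedged sketch of setting both parameters at genesis using the helpers added in this patch (the
+surrounding genesis wiring is illustrative):
+
+```go
+// Keep outgoing transfers enabled but disable incoming transfers chain-wide.
+params := types.NewParams(true, false) // SendEnabled=true, ReceiveEnabled=false
+
+genesisState := types.GenesisState{
+	PortId:      types.PortID,
+	DenomTraces: types.Traces{},
+	Params:      params,
+}
+```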
diff --git a/applications/transfer/spec/README.md b/applications/transfer/spec/README.md
new file mode 100644
index 00000000..5230fdde
--- /dev/null
+++ b/applications/transfer/spec/README.md
@@ -0,0 +1,24 @@
+
+
+# `ibc-transfer`
+
+## Abstract
+
+This document defines the implementation of the ICS20 protocol on the Cosmos SDK.
+
+For the general specification please refer to the [ICS20 Specification](https://github.com/cosmos/ics/tree/master/spec/ics-020-fungible-token-transfer).
+
+## Contents
+
+1. **[Concepts](01_concepts.md)**
+2. **[State](02_state.md)**
+3. **[State Transitions](03_state_transitions.md)**
+4. **[Messages](04_messages.md)**
+5. **[Events](05_events.md)**
+6. **[Metrics](06_metrics.md)**
+7. **[Parameters](07_params.md)**
diff --git a/applications/transfer/types/codec.go b/applications/transfer/types/codec.go
new file mode 100644
index 00000000..24ad7e5a
--- /dev/null
+++ b/applications/transfer/types/codec.go
@@ -0,0 +1,41 @@
+package types
+
+import (
+ "github.com/cosmos/cosmos-sdk/codec"
+ codectypes "github.com/cosmos/cosmos-sdk/codec/types"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/types/msgservice"
+)
+
+// RegisterLegacyAminoCodec registers the necessary x/ibc transfer interfaces and concrete types
+// on the provided LegacyAmino codec. These types are used for Amino JSON serialization.
+func RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) {
+ cdc.RegisterConcrete(&MsgTransfer{}, "cosmos-sdk/MsgTransfer", nil)
+}
+
+// RegisterInterfaces register the ibc transfer module interfaces to protobuf
+// Any.
+func RegisterInterfaces(registry codectypes.InterfaceRegistry) {
+ registry.RegisterImplementations((*sdk.Msg)(nil), &MsgTransfer{})
+
+ msgservice.RegisterMsgServiceDesc(registry, &_Msg_serviceDesc)
+}
+
+var (
+ amino = codec.NewLegacyAmino()
+
+ // ModuleCdc references the global x/ibc-transfer module codec. Note, the codec
+ // should ONLY be used in certain instances of tests and for JSON encoding.
+ //
+ // The actual codec used for serialization should be provided to x/ibc transfer and
+ // defined at the application level.
+ ModuleCdc = codec.NewProtoCodec(codectypes.NewInterfaceRegistry())
+
+	// AminoCdc is an amino codec created to support amino JSON compatible msgs.
+ AminoCdc = codec.NewAminoCodec(amino)
+)
+
+func init() {
+ RegisterLegacyAminoCodec(amino)
+ amino.Seal()
+}
diff --git a/applications/transfer/types/coin.go b/applications/transfer/types/coin.go
new file mode 100644
index 00000000..08ae9a8d
--- /dev/null
+++ b/applications/transfer/types/coin.go
@@ -0,0 +1,48 @@
+package types
+
+import (
+ "fmt"
+ "strings"
+
+ sdk "github.com/cosmos/cosmos-sdk/types"
+)
+
+// SenderChainIsSource returns false if the denomination originally came
+// from the receiving chain and true otherwise.
+func SenderChainIsSource(sourcePort, sourceChannel, denom string) bool {
+ // The denomination carries the voucher prefix (source port and channel) on
+ // the sender chain if and only if the token originally came from the
+ // receiving chain.
+ return !ReceiverChainIsSource(sourcePort, sourceChannel, denom)
+}
+
+// ReceiverChainIsSource returns true if the denomination originally came
+// from the receiving chain and false otherwise.
+func ReceiverChainIsSource(sourcePort, sourceChannel, denom string) bool {
+ // If the receiving chain originally sent the token to the sender chain, the
+ // denom will carry the sender's SourcePort and SourceChannel as its prefix.
+ voucherPrefix := GetDenomPrefix(sourcePort, sourceChannel)
+ return strings.HasPrefix(denom, voucherPrefix)
+}
+
+// GetDenomPrefix returns the receiving denomination prefix
+func GetDenomPrefix(portID, channelID string) string {
+ return fmt.Sprintf("%s/%s/", portID, channelID)
+}
+
+// GetPrefixedDenom returns the denomination with the portID and channelID prefixed
+func GetPrefixedDenom(portID, channelID, baseDenom string) string {
+ return fmt.Sprintf("%s/%s/%s", portID, channelID, baseDenom)
+}
+
+// GetTransferCoin creates a transfer coin with the port ID and channel ID
+// prefixed to the base denom.
+func GetTransferCoin(portID, channelID, baseDenom string, amount int64) sdk.Coin {
+ denomTrace := ParseDenomTrace(GetPrefixedDenom(portID, channelID, baseDenom))
+ return sdk.NewInt64Coin(denomTrace.IBCDenom(), amount)
+}
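+
+// As an illustrative sketch, for port "transfer" and channel "channel-0":
+//
+//	GetPrefixedDenom("transfer", "channel-0", "uatom")                         // "transfer/channel-0/uatom"
+//	ReceiverChainIsSource("transfer", "channel-0", "transfer/channel-0/uatom") // true: the voucher prefix matches
+//	SenderChainIsSource("transfer", "channel-0", "uatom")                      // true: no voucher prefix, the token is native to the sender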
diff --git a/applications/transfer/types/errors.go b/applications/transfer/types/errors.go
new file mode 100644
index 00000000..07cba194
--- /dev/null
+++ b/applications/transfer/types/errors.go
@@ -0,0 +1,17 @@
+package types
+
+import (
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+)
+
+// IBC channel sentinel errors
+var (
+ ErrInvalidPacketTimeout = sdkerrors.Register(ModuleName, 2, "invalid packet timeout")
+ ErrInvalidDenomForTransfer = sdkerrors.Register(ModuleName, 3, "invalid denomination for cross-chain transfer")
+ ErrInvalidVersion = sdkerrors.Register(ModuleName, 4, "invalid ICS20 version")
+ ErrInvalidAmount = sdkerrors.Register(ModuleName, 5, "invalid token amount")
+ ErrTraceNotFound = sdkerrors.Register(ModuleName, 6, "denomination trace not found")
+ ErrSendDisabled = sdkerrors.Register(ModuleName, 7, "fungible token transfers from this chain are disabled")
+ ErrReceiveDisabled = sdkerrors.Register(ModuleName, 8, "fungible token transfers to this chain are disabled")
+ ErrMaxTransferChannels = sdkerrors.Register(ModuleName, 9, "max transfer channels")
+)
diff --git a/applications/transfer/types/events.go b/applications/transfer/types/events.go
new file mode 100644
index 00000000..a3ed5b41
--- /dev/null
+++ b/applications/transfer/types/events.go
@@ -0,0 +1,21 @@
+package types
+
+// IBC transfer events
+const (
+ EventTypeTimeout = "timeout"
+ EventTypePacket = "fungible_token_packet"
+ EventTypeTransfer = "ibc_transfer"
+ EventTypeChannelClose = "channel_closed"
+ EventTypeDenomTrace = "denomination_trace"
+
+ AttributeKeyReceiver = "receiver"
+ AttributeKeyDenom = "denom"
+ AttributeKeyAmount = "amount"
+ AttributeKeyRefundReceiver = "refund_receiver"
+ AttributeKeyRefundDenom = "refund_denom"
+ AttributeKeyRefundAmount = "refund_amount"
+ AttributeKeyAckSuccess = "success"
+ AttributeKeyAck = "acknowledgement"
+ AttributeKeyAckError = "error"
+ AttributeKeyTraceHash = "trace_hash"
+)
diff --git a/applications/transfer/types/expected_keepers.go b/applications/transfer/types/expected_keepers.go
new file mode 100644
index 00000000..28446335
--- /dev/null
+++ b/applications/transfer/types/expected_keepers.go
@@ -0,0 +1,48 @@
+package types
+
+import (
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/x/auth/types"
+ capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"
+ connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
+ channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+ ibcexported "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+)
+
+// AccountKeeper defines the contract required for account APIs.
+type AccountKeeper interface {
+ GetModuleAddress(name string) sdk.AccAddress
+ GetModuleAccount(ctx sdk.Context, name string) types.ModuleAccountI
+}
+
+// BankKeeper defines the expected bank keeper
+type BankKeeper interface {
+ SendCoins(ctx sdk.Context, fromAddr sdk.AccAddress, toAddr sdk.AccAddress, amt sdk.Coins) error
+ MintCoins(ctx sdk.Context, moduleName string, amt sdk.Coins) error
+ BurnCoins(ctx sdk.Context, moduleName string, amt sdk.Coins) error
+ SendCoinsFromModuleToAccount(ctx sdk.Context, senderModule string, recipientAddr sdk.AccAddress, amt sdk.Coins) error
+ SendCoinsFromAccountToModule(ctx sdk.Context, senderAddr sdk.AccAddress, recipientModule string, amt sdk.Coins) error
+}
+
+// ChannelKeeper defines the expected IBC channel keeper
+type ChannelKeeper interface {
+ GetChannel(ctx sdk.Context, srcPort, srcChan string) (channel channeltypes.Channel, found bool)
+ GetNextSequenceSend(ctx sdk.Context, portID, channelID string) (uint64, bool)
+ SendPacket(ctx sdk.Context, channelCap *capabilitytypes.Capability, packet ibcexported.PacketI) error
+ ChanCloseInit(ctx sdk.Context, portID, channelID string, chanCap *capabilitytypes.Capability) error
+}
+
+// ClientKeeper defines the expected IBC client keeper
+type ClientKeeper interface {
+ GetClientConsensusState(ctx sdk.Context, clientID string) (connection ibcexported.ConsensusState, found bool)
+}
+
+// ConnectionKeeper defines the expected IBC connection keeper
+type ConnectionKeeper interface {
+ GetConnection(ctx sdk.Context, connectionID string) (connection connectiontypes.ConnectionEnd, found bool)
+}
+
+// PortKeeper defines the expected IBC port keeper
+type PortKeeper interface {
+ BindPort(ctx sdk.Context, portID string) *capabilitytypes.Capability
+}
diff --git a/applications/transfer/types/genesis.go b/applications/transfer/types/genesis.go
new file mode 100644
index 00000000..682b04c4
--- /dev/null
+++ b/applications/transfer/types/genesis.go
@@ -0,0 +1,35 @@
+package types
+
+import (
+ host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+)
+
+// NewGenesisState creates a new ibc-transfer GenesisState instance.
+func NewGenesisState(portID string, denomTraces Traces, params Params) *GenesisState {
+ return &GenesisState{
+ PortId: portID,
+ DenomTraces: denomTraces,
+ Params: params,
+ }
+}
+
+// DefaultGenesisState returns a GenesisState with "transfer" as the default PortID.
+func DefaultGenesisState() *GenesisState {
+ return &GenesisState{
+ PortId: PortID,
+ DenomTraces: Traces{},
+ Params: DefaultParams(),
+ }
+}
+
+// Validate performs basic genesis state validation returning an error upon any
+// failure.
+func (gs GenesisState) Validate() error {
+ if err := host.PortIdentifierValidator(gs.PortId); err != nil {
+ return err
+ }
+ if err := gs.DenomTraces.Validate(); err != nil {
+ return err
+ }
+ return gs.Params.Validate()
+}
diff --git a/applications/transfer/types/genesis.pb.go b/applications/transfer/types/genesis.pb.go
new file mode 100644
index 00000000..3ae0442f
--- /dev/null
+++ b/applications/transfer/types/genesis.pb.go
@@ -0,0 +1,443 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: ibc/applications/transfer/v1/genesis.proto
+
+package types
+
+import (
+ fmt "fmt"
+ _ "github.com/gogo/protobuf/gogoproto"
+ proto "github.com/gogo/protobuf/proto"
+ io "io"
+ math "math"
+ math_bits "math/bits"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+// GenesisState defines the ibc-transfer genesis state
+type GenesisState struct {
+ PortId string `protobuf:"bytes,1,opt,name=port_id,json=portId,proto3" json:"port_id,omitempty" yaml:"port_id"`
+ DenomTraces Traces `protobuf:"bytes,2,rep,name=denom_traces,json=denomTraces,proto3,castrepeated=Traces" json:"denom_traces" yaml:"denom_traces"`
+ Params Params `protobuf:"bytes,3,opt,name=params,proto3" json:"params"`
+}
+
+func (m *GenesisState) Reset() { *m = GenesisState{} }
+func (m *GenesisState) String() string { return proto.CompactTextString(m) }
+func (*GenesisState) ProtoMessage() {}
+func (*GenesisState) Descriptor() ([]byte, []int) {
+ return fileDescriptor_a4f788affd5bea89, []int{0}
+}
+func (m *GenesisState) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *GenesisState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_GenesisState.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *GenesisState) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GenesisState.Merge(m, src)
+}
+func (m *GenesisState) XXX_Size() int {
+ return m.Size()
+}
+func (m *GenesisState) XXX_DiscardUnknown() {
+ xxx_messageInfo_GenesisState.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GenesisState proto.InternalMessageInfo
+
+func (m *GenesisState) GetPortId() string {
+ if m != nil {
+ return m.PortId
+ }
+ return ""
+}
+
+func (m *GenesisState) GetDenomTraces() Traces {
+ if m != nil {
+ return m.DenomTraces
+ }
+ return nil
+}
+
+func (m *GenesisState) GetParams() Params {
+ if m != nil {
+ return m.Params
+ }
+ return Params{}
+}
+
+func init() {
+ proto.RegisterType((*GenesisState)(nil), "ibc.applications.transfer.v1.GenesisState")
+}
+
+func init() {
+ proto.RegisterFile("ibc/applications/transfer/v1/genesis.proto", fileDescriptor_a4f788affd5bea89)
+}
+
+var fileDescriptor_a4f788affd5bea89 = []byte{
+ // 317 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0xca, 0x4c, 0x4a, 0xd6,
+ 0x4f, 0x2c, 0x28, 0xc8, 0xc9, 0x4c, 0x4e, 0x2c, 0xc9, 0xcc, 0xcf, 0x2b, 0xd6, 0x2f, 0x29, 0x4a,
+ 0xcc, 0x2b, 0x4e, 0x4b, 0x2d, 0xd2, 0x2f, 0x33, 0xd4, 0x4f, 0x4f, 0xcd, 0x4b, 0x2d, 0xce, 0x2c,
+ 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x92, 0xc9, 0x4c, 0x4a, 0xd6, 0x43, 0x56, 0xab, 0x07,
+ 0x53, 0xab, 0x57, 0x66, 0x28, 0x25, 0x92, 0x9e, 0x9f, 0x9e, 0x0f, 0x56, 0xa8, 0x0f, 0x62, 0x41,
+ 0xf4, 0x48, 0x69, 0xe3, 0x35, 0x1f, 0xae, 0x1f, 0xac, 0x58, 0xe9, 0x33, 0x23, 0x17, 0x8f, 0x3b,
+ 0xc4, 0xca, 0xe0, 0x92, 0xc4, 0x92, 0x54, 0x21, 0x6d, 0x2e, 0xf6, 0x82, 0xfc, 0xa2, 0x92, 0xf8,
+ 0xcc, 0x14, 0x09, 0x46, 0x05, 0x46, 0x0d, 0x4e, 0x27, 0xa1, 0x4f, 0xf7, 0xe4, 0xf9, 0x2a, 0x13,
+ 0x73, 0x73, 0xac, 0x94, 0xa0, 0x12, 0x4a, 0x41, 0x6c, 0x20, 0x96, 0x67, 0x8a, 0x50, 0x11, 0x17,
+ 0x4f, 0x4a, 0x6a, 0x5e, 0x7e, 0x6e, 0x7c, 0x49, 0x51, 0x62, 0x72, 0x6a, 0xb1, 0x04, 0x93, 0x02,
+ 0xb3, 0x06, 0xb7, 0x91, 0x86, 0x1e, 0x3e, 0x57, 0xeb, 0xb9, 0x80, 0x74, 0x84, 0x80, 0x34, 0x38,
+ 0xa9, 0x9e, 0xb8, 0x27, 0xcf, 0xf0, 0xe9, 0x9e, 0xbc, 0x30, 0xc4, 0x7c, 0x64, 0xb3, 0x94, 0x56,
+ 0xdd, 0x97, 0x67, 0x03, 0xab, 0x2a, 0x0e, 0xe2, 0x4e, 0x81, 0x6b, 0x29, 0x16, 0x72, 0xe2, 0x62,
+ 0x2b, 0x48, 0x2c, 0x4a, 0xcc, 0x2d, 0x96, 0x60, 0x56, 0x60, 0xd4, 0xe0, 0x36, 0x52, 0xc1, 0x6f,
+ 0x5b, 0x00, 0x58, 0xad, 0x13, 0x0b, 0xc8, 0xa6, 0x20, 0xa8, 0x4e, 0xa7, 0x88, 0x13, 0x8f, 0xe4,
+ 0x18, 0x2f, 0x3c, 0x92, 0x63, 0x7c, 0xf0, 0x48, 0x8e, 0x71, 0xc2, 0x63, 0x39, 0x86, 0x0b, 0x8f,
+ 0xe5, 0x18, 0x6e, 0x3c, 0x96, 0x63, 0x88, 0xb2, 0x4b, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b,
+ 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0x2f, 0xce, 0xcd, 0x2f, 0x86, 0x52, 0xba, 0xc5, 0x29, 0xd9, 0xfa,
+ 0x15, 0xfa, 0xb8, 0xc3, 0xb6, 0xa4, 0xb2, 0x20, 0xb5, 0x38, 0x89, 0x0d, 0x1c, 0xac, 0xc6, 0x80,
+ 0x00, 0x00, 0x00, 0xff, 0xff, 0xda, 0xbb, 0x81, 0x1e, 0xe5, 0x01, 0x00, 0x00,
+}
+
+func (m *GenesisState) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *GenesisState) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Params.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenesis(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ if len(m.DenomTraces) > 0 {
+ for iNdEx := len(m.DenomTraces) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.DenomTraces[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenesis(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if len(m.PortId) > 0 {
+ i -= len(m.PortId)
+ copy(dAtA[i:], m.PortId)
+ i = encodeVarintGenesis(dAtA, i, uint64(len(m.PortId)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintGenesis(dAtA []byte, offset int, v uint64) int {
+ offset -= sovGenesis(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *GenesisState) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.PortId)
+ if l > 0 {
+ n += 1 + l + sovGenesis(uint64(l))
+ }
+ if len(m.DenomTraces) > 0 {
+ for _, e := range m.DenomTraces {
+ l = e.Size()
+ n += 1 + l + sovGenesis(uint64(l))
+ }
+ }
+ l = m.Params.Size()
+ n += 1 + l + sovGenesis(uint64(l))
+ return n
+}
+
+func sovGenesis(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozGenesis(x uint64) (n int) {
+ return sovGenesis(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *GenesisState) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenesis
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: GenesisState: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: GenesisState: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PortId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenesis
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenesis
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenesis
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.PortId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DenomTraces", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenesis
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenesis
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenesis
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.DenomTraces = append(m.DenomTraces, DenomTrace{})
+ if err := m.DenomTraces[len(m.DenomTraces)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenesis
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenesis
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenesis
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenesis(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenesis
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipGenesis(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenesis
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenesis
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenesis
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthGenesis
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupGenesis
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthGenesis
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthGenesis = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowGenesis = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupGenesis = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/applications/transfer/types/genesis_test.go b/applications/transfer/types/genesis_test.go
new file mode 100644
index 00000000..a2aba58c
--- /dev/null
+++ b/applications/transfer/types/genesis_test.go
@@ -0,0 +1,47 @@
+package types_test
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types"
+)
+
+func TestValidateGenesis(t *testing.T) {
+ testCases := []struct {
+ name string
+ genState *types.GenesisState
+ expPass bool
+ }{
+ {
+ name: "default",
+ genState: types.DefaultGenesisState(),
+ expPass: true,
+ },
+ {
+ "valid genesis",
+ &types.GenesisState{
+ PortId: "portidone",
+ },
+ true,
+ },
+ {
+ "invalid client",
+ &types.GenesisState{
+ PortId: "(INVALIDPORT)",
+ },
+ false,
+ },
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+ err := tc.genState.Validate()
+ if tc.expPass {
+ require.NoError(t, err, tc.name)
+ } else {
+ require.Error(t, err, tc.name)
+ }
+ }
+}
diff --git a/applications/transfer/types/keys.go b/applications/transfer/types/keys.go
new file mode 100644
index 00000000..c156af3f
--- /dev/null
+++ b/applications/transfer/types/keys.go
@@ -0,0 +1,55 @@
+package types
+
+import (
+ "crypto/sha256"
+ "fmt"
+
+ sdk "github.com/cosmos/cosmos-sdk/types"
+)
+
+const (
+ // ModuleName defines the IBC transfer name
+ ModuleName = "transfer"
+
+ // Version defines the current version the IBC transfer
+ // module supports
+ Version = "ics20-1"
+
+ // PortID is the default port id that transfer module binds to
+ PortID = "transfer"
+
+ // StoreKey is the store key string for IBC transfer
+ StoreKey = ModuleName
+
+ // RouterKey is the message route for IBC transfer
+ RouterKey = ModuleName
+
+ // QuerierRoute is the querier route for IBC transfer
+ QuerierRoute = ModuleName
+
+ // DenomPrefix is the prefix used for internal SDK coin representation.
+ DenomPrefix = "ibc"
+)
+
+var (
+ // PortKey defines the key to store the port ID in store
+ PortKey = []byte{0x01}
+ // DenomTraceKey defines the key to store the denomination trace info in store
+ DenomTraceKey = []byte{0x02}
+)
+
+// GetEscrowAddress returns the escrow address for the specified channel.
+// The escrow address follows the format as outlined in ADR 028:
+// https://github.com/cosmos/cosmos-sdk/blob/master/docs/architecture/adr-028-public-key-addresses.md
+func GetEscrowAddress(portID, channelID string) sdk.AccAddress {
+ // a slash is used to create domain separation between port and channel identifiers to
+ // prevent address collisions between escrow addresses created for different channels
+ contents := fmt.Sprintf("%s/%s", portID, channelID)
+
+ // ADR 028 AddressHash construction
+ preImage := []byte(Version)
+ preImage = append(preImage, 0)
+ preImage = append(preImage, contents...)
+ hash := sha256.Sum256(preImage)
+ return hash[:20]
+}
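+
+// For example, GetEscrowAddress("transfer", "channel-0") hashes the preimage
+// "ics20-1" || 0x00 || "transfer/channel-0" with SHA-256 and keeps the first
+// 20 bytes of the digest as the escrow account address.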
diff --git a/applications/transfer/types/keys_test.go b/applications/transfer/types/keys_test.go
new file mode 100644
index 00000000..9ab3314c
--- /dev/null
+++ b/applications/transfer/types/keys_test.go
@@ -0,0 +1,24 @@
+package types_test
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types"
+)
+
+// Test that there is domain separation between the port id and the channel id; otherwise an
+// escrow address may overlap with another channel end.
+func TestGetEscrowAddress(t *testing.T) {
+ var (
+ port1 = "transfer"
+ channel1 = "channel"
+ port2 = "transfercha"
+ channel2 = "nnel"
+ )
+
+ escrow1 := types.GetEscrowAddress(port1, channel1)
+ escrow2 := types.GetEscrowAddress(port2, channel2)
+ require.NotEqual(t, escrow1, escrow2)
+}
diff --git a/applications/transfer/types/msgs.go b/applications/transfer/types/msgs.go
new file mode 100644
index 00000000..cf229321
--- /dev/null
+++ b/applications/transfer/types/msgs.go
@@ -0,0 +1,85 @@
+package types
+
+import (
+ "strings"
+
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+)
+
+// msg types
+const (
+ TypeMsgTransfer = "transfer"
+)
+
+// NewMsgTransfer creates a new MsgTransfer instance
+//nolint:interfacer
+func NewMsgTransfer(
+ sourcePort, sourceChannel string,
+ token sdk.Coin, sender sdk.AccAddress, receiver string,
+ timeoutHeight clienttypes.Height, timeoutTimestamp uint64,
+) *MsgTransfer {
+ return &MsgTransfer{
+ SourcePort: sourcePort,
+ SourceChannel: sourceChannel,
+ Token: token,
+ Sender: sender.String(),
+ Receiver: receiver,
+ TimeoutHeight: timeoutHeight,
+ TimeoutTimestamp: timeoutTimestamp,
+ }
+}
+
+// Route implements sdk.Msg
+func (MsgTransfer) Route() string {
+ return RouterKey
+}
+
+// Type implements sdk.Msg
+func (MsgTransfer) Type() string {
+ return TypeMsgTransfer
+}
+
+// ValidateBasic performs a basic check of the MsgTransfer fields.
+// NOTE: timeout height or timestamp values can be 0 to disable the timeout.
+// NOTE: The recipient address format is not validated because the format defined by
+// the receiving chain is not known to IBC.
+func (msg MsgTransfer) ValidateBasic() error {
+ if err := host.PortIdentifierValidator(msg.SourcePort); err != nil {
+ return sdkerrors.Wrap(err, "invalid source port ID")
+ }
+ if err := host.ChannelIdentifierValidator(msg.SourceChannel); err != nil {
+ return sdkerrors.Wrap(err, "invalid source channel ID")
+ }
+ if !msg.Token.IsValid() {
+ return sdkerrors.Wrap(sdkerrors.ErrInvalidCoins, msg.Token.String())
+ }
+ if !msg.Token.IsPositive() {
+ return sdkerrors.Wrap(sdkerrors.ErrInsufficientFunds, msg.Token.String())
+ }
+ // NOTE: sender format must be validated as it is required by the GetSigners function.
+ _, err := sdk.AccAddressFromBech32(msg.Sender)
+ if err != nil {
+ return sdkerrors.Wrapf(sdkerrors.ErrInvalidAddress, "string could not be parsed as address: %v", err)
+ }
+ if strings.TrimSpace(msg.Receiver) == "" {
+ return sdkerrors.Wrap(sdkerrors.ErrInvalidAddress, "missing recipient address")
+ }
+ return ValidateIBCDenom(msg.Token.Denom)
+}
+
+// GetSignBytes implements sdk.Msg.
+func (msg MsgTransfer) GetSignBytes() []byte {
+ return sdk.MustSortJSON(AminoCdc.MustMarshalJSON(&msg))
+}
+
+// GetSigners implements sdk.Msg
+func (msg MsgTransfer) GetSigners() []sdk.AccAddress {
+ valAddr, err := sdk.AccAddressFromBech32(msg.Sender)
+ if err != nil {
+ panic(err)
+ }
+ return []sdk.AccAddress{valAddr}
+}
diff --git a/applications/transfer/types/msgs_test.go b/applications/transfer/types/msgs_test.go
new file mode 100644
index 00000000..1fc70c54
--- /dev/null
+++ b/applications/transfer/types/msgs_test.go
@@ -0,0 +1,103 @@
+package types
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/cosmos/cosmos-sdk/crypto/keys/secp256k1"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+)
+
+// define constants used for testing
+const (
+ validPort = "testportid"
+ invalidPort = "(invalidport1)"
+ invalidShortPort = "p"
+ invalidLongPort = "invalidlongportinvalidlongportinvalidlongportinvalidlongportinvalid"
+
+ validChannel = "testchannel"
+ invalidChannel = "(invalidchannel1)"
+ invalidShortChannel = "invalid"
+ invalidLongChannel = "invalidlongchannelinvalidlongchannelinvalidlongchannelinvalidlongchannel"
+)
+
+var (
+ addr1 = sdk.AccAddress(secp256k1.GenPrivKey().PubKey().Address())
+ addr2 = sdk.AccAddress("testaddr2").String()
+ emptyAddr sdk.AccAddress
+
+ coin = sdk.NewCoin("atom", sdk.NewInt(100))
+ ibcCoin = sdk.NewCoin("ibc/7F1D3FCF4AE79E1554D670D1AD949A9BA4E4A3C76C63093E17E446A46061A7A2", sdk.NewInt(100))
+ invalidIBCCoin = sdk.NewCoin("notibc/7F1D3FCF4AE79E1554D670D1AD949A9BA4E4A3C76C63093E17E446A46061A7A2", sdk.NewInt(100))
+ invalidDenomCoin = sdk.Coin{Denom: "0atom", Amount: sdk.NewInt(100)}
+ zeroCoin = sdk.Coin{Denom: "atoms", Amount: sdk.NewInt(0)}
+
+ timeoutHeight = clienttypes.NewHeight(0, 10)
+)
+
+// TestMsgTransferRoute tests Route for MsgTransfer
+func TestMsgTransferRoute(t *testing.T) {
+ msg := NewMsgTransfer(validPort, validChannel, coin, addr1, addr2, timeoutHeight, 0)
+
+ require.Equal(t, RouterKey, msg.Route())
+}
+
+// TestMsgTransferType tests Type for MsgTransfer
+func TestMsgTransferType(t *testing.T) {
+ msg := NewMsgTransfer(validPort, validChannel, coin, addr1, addr2, timeoutHeight, 0)
+
+ require.Equal(t, "transfer", msg.Type())
+}
+
+func TestMsgTransferGetSignBytes(t *testing.T) {
+ msg := NewMsgTransfer(validPort, validChannel, coin, addr1, addr2, timeoutHeight, 0)
+ expected := fmt.Sprintf(`{"type":"cosmos-sdk/MsgTransfer","value":{"receiver":"%s","sender":"%s","source_channel":"testchannel","source_port":"testportid","timeout_height":{"revision_height":"10"},"token":{"amount":"100","denom":"atom"}}}`, addr2, addr1)
+ require.NotPanics(t, func() {
+ res := msg.GetSignBytes()
+ require.Equal(t, expected, string(res))
+ })
+}
+
+// TestMsgTransferValidation tests ValidateBasic for MsgTransfer
+func TestMsgTransferValidation(t *testing.T) {
+ testCases := []struct {
+ name string
+ msg *MsgTransfer
+ expPass bool
+ }{
+ {"valid msg with base denom", NewMsgTransfer(validPort, validChannel, coin, addr1, addr2, timeoutHeight, 0), true},
+ {"valid msg with trace hash", NewMsgTransfer(validPort, validChannel, ibcCoin, addr1, addr2, timeoutHeight, 0), true},
+ {"invalid ibc denom", NewMsgTransfer(validPort, validChannel, invalidIBCCoin, addr1, addr2, timeoutHeight, 0), false},
+ {"too short port id", NewMsgTransfer(invalidShortPort, validChannel, coin, addr1, addr2, timeoutHeight, 0), false},
+ {"too long port id", NewMsgTransfer(invalidLongPort, validChannel, coin, addr1, addr2, timeoutHeight, 0), false},
+ {"port id contains non-alpha", NewMsgTransfer(invalidPort, validChannel, coin, addr1, addr2, timeoutHeight, 0), false},
+ {"too short channel id", NewMsgTransfer(validPort, invalidShortChannel, coin, addr1, addr2, timeoutHeight, 0), false},
+ {"too long channel id", NewMsgTransfer(validPort, invalidLongChannel, coin, addr1, addr2, timeoutHeight, 0), false},
+ {"channel id contains non-alpha", NewMsgTransfer(validPort, invalidChannel, coin, addr1, addr2, timeoutHeight, 0), false},
+ {"invalid denom", NewMsgTransfer(validPort, validChannel, invalidDenomCoin, addr1, addr2, timeoutHeight, 0), false},
+ {"zero coin", NewMsgTransfer(validPort, validChannel, zeroCoin, addr1, addr2, timeoutHeight, 0), false},
+ {"missing sender address", NewMsgTransfer(validPort, validChannel, coin, emptyAddr, addr2, timeoutHeight, 0), false},
+ {"missing recipient address", NewMsgTransfer(validPort, validChannel, coin, addr1, "", timeoutHeight, 0), false},
+ {"empty coin", NewMsgTransfer(validPort, validChannel, sdk.Coin{}, addr1, addr2, timeoutHeight, 0), false},
+ }
+
+ for i, tc := range testCases {
+ err := tc.msg.ValidateBasic()
+ if tc.expPass {
+ require.NoError(t, err, "valid test case %d failed: %s", i, tc.name)
+ } else {
+ require.Error(t, err, "invalid test case %d passed: %s", i, tc.name)
+ }
+ }
+}
+
+// TestMsgTransferGetSigners tests GetSigners for MsgTransfer
+func TestMsgTransferGetSigners(t *testing.T) {
+ msg := NewMsgTransfer(validPort, validChannel, coin, addr1, addr2, timeoutHeight, 0)
+ res := msg.GetSigners()
+
+ require.Equal(t, []sdk.AccAddress{addr1}, res)
+}
diff --git a/applications/transfer/types/packet.go b/applications/transfer/types/packet.go
new file mode 100644
index 00000000..d726577f
--- /dev/null
+++ b/applications/transfer/types/packet.go
@@ -0,0 +1,56 @@
+package types
+
+import (
+ "strings"
+ "time"
+
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+)
+
+var (
+ // DefaultRelativePacketTimeoutHeight is the default packet timeout height (in blocks) relative
+ // to the current block height of the counterparty chain provided by the client state. The
+ // timeout is disabled when set to 0.
+ DefaultRelativePacketTimeoutHeight = "0-1000"
+
+ // DefaultRelativePacketTimeoutTimestamp is the default packet timeout timestamp (in nanoseconds)
+ // relative to the current block timestamp of the counterparty chain provided by the client
+ // state. The timeout is disabled when set to 0. The default is currently set to a 10 minute
+ // timeout.
+ DefaultRelativePacketTimeoutTimestamp = uint64((time.Duration(10) * time.Minute).Nanoseconds())
+)
+
+// NewFungibleTokenPacketData constructs a new FungibleTokenPacketData instance
+func NewFungibleTokenPacketData(
+ denom string, amount uint64,
+ sender, receiver string,
+) FungibleTokenPacketData {
+ return FungibleTokenPacketData{
+ Denom: denom,
+ Amount: amount,
+ Sender: sender,
+ Receiver: receiver,
+ }
+}
+
+// ValidateBasic is used for validating the token transfer.
+// NOTE: The address formats are not validated because the sender and recipient can have different
+// formats defined by their corresponding chains, which are not known to IBC.
+func (ftpd FungibleTokenPacketData) ValidateBasic() error {
+ if ftpd.Amount == 0 {
+ return sdkerrors.Wrap(ErrInvalidAmount, "amount cannot be 0")
+ }
+ if strings.TrimSpace(ftpd.Sender) == "" {
+ return sdkerrors.Wrap(sdkerrors.ErrInvalidAddress, "sender address cannot be blank")
+ }
+ if strings.TrimSpace(ftpd.Receiver) == "" {
+ return sdkerrors.Wrap(sdkerrors.ErrInvalidAddress, "receiver address cannot be blank")
+ }
+ return ValidatePrefixedDenom(ftpd.Denom)
+}
+
+// GetBytes is a helper for serialising the packet data into sorted JSON bytes
+func (ftpd FungibleTokenPacketData) GetBytes() []byte {
+ return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&ftpd))
+}
diff --git a/applications/transfer/types/packet_test.go b/applications/transfer/types/packet_test.go
new file mode 100644
index 00000000..1edcb093
--- /dev/null
+++ b/applications/transfer/types/packet_test.go
@@ -0,0 +1,36 @@
+package types
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+const (
+ denom = "transfer/gaiachannel/atom"
+ amount = uint64(100)
+)
+
+// TestFungibleTokenPacketDataValidateBasic tests ValidateBasic for FungibleTokenPacketData
+func TestFungibleTokenPacketDataValidateBasic(t *testing.T) {
+ testCases := []struct {
+ name string
+ packetData FungibleTokenPacketData
+ expPass bool
+ }{
+ {"valid packet", NewFungibleTokenPacketData(denom, amount, addr1.String(), addr2), true},
+ {"invalid denom", NewFungibleTokenPacketData("", amount, addr1.String(), addr2), false},
+ {"invalid amount", NewFungibleTokenPacketData(denom, 0, addr1.String(), addr2), false},
+ {"missing sender address", NewFungibleTokenPacketData(denom, amount, emptyAddr.String(), addr2), false},
+ {"missing recipient address", NewFungibleTokenPacketData(denom, amount, addr1.String(), emptyAddr.String()), false},
+ }
+
+ for i, tc := range testCases {
+ err := tc.packetData.ValidateBasic()
+ if tc.expPass {
+ require.NoError(t, err, "valid test case %d failed: %v", i, err)
+ } else {
+ require.Error(t, err, "invalid test case %d passed: %s", i, tc.name)
+ }
+ }
+}
diff --git a/applications/transfer/types/params.go b/applications/transfer/types/params.go
new file mode 100644
index 00000000..4ecdfab7
--- /dev/null
+++ b/applications/transfer/types/params.go
@@ -0,0 +1,65 @@
+package types
+
+import (
+ "fmt"
+
+ paramtypes "github.com/cosmos/cosmos-sdk/x/params/types"
+)
+
+const (
+ // DefaultSendEnabled enabled
+ DefaultSendEnabled = true
+ // DefaultReceiveEnabled enabled
+ DefaultReceiveEnabled = true
+)
+
+var (
+ // KeySendEnabled is store's key for SendEnabled Params
+ KeySendEnabled = []byte("SendEnabled")
+ // KeyReceiveEnabled is store's key for ReceiveEnabled Params
+ KeyReceiveEnabled = []byte("ReceiveEnabled")
+)
+
+// ParamKeyTable type declaration for parameters
+func ParamKeyTable() paramtypes.KeyTable {
+ return paramtypes.NewKeyTable().RegisterParamSet(&Params{})
+}
+
+// NewParams creates a new parameter configuration for the ibc transfer module
+func NewParams(enableSend, enableReceive bool) Params {
+ return Params{
+ SendEnabled: enableSend,
+ ReceiveEnabled: enableReceive,
+ }
+}
+
+// DefaultParams is the default parameter configuration for the ibc-transfer module
+func DefaultParams() Params {
+ return NewParams(DefaultSendEnabled, DefaultReceiveEnabled)
+}
+
+// Validate all ibc-transfer module parameters
+func (p Params) Validate() error {
+ if err := validateEnabled(p.SendEnabled); err != nil {
+ return err
+ }
+
+ return validateEnabled(p.ReceiveEnabled)
+}
+
+// ParamSetPairs implements params.ParamSet
+func (p *Params) ParamSetPairs() paramtypes.ParamSetPairs {
+ return paramtypes.ParamSetPairs{
+ paramtypes.NewParamSetPair(KeySendEnabled, p.SendEnabled, validateEnabled),
+ paramtypes.NewParamSetPair(KeyReceiveEnabled, p.ReceiveEnabled, validateEnabled),
+ }
+}
+
+func validateEnabled(i interface{}) error {
+ _, ok := i.(bool)
+ if !ok {
+ return fmt.Errorf("invalid parameter type: %T", i)
+ }
+
+ return nil
+}
diff --git a/applications/transfer/types/params_test.go b/applications/transfer/types/params_test.go
new file mode 100644
index 00000000..825efb82
--- /dev/null
+++ b/applications/transfer/types/params_test.go
@@ -0,0 +1,12 @@
+package types
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestValidateParams(t *testing.T) {
+ require.NoError(t, DefaultParams().Validate())
+ require.NoError(t, NewParams(true, false).Validate())
+}
diff --git a/applications/transfer/types/query.pb.go b/applications/transfer/types/query.pb.go
new file mode 100644
index 00000000..1c1d6929
--- /dev/null
+++ b/applications/transfer/types/query.pb.go
@@ -0,0 +1,1418 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: ibc/applications/transfer/v1/query.proto
+
+package types
+
+import (
+ context "context"
+ fmt "fmt"
+ query "github.com/cosmos/cosmos-sdk/types/query"
+ _ "github.com/gogo/protobuf/gogoproto"
+ grpc1 "github.com/gogo/protobuf/grpc"
+ proto "github.com/gogo/protobuf/proto"
+ _ "google.golang.org/genproto/googleapis/api/annotations"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+ io "io"
+ math "math"
+ math_bits "math/bits"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+// QueryDenomTraceRequest is the request type for the Query/DenomTrace RPC
+// method
+type QueryDenomTraceRequest struct {
+ // hash (in hex format) of the denomination trace information.
+ Hash string `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"`
+}
+
+func (m *QueryDenomTraceRequest) Reset() { *m = QueryDenomTraceRequest{} }
+func (m *QueryDenomTraceRequest) String() string { return proto.CompactTextString(m) }
+func (*QueryDenomTraceRequest) ProtoMessage() {}
+func (*QueryDenomTraceRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_a638e2800a01538c, []int{0}
+}
+func (m *QueryDenomTraceRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *QueryDenomTraceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_QueryDenomTraceRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *QueryDenomTraceRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QueryDenomTraceRequest.Merge(m, src)
+}
+func (m *QueryDenomTraceRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *QueryDenomTraceRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_QueryDenomTraceRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QueryDenomTraceRequest proto.InternalMessageInfo
+
+func (m *QueryDenomTraceRequest) GetHash() string {
+ if m != nil {
+ return m.Hash
+ }
+ return ""
+}
+
+// QueryDenomTraceResponse is the response type for the Query/DenomTrace RPC
+// method.
+type QueryDenomTraceResponse struct {
+ // denom_trace returns the requested denomination trace information.
+ DenomTrace *DenomTrace `protobuf:"bytes,1,opt,name=denom_trace,json=denomTrace,proto3" json:"denom_trace,omitempty"`
+}
+
+func (m *QueryDenomTraceResponse) Reset() { *m = QueryDenomTraceResponse{} }
+func (m *QueryDenomTraceResponse) String() string { return proto.CompactTextString(m) }
+func (*QueryDenomTraceResponse) ProtoMessage() {}
+func (*QueryDenomTraceResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_a638e2800a01538c, []int{1}
+}
+func (m *QueryDenomTraceResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *QueryDenomTraceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_QueryDenomTraceResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *QueryDenomTraceResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QueryDenomTraceResponse.Merge(m, src)
+}
+func (m *QueryDenomTraceResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *QueryDenomTraceResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_QueryDenomTraceResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QueryDenomTraceResponse proto.InternalMessageInfo
+
+func (m *QueryDenomTraceResponse) GetDenomTrace() *DenomTrace {
+ if m != nil {
+ return m.DenomTrace
+ }
+ return nil
+}
+
+// QueryDenomTracesRequest is the request type for the Query/DenomTraces RPC
+// method
+type QueryDenomTracesRequest struct {
+ // pagination defines an optional pagination for the request.
+ Pagination *query.PageRequest `protobuf:"bytes,1,opt,name=pagination,proto3" json:"pagination,omitempty"`
+}
+
+func (m *QueryDenomTracesRequest) Reset() { *m = QueryDenomTracesRequest{} }
+func (m *QueryDenomTracesRequest) String() string { return proto.CompactTextString(m) }
+func (*QueryDenomTracesRequest) ProtoMessage() {}
+func (*QueryDenomTracesRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_a638e2800a01538c, []int{2}
+}
+func (m *QueryDenomTracesRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *QueryDenomTracesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_QueryDenomTracesRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *QueryDenomTracesRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QueryDenomTracesRequest.Merge(m, src)
+}
+func (m *QueryDenomTracesRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *QueryDenomTracesRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_QueryDenomTracesRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QueryDenomTracesRequest proto.InternalMessageInfo
+
+func (m *QueryDenomTracesRequest) GetPagination() *query.PageRequest {
+ if m != nil {
+ return m.Pagination
+ }
+ return nil
+}
+
+// QueryDenomTracesResponse is the response type for the Query/DenomTraces RPC
+// method.
+type QueryDenomTracesResponse struct {
+ // denom_traces returns all denominations trace information.
+ DenomTraces Traces `protobuf:"bytes,1,rep,name=denom_traces,json=denomTraces,proto3,castrepeated=Traces" json:"denom_traces"`
+ // pagination defines the pagination in the response.
+ Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"`
+}
+
+func (m *QueryDenomTracesResponse) Reset() { *m = QueryDenomTracesResponse{} }
+func (m *QueryDenomTracesResponse) String() string { return proto.CompactTextString(m) }
+func (*QueryDenomTracesResponse) ProtoMessage() {}
+func (*QueryDenomTracesResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_a638e2800a01538c, []int{3}
+}
+func (m *QueryDenomTracesResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *QueryDenomTracesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_QueryDenomTracesResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *QueryDenomTracesResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QueryDenomTracesResponse.Merge(m, src)
+}
+func (m *QueryDenomTracesResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *QueryDenomTracesResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_QueryDenomTracesResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QueryDenomTracesResponse proto.InternalMessageInfo
+
+func (m *QueryDenomTracesResponse) GetDenomTraces() Traces {
+ if m != nil {
+ return m.DenomTraces
+ }
+ return nil
+}
+
+func (m *QueryDenomTracesResponse) GetPagination() *query.PageResponse {
+ if m != nil {
+ return m.Pagination
+ }
+ return nil
+}
+
+// QueryParamsRequest is the request type for the Query/Params RPC method.
+type QueryParamsRequest struct {
+}
+
+func (m *QueryParamsRequest) Reset() { *m = QueryParamsRequest{} }
+func (m *QueryParamsRequest) String() string { return proto.CompactTextString(m) }
+func (*QueryParamsRequest) ProtoMessage() {}
+func (*QueryParamsRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_a638e2800a01538c, []int{4}
+}
+func (m *QueryParamsRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *QueryParamsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_QueryParamsRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *QueryParamsRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QueryParamsRequest.Merge(m, src)
+}
+func (m *QueryParamsRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *QueryParamsRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_QueryParamsRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QueryParamsRequest proto.InternalMessageInfo
+
+// QueryParamsResponse is the response type for the Query/Params RPC method.
+type QueryParamsResponse struct {
+ // params defines the parameters of the module.
+ Params *Params `protobuf:"bytes,1,opt,name=params,proto3" json:"params,omitempty"`
+}
+
+func (m *QueryParamsResponse) Reset() { *m = QueryParamsResponse{} }
+func (m *QueryParamsResponse) String() string { return proto.CompactTextString(m) }
+func (*QueryParamsResponse) ProtoMessage() {}
+func (*QueryParamsResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_a638e2800a01538c, []int{5}
+}
+func (m *QueryParamsResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *QueryParamsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_QueryParamsResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *QueryParamsResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QueryParamsResponse.Merge(m, src)
+}
+func (m *QueryParamsResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *QueryParamsResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_QueryParamsResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QueryParamsResponse proto.InternalMessageInfo
+
+func (m *QueryParamsResponse) GetParams() *Params {
+ if m != nil {
+ return m.Params
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterType((*QueryDenomTraceRequest)(nil), "ibc.applications.transfer.v1.QueryDenomTraceRequest")
+ proto.RegisterType((*QueryDenomTraceResponse)(nil), "ibc.applications.transfer.v1.QueryDenomTraceResponse")
+ proto.RegisterType((*QueryDenomTracesRequest)(nil), "ibc.applications.transfer.v1.QueryDenomTracesRequest")
+ proto.RegisterType((*QueryDenomTracesResponse)(nil), "ibc.applications.transfer.v1.QueryDenomTracesResponse")
+ proto.RegisterType((*QueryParamsRequest)(nil), "ibc.applications.transfer.v1.QueryParamsRequest")
+ proto.RegisterType((*QueryParamsResponse)(nil), "ibc.applications.transfer.v1.QueryParamsResponse")
+}
+
+func init() {
+ proto.RegisterFile("ibc/applications/transfer/v1/query.proto", fileDescriptor_a638e2800a01538c)
+}
+
+var fileDescriptor_a638e2800a01538c = []byte{
+ // 528 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0x3f, 0x6f, 0xd3, 0x40,
+ 0x14, 0xcf, 0x95, 0x12, 0x89, 0x17, 0xc4, 0x70, 0x54, 0x10, 0x59, 0x95, 0x5b, 0x59, 0x08, 0x02,
+ 0x85, 0x3b, 0x5c, 0xa0, 0x30, 0xa0, 0x0e, 0x15, 0x02, 0xb1, 0x95, 0xc0, 0x80, 0x60, 0x40, 0x67,
+ 0xe7, 0x70, 0x2c, 0x1a, 0x9f, 0xeb, 0xbb, 0x44, 0x54, 0x88, 0x85, 0x4f, 0x80, 0xc4, 0x8e, 0x98,
+ 0xd9, 0x19, 0xd8, 0x18, 0x3b, 0x56, 0x62, 0x61, 0x02, 0x94, 0xf0, 0x41, 0x90, 0xef, 0xce, 0x8d,
+ 0xa3, 0x20, 0x13, 0x4f, 0x39, 0x5d, 0xde, 0xef, 0xfd, 0xfe, 0xbc, 0xe7, 0x83, 0x4e, 0x1c, 0x84,
+ 0x94, 0xa5, 0xe9, 0x5e, 0x1c, 0x32, 0x15, 0x8b, 0x44, 0x52, 0x95, 0xb1, 0x44, 0xbe, 0xe4, 0x19,
+ 0x1d, 0xf9, 0x74, 0x7f, 0xc8, 0xb3, 0x03, 0x92, 0x66, 0x42, 0x09, 0xbc, 0x1a, 0x07, 0x21, 0x29,
+ 0x57, 0x92, 0xa2, 0x92, 0x8c, 0x7c, 0x67, 0x25, 0x12, 0x91, 0xd0, 0x85, 0x34, 0x3f, 0x19, 0x8c,
+ 0x73, 0x25, 0x14, 0x72, 0x20, 0x24, 0x0d, 0x98, 0xe4, 0xa6, 0x19, 0x1d, 0xf9, 0x01, 0x57, 0xcc,
+ 0xa7, 0x29, 0x8b, 0xe2, 0x44, 0x37, 0xb2, 0xb5, 0x1b, 0x95, 0x4a, 0x8e, 0xb9, 0x4c, 0xf1, 0x6a,
+ 0x24, 0x44, 0xb4, 0xc7, 0x29, 0x4b, 0x63, 0xca, 0x92, 0x44, 0x28, 0x2b, 0x49, 0xff, 0xeb, 0x5d,
+ 0x85, 0x73, 0x8f, 0x72, 0xb2, 0x7b, 0x3c, 0x11, 0x83, 0x27, 0x19, 0x0b, 0x79, 0x97, 0xef, 0x0f,
+ 0xb9, 0x54, 0x18, 0xc3, 0x72, 0x9f, 0xc9, 0x7e, 0x1b, 0xad, 0xa3, 0xce, 0xa9, 0xae, 0x3e, 0x7b,
+ 0x3d, 0x38, 0x3f, 0x57, 0x2d, 0x53, 0x91, 0x48, 0x8e, 0x1f, 0x42, 0xab, 0x97, 0xdf, 0xbe, 0x50,
+ 0xf9, 0xb5, 0x46, 0xb5, 0x36, 0x3b, 0xa4, 0x2a, 0x09, 0x52, 0x6a, 0x03, 0xbd, 0xe3, 0xb3, 0xc7,
+ 0xe6, 0x58, 0x64, 0x21, 0xea, 0x3e, 0xc0, 0x34, 0x0d, 0x4b, 0x72, 0x91, 0x98, 0xe8, 0x48, 0x1e,
+ 0x1d, 0x31, 0x73, 0xb0, 0xd1, 0x91, 0x5d, 0x16, 0x15, 0x86, 0xba, 0x25, 0xa4, 0xf7, 0x0d, 0x41,
+ 0x7b, 0x9e, 0xc3, 0x5a, 0x79, 0x0e, 0xa7, 0x4b, 0x56, 0x64, 0x1b, 0xad, 0x9f, 0xa8, 0xe3, 0x65,
+ 0xe7, 0xcc, 0xe1, 0xcf, 0xb5, 0xc6, 0xe7, 0x5f, 0x6b, 0x4d, 0xdb, 0xb7, 0x35, 0xf5, 0x26, 0xf1,
+ 0x83, 0x19, 0x07, 0x4b, 0xda, 0xc1, 0xa5, 0xff, 0x3a, 0x30, 0xca, 0x66, 0x2c, 0xac, 0x00, 0xd6,
+ 0x0e, 0x76, 0x59, 0xc6, 0x06, 0x45, 0x40, 0xde, 0x63, 0x38, 0x3b, 0x73, 0x6b, 0x2d, 0xdd, 0x85,
+ 0x66, 0xaa, 0x6f, 0x6c, 0x66, 0x17, 0xaa, 0xcd, 0x58, 0xb4, 0xc5, 0x6c, 0x7e, 0x5c, 0x86, 0x93,
+ 0xba, 0x2b, 0xfe, 0x8a, 0x00, 0xa6, 0x4e, 0xf1, 0xcd, 0xea, 0x36, 0xff, 0xde, 0x2c, 0xe7, 0x56,
+ 0x4d, 0x94, 0xf1, 0xe0, 0x6d, 0xbf, 0xfb, 0xfe, 0xe7, 0xc3, 0xd2, 0x1d, 0xbc, 0x45, 0xab, 0xd6,
+ 0xdf, 0x7c, 0x32, 0xe5, 0xf9, 0xd1, 0x37, 0xf9, 0xee, 0xbe, 0xc5, 0x5f, 0x10, 0xb4, 0x4a, 0xe3,
+ 0xc6, 0xf5, 0x64, 0x14, 0x09, 0x3b, 0x5b, 0x75, 0x61, 0x56, 0xfe, 0x6d, 0x2d, 0xdf, 0xc7, 0xb4,
+ 0xa6, 0x7c, 0xfc, 0x09, 0x41, 0xd3, 0x0c, 0x04, 0x5f, 0x5f, 0x80, 0x7b, 0x66, 0x1f, 0x1c, 0xbf,
+ 0x06, 0xc2, 0x0a, 0xf5, 0xb5, 0xd0, 0x0d, 0x7c, 0x79, 0x01, 0xa1, 0x66, 0x41, 0x76, 0x9e, 0x1e,
+ 0x8e, 0x5d, 0x74, 0x34, 0x76, 0xd1, 0xef, 0xb1, 0x8b, 0xde, 0x4f, 0xdc, 0xc6, 0xd1, 0xc4, 0x6d,
+ 0xfc, 0x98, 0xb8, 0x8d, 0x67, 0xdb, 0x51, 0xac, 0xfa, 0xc3, 0x80, 0x84, 0x62, 0x40, 0xed, 0x0b,
+ 0x67, 0x7e, 0xae, 0xc9, 0xde, 0x2b, 0xfa, 0xba, 0x82, 0x42, 0x1d, 0xa4, 0x5c, 0x06, 0x4d, 0xfd,
+ 0x4c, 0xdd, 0xf8, 0x1b, 0x00, 0x00, 0xff, 0xff, 0xb2, 0x7f, 0xfe, 0xbd, 0x7d, 0x05, 0x00, 0x00,
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// QueryClient is the client API for Query service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type QueryClient interface {
+ // DenomTrace queries a denomination trace information.
+ DenomTrace(ctx context.Context, in *QueryDenomTraceRequest, opts ...grpc.CallOption) (*QueryDenomTraceResponse, error)
+ // DenomTraces queries all denomination traces.
+ DenomTraces(ctx context.Context, in *QueryDenomTracesRequest, opts ...grpc.CallOption) (*QueryDenomTracesResponse, error)
+ // Params queries all parameters of the ibc-transfer module.
+ Params(ctx context.Context, in *QueryParamsRequest, opts ...grpc.CallOption) (*QueryParamsResponse, error)
+}
+
+type queryClient struct {
+ cc grpc1.ClientConn
+}
+
+func NewQueryClient(cc grpc1.ClientConn) QueryClient {
+ return &queryClient{cc}
+}
+
+func (c *queryClient) DenomTrace(ctx context.Context, in *QueryDenomTraceRequest, opts ...grpc.CallOption) (*QueryDenomTraceResponse, error) {
+ out := new(QueryDenomTraceResponse)
+ err := c.cc.Invoke(ctx, "/ibc.applications.transfer.v1.Query/DenomTrace", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *queryClient) DenomTraces(ctx context.Context, in *QueryDenomTracesRequest, opts ...grpc.CallOption) (*QueryDenomTracesResponse, error) {
+ out := new(QueryDenomTracesResponse)
+ err := c.cc.Invoke(ctx, "/ibc.applications.transfer.v1.Query/DenomTraces", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *queryClient) Params(ctx context.Context, in *QueryParamsRequest, opts ...grpc.CallOption) (*QueryParamsResponse, error) {
+ out := new(QueryParamsResponse)
+ err := c.cc.Invoke(ctx, "/ibc.applications.transfer.v1.Query/Params", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// QueryServer is the server API for Query service.
+type QueryServer interface {
+ // DenomTrace queries a denomination trace information.
+ DenomTrace(context.Context, *QueryDenomTraceRequest) (*QueryDenomTraceResponse, error)
+ // DenomTraces queries all denomination traces.
+ DenomTraces(context.Context, *QueryDenomTracesRequest) (*QueryDenomTracesResponse, error)
+ // Params queries all parameters of the ibc-transfer module.
+ Params(context.Context, *QueryParamsRequest) (*QueryParamsResponse, error)
+}
+
+// UnimplementedQueryServer can be embedded to have forward compatible implementations.
+type UnimplementedQueryServer struct {
+}
+
+func (*UnimplementedQueryServer) DenomTrace(ctx context.Context, req *QueryDenomTraceRequest) (*QueryDenomTraceResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method DenomTrace not implemented")
+}
+func (*UnimplementedQueryServer) DenomTraces(ctx context.Context, req *QueryDenomTracesRequest) (*QueryDenomTracesResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method DenomTraces not implemented")
+}
+func (*UnimplementedQueryServer) Params(ctx context.Context, req *QueryParamsRequest) (*QueryParamsResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Params not implemented")
+}
+
+func RegisterQueryServer(s grpc1.Server, srv QueryServer) {
+ s.RegisterService(&_Query_serviceDesc, srv)
+}
+
+func _Query_DenomTrace_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(QueryDenomTraceRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(QueryServer).DenomTrace(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/ibc.applications.transfer.v1.Query/DenomTrace",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(QueryServer).DenomTrace(ctx, req.(*QueryDenomTraceRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Query_DenomTraces_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(QueryDenomTracesRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(QueryServer).DenomTraces(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/ibc.applications.transfer.v1.Query/DenomTraces",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(QueryServer).DenomTraces(ctx, req.(*QueryDenomTracesRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Query_Params_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(QueryParamsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(QueryServer).Params(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/ibc.applications.transfer.v1.Query/Params",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(QueryServer).Params(ctx, req.(*QueryParamsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _Query_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "ibc.applications.transfer.v1.Query",
+ HandlerType: (*QueryServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "DenomTrace",
+ Handler: _Query_DenomTrace_Handler,
+ },
+ {
+ MethodName: "DenomTraces",
+ Handler: _Query_DenomTraces_Handler,
+ },
+ {
+ MethodName: "Params",
+ Handler: _Query_Params_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "ibc/applications/transfer/v1/query.proto",
+}
+
+func (m *QueryDenomTraceRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *QueryDenomTraceRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *QueryDenomTraceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Hash) > 0 {
+ i -= len(m.Hash)
+ copy(dAtA[i:], m.Hash)
+ i = encodeVarintQuery(dAtA, i, uint64(len(m.Hash)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *QueryDenomTraceResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *QueryDenomTraceResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *QueryDenomTraceResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.DenomTrace != nil {
+ {
+ size, err := m.DenomTrace.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintQuery(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *QueryDenomTracesRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *QueryDenomTracesRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *QueryDenomTracesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Pagination != nil {
+ {
+ size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintQuery(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *QueryDenomTracesResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *QueryDenomTracesResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *QueryDenomTracesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Pagination != nil {
+ {
+ size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintQuery(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.DenomTraces) > 0 {
+ for iNdEx := len(m.DenomTraces) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.DenomTraces[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintQuery(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *QueryParamsRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *QueryParamsRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *QueryParamsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ return len(dAtA) - i, nil
+}
+
+func (m *QueryParamsResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *QueryParamsResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *QueryParamsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Params != nil {
+ {
+ size, err := m.Params.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintQuery(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintQuery(dAtA []byte, offset int, v uint64) int {
+ offset -= sovQuery(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *QueryDenomTraceRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Hash)
+ if l > 0 {
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ return n
+}
+
+func (m *QueryDenomTraceResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.DenomTrace != nil {
+ l = m.DenomTrace.Size()
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ return n
+}
+
+func (m *QueryDenomTracesRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Pagination != nil {
+ l = m.Pagination.Size()
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ return n
+}
+
+func (m *QueryDenomTracesResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.DenomTraces) > 0 {
+ for _, e := range m.DenomTraces {
+ l = e.Size()
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ }
+ if m.Pagination != nil {
+ l = m.Pagination.Size()
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ return n
+}
+
+func (m *QueryParamsRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ return n
+}
+
+func (m *QueryParamsResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Params != nil {
+ l = m.Params.Size()
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ return n
+}
+
+func sovQuery(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozQuery(x uint64) (n int) {
+ return sovQuery(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *QueryDenomTraceRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: QueryDenomTraceRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: QueryDenomTraceRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Hash = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipQuery(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *QueryDenomTraceResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: QueryDenomTraceResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: QueryDenomTraceResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DenomTrace", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.DenomTrace == nil {
+ m.DenomTrace = &DenomTrace{}
+ }
+ if err := m.DenomTrace.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipQuery(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *QueryDenomTracesRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: QueryDenomTracesRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: QueryDenomTracesRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Pagination == nil {
+ m.Pagination = &query.PageRequest{}
+ }
+ if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipQuery(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *QueryDenomTracesResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: QueryDenomTracesResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: QueryDenomTracesResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DenomTraces", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.DenomTraces = append(m.DenomTraces, DenomTrace{})
+ if err := m.DenomTraces[len(m.DenomTraces)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Pagination == nil {
+ m.Pagination = &query.PageResponse{}
+ }
+ if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipQuery(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *QueryParamsRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: QueryParamsRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: QueryParamsRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipQuery(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *QueryParamsResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: QueryParamsResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: QueryParamsResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Params == nil {
+ m.Params = &Params{}
+ }
+ if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipQuery(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipQuery(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthQuery
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupQuery
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthQuery
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthQuery = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowQuery = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupQuery = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/applications/transfer/types/query.pb.gw.go b/applications/transfer/types/query.pb.gw.go
new file mode 100644
index 00000000..007ed668
--- /dev/null
+++ b/applications/transfer/types/query.pb.gw.go
@@ -0,0 +1,326 @@
+// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
+// source: ibc/applications/transfer/v1/query.proto
+
+/*
+Package types is a reverse proxy.
+
+It translates gRPC into RESTful JSON APIs.
+*/
+package types
+
+import (
+ "context"
+ "io"
+ "net/http"
+
+ "github.com/golang/protobuf/descriptor"
+ "github.com/golang/protobuf/proto"
+ "github.com/grpc-ecosystem/grpc-gateway/runtime"
+ "github.com/grpc-ecosystem/grpc-gateway/utilities"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/status"
+)
+
+// Suppress "imported and not used" errors
+var _ codes.Code
+var _ io.Reader
+var _ status.Status
+var _ = runtime.String
+var _ = utilities.NewDoubleArray
+var _ = descriptor.ForMessage
+
+func request_Query_DenomTrace_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq QueryDenomTraceRequest
+ var metadata runtime.ServerMetadata
+
+ var (
+ val string
+ ok bool
+ err error
+ _ = err
+ )
+
+ val, ok = pathParams["hash"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "hash")
+ }
+
+ protoReq.Hash, err = runtime.String(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "hash", err)
+ }
+
+ msg, err := client.DenomTrace(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func local_request_Query_DenomTrace_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq QueryDenomTraceRequest
+ var metadata runtime.ServerMetadata
+
+ var (
+ val string
+ ok bool
+ err error
+ _ = err
+ )
+
+ val, ok = pathParams["hash"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "hash")
+ }
+
+ protoReq.Hash, err = runtime.String(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "hash", err)
+ }
+
+ msg, err := server.DenomTrace(ctx, &protoReq)
+ return msg, metadata, err
+
+}
+
+var (
+ filter_Query_DenomTraces_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)}
+)
+
+func request_Query_DenomTraces_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq QueryDenomTracesRequest
+ var metadata runtime.ServerMetadata
+
+ if err := req.ParseForm(); err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_DenomTraces_0); err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := client.DenomTraces(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func local_request_Query_DenomTraces_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq QueryDenomTracesRequest
+ var metadata runtime.ServerMetadata
+
+ if err := req.ParseForm(); err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_DenomTraces_0); err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := server.DenomTraces(ctx, &protoReq)
+ return msg, metadata, err
+
+}
+
+func request_Query_Params_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq QueryParamsRequest
+ var metadata runtime.ServerMetadata
+
+ msg, err := client.Params(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func local_request_Query_Params_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq QueryParamsRequest
+ var metadata runtime.ServerMetadata
+
+ msg, err := server.Params(ctx, &protoReq)
+ return msg, metadata, err
+
+}
+
+// RegisterQueryHandlerServer registers the http handlers for service Query to "mux".
+// UnaryRPC :call QueryServer directly.
+// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
+// Note that using this registration option will cause many gRPC library features (such as grpc.SendHeader, etc) to stop working. Consider using RegisterQueryHandlerFromEndpoint instead.
+func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryServer) error {
+
+ mux.Handle("GET", pattern_Query_DenomTrace_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Query_DenomTrace_0(rctx, inboundMarshaler, server, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Query_DenomTrace_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("GET", pattern_Query_DenomTraces_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Query_DenomTraces_0(rctx, inboundMarshaler, server, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Query_DenomTraces_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("GET", pattern_Query_Params_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Query_Params_0(rctx, inboundMarshaler, server, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Query_Params_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ return nil
+}
+
+// RegisterQueryHandlerFromEndpoint is same as RegisterQueryHandler but
+// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
+func RegisterQueryHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
+ conn, err := grpc.Dial(endpoint, opts...)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err != nil {
+ if cerr := conn.Close(); cerr != nil {
+ grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
+ }
+ return
+ }
+ go func() {
+ <-ctx.Done()
+ if cerr := conn.Close(); cerr != nil {
+ grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
+ }
+ }()
+ }()
+
+ return RegisterQueryHandler(ctx, mux, conn)
+}
+
+// RegisterQueryHandler registers the http handlers for service Query to "mux".
+// The handlers forward requests to the grpc endpoint over "conn".
+func RegisterQueryHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
+ return RegisterQueryHandlerClient(ctx, mux, NewQueryClient(conn))
+}
+
+// RegisterQueryHandlerClient registers the http handlers for service Query
+// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "QueryClient".
+// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "QueryClient"
+// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
+// "QueryClient" to call the correct interceptors.
+func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, client QueryClient) error {
+
+ mux.Handle("GET", pattern_Query_DenomTrace_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Query_DenomTrace_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Query_DenomTrace_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("GET", pattern_Query_DenomTraces_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Query_DenomTraces_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Query_DenomTraces_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("GET", pattern_Query_Params_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Query_Params_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Query_Params_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ return nil
+}
+
+var (
+ pattern_Query_DenomTrace_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 1, 0, 4, 1, 5, 5}, []string{"ibc", "applications", "transfer", "v1beta1", "denom_traces", "hash"}, "", runtime.AssumeColonVerbOpt(false)))
+
+ pattern_Query_DenomTraces_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"ibc", "applications", "transfer", "v1beta1", "denom_traces"}, "", runtime.AssumeColonVerbOpt(false)))
+
+ pattern_Query_Params_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"ibc", "applications", "transfer", "v1beta1", "params"}, "", runtime.AssumeColonVerbOpt(false)))
+)
+
+var (
+ forward_Query_DenomTrace_0 = runtime.ForwardResponseMessage
+
+ forward_Query_DenomTraces_0 = runtime.ForwardResponseMessage
+
+ forward_Query_Params_0 = runtime.ForwardResponseMessage
+)
diff --git a/applications/transfer/types/trace.go b/applications/transfer/types/trace.go
new file mode 100644
index 00000000..f45113ef
--- /dev/null
+++ b/applications/transfer/types/trace.go
@@ -0,0 +1,203 @@
+package types
+
+import (
+ "crypto/sha256"
+ "encoding/hex"
+ "fmt"
+ "sort"
+ "strings"
+
+ tmbytes "github.com/tendermint/tendermint/libs/bytes"
+ tmtypes "github.com/tendermint/tendermint/types"
+
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+)
+
+// ParseDenomTrace parses a string with the ibc prefix (denom trace) and the base denomination
+// into a DenomTrace type.
+//
+// Examples:
+//
+// - "portidone/channelidone/uatom" => DenomTrace{Path: "portidone/channelidone", BaseDenom: "uatom"}
+// - "uatom" => DenomTrace{Path: "", BaseDenom: "uatom"}
+func ParseDenomTrace(rawDenom string) DenomTrace {
+ denomSplit := strings.Split(rawDenom, "/")
+
+ if denomSplit[0] == rawDenom {
+ return DenomTrace{
+ Path: "",
+ BaseDenom: rawDenom,
+ }
+ }
+
+ return DenomTrace{
+ Path: strings.Join(denomSplit[:len(denomSplit)-1], "/"),
+ BaseDenom: denomSplit[len(denomSplit)-1],
+ }
+}
+
+// Hash returns the hex bytes of the SHA256 hash of the DenomTrace fields using the following formula:
+//
+// hash = sha256(tracePath + "/" + baseDenom)
+func (dt DenomTrace) Hash() tmbytes.HexBytes {
+ hash := sha256.Sum256([]byte(dt.GetFullDenomPath()))
+ return hash[:]
+}
+
+// GetPrefix returns the receiving denomination prefix composed of the trace info and a separator.
+func (dt DenomTrace) GetPrefix() string {
+ return dt.Path + "/"
+}
+
+// IBCDenom returns the coin denomination for an ICS20 fungible token in the format
+// 'ibc/{hash(tracePath + "/" + baseDenom)}'. If the trace is empty, it returns the base denomination.
+func (dt DenomTrace) IBCDenom() string {
+ if dt.Path != "" {
+ return fmt.Sprintf("%s/%s", DenomPrefix, dt.Hash())
+ }
+ return dt.BaseDenom
+}
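+
+// Example (illustrative; the hash value matches the cases in trace_test.go below):
+//
+//	trace := ParseDenomTrace("transfer/channelToA/uatom")
+//	trace.IBCDenom() // "ibc/7F1D3FCF4AE79E1554D670D1AD949A9BA4E4A3C76C63093E17E446A46061A7A2"
+//	ParseDenomTrace("uatom").IBCDenom() // "uatom" (empty trace, so the base denomination is returned)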
+
+// GetFullDenomPath returns the full denomination according to the ICS20 specification:
+// tracePath + "/" + baseDenom
+// If there exists no trace then the base denomination is returned.
+func (dt DenomTrace) GetFullDenomPath() string {
+ if dt.Path == "" {
+ return dt.BaseDenom
+ }
+ return dt.GetPrefix() + dt.BaseDenom
+}
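+
+// Example (illustrative):
+//
+//	DenomTrace{Path: "transfer/channelToA", BaseDenom: "uatom"}.GetFullDenomPath() // "transfer/channelToA/uatom"
+//	DenomTrace{BaseDenom: "uatom"}.GetFullDenomPath()                              // "uatom"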
+
+func validateTraceIdentifiers(identifiers []string) error {
+ if len(identifiers) == 0 || len(identifiers)%2 != 0 {
+ return fmt.Errorf("trace info must come in pairs of port and channel identifiers '{portID}/{channelID}', got the identifiers: %s", identifiers)
+ }
+
+ // validate correctness of port and channel identifiers
+ for i := 0; i < len(identifiers); i += 2 {
+ if err := host.PortIdentifierValidator(identifiers[i]); err != nil {
+ return sdkerrors.Wrapf(err, "invalid port ID at position %d", i)
+ }
+ if err := host.ChannelIdentifierValidator(identifiers[i+1]); err != nil {
+ return sdkerrors.Wrapf(err, "invalid channel ID at position %d", i)
+ }
+ }
+ return nil
+}
+
+// Validate performs a basic validation of the DenomTrace fields.
+func (dt DenomTrace) Validate() error {
+ // empty trace is accepted when token lives on the original chain
+ switch {
+ case dt.Path == "" && dt.BaseDenom != "":
+ return nil
+ case strings.TrimSpace(dt.BaseDenom) == "":
+ return fmt.Errorf("base denomination cannot be blank")
+ }
+
+ // NOTE: no base denomination validation
+
+ identifiers := strings.Split(dt.Path, "/")
+ return validateTraceIdentifiers(identifiers)
+}
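+
+// Example (illustrative; mirrors cases in trace_test.go):
+//
+//	DenomTrace{BaseDenom: "uatom"}.Validate()                              // nil
+//	DenomTrace{BaseDenom: "uatom", Path: "transfer/channelToA"}.Validate() // nil
+//	DenomTrace{BaseDenom: "uatom", Path: "transfer"}.Validate()            // error: identifiers must come in port/channel pairs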
+
+// Traces defines a wrapper type for a slice of DenomTrace.
+type Traces []DenomTrace
+
+// Validate performs a basic validation of each denomination trace info.
+func (t Traces) Validate() error {
+ seenTraces := make(map[string]bool)
+ for i, trace := range t {
+ hash := trace.Hash().String()
+ if seenTraces[hash] {
+ return fmt.Errorf("duplicated denomination trace with hash %s", trace.Hash())
+ }
+
+ if err := trace.Validate(); err != nil {
+ return sdkerrors.Wrapf(err, "failed denom trace %d validation", i)
+ }
+ seenTraces[hash] = true
+ }
+ return nil
+}
+
+var _ sort.Interface = Traces{}
+
+// Len implements sort.Interface for Traces
+func (t Traces) Len() int { return len(t) }
+
+// Less implements sort.Interface for Traces
+func (t Traces) Less(i, j int) bool { return t[i].GetFullDenomPath() < t[j].GetFullDenomPath() }
+
+// Swap implements sort.Interface for Traces
+func (t Traces) Swap(i, j int) { t[i], t[j] = t[j], t[i] }
+
+// Sort is a helper function to sort the set of denomination traces in-place
+func (t Traces) Sort() Traces {
+ sort.Sort(t)
+ return t
+}
+
+// ValidatePrefixedDenom checks that the denomination for an IBC fungible token packet denom is correctly prefixed.
+// The function will return no error if the given string follows one of the two formats:
+//
+// - Prefixed denomination: '{portIDN}/{channelIDN}/.../{portID0}/{channelID0}/baseDenom'
+// - Unprefixed denomination: 'baseDenom'
+func ValidatePrefixedDenom(denom string) error {
+ denomSplit := strings.Split(denom, "/")
+ if denomSplit[0] == denom && strings.TrimSpace(denom) != "" {
+ // NOTE: no base denomination validation
+ return nil
+ }
+
+ if strings.TrimSpace(denomSplit[len(denomSplit)-1]) == "" {
+ return sdkerrors.Wrap(ErrInvalidDenomForTransfer, "base denomination cannot be blank")
+ }
+
+ identifiers := denomSplit[:len(denomSplit)-1]
+ return validateTraceIdentifiers(identifiers)
+}
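+
+// Example (illustrative; mirrors cases in trace_test.go):
+//
+//	ValidatePrefixedDenom("transfer/channelToA/uatom") // nil
+//	ValidatePrefixedDenom("uatom")                     // nil
+//	ValidatePrefixedDenom("transfer/")                 // error: base denomination cannot be blank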
+
+// ValidateIBCDenom validates that the given denomination is either:
+//
+// - A valid base denomination (e.g. 'uatom')
+// - A valid fungible token representation (i.e. 'ibc/{hash}') per ADR 001 https://github.com/cosmos/cosmos-sdk/blob/master/docs/architecture/adr-001-coin-source-tracing.md
+func ValidateIBCDenom(denom string) error {
+ if err := sdk.ValidateDenom(denom); err != nil {
+ return err
+ }
+
+ denomSplit := strings.SplitN(denom, "/", 2)
+
+ switch {
+ case strings.TrimSpace(denom) == "",
+ len(denomSplit) == 1 && denomSplit[0] == DenomPrefix,
+ len(denomSplit) == 2 && (denomSplit[0] != DenomPrefix || strings.TrimSpace(denomSplit[1]) == ""):
+ return sdkerrors.Wrapf(ErrInvalidDenomForTransfer, "denomination should be prefixed with the format 'ibc/{hash(trace + \"/\" + %s)}'", denom)
+
+ case denomSplit[0] == denom && strings.TrimSpace(denom) != "":
+ return nil
+ }
+
+ if _, err := ParseHexHash(denomSplit[1]); err != nil {
+ return sdkerrors.Wrapf(err, "invalid denom trace hash %s", denomSplit[1])
+ }
+
+ return nil
+}
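+
+// Example (illustrative; mirrors cases in trace_test.go):
+//
+//	ValidateIBCDenom("uatom")                                                                // nil
+//	ValidateIBCDenom("ibc/7F1D3FCF4AE79E1554D670D1AD949A9BA4E4A3C76C63093E17E446A46061A7A2") // nil
+//	ValidateIBCDenom("transfer/channelToA/uatom")                                            // error: not of the form 'ibc/{hash}'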
+
+// ParseHexHash parses a hex hash in string format to bytes and validates its correctness.
+func ParseHexHash(hexHash string) (tmbytes.HexBytes, error) {
+ hash, err := hex.DecodeString(hexHash)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := tmtypes.ValidateHash(hash); err != nil {
+ return nil, err
+ }
+
+ return hash, nil
+}
diff --git a/applications/transfer/types/trace_test.go b/applications/transfer/types/trace_test.go
new file mode 100644
index 00000000..f0868d56
--- /dev/null
+++ b/applications/transfer/types/trace_test.go
@@ -0,0 +1,150 @@
+package types
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestParseDenomTrace(t *testing.T) {
+ testCases := []struct {
+ name string
+ denom string
+ expTrace DenomTrace
+ }{
+ {"empty denom", "", DenomTrace{}},
+ {"base denom", "uatom", DenomTrace{BaseDenom: "uatom"}},
+ {"trace info", "transfer/channelToA/uatom", DenomTrace{BaseDenom: "uatom", Path: "transfer/channelToA"}},
+ {"incomplete path", "transfer/uatom", DenomTrace{BaseDenom: "uatom", Path: "transfer"}},
+ {"invalid path (1)", "transfer//uatom", DenomTrace{BaseDenom: "uatom", Path: "transfer/"}},
+ {"invalid path (2)", "transfer/channelToA/uatom/", DenomTrace{BaseDenom: "", Path: "transfer/channelToA/uatom"}},
+ }
+
+ for _, tc := range testCases {
+ trace := ParseDenomTrace(tc.denom)
+ require.Equal(t, tc.expTrace, trace, tc.name)
+ }
+}
+
+func TestDenomTrace_IBCDenom(t *testing.T) {
+ testCases := []struct {
+ name string
+ trace DenomTrace
+ expDenom string
+ }{
+ {"base denom", DenomTrace{BaseDenom: "uatom"}, "uatom"},
+ {"trace info", DenomTrace{BaseDenom: "uatom", Path: "transfer/channelToA"}, "ibc/7F1D3FCF4AE79E1554D670D1AD949A9BA4E4A3C76C63093E17E446A46061A7A2"},
+ }
+
+ for _, tc := range testCases {
+ denom := tc.trace.IBCDenom()
+ require.Equal(t, tc.expDenom, denom, tc.name)
+ }
+}
+
+func TestDenomTrace_Validate(t *testing.T) {
+ testCases := []struct {
+ name string
+ trace DenomTrace
+ expError bool
+ }{
+ {"base denom only", DenomTrace{BaseDenom: "uatom"}, false},
+ {"empty DenomTrace", DenomTrace{}, true},
+ {"valid single trace info", DenomTrace{BaseDenom: "uatom", Path: "transfer/channelToA"}, false},
+ {"valid multiple trace info", DenomTrace{BaseDenom: "uatom", Path: "transfer/channelToA/transfer/channelToB"}, false},
+ {"single trace identifier", DenomTrace{BaseDenom: "uatom", Path: "transfer"}, true},
+ {"invalid port ID", DenomTrace{BaseDenom: "uatom", Path: "(transfer)/channelToA"}, true},
+ {"invalid channel ID", DenomTrace{BaseDenom: "uatom", Path: "transfer/(channelToA)"}, true},
+ {"empty base denom with trace", DenomTrace{BaseDenom: "", Path: "transfer/channelToA"}, true},
+ }
+
+ for _, tc := range testCases {
+ err := tc.trace.Validate()
+ if tc.expError {
+ require.Error(t, err, tc.name)
+ continue
+ }
+ require.NoError(t, err, tc.name)
+ }
+}
+
+func TestTraces_Validate(t *testing.T) {
+ testCases := []struct {
+ name string
+ traces Traces
+ expError bool
+ }{
+ {"empty Traces", Traces{}, false},
+ {"valid multiple trace info", Traces{{BaseDenom: "uatom", Path: "transfer/channelToA/transfer/channelToB"}}, false},
+ {
+			"duplicate denomination trace",
+ Traces{
+ {BaseDenom: "uatom", Path: "transfer/channelToA/transfer/channelToB"},
+ {BaseDenom: "uatom", Path: "transfer/channelToA/transfer/channelToB"},
+ },
+ true,
+ },
+ {"empty base denom with trace", Traces{{BaseDenom: "", Path: "transfer/channelToA"}}, true},
+ }
+
+ for _, tc := range testCases {
+ err := tc.traces.Validate()
+ if tc.expError {
+ require.Error(t, err, tc.name)
+ continue
+ }
+ require.NoError(t, err, tc.name)
+ }
+}
+
+func TestValidatePrefixedDenom(t *testing.T) {
+ testCases := []struct {
+ name string
+ denom string
+ expError bool
+ }{
+ {"prefixed denom", "transfer/channelToA/uatom", false},
+ {"base denom", "uatom", false},
+ {"empty denom", "", true},
+ {"empty prefix", "/uatom", true},
+ {"empty identifiers", "//uatom", true},
+ {"single trace identifier", "transfer/", true},
+ {"invalid port ID", "(transfer)/channelToA/uatom", true},
+ {"invalid channel ID", "transfer/(channelToA)/uatom", true},
+ }
+
+ for _, tc := range testCases {
+ err := ValidatePrefixedDenom(tc.denom)
+ if tc.expError {
+ require.Error(t, err, tc.name)
+ continue
+ }
+ require.NoError(t, err, tc.name)
+ }
+}
+
+func TestValidateIBCDenom(t *testing.T) {
+ testCases := []struct {
+ name string
+ denom string
+ expError bool
+ }{
+ {"denom with trace hash", "ibc/7F1D3FCF4AE79E1554D670D1AD949A9BA4E4A3C76C63093E17E446A46061A7A2", false},
+ {"base denom", "uatom", false},
+ {"empty denom", "", true},
+ {"invalid prefixed denom", "transfer/channelToA/uatom", true},
+ {"denom 'ibc'", "ibc", true},
+ {"denom 'ibc/'", "ibc/", true},
+		{"invalid prefix", "notibc/7F1D3FCF4AE79E1554D670D1AD949A9BA4E4A3C76C63093E17E446A46061A7A2", true},
+		{"invalid hash", "ibc/!@#$!@#", true},
+ }
+
+ for _, tc := range testCases {
+ err := ValidateIBCDenom(tc.denom)
+ if tc.expError {
+ require.Error(t, err, tc.name)
+ continue
+ }
+ require.NoError(t, err, tc.name)
+ }
+}
diff --git a/applications/transfer/types/transfer.pb.go b/applications/transfer/types/transfer.pb.go
new file mode 100644
index 00000000..62734b85
--- /dev/null
+++ b/applications/transfer/types/transfer.pb.go
@@ -0,0 +1,909 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: ibc/applications/transfer/v1/transfer.proto
+
+package types
+
+import (
+ fmt "fmt"
+ _ "github.com/gogo/protobuf/gogoproto"
+ proto "github.com/gogo/protobuf/proto"
+ io "io"
+ math "math"
+ math_bits "math/bits"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+// FungibleTokenPacketData defines a struct for the packet payload
+// See FungibleTokenPacketData spec:
+// https://github.com/cosmos/ics/tree/master/spec/ics-020-fungible-token-transfer#data-structures
+type FungibleTokenPacketData struct {
+ // the token denomination to be transferred
+ Denom string `protobuf:"bytes,1,opt,name=denom,proto3" json:"denom,omitempty"`
+ // the token amount to be transferred
+ Amount uint64 `protobuf:"varint,2,opt,name=amount,proto3" json:"amount,omitempty"`
+ // the sender address
+ Sender string `protobuf:"bytes,3,opt,name=sender,proto3" json:"sender,omitempty"`
+ // the recipient address on the destination chain
+ Receiver string `protobuf:"bytes,4,opt,name=receiver,proto3" json:"receiver,omitempty"`
+}
+
+func (m *FungibleTokenPacketData) Reset() { *m = FungibleTokenPacketData{} }
+func (m *FungibleTokenPacketData) String() string { return proto.CompactTextString(m) }
+func (*FungibleTokenPacketData) ProtoMessage() {}
+func (*FungibleTokenPacketData) Descriptor() ([]byte, []int) {
+ return fileDescriptor_5041673e96e97901, []int{0}
+}
+func (m *FungibleTokenPacketData) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *FungibleTokenPacketData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_FungibleTokenPacketData.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *FungibleTokenPacketData) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_FungibleTokenPacketData.Merge(m, src)
+}
+func (m *FungibleTokenPacketData) XXX_Size() int {
+ return m.Size()
+}
+func (m *FungibleTokenPacketData) XXX_DiscardUnknown() {
+ xxx_messageInfo_FungibleTokenPacketData.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_FungibleTokenPacketData proto.InternalMessageInfo
+
+func (m *FungibleTokenPacketData) GetDenom() string {
+ if m != nil {
+ return m.Denom
+ }
+ return ""
+}
+
+func (m *FungibleTokenPacketData) GetAmount() uint64 {
+ if m != nil {
+ return m.Amount
+ }
+ return 0
+}
+
+func (m *FungibleTokenPacketData) GetSender() string {
+ if m != nil {
+ return m.Sender
+ }
+ return ""
+}
+
+func (m *FungibleTokenPacketData) GetReceiver() string {
+ if m != nil {
+ return m.Receiver
+ }
+ return ""
+}
+
+// DenomTrace contains the base denomination for ICS20 fungible tokens and the
+// source tracing information path.
+type DenomTrace struct {
+ // path defines the chain of port/channel identifiers used for tracing the
+ // source of the fungible token.
+ Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
+ // base denomination of the relayed fungible token.
+ BaseDenom string `protobuf:"bytes,2,opt,name=base_denom,json=baseDenom,proto3" json:"base_denom,omitempty"`
+}
+
+func (m *DenomTrace) Reset() { *m = DenomTrace{} }
+func (m *DenomTrace) String() string { return proto.CompactTextString(m) }
+func (*DenomTrace) ProtoMessage() {}
+func (*DenomTrace) Descriptor() ([]byte, []int) {
+ return fileDescriptor_5041673e96e97901, []int{1}
+}
+func (m *DenomTrace) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DenomTrace) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_DenomTrace.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *DenomTrace) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DenomTrace.Merge(m, src)
+}
+func (m *DenomTrace) XXX_Size() int {
+ return m.Size()
+}
+func (m *DenomTrace) XXX_DiscardUnknown() {
+ xxx_messageInfo_DenomTrace.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DenomTrace proto.InternalMessageInfo
+
+func (m *DenomTrace) GetPath() string {
+ if m != nil {
+ return m.Path
+ }
+ return ""
+}
+
+func (m *DenomTrace) GetBaseDenom() string {
+ if m != nil {
+ return m.BaseDenom
+ }
+ return ""
+}
+
+// Params defines the set of IBC transfer parameters.
+// NOTE: To prevent a single token from being transferred, set the
+// TransfersEnabled parameter to true and then set the bank module's SendEnabled
+// parameter for the denomination to false.
+type Params struct {
+ // send_enabled enables or disables all cross-chain token transfers from this
+ // chain.
+ SendEnabled bool `protobuf:"varint,1,opt,name=send_enabled,json=sendEnabled,proto3" json:"send_enabled,omitempty" yaml:"send_enabled"`
+ // receive_enabled enables or disables all cross-chain token transfers to this
+ // chain.
+ ReceiveEnabled bool `protobuf:"varint,2,opt,name=receive_enabled,json=receiveEnabled,proto3" json:"receive_enabled,omitempty" yaml:"receive_enabled"`
+}
+
+func (m *Params) Reset() { *m = Params{} }
+func (m *Params) String() string { return proto.CompactTextString(m) }
+func (*Params) ProtoMessage() {}
+func (*Params) Descriptor() ([]byte, []int) {
+ return fileDescriptor_5041673e96e97901, []int{2}
+}
+func (m *Params) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Params) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_Params.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *Params) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Params.Merge(m, src)
+}
+func (m *Params) XXX_Size() int {
+ return m.Size()
+}
+func (m *Params) XXX_DiscardUnknown() {
+ xxx_messageInfo_Params.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Params proto.InternalMessageInfo
+
+func (m *Params) GetSendEnabled() bool {
+ if m != nil {
+ return m.SendEnabled
+ }
+ return false
+}
+
+func (m *Params) GetReceiveEnabled() bool {
+ if m != nil {
+ return m.ReceiveEnabled
+ }
+ return false
+}
+
+func init() {
+ proto.RegisterType((*FungibleTokenPacketData)(nil), "ibc.applications.transfer.v1.FungibleTokenPacketData")
+ proto.RegisterType((*DenomTrace)(nil), "ibc.applications.transfer.v1.DenomTrace")
+ proto.RegisterType((*Params)(nil), "ibc.applications.transfer.v1.Params")
+}
+
+func init() {
+ proto.RegisterFile("ibc/applications/transfer/v1/transfer.proto", fileDescriptor_5041673e96e97901)
+}
+
+var fileDescriptor_5041673e96e97901 = []byte{
+ // 362 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0x41, 0x6b, 0xe2, 0x40,
+ 0x14, 0xc7, 0x8d, 0xeb, 0x8a, 0xce, 0x2e, 0xbb, 0x30, 0x2b, 0x1a, 0x64, 0x1b, 0x25, 0x27, 0xa1,
+ 0x34, 0x41, 0x7a, 0xf3, 0xd0, 0x82, 0xb5, 0x3d, 0x4b, 0xf0, 0x50, 0x7a, 0x91, 0xc9, 0xe4, 0x35,
+ 0x06, 0x93, 0x99, 0x30, 0x33, 0x4a, 0xa5, 0x9f, 0xa0, 0xb7, 0x7e, 0xac, 0x1e, 0x3d, 0xf6, 0x24,
+ 0x45, 0xbf, 0x81, 0x9f, 0xa0, 0x64, 0x12, 0x82, 0x14, 0x7a, 0x9a, 0xf7, 0x7b, 0xef, 0xff, 0xff,
+ 0xcf, 0x83, 0x87, 0xce, 0x23, 0x9f, 0xba, 0x24, 0x4d, 0xe3, 0x88, 0x12, 0x15, 0x71, 0x26, 0x5d,
+ 0x25, 0x08, 0x93, 0x8f, 0x20, 0xdc, 0xf5, 0xb0, 0xac, 0x9d, 0x54, 0x70, 0xc5, 0xf1, 0xff, 0xc8,
+ 0xa7, 0xce, 0xa9, 0xd8, 0x29, 0x05, 0xeb, 0x61, 0xb7, 0x15, 0xf2, 0x90, 0x6b, 0xa1, 0x9b, 0x55,
+ 0xb9, 0xc7, 0x7e, 0x46, 0x9d, 0xbb, 0x15, 0x0b, 0x23, 0x3f, 0x86, 0x19, 0x5f, 0x02, 0x9b, 0x12,
+ 0xba, 0x04, 0x35, 0x21, 0x8a, 0xe0, 0x16, 0xfa, 0x19, 0x00, 0xe3, 0x89, 0x69, 0xf4, 0x8d, 0x41,
+ 0xd3, 0xcb, 0x01, 0xb7, 0x51, 0x9d, 0x24, 0x7c, 0xc5, 0x94, 0x59, 0xed, 0x1b, 0x83, 0x9a, 0x57,
+ 0x50, 0xd6, 0x97, 0xc0, 0x02, 0x10, 0xe6, 0x0f, 0x2d, 0x2f, 0x08, 0x77, 0x51, 0x43, 0x00, 0x85,
+ 0x68, 0x0d, 0xc2, 0xac, 0xe9, 0x49, 0xc9, 0xf6, 0x35, 0x42, 0x93, 0x2c, 0x74, 0x26, 0x08, 0x05,
+ 0x8c, 0x51, 0x2d, 0x25, 0x6a, 0x51, 0x7c, 0xa7, 0x6b, 0x7c, 0x86, 0x90, 0x4f, 0x24, 0xcc, 0xf3,
+ 0x45, 0xaa, 0x7a, 0xd2, 0xcc, 0x3a, 0xda, 0x67, 0xbf, 0x18, 0xa8, 0x3e, 0x25, 0x82, 0x24, 0x12,
+ 0x8f, 0xd0, 0xef, 0xec, 0xc7, 0x39, 0x30, 0xe2, 0xc7, 0x10, 0xe8, 0x94, 0xc6, 0xb8, 0x73, 0xdc,
+ 0xf5, 0xfe, 0x6d, 0x48, 0x12, 0x8f, 0xec, 0xd3, 0xa9, 0xed, 0xfd, 0xca, 0xf0, 0x36, 0x27, 0x7c,
+ 0x83, 0xfe, 0x16, 0x3b, 0x95, 0xf6, 0xaa, 0xb6, 0x77, 0x8f, 0xbb, 0x5e, 0x3b, 0xb7, 0x7f, 0x11,
+ 0xd8, 0xde, 0x9f, 0xa2, 0x53, 0x84, 0x8c, 0xef, 0xdf, 0xf6, 0x96, 0xb1, 0xdd, 0x5b, 0xc6, 0xc7,
+ 0xde, 0x32, 0x5e, 0x0f, 0x56, 0x65, 0x7b, 0xb0, 0x2a, 0xef, 0x07, 0xab, 0xf2, 0x70, 0x15, 0x46,
+ 0x6a, 0xb1, 0xf2, 0x1d, 0xca, 0x13, 0x97, 0x72, 0x99, 0x70, 0x59, 0x3c, 0x17, 0x32, 0x58, 0xba,
+ 0x4f, 0xee, 0xf7, 0x37, 0x56, 0x9b, 0x14, 0xa4, 0x5f, 0xd7, 0xa7, 0xba, 0xfc, 0x0c, 0x00, 0x00,
+ 0xff, 0xff, 0x46, 0x73, 0x85, 0x0b, 0x0d, 0x02, 0x00, 0x00,
+}
+
+func (m *FungibleTokenPacketData) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *FungibleTokenPacketData) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *FungibleTokenPacketData) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Receiver) > 0 {
+ i -= len(m.Receiver)
+ copy(dAtA[i:], m.Receiver)
+ i = encodeVarintTransfer(dAtA, i, uint64(len(m.Receiver)))
+ i--
+ dAtA[i] = 0x22
+ }
+ if len(m.Sender) > 0 {
+ i -= len(m.Sender)
+ copy(dAtA[i:], m.Sender)
+ i = encodeVarintTransfer(dAtA, i, uint64(len(m.Sender)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.Amount != 0 {
+ i = encodeVarintTransfer(dAtA, i, uint64(m.Amount))
+ i--
+ dAtA[i] = 0x10
+ }
+ if len(m.Denom) > 0 {
+ i -= len(m.Denom)
+ copy(dAtA[i:], m.Denom)
+ i = encodeVarintTransfer(dAtA, i, uint64(len(m.Denom)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *DenomTrace) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DenomTrace) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DenomTrace) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.BaseDenom) > 0 {
+ i -= len(m.BaseDenom)
+ copy(dAtA[i:], m.BaseDenom)
+ i = encodeVarintTransfer(dAtA, i, uint64(len(m.BaseDenom)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Path) > 0 {
+ i -= len(m.Path)
+ copy(dAtA[i:], m.Path)
+ i = encodeVarintTransfer(dAtA, i, uint64(len(m.Path)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Params) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Params) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Params) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.ReceiveEnabled {
+ i--
+ if m.ReceiveEnabled {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x10
+ }
+ if m.SendEnabled {
+ i--
+ if m.SendEnabled {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintTransfer(dAtA []byte, offset int, v uint64) int {
+ offset -= sovTransfer(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *FungibleTokenPacketData) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Denom)
+ if l > 0 {
+ n += 1 + l + sovTransfer(uint64(l))
+ }
+ if m.Amount != 0 {
+ n += 1 + sovTransfer(uint64(m.Amount))
+ }
+ l = len(m.Sender)
+ if l > 0 {
+ n += 1 + l + sovTransfer(uint64(l))
+ }
+ l = len(m.Receiver)
+ if l > 0 {
+ n += 1 + l + sovTransfer(uint64(l))
+ }
+ return n
+}
+
+func (m *DenomTrace) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Path)
+ if l > 0 {
+ n += 1 + l + sovTransfer(uint64(l))
+ }
+ l = len(m.BaseDenom)
+ if l > 0 {
+ n += 1 + l + sovTransfer(uint64(l))
+ }
+ return n
+}
+
+func (m *Params) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.SendEnabled {
+ n += 2
+ }
+ if m.ReceiveEnabled {
+ n += 2
+ }
+ return n
+}
+
+func sovTransfer(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozTransfer(x uint64) (n int) {
+ return sovTransfer(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *FungibleTokenPacketData) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTransfer
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: FungibleTokenPacketData: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: FungibleTokenPacketData: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Denom", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTransfer
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTransfer
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTransfer
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Denom = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Amount", wireType)
+ }
+ m.Amount = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTransfer
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Amount |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Sender", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTransfer
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTransfer
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTransfer
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Sender = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Receiver", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTransfer
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTransfer
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTransfer
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Receiver = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTransfer(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthTransfer
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DenomTrace) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTransfer
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DenomTrace: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DenomTrace: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTransfer
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTransfer
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTransfer
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Path = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field BaseDenom", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTransfer
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTransfer
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTransfer
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.BaseDenom = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTransfer(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthTransfer
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Params) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTransfer
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Params: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Params: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SendEnabled", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTransfer
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.SendEnabled = bool(v != 0)
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ReceiveEnabled", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTransfer
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.ReceiveEnabled = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTransfer(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthTransfer
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipTransfer(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowTransfer
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowTransfer
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowTransfer
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthTransfer
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupTransfer
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthTransfer
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthTransfer = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowTransfer = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupTransfer = fmt.Errorf("proto: unexpected end of group")
+)
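The helpers at the end of transfer.pb.go above (encodeVarintTransfer, sovTransfer, sozTransfer, skipTransfer) are the standard gogoproto plumbing for protobuf base-128 varints: integers are written 7 bits at a time, least-significant group first, with the high bit of each byte marking continuation, and messages are marshaled back-to-front into a pre-sized buffer. The following is a minimal, standalone sketch of that wire format (editor's illustration, not part of the patch; the overflow checks present in the generated Unmarshal loops are omitted):

package main

import "fmt"

// sizeOfVarint mirrors sovTransfer: the number of bytes v occupies as a varint.
func sizeOfVarint(v uint64) int {
	n := 1
	for v >= 1<<7 {
		v >>= 7
		n++
	}
	return n
}

// appendVarint mirrors the loop body of encodeVarintTransfer, but appends
// forward instead of writing backwards into a pre-sized buffer.
func appendVarint(buf []byte, v uint64) []byte {
	for v >= 1<<7 {
		buf = append(buf, byte(v&0x7f|0x80))
		v >>= 7
	}
	return append(buf, byte(v))
}

// readVarint mirrors the decode loops in the generated Unmarshal methods:
// accumulate 7 bits per byte until a byte without the continuation bit.
func readVarint(buf []byte) (value uint64, n int) {
	for i, b := range buf {
		value |= uint64(b&0x7f) << (7 * uint(i))
		if b < 0x80 {
			return value, i + 1
		}
	}
	return 0, 0 // truncated input
}

func main() {
	buf := appendVarint(nil, 300)
	v, n := readVarint(buf)
	fmt.Println(buf, v, n, sizeOfVarint(300)) // [172 2] 300 2 2
}

Running the sketch prints "[172 2] 300 2 2": the value 300 occupies two bytes on the wire, matching the size that sovTransfer would report.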
diff --git a/applications/transfer/types/tx.pb.go b/applications/transfer/types/tx.pb.go
new file mode 100644
index 00000000..e3a630b4
--- /dev/null
+++ b/applications/transfer/types/tx.pb.go
@@ -0,0 +1,804 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: ibc/applications/transfer/v1/tx.proto
+
+package types
+
+import (
+ context "context"
+ fmt "fmt"
+ types "github.com/cosmos/cosmos-sdk/types"
+ types1 "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ _ "github.com/gogo/protobuf/gogoproto"
+ grpc1 "github.com/gogo/protobuf/grpc"
+ proto "github.com/gogo/protobuf/proto"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+ io "io"
+ math "math"
+ math_bits "math/bits"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+// MsgTransfer defines a msg to transfer fungible tokens (i.e. Coins) between
+// ICS20 enabled chains. See ICS Spec here:
+// https://github.com/cosmos/ics/tree/master/spec/ics-020-fungible-token-transfer#data-structures
+type MsgTransfer struct {
+ // the port on which the packet will be sent
+ SourcePort string `protobuf:"bytes,1,opt,name=source_port,json=sourcePort,proto3" json:"source_port,omitempty" yaml:"source_port"`
+ // the channel by which the packet will be sent
+ SourceChannel string `protobuf:"bytes,2,opt,name=source_channel,json=sourceChannel,proto3" json:"source_channel,omitempty" yaml:"source_channel"`
+ // the tokens to be transferred
+ Token types.Coin `protobuf:"bytes,3,opt,name=token,proto3" json:"token"`
+ // the sender address
+ Sender string `protobuf:"bytes,4,opt,name=sender,proto3" json:"sender,omitempty"`
+ // the recipient address on the destination chain
+ Receiver string `protobuf:"bytes,5,opt,name=receiver,proto3" json:"receiver,omitempty"`
+ // Timeout height relative to the current block height.
+ // The timeout is disabled when set to 0.
+ TimeoutHeight types1.Height `protobuf:"bytes,6,opt,name=timeout_height,json=timeoutHeight,proto3" json:"timeout_height" yaml:"timeout_height"`
+ // Timeout timestamp (in nanoseconds) relative to the current block timestamp.
+ // The timeout is disabled when set to 0.
+ TimeoutTimestamp uint64 `protobuf:"varint,7,opt,name=timeout_timestamp,json=timeoutTimestamp,proto3" json:"timeout_timestamp,omitempty" yaml:"timeout_timestamp"`
+}
+
+func (m *MsgTransfer) Reset() { *m = MsgTransfer{} }
+func (m *MsgTransfer) String() string { return proto.CompactTextString(m) }
+func (*MsgTransfer) ProtoMessage() {}
+func (*MsgTransfer) Descriptor() ([]byte, []int) {
+ return fileDescriptor_7401ed9bed2f8e09, []int{0}
+}
+func (m *MsgTransfer) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MsgTransfer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_MsgTransfer.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *MsgTransfer) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MsgTransfer.Merge(m, src)
+}
+func (m *MsgTransfer) XXX_Size() int {
+ return m.Size()
+}
+func (m *MsgTransfer) XXX_DiscardUnknown() {
+ xxx_messageInfo_MsgTransfer.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MsgTransfer proto.InternalMessageInfo
+
+// MsgTransferResponse defines the Msg/Transfer response type.
+type MsgTransferResponse struct {
+}
+
+func (m *MsgTransferResponse) Reset() { *m = MsgTransferResponse{} }
+func (m *MsgTransferResponse) String() string { return proto.CompactTextString(m) }
+func (*MsgTransferResponse) ProtoMessage() {}
+func (*MsgTransferResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_7401ed9bed2f8e09, []int{1}
+}
+func (m *MsgTransferResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MsgTransferResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_MsgTransferResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *MsgTransferResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MsgTransferResponse.Merge(m, src)
+}
+func (m *MsgTransferResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *MsgTransferResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_MsgTransferResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MsgTransferResponse proto.InternalMessageInfo
+
+func init() {
+ proto.RegisterType((*MsgTransfer)(nil), "ibc.applications.transfer.v1.MsgTransfer")
+ proto.RegisterType((*MsgTransferResponse)(nil), "ibc.applications.transfer.v1.MsgTransferResponse")
+}
+
+func init() {
+ proto.RegisterFile("ibc/applications/transfer/v1/tx.proto", fileDescriptor_7401ed9bed2f8e09)
+}
+
+var fileDescriptor_7401ed9bed2f8e09 = []byte{
+ // 488 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0x41, 0x6f, 0xd3, 0x30,
+ 0x14, 0xc7, 0x13, 0xd6, 0x95, 0xe2, 0x6a, 0x13, 0x18, 0x36, 0x65, 0xd5, 0x48, 0xaa, 0x48, 0x48,
+ 0xe5, 0x80, 0xad, 0x0c, 0x21, 0xa4, 0x1d, 0x10, 0xca, 0x2e, 0x70, 0x98, 0x84, 0xa2, 0x1d, 0x10,
+ 0x97, 0x91, 0x78, 0x26, 0xb1, 0xd6, 0xd8, 0x91, 0xed, 0x46, 0xdb, 0x37, 0xe0, 0xc8, 0x47, 0xd8,
+ 0x99, 0x4f, 0xb2, 0xe3, 0x8e, 0x9c, 0x2a, 0xd4, 0x5e, 0x38, 0xf7, 0x13, 0xa0, 0xc4, 0x6e, 0x69,
+ 0x0f, 0x20, 0x4e, 0xf1, 0x7b, 0xff, 0xdf, 0xf3, 0x5f, 0xcf, 0xef, 0x05, 0x3c, 0x63, 0x19, 0xc1,
+ 0x69, 0x55, 0x8d, 0x19, 0x49, 0x35, 0x13, 0x5c, 0x61, 0x2d, 0x53, 0xae, 0xbe, 0x50, 0x89, 0xeb,
+ 0x08, 0xeb, 0x2b, 0x54, 0x49, 0xa1, 0x05, 0x3c, 0x64, 0x19, 0x41, 0xeb, 0x18, 0x5a, 0x62, 0xa8,
+ 0x8e, 0x06, 0x4f, 0x72, 0x91, 0x8b, 0x16, 0xc4, 0xcd, 0xc9, 0xd4, 0x0c, 0x7c, 0x22, 0x54, 0x29,
+ 0x14, 0xce, 0x52, 0x45, 0x71, 0x1d, 0x65, 0x54, 0xa7, 0x11, 0x26, 0x82, 0x71, 0xab, 0x07, 0x8d,
+ 0x35, 0x11, 0x92, 0x62, 0x32, 0x66, 0x94, 0xeb, 0xc6, 0xd0, 0x9c, 0x0c, 0x10, 0x7e, 0xdf, 0x02,
+ 0xfd, 0x53, 0x95, 0x9f, 0x59, 0x27, 0xf8, 0x1a, 0xf4, 0x95, 0x98, 0x48, 0x42, 0xcf, 0x2b, 0x21,
+ 0xb5, 0xe7, 0x0e, 0xdd, 0xd1, 0x83, 0x78, 0x7f, 0x31, 0x0d, 0xe0, 0x75, 0x5a, 0x8e, 0x8f, 0xc3,
+ 0x35, 0x31, 0x4c, 0x80, 0x89, 0x3e, 0x08, 0xa9, 0xe1, 0x5b, 0xb0, 0x6b, 0x35, 0x52, 0xa4, 0x9c,
+ 0xd3, 0xb1, 0x77, 0xaf, 0xad, 0x3d, 0x58, 0x4c, 0x83, 0xbd, 0x8d, 0x5a, 0xab, 0x87, 0xc9, 0x8e,
+ 0x49, 0x9c, 0x98, 0x18, 0xbe, 0x02, 0xdb, 0x5a, 0x5c, 0x52, 0xee, 0x6d, 0x0d, 0xdd, 0x51, 0xff,
+ 0xe8, 0x00, 0x99, 0xde, 0x50, 0xd3, 0x1b, 0xb2, 0xbd, 0xa1, 0x13, 0xc1, 0x78, 0xdc, 0xb9, 0x9d,
+ 0x06, 0x4e, 0x62, 0x68, 0xb8, 0x0f, 0xba, 0x8a, 0xf2, 0x0b, 0x2a, 0xbd, 0x4e, 0x63, 0x98, 0xd8,
+ 0x08, 0x0e, 0x40, 0x4f, 0x52, 0x42, 0x59, 0x4d, 0xa5, 0xb7, 0xdd, 0x2a, 0xab, 0x18, 0x7e, 0x06,
+ 0xbb, 0x9a, 0x95, 0x54, 0x4c, 0xf4, 0x79, 0x41, 0x59, 0x5e, 0x68, 0xaf, 0xdb, 0x7a, 0x0e, 0x50,
+ 0x33, 0x83, 0xe6, 0xbd, 0x90, 0x7d, 0xa5, 0x3a, 0x42, 0xef, 0x5a, 0x22, 0x7e, 0xda, 0x98, 0xfe,
+ 0x69, 0x66, 0xb3, 0x3e, 0x4c, 0x76, 0x6c, 0xc2, 0xd0, 0xf0, 0x3d, 0x78, 0xb4, 0x24, 0x9a, 0xaf,
+ 0xd2, 0x69, 0x59, 0x79, 0xf7, 0x87, 0xee, 0xa8, 0x13, 0x1f, 0x2e, 0xa6, 0x81, 0xb7, 0x79, 0xc9,
+ 0x0a, 0x09, 0x93, 0x87, 0x36, 0x77, 0xb6, 0x4c, 0x1d, 0xf7, 0xbe, 0xde, 0x04, 0xce, 0xaf, 0x9b,
+ 0xc0, 0x09, 0xf7, 0xc0, 0xe3, 0xb5, 0x59, 0x25, 0x54, 0x55, 0x82, 0x2b, 0x7a, 0x24, 0xc0, 0xd6,
+ 0xa9, 0xca, 0x61, 0x01, 0x7a, 0xab, 0x31, 0x3e, 0x47, 0xff, 0x5a, 0x26, 0xb4, 0x76, 0xcb, 0x20,
+ 0xfa, 0x6f, 0x74, 0x69, 0x18, 0x7f, 0xbc, 0x9d, 0xf9, 0xee, 0xdd, 0xcc, 0x77, 0x7f, 0xce, 0x7c,
+ 0xf7, 0xdb, 0xdc, 0x77, 0xee, 0xe6, 0xbe, 0xf3, 0x63, 0xee, 0x3b, 0x9f, 0xde, 0xe4, 0x4c, 0x17,
+ 0x93, 0x0c, 0x11, 0x51, 0x62, 0xbb, 0x9a, 0xe6, 0xf3, 0x42, 0x5d, 0x5c, 0xe2, 0x2b, 0xfc, 0xf7,
+ 0x3f, 0x41, 0x5f, 0x57, 0x54, 0x65, 0xdd, 0x76, 0x2b, 0x5f, 0xfe, 0x0e, 0x00, 0x00, 0xff, 0xff,
+ 0x26, 0x76, 0x5b, 0xfa, 0x33, 0x03, 0x00, 0x00,
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// MsgClient is the client API for Msg service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type MsgClient interface {
+	// Transfer defines an RPC handler method for MsgTransfer.
+ Transfer(ctx context.Context, in *MsgTransfer, opts ...grpc.CallOption) (*MsgTransferResponse, error)
+}
+
+type msgClient struct {
+ cc grpc1.ClientConn
+}
+
+func NewMsgClient(cc grpc1.ClientConn) MsgClient {
+ return &msgClient{cc}
+}
+
+func (c *msgClient) Transfer(ctx context.Context, in *MsgTransfer, opts ...grpc.CallOption) (*MsgTransferResponse, error) {
+ out := new(MsgTransferResponse)
+ err := c.cc.Invoke(ctx, "/ibc.applications.transfer.v1.Msg/Transfer", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// MsgServer is the server API for Msg service.
+type MsgServer interface {
+	// Transfer defines an RPC handler method for MsgTransfer.
+ Transfer(context.Context, *MsgTransfer) (*MsgTransferResponse, error)
+}
+
+// UnimplementedMsgServer can be embedded to have forward compatible implementations.
+type UnimplementedMsgServer struct {
+}
+
+func (*UnimplementedMsgServer) Transfer(ctx context.Context, req *MsgTransfer) (*MsgTransferResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Transfer not implemented")
+}
+
+func RegisterMsgServer(s grpc1.Server, srv MsgServer) {
+ s.RegisterService(&_Msg_serviceDesc, srv)
+}
+
+func _Msg_Transfer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(MsgTransfer)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(MsgServer).Transfer(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/ibc.applications.transfer.v1.Msg/Transfer",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(MsgServer).Transfer(ctx, req.(*MsgTransfer))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _Msg_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "ibc.applications.transfer.v1.Msg",
+ HandlerType: (*MsgServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "Transfer",
+ Handler: _Msg_Transfer_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "ibc/applications/transfer/v1/tx.proto",
+}
+
+func (m *MsgTransfer) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MsgTransfer) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MsgTransfer) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.TimeoutTimestamp != 0 {
+ i = encodeVarintTx(dAtA, i, uint64(m.TimeoutTimestamp))
+ i--
+ dAtA[i] = 0x38
+ }
+ {
+ size, err := m.TimeoutHeight.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTx(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x32
+ if len(m.Receiver) > 0 {
+ i -= len(m.Receiver)
+ copy(dAtA[i:], m.Receiver)
+ i = encodeVarintTx(dAtA, i, uint64(len(m.Receiver)))
+ i--
+ dAtA[i] = 0x2a
+ }
+ if len(m.Sender) > 0 {
+ i -= len(m.Sender)
+ copy(dAtA[i:], m.Sender)
+ i = encodeVarintTx(dAtA, i, uint64(len(m.Sender)))
+ i--
+ dAtA[i] = 0x22
+ }
+ {
+ size, err := m.Token.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTx(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ if len(m.SourceChannel) > 0 {
+ i -= len(m.SourceChannel)
+ copy(dAtA[i:], m.SourceChannel)
+ i = encodeVarintTx(dAtA, i, uint64(len(m.SourceChannel)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.SourcePort) > 0 {
+ i -= len(m.SourcePort)
+ copy(dAtA[i:], m.SourcePort)
+ i = encodeVarintTx(dAtA, i, uint64(len(m.SourcePort)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *MsgTransferResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MsgTransferResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MsgTransferResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintTx(dAtA []byte, offset int, v uint64) int {
+ offset -= sovTx(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *MsgTransfer) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.SourcePort)
+ if l > 0 {
+ n += 1 + l + sovTx(uint64(l))
+ }
+ l = len(m.SourceChannel)
+ if l > 0 {
+ n += 1 + l + sovTx(uint64(l))
+ }
+ l = m.Token.Size()
+ n += 1 + l + sovTx(uint64(l))
+ l = len(m.Sender)
+ if l > 0 {
+ n += 1 + l + sovTx(uint64(l))
+ }
+ l = len(m.Receiver)
+ if l > 0 {
+ n += 1 + l + sovTx(uint64(l))
+ }
+ l = m.TimeoutHeight.Size()
+ n += 1 + l + sovTx(uint64(l))
+ if m.TimeoutTimestamp != 0 {
+ n += 1 + sovTx(uint64(m.TimeoutTimestamp))
+ }
+ return n
+}
+
+func (m *MsgTransferResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ return n
+}
+
+func sovTx(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozTx(x uint64) (n int) {
+ return sovTx(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *MsgTransfer) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MsgTransfer: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MsgTransfer: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SourcePort", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.SourcePort = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SourceChannel", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.SourceChannel = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Token", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Token.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Sender", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Sender = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Receiver", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Receiver = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TimeoutHeight", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.TimeoutHeight.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 7:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TimeoutTimestamp", wireType)
+ }
+ m.TimeoutTimestamp = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.TimeoutTimestamp |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTx(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthTx
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MsgTransferResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MsgTransferResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MsgTransferResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTx(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthTx
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipTx(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthTx
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupTx
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthTx
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthTx = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowTx = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupTx = fmt.Errorf("proto: unexpected end of group")
+)
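Besides the message types, tx.pb.go emits the gRPC bindings for the Msg service: MsgClient for callers, MsgServer plus RegisterMsgServer for the keeper-side implementation, and _Msg_Transfer_Handler to route /ibc.applications.transfer.v1.Msg/Transfer. A usage sketch of the generated client follows (editor's illustration, not part of the patch; the transfer types import path is hypothetical, conn is an already-established connection, and in practice a MsgTransfer is normally delivered through the SDK's message routing rather than a raw gRPC call):

package example

import (
	"context"
	"fmt"

	sdk "github.com/cosmos/cosmos-sdk/types"
	clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
	"google.golang.org/grpc"

	// hypothetical import path for the generated package in this patch
	transfertypes "github.com/cosmos/ibc-go/applications/transfer/types"
)

// sendTransfer submits a MsgTransfer over the generated MsgClient.
// *grpc.ClientConn satisfies the grpc1.ClientConn interface used by NewMsgClient.
func sendTransfer(conn *grpc.ClientConn) error {
	msg := &transfertypes.MsgTransfer{
		SourcePort:       "transfer",
		SourceChannel:    "channel-0",
		Token:            sdk.NewCoin("uatom", sdk.NewInt(100)),
		Sender:           "cosmos1...", // placeholder sender address
		Receiver:         "cosmos1...", // placeholder receiver on the counterparty chain
		TimeoutHeight:    clienttypes.NewHeight(1, 1000), // revision 1, height 1000
		TimeoutTimestamp: 0,                              // timestamp timeout disabled
	}
	res, err := transfertypes.NewMsgClient(conn).Transfer(context.Background(), msg)
	if err != nil {
		return err
	}
	fmt.Printf("transfer response: %v\n", res)
	return nil
}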
diff --git a/apps/transfer/types/genesis.pb.go b/apps/transfer/types/genesis.pb.go
new file mode 100644
index 00000000..b19173d8
--- /dev/null
+++ b/apps/transfer/types/genesis.pb.go
@@ -0,0 +1,443 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: ibcgo/apps/transfer/v1/genesis.proto
+
+package types
+
+import (
+ fmt "fmt"
+ _ "github.com/gogo/protobuf/gogoproto"
+ proto "github.com/gogo/protobuf/proto"
+ io "io"
+ math "math"
+ math_bits "math/bits"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+// GenesisState defines the ibc-transfer genesis state
+type GenesisState struct {
+ PortId string `protobuf:"bytes,1,opt,name=port_id,json=portId,proto3" json:"port_id,omitempty" yaml:"port_id"`
+ DenomTraces Traces `protobuf:"bytes,2,rep,name=denom_traces,json=denomTraces,proto3,castrepeated=Traces" json:"denom_traces" yaml:"denom_traces"`
+ Params Params `protobuf:"bytes,3,opt,name=params,proto3" json:"params"`
+}
+
+func (m *GenesisState) Reset() { *m = GenesisState{} }
+func (m *GenesisState) String() string { return proto.CompactTextString(m) }
+func (*GenesisState) ProtoMessage() {}
+func (*GenesisState) Descriptor() ([]byte, []int) {
+ return fileDescriptor_19e19f3d07c11479, []int{0}
+}
+func (m *GenesisState) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *GenesisState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_GenesisState.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *GenesisState) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GenesisState.Merge(m, src)
+}
+func (m *GenesisState) XXX_Size() int {
+ return m.Size()
+}
+func (m *GenesisState) XXX_DiscardUnknown() {
+ xxx_messageInfo_GenesisState.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GenesisState proto.InternalMessageInfo
+
+func (m *GenesisState) GetPortId() string {
+ if m != nil {
+ return m.PortId
+ }
+ return ""
+}
+
+func (m *GenesisState) GetDenomTraces() Traces {
+ if m != nil {
+ return m.DenomTraces
+ }
+ return nil
+}
+
+func (m *GenesisState) GetParams() Params {
+ if m != nil {
+ return m.Params
+ }
+ return Params{}
+}
+
+func init() {
+ proto.RegisterType((*GenesisState)(nil), "ibcgo.apps.transfer.v1.GenesisState")
+}
+
+func init() {
+ proto.RegisterFile("ibcgo/apps/transfer/v1/genesis.proto", fileDescriptor_19e19f3d07c11479)
+}
+
+var fileDescriptor_19e19f3d07c11479 = []byte{
+ // 305 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0xc9, 0x4c, 0x4a, 0x4e,
+ 0xcf, 0xd7, 0x4f, 0x2c, 0x28, 0x28, 0xd6, 0x2f, 0x29, 0x4a, 0xcc, 0x2b, 0x4e, 0x4b, 0x2d, 0xd2,
+ 0x2f, 0x33, 0xd4, 0x4f, 0x4f, 0xcd, 0x4b, 0x2d, 0xce, 0x2c, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9,
+ 0x17, 0x12, 0x03, 0xab, 0xd2, 0x03, 0xa9, 0xd2, 0x83, 0xa9, 0xd2, 0x2b, 0x33, 0x94, 0x52, 0xc5,
+ 0xa1, 0x1b, 0xae, 0x06, 0xac, 0x5d, 0x4a, 0x24, 0x3d, 0x3f, 0x3d, 0x1f, 0xcc, 0xd4, 0x07, 0xb1,
+ 0x20, 0xa2, 0x4a, 0xcf, 0x19, 0xb9, 0x78, 0xdc, 0x21, 0xd6, 0x04, 0x97, 0x24, 0x96, 0xa4, 0x0a,
+ 0x69, 0x73, 0xb1, 0x17, 0xe4, 0x17, 0x95, 0xc4, 0x67, 0xa6, 0x48, 0x30, 0x2a, 0x30, 0x6a, 0x70,
+ 0x3a, 0x09, 0x7d, 0xba, 0x27, 0xcf, 0x57, 0x99, 0x98, 0x9b, 0x63, 0xa5, 0x04, 0x95, 0x50, 0x0a,
+ 0x62, 0x03, 0xb1, 0x3c, 0x53, 0x84, 0x72, 0xb8, 0x78, 0x52, 0x52, 0xf3, 0xf2, 0x73, 0xe3, 0x4b,
+ 0x8a, 0x12, 0x93, 0x53, 0x8b, 0x25, 0x98, 0x14, 0x98, 0x35, 0xb8, 0x8d, 0x94, 0xf4, 0xb0, 0xbb,
+ 0x54, 0xcf, 0x05, 0xa4, 0x36, 0x04, 0xa4, 0xd4, 0x49, 0xf5, 0xc4, 0x3d, 0x79, 0x86, 0x4f, 0xf7,
+ 0xe4, 0x85, 0x21, 0x26, 0x23, 0x9b, 0xa2, 0xb4, 0xea, 0xbe, 0x3c, 0x1b, 0x58, 0x55, 0x71, 0x10,
+ 0x77, 0x0a, 0x5c, 0x4b, 0xb1, 0x90, 0x0d, 0x17, 0x5b, 0x41, 0x62, 0x51, 0x62, 0x6e, 0xb1, 0x04,
+ 0xb3, 0x02, 0xa3, 0x06, 0xb7, 0x91, 0x1c, 0x2e, 0x7b, 0x02, 0xc0, 0xaa, 0x9c, 0x58, 0x40, 0x76,
+ 0x04, 0x41, 0xf5, 0x38, 0xb9, 0x9d, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47,
+ 0x72, 0x8c, 0x13, 0x1e, 0xcb, 0x31, 0x5c, 0x78, 0x2c, 0xc7, 0x70, 0xe3, 0xb1, 0x1c, 0x43, 0x94,
+ 0x4e, 0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x7e, 0x72, 0x7e, 0x71, 0x6e,
+ 0x7e, 0xb1, 0x7e, 0x66, 0x52, 0xb2, 0x2e, 0x46, 0x98, 0x96, 0x54, 0x16, 0xa4, 0x16, 0x27, 0xb1,
+ 0x81, 0x03, 0xce, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0x67, 0xb9, 0x62, 0x19, 0xb5, 0x01, 0x00,
+ 0x00,
+}
+
+func (m *GenesisState) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *GenesisState) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Params.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenesis(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ if len(m.DenomTraces) > 0 {
+ for iNdEx := len(m.DenomTraces) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.DenomTraces[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenesis(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if len(m.PortId) > 0 {
+ i -= len(m.PortId)
+ copy(dAtA[i:], m.PortId)
+ i = encodeVarintGenesis(dAtA, i, uint64(len(m.PortId)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintGenesis(dAtA []byte, offset int, v uint64) int {
+ offset -= sovGenesis(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *GenesisState) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.PortId)
+ if l > 0 {
+ n += 1 + l + sovGenesis(uint64(l))
+ }
+ if len(m.DenomTraces) > 0 {
+ for _, e := range m.DenomTraces {
+ l = e.Size()
+ n += 1 + l + sovGenesis(uint64(l))
+ }
+ }
+ l = m.Params.Size()
+ n += 1 + l + sovGenesis(uint64(l))
+ return n
+}
+
+func sovGenesis(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozGenesis(x uint64) (n int) {
+ return sovGenesis(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *GenesisState) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenesis
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: GenesisState: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: GenesisState: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PortId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenesis
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenesis
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenesis
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.PortId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DenomTraces", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenesis
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenesis
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenesis
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.DenomTraces = append(m.DenomTraces, DenomTrace{})
+ if err := m.DenomTraces[len(m.DenomTraces)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenesis
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenesis
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenesis
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenesis(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenesis
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipGenesis(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenesis
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenesis
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenesis
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthGenesis
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupGenesis
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthGenesis
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthGenesis = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowGenesis = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupGenesis = fmt.Errorf("proto: unexpected end of group")
+)
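genesis.pb.go wires the module's genesis state: the bound port ID, the accumulated DenomTraces (Traces is a named slice of DenomTrace defined elsewhere in the package), and the module Params. A round-trip sketch through the generated Marshal/Unmarshal methods (editor's illustration, not part of the patch; the import path is hypothetical):

package main

import (
	"fmt"

	// hypothetical import path for the generated package in this patch
	transfertypes "github.com/cosmos/ibc-go/apps/transfer/types"
)

func main() {
	gs := transfertypes.GenesisState{
		PortId: "transfer",
		DenomTraces: []transfertypes.DenomTrace{
			{Path: "transfer/channel-0", BaseDenom: "uatom"},
		},
		Params: transfertypes.Params{SendEnabled: true, ReceiveEnabled: true},
	}

	bz, err := gs.Marshal() // delegates to the generated MarshalToSizedBuffer above
	if err != nil {
		panic(err)
	}

	var decoded transfertypes.GenesisState
	if err := decoded.Unmarshal(bz); err != nil {
		panic(err)
	}
	fmt.Println(decoded.PortId, len(decoded.DenomTraces), decoded.Params.SendEnabled)
	// Output: transfer 1 true
}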
diff --git a/apps/transfer/types/query.pb.go b/apps/transfer/types/query.pb.go
new file mode 100644
index 00000000..3e365af1
--- /dev/null
+++ b/apps/transfer/types/query.pb.go
@@ -0,0 +1,1418 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: ibcgo/apps/transfer/v1/query.proto
+
+package types
+
+import (
+ context "context"
+ fmt "fmt"
+ query "github.com/cosmos/cosmos-sdk/types/query"
+ _ "github.com/gogo/protobuf/gogoproto"
+ grpc1 "github.com/gogo/protobuf/grpc"
+ proto "github.com/gogo/protobuf/proto"
+ _ "google.golang.org/genproto/googleapis/api/annotations"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+ io "io"
+ math "math"
+ math_bits "math/bits"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+// QueryDenomTraceRequest is the request type for the Query/DenomTrace RPC
+// method
+type QueryDenomTraceRequest struct {
+ // hash (in hex format) of the denomination trace information.
+ Hash string `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"`
+}
+
+func (m *QueryDenomTraceRequest) Reset() { *m = QueryDenomTraceRequest{} }
+func (m *QueryDenomTraceRequest) String() string { return proto.CompactTextString(m) }
+func (*QueryDenomTraceRequest) ProtoMessage() {}
+func (*QueryDenomTraceRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_956e6703e65895ef, []int{0}
+}
+func (m *QueryDenomTraceRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *QueryDenomTraceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_QueryDenomTraceRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *QueryDenomTraceRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QueryDenomTraceRequest.Merge(m, src)
+}
+func (m *QueryDenomTraceRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *QueryDenomTraceRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_QueryDenomTraceRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QueryDenomTraceRequest proto.InternalMessageInfo
+
+func (m *QueryDenomTraceRequest) GetHash() string {
+ if m != nil {
+ return m.Hash
+ }
+ return ""
+}
+
+// QueryDenomTraceResponse is the response type for the Query/DenomTrace RPC
+// method.
+type QueryDenomTraceResponse struct {
+ // denom_trace returns the requested denomination trace information.
+ DenomTrace *DenomTrace `protobuf:"bytes,1,opt,name=denom_trace,json=denomTrace,proto3" json:"denom_trace,omitempty"`
+}
+
+func (m *QueryDenomTraceResponse) Reset() { *m = QueryDenomTraceResponse{} }
+func (m *QueryDenomTraceResponse) String() string { return proto.CompactTextString(m) }
+func (*QueryDenomTraceResponse) ProtoMessage() {}
+func (*QueryDenomTraceResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_956e6703e65895ef, []int{1}
+}
+func (m *QueryDenomTraceResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *QueryDenomTraceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_QueryDenomTraceResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *QueryDenomTraceResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QueryDenomTraceResponse.Merge(m, src)
+}
+func (m *QueryDenomTraceResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *QueryDenomTraceResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_QueryDenomTraceResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QueryDenomTraceResponse proto.InternalMessageInfo
+
+func (m *QueryDenomTraceResponse) GetDenomTrace() *DenomTrace {
+ if m != nil {
+ return m.DenomTrace
+ }
+ return nil
+}
+
+// QueryDenomTracesRequest is the request type for the Query/DenomTraces RPC
+// method
+type QueryDenomTracesRequest struct {
+ // pagination defines an optional pagination for the request.
+ Pagination *query.PageRequest `protobuf:"bytes,1,opt,name=pagination,proto3" json:"pagination,omitempty"`
+}
+
+func (m *QueryDenomTracesRequest) Reset() { *m = QueryDenomTracesRequest{} }
+func (m *QueryDenomTracesRequest) String() string { return proto.CompactTextString(m) }
+func (*QueryDenomTracesRequest) ProtoMessage() {}
+func (*QueryDenomTracesRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_956e6703e65895ef, []int{2}
+}
+func (m *QueryDenomTracesRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *QueryDenomTracesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_QueryDenomTracesRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *QueryDenomTracesRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QueryDenomTracesRequest.Merge(m, src)
+}
+func (m *QueryDenomTracesRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *QueryDenomTracesRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_QueryDenomTracesRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QueryDenomTracesRequest proto.InternalMessageInfo
+
+func (m *QueryDenomTracesRequest) GetPagination() *query.PageRequest {
+ if m != nil {
+ return m.Pagination
+ }
+ return nil
+}
+
+// QueryDenomTracesResponse is the response type for the Query/DenomTraces RPC
+// method.
+type QueryDenomTracesResponse struct {
+	// denom_traces returns the trace information for all denominations.
+ DenomTraces Traces `protobuf:"bytes,1,rep,name=denom_traces,json=denomTraces,proto3,castrepeated=Traces" json:"denom_traces"`
+ // pagination defines the pagination in the response.
+ Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"`
+}
+
+func (m *QueryDenomTracesResponse) Reset() { *m = QueryDenomTracesResponse{} }
+func (m *QueryDenomTracesResponse) String() string { return proto.CompactTextString(m) }
+func (*QueryDenomTracesResponse) ProtoMessage() {}
+func (*QueryDenomTracesResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_956e6703e65895ef, []int{3}
+}
+func (m *QueryDenomTracesResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *QueryDenomTracesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_QueryDenomTracesResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *QueryDenomTracesResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QueryDenomTracesResponse.Merge(m, src)
+}
+func (m *QueryDenomTracesResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *QueryDenomTracesResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_QueryDenomTracesResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QueryDenomTracesResponse proto.InternalMessageInfo
+
+func (m *QueryDenomTracesResponse) GetDenomTraces() Traces {
+ if m != nil {
+ return m.DenomTraces
+ }
+ return nil
+}
+
+func (m *QueryDenomTracesResponse) GetPagination() *query.PageResponse {
+ if m != nil {
+ return m.Pagination
+ }
+ return nil
+}
+
+// QueryParamsRequest is the request type for the Query/Params RPC method.
+type QueryParamsRequest struct {
+}
+
+func (m *QueryParamsRequest) Reset() { *m = QueryParamsRequest{} }
+func (m *QueryParamsRequest) String() string { return proto.CompactTextString(m) }
+func (*QueryParamsRequest) ProtoMessage() {}
+func (*QueryParamsRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_956e6703e65895ef, []int{4}
+}
+func (m *QueryParamsRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *QueryParamsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_QueryParamsRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *QueryParamsRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QueryParamsRequest.Merge(m, src)
+}
+func (m *QueryParamsRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *QueryParamsRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_QueryParamsRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QueryParamsRequest proto.InternalMessageInfo
+
+// QueryParamsResponse is the response type for the Query/Params RPC method.
+type QueryParamsResponse struct {
+ // params defines the parameters of the module.
+ Params *Params `protobuf:"bytes,1,opt,name=params,proto3" json:"params,omitempty"`
+}
+
+func (m *QueryParamsResponse) Reset() { *m = QueryParamsResponse{} }
+func (m *QueryParamsResponse) String() string { return proto.CompactTextString(m) }
+func (*QueryParamsResponse) ProtoMessage() {}
+func (*QueryParamsResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_956e6703e65895ef, []int{5}
+}
+func (m *QueryParamsResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *QueryParamsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_QueryParamsResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *QueryParamsResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QueryParamsResponse.Merge(m, src)
+}
+func (m *QueryParamsResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *QueryParamsResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_QueryParamsResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QueryParamsResponse proto.InternalMessageInfo
+
+func (m *QueryParamsResponse) GetParams() *Params {
+ if m != nil {
+ return m.Params
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterType((*QueryDenomTraceRequest)(nil), "ibcgo.apps.transfer.v1.QueryDenomTraceRequest")
+ proto.RegisterType((*QueryDenomTraceResponse)(nil), "ibcgo.apps.transfer.v1.QueryDenomTraceResponse")
+ proto.RegisterType((*QueryDenomTracesRequest)(nil), "ibcgo.apps.transfer.v1.QueryDenomTracesRequest")
+ proto.RegisterType((*QueryDenomTracesResponse)(nil), "ibcgo.apps.transfer.v1.QueryDenomTracesResponse")
+ proto.RegisterType((*QueryParamsRequest)(nil), "ibcgo.apps.transfer.v1.QueryParamsRequest")
+ proto.RegisterType((*QueryParamsResponse)(nil), "ibcgo.apps.transfer.v1.QueryParamsResponse")
+}
+
+func init() {
+ proto.RegisterFile("ibcgo/apps/transfer/v1/query.proto", fileDescriptor_956e6703e65895ef)
+}
+
+var fileDescriptor_956e6703e65895ef = []byte{
+ // 519 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0xcd, 0x6e, 0x13, 0x3d,
+ 0x14, 0x8d, 0xdb, 0xef, 0x8b, 0xc4, 0x0d, 0x62, 0x61, 0xaa, 0x12, 0x8d, 0xaa, 0x69, 0x65, 0x95,
+ 0xbf, 0xb4, 0xd8, 0x4c, 0x91, 0x78, 0x80, 0x82, 0xca, 0x0a, 0xa9, 0x44, 0xb0, 0x61, 0x01, 0xf2,
+ 0x4c, 0xcd, 0x64, 0x24, 0x32, 0x9e, 0x8e, 0x9d, 0x48, 0x15, 0x62, 0xc3, 0x86, 0x2d, 0x12, 0x5b,
+ 0x16, 0xac, 0x79, 0x04, 0x9e, 0xa0, 0xcb, 0x4a, 0xdd, 0xb0, 0x02, 0x94, 0xf0, 0x20, 0x68, 0x6c,
+ 0x4f, 0x33, 0xd1, 0x24, 0x74, 0x76, 0xd6, 0xf5, 0xb9, 0xe7, 0x9e, 0x73, 0xae, 0x65, 0x20, 0x49,
+ 0x18, 0xc5, 0x92, 0xf1, 0x2c, 0x53, 0x4c, 0xe7, 0x3c, 0x55, 0x6f, 0x44, 0xce, 0xc6, 0x01, 0x3b,
+ 0x1e, 0x89, 0xfc, 0x84, 0x66, 0xb9, 0xd4, 0x12, 0xaf, 0x1b, 0x0c, 0x2d, 0x30, 0xb4, 0xc4, 0xd0,
+ 0x71, 0xe0, 0xad, 0xc5, 0x32, 0x96, 0x06, 0xc2, 0x8a, 0x93, 0x45, 0x7b, 0xbd, 0x48, 0xaa, 0xa1,
+ 0x54, 0x2c, 0xe4, 0x4a, 0x58, 0x1a, 0x36, 0x0e, 0x42, 0xa1, 0x79, 0xc0, 0x32, 0x1e, 0x27, 0x29,
+ 0xd7, 0x89, 0x4c, 0x1d, 0xf6, 0xe6, 0x92, 0xe9, 0x17, 0x53, 0x2c, 0x6c, 0x23, 0x96, 0x32, 0x7e,
+ 0x2b, 0x18, 0xcf, 0x12, 0xc6, 0xd3, 0x54, 0x6a, 0xc3, 0xa1, 0xec, 0x2d, 0xd9, 0x85, 0xf5, 0x67,
+ 0xc5, 0x98, 0xc7, 0x22, 0x95, 0xc3, 0xe7, 0x39, 0x8f, 0x44, 0x5f, 0x1c, 0x8f, 0x84, 0xd2, 0x18,
+ 0xc3, 0x7f, 0x03, 0xae, 0x06, 0x5d, 0xb4, 0x85, 0xee, 0x5c, 0xe9, 0x9b, 0x33, 0x79, 0x05, 0x37,
+ 0x6a, 0x68, 0x95, 0xc9, 0x54, 0x09, 0xfc, 0x08, 0x3a, 0x47, 0x45, 0xf5, 0xb5, 0x2e, 0xca, 0xa6,
+ 0xab, 0xb3, 0x47, 0xe8, 0x62, 0xf7, 0xb4, 0x42, 0x00, 0x47, 0x17, 0x67, 0xc2, 0x6b, 0xfc, 0xaa,
+ 0x94, 0x73, 0x00, 0x30, 0x4b, 0xc0, 0xd1, 0xdf, 0xa2, 0x36, 0x2e, 0x5a, 0xc4, 0x45, 0x6d, 0xea,
+ 0x2e, 0x2e, 0x7a, 0xc8, 0xe3, 0xd2, 0x4a, 0xbf, 0xd2, 0x49, 0xbe, 0x23, 0xe8, 0xd6, 0x67, 0x38,
+ 0x13, 0x2f, 0xe0, 0x6a, 0xc5, 0x84, 0xea, 0xa2, 0xad, 0xd5, 0x66, 0x2e, 0xf6, 0xaf, 0x9d, 0xfe,
+ 0xdc, 0x6c, 0x7d, 0xfb, 0xb5, 0xd9, 0x76, 0x8c, 0x9d, 0x99, 0x2b, 0x85, 0x9f, 0xcc, 0x69, 0x5f,
+ 0x31, 0xda, 0x6f, 0x5f, 0xaa, 0xdd, 0x6a, 0x9a, 0x13, 0xbf, 0x06, 0xd8, 0x68, 0x3f, 0xe4, 0x39,
+ 0x1f, 0x96, 0xd1, 0x90, 0xa7, 0x70, 0x7d, 0xae, 0xea, 0xcc, 0x3c, 0x84, 0x76, 0x66, 0x2a, 0x2e,
+ 0x2d, 0x7f, 0x99, 0x0d, 0xd7, 0xe7, 0xd0, 0x7b, 0xe7, 0xab, 0xf0, 0xbf, 0xe1, 0xc3, 0x5f, 0x11,
+ 0xc0, 0xcc, 0x23, 0xa6, 0xcb, 0x08, 0x16, 0xbf, 0x20, 0x8f, 0x35, 0xc6, 0x5b, 0xc5, 0x24, 0xf8,
+ 0x70, 0xfe, 0xe7, 0xf3, 0xca, 0x0e, 0xbe, 0xcb, 0x92, 0x30, 0xaa, 0x3f, 0xec, 0xea, 0x6a, 0xd8,
+ 0xbb, 0xe2, 0x41, 0xbe, 0xc7, 0x5f, 0x10, 0x74, 0x2a, 0x9b, 0xc4, 0x4d, 0x67, 0x96, 0xe1, 0x79,
+ 0xf7, 0x9b, 0x37, 0x38, 0x95, 0x3d, 0xa3, 0x72, 0x1b, 0x93, 0xcb, 0x55, 0xe2, 0x8f, 0x08, 0xda,
+ 0x36, 0x5e, 0xdc, 0xfb, 0xe7, 0xa0, 0xb9, 0x8d, 0x7a, 0x3b, 0x8d, 0xb0, 0x4e, 0xcf, 0xb6, 0xd1,
+ 0xe3, 0xe3, 0x8d, 0xc5, 0x7a, 0xec, 0x56, 0xf7, 0x0f, 0x4e, 0x27, 0x3e, 0x3a, 0x9b, 0xf8, 0xe8,
+ 0xf7, 0xc4, 0x47, 0x9f, 0xa6, 0x7e, 0xeb, 0x6c, 0xea, 0xb7, 0x7e, 0x4c, 0xfd, 0xd6, 0xcb, 0xdd,
+ 0x38, 0xd1, 0x83, 0x51, 0x48, 0x23, 0x39, 0x64, 0xee, 0xfb, 0x49, 0xc2, 0xe8, 0x5e, 0xed, 0x6b,
+ 0xd1, 0x27, 0x99, 0x50, 0x61, 0xdb, 0xfc, 0x1b, 0x0f, 0xfe, 0x06, 0x00, 0x00, 0xff, 0xff, 0xef,
+ 0xe0, 0xd8, 0x44, 0xfc, 0x04, 0x00, 0x00,
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// QueryClient is the client API for Query service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type QueryClient interface {
+	// DenomTrace queries the denomination trace information for a given hash.
+ DenomTrace(ctx context.Context, in *QueryDenomTraceRequest, opts ...grpc.CallOption) (*QueryDenomTraceResponse, error)
+ // DenomTraces queries all denomination traces.
+ DenomTraces(ctx context.Context, in *QueryDenomTracesRequest, opts ...grpc.CallOption) (*QueryDenomTracesResponse, error)
+ // Params queries all parameters of the ibc-transfer module.
+ Params(ctx context.Context, in *QueryParamsRequest, opts ...grpc.CallOption) (*QueryParamsResponse, error)
+}
+
+type queryClient struct {
+ cc grpc1.ClientConn
+}
+
+func NewQueryClient(cc grpc1.ClientConn) QueryClient {
+ return &queryClient{cc}
+}
+
+func (c *queryClient) DenomTrace(ctx context.Context, in *QueryDenomTraceRequest, opts ...grpc.CallOption) (*QueryDenomTraceResponse, error) {
+ out := new(QueryDenomTraceResponse)
+ err := c.cc.Invoke(ctx, "/ibcgo.apps.transfer.v1.Query/DenomTrace", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *queryClient) DenomTraces(ctx context.Context, in *QueryDenomTracesRequest, opts ...grpc.CallOption) (*QueryDenomTracesResponse, error) {
+ out := new(QueryDenomTracesResponse)
+ err := c.cc.Invoke(ctx, "/ibcgo.apps.transfer.v1.Query/DenomTraces", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *queryClient) Params(ctx context.Context, in *QueryParamsRequest, opts ...grpc.CallOption) (*QueryParamsResponse, error) {
+ out := new(QueryParamsResponse)
+ err := c.cc.Invoke(ctx, "/ibcgo.apps.transfer.v1.Query/Params", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
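+
+// Illustrative usage sketch (not generated): querying a denomination trace through
+// this client API. The endpoint address and hash value below are assumptions for
+// the example only.
+//
+//	conn, err := grpc.Dial("localhost:9090", grpc.WithInsecure())
+//	if err != nil {
+//		panic(err)
+//	}
+//	defer conn.Close()
+//
+//	client := NewQueryClient(conn)
+//	res, err := client.DenomTrace(context.Background(), &QueryDenomTraceRequest{Hash: "<trace-hash>"})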
+
+// QueryServer is the server API for Query service.
+type QueryServer interface {
+	// DenomTrace queries the denomination trace information for a given hash.
+ DenomTrace(context.Context, *QueryDenomTraceRequest) (*QueryDenomTraceResponse, error)
+ // DenomTraces queries all denomination traces.
+ DenomTraces(context.Context, *QueryDenomTracesRequest) (*QueryDenomTracesResponse, error)
+ // Params queries all parameters of the ibc-transfer module.
+ Params(context.Context, *QueryParamsRequest) (*QueryParamsResponse, error)
+}
+
+// UnimplementedQueryServer can be embedded to have forward compatible implementations.
+type UnimplementedQueryServer struct {
+}
+
+func (*UnimplementedQueryServer) DenomTrace(ctx context.Context, req *QueryDenomTraceRequest) (*QueryDenomTraceResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method DenomTrace not implemented")
+}
+func (*UnimplementedQueryServer) DenomTraces(ctx context.Context, req *QueryDenomTracesRequest) (*QueryDenomTracesResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method DenomTraces not implemented")
+}
+func (*UnimplementedQueryServer) Params(ctx context.Context, req *QueryParamsRequest) (*QueryParamsResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Params not implemented")
+}
+
+func RegisterQueryServer(s grpc1.Server, srv QueryServer) {
+ s.RegisterService(&_Query_serviceDesc, srv)
+}
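+
+// Illustrative usage sketch (not generated): a concrete QueryServer embeds
+// UnimplementedQueryServer for forward compatibility, overrides the methods it
+// supports, and is then registered on a gRPC server supplied by the application.
+//
+//	type paramsQueryServer struct {
+//		UnimplementedQueryServer
+//	}
+//
+//	func (paramsQueryServer) Params(ctx context.Context, req *QueryParamsRequest) (*QueryParamsResponse, error) {
+//		return &QueryParamsResponse{Params: &Params{SendEnabled: true, ReceiveEnabled: true}}, nil
+//	}
+//
+//	// RegisterQueryServer(grpcServer, paramsQueryServer{})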
+
+func _Query_DenomTrace_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(QueryDenomTraceRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(QueryServer).DenomTrace(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/ibcgo.apps.transfer.v1.Query/DenomTrace",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(QueryServer).DenomTrace(ctx, req.(*QueryDenomTraceRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Query_DenomTraces_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(QueryDenomTracesRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(QueryServer).DenomTraces(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/ibcgo.apps.transfer.v1.Query/DenomTraces",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(QueryServer).DenomTraces(ctx, req.(*QueryDenomTracesRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Query_Params_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(QueryParamsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(QueryServer).Params(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/ibcgo.apps.transfer.v1.Query/Params",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(QueryServer).Params(ctx, req.(*QueryParamsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _Query_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "ibcgo.apps.transfer.v1.Query",
+ HandlerType: (*QueryServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "DenomTrace",
+ Handler: _Query_DenomTrace_Handler,
+ },
+ {
+ MethodName: "DenomTraces",
+ Handler: _Query_DenomTraces_Handler,
+ },
+ {
+ MethodName: "Params",
+ Handler: _Query_Params_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "ibcgo/apps/transfer/v1/query.proto",
+}
+
+func (m *QueryDenomTraceRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *QueryDenomTraceRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *QueryDenomTraceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Hash) > 0 {
+ i -= len(m.Hash)
+ copy(dAtA[i:], m.Hash)
+ i = encodeVarintQuery(dAtA, i, uint64(len(m.Hash)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *QueryDenomTraceResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *QueryDenomTraceResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *QueryDenomTraceResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.DenomTrace != nil {
+ {
+ size, err := m.DenomTrace.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintQuery(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *QueryDenomTracesRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *QueryDenomTracesRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *QueryDenomTracesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Pagination != nil {
+ {
+ size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintQuery(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *QueryDenomTracesResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *QueryDenomTracesResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *QueryDenomTracesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Pagination != nil {
+ {
+ size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintQuery(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.DenomTraces) > 0 {
+ for iNdEx := len(m.DenomTraces) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.DenomTraces[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintQuery(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *QueryParamsRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *QueryParamsRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *QueryParamsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ return len(dAtA) - i, nil
+}
+
+func (m *QueryParamsResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *QueryParamsResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *QueryParamsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Params != nil {
+ {
+ size, err := m.Params.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintQuery(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintQuery(dAtA []byte, offset int, v uint64) int {
+ offset -= sovQuery(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *QueryDenomTraceRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Hash)
+ if l > 0 {
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ return n
+}
+
+func (m *QueryDenomTraceResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.DenomTrace != nil {
+ l = m.DenomTrace.Size()
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ return n
+}
+
+func (m *QueryDenomTracesRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Pagination != nil {
+ l = m.Pagination.Size()
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ return n
+}
+
+func (m *QueryDenomTracesResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.DenomTraces) > 0 {
+ for _, e := range m.DenomTraces {
+ l = e.Size()
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ }
+ if m.Pagination != nil {
+ l = m.Pagination.Size()
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ return n
+}
+
+func (m *QueryParamsRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ return n
+}
+
+func (m *QueryParamsResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Params != nil {
+ l = m.Params.Size()
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ return n
+}
+
+func sovQuery(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozQuery(x uint64) (n int) {
+ return sovQuery(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *QueryDenomTraceRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: QueryDenomTraceRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: QueryDenomTraceRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Hash = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipQuery(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *QueryDenomTraceResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: QueryDenomTraceResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: QueryDenomTraceResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DenomTrace", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.DenomTrace == nil {
+ m.DenomTrace = &DenomTrace{}
+ }
+ if err := m.DenomTrace.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipQuery(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *QueryDenomTracesRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: QueryDenomTracesRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: QueryDenomTracesRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Pagination == nil {
+ m.Pagination = &query.PageRequest{}
+ }
+ if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipQuery(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *QueryDenomTracesResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: QueryDenomTracesResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: QueryDenomTracesResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DenomTraces", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.DenomTraces = append(m.DenomTraces, DenomTrace{})
+ if err := m.DenomTraces[len(m.DenomTraces)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Pagination == nil {
+ m.Pagination = &query.PageResponse{}
+ }
+ if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipQuery(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *QueryParamsRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: QueryParamsRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: QueryParamsRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipQuery(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *QueryParamsResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: QueryParamsResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: QueryParamsResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Params == nil {
+ m.Params = &Params{}
+ }
+ if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipQuery(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipQuery(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthQuery
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupQuery
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthQuery
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthQuery = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowQuery = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupQuery = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/apps/transfer/types/query.pb.gw.go b/apps/transfer/types/query.pb.gw.go
new file mode 100644
index 00000000..4333649f
--- /dev/null
+++ b/apps/transfer/types/query.pb.gw.go
@@ -0,0 +1,326 @@
+// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
+// source: ibcgo/apps/transfer/v1/query.proto
+
+/*
+Package types is a reverse proxy.
+
+It translates gRPC into RESTful JSON APIs.
+*/
+package types
+
+import (
+ "context"
+ "io"
+ "net/http"
+
+ "github.com/golang/protobuf/descriptor"
+ "github.com/golang/protobuf/proto"
+ "github.com/grpc-ecosystem/grpc-gateway/runtime"
+ "github.com/grpc-ecosystem/grpc-gateway/utilities"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/status"
+)
+
+// Suppress "imported and not used" errors
+var _ codes.Code
+var _ io.Reader
+var _ status.Status
+var _ = runtime.String
+var _ = utilities.NewDoubleArray
+var _ = descriptor.ForMessage
+
+func request_Query_DenomTrace_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq QueryDenomTraceRequest
+ var metadata runtime.ServerMetadata
+
+ var (
+ val string
+ ok bool
+ err error
+ _ = err
+ )
+
+ val, ok = pathParams["hash"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "hash")
+ }
+
+ protoReq.Hash, err = runtime.String(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "hash", err)
+ }
+
+ msg, err := client.DenomTrace(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func local_request_Query_DenomTrace_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq QueryDenomTraceRequest
+ var metadata runtime.ServerMetadata
+
+ var (
+ val string
+ ok bool
+ err error
+ _ = err
+ )
+
+ val, ok = pathParams["hash"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "hash")
+ }
+
+ protoReq.Hash, err = runtime.String(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "hash", err)
+ }
+
+ msg, err := server.DenomTrace(ctx, &protoReq)
+ return msg, metadata, err
+
+}
+
+var (
+ filter_Query_DenomTraces_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)}
+)
+
+func request_Query_DenomTraces_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq QueryDenomTracesRequest
+ var metadata runtime.ServerMetadata
+
+ if err := req.ParseForm(); err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_DenomTraces_0); err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := client.DenomTraces(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func local_request_Query_DenomTraces_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq QueryDenomTracesRequest
+ var metadata runtime.ServerMetadata
+
+ if err := req.ParseForm(); err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_DenomTraces_0); err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := server.DenomTraces(ctx, &protoReq)
+ return msg, metadata, err
+
+}
+
+func request_Query_Params_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq QueryParamsRequest
+ var metadata runtime.ServerMetadata
+
+ msg, err := client.Params(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func local_request_Query_Params_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq QueryParamsRequest
+ var metadata runtime.ServerMetadata
+
+ msg, err := server.Params(ctx, &protoReq)
+ return msg, metadata, err
+
+}
+
+// RegisterQueryHandlerServer registers the http handlers for service Query to "mux".
+// UnaryRPC :call QueryServer directly.
+// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
+// Note that using this registration option will cause many gRPC library features (such as grpc.SendHeader, etc) to stop working. Consider using RegisterQueryHandlerFromEndpoint instead.
+func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryServer) error {
+
+ mux.Handle("GET", pattern_Query_DenomTrace_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Query_DenomTrace_0(rctx, inboundMarshaler, server, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Query_DenomTrace_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("GET", pattern_Query_DenomTraces_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Query_DenomTraces_0(rctx, inboundMarshaler, server, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Query_DenomTraces_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("GET", pattern_Query_Params_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Query_Params_0(rctx, inboundMarshaler, server, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Query_Params_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ return nil
+}
+
+// RegisterQueryHandlerFromEndpoint is the same as RegisterQueryHandler but
+// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
+func RegisterQueryHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
+ conn, err := grpc.Dial(endpoint, opts...)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err != nil {
+ if cerr := conn.Close(); cerr != nil {
+ grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
+ }
+ return
+ }
+ go func() {
+ <-ctx.Done()
+ if cerr := conn.Close(); cerr != nil {
+ grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
+ }
+ }()
+ }()
+
+ return RegisterQueryHandler(ctx, mux, conn)
+}
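+
+// Illustrative usage sketch (not generated): serving these routes over REST by
+// pointing the gateway at a gRPC endpoint. The listen address and gRPC endpoint
+// below are assumptions for the example only.
+//
+//	mux := runtime.NewServeMux()
+//	opts := []grpc.DialOption{grpc.WithInsecure()}
+//	if err := RegisterQueryHandlerFromEndpoint(context.Background(), mux, "localhost:9090", opts); err != nil {
+//		panic(err)
+//	}
+//	// e.g. GET /ibc/apps/transfer/v1/denom_traces is now routed to Query/DenomTraces.
+//	_ = http.ListenAndServe(":8080", mux)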
+
+// RegisterQueryHandler registers the http handlers for service Query to "mux".
+// The handlers forward requests to the grpc endpoint over "conn".
+func RegisterQueryHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
+ return RegisterQueryHandlerClient(ctx, mux, NewQueryClient(conn))
+}
+
+// RegisterQueryHandlerClient registers the http handlers for service Query
+// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "QueryClient".
+// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "QueryClient"
+// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
+// "QueryClient" to call the correct interceptors.
+func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, client QueryClient) error {
+
+ mux.Handle("GET", pattern_Query_DenomTrace_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Query_DenomTrace_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Query_DenomTrace_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("GET", pattern_Query_DenomTraces_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Query_DenomTraces_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Query_DenomTraces_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("GET", pattern_Query_Params_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Query_Params_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Query_Params_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ return nil
+}
+
+var (
+ pattern_Query_DenomTrace_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 1, 0, 4, 1, 5, 5}, []string{"ibc", "apps", "transfer", "v1", "denom_traces", "hash"}, "", runtime.AssumeColonVerbOpt(true)))
+
+ pattern_Query_DenomTraces_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"ibc", "apps", "transfer", "v1", "denom_traces"}, "", runtime.AssumeColonVerbOpt(true)))
+
+ pattern_Query_Params_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"ibc", "apps", "transfer", "v1", "params"}, "", runtime.AssumeColonVerbOpt(true)))
+)
+
+var (
+ forward_Query_DenomTrace_0 = runtime.ForwardResponseMessage
+
+ forward_Query_DenomTraces_0 = runtime.ForwardResponseMessage
+
+ forward_Query_Params_0 = runtime.ForwardResponseMessage
+)
diff --git a/apps/transfer/types/transfer.pb.go b/apps/transfer/types/transfer.pb.go
new file mode 100644
index 00000000..7b405f30
--- /dev/null
+++ b/apps/transfer/types/transfer.pb.go
@@ -0,0 +1,908 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: ibcgo/apps/transfer/v1/transfer.proto
+
+package types
+
+import (
+ fmt "fmt"
+ _ "github.com/gogo/protobuf/gogoproto"
+ proto "github.com/gogo/protobuf/proto"
+ io "io"
+ math "math"
+ math_bits "math/bits"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+// FungibleTokenPacketData defines a struct for the packet payload
+// See FungibleTokenPacketData spec:
+// https://github.com/cosmos/ics/tree/master/spec/ics-020-fungible-token-transfer#data-structures
+type FungibleTokenPacketData struct {
+ // the token denomination to be transferred
+ Denom string `protobuf:"bytes,1,opt,name=denom,proto3" json:"denom,omitempty"`
+ // the token amount to be transferred
+ Amount uint64 `protobuf:"varint,2,opt,name=amount,proto3" json:"amount,omitempty"`
+ // the sender address
+ Sender string `protobuf:"bytes,3,opt,name=sender,proto3" json:"sender,omitempty"`
+ // the recipient address on the destination chain
+ Receiver string `protobuf:"bytes,4,opt,name=receiver,proto3" json:"receiver,omitempty"`
+}
+
+func (m *FungibleTokenPacketData) Reset() { *m = FungibleTokenPacketData{} }
+func (m *FungibleTokenPacketData) String() string { return proto.CompactTextString(m) }
+func (*FungibleTokenPacketData) ProtoMessage() {}
+func (*FungibleTokenPacketData) Descriptor() ([]byte, []int) {
+ return fileDescriptor_0cd9e010e90bbec6, []int{0}
+}
+func (m *FungibleTokenPacketData) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *FungibleTokenPacketData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_FungibleTokenPacketData.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *FungibleTokenPacketData) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_FungibleTokenPacketData.Merge(m, src)
+}
+func (m *FungibleTokenPacketData) XXX_Size() int {
+ return m.Size()
+}
+func (m *FungibleTokenPacketData) XXX_DiscardUnknown() {
+ xxx_messageInfo_FungibleTokenPacketData.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_FungibleTokenPacketData proto.InternalMessageInfo
+
+func (m *FungibleTokenPacketData) GetDenom() string {
+ if m != nil {
+ return m.Denom
+ }
+ return ""
+}
+
+func (m *FungibleTokenPacketData) GetAmount() uint64 {
+ if m != nil {
+ return m.Amount
+ }
+ return 0
+}
+
+func (m *FungibleTokenPacketData) GetSender() string {
+ if m != nil {
+ return m.Sender
+ }
+ return ""
+}
+
+func (m *FungibleTokenPacketData) GetReceiver() string {
+ if m != nil {
+ return m.Receiver
+ }
+ return ""
+}
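+
+// Illustrative usage sketch (not generated): constructing and proto-encoding the
+// ICS-20 packet payload defined above. The field values are assumptions for the
+// example only.
+//
+//	data := FungibleTokenPacketData{
+//		Denom:    "transfer/channel-0/uatom",
+//		Amount:   100,
+//		Sender:   "<sender-address>",
+//		Receiver: "<receiver-address>",
+//	}
+//	bz, err := data.Marshal() // bz is carried as the packet data bytes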
+
+// DenomTrace contains the base denomination for ICS20 fungible tokens and the
+// source tracing information path.
+type DenomTrace struct {
+ // path defines the chain of port/channel identifiers used for tracing the
+ // source of the fungible token.
+ Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
+ // base denomination of the relayed fungible token.
+ BaseDenom string `protobuf:"bytes,2,opt,name=base_denom,json=baseDenom,proto3" json:"base_denom,omitempty"`
+}
+
+func (m *DenomTrace) Reset() { *m = DenomTrace{} }
+func (m *DenomTrace) String() string { return proto.CompactTextString(m) }
+func (*DenomTrace) ProtoMessage() {}
+func (*DenomTrace) Descriptor() ([]byte, []int) {
+ return fileDescriptor_0cd9e010e90bbec6, []int{1}
+}
+func (m *DenomTrace) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DenomTrace) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_DenomTrace.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *DenomTrace) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DenomTrace.Merge(m, src)
+}
+func (m *DenomTrace) XXX_Size() int {
+ return m.Size()
+}
+func (m *DenomTrace) XXX_DiscardUnknown() {
+ xxx_messageInfo_DenomTrace.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DenomTrace proto.InternalMessageInfo
+
+func (m *DenomTrace) GetPath() string {
+ if m != nil {
+ return m.Path
+ }
+ return ""
+}
+
+func (m *DenomTrace) GetBaseDenom() string {
+ if m != nil {
+ return m.BaseDenom
+ }
+ return ""
+}
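+
+// Illustrative usage sketch (not generated): a traced denomination decomposed into
+// the fields above. The port/channel identifiers are assumptions for the example only.
+//
+//	trace := DenomTrace{
+//		Path:      "transfer/channel-0",
+//		BaseDenom: "uatom",
+//	}
+//	// The corresponding full denomination is "transfer/channel-0/uatom".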
+
+// Params defines the set of IBC transfer parameters.
+// NOTE: To prevent a single token from being transferred, set the
+// TransfersEnabled parameter to true and then set the bank module's SendEnabled
+// parameter for the denomination to false.
+type Params struct {
+ // send_enabled enables or disables all cross-chain token transfers from this
+ // chain.
+ SendEnabled bool `protobuf:"varint,1,opt,name=send_enabled,json=sendEnabled,proto3" json:"send_enabled,omitempty" yaml:"send_enabled"`
+ // receive_enabled enables or disables all cross-chain token transfers to this
+ // chain.
+ ReceiveEnabled bool `protobuf:"varint,2,opt,name=receive_enabled,json=receiveEnabled,proto3" json:"receive_enabled,omitempty" yaml:"receive_enabled"`
+}
+
+func (m *Params) Reset() { *m = Params{} }
+func (m *Params) String() string { return proto.CompactTextString(m) }
+func (*Params) ProtoMessage() {}
+func (*Params) Descriptor() ([]byte, []int) {
+ return fileDescriptor_0cd9e010e90bbec6, []int{2}
+}
+func (m *Params) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Params) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_Params.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *Params) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Params.Merge(m, src)
+}
+func (m *Params) XXX_Size() int {
+ return m.Size()
+}
+func (m *Params) XXX_DiscardUnknown() {
+ xxx_messageInfo_Params.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Params proto.InternalMessageInfo
+
+func (m *Params) GetSendEnabled() bool {
+ if m != nil {
+ return m.SendEnabled
+ }
+ return false
+}
+
+func (m *Params) GetReceiveEnabled() bool {
+ if m != nil {
+ return m.ReceiveEnabled
+ }
+ return false
+}
+
+func init() {
+ proto.RegisterType((*FungibleTokenPacketData)(nil), "ibcgo.apps.transfer.v1.FungibleTokenPacketData")
+ proto.RegisterType((*DenomTrace)(nil), "ibcgo.apps.transfer.v1.DenomTrace")
+ proto.RegisterType((*Params)(nil), "ibcgo.apps.transfer.v1.Params")
+}
+
+func init() {
+ proto.RegisterFile("ibcgo/apps/transfer/v1/transfer.proto", fileDescriptor_0cd9e010e90bbec6)
+}
+
+var fileDescriptor_0cd9e010e90bbec6 = []byte{
+ // 349 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x91, 0xc1, 0x6a, 0xfa, 0x40,
+ 0x10, 0xc6, 0x8d, 0x7f, 0xff, 0xa2, 0xdb, 0xd2, 0xc2, 0x56, 0x34, 0x08, 0x8d, 0x12, 0x28, 0x78,
+ 0x68, 0x13, 0xa4, 0x37, 0x2f, 0x05, 0x6b, 0x3d, 0x4b, 0xf0, 0xd4, 0x8b, 0x6c, 0xd6, 0x69, 0x0c,
+ 0x9a, 0xdd, 0xb0, 0xbb, 0x0a, 0xd2, 0x27, 0xe8, 0xad, 0x8f, 0xd5, 0xa3, 0xc7, 0x9e, 0xa4, 0xe8,
+ 0x1b, 0xf8, 0x04, 0x65, 0x37, 0x21, 0x94, 0xf6, 0x36, 0xdf, 0x7c, 0xbf, 0x6f, 0x66, 0x60, 0xd0,
+ 0x4d, 0x1c, 0xd2, 0x88, 0xfb, 0x24, 0x4d, 0xa5, 0xaf, 0x04, 0x61, 0xf2, 0x05, 0x84, 0xbf, 0xe9,
+ 0x17, 0xb5, 0x97, 0x0a, 0xae, 0x38, 0x6e, 0x1a, 0xcc, 0xd3, 0x98, 0x57, 0x58, 0x9b, 0x7e, 0xbb,
+ 0x11, 0xf1, 0x88, 0x1b, 0xc4, 0xd7, 0x55, 0x46, 0xbb, 0xaf, 0xa8, 0x35, 0x5e, 0xb3, 0x28, 0x0e,
+ 0x57, 0x30, 0xe5, 0x4b, 0x60, 0x13, 0x42, 0x97, 0xa0, 0x46, 0x44, 0x11, 0xdc, 0x40, 0xff, 0xe7,
+ 0xc0, 0x78, 0x62, 0x5b, 0x5d, 0xab, 0x57, 0x0f, 0x32, 0x81, 0x9b, 0xa8, 0x4a, 0x12, 0xbe, 0x66,
+ 0xca, 0x2e, 0x77, 0xad, 0x5e, 0x25, 0xc8, 0x95, 0xee, 0x4b, 0x60, 0x73, 0x10, 0xf6, 0x3f, 0x83,
+ 0xe7, 0x0a, 0xb7, 0x51, 0x4d, 0x00, 0x85, 0x78, 0x03, 0xc2, 0xae, 0x18, 0xa7, 0xd0, 0xee, 0x03,
+ 0x42, 0x23, 0x3d, 0x74, 0x2a, 0x08, 0x05, 0x8c, 0x51, 0x25, 0x25, 0x6a, 0x91, 0xaf, 0x33, 0x35,
+ 0xbe, 0x46, 0x28, 0x24, 0x12, 0x66, 0xd9, 0x21, 0x65, 0xe3, 0xd4, 0x75, 0xc7, 0xe4, 0xdc, 0x37,
+ 0x0b, 0x55, 0x27, 0x44, 0x90, 0x44, 0xe2, 0x01, 0x3a, 0xd7, 0x1b, 0x67, 0xc0, 0x48, 0xb8, 0x82,
+ 0xb9, 0x99, 0x52, 0x1b, 0xb6, 0x4e, 0xfb, 0xce, 0xd5, 0x96, 0x24, 0xab, 0x81, 0xfb, 0xd3, 0x75,
+ 0x83, 0x33, 0x2d, 0x9f, 0x32, 0x85, 0x1f, 0xd1, 0x65, 0x7e, 0x53, 0x11, 0x2f, 0x9b, 0x78, 0xfb,
+ 0xb4, 0xef, 0x34, 0xb3, 0xf8, 0x2f, 0xc0, 0x0d, 0x2e, 0xf2, 0x4e, 0x3e, 0x64, 0x38, 0xfe, 0x38,
+ 0x38, 0xd6, 0xee, 0xe0, 0x58, 0x5f, 0x07, 0xc7, 0x7a, 0x3f, 0x3a, 0xa5, 0xdd, 0xd1, 0x29, 0x7d,
+ 0x1e, 0x9d, 0xd2, 0xf3, 0x6d, 0x14, 0xab, 0xc5, 0x3a, 0xf4, 0x28, 0x4f, 0x7c, 0xca, 0x65, 0xc2,
+ 0xa5, 0x1f, 0x87, 0xf4, 0xee, 0xcf, 0x2f, 0xd5, 0x36, 0x05, 0x19, 0x56, 0xcd, 0x63, 0xee, 0xbf,
+ 0x03, 0x00, 0x00, 0xff, 0xff, 0x41, 0x6a, 0xce, 0x58, 0xef, 0x01, 0x00, 0x00,
+}
+
+func (m *FungibleTokenPacketData) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *FungibleTokenPacketData) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *FungibleTokenPacketData) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Receiver) > 0 {
+ i -= len(m.Receiver)
+ copy(dAtA[i:], m.Receiver)
+ i = encodeVarintTransfer(dAtA, i, uint64(len(m.Receiver)))
+ i--
+ dAtA[i] = 0x22
+ }
+ if len(m.Sender) > 0 {
+ i -= len(m.Sender)
+ copy(dAtA[i:], m.Sender)
+ i = encodeVarintTransfer(dAtA, i, uint64(len(m.Sender)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.Amount != 0 {
+ i = encodeVarintTransfer(dAtA, i, uint64(m.Amount))
+ i--
+ dAtA[i] = 0x10
+ }
+ if len(m.Denom) > 0 {
+ i -= len(m.Denom)
+ copy(dAtA[i:], m.Denom)
+ i = encodeVarintTransfer(dAtA, i, uint64(len(m.Denom)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *DenomTrace) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DenomTrace) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DenomTrace) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.BaseDenom) > 0 {
+ i -= len(m.BaseDenom)
+ copy(dAtA[i:], m.BaseDenom)
+ i = encodeVarintTransfer(dAtA, i, uint64(len(m.BaseDenom)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Path) > 0 {
+ i -= len(m.Path)
+ copy(dAtA[i:], m.Path)
+ i = encodeVarintTransfer(dAtA, i, uint64(len(m.Path)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Params) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Params) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Params) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.ReceiveEnabled {
+ i--
+ if m.ReceiveEnabled {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x10
+ }
+ if m.SendEnabled {
+ i--
+ if m.SendEnabled {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintTransfer(dAtA []byte, offset int, v uint64) int {
+ offset -= sovTransfer(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *FungibleTokenPacketData) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Denom)
+ if l > 0 {
+ n += 1 + l + sovTransfer(uint64(l))
+ }
+ if m.Amount != 0 {
+ n += 1 + sovTransfer(uint64(m.Amount))
+ }
+ l = len(m.Sender)
+ if l > 0 {
+ n += 1 + l + sovTransfer(uint64(l))
+ }
+ l = len(m.Receiver)
+ if l > 0 {
+ n += 1 + l + sovTransfer(uint64(l))
+ }
+ return n
+}
+
+func (m *DenomTrace) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Path)
+ if l > 0 {
+ n += 1 + l + sovTransfer(uint64(l))
+ }
+ l = len(m.BaseDenom)
+ if l > 0 {
+ n += 1 + l + sovTransfer(uint64(l))
+ }
+ return n
+}
+
+func (m *Params) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.SendEnabled {
+ n += 2
+ }
+ if m.ReceiveEnabled {
+ n += 2
+ }
+ return n
+}
+
+func sovTransfer(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozTransfer(x uint64) (n int) {
+ return sovTransfer(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *FungibleTokenPacketData) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTransfer
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: FungibleTokenPacketData: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: FungibleTokenPacketData: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Denom", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTransfer
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTransfer
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTransfer
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Denom = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Amount", wireType)
+ }
+ m.Amount = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTransfer
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Amount |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Sender", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTransfer
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTransfer
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTransfer
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Sender = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Receiver", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTransfer
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTransfer
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTransfer
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Receiver = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTransfer(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthTransfer
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DenomTrace) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTransfer
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DenomTrace: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DenomTrace: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTransfer
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTransfer
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTransfer
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Path = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field BaseDenom", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTransfer
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTransfer
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTransfer
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.BaseDenom = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTransfer(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthTransfer
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Params) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTransfer
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Params: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Params: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SendEnabled", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTransfer
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.SendEnabled = bool(v != 0)
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ReceiveEnabled", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTransfer
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.ReceiveEnabled = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTransfer(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthTransfer
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipTransfer(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowTransfer
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowTransfer
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowTransfer
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthTransfer
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupTransfer
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthTransfer
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthTransfer = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowTransfer = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupTransfer = fmt.Errorf("proto: unexpected end of group")
+)
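
Every Unmarshal and skip helper above decodes the same two protobuf building blocks: a base-128 varint key whose low three bits are the wire type and whose remaining bits are the field number, followed, for wire type 2 fields, by a varint length and that many payload bytes. The standalone sketch below (illustration only, standard library only; it is not part of the generated file) decodes a hand-written buffer with the same shift/accumulate loop:

package main

import "fmt"

// readVarint decodes a base-128 varint starting at index i, mirroring the
// shift/accumulate loops in the generated Unmarshal and skip functions.
func readVarint(b []byte, i int) (uint64, int) {
	var v uint64
	for shift := uint(0); ; shift += 7 {
		c := b[i]
		i++
		v |= uint64(c&0x7F) << shift
		if c < 0x80 {
			break
		}
	}
	return v, i
}

func main() {
	// Field 2 of DenomTrace (BaseDenom), wire type 2 (length-delimited), value "atom":
	// key = (2<<3)|2 = 0x12, then the length 4, then the raw bytes.
	buf := []byte{0x12, 0x04, 'a', 't', 'o', 'm'}

	key, i := readVarint(buf, 0)
	fieldNum, wireType := key>>3, key&0x7

	strLen, i := readVarint(buf, i)
	value := string(buf[i : i+int(strLen)])

	fmt.Println(fieldNum, wireType, value) // prints: 2 2 atom
}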
diff --git a/apps/transfer/types/tx.pb.go b/apps/transfer/types/tx.pb.go
new file mode 100644
index 00000000..0d5b29f4
--- /dev/null
+++ b/apps/transfer/types/tx.pb.go
@@ -0,0 +1,801 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: ibcgo/apps/transfer/v1/tx.proto
+
+package types
+
+import (
+ context "context"
+ fmt "fmt"
+ types "github.com/cosmos/cosmos-sdk/types"
+ types1 "github.com/cosmos/ibc-go/core/02-client/types"
+ _ "github.com/gogo/protobuf/gogoproto"
+ grpc1 "github.com/gogo/protobuf/grpc"
+ proto "github.com/gogo/protobuf/proto"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+ io "io"
+ math "math"
+ math_bits "math/bits"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+// MsgTransfer defines a msg to transfer fungible tokens (i.e Coins) between
+// ICS20 enabled chains. See ICS Spec here:
+// https://github.com/cosmos/ics/tree/master/spec/ics-020-fungible-token-transfer#data-structures
+type MsgTransfer struct {
+ // the port on which the packet will be sent
+ SourcePort string `protobuf:"bytes,1,opt,name=source_port,json=sourcePort,proto3" json:"source_port,omitempty" yaml:"source_port"`
+ // the channel by which the packet will be sent
+ SourceChannel string `protobuf:"bytes,2,opt,name=source_channel,json=sourceChannel,proto3" json:"source_channel,omitempty" yaml:"source_channel"`
+ // the tokens to be transferred
+ Token types.Coin `protobuf:"bytes,3,opt,name=token,proto3" json:"token"`
+ // the sender address
+ Sender string `protobuf:"bytes,4,opt,name=sender,proto3" json:"sender,omitempty"`
+ // the recipient address on the destination chain
+ Receiver string `protobuf:"bytes,5,opt,name=receiver,proto3" json:"receiver,omitempty"`
+ // Timeout height relative to the current block height.
+ // The timeout is disabled when set to 0.
+ TimeoutHeight types1.Height `protobuf:"bytes,6,opt,name=timeout_height,json=timeoutHeight,proto3" json:"timeout_height" yaml:"timeout_height"`
+ // Timeout timestamp (in nanoseconds) relative to the current block timestamp.
+ // The timeout is disabled when set to 0.
+ TimeoutTimestamp uint64 `protobuf:"varint,7,opt,name=timeout_timestamp,json=timeoutTimestamp,proto3" json:"timeout_timestamp,omitempty" yaml:"timeout_timestamp"`
+}
+
+func (m *MsgTransfer) Reset() { *m = MsgTransfer{} }
+func (m *MsgTransfer) String() string { return proto.CompactTextString(m) }
+func (*MsgTransfer) ProtoMessage() {}
+func (*MsgTransfer) Descriptor() ([]byte, []int) {
+ return fileDescriptor_4ca3945bed527d36, []int{0}
+}
+func (m *MsgTransfer) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MsgTransfer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_MsgTransfer.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *MsgTransfer) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MsgTransfer.Merge(m, src)
+}
+func (m *MsgTransfer) XXX_Size() int {
+ return m.Size()
+}
+func (m *MsgTransfer) XXX_DiscardUnknown() {
+ xxx_messageInfo_MsgTransfer.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MsgTransfer proto.InternalMessageInfo
+
+// MsgTransferResponse defines the Msg/Transfer response type.
+type MsgTransferResponse struct {
+}
+
+func (m *MsgTransferResponse) Reset() { *m = MsgTransferResponse{} }
+func (m *MsgTransferResponse) String() string { return proto.CompactTextString(m) }
+func (*MsgTransferResponse) ProtoMessage() {}
+func (*MsgTransferResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_4ca3945bed527d36, []int{1}
+}
+func (m *MsgTransferResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MsgTransferResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_MsgTransferResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *MsgTransferResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MsgTransferResponse.Merge(m, src)
+}
+func (m *MsgTransferResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *MsgTransferResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_MsgTransferResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MsgTransferResponse proto.InternalMessageInfo
+
+func init() {
+ proto.RegisterType((*MsgTransfer)(nil), "ibcgo.apps.transfer.v1.MsgTransfer")
+ proto.RegisterType((*MsgTransferResponse)(nil), "ibcgo.apps.transfer.v1.MsgTransferResponse")
+}
+
+func init() { proto.RegisterFile("ibcgo/apps/transfer/v1/tx.proto", fileDescriptor_4ca3945bed527d36) }
+
+var fileDescriptor_4ca3945bed527d36 = []byte{
+ // 478 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0x41, 0x8b, 0xd3, 0x40,
+ 0x14, 0xc7, 0x13, 0xdb, 0xad, 0x75, 0xca, 0x2e, 0x3a, 0xba, 0x25, 0x5b, 0xd6, 0xa4, 0xc6, 0x4b,
+ 0x41, 0x9d, 0x21, 0x2b, 0x22, 0xec, 0x49, 0xb2, 0x20, 0x7a, 0x58, 0x90, 0xb0, 0x27, 0x11, 0x96,
+ 0x64, 0x7c, 0xa6, 0xc1, 0x26, 0x13, 0x66, 0xa6, 0xc1, 0xfd, 0x06, 0x1e, 0xfd, 0x08, 0xfb, 0x01,
+ 0xfc, 0x20, 0x7b, 0xec, 0xd1, 0x53, 0x91, 0xf6, 0xe2, 0xb9, 0x9f, 0x40, 0x92, 0x99, 0xd6, 0x16,
+ 0x3d, 0x78, 0x9a, 0x79, 0xef, 0xff, 0x7b, 0xf3, 0xe7, 0xbd, 0x79, 0xc8, 0xcb, 0x12, 0x96, 0x72,
+ 0x1a, 0x97, 0xa5, 0xa4, 0x4a, 0xc4, 0x85, 0xfc, 0x04, 0x82, 0x56, 0x01, 0x55, 0x5f, 0x48, 0x29,
+ 0xb8, 0xe2, 0xb8, 0xdf, 0x00, 0xa4, 0x06, 0xc8, 0x1a, 0x20, 0x55, 0x30, 0x78, 0x90, 0xf2, 0x94,
+ 0x37, 0x08, 0xad, 0x6f, 0x9a, 0x1e, 0xb8, 0x8c, 0xcb, 0x9c, 0x4b, 0x9a, 0xc4, 0x12, 0x68, 0x15,
+ 0x24, 0xa0, 0xe2, 0x80, 0x32, 0x9e, 0x15, 0x46, 0x7f, 0xa4, 0xed, 0x18, 0x17, 0x40, 0xd9, 0x24,
+ 0x83, 0x42, 0xd5, 0x66, 0xfa, 0xa6, 0x11, 0xff, 0x7b, 0x0b, 0xf5, 0xce, 0x65, 0x7a, 0x61, 0xbc,
+ 0xf0, 0x4b, 0xd4, 0x93, 0x7c, 0x2a, 0x18, 0x5c, 0x96, 0x5c, 0x28, 0xc7, 0x1e, 0xda, 0xa3, 0x3b,
+ 0x61, 0x7f, 0x35, 0xf7, 0xf0, 0x55, 0x9c, 0x4f, 0x4e, 0xfd, 0x2d, 0xd1, 0x8f, 0x90, 0x8e, 0xde,
+ 0x71, 0xa1, 0xf0, 0x2b, 0x74, 0x60, 0x34, 0x36, 0x8e, 0x8b, 0x02, 0x26, 0xce, 0xad, 0xa6, 0xf6,
+ 0x68, 0x35, 0xf7, 0x0e, 0x77, 0x6a, 0x8d, 0xee, 0x47, 0xfb, 0x3a, 0x71, 0xa6, 0x63, 0xfc, 0x02,
+ 0xed, 0x29, 0xfe, 0x19, 0x0a, 0xa7, 0x35, 0xb4, 0x47, 0xbd, 0x93, 0x23, 0xa2, 0xbb, 0x23, 0x75,
+ 0x77, 0xc4, 0x74, 0x47, 0xce, 0x78, 0x56, 0x84, 0xed, 0x9b, 0xb9, 0x67, 0x45, 0x9a, 0xc6, 0x7d,
+ 0xd4, 0x91, 0x50, 0x7c, 0x04, 0xe1, 0xb4, 0x6b, 0xc3, 0xc8, 0x44, 0x78, 0x80, 0xba, 0x02, 0x18,
+ 0x64, 0x15, 0x08, 0x67, 0xaf, 0x51, 0x36, 0x31, 0x4e, 0xd0, 0x81, 0xca, 0x72, 0xe0, 0x53, 0x75,
+ 0x39, 0x86, 0x2c, 0x1d, 0x2b, 0xa7, 0xd3, 0x78, 0x1e, 0x13, 0x3d, 0xff, 0x7a, 0x62, 0xc4, 0xcc,
+ 0xa9, 0x0a, 0xc8, 0x9b, 0x86, 0x09, 0x1f, 0xd6, 0xb6, 0x7f, 0xda, 0xd9, 0x7d, 0xc1, 0x8f, 0xf6,
+ 0x4d, 0x42, 0xd3, 0xf8, 0x2d, 0xba, 0xb7, 0x26, 0xea, 0x53, 0xaa, 0x38, 0x2f, 0x9d, 0xdb, 0x43,
+ 0x7b, 0xd4, 0x0e, 0x8f, 0x57, 0x73, 0xcf, 0xd9, 0x7d, 0x64, 0x83, 0xf8, 0xd1, 0x5d, 0x93, 0xbb,
+ 0x58, 0xa7, 0x4e, 0xbb, 0x5f, 0xaf, 0x3d, 0xeb, 0xd7, 0xb5, 0x67, 0xf9, 0x87, 0xe8, 0xfe, 0xd6,
+ 0x6f, 0x45, 0x20, 0x4b, 0x5e, 0x48, 0x38, 0x61, 0xa8, 0x75, 0x2e, 0x53, 0xfc, 0x01, 0x75, 0x37,
+ 0x1f, 0xf9, 0x98, 0xfc, 0x7b, 0x95, 0xc8, 0x56, 0xfd, 0xe0, 0xc9, 0x7f, 0x40, 0x6b, 0x93, 0xf0,
+ 0xf5, 0xcd, 0xc2, 0xb5, 0x67, 0x0b, 0xd7, 0xfe, 0xb9, 0x70, 0xed, 0x6f, 0x4b, 0xd7, 0x9a, 0x2d,
+ 0x5d, 0xeb, 0xc7, 0xd2, 0xb5, 0xde, 0x3f, 0x4d, 0x33, 0x35, 0x9e, 0x26, 0x84, 0xf1, 0x9c, 0x9a,
+ 0x95, 0xcc, 0x12, 0xf6, 0xec, 0xaf, 0x4d, 0x57, 0x57, 0x25, 0xc8, 0xa4, 0xd3, 0x6c, 0xde, 0xf3,
+ 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x9e, 0x48, 0x06, 0x61, 0x0d, 0x03, 0x00, 0x00,
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// MsgClient is the client API for Msg service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type MsgClient interface {
+ // Transfer defines a rpc handler method for MsgTransfer.
+ Transfer(ctx context.Context, in *MsgTransfer, opts ...grpc.CallOption) (*MsgTransferResponse, error)
+}
+
+type msgClient struct {
+ cc grpc1.ClientConn
+}
+
+func NewMsgClient(cc grpc1.ClientConn) MsgClient {
+ return &msgClient{cc}
+}
+
+func (c *msgClient) Transfer(ctx context.Context, in *MsgTransfer, opts ...grpc.CallOption) (*MsgTransferResponse, error) {
+ out := new(MsgTransferResponse)
+ err := c.cc.Invoke(ctx, "/ibcgo.apps.transfer.v1.Msg/Transfer", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// MsgServer is the server API for Msg service.
+type MsgServer interface {
+ // Transfer defines a rpc handler method for MsgTransfer.
+ Transfer(context.Context, *MsgTransfer) (*MsgTransferResponse, error)
+}
+
+// UnimplementedMsgServer can be embedded to have forward compatible implementations.
+type UnimplementedMsgServer struct {
+}
+
+func (*UnimplementedMsgServer) Transfer(ctx context.Context, req *MsgTransfer) (*MsgTransferResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Transfer not implemented")
+}
+
+func RegisterMsgServer(s grpc1.Server, srv MsgServer) {
+ s.RegisterService(&_Msg_serviceDesc, srv)
+}
+
+func _Msg_Transfer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(MsgTransfer)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(MsgServer).Transfer(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/ibcgo.apps.transfer.v1.Msg/Transfer",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(MsgServer).Transfer(ctx, req.(*MsgTransfer))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _Msg_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "ibcgo.apps.transfer.v1.Msg",
+ HandlerType: (*MsgServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "Transfer",
+ Handler: _Msg_Transfer_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "ibcgo/apps/transfer/v1/tx.proto",
+}
+
+func (m *MsgTransfer) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MsgTransfer) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MsgTransfer) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.TimeoutTimestamp != 0 {
+ i = encodeVarintTx(dAtA, i, uint64(m.TimeoutTimestamp))
+ i--
+ dAtA[i] = 0x38
+ }
+ {
+ size, err := m.TimeoutHeight.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTx(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x32
+ if len(m.Receiver) > 0 {
+ i -= len(m.Receiver)
+ copy(dAtA[i:], m.Receiver)
+ i = encodeVarintTx(dAtA, i, uint64(len(m.Receiver)))
+ i--
+ dAtA[i] = 0x2a
+ }
+ if len(m.Sender) > 0 {
+ i -= len(m.Sender)
+ copy(dAtA[i:], m.Sender)
+ i = encodeVarintTx(dAtA, i, uint64(len(m.Sender)))
+ i--
+ dAtA[i] = 0x22
+ }
+ {
+ size, err := m.Token.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTx(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ if len(m.SourceChannel) > 0 {
+ i -= len(m.SourceChannel)
+ copy(dAtA[i:], m.SourceChannel)
+ i = encodeVarintTx(dAtA, i, uint64(len(m.SourceChannel)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.SourcePort) > 0 {
+ i -= len(m.SourcePort)
+ copy(dAtA[i:], m.SourcePort)
+ i = encodeVarintTx(dAtA, i, uint64(len(m.SourcePort)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *MsgTransferResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MsgTransferResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MsgTransferResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintTx(dAtA []byte, offset int, v uint64) int {
+ offset -= sovTx(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *MsgTransfer) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.SourcePort)
+ if l > 0 {
+ n += 1 + l + sovTx(uint64(l))
+ }
+ l = len(m.SourceChannel)
+ if l > 0 {
+ n += 1 + l + sovTx(uint64(l))
+ }
+ l = m.Token.Size()
+ n += 1 + l + sovTx(uint64(l))
+ l = len(m.Sender)
+ if l > 0 {
+ n += 1 + l + sovTx(uint64(l))
+ }
+ l = len(m.Receiver)
+ if l > 0 {
+ n += 1 + l + sovTx(uint64(l))
+ }
+ l = m.TimeoutHeight.Size()
+ n += 1 + l + sovTx(uint64(l))
+ if m.TimeoutTimestamp != 0 {
+ n += 1 + sovTx(uint64(m.TimeoutTimestamp))
+ }
+ return n
+}
+
+func (m *MsgTransferResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ return n
+}
+
+func sovTx(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozTx(x uint64) (n int) {
+ return sovTx(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *MsgTransfer) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MsgTransfer: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MsgTransfer: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SourcePort", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.SourcePort = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SourceChannel", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.SourceChannel = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Token", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Token.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Sender", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Sender = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Receiver", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Receiver = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TimeoutHeight", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.TimeoutHeight.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 7:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TimeoutTimestamp", wireType)
+ }
+ m.TimeoutTimestamp = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.TimeoutTimestamp |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTx(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthTx
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MsgTransferResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MsgTransferResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MsgTransferResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTx(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthTx
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipTx(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthTx
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupTx
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthTx
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthTx = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowTx = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupTx = fmt.Errorf("proto: unexpected end of group")
+)
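
The sized-buffer marshalers above write each field key byte by hand: dAtA[i] = 0x2a, for example, encodes the Receiver field as (5<<3)|2 = 42, i.e. field number 5 with the length-delimited wire type. A minimal round-trip sketch using the MsgTransfer type defined in this file follows; it is illustrative only, and the github.com/cosmos/ibc-go/apps/transfer/types import path and the placeholder addresses are assumptions, not something this patch pins down.

package main

import (
	"fmt"

	sdk "github.com/cosmos/cosmos-sdk/types"
	transfertypes "github.com/cosmos/ibc-go/apps/transfer/types" // assumed import path
	clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
)

func main() {
	msg := transfertypes.MsgTransfer{
		SourcePort:       "transfer",
		SourceChannel:    "channel-0",
		Token:            sdk.NewCoin("uatom", sdk.NewInt(100)),
		Sender:           "cosmos1sender",   // placeholder, not a valid bech32 address
		Receiver:         "cosmos1receiver", // placeholder, not a valid bech32 address
		TimeoutHeight:    clienttypes.NewHeight(1, 1000),
		TimeoutTimestamp: 0,
	}

	// Marshal produces the protobuf wire encoding via MarshalToSizedBuffer above;
	// Unmarshal reverses it with the varint decoding loops shown in this file.
	bz, err := msg.Marshal()
	if err != nil {
		panic(err)
	}

	var decoded transfertypes.MsgTransfer
	if err := decoded.Unmarshal(bz); err != nil {
		panic(err)
	}
	fmt.Printf("encoded %d bytes, receiver=%s\n", len(bz), decoded.Receiver)
}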
diff --git a/buf.yaml b/buf.yaml
new file mode 100644
index 00000000..37f716ca
--- /dev/null
+++ b/buf.yaml
@@ -0,0 +1,34 @@
+version: v1beta1
+
+build:
+ roots:
+ - proto
+ - third_party/proto
+ excludes:
+ - third_party/proto/google/protobuf
+lint:
+ use:
+ - DEFAULT
+ - COMMENTS
+ - FILE_LOWER_SNAKE_CASE
+ except:
+ - UNARY_RPC
+ - COMMENT_FIELD
+ - SERVICE_SUFFIX
+ - PACKAGE_VERSION_SUFFIX
+ - RPC_REQUEST_STANDARD_NAME
+ ignore:
+ - tendermint
+ - gogoproto
+ - cosmos_proto
+ - google
+ - confio
+breaking:
+ use:
+ - FILE
+ ignore:
+ - tendermint
+ - gogoproto
+ - cosmos_proto
+ - google
+ - confio
diff --git a/core/02-client/abci.go b/core/02-client/abci.go
new file mode 100644
index 00000000..3c56d90a
--- /dev/null
+++ b/core/02-client/abci.go
@@ -0,0 +1,20 @@
+package client
+
+import (
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/keeper"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+)
+
+// BeginBlocker updates an existing localhost client with the latest block height.
+func BeginBlocker(ctx sdk.Context, k keeper.Keeper) {
+ _, found := k.GetClientState(ctx, exported.Localhost)
+ if !found {
+ return
+ }
+
+ // update the localhost client with the latest block height
+ if err := k.UpdateClient(ctx, exported.Localhost, nil); err != nil {
+ panic(err)
+ }
+}
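
BeginBlocker only has an effect once something calls it every block. The module wiring is not part of this patch, so the sketch below is an assumption about how a core IBC AppModule of this SDK era might forward its BeginBlock hook to it; the package name, struct, and field are hypothetical.

package ibc

import (
	abci "github.com/tendermint/tendermint/abci/types"

	sdk "github.com/cosmos/cosmos-sdk/types"
	ibcclient "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client"
	clientkeeper "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/keeper"
)

// AppModule is a minimal stand-in for the real core IBC AppModule.
type AppModule struct {
	clientKeeper clientkeeper.Keeper
}

// BeginBlock updates the localhost client at the start of every block by
// delegating to the BeginBlocker defined in abci.go.
func (am AppModule) BeginBlock(ctx sdk.Context, _ abci.RequestBeginBlock) {
	ibcclient.BeginBlocker(ctx, am.clientKeeper)
}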
diff --git a/core/02-client/abci_test.go b/core/02-client/abci_test.go
new file mode 100644
index 00000000..3a296618
--- /dev/null
+++ b/core/02-client/abci_test.go
@@ -0,0 +1,60 @@
+package client_test
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/suite"
+
+ client "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ localhosttypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/09-localhost/types"
+ ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+)
+
+type ClientTestSuite struct {
+ suite.Suite
+
+ coordinator *ibctesting.Coordinator
+
+ chainA *ibctesting.TestChain
+ chainB *ibctesting.TestChain
+}
+
+func (suite *ClientTestSuite) SetupTest() {
+ suite.coordinator = ibctesting.NewCoordinator(suite.T(), 2)
+
+ suite.chainA = suite.coordinator.GetChain(ibctesting.GetChainID(0))
+ suite.chainB = suite.coordinator.GetChain(ibctesting.GetChainID(1))
+
+ // set localhost client
+ revision := types.ParseChainID(suite.chainA.GetContext().ChainID())
+ localHostClient := localhosttypes.NewClientState(
+ suite.chainA.GetContext().ChainID(), types.NewHeight(revision, uint64(suite.chainA.GetContext().BlockHeight())),
+ )
+ suite.chainA.App.IBCKeeper.ClientKeeper.SetClientState(suite.chainA.GetContext(), exported.Localhost, localHostClient)
+}
+
+func TestClientTestSuite(t *testing.T) {
+ suite.Run(t, new(ClientTestSuite))
+}
+
+func (suite *ClientTestSuite) TestBeginBlocker() {
+ prevHeight := types.GetSelfHeight(suite.chainA.GetContext())
+
+ localHostClient := suite.chainA.GetClientState(exported.Localhost)
+ suite.Require().Equal(prevHeight, localHostClient.GetLatestHeight())
+
+ for i := 0; i < 10; i++ {
+ // increment height
+ suite.coordinator.CommitBlock(suite.chainA, suite.chainB)
+
+ suite.Require().NotPanics(func() {
+ client.BeginBlocker(suite.chainA.GetContext(), suite.chainA.App.IBCKeeper.ClientKeeper)
+ }, "BeginBlocker shouldn't panic")
+
+ localHostClient = suite.chainA.GetClientState(exported.Localhost)
+ suite.Require().Equal(prevHeight.Increment(), localHostClient.GetLatestHeight())
+ prevHeight = localHostClient.GetLatestHeight().(types.Height)
+ }
+}
diff --git a/core/02-client/client/cli/cli.go b/core/02-client/client/cli/cli.go
new file mode 100644
index 00000000..33c99152
--- /dev/null
+++ b/core/02-client/client/cli/cli.go
@@ -0,0 +1,51 @@
+package cli
+
+import (
+ "github.com/spf13/cobra"
+
+ "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+)
+
+// GetQueryCmd returns the query commands for IBC clients
+func GetQueryCmd() *cobra.Command {
+ queryCmd := &cobra.Command{
+ Use: types.SubModuleName,
+ Short: "IBC client query subcommands",
+ DisableFlagParsing: true,
+ SuggestionsMinimumDistance: 2,
+ RunE: client.ValidateCmd,
+ }
+
+ queryCmd.AddCommand(
+ GetCmdQueryClientStates(),
+ GetCmdQueryClientState(),
+ GetCmdQueryConsensusStates(),
+ GetCmdQueryConsensusState(),
+ GetCmdQueryHeader(),
+ GetCmdNodeConsensusState(),
+ GetCmdParams(),
+ )
+
+ return queryCmd
+}
+
+// NewTxCmd returns the command to create and handle IBC clients
+func NewTxCmd() *cobra.Command {
+ txCmd := &cobra.Command{
+ Use: types.SubModuleName,
+ Short: "IBC client transaction subcommands",
+ DisableFlagParsing: true,
+ SuggestionsMinimumDistance: 2,
+ RunE: client.ValidateCmd,
+ }
+
+ txCmd.AddCommand(
+ NewCreateClientCmd(),
+ NewUpdateClientCmd(),
+ NewSubmitMisbehaviourCmd(),
+ NewUpgradeClientCmd(),
+ )
+
+ return txCmd
+}
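
GetQueryCmd and NewTxCmd only return subcommand trees; they still have to be mounted under a root ibc command by the parent module, which is not part of this patch. The following is a hedged sketch of how that mounting might look; the package and function names are hypothetical.

package ibc

import (
	"github.com/spf13/cobra"

	"github.com/cosmos/cosmos-sdk/client"
	clientcli "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/client/cli"
)

// GetQueryCmd returns a root query command that exposes the 02-client
// subcommands under "query ibc client ...".
func GetQueryCmd() *cobra.Command {
	ibcQueryCmd := &cobra.Command{
		Use:                        "ibc",
		Short:                      "Querying commands for the IBC module",
		DisableFlagParsing:         true,
		SuggestionsMinimumDistance: 2,
		RunE:                       client.ValidateCmd,
	}

	ibcQueryCmd.AddCommand(clientcli.GetQueryCmd())

	return ibcQueryCmd
}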
diff --git a/core/02-client/client/cli/query.go b/core/02-client/client/cli/query.go
new file mode 100644
index 00000000..c1b5e51a
--- /dev/null
+++ b/core/02-client/client/cli/query.go
@@ -0,0 +1,260 @@
+package cli
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/spf13/cobra"
+
+ "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/client/flags"
+ "github.com/cosmos/cosmos-sdk/version"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/client/utils"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+)
+
+const (
+ flagLatestHeight = "latest-height"
+)
+
+// GetCmdQueryClientStates defines the command to query all the light clients
+// that this chain maintains.
+func GetCmdQueryClientStates() *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "states",
+ Short: "Query all available light clients",
+ Long: "Query all available light clients",
+ Example: fmt.Sprintf("%s query %s %s states", version.AppName, host.ModuleName, types.SubModuleName),
+ Args: cobra.NoArgs,
+ RunE: func(cmd *cobra.Command, _ []string) error {
+ clientCtx, err := client.GetClientQueryContext(cmd)
+ if err != nil {
+ return err
+ }
+ queryClient := types.NewQueryClient(clientCtx)
+
+ pageReq, err := client.ReadPageRequest(cmd.Flags())
+ if err != nil {
+ return err
+ }
+
+ req := &types.QueryClientStatesRequest{
+ Pagination: pageReq,
+ }
+
+ res, err := queryClient.ClientStates(cmd.Context(), req)
+ if err != nil {
+ return err
+ }
+
+ return clientCtx.PrintProto(res)
+ },
+ }
+ flags.AddQueryFlagsToCmd(cmd)
+ flags.AddPaginationFlagsToCmd(cmd, "client states")
+
+ return cmd
+}
+
+// GetCmdQueryClientState defines the command to query the state of a client with
+// a given id as defined in https://github.com/cosmos/ics/tree/master/spec/ics-002-client-semantics#query
+func GetCmdQueryClientState() *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "state [client-id]",
+ Short: "Query a client state",
+ Long: "Query stored client state",
+ Example: fmt.Sprintf("%s query %s %s state [client-id]", version.AppName, host.ModuleName, types.SubModuleName),
+ Args: cobra.ExactArgs(1),
+ RunE: func(cmd *cobra.Command, args []string) error {
+ clientCtx, err := client.GetClientQueryContext(cmd)
+ if err != nil {
+ return err
+ }
+ clientID := args[0]
+ prove, _ := cmd.Flags().GetBool(flags.FlagProve)
+
+ clientStateRes, err := utils.QueryClientState(clientCtx, clientID, prove)
+ if err != nil {
+ return err
+ }
+
+ return clientCtx.PrintProto(clientStateRes)
+ },
+ }
+
+ cmd.Flags().Bool(flags.FlagProve, true, "show proofs for the query results")
+ flags.AddQueryFlagsToCmd(cmd)
+
+ return cmd
+}
+
+// GetCmdQueryConsensusStates defines the command to query all the consensus states from a given
+// client state.
+func GetCmdQueryConsensusStates() *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "consensus-states [client-id]",
+ Short: "Query all the consensus states of a client.",
+ Long: "Query all the consensus states from a given client state.",
+ Example: fmt.Sprintf("%s query %s %s consensus-states [client-id]", version.AppName, host.ModuleName, types.SubModuleName),
+ Args: cobra.ExactArgs(1),
+ RunE: func(cmd *cobra.Command, args []string) error {
+ clientCtx, err := client.GetClientQueryContext(cmd)
+ if err != nil {
+ return err
+ }
+ clientID := args[0]
+
+ queryClient := types.NewQueryClient(clientCtx)
+
+ pageReq, err := client.ReadPageRequest(cmd.Flags())
+ if err != nil {
+ return err
+ }
+
+ req := &types.QueryConsensusStatesRequest{
+ ClientId: clientID,
+ Pagination: pageReq,
+ }
+
+ res, err := queryClient.ConsensusStates(cmd.Context(), req)
+ if err != nil {
+ return err
+ }
+
+ return clientCtx.PrintProto(res)
+ },
+ }
+ flags.AddQueryFlagsToCmd(cmd)
+ flags.AddPaginationFlagsToCmd(cmd, "consensus states")
+
+ return cmd
+}
+
+// GetCmdQueryConsensusState defines the command to query the consensus state of
+// the chain as defined in https://github.com/cosmos/ics/tree/master/spec/ics-002-client-semantics#query
+func GetCmdQueryConsensusState() *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "consensus-state [client-id] [height]",
+ Short: "Query the consensus state of a client at a given height",
+ Long: `Query the consensus state for a particular light client at a given height.
+If the '--latest-height' flag is included, the query returns the latest consensus state, overriding the height argument.`,
+ Example: fmt.Sprintf("%s query %s %s consensus-state [client-id] [height]", version.AppName, host.ModuleName, types.SubModuleName),
+ Args: cobra.RangeArgs(1, 2),
+ RunE: func(cmd *cobra.Command, args []string) error {
+ clientCtx, err := client.GetClientQueryContext(cmd)
+ if err != nil {
+ return err
+ }
+ clientID := args[0]
+ queryLatestHeight, _ := cmd.Flags().GetBool(flagLatestHeight)
+ var height types.Height
+
+ if !queryLatestHeight {
+ if len(args) != 2 {
+ return errors.New("must include a second 'height' argument when '--latest-height' flag is not provided")
+ }
+
+ height, err = types.ParseHeight(args[1])
+ if err != nil {
+ return err
+ }
+ }
+
+ prove, _ := cmd.Flags().GetBool(flags.FlagProve)
+
+ csRes, err := utils.QueryConsensusState(clientCtx, clientID, height, prove, queryLatestHeight)
+ if err != nil {
+ return err
+ }
+
+ return clientCtx.PrintProto(csRes)
+ },
+ }
+
+ cmd.Flags().Bool(flags.FlagProve, true, "show proofs for the query results")
+ cmd.Flags().Bool(flagLatestHeight, false, "return latest stored consensus state")
+ flags.AddQueryFlagsToCmd(cmd)
+
+ return cmd
+}
+
+// GetCmdQueryHeader defines the command to query the latest header on the chain
+func GetCmdQueryHeader() *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "header",
+ Short: "Query the latest header of the running chain",
+ Long: "Query the latest Tendermint header of the running chain",
+ Example: fmt.Sprintf("%s query %s %s header", version.AppName, host.ModuleName, types.SubModuleName),
+ Args: cobra.NoArgs,
+ RunE: func(cmd *cobra.Command, _ []string) error {
+ clientCtx, err := client.GetClientQueryContext(cmd)
+ if err != nil {
+ return err
+ }
+ header, _, err := utils.QueryTendermintHeader(clientCtx)
+ if err != nil {
+ return err
+ }
+
+ return clientCtx.PrintProto(&header)
+ },
+ }
+
+ flags.AddQueryFlagsToCmd(cmd)
+
+ return cmd
+}
+
+// GetCmdNodeConsensusState defines the command to query the latest consensus state of a node
+// The result is fed to client creation
+func GetCmdNodeConsensusState() *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "node-state",
+ Short: "Query a node consensus state",
+ Long: "Query a node consensus state. This result is fed to the client creation transaction.",
+ Example: fmt.Sprintf("%s query %s %s node-state", version.AppName, host.ModuleName, types.SubModuleName),
+ Args: cobra.NoArgs,
+ RunE: func(cmd *cobra.Command, _ []string) error {
+ clientCtx, err := client.GetClientQueryContext(cmd)
+ if err != nil {
+ return err
+ }
+ state, _, err := utils.QueryNodeConsensusState(clientCtx)
+ if err != nil {
+ return err
+ }
+
+ return clientCtx.PrintProto(state)
+ },
+ }
+
+ flags.AddQueryFlagsToCmd(cmd)
+
+ return cmd
+}
+
+// GetCmdParams returns the command handler for ibc client parameter querying.
+func GetCmdParams() *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "params",
+ Short: "Query the current ibc client parameters",
+ Long: "Query the current ibc client parameters",
+ Args: cobra.NoArgs,
+ Example: fmt.Sprintf("%s query %s %s params", version.AppName, host.ModuleName, types.SubModuleName),
+ RunE: func(cmd *cobra.Command, _ []string) error {
+ clientCtx, err := client.GetClientQueryContext(cmd)
+ if err != nil {
+ return err
+ }
+ queryClient := types.NewQueryClient(clientCtx)
+
+ res, _ := queryClient.ClientParams(cmd.Context(), &types.QueryClientParamsRequest{})
+ return clientCtx.PrintProto(res.Params)
+ },
+ }
+
+ flags.AddQueryFlagsToCmd(cmd)
+
+ return cmd
+}
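
Several of these commands (consensus-state above, and the update-client proposal command in tx.go below) take a height argument parsed with types.ParseHeight, which expects the "{revision-number}-{revision-height}" format used throughout IBC. The short standalone sketch below (illustrative only, using the same 02-client types package these files import) shows the expected format:

package main

import (
	"fmt"

	clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
)

func main() {
	// Revision 1 at block 1000 is written "1-1000".
	h, err := clienttypes.ParseHeight("1-1000")
	if err != nil {
		panic(err)
	}
	fmt.Println(h.RevisionNumber, h.RevisionHeight) // prints: 1 1000
}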
diff --git a/core/02-client/client/cli/tx.go b/core/02-client/client/cli/tx.go
new file mode 100644
index 00000000..bdaa53a8
--- /dev/null
+++ b/core/02-client/client/cli/tx.go
@@ -0,0 +1,328 @@
+package cli
+
+import (
+ "fmt"
+ "io/ioutil"
+
+ "github.com/pkg/errors"
+ "github.com/spf13/cobra"
+
+ "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/client/flags"
+ "github.com/cosmos/cosmos-sdk/client/tx"
+ "github.com/cosmos/cosmos-sdk/codec"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/types/msgservice"
+ "github.com/cosmos/cosmos-sdk/version"
+ govcli "github.com/cosmos/cosmos-sdk/x/gov/client/cli"
+ govtypes "github.com/cosmos/cosmos-sdk/x/gov/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+)
+
+// NewCreateClientCmd defines the command to create a new IBC light client.
+func NewCreateClientCmd() *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "create [path/to/client_state.json] [path/to/consensus_state.json]",
+ Short: "create new IBC client",
+ Long: `create a new IBC client with the specified client state and consensus state
+ - ClientState JSON example: {"@type":"/ibc.lightclients.solomachine.v1.ClientState","sequence":"1","frozen_sequence":"0","consensus_state":{"public_key":{"@type":"/cosmos.crypto.secp256k1.PubKey","key":"AtK50+5pJOoaa04qqAqrnyAqsYrwrR/INnA6UPIaYZlp"},"diversifier":"testing","timestamp":"10"},"allow_update_after_proposal":false}
+ - ConsensusState JSON example: {"@type":"/ibc.lightclients.solomachine.v1.ConsensusState","public_key":{"@type":"/cosmos.crypto.secp256k1.PubKey","key":"AtK50+5pJOoaa04qqAqrnyAqsYrwrR/INnA6UPIaYZlp"},"diversifier":"testing","timestamp":"10"}`,
+ Example: fmt.Sprintf("%s tx ibc %s create [path/to/client_state.json] [path/to/consensus_state.json] --from node0 --home ../node0/cli --chain-id $CID", version.AppName, types.SubModuleName),
+ Args: cobra.ExactArgs(2),
+ RunE: func(cmd *cobra.Command, args []string) error {
+ clientCtx, err := client.GetClientTxContext(cmd)
+ if err != nil {
+ return err
+ }
+ cdc := codec.NewProtoCodec(clientCtx.InterfaceRegistry)
+
+ // attempt to unmarshal client state argument
+ var clientState exported.ClientState
+ clientContentOrFileName := args[0]
+ if err := cdc.UnmarshalInterfaceJSON([]byte(clientContentOrFileName), &clientState); err != nil {
+
+ // check for file path if JSON input is not provided
+ contents, err := ioutil.ReadFile(clientContentOrFileName)
+ if err != nil {
+ return errors.Wrap(err, "neither JSON input nor path to .json file for client state were provided")
+ }
+
+ if err := cdc.UnmarshalInterfaceJSON(contents, &clientState); err != nil {
+ return errors.Wrap(err, "error unmarshalling client state file")
+ }
+ }
+
+ // attempt to unmarshal consensus state argument
+ var consensusState exported.ConsensusState
+ consensusContentOrFileName := args[1]
+ if err := cdc.UnmarshalInterfaceJSON([]byte(consensusContentOrFileName), &consensusState); err != nil {
+
+ // check for file path if JSON input is not provided
+ contents, err := ioutil.ReadFile(consensusContentOrFileName)
+ if err != nil {
+ return errors.Wrap(err, "neither JSON input nor path to .json file for consensus state were provided")
+ }
+
+ if err := cdc.UnmarshalInterfaceJSON(contents, &consensusState); err != nil {
+ return errors.Wrap(err, "error unmarshalling consensus state file")
+ }
+ }
+
+ msg, err := types.NewMsgCreateClient(clientState, consensusState, clientCtx.GetFromAddress())
+ if err != nil {
+ return err
+ }
+
+ svcMsgClientConn := &msgservice.ServiceMsgClientConn{}
+ msgClient := types.NewMsgClient(svcMsgClientConn)
+ _, err = msgClient.CreateClient(cmd.Context(), msg)
+ if err != nil {
+ return err
+ }
+
+ return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), svcMsgClientConn.GetMsgs()...)
+ },
+ }
+
+ flags.AddTxFlagsToCmd(cmd)
+
+ return cmd
+}
+
+// NewUpdateClientCmd defines the command to update an IBC client.
+func NewUpdateClientCmd() *cobra.Command {
+ return &cobra.Command{
+ Use: "update [client-id] [path/to/header.json]",
+ Short: "update existing client with a header",
+ Long: "update existing client with a header",
+ Example: fmt.Sprintf("%s tx ibc %s update [client-id] [path/to/header.json] --from node0 --home ../node0/cli --chain-id $CID", version.AppName, types.SubModuleName),
+ Args: cobra.ExactArgs(2),
+ RunE: func(cmd *cobra.Command, args []string) error {
+ clientCtx, err := client.GetClientTxContext(cmd)
+ if err != nil {
+ return err
+ }
+ clientID := args[0]
+
+ cdc := codec.NewProtoCodec(clientCtx.InterfaceRegistry)
+
+ var header exported.Header
+ headerContentOrFileName := args[1]
+ if err := cdc.UnmarshalInterfaceJSON([]byte(headerContentOrFileName), &header); err != nil {
+
+ // check for file path if JSON input is not provided
+ contents, err := ioutil.ReadFile(headerContentOrFileName)
+ if err != nil {
+ return errors.Wrap(err, "neither JSON input nor path to .json file for header were provided")
+ }
+
+ if err := cdc.UnmarshalInterfaceJSON(contents, &header); err != nil {
+ return errors.Wrap(err, "error unmarshalling header file")
+ }
+ }
+
+ msg, err := types.NewMsgUpdateClient(clientID, header, clientCtx.GetFromAddress())
+ if err != nil {
+ return err
+ }
+
+ svcMsgClientConn := &msgservice.ServiceMsgClientConn{}
+ msgClient := types.NewMsgClient(svcMsgClientConn)
+ _, err = msgClient.UpdateClient(cmd.Context(), msg)
+ if err != nil {
+ return err
+ }
+
+ return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), svcMsgClientConn.GetMsgs()...)
+ },
+ }
+}
+
+// NewSubmitMisbehaviourCmd defines the command to submit a misbehaviour to prevent
+// future updates.
+func NewSubmitMisbehaviourCmd() *cobra.Command {
+ return &cobra.Command{
+ Use: "misbehaviour [path/to/misbehaviour.json]",
+ Short: "submit a client misbehaviour",
+ Long: "submit a client misbehaviour to prevent future updates",
+ Example: fmt.Sprintf("%s tx ibc %s misbehaviour [path/to/misbehaviour.json] --from node0 --home ../node0/cli --chain-id $CID", version.AppName, types.SubModuleName),
+ Args: cobra.ExactArgs(1),
+ RunE: func(cmd *cobra.Command, args []string) error {
+ clientCtx, err := client.GetClientTxContext(cmd)
+ if err != nil {
+ return err
+ }
+ cdc := codec.NewProtoCodec(clientCtx.InterfaceRegistry)
+
+ var misbehaviour exported.Misbehaviour
+ misbehaviourContentOrFileName := args[0]
+ if err := cdc.UnmarshalInterfaceJSON([]byte(misbehaviourContentOrFileName), &misbehaviour); err != nil {
+
+ // check for file path if JSON input is not provided
+ contents, err := ioutil.ReadFile(misbehaviourContentOrFileName)
+ if err != nil {
+ return errors.Wrap(err, "neither JSON input nor path to .json file for misbehaviour were provided")
+ }
+
+ if err := cdc.UnmarshalInterfaceJSON(contents, &misbehaviour); err != nil {
+ return errors.Wrap(err, "error unmarshalling misbehaviour file")
+ }
+ }
+
+ msg, err := types.NewMsgSubmitMisbehaviour(misbehaviour.GetClientID(), misbehaviour, clientCtx.GetFromAddress())
+ if err != nil {
+ return err
+ }
+
+ svcMsgClientConn := &msgservice.ServiceMsgClientConn{}
+ msgClient := types.NewMsgClient(svcMsgClientConn)
+ _, err = msgClient.SubmitMisbehaviour(cmd.Context(), msg)
+ if err != nil {
+ return err
+ }
+
+ return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), svcMsgClientConn.GetMsgs()...)
+ },
+ }
+}
+
+// NewUpgradeClientCmd defines the command to upgrade an IBC light client.
+func NewUpgradeClientCmd() *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "upgrade [client-identifier] [path/to/client_state.json] [path/to/consensus_state.json] [upgrade-client-proof] [upgrade-consensus-state-proof]",
+ Short: "upgrade an IBC client",
+ Long: `upgrade the IBC client associated with the provided client identifier while providing proof committed by the counterparty chain to the new client and consensus states
+ - ClientState JSON example: {"@type":"/ibc.lightclients.solomachine.v1.ClientState","sequence":"1","frozen_sequence":"0","consensus_state":{"public_key":{"@type":"/cosmos.crypto.secp256k1.PubKey","key":"AtK50+5pJOoaa04qqAqrnyAqsYrwrR/INnA6UPIaYZlp"},"diversifier":"testing","timestamp":"10"},"allow_update_after_proposal":false}
+ - ConsensusState JSON example: {"@type":"/ibc.lightclients.solomachine.v1.ConsensusState","public_key":{"@type":"/cosmos.crypto.secp256k1.PubKey","key":"AtK50+5pJOoaa04qqAqrnyAqsYrwrR/INnA6UPIaYZlp"},"diversifier":"testing","timestamp":"10"}`,
+ Example: fmt.Sprintf("%s tx ibc %s upgrade [client-identifier] [path/to/client_state.json] [path/to/consensus_state.json] [client-state-proof] [consensus-state-proof] --from node0 --home ../node0/cli --chain-id $CID", version.AppName, types.SubModuleName),
+ Args: cobra.ExactArgs(5),
+ RunE: func(cmd *cobra.Command, args []string) error {
+ clientCtx, err := client.GetClientTxContext(cmd)
+ if err != nil {
+ return err
+ }
+ cdc := codec.NewProtoCodec(clientCtx.InterfaceRegistry)
+ clientID := args[0]
+
+ // attempt to unmarshal client state argument
+ var clientState exported.ClientState
+ clientContentOrFileName := args[1]
+ if err := cdc.UnmarshalInterfaceJSON([]byte(clientContentOrFileName), &clientState); err != nil {
+
+ // check for file path if JSON input is not provided
+ contents, err := ioutil.ReadFile(clientContentOrFileName)
+ if err != nil {
+ return errors.Wrap(err, "neither JSON input nor path to .json file for client state were provided")
+ }
+
+ if err := cdc.UnmarshalInterfaceJSON(contents, &clientState); err != nil {
+ return errors.Wrap(err, "error unmarshalling client state file")
+ }
+ }
+
+ // attempt to unmarshal consensus state argument
+ var consensusState exported.ConsensusState
+ consensusContentOrFileName := args[2]
+ if err := cdc.UnmarshalInterfaceJSON([]byte(consensusContentOrFileName), &consensusState); err != nil {
+
+ // check for file path if JSON input is not provided
+ contents, err := ioutil.ReadFile(consensusContentOrFileName)
+ if err != nil {
+ return errors.Wrap(err, "neither JSON input nor path to .json file for consensus state were provided")
+ }
+
+ if err := cdc.UnmarshalInterfaceJSON(contents, &consensusState); err != nil {
+ return errors.Wrap(err, "error unmarshalling consensus state file")
+ }
+ }
+
+ proofUpgradeClient := []byte(args[3])
+ proofUpgradeConsensus := []byte(args[4])
+
+ msg, err := types.NewMsgUpgradeClient(clientID, clientState, consensusState, proofUpgradeClient, proofUpgradeConsensus, clientCtx.GetFromAddress())
+ if err != nil {
+ return err
+ }
+
+ svcMsgClientConn := &msgservice.ServiceMsgClientConn{}
+ msgClient := types.NewMsgClient(svcMsgClientConn)
+ _, err = msgClient.UpgradeClient(cmd.Context(), msg)
+ if err != nil {
+ return err
+ }
+
+ return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), svcMsgClientConn.GetMsgs()...)
+ },
+ }
+
+ flags.AddTxFlagsToCmd(cmd)
+
+ return cmd
+}
+
+// NewCmdSubmitUpdateClientProposal implements a command handler for submitting an update IBC client proposal transaction.
+func NewCmdSubmitUpdateClientProposal() *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "update-client [subject-client-id] [substitute-client-id] [initial-height] [flags]",
+ Args: cobra.ExactArgs(3),
+ Short: "Submit an update IBC client proposal",
+ Long: "Submit an update IBC client proposal along with an initial deposit.\n" +
+ "Please specify the subject client identifier you want to update.\n" +
+ "Please specify the substitute client the subject client will use and the initial height to reference the substitute client's state.",
+ RunE: func(cmd *cobra.Command, args []string) error {
+ clientCtx, err := client.GetClientTxContext(cmd)
+ if err != nil {
+ return err
+ }
+
+ title, err := cmd.Flags().GetString(govcli.FlagTitle)
+ if err != nil {
+ return err
+ }
+
+ description, err := cmd.Flags().GetString(govcli.FlagDescription)
+ if err != nil {
+ return err
+ }
+
+ subjectClientID := args[0]
+ substituteClientID := args[1]
+
+ initialHeight, err := types.ParseHeight(args[2])
+ if err != nil {
+ return err
+ }
+
+ content := types.NewClientUpdateProposal(title, description, subjectClientID, substituteClientID, initialHeight)
+
+ from := clientCtx.GetFromAddress()
+
+ depositStr, err := cmd.Flags().GetString(govcli.FlagDeposit)
+ if err != nil {
+ return err
+ }
+ deposit, err := sdk.ParseCoinsNormalized(depositStr)
+ if err != nil {
+ return err
+ }
+
+ msg, err := govtypes.NewMsgSubmitProposal(content, deposit, from)
+ if err != nil {
+ return err
+ }
+
+ if err = msg.ValidateBasic(); err != nil {
+ return err
+ }
+
+ return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), msg)
+ },
+ }
+
+ cmd.Flags().String(govcli.FlagTitle, "", "title of proposal")
+ cmd.Flags().String(govcli.FlagDescription, "", "description of proposal")
+ cmd.Flags().String(govcli.FlagDeposit, "", "deposit of proposal")
+
+ return cmd
+}
diff --git a/core/02-client/client/proposal_handler.go b/core/02-client/client/proposal_handler.go
new file mode 100644
index 00000000..63585cbe
--- /dev/null
+++ b/core/02-client/client/proposal_handler.go
@@ -0,0 +1,8 @@
+package client
+
+import (
+ govclient "github.com/cosmos/cosmos-sdk/x/gov/client"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/client/cli"
+)
+
+var ProposalHandler = govclient.NewProposalHandler(cli.NewCmdSubmitUpdateClientProposal, nil)
diff --git a/core/02-client/client/utils/utils.go b/core/02-client/client/utils/utils.go
new file mode 100644
index 00000000..1a7bc003
--- /dev/null
+++ b/core/02-client/client/utils/utils.go
@@ -0,0 +1,199 @@
+package utils
+
+import (
+ "context"
+
+ tmtypes "github.com/tendermint/tendermint/types"
+
+ "github.com/cosmos/cosmos-sdk/client"
+
+ "github.com/cosmos/cosmos-sdk/codec"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
+ host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+ ibcclient "github.com/cosmos/cosmos-sdk/x/ibc/core/client"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
+)
+
+// QueryClientState returns a client state. If prove is true, it performs an ABCI store query
+// in order to retrieve the merkle proof. Otherwise, it uses the gRPC query client.
+func QueryClientState(
+ clientCtx client.Context, clientID string, prove bool,
+) (*types.QueryClientStateResponse, error) {
+ if prove {
+ return QueryClientStateABCI(clientCtx, clientID)
+ }
+
+ queryClient := types.NewQueryClient(clientCtx)
+ req := &types.QueryClientStateRequest{
+ ClientId: clientID,
+ }
+
+ return queryClient.ClientState(context.Background(), req)
+}
+
+// QueryClientStateABCI queries the store to get the light client state and a merkle proof.
+func QueryClientStateABCI(
+ clientCtx client.Context, clientID string,
+) (*types.QueryClientStateResponse, error) {
+ key := host.FullClientStateKey(clientID)
+
+ value, proofBz, proofHeight, err := ibcclient.QueryTendermintProof(clientCtx, key)
+ if err != nil {
+ return nil, err
+ }
+
+ // check if client exists
+ if len(value) == 0 {
+ return nil, sdkerrors.Wrap(types.ErrClientNotFound, clientID)
+ }
+
+ cdc := codec.NewProtoCodec(clientCtx.InterfaceRegistry)
+
+ clientState, err := types.UnmarshalClientState(cdc, value)
+ if err != nil {
+ return nil, err
+ }
+
+ anyClientState, err := types.PackClientState(clientState)
+ if err != nil {
+ return nil, err
+ }
+
+ clientStateRes := types.NewQueryClientStateResponse(anyClientState, proofBz, proofHeight)
+ return clientStateRes, nil
+}
+
+// QueryConsensusState returns a consensus state. If prove is true, it performs an ABCI store
+// query in order to retrieve the merkle proof. Otherwise, it uses the gRPC query client.
+func QueryConsensusState(
+ clientCtx client.Context, clientID string, height exported.Height, prove, latestHeight bool,
+) (*types.QueryConsensusStateResponse, error) {
+ if prove {
+ return QueryConsensusStateABCI(clientCtx, clientID, height)
+ }
+
+ queryClient := types.NewQueryClient(clientCtx)
+ req := &types.QueryConsensusStateRequest{
+ ClientId: clientID,
+ RevisionNumber: height.GetRevisionNumber(),
+ RevisionHeight: height.GetRevisionHeight(),
+ LatestHeight: latestHeight,
+ }
+
+ return queryClient.ConsensusState(context.Background(), req)
+}
+
+// QueryConsensusStateABCI queries the store to get the consensus state of a light client and a
+// merkle proof of its existence or non-existence.
+func QueryConsensusStateABCI(
+ clientCtx client.Context, clientID string, height exported.Height,
+) (*types.QueryConsensusStateResponse, error) {
+ key := host.FullConsensusStateKey(clientID, height)
+
+ value, proofBz, proofHeight, err := ibcclient.QueryTendermintProof(clientCtx, key)
+ if err != nil {
+ return nil, err
+ }
+
+ // check if consensus state exists
+ if len(value) == 0 {
+ return nil, sdkerrors.Wrap(types.ErrConsensusStateNotFound, clientID)
+ }
+
+ cdc := codec.NewProtoCodec(clientCtx.InterfaceRegistry)
+
+ cs, err := types.UnmarshalConsensusState(cdc, value)
+ if err != nil {
+ return nil, err
+ }
+
+ anyConsensusState, err := types.PackConsensusState(cs)
+ if err != nil {
+ return nil, err
+ }
+
+ return types.NewQueryConsensusStateResponse(anyConsensusState, proofBz, proofHeight), nil
+}
+
+// QueryTendermintHeader takes a client context and returns the appropriate
+// tendermint header
+func QueryTendermintHeader(clientCtx client.Context) (ibctmtypes.Header, int64, error) {
+ node, err := clientCtx.GetNode()
+ if err != nil {
+ return ibctmtypes.Header{}, 0, err
+ }
+
+ info, err := node.ABCIInfo(context.Background())
+ if err != nil {
+ return ibctmtypes.Header{}, 0, err
+ }
+
+ height := info.Response.LastBlockHeight
+
+ commit, err := node.Commit(context.Background(), &height)
+ if err != nil {
+ return ibctmtypes.Header{}, 0, err
+ }
+
+ page := 1
+ count := 10_000
+
+ validators, err := node.Validators(context.Background(), &height, &page, &count)
+ if err != nil {
+ return ibctmtypes.Header{}, 0, err
+ }
+
+ protoCommit := commit.SignedHeader.ToProto()
+ protoValset, err := tmtypes.NewValidatorSet(validators.Validators).ToProto()
+ if err != nil {
+ return ibctmtypes.Header{}, 0, err
+ }
+
+ header := ibctmtypes.Header{
+ SignedHeader: protoCommit,
+ ValidatorSet: protoValset,
+ }
+
+ return header, height, nil
+}
+
+// QueryNodeConsensusState takes a client context and returns the appropriate
+// tendermint consensus state
+func QueryNodeConsensusState(clientCtx client.Context) (*ibctmtypes.ConsensusState, int64, error) {
+ node, err := clientCtx.GetNode()
+ if err != nil {
+ return &ibctmtypes.ConsensusState{}, 0, err
+ }
+
+ info, err := node.ABCIInfo(context.Background())
+ if err != nil {
+ return &ibctmtypes.ConsensusState{}, 0, err
+ }
+
+ height := info.Response.LastBlockHeight
+
+ commit, err := node.Commit(context.Background(), &height)
+ if err != nil {
+ return &ibctmtypes.ConsensusState{}, 0, err
+ }
+
+ page := 1
+ count := 10_000
+
+ nextHeight := height + 1
+ nextVals, err := node.Validators(context.Background(), &nextHeight, &page, &count)
+ if err != nil {
+ return &ibctmtypes.ConsensusState{}, 0, err
+ }
+
+ state := &ibctmtypes.ConsensusState{
+ Timestamp: commit.Time,
+ Root: commitmenttypes.NewMerkleRoot(commit.AppHash),
+ NextValidatorsHash: tmtypes.NewValidatorSet(nextVals.Validators).Hash(),
+ }
+
+ return state, height, nil
+}
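A minimal usage sketch of the query helpers above, assuming they are exported from the 02-client command utilities package (the import path below mirrors the layout used elsewhere in this patch and is illustrative) and that an initialised client.Context is available. Passing prove=true routes through QueryClientStateABCI, so the response also carries the merkle proof and proof height.

package example

import (
	"fmt"

	"github.com/cosmos/cosmos-sdk/client"

	clientutils "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/client/utils"
)

// printClientStateWithProof queries a client state together with its merkle
// proof and prints the height at which the proof was generated. The client
// identifier (e.g. "07-tendermint-0") is supplied by the caller.
func printClientStateWithProof(clientCtx client.Context, clientID string) error {
	res, err := clientutils.QueryClientState(clientCtx, clientID, true)
	if err != nil {
		return err
	}
	fmt.Printf("client %s proven at height %s\n", clientID, res.ProofHeight.String())
	return nil
}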
diff --git a/core/02-client/doc.go b/core/02-client/doc.go
new file mode 100644
index 00000000..cfe3c76c
--- /dev/null
+++ b/core/02-client/doc.go
@@ -0,0 +1,10 @@
+/*
+Package client implements the ICS 02 - Client Semantics specification
+https://github.com/cosmos/ics/tree/master/spec/ics-002-client-semantics. This
+concrete implementation defines types and methods to store and update light
+clients which track another chain's state.
+
+The main type is `Client`, which provides `commitment.Root` to verify state proofs and `ConsensusState` to
+verify header proofs.
+*/
+package client
diff --git a/core/02-client/genesis.go b/core/02-client/genesis.go
new file mode 100644
index 00000000..26635f07
--- /dev/null
+++ b/core/02-client/genesis.go
@@ -0,0 +1,69 @@
+package client
+
+import (
+ "fmt"
+
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/keeper"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+)
+
+// InitGenesis initializes the ibc client submodule's state from a provided genesis
+// state.
+func InitGenesis(ctx sdk.Context, k keeper.Keeper, gs types.GenesisState) {
+ k.SetParams(ctx, gs.Params)
+
+ // Set all client metadata first. This will allow client keeper to overwrite client and consensus state keys
+ // if clients accidentally write to ClientKeeper reserved keys.
+ if len(gs.ClientsMetadata) != 0 {
+ k.SetAllClientMetadata(ctx, gs.ClientsMetadata)
+ }
+
+ for _, client := range gs.Clients {
+ cs, ok := client.ClientState.GetCachedValue().(exported.ClientState)
+ if !ok {
+ panic("invalid client state")
+ }
+
+ if !gs.Params.IsAllowedClient(cs.ClientType()) {
+ panic(fmt.Sprintf("client state type %s is not registered on the allowlist", cs.ClientType()))
+ }
+
+ k.SetClientState(ctx, client.ClientId, cs)
+ }
+
+ for _, cs := range gs.ClientsConsensus {
+ for _, consState := range cs.ConsensusStates {
+ consensusState, ok := consState.ConsensusState.GetCachedValue().(exported.ConsensusState)
+ if !ok {
+ panic(fmt.Sprintf("invalid consensus state with client ID %s at height %s", cs.ClientId, consState.Height))
+ }
+
+ k.SetClientConsensusState(ctx, cs.ClientId, consState.Height, consensusState)
+ }
+ }
+
+ k.SetNextClientSequence(ctx, gs.NextClientSequence)
+
+ // NOTE: localhost creation is specifically disallowed for the time being.
+ // Issue: https://github.com/cosmos/cosmos-sdk/issues/7871
+}
+
+// ExportGenesis returns the ibc client submodule's exported genesis.
+// NOTE: CreateLocalhost should always be false on export since a
+// created localhost will be included in the exported clients.
+func ExportGenesis(ctx sdk.Context, k keeper.Keeper) types.GenesisState {
+ genClients := k.GetAllGenesisClients(ctx)
+ clientsMetadata, err := k.GetAllClientMetadata(ctx, genClients)
+ if err != nil {
+ panic(err)
+ }
+ return types.GenesisState{
+ Clients: genClients,
+ ClientsMetadata: clientsMetadata,
+ ClientsConsensus: k.GetAllConsensusStates(ctx),
+ Params: k.GetParams(ctx),
+ CreateLocalhost: false,
+ }
+}
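A short sketch of how the two functions above compose, assuming a configured keeper and contexts: ExportGenesis captures clients, consensus states, metadata and params, and InitGenesis replays them, panicking if any client type is missing from the allowlist.

package example

import (
	sdk "github.com/cosmos/cosmos-sdk/types"
	ibcclient "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client"
	"github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/keeper"
)

// roundTripGenesis exports the 02-client state from one context and replays it
// into another, e.g. as part of a chain export/import flow.
func roundTripGenesis(srcCtx, dstCtx sdk.Context, k keeper.Keeper) {
	gs := ibcclient.ExportGenesis(srcCtx, k)
	ibcclient.InitGenesis(dstCtx, k, gs)
}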
diff --git a/core/02-client/keeper/client.go b/core/02-client/keeper/client.go
new file mode 100644
index 00000000..672dcf5d
--- /dev/null
+++ b/core/02-client/keeper/client.go
@@ -0,0 +1,192 @@
+package keeper
+
+import (
+ "github.com/armon/go-metrics"
+
+ "github.com/cosmos/cosmos-sdk/telemetry"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+)
+
+// CreateClient creates a new client state and populates it with a given consensus
+// state as defined in https://github.com/cosmos/ics/tree/master/spec/ics-002-client-semantics#create
+func (k Keeper) CreateClient(
+ ctx sdk.Context, clientState exported.ClientState, consensusState exported.ConsensusState,
+) (string, error) {
+ params := k.GetParams(ctx)
+ if !params.IsAllowedClient(clientState.ClientType()) {
+ return "", sdkerrors.Wrapf(
+ types.ErrInvalidClientType,
+ "client state type %s is not registered in the allowlist", clientState.ClientType(),
+ )
+ }
+
+ clientID := k.GenerateClientIdentifier(ctx, clientState.ClientType())
+
+ k.SetClientState(ctx, clientID, clientState)
+
+ // verifies initial consensus state against client state and initializes client store with any client-specific metadata
+ // e.g. set ProcessedTime in Tendermint clients
+ if err := clientState.Initialize(ctx, k.cdc, k.ClientStore(ctx, clientID), consensusState); err != nil {
+ return "", err
+ }
+
+ // check if consensus state is nil in case the created client is Localhost
+ if consensusState != nil {
+ k.SetClientConsensusState(ctx, clientID, clientState.GetLatestHeight(), consensusState)
+ }
+
+ k.Logger(ctx).Info("client created at height", "client-id", clientID, "height", clientState.GetLatestHeight().String())
+
+ defer func() {
+ telemetry.IncrCounterWithLabels(
+ []string{"ibc", "client", "create"},
+ 1,
+ []metrics.Label{telemetry.NewLabel("client-type", clientState.ClientType())},
+ )
+ }()
+
+ return clientID, nil
+}
+
+// UpdateClient updates the consensus state and the state root from a provided header.
+func (k Keeper) UpdateClient(ctx sdk.Context, clientID string, header exported.Header) error {
+ clientState, found := k.GetClientState(ctx, clientID)
+ if !found {
+ return sdkerrors.Wrapf(types.ErrClientNotFound, "cannot update client with ID %s", clientID)
+ }
+
+ // prevent update if the client is frozen before or at header height
+ if clientState.IsFrozen() && clientState.GetFrozenHeight().LTE(header.GetHeight()) {
+ return sdkerrors.Wrapf(types.ErrClientFrozen, "cannot update client with ID %s", clientID)
+ }
+
+ clientState, consensusState, err := clientState.CheckHeaderAndUpdateState(ctx, k.cdc, k.ClientStore(ctx, clientID), header)
+ if err != nil {
+ return sdkerrors.Wrapf(err, "cannot update client with ID %s", clientID)
+ }
+
+ k.SetClientState(ctx, clientID, clientState)
+
+ var consensusHeight exported.Height
+
+ // we don't set consensus state for localhost client
+ if header != nil && clientID != exported.Localhost {
+ k.SetClientConsensusState(ctx, clientID, header.GetHeight(), consensusState)
+ consensusHeight = header.GetHeight()
+ } else {
+ consensusHeight = types.GetSelfHeight(ctx)
+ }
+
+ k.Logger(ctx).Info("client state updated", "client-id", clientID, "height", consensusHeight.String())
+
+ defer func() {
+ telemetry.IncrCounterWithLabels(
+ []string{"ibc", "client", "update"},
+ 1,
+ []metrics.Label{
+ telemetry.NewLabel("client-type", clientState.ClientType()),
+ telemetry.NewLabel("client-id", clientID),
+ telemetry.NewLabel("update-type", "msg"),
+ },
+ )
+ }()
+
+ // emitting events in the keeper emits for both begin block and handler client updates
+ ctx.EventManager().EmitEvent(
+ sdk.NewEvent(
+ types.EventTypeUpdateClient,
+ sdk.NewAttribute(types.AttributeKeyClientID, clientID),
+ sdk.NewAttribute(types.AttributeKeyClientType, clientState.ClientType()),
+ sdk.NewAttribute(types.AttributeKeyConsensusHeight, consensusHeight.String()),
+ ),
+ )
+
+ return nil
+}
+
+// UpgradeClient upgrades the client to a new client state if this new client was committed to
+// by the old client at the specified upgrade height
+func (k Keeper) UpgradeClient(ctx sdk.Context, clientID string, upgradedClient exported.ClientState, upgradedConsState exported.ConsensusState,
+ proofUpgradeClient, proofUpgradeConsState []byte) error {
+ clientState, found := k.GetClientState(ctx, clientID)
+ if !found {
+ return sdkerrors.Wrapf(types.ErrClientNotFound, "cannot update client with ID %s", clientID)
+ }
+
+ // prevent upgrade if current client is frozen
+ if clientState.IsFrozen() {
+ return sdkerrors.Wrapf(types.ErrClientFrozen, "cannot update client with ID %s", clientID)
+ }
+
+ updatedClientState, updatedConsState, err := clientState.VerifyUpgradeAndUpdateState(ctx, k.cdc, k.ClientStore(ctx, clientID),
+ upgradedClient, upgradedConsState, proofUpgradeClient, proofUpgradeConsState)
+ if err != nil {
+ return sdkerrors.Wrapf(err, "cannot upgrade client with ID %s", clientID)
+ }
+
+ k.SetClientState(ctx, clientID, updatedClientState)
+ k.SetClientConsensusState(ctx, clientID, updatedClientState.GetLatestHeight(), updatedConsState)
+
+ k.Logger(ctx).Info("client state upgraded", "client-id", clientID, "height", updatedClientState.GetLatestHeight().String())
+
+ defer func() {
+ telemetry.IncrCounterWithLabels(
+ []string{"ibc", "client", "upgrade"},
+ 1,
+ []metrics.Label{
+ telemetry.NewLabel("client-type", updatedClientState.ClientType()),
+ telemetry.NewLabel("client-id", clientID),
+ },
+ )
+ }()
+
+ // emitting events in the keeper emits for client upgrades
+ ctx.EventManager().EmitEvent(
+ sdk.NewEvent(
+ types.EventTypeUpgradeClient,
+ sdk.NewAttribute(types.AttributeKeyClientID, clientID),
+ sdk.NewAttribute(types.AttributeKeyClientType, updatedClientState.ClientType()),
+ sdk.NewAttribute(types.AttributeKeyConsensusHeight, updatedClientState.GetLatestHeight().String()),
+ ),
+ )
+
+ return nil
+}
+
+// CheckMisbehaviourAndUpdateState checks for client misbehaviour and freezes the
+// client if so.
+func (k Keeper) CheckMisbehaviourAndUpdateState(ctx sdk.Context, misbehaviour exported.Misbehaviour) error {
+ clientState, found := k.GetClientState(ctx, misbehaviour.GetClientID())
+ if !found {
+ return sdkerrors.Wrapf(types.ErrClientNotFound, "cannot check misbehaviour for client with ID %s", misbehaviour.GetClientID())
+ }
+
+ if clientState.IsFrozen() && clientState.GetFrozenHeight().LTE(misbehaviour.GetHeight()) {
+ return sdkerrors.Wrapf(types.ErrInvalidMisbehaviour, "client is already frozen at height ≤ misbehaviour height (%s ≤ %s)", clientState.GetFrozenHeight(), misbehaviour.GetHeight())
+ }
+
+ clientState, err := clientState.CheckMisbehaviourAndUpdateState(ctx, k.cdc, k.ClientStore(ctx, misbehaviour.GetClientID()), misbehaviour)
+ if err != nil {
+ return err
+ }
+
+ k.SetClientState(ctx, misbehaviour.GetClientID(), clientState)
+ k.Logger(ctx).Info("client frozen due to misbehaviour", "client-id", misbehaviour.GetClientID(), "height", misbehaviour.GetHeight().String())
+
+ defer func() {
+ telemetry.IncrCounterWithLabels(
+ []string{"ibc", "client", "misbehaviour"},
+ 1,
+ []metrics.Label{
+ telemetry.NewLabel("client-type", misbehaviour.ClientType()),
+ telemetry.NewLabel("client-id", misbehaviour.GetClientID()),
+ },
+ )
+ }()
+
+ return nil
+}
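A compact sketch of the intended call order for the keeper entry points above, assuming the context, keeper, and decoded client state, consensus state, and header values are supplied by a message handler: CreateClient allocates the next client identifier and stores the initial states, then UpdateClient verifies a header and persists the resulting consensus state.

package example

import (
	sdk "github.com/cosmos/cosmos-sdk/types"
	"github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/keeper"
	"github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
)

// createAndUpdate creates a client from an initial client/consensus state pair
// and immediately feeds it a header, returning the generated client identifier.
func createAndUpdate(
	ctx sdk.Context, k keeper.Keeper,
	cs exported.ClientState, consState exported.ConsensusState, header exported.Header,
) (string, error) {
	clientID, err := k.CreateClient(ctx, cs, consState)
	if err != nil {
		return "", err
	}
	if err := k.UpdateClient(ctx, clientID, header); err != nil {
		return "", err
	}
	return clientID, nil
}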
diff --git a/core/02-client/keeper/client_test.go b/core/02-client/keeper/client_test.go
new file mode 100644
index 00000000..0cf5c1fe
--- /dev/null
+++ b/core/02-client/keeper/client_test.go
@@ -0,0 +1,603 @@
+package keeper_test
+
+import (
+ "fmt"
+ "time"
+
+ tmtypes "github.com/tendermint/tendermint/types"
+
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
+ localhosttypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/09-localhost/types"
+ ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+ ibctestingmock "github.com/cosmos/cosmos-sdk/x/ibc/testing/mock"
+ upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
+)
+
+func (suite *KeeperTestSuite) TestCreateClient() {
+ cases := []struct {
+ msg string
+ clientState exported.ClientState
+ expPass bool
+ }{
+ {"success", ibctmtypes.NewClientState(testChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, testClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false), true},
+ {"client type not supported", localhosttypes.NewClientState(testChainID, clienttypes.NewHeight(0, 1)), false},
+ }
+
+ for i, tc := range cases {
+
+ clientID, err := suite.keeper.CreateClient(suite.ctx, tc.clientState, suite.consensusState)
+ if tc.expPass {
+ suite.Require().NoError(err, "valid test case %d failed: %s", i, tc.msg)
+ suite.Require().NotNil(clientID, "valid test case %d failed: %s", i, tc.msg)
+ } else {
+ suite.Require().Error(err, "invalid test case %d passed: %s", i, tc.msg)
+ suite.Require().Equal("", clientID, "invalid test case %d passed: %s", i, tc.msg)
+ }
+ }
+}
+
+func (suite *KeeperTestSuite) TestUpdateClientTendermint() {
+ // Must create header creation functions since suite.header gets recreated on each test case
+ createFutureUpdateFn := func(s *KeeperTestSuite) *ibctmtypes.Header {
+ heightPlus3 := clienttypes.NewHeight(suite.header.GetHeight().GetRevisionNumber(), suite.header.GetHeight().GetRevisionHeight()+3)
+ height := suite.header.GetHeight().(clienttypes.Height)
+
+ return suite.chainA.CreateTMClientHeader(testChainID, int64(heightPlus3.RevisionHeight), height, suite.header.Header.Time.Add(time.Hour),
+ suite.valSet, suite.valSet, []tmtypes.PrivValidator{suite.privVal})
+ }
+ createPastUpdateFn := func(s *KeeperTestSuite) *ibctmtypes.Header {
+ heightMinus2 := clienttypes.NewHeight(suite.header.GetHeight().GetRevisionNumber(), suite.header.GetHeight().GetRevisionHeight()-2)
+ heightMinus4 := clienttypes.NewHeight(suite.header.GetHeight().GetRevisionNumber(), suite.header.GetHeight().GetRevisionHeight()-4)
+
+ return suite.chainA.CreateTMClientHeader(testChainID, int64(heightMinus2.RevisionHeight), heightMinus4, suite.header.Header.Time,
+ suite.valSet, suite.valSet, []tmtypes.PrivValidator{suite.privVal})
+ }
+ var (
+ updateHeader *ibctmtypes.Header
+ clientState *ibctmtypes.ClientState
+ clientID string
+ err error
+ )
+
+ cases := []struct {
+ name string
+ malleate func() error
+ expPass bool
+ }{
+ {"valid update", func() error {
+ clientState = ibctmtypes.NewClientState(testChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, testClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false)
+ clientID, err = suite.keeper.CreateClient(suite.ctx, clientState, suite.consensusState)
+
+ // store intermediate consensus state to check that trustedHeight does not need to be highest consensus state before header height
+ incrementedClientHeight := testClientHeight.Increment().(types.Height)
+ intermediateConsState := &ibctmtypes.ConsensusState{
+ Timestamp: suite.now.Add(time.Minute),
+ NextValidatorsHash: suite.valSetHash,
+ }
+ suite.keeper.SetClientConsensusState(suite.ctx, clientID, incrementedClientHeight, intermediateConsState)
+
+ clientState.LatestHeight = incrementedClientHeight
+ suite.keeper.SetClientState(suite.ctx, clientID, clientState)
+
+ updateHeader = createFutureUpdateFn(suite)
+ return err
+ }, true},
+ {"valid past update", func() error {
+ clientState = ibctmtypes.NewClientState(testChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, testClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false)
+ clientID, err = suite.keeper.CreateClient(suite.ctx, clientState, suite.consensusState)
+ suite.Require().NoError(err)
+
+ height1 := types.NewHeight(0, 1)
+
+ // store previous consensus state
+ prevConsState := &ibctmtypes.ConsensusState{
+ Timestamp: suite.past,
+ NextValidatorsHash: suite.valSetHash,
+ }
+ suite.keeper.SetClientConsensusState(suite.ctx, clientID, height1, prevConsState)
+
+ height2 := types.NewHeight(0, 2)
+
+			// store intermediate consensus state to check that trustedHeight does not need to be highest consensus state before header height
+ intermediateConsState := &ibctmtypes.ConsensusState{
+ Timestamp: suite.past.Add(time.Minute),
+ NextValidatorsHash: suite.valSetHash,
+ }
+ suite.keeper.SetClientConsensusState(suite.ctx, clientID, height2, intermediateConsState)
+
+ // updateHeader will fill in consensus state between prevConsState and suite.consState
+ // clientState should not be updated
+ updateHeader = createPastUpdateFn(suite)
+ return nil
+ }, true},
+ {"client state not found", func() error {
+ updateHeader = createFutureUpdateFn(suite)
+
+ return nil
+ }, false},
+ {"consensus state not found", func() error {
+ clientState = ibctmtypes.NewClientState(testChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, testClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false)
+ suite.keeper.SetClientState(suite.ctx, testClientID, clientState)
+ updateHeader = createFutureUpdateFn(suite)
+
+ return nil
+ }, false},
+ {"frozen client before update", func() error {
+ clientState = &ibctmtypes.ClientState{FrozenHeight: types.NewHeight(0, 1), LatestHeight: testClientHeight}
+ suite.keeper.SetClientState(suite.ctx, testClientID, clientState)
+ updateHeader = createFutureUpdateFn(suite)
+
+ return nil
+ }, false},
+ {"valid past update before client was frozen", func() error {
+ clientState = ibctmtypes.NewClientState(testChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, testClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false)
+ clientState.FrozenHeight = types.NewHeight(0, testClientHeight.RevisionHeight-1)
+ clientID, err = suite.keeper.CreateClient(suite.ctx, clientState, suite.consensusState)
+ suite.Require().NoError(err)
+
+ height1 := types.NewHeight(0, 1)
+
+ // store previous consensus state
+ prevConsState := &ibctmtypes.ConsensusState{
+ Timestamp: suite.past,
+ NextValidatorsHash: suite.valSetHash,
+ }
+ suite.keeper.SetClientConsensusState(suite.ctx, clientID, height1, prevConsState)
+
+ // updateHeader will fill in consensus state between prevConsState and suite.consState
+ // clientState should not be updated
+ updateHeader = createPastUpdateFn(suite)
+ return nil
+ }, true},
+ {"invalid header", func() error {
+ clientState = ibctmtypes.NewClientState(testChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, testClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false)
+ _, err := suite.keeper.CreateClient(suite.ctx, clientState, suite.consensusState)
+ suite.Require().NoError(err)
+ updateHeader = createPastUpdateFn(suite)
+
+ return nil
+ }, false},
+ }
+
+ for i, tc := range cases {
+ tc := tc
+ i := i
+ suite.Run(fmt.Sprintf("Case %s", tc.name), func() {
+ suite.SetupTest()
+ clientID = testClientID // must be explicitly changed
+
+ err := tc.malleate()
+ suite.Require().NoError(err)
+
+ suite.ctx = suite.ctx.WithBlockTime(updateHeader.Header.Time.Add(time.Minute))
+
+ err = suite.keeper.UpdateClient(suite.ctx, clientID, updateHeader)
+
+ if tc.expPass {
+ suite.Require().NoError(err, err)
+
+ expConsensusState := &ibctmtypes.ConsensusState{
+ Timestamp: updateHeader.GetTime(),
+ Root: commitmenttypes.NewMerkleRoot(updateHeader.Header.GetAppHash()),
+ NextValidatorsHash: updateHeader.Header.NextValidatorsHash,
+ }
+
+ newClientState, found := suite.keeper.GetClientState(suite.ctx, clientID)
+ suite.Require().True(found, "valid test case %d failed: %s", i, tc.name)
+
+ consensusState, found := suite.keeper.GetClientConsensusState(suite.ctx, clientID, updateHeader.GetHeight())
+ suite.Require().True(found, "valid test case %d failed: %s", i, tc.name)
+
+ // Determine if clientState should be updated or not
+ if updateHeader.GetHeight().GT(clientState.GetLatestHeight()) {
+ // Header Height is greater than clientState latest Height, clientState should be updated with header.GetHeight()
+ suite.Require().Equal(updateHeader.GetHeight(), newClientState.GetLatestHeight(), "clientstate height did not update")
+ } else {
+ // Update will add past consensus state, clientState should not be updated at all
+ suite.Require().Equal(clientState.GetLatestHeight(), newClientState.GetLatestHeight(), "client state height updated for past header")
+ }
+
+ suite.Require().NoError(err, "valid test case %d failed: %s", i, tc.name)
+ suite.Require().Equal(expConsensusState, consensusState, "consensus state should have been updated on case %s", tc.name)
+ } else {
+ suite.Require().Error(err, "invalid test case %d passed: %s", i, tc.name)
+ }
+ })
+ }
+}
+
+func (suite *KeeperTestSuite) TestUpdateClientLocalhost() {
+ revision := types.ParseChainID(suite.chainA.ChainID)
+ var localhostClient exported.ClientState = localhosttypes.NewClientState(suite.chainA.ChainID, types.NewHeight(revision, uint64(suite.chainA.GetContext().BlockHeight())))
+
+ ctx := suite.chainA.GetContext().WithBlockHeight(suite.chainA.GetContext().BlockHeight() + 1)
+ err := suite.chainA.App.IBCKeeper.ClientKeeper.UpdateClient(ctx, exported.Localhost, nil)
+ suite.Require().NoError(err)
+
+ clientState, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(ctx, exported.Localhost)
+ suite.Require().True(found)
+ suite.Require().Equal(localhostClient.GetLatestHeight().(types.Height).Increment(), clientState.GetLatestHeight())
+}
+
+func (suite *KeeperTestSuite) TestUpgradeClient() {
+ var (
+ upgradedClient exported.ClientState
+ upgradedConsState exported.ConsensusState
+ lastHeight exported.Height
+ clientA string
+ proofUpgradedClient, proofUpgradedConsState []byte
+ )
+
+ testCases := []struct {
+ name string
+ setup func()
+ expPass bool
+ }{
+ {
+ name: "successful upgrade",
+ setup: func() {
+
+ upgradedClient = ibctmtypes.NewClientState("newChainId", ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod+trustingPeriod, maxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false)
+ upgradedConsState = &ibctmtypes.ConsensusState{
+ NextValidatorsHash: []byte("nextValsHash"),
+ }
+
+ // last Height is at next block
+ lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1))
+
+ // zero custom fields and store in upgrade store
+ suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClient)
+ suite.chainB.App.UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsState)
+
+ // commit upgrade store changes and update clients
+
+ suite.coordinator.CommitBlock(suite.chainB)
+ err := suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ suite.Require().NoError(err)
+
+ cs, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), clientA)
+ suite.Require().True(found)
+
+ proofUpgradedClient, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight())
+ proofUpgradedConsState, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight())
+ },
+ expPass: true,
+ },
+ {
+ name: "client state not found",
+ setup: func() {
+
+ upgradedClient = ibctmtypes.NewClientState("newChainId", ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod+trustingPeriod, maxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false)
+ upgradedConsState = &ibctmtypes.ConsensusState{
+ NextValidatorsHash: []byte("nextValsHash"),
+ }
+
+ // last Height is at next block
+ lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1))
+
+ // zero custom fields and store in upgrade store
+ suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClient)
+ suite.chainB.App.UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsState)
+
+ // commit upgrade store changes and update clients
+
+ suite.coordinator.CommitBlock(suite.chainB)
+ err := suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ suite.Require().NoError(err)
+
+ cs, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), clientA)
+ suite.Require().True(found)
+
+ proofUpgradedClient, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight())
+ proofUpgradedConsState, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight())
+
+ clientA = "wrongclientid"
+ },
+ expPass: false,
+ },
+ {
+ name: "client state frozen",
+ setup: func() {
+
+ upgradedClient = ibctmtypes.NewClientState("newChainId", ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod+trustingPeriod, maxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false)
+ upgradedConsState = &ibctmtypes.ConsensusState{
+ NextValidatorsHash: []byte("nextValsHash"),
+ }
+
+ // last Height is at next block
+ lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1))
+
+ // zero custom fields and store in upgrade store
+ suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClient)
+ suite.chainB.App.UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsState)
+
+ // commit upgrade store changes and update clients
+
+ suite.coordinator.CommitBlock(suite.chainB)
+ err := suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ suite.Require().NoError(err)
+
+ cs, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), clientA)
+ suite.Require().True(found)
+
+ proofUpgradedClient, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight())
+ proofUpgradedConsState, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight())
+
+ // set frozen client in store
+ tmClient, ok := cs.(*ibctmtypes.ClientState)
+ suite.Require().True(ok)
+ tmClient.FrozenHeight = types.NewHeight(0, 1)
+ suite.chainA.App.IBCKeeper.ClientKeeper.SetClientState(suite.chainA.GetContext(), clientA, tmClient)
+ },
+ expPass: false,
+ },
+ {
+ name: "tendermint client VerifyUpgrade fails",
+ setup: func() {
+
+ upgradedClient = ibctmtypes.NewClientState("newChainId", ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod+trustingPeriod, maxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false)
+ upgradedConsState = &ibctmtypes.ConsensusState{
+ NextValidatorsHash: []byte("nextValsHash"),
+ }
+
+ // last Height is at next block
+ lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1))
+
+ // zero custom fields and store in upgrade store
+ suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClient)
+ suite.chainB.App.UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsState)
+
+ // change upgradedClient client-specified parameters
+ upgradedClient = ibctmtypes.NewClientState("wrongchainID", ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, true, true)
+
+ suite.coordinator.CommitBlock(suite.chainB)
+ err := suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ suite.Require().NoError(err)
+
+ cs, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), clientA)
+ suite.Require().True(found)
+
+ proofUpgradedClient, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight())
+ proofUpgradedConsState, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight())
+ },
+ expPass: false,
+ },
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+ clientA, _ = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+
+ tc.setup()
+
+ // Call ZeroCustomFields on upgraded clients to clear any client-chosen parameters in test-case upgradedClient
+ upgradedClient = upgradedClient.ZeroCustomFields()
+
+ err := suite.chainA.App.IBCKeeper.ClientKeeper.UpgradeClient(suite.chainA.GetContext(), clientA, upgradedClient, upgradedConsState, proofUpgradedClient, proofUpgradedConsState)
+
+ if tc.expPass {
+ suite.Require().NoError(err, "verify upgrade failed on valid case: %s", tc.name)
+ } else {
+ suite.Require().Error(err, "verify upgrade passed on invalid case: %s", tc.name)
+ }
+ }
+
+}
+
+func (suite *KeeperTestSuite) TestCheckMisbehaviourAndUpdateState() {
+ var (
+ clientID string
+ err error
+ )
+
+ altPrivVal := ibctestingmock.NewPV()
+ altPubKey, err := altPrivVal.GetPubKey()
+ suite.Require().NoError(err)
+ altVal := tmtypes.NewValidator(altPubKey, 4)
+
+ // Set valSet here with suite.valSet so it doesn't get reset on each testcase
+ valSet := suite.valSet
+ valsHash := valSet.Hash()
+
+ // Create bothValSet with both suite validator and altVal
+ bothValSet := tmtypes.NewValidatorSet(append(suite.valSet.Validators, altVal))
+ bothValsHash := bothValSet.Hash()
+ // Create alternative validator set with only altVal
+ altValSet := tmtypes.NewValidatorSet([]*tmtypes.Validator{altVal})
+
+ // Create signer array and ensure it is in same order as bothValSet
+ _, suiteVal := suite.valSet.GetByIndex(0)
+ bothSigners := ibctesting.CreateSortedSignerArray(altPrivVal, suite.privVal, altVal, suiteVal)
+
+ altSigners := []tmtypes.PrivValidator{altPrivVal}
+
+ // Create valid Misbehaviour by making a duplicate header that signs over different block time
+ altTime := suite.ctx.BlockTime().Add(time.Minute)
+
+ heightPlus3 := types.NewHeight(0, height+3)
+ heightPlus5 := types.NewHeight(0, height+5)
+
+ testCases := []struct {
+ name string
+ misbehaviour *ibctmtypes.Misbehaviour
+ malleate func() error
+ expPass bool
+ }{
+ {
+ "trusting period misbehavior should pass",
+ &ibctmtypes.Misbehaviour{
+ Header1: suite.chainA.CreateTMClientHeader(testChainID, int64(testClientHeight.RevisionHeight), testClientHeight, altTime, bothValSet, bothValSet, bothSigners),
+ Header2: suite.chainA.CreateTMClientHeader(testChainID, int64(testClientHeight.RevisionHeight), testClientHeight, suite.ctx.BlockTime(), bothValSet, bothValSet, bothSigners),
+ ClientId: clientID,
+ },
+ func() error {
+ suite.consensusState.NextValidatorsHash = bothValsHash
+ clientState := ibctmtypes.NewClientState(testChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, testClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false)
+ clientID, err = suite.keeper.CreateClient(suite.ctx, clientState, suite.consensusState)
+
+ return err
+ },
+ true,
+ },
+ {
+ "misbehavior at later height should pass",
+ &ibctmtypes.Misbehaviour{
+ Header1: suite.chainA.CreateTMClientHeader(testChainID, int64(heightPlus5.RevisionHeight), testClientHeight, altTime, bothValSet, valSet, bothSigners),
+ Header2: suite.chainA.CreateTMClientHeader(testChainID, int64(heightPlus5.RevisionHeight), testClientHeight, suite.ctx.BlockTime(), bothValSet, valSet, bothSigners),
+ ClientId: clientID,
+ },
+ func() error {
+ suite.consensusState.NextValidatorsHash = valsHash
+ clientState := ibctmtypes.NewClientState(testChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, testClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false)
+ clientID, err = suite.keeper.CreateClient(suite.ctx, clientState, suite.consensusState)
+
+ // store intermediate consensus state to check that trustedHeight does not need to be highest consensus state before header height
+ intermediateConsState := &ibctmtypes.ConsensusState{
+ Timestamp: suite.now.Add(time.Minute),
+ NextValidatorsHash: suite.valSetHash,
+ }
+ suite.keeper.SetClientConsensusState(suite.ctx, clientID, heightPlus3, intermediateConsState)
+
+ clientState.LatestHeight = heightPlus3
+ suite.keeper.SetClientState(suite.ctx, clientID, clientState)
+
+ return err
+ },
+ true,
+ },
+ {
+ "misbehavior at later height with different trusted heights should pass",
+ &ibctmtypes.Misbehaviour{
+ Header1: suite.chainA.CreateTMClientHeader(testChainID, int64(heightPlus5.RevisionHeight), testClientHeight, altTime, bothValSet, valSet, bothSigners),
+ Header2: suite.chainA.CreateTMClientHeader(testChainID, int64(heightPlus5.RevisionHeight), heightPlus3, suite.ctx.BlockTime(), bothValSet, bothValSet, bothSigners),
+ ClientId: clientID,
+ },
+ func() error {
+ suite.consensusState.NextValidatorsHash = valsHash
+ clientState := ibctmtypes.NewClientState(testChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, testClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false)
+ clientID, err = suite.keeper.CreateClient(suite.ctx, clientState, suite.consensusState)
+
+ // store trusted consensus state for Header2
+ intermediateConsState := &ibctmtypes.ConsensusState{
+ Timestamp: suite.now.Add(time.Minute),
+ NextValidatorsHash: bothValsHash,
+ }
+ suite.keeper.SetClientConsensusState(suite.ctx, clientID, heightPlus3, intermediateConsState)
+
+ clientState.LatestHeight = heightPlus3
+ suite.keeper.SetClientState(suite.ctx, clientID, clientState)
+
+ return err
+ },
+ true,
+ },
+ {
+ "trusted ConsensusState1 not found",
+ &ibctmtypes.Misbehaviour{
+ Header1: suite.chainA.CreateTMClientHeader(testChainID, int64(heightPlus5.RevisionHeight), heightPlus3, altTime, bothValSet, bothValSet, bothSigners),
+ Header2: suite.chainA.CreateTMClientHeader(testChainID, int64(heightPlus5.RevisionHeight), testClientHeight, suite.ctx.BlockTime(), bothValSet, valSet, bothSigners),
+ ClientId: clientID,
+ },
+ func() error {
+ suite.consensusState.NextValidatorsHash = valsHash
+ clientState := ibctmtypes.NewClientState(testChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, testClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false)
+ clientID, err = suite.keeper.CreateClient(suite.ctx, clientState, suite.consensusState)
+ // intermediate consensus state at height + 3 is not created
+ return err
+ },
+ false,
+ },
+ {
+ "trusted ConsensusState2 not found",
+ &ibctmtypes.Misbehaviour{
+ Header1: suite.chainA.CreateTMClientHeader(testChainID, int64(heightPlus5.RevisionHeight), testClientHeight, altTime, bothValSet, valSet, bothSigners),
+ Header2: suite.chainA.CreateTMClientHeader(testChainID, int64(heightPlus5.RevisionHeight), heightPlus3, suite.ctx.BlockTime(), bothValSet, bothValSet, bothSigners),
+ ClientId: clientID,
+ },
+ func() error {
+ suite.consensusState.NextValidatorsHash = valsHash
+ clientState := ibctmtypes.NewClientState(testChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, testClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false)
+ clientID, err = suite.keeper.CreateClient(suite.ctx, clientState, suite.consensusState)
+ // intermediate consensus state at height + 3 is not created
+ return err
+ },
+ false,
+ },
+ {
+ "client state not found",
+ &ibctmtypes.Misbehaviour{},
+ func() error { return nil },
+ false,
+ },
+ {
+ "client already frozen at earlier height",
+ &ibctmtypes.Misbehaviour{
+ Header1: suite.chainA.CreateTMClientHeader(testChainID, int64(testClientHeight.RevisionHeight), testClientHeight, altTime, bothValSet, bothValSet, bothSigners),
+ Header2: suite.chainA.CreateTMClientHeader(testChainID, int64(testClientHeight.RevisionHeight), testClientHeight, suite.ctx.BlockTime(), bothValSet, bothValSet, bothSigners),
+ ClientId: clientID,
+ },
+ func() error {
+ suite.consensusState.NextValidatorsHash = bothValsHash
+ clientState := ibctmtypes.NewClientState(testChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, testClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false)
+ clientID, err = suite.keeper.CreateClient(suite.ctx, clientState, suite.consensusState)
+
+ clientState.FrozenHeight = types.NewHeight(0, 1)
+ suite.keeper.SetClientState(suite.ctx, clientID, clientState)
+
+ return err
+ },
+ false,
+ },
+ {
+ "misbehaviour check failed",
+ &ibctmtypes.Misbehaviour{
+ Header1: suite.chainA.CreateTMClientHeader(testChainID, int64(testClientHeight.RevisionHeight), testClientHeight, altTime, bothValSet, bothValSet, bothSigners),
+ Header2: suite.chainA.CreateTMClientHeader(testChainID, int64(testClientHeight.RevisionHeight), testClientHeight, suite.ctx.BlockTime(), altValSet, bothValSet, altSigners),
+ ClientId: clientID,
+ },
+ func() error {
+ clientState := ibctmtypes.NewClientState(testChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, testClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false)
+ clientID, err = suite.keeper.CreateClient(suite.ctx, clientState, suite.consensusState)
+
+ return err
+ },
+ false,
+ },
+ }
+
+ for i, tc := range testCases {
+ tc := tc
+ i := i
+
+ suite.Run(tc.name, func() {
+ suite.SetupTest() // reset
+ clientID = testClientID // must be explicitly changed
+
+ err := tc.malleate()
+ suite.Require().NoError(err)
+
+ tc.misbehaviour.ClientId = clientID
+
+ err = suite.keeper.CheckMisbehaviourAndUpdateState(suite.ctx, tc.misbehaviour)
+
+ if tc.expPass {
+ suite.Require().NoError(err, "valid test case %d failed: %s", i, tc.name)
+
+ clientState, found := suite.keeper.GetClientState(suite.ctx, clientID)
+ suite.Require().True(found, "valid test case %d failed: %s", i, tc.name)
+ suite.Require().True(clientState.IsFrozen(), "valid test case %d failed: %s", i, tc.name)
+ suite.Require().Equal(tc.misbehaviour.GetHeight(), clientState.GetFrozenHeight(),
+					"valid test case %d failed: %s. Expected FrozenHeight %s got %s", i, tc.name, tc.misbehaviour.GetHeight(), clientState.GetFrozenHeight())
+ } else {
+ suite.Require().Error(err, "invalid test case %d passed: %s", i, tc.name)
+ }
+ })
+ }
+}
diff --git a/core/02-client/keeper/encoding.go b/core/02-client/keeper/encoding.go
new file mode 100644
index 00000000..f2a07b86
--- /dev/null
+++ b/core/02-client/keeper/encoding.go
@@ -0,0 +1,42 @@
+package keeper
+
+import (
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+)
+
+// UnmarshalClientState attempts to decode and return a ClientState object from
+// raw encoded bytes.
+func (k Keeper) UnmarshalClientState(bz []byte) (exported.ClientState, error) {
+ return types.UnmarshalClientState(k.cdc, bz)
+}
+
+// MustUnmarshalClientState attempts to decode and return a ClientState object from
+// raw encoded bytes. It panics on error.
+func (k Keeper) MustUnmarshalClientState(bz []byte) exported.ClientState {
+ return types.MustUnmarshalClientState(k.cdc, bz)
+}
+
+// UnmarshalConsensusState attempts to decode and return a ConsensusState object from
+// raw encoded bytes.
+func (k Keeper) UnmarshalConsensusState(bz []byte) (exported.ConsensusState, error) {
+ return types.UnmarshalConsensusState(k.cdc, bz)
+}
+
+// MustUnmarshalConsensusState attempts to decode and return a ConsensusState object from
+// raw encoded bytes. It panics on error.
+func (k Keeper) MustUnmarshalConsensusState(bz []byte) exported.ConsensusState {
+ return types.MustUnmarshalConsensusState(k.cdc, bz)
+}
+
+// MustMarshalClientState attempts to encode a ClientState object and returns the
+// raw encoded bytes. It panics on error.
+func (k Keeper) MustMarshalClientState(clientState exported.ClientState) []byte {
+ return types.MustMarshalClientState(k.cdc, clientState)
+}
+
+// MustMarshalConsensusState attempts to encode a ConsensusState object and returns the
+// raw encoded bytes. It panics on error.
+func (k Keeper) MustMarshalConsensusState(consensusState exported.ConsensusState) []byte {
+ return types.MustMarshalConsensusState(k.cdc, consensusState)
+}
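A small round-trip sketch through the helpers above, assuming the keeper's codec has the concrete client state type registered; a failure here would point at a missing interface registration rather than at the store.

package example

import (
	"github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/keeper"
	"github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
)

// roundTripClientState marshals a client state into the Any-wrapped binary
// form used by the store and decodes it back.
func roundTripClientState(k keeper.Keeper, cs exported.ClientState) (exported.ClientState, error) {
	bz := k.MustMarshalClientState(cs)
	return k.UnmarshalClientState(bz)
}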
diff --git a/core/02-client/keeper/grpc_query.go b/core/02-client/keeper/grpc_query.go
new file mode 100644
index 00000000..21344277
--- /dev/null
+++ b/core/02-client/keeper/grpc_query.go
@@ -0,0 +1,199 @@
+package keeper
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "sort"
+ "strings"
+
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+
+ "github.com/cosmos/cosmos-sdk/store/prefix"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ "github.com/cosmos/cosmos-sdk/types/query"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+)
+
+var _ types.QueryServer = Keeper{}
+
+// ClientState implements the Query/ClientState gRPC method
+func (q Keeper) ClientState(c context.Context, req *types.QueryClientStateRequest) (*types.QueryClientStateResponse, error) {
+ if req == nil {
+ return nil, status.Error(codes.InvalidArgument, "empty request")
+ }
+
+ if err := host.ClientIdentifierValidator(req.ClientId); err != nil {
+ return nil, status.Error(codes.InvalidArgument, err.Error())
+ }
+
+ ctx := sdk.UnwrapSDKContext(c)
+ clientState, found := q.GetClientState(ctx, req.ClientId)
+ if !found {
+ return nil, status.Error(
+ codes.NotFound,
+ sdkerrors.Wrap(types.ErrClientNotFound, req.ClientId).Error(),
+ )
+ }
+
+ any, err := types.PackClientState(clientState)
+ if err != nil {
+ return nil, status.Error(codes.Internal, err.Error())
+ }
+
+ proofHeight := types.GetSelfHeight(ctx)
+ return &types.QueryClientStateResponse{
+ ClientState: any,
+ ProofHeight: proofHeight,
+ }, nil
+}
+
+// ClientStates implements the Query/ClientStates gRPC method
+func (q Keeper) ClientStates(c context.Context, req *types.QueryClientStatesRequest) (*types.QueryClientStatesResponse, error) {
+ if req == nil {
+ return nil, status.Error(codes.InvalidArgument, "empty request")
+ }
+
+ ctx := sdk.UnwrapSDKContext(c)
+
+ clientStates := types.IdentifiedClientStates{}
+ store := prefix.NewStore(ctx.KVStore(q.storeKey), host.KeyClientStorePrefix)
+
+ pageRes, err := query.Paginate(store, req.Pagination, func(key, value []byte) error {
+ keySplit := strings.Split(string(key), "/")
+ if keySplit[len(keySplit)-1] != "clientState" {
+ return nil
+ }
+
+ clientState, err := q.UnmarshalClientState(value)
+ if err != nil {
+ return err
+ }
+
+ clientID := keySplit[1]
+ if err := host.ClientIdentifierValidator(clientID); err != nil {
+ return err
+ }
+
+ identifiedClient := types.NewIdentifiedClientState(clientID, clientState)
+ clientStates = append(clientStates, identifiedClient)
+ return nil
+ })
+
+ if err != nil {
+ return nil, err
+ }
+
+ sort.Sort(clientStates)
+
+ return &types.QueryClientStatesResponse{
+ ClientStates: clientStates,
+ Pagination: pageRes,
+ }, nil
+}
+
+// ConsensusState implements the Query/ConsensusState gRPC method
+func (q Keeper) ConsensusState(c context.Context, req *types.QueryConsensusStateRequest) (*types.QueryConsensusStateResponse, error) {
+ if req == nil {
+ return nil, status.Error(codes.InvalidArgument, "empty request")
+ }
+
+ if err := host.ClientIdentifierValidator(req.ClientId); err != nil {
+ return nil, status.Error(codes.InvalidArgument, err.Error())
+ }
+
+ ctx := sdk.UnwrapSDKContext(c)
+
+ var (
+ consensusState exported.ConsensusState
+ found bool
+ )
+
+ height := types.NewHeight(req.RevisionNumber, req.RevisionHeight)
+ if req.LatestHeight {
+ consensusState, found = q.GetLatestClientConsensusState(ctx, req.ClientId)
+ } else {
+ if req.RevisionHeight == 0 {
+ return nil, status.Error(codes.InvalidArgument, "consensus state height cannot be 0")
+ }
+
+ consensusState, found = q.GetClientConsensusState(ctx, req.ClientId, height)
+ }
+
+ if !found {
+ return nil, status.Error(
+ codes.NotFound,
+ sdkerrors.Wrapf(types.ErrConsensusStateNotFound, "client-id: %s, height: %s", req.ClientId, height).Error(),
+ )
+ }
+
+ any, err := types.PackConsensusState(consensusState)
+ if err != nil {
+ return nil, status.Error(codes.Internal, err.Error())
+ }
+
+ proofHeight := types.GetSelfHeight(ctx)
+ return &types.QueryConsensusStateResponse{
+ ConsensusState: any,
+ ProofHeight: proofHeight,
+ }, nil
+}
+
+// ConsensusStates implements the Query/ConsensusStates gRPC method
+func (q Keeper) ConsensusStates(c context.Context, req *types.QueryConsensusStatesRequest) (*types.QueryConsensusStatesResponse, error) {
+ if req == nil {
+ return nil, status.Error(codes.InvalidArgument, "empty request")
+ }
+
+ if err := host.ClientIdentifierValidator(req.ClientId); err != nil {
+ return nil, status.Error(codes.InvalidArgument, err.Error())
+ }
+
+ ctx := sdk.UnwrapSDKContext(c)
+
+ consensusStates := []types.ConsensusStateWithHeight{}
+ store := prefix.NewStore(ctx.KVStore(q.storeKey), host.FullClientKey(req.ClientId, []byte(fmt.Sprintf("%s/", host.KeyConsensusStatePrefix))))
+
+ pageRes, err := query.FilteredPaginate(store, req.Pagination, func(key, value []byte, accumulate bool) (bool, error) {
+ // filter any metadata stored under consensus state key
+ if bytes.Contains(key, []byte("/")) {
+ return false, nil
+ }
+
+ height, err := types.ParseHeight(string(key))
+ if err != nil {
+ return false, err
+ }
+
+ consensusState, err := q.UnmarshalConsensusState(value)
+ if err != nil {
+ return false, err
+ }
+
+ consensusStates = append(consensusStates, types.NewConsensusStateWithHeight(height, consensusState))
+ return true, nil
+ })
+
+ if err != nil {
+ return nil, err
+ }
+
+ return &types.QueryConsensusStatesResponse{
+ ConsensusStates: consensusStates,
+ Pagination: pageRes,
+ }, nil
+}
+
+// ClientParams implements the Query/ClientParams gRPC method
+func (q Keeper) ClientParams(c context.Context, _ *types.QueryClientParamsRequest) (*types.QueryClientParamsResponse, error) {
+ ctx := sdk.UnwrapSDKContext(c)
+ params := q.GetParams(ctx)
+
+ return &types.QueryClientParamsResponse{
+		Params: &params,
+ }, nil
+}
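A client-side sketch of consuming the Query/ClientStates service implemented above through the generated query client; the gRPC connection setup and node endpoint are assumed to exist elsewhere, and the page limit is only an example value.

package example

import (
	"context"
	"fmt"

	"google.golang.org/grpc"

	"github.com/cosmos/cosmos-sdk/types/query"
	"github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
)

// listClientStates fetches one page of stored client states and prints their
// identifiers; the server returns results sorted by client ID.
func listClientStates(conn *grpc.ClientConn) error {
	queryClient := types.NewQueryClient(conn)
	res, err := queryClient.ClientStates(context.Background(), &types.QueryClientStatesRequest{
		Pagination: &query.PageRequest{Limit: 100, CountTotal: true},
	})
	if err != nil {
		return err
	}
	for _, ics := range res.ClientStates {
		fmt.Println(ics.ClientId)
	}
	return nil
}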
diff --git a/core/02-client/keeper/grpc_query_test.go b/core/02-client/keeper/grpc_query_test.go
new file mode 100644
index 00000000..5e361a76
--- /dev/null
+++ b/core/02-client/keeper/grpc_query_test.go
@@ -0,0 +1,381 @@
+package keeper_test
+
+import (
+ "fmt"
+ "time"
+
+ codectypes "github.com/cosmos/cosmos-sdk/codec/types"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+
+ "github.com/cosmos/cosmos-sdk/types/query"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
+ ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+)
+
+func (suite *KeeperTestSuite) TestQueryClientState() {
+ var (
+ req *types.QueryClientStateRequest
+ expClientState *codectypes.Any
+ )
+
+ testCases := []struct {
+ msg string
+ malleate func()
+ expPass bool
+ }{
+ {"invalid clientID",
+ func() {
+ req = &types.QueryClientStateRequest{}
+ },
+ false,
+ },
+ {"client not found",
+ func() {
+ req = &types.QueryClientStateRequest{
+ ClientId: testClientID,
+ }
+ },
+ false,
+ },
+ {
+ "success",
+ func() {
+ clientState := ibctmtypes.NewClientState(testChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, types.ZeroHeight(), commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false)
+ suite.keeper.SetClientState(suite.ctx, testClientID, clientState)
+
+ var err error
+ expClientState, err = types.PackClientState(clientState)
+ suite.Require().NoError(err)
+
+ req = &types.QueryClientStateRequest{
+ ClientId: testClientID,
+ }
+ },
+ true,
+ },
+ }
+
+ for _, tc := range testCases {
+ suite.Run(fmt.Sprintf("Case %s", tc.msg), func() {
+ suite.SetupTest() // reset
+
+ tc.malleate()
+ ctx := sdk.WrapSDKContext(suite.ctx)
+ res, err := suite.queryClient.ClientState(ctx, req)
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ suite.Require().NotNil(res)
+ suite.Require().Equal(expClientState, res.ClientState)
+
+ // ensure UnpackInterfaces is defined
+ cachedValue := res.ClientState.GetCachedValue()
+ suite.Require().NotNil(cachedValue)
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+}
+
+func (suite *KeeperTestSuite) TestQueryClientStates() {
+ var (
+ req *types.QueryClientStatesRequest
+ expClientStates = types.IdentifiedClientStates{}
+ )
+
+ testCases := []struct {
+ msg string
+ malleate func()
+ expPass bool
+ }{
+ {
+ "empty pagination",
+ func() {
+ req = &types.QueryClientStatesRequest{}
+ },
+ true,
+ },
+ {
+ "success, no results",
+ func() {
+ req = &types.QueryClientStatesRequest{
+ Pagination: &query.PageRequest{
+ Limit: 3,
+ CountTotal: true,
+ },
+ }
+ },
+ true,
+ },
+ {
+ "success",
+ func() {
+ clientA1, _ := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+ clientA2, _ := suite.coordinator.CreateClient(suite.chainA, suite.chainB, exported.Tendermint)
+
+ clientStateA1 := suite.chainA.GetClientState(clientA1)
+ clientStateA2 := suite.chainA.GetClientState(clientA2)
+
+ idcs := types.NewIdentifiedClientState(clientA1, clientStateA1)
+ idcs2 := types.NewIdentifiedClientState(clientA2, clientStateA2)
+
+ // order is sorted by client id, localhost is last
+ expClientStates = types.IdentifiedClientStates{idcs, idcs2}.Sort()
+ req = &types.QueryClientStatesRequest{
+ Pagination: &query.PageRequest{
+ Limit: 7,
+ CountTotal: true,
+ },
+ }
+ },
+ true,
+ },
+ }
+
+ for _, tc := range testCases {
+ suite.Run(fmt.Sprintf("Case %s", tc.msg), func() {
+ suite.SetupTest() // reset
+ expClientStates = nil
+
+ tc.malleate()
+
+ // always add localhost which is created by default in init genesis
+ localhostClientState := suite.chainA.GetClientState(exported.Localhost)
+ identifiedLocalhost := types.NewIdentifiedClientState(exported.Localhost, localhostClientState)
+ expClientStates = append(expClientStates, identifiedLocalhost)
+
+ ctx := sdk.WrapSDKContext(suite.chainA.GetContext())
+
+ res, err := suite.chainA.QueryServer.ClientStates(ctx, req)
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ suite.Require().NotNil(res)
+ suite.Require().Equal(expClientStates.Sort(), res.ClientStates)
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+}
+
+func (suite *KeeperTestSuite) TestQueryConsensusState() {
+ var (
+ req *types.QueryConsensusStateRequest
+ expConsensusState *codectypes.Any
+ )
+
+ testCases := []struct {
+ msg string
+ malleate func()
+ expPass bool
+ }{
+ {
+ "invalid clientID",
+ func() {
+ req = &types.QueryConsensusStateRequest{}
+ },
+ false,
+ },
+ {
+ "invalid height",
+ func() {
+ req = &types.QueryConsensusStateRequest{
+ ClientId: testClientID,
+ RevisionNumber: 0,
+ RevisionHeight: 0,
+ LatestHeight: false,
+ }
+ },
+ false,
+ },
+ {
+ "consensus state not found",
+ func() {
+ req = &types.QueryConsensusStateRequest{
+ ClientId: testClientID,
+ LatestHeight: true,
+ }
+ },
+ false,
+ },
+ {
+ "success latest height",
+ func() {
+ clientState := ibctmtypes.NewClientState(testChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, testClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false)
+ cs := ibctmtypes.NewConsensusState(
+ suite.consensusState.Timestamp, commitmenttypes.NewMerkleRoot([]byte("hash1")), nil,
+ )
+ suite.keeper.SetClientState(suite.ctx, testClientID, clientState)
+ suite.keeper.SetClientConsensusState(suite.ctx, testClientID, testClientHeight, cs)
+
+ var err error
+ expConsensusState, err = types.PackConsensusState(cs)
+ suite.Require().NoError(err)
+
+ req = &types.QueryConsensusStateRequest{
+ ClientId: testClientID,
+ LatestHeight: true,
+ }
+ },
+ true,
+ },
+ {
+ "success with height",
+ func() {
+ cs := ibctmtypes.NewConsensusState(
+ suite.consensusState.Timestamp, commitmenttypes.NewMerkleRoot([]byte("hash1")), nil,
+ )
+ suite.keeper.SetClientConsensusState(suite.ctx, testClientID, testClientHeight, cs)
+
+ var err error
+ expConsensusState, err = types.PackConsensusState(cs)
+ suite.Require().NoError(err)
+
+ req = &types.QueryConsensusStateRequest{
+ ClientId: testClientID,
+ RevisionNumber: 0,
+ RevisionHeight: height,
+ }
+ },
+ true,
+ },
+ }
+
+ for _, tc := range testCases {
+ suite.Run(fmt.Sprintf("Case %s", tc.msg), func() {
+ suite.SetupTest() // reset
+
+ tc.malleate()
+ ctx := sdk.WrapSDKContext(suite.ctx)
+ res, err := suite.queryClient.ConsensusState(ctx, req)
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ suite.Require().NotNil(res)
+ suite.Require().Equal(expConsensusState, res.ConsensusState)
+
+ // ensure UnpackInterfaces is defined
+ cachedValue := res.ConsensusState.GetCachedValue()
+ suite.Require().NotNil(cachedValue)
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+}
+
+func (suite *KeeperTestSuite) TestQueryConsensusStates() {
+ var (
+ req *types.QueryConsensusStatesRequest
+ expConsensusStates = []types.ConsensusStateWithHeight{}
+ )
+
+ testCases := []struct {
+ msg string
+ malleate func()
+ expPass bool
+ }{
+ {
+ "invalid client identifier",
+ func() {
+ req = &types.QueryConsensusStatesRequest{}
+ },
+ false,
+ },
+ {
+ "empty pagination",
+ func() {
+ req = &types.QueryConsensusStatesRequest{
+ ClientId: testClientID,
+ }
+ },
+ true,
+ },
+ {
+ "success, no results",
+ func() {
+ req = &types.QueryConsensusStatesRequest{
+ ClientId: testClientID,
+ Pagination: &query.PageRequest{
+ Limit: 3,
+ CountTotal: true,
+ },
+ }
+ },
+ true,
+ },
+ {
+ "success",
+ func() {
+ cs := ibctmtypes.NewConsensusState(
+ suite.consensusState.Timestamp, commitmenttypes.NewMerkleRoot([]byte("hash1")), nil,
+ )
+ cs2 := ibctmtypes.NewConsensusState(
+ suite.consensusState.Timestamp.Add(time.Second), commitmenttypes.NewMerkleRoot([]byte("hash2")), nil,
+ )
+
+ clientState := ibctmtypes.NewClientState(
+ testChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, testClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false,
+ )
+
+ // Use CreateClient to ensure that processedTime metadata gets stored.
+ clientId, err := suite.keeper.CreateClient(suite.ctx, clientState, cs)
+ suite.Require().NoError(err)
+ suite.keeper.SetClientConsensusState(suite.ctx, clientId, testClientHeight.Increment(), cs2)
+
+				// the response lists consensus states in ascending height order
+ expConsensusStates = []types.ConsensusStateWithHeight{
+ types.NewConsensusStateWithHeight(testClientHeight, cs),
+ types.NewConsensusStateWithHeight(testClientHeight.Increment().(types.Height), cs2),
+ }
+ req = &types.QueryConsensusStatesRequest{
+ ClientId: clientId,
+ Pagination: &query.PageRequest{
+ Limit: 3,
+ CountTotal: true,
+ },
+ }
+ },
+ true,
+ },
+ }
+
+ for _, tc := range testCases {
+ suite.Run(fmt.Sprintf("Case %s", tc.msg), func() {
+ suite.SetupTest() // reset
+
+ tc.malleate()
+ ctx := sdk.WrapSDKContext(suite.ctx)
+
+ res, err := suite.queryClient.ConsensusStates(ctx, req)
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ suite.Require().NotNil(res)
+ suite.Require().Equal(len(expConsensusStates), len(res.ConsensusStates))
+ for i := range expConsensusStates {
+ suite.Require().NotNil(res.ConsensusStates[i])
+ suite.Require().Equal(expConsensusStates[i], res.ConsensusStates[i])
+
+ // ensure UnpackInterfaces is defined
+ cachedValue := res.ConsensusStates[i].ConsensusState.GetCachedValue()
+ suite.Require().NotNil(cachedValue)
+ }
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+}
+
+func (suite *KeeperTestSuite) TestQueryParams() {
+ ctx := sdk.WrapSDKContext(suite.chainA.GetContext())
+ expParams := types.DefaultParams()
+ res, _ := suite.queryClient.ClientParams(ctx, &types.QueryClientParamsRequest{})
+ suite.Require().Equal(&expParams, res.Params)
+}
diff --git a/core/02-client/keeper/keeper.go b/core/02-client/keeper/keeper.go
new file mode 100644
index 00000000..67c5c065
--- /dev/null
+++ b/core/02-client/keeper/keeper.go
@@ -0,0 +1,367 @@
+package keeper
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+
+ "github.com/tendermint/tendermint/libs/log"
+ "github.com/tendermint/tendermint/light"
+
+ "github.com/cosmos/cosmos-sdk/codec"
+ "github.com/cosmos/cosmos-sdk/store/prefix"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
+ host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
+ paramtypes "github.com/cosmos/cosmos-sdk/x/params/types"
+ upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
+)
+
+// Keeper represents a type that grants read and write permissions to any client
+// state information
+type Keeper struct {
+ storeKey sdk.StoreKey
+ cdc codec.BinaryMarshaler
+ paramSpace paramtypes.Subspace
+ stakingKeeper types.StakingKeeper
+}
+
+// NewKeeper creates a new Keeper instance
+func NewKeeper(cdc codec.BinaryMarshaler, key sdk.StoreKey, paramSpace paramtypes.Subspace, sk types.StakingKeeper) Keeper {
+ // set KeyTable if it has not already been set
+ if !paramSpace.HasKeyTable() {
+ paramSpace = paramSpace.WithKeyTable(types.ParamKeyTable())
+ }
+
+ return Keeper{
+ storeKey: key,
+ cdc: cdc,
+ paramSpace: paramSpace,
+ stakingKeeper: sk,
+ }
+}
+
+// Logger returns a module-specific logger.
+func (k Keeper) Logger(ctx sdk.Context) log.Logger {
+ return ctx.Logger().With("module", "x/"+host.ModuleName+"/"+types.SubModuleName)
+}
+
+// GenerateClientIdentifier returns the next client identifier.
+func (k Keeper) GenerateClientIdentifier(ctx sdk.Context, clientType string) string {
+ nextClientSeq := k.GetNextClientSequence(ctx)
+ clientID := types.FormatClientIdentifier(clientType, nextClientSeq)
+
+ nextClientSeq++
+ k.SetNextClientSequence(ctx, nextClientSeq)
+ return clientID
+}
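+
+// Example (illustrative): client identifiers combine the client type with a
+// monotonically increasing sequence. Assuming FormatClientIdentifier joins the
+// two with a dash and the next-sequence counter has been initialized (e.g. in
+// genesis), successive calls yield:
+//
+//   id0 := k.GenerateClientIdentifier(ctx, "07-tendermint") // "07-tendermint-0"
+//   id1 := k.GenerateClientIdentifier(ctx, "07-tendermint") // "07-tendermint-1"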
+
+// GetClientState gets a particular client from the store
+func (k Keeper) GetClientState(ctx sdk.Context, clientID string) (exported.ClientState, bool) {
+ store := k.ClientStore(ctx, clientID)
+ bz := store.Get(host.ClientStateKey())
+ if bz == nil {
+ return nil, false
+ }
+
+ clientState := k.MustUnmarshalClientState(bz)
+ return clientState, true
+}
+
+// SetClientState sets a particular Client to the store
+func (k Keeper) SetClientState(ctx sdk.Context, clientID string, clientState exported.ClientState) {
+ store := k.ClientStore(ctx, clientID)
+ store.Set(host.ClientStateKey(), k.MustMarshalClientState(clientState))
+}
+
+// GetClientConsensusState gets the stored consensus state from a client at a given height.
+func (k Keeper) GetClientConsensusState(ctx sdk.Context, clientID string, height exported.Height) (exported.ConsensusState, bool) {
+ store := k.ClientStore(ctx, clientID)
+ bz := store.Get(host.ConsensusStateKey(height))
+ if bz == nil {
+ return nil, false
+ }
+
+ consensusState := k.MustUnmarshalConsensusState(bz)
+ return consensusState, true
+}
+
+// SetClientConsensusState sets a ConsensusState to a particular client at the given
+// height
+func (k Keeper) SetClientConsensusState(ctx sdk.Context, clientID string, height exported.Height, consensusState exported.ConsensusState) {
+ store := k.ClientStore(ctx, clientID)
+ store.Set(host.ConsensusStateKey(height), k.MustMarshalConsensusState(consensusState))
+}
+
+// GetNextClientSequence gets the next client sequence from the store.
+func (k Keeper) GetNextClientSequence(ctx sdk.Context) uint64 {
+ store := ctx.KVStore(k.storeKey)
+ bz := store.Get([]byte(types.KeyNextClientSequence))
+ if bz == nil {
+ panic("next client sequence is nil")
+ }
+
+ return sdk.BigEndianToUint64(bz)
+}
+
+// SetNextClientSequence sets the next client sequence to the store.
+func (k Keeper) SetNextClientSequence(ctx sdk.Context, sequence uint64) {
+ store := ctx.KVStore(k.storeKey)
+ bz := sdk.Uint64ToBigEndian(sequence)
+ store.Set([]byte(types.KeyNextClientSequence), bz)
+}
+
+// IterateConsensusStates provides an iterator over all stored consensus state
+// objects. For each consensus state, cb will be called. If cb returns true,
+// the iterator will close and stop.
+func (k Keeper) IterateConsensusStates(ctx sdk.Context, cb func(clientID string, cs types.ConsensusStateWithHeight) bool) {
+ store := ctx.KVStore(k.storeKey)
+ iterator := sdk.KVStorePrefixIterator(store, host.KeyClientStorePrefix)
+
+ defer iterator.Close()
+ for ; iterator.Valid(); iterator.Next() {
+ keySplit := strings.Split(string(iterator.Key()), "/")
+ // consensus key is in the format "clients/<clientID>/consensusStates/<height>"
+ if len(keySplit) != 4 || keySplit[2] != string(host.KeyConsensusStatePrefix) {
+ continue
+ }
+ clientID := keySplit[1]
+ height := types.MustParseHeight(keySplit[3])
+ consensusState := k.MustUnmarshalConsensusState(iterator.Value())
+
+ consensusStateWithHeight := types.NewConsensusStateWithHeight(height, consensusState)
+
+ if cb(clientID, consensusStateWithHeight) {
+ break
+ }
+ }
+}
+
+// GetAllGenesisClients returns all the clients in state with their client ids returned as IdentifiedClientState
+func (k Keeper) GetAllGenesisClients(ctx sdk.Context) types.IdentifiedClientStates {
+ var genClients types.IdentifiedClientStates
+ k.IterateClients(ctx, func(clientID string, cs exported.ClientState) bool {
+ genClients = append(genClients, types.NewIdentifiedClientState(clientID, cs))
+ return false
+ })
+
+ return genClients.Sort()
+}
+
+// GetAllClientMetadata will take a list of IdentifiedClientState and return a list
+// of IdentifiedGenesisMetadata necessary for exporting and importing client metadata
+// into the client store.
+func (k Keeper) GetAllClientMetadata(ctx sdk.Context, genClients []types.IdentifiedClientState) ([]types.IdentifiedGenesisMetadata, error) {
+ genMetadata := make([]types.IdentifiedGenesisMetadata, 0)
+ for _, ic := range genClients {
+ cs, err := types.UnpackClientState(ic.ClientState)
+ if err != nil {
+ return nil, err
+ }
+ gms := cs.ExportMetadata(k.ClientStore(ctx, ic.ClientId))
+ if len(gms) == 0 {
+ continue
+ }
+ clientMetadata := make([]types.GenesisMetadata, len(gms))
+ for i, metadata := range gms {
+ cmd, ok := metadata.(types.GenesisMetadata)
+ if !ok {
+ return nil, sdkerrors.Wrapf(types.ErrInvalidClientMetadata, "expected metadata type: %T, got: %T",
+ types.GenesisMetadata{}, cmd)
+ }
+ clientMetadata[i] = cmd
+ }
+ genMetadata = append(genMetadata, types.NewIdentifiedGenesisMetadata(
+ ic.ClientId,
+ clientMetadata,
+ ))
+ }
+ return genMetadata, nil
+}
+
+// SetAllClientMetadata takes a list of IdentifiedGenesisMetadata and stores all of the metadata in the client store at the appropriate paths.
+func (k Keeper) SetAllClientMetadata(ctx sdk.Context, genMetadata []types.IdentifiedGenesisMetadata) {
+ for _, igm := range genMetadata {
+ // create client store
+ store := k.ClientStore(ctx, igm.ClientId)
+ // set all metadata kv pairs in client store
+ for _, md := range igm.ClientMetadata {
+ store.Set(md.GetKey(), md.GetValue())
+ }
+ }
+}
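+
+// Example (illustrative): round-tripping client metadata during genesis
+// export/import. The client identifier and key/value pair below are
+// placeholders:
+//
+//   md := types.NewIdentifiedGenesisMetadata("07-tendermint-0", []types.GenesisMetadata{
+//       types.NewGenesisMetadata([]byte("processedTime/1"), []byte("1000")),
+//   })
+//   k.SetAllClientMetadata(ctx, []types.IdentifiedGenesisMetadata{md})
+//   // the pair is now readable from k.ClientStore(ctx, "07-tendermint-0")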
+
+// GetAllConsensusStates returns all stored client consensus states.
+func (k Keeper) GetAllConsensusStates(ctx sdk.Context) types.ClientsConsensusStates {
+ clientConsStates := make(types.ClientsConsensusStates, 0)
+ mapClientIDToConsStateIdx := make(map[string]int)
+
+ k.IterateConsensusStates(ctx, func(clientID string, cs types.ConsensusStateWithHeight) bool {
+ idx, ok := mapClientIDToConsStateIdx[clientID]
+ if ok {
+ clientConsStates[idx].ConsensusStates = append(clientConsStates[idx].ConsensusStates, cs)
+ return false
+ }
+
+ clientConsState := types.ClientConsensusStates{
+ ClientId: clientID,
+ ConsensusStates: []types.ConsensusStateWithHeight{cs},
+ }
+
+ clientConsStates = append(clientConsStates, clientConsState)
+ mapClientIDToConsStateIdx[clientID] = len(clientConsStates) - 1
+ return false
+ })
+
+ return clientConsStates.Sort()
+}
+
+// HasClientConsensusState returns if keeper has a ConsensusState for a particular
+// client at the given height
+func (k Keeper) HasClientConsensusState(ctx sdk.Context, clientID string, height exported.Height) bool {
+ store := k.ClientStore(ctx, clientID)
+ return store.Has(host.ConsensusStateKey(height))
+}
+
+// GetLatestClientConsensusState gets the latest ConsensusState stored for a given client
+func (k Keeper) GetLatestClientConsensusState(ctx sdk.Context, clientID string) (exported.ConsensusState, bool) {
+ clientState, ok := k.GetClientState(ctx, clientID)
+ if !ok {
+ return nil, false
+ }
+ return k.GetClientConsensusState(ctx, clientID, clientState.GetLatestHeight())
+}
+
+// GetSelfConsensusState introspects the (self) past historical info at a given height
+// and returns the expected consensus state at that height.
+// For now, can only retrieve self consensus states for the current revision
+func (k Keeper) GetSelfConsensusState(ctx sdk.Context, height exported.Height) (exported.ConsensusState, bool) {
+ selfHeight, ok := height.(types.Height)
+ if !ok {
+ return nil, false
+ }
+ // check that height revision matches chainID revision
+ revision := types.ParseChainID(ctx.ChainID())
+ if revision != height.GetRevisionNumber() {
+ return nil, false
+ }
+ histInfo, found := k.stakingKeeper.GetHistoricalInfo(ctx, int64(selfHeight.RevisionHeight))
+ if !found {
+ return nil, false
+ }
+
+ consensusState := &ibctmtypes.ConsensusState{
+ Timestamp: histInfo.Header.Time,
+ Root: commitmenttypes.NewMerkleRoot(histInfo.Header.GetAppHash()),
+ NextValidatorsHash: histInfo.Header.NextValidatorsHash,
+ }
+ return consensusState, true
+}
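+
+// Example (illustrative): querying the chain's own consensus state at a past
+// height, assuming the staking module has recorded historical info for it:
+//
+//   cs, found := k.GetSelfConsensusState(ctx, types.NewHeight(0, 100))
+//   // found is false when the height's revision number does not match the
+//   // chain ID revision, or when height 100 is outside the historical window.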
+
+// ValidateSelfClient validates the client parameters for a client of the running chain
+// This function is only used to validate the client state the counterparty stores for this chain
+// Client must be in same revision as the executing chain
+func (k Keeper) ValidateSelfClient(ctx sdk.Context, clientState exported.ClientState) error {
+ tmClient, ok := clientState.(*ibctmtypes.ClientState)
+ if !ok {
+ return sdkerrors.Wrapf(types.ErrInvalidClient, "client must be a Tendermint client, expected: %T, got: %T",
+ &ibctmtypes.ClientState{}, tmClient)
+ }
+
+ if clientState.IsFrozen() {
+ return types.ErrClientFrozen
+ }
+
+ if ctx.ChainID() != tmClient.ChainId {
+ return sdkerrors.Wrapf(types.ErrInvalidClient, "invalid chain-id. expected: %s, got: %s",
+ ctx.ChainID(), tmClient.ChainId)
+ }
+
+ revision := types.ParseChainID(ctx.ChainID())
+
+ // client must be in the same revision as executing chain
+ if tmClient.LatestHeight.RevisionNumber != revision {
+ return sdkerrors.Wrapf(types.ErrInvalidClient, "client is not in the same revision as the chain. expected revision: %d, got: %d",
+ tmClient.LatestHeight.RevisionNumber, revision)
+ }
+
+ selfHeight := types.NewHeight(revision, uint64(ctx.BlockHeight()))
+ if tmClient.LatestHeight.GTE(selfHeight) {
+ return sdkerrors.Wrapf(types.ErrInvalidClient, "client has LatestHeight %d greater than or equal to chain height %d",
+ tmClient.LatestHeight, selfHeight)
+ }
+
+ expectedProofSpecs := commitmenttypes.GetSDKSpecs()
+ if !reflect.DeepEqual(expectedProofSpecs, tmClient.ProofSpecs) {
+ return sdkerrors.Wrapf(types.ErrInvalidClient, "client has invalid proof specs. expected: %v got: %v",
+ expectedProofSpecs, tmClient.ProofSpecs)
+ }
+
+ if err := light.ValidateTrustLevel(tmClient.TrustLevel.ToTendermint()); err != nil {
+ return sdkerrors.Wrapf(types.ErrInvalidClient, "trust-level invalid: %v", err)
+ }
+
+ expectedUbdPeriod := k.stakingKeeper.UnbondingTime(ctx)
+ if expectedUbdPeriod != tmClient.UnbondingPeriod {
+ return sdkerrors.Wrapf(types.ErrInvalidClient, "invalid unbonding period. expected: %s, got: %s",
+ expectedUbdPeriod, tmClient.UnbondingPeriod)
+ }
+
+ if tmClient.UnbondingPeriod < tmClient.TrustingPeriod {
+ return sdkerrors.Wrapf(types.ErrInvalidClient, "unbonding period must be greater than trusting period. unbonding period (%d) < trusting period (%d)",
+ tmClient.UnbondingPeriod, tmClient.TrustingPeriod)
+ }
+
+ if len(tmClient.UpgradePath) != 0 {
+ // For now, SDK IBC implementation assumes that upgrade path (if defined) is defined by SDK upgrade module
+ expectedUpgradePath := []string{upgradetypes.StoreKey, upgradetypes.KeyUpgradedIBCState}
+ if !reflect.DeepEqual(expectedUpgradePath, tmClient.UpgradePath) {
+ return sdkerrors.Wrapf(types.ErrInvalidClient, "upgrade path must be the upgrade path defined by upgrade module. expected %v, got %v",
+ expectedUpgradePath, tmClient.UpgradePath)
+ }
+ }
+ return nil
+}
+
+// IterateClients provides an iterator over all stored light client State
+// objects. For each State object, cb will be called. If the cb returns true,
+// the iterator will close and stop.
+func (k Keeper) IterateClients(ctx sdk.Context, cb func(clientID string, cs exported.ClientState) bool) {
+ store := ctx.KVStore(k.storeKey)
+ iterator := sdk.KVStorePrefixIterator(store, host.KeyClientStorePrefix)
+
+ defer iterator.Close()
+ for ; iterator.Valid(); iterator.Next() {
+ keySplit := strings.Split(string(iterator.Key()), "/")
+ if keySplit[len(keySplit)-1] != host.KeyClientState {
+ continue
+ }
+ clientState := k.MustUnmarshalClientState(iterator.Value())
+
+ // key is ibc/{clientid}/clientState
+ // Thus, keySplit[1] is clientID
+ if cb(keySplit[1], clientState) {
+ break
+ }
+ }
+}
+
+// GetAllClients returns all stored light client State objects.
+func (k Keeper) GetAllClients(ctx sdk.Context) (states []exported.ClientState) {
+ k.IterateClients(ctx, func(_ string, state exported.ClientState) bool {
+ states = append(states, state)
+ return false
+ })
+ return states
+}
+
+// ClientStore returns isolated prefix store for each client so they can read/write in separate
+// namespace without being able to read/write other client's data
+func (k Keeper) ClientStore(ctx sdk.Context, clientID string) sdk.KVStore {
+ clientPrefix := []byte(fmt.Sprintf("%s/%s/", host.KeyClientStorePrefix, clientID))
+ return prefix.NewStore(ctx.KVStore(k.storeKey), clientPrefix)
+}
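+
+// Example (illustrative): the prefix store isolates each client under
+// "clients/{clientID}/", so callers use client-relative keys. Assuming an
+// existing client "07-tendermint-0":
+//
+//   store := k.ClientStore(ctx, "07-tendermint-0")
+//   bz := store.Get(host.ClientStateKey())              // "clients/07-tendermint-0/clientState"
+//   found := store.Has(host.ConsensusStateKey(height))  // "clients/07-tendermint-0/consensusStates/{height}"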
diff --git a/core/02-client/keeper/keeper_test.go b/core/02-client/keeper/keeper_test.go
new file mode 100644
index 00000000..c22e80cc
--- /dev/null
+++ b/core/02-client/keeper/keeper_test.go
@@ -0,0 +1,389 @@
+package keeper_test
+
+import (
+ "math/rand"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/suite"
+ tmbytes "github.com/tendermint/tendermint/libs/bytes"
+ tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
+ tmtypes "github.com/tendermint/tendermint/types"
+
+ "github.com/cosmos/cosmos-sdk/baseapp"
+ "github.com/cosmos/cosmos-sdk/codec"
+ cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec"
+ "github.com/cosmos/cosmos-sdk/simapp"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/keeper"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
+ localhosttypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/09-localhost/types"
+ ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+ ibctestingmock "github.com/cosmos/cosmos-sdk/x/ibc/testing/mock"
+ stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types"
+)
+
+const (
+ testChainID = "gaiahub-0"
+ testChainIDRevision1 = "gaiahub-1"
+
+ testClientID = "tendermint-0"
+ testClientID2 = "tendermint-1"
+ testClientID3 = "tendermint-2"
+
+ height = 5
+
+ trustingPeriod time.Duration = time.Hour * 24 * 7 * 2
+ ubdPeriod time.Duration = time.Hour * 24 * 7 * 3
+ maxClockDrift time.Duration = time.Second * 10
+)
+
+var (
+ testClientHeight = types.NewHeight(0, 5)
+ testClientHeightRevision1 = types.NewHeight(1, 5)
+ newClientHeight = types.NewHeight(1, 1)
+)
+
+type KeeperTestSuite struct {
+ suite.Suite
+
+ coordinator *ibctesting.Coordinator
+
+ chainA *ibctesting.TestChain
+ chainB *ibctesting.TestChain
+
+ cdc codec.Marshaler
+ ctx sdk.Context
+ keeper *keeper.Keeper
+ consensusState *ibctmtypes.ConsensusState
+ header *ibctmtypes.Header
+ valSet *tmtypes.ValidatorSet
+ valSetHash tmbytes.HexBytes
+ privVal tmtypes.PrivValidator
+ now time.Time
+ past time.Time
+
+ queryClient types.QueryClient
+}
+
+func (suite *KeeperTestSuite) SetupTest() {
+ suite.coordinator = ibctesting.NewCoordinator(suite.T(), 2)
+
+ suite.chainA = suite.coordinator.GetChain(ibctesting.GetChainID(0))
+ suite.chainB = suite.coordinator.GetChain(ibctesting.GetChainID(1))
+
+ isCheckTx := false
+ suite.now = time.Date(2020, 1, 2, 0, 0, 0, 0, time.UTC)
+ suite.past = time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC)
+ now2 := suite.now.Add(time.Hour)
+ app := simapp.Setup(isCheckTx)
+
+ suite.cdc = app.AppCodec()
+ suite.ctx = app.BaseApp.NewContext(isCheckTx, tmproto.Header{Height: height, ChainID: testClientID, Time: now2})
+ suite.keeper = &app.IBCKeeper.ClientKeeper
+ suite.privVal = ibctestingmock.NewPV()
+
+ pubKey, err := suite.privVal.GetPubKey()
+ suite.Require().NoError(err)
+
+ testClientHeightMinus1 := types.NewHeight(0, height-1)
+
+ validator := tmtypes.NewValidator(pubKey, 1)
+ suite.valSet = tmtypes.NewValidatorSet([]*tmtypes.Validator{validator})
+ suite.valSetHash = suite.valSet.Hash()
+ suite.header = suite.chainA.CreateTMClientHeader(testChainID, int64(testClientHeight.RevisionHeight), testClientHeightMinus1, now2, suite.valSet, suite.valSet, []tmtypes.PrivValidator{suite.privVal})
+ suite.consensusState = ibctmtypes.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot([]byte("hash")), suite.valSetHash)
+
+ var validators stakingtypes.Validators
+ for i := 1; i < 11; i++ {
+ privVal := ibctestingmock.NewPV()
+ tmPk, err := privVal.GetPubKey()
+ suite.Require().NoError(err)
+ pk, err := cryptocodec.FromTmPubKeyInterface(tmPk)
+ suite.Require().NoError(err)
+ val, err := stakingtypes.NewValidator(sdk.ValAddress(pk.Address()), pk, stakingtypes.Description{})
+ suite.Require().NoError(err)
+
+ val.Status = stakingtypes.Bonded
+ val.Tokens = sdk.NewInt(rand.Int63())
+ validators = append(validators, val)
+
+ hi := stakingtypes.NewHistoricalInfo(suite.ctx.BlockHeader(), validators)
+ app.StakingKeeper.SetHistoricalInfo(suite.ctx, int64(i), &hi)
+ }
+
+ // add localhost client
+ revision := types.ParseChainID(suite.chainA.ChainID)
+ localHostClient := localhosttypes.NewClientState(
+ suite.chainA.ChainID, types.NewHeight(revision, uint64(suite.chainA.GetContext().BlockHeight())),
+ )
+ suite.chainA.App.IBCKeeper.ClientKeeper.SetClientState(suite.chainA.GetContext(), exported.Localhost, localHostClient)
+
+ queryHelper := baseapp.NewQueryServerTestHelper(suite.ctx, app.InterfaceRegistry())
+ types.RegisterQueryServer(queryHelper, app.IBCKeeper.ClientKeeper)
+ suite.queryClient = types.NewQueryClient(queryHelper)
+}
+
+func TestKeeperTestSuite(t *testing.T) {
+ suite.Run(t, new(KeeperTestSuite))
+}
+
+func (suite *KeeperTestSuite) TestSetClientState() {
+ clientState := ibctmtypes.NewClientState(testChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, types.ZeroHeight(), commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false)
+ suite.keeper.SetClientState(suite.ctx, testClientID, clientState)
+
+ retrievedState, found := suite.keeper.GetClientState(suite.ctx, testClientID)
+ suite.Require().True(found, "GetClientState failed")
+ suite.Require().Equal(clientState, retrievedState, "Client states are not equal")
+}
+
+func (suite *KeeperTestSuite) TestSetClientConsensusState() {
+ suite.keeper.SetClientConsensusState(suite.ctx, testClientID, testClientHeight, suite.consensusState)
+
+ retrievedConsState, found := suite.keeper.GetClientConsensusState(suite.ctx, testClientID, testClientHeight)
+ suite.Require().True(found, "GetConsensusState failed")
+
+ tmConsState, ok := retrievedConsState.(*ibctmtypes.ConsensusState)
+ suite.Require().True(ok)
+ suite.Require().Equal(suite.consensusState, tmConsState, "ConsensusState not stored correctly")
+}
+
+func (suite *KeeperTestSuite) TestValidateSelfClient() {
+ testClientHeight := types.NewHeight(0, uint64(suite.chainA.GetContext().BlockHeight()-1))
+
+ testCases := []struct {
+ name string
+ clientState exported.ClientState
+ expPass bool
+ }{
+ {
+ "success",
+ ibctmtypes.NewClientState(suite.chainA.ChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, testClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false),
+ true,
+ },
+ {
+ "success with nil UpgradePath",
+ ibctmtypes.NewClientState(suite.chainA.ChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, testClientHeight, commitmenttypes.GetSDKSpecs(), nil, false, false),
+ true,
+ },
+ {
+ "invalid client type",
+ localhosttypes.NewClientState(suite.chainA.ChainID, testClientHeight),
+ false,
+ },
+ {
+ "frozen client",
+ &ibctmtypes.ClientState{suite.chainA.ChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, testClientHeight, testClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false},
+ false,
+ },
+ {
+ "incorrect chainID",
+ ibctmtypes.NewClientState("gaiatestnet", ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, testClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false),
+ false,
+ },
+ {
+ "invalid client height",
+ ibctmtypes.NewClientState(suite.chainA.ChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, types.NewHeight(0, uint64(suite.chainA.GetContext().BlockHeight())), commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false),
+ false,
+ },
+ {
+ "invalid client revision",
+ ibctmtypes.NewClientState(suite.chainA.ChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, testClientHeightRevision1, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false),
+ false,
+ },
+ {
+ "invalid proof specs",
+ ibctmtypes.NewClientState(suite.chainA.ChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, testClientHeight, nil, ibctesting.UpgradePath, false, false),
+ false,
+ },
+ {
+ "invalid trust level",
+ ibctmtypes.NewClientState(suite.chainA.ChainID, ibctmtypes.Fraction{0, 1}, trustingPeriod, ubdPeriod, maxClockDrift, testClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false),
+ false,
+ },
+ {
+ "invalid unbonding period",
+ ibctmtypes.NewClientState(suite.chainA.ChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod+10, maxClockDrift, testClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false),
+ false,
+ },
+ {
+ "invalid trusting period",
+ ibctmtypes.NewClientState(suite.chainA.ChainID, ibctmtypes.DefaultTrustLevel, ubdPeriod+10, ubdPeriod, maxClockDrift, testClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false),
+ false,
+ },
+ {
+ "invalid upgrade path",
+ ibctmtypes.NewClientState(suite.chainA.ChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, testClientHeight, commitmenttypes.GetSDKSpecs(), []string{"bad", "upgrade", "path"}, false, false),
+ false,
+ },
+ }
+
+ for _, tc := range testCases {
+ err := suite.chainA.App.IBCKeeper.ClientKeeper.ValidateSelfClient(suite.chainA.GetContext(), tc.clientState)
+ if tc.expPass {
+ suite.Require().NoError(err, "expected valid client for case: %s", tc.name)
+ } else {
+ suite.Require().Error(err, "expected invalid client for case: %s", tc.name)
+ }
+ }
+}
+
+func (suite KeeperTestSuite) TestGetAllGenesisClients() {
+ clientIDs := []string{
+ testClientID2, testClientID3, testClientID,
+ }
+ expClients := []exported.ClientState{
+ ibctmtypes.NewClientState(testChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, types.ZeroHeight(), commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false),
+ ibctmtypes.NewClientState(testChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, types.ZeroHeight(), commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false),
+ ibctmtypes.NewClientState(testChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, types.ZeroHeight(), commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false),
+ }
+
+ expGenClients := make(types.IdentifiedClientStates, len(expClients))
+
+ for i := range expClients {
+ suite.chainA.App.IBCKeeper.ClientKeeper.SetClientState(suite.chainA.GetContext(), clientIDs[i], expClients[i])
+ expGenClients[i] = types.NewIdentifiedClientState(clientIDs[i], expClients[i])
+ }
+
+ // add localhost client
+ localHostClient, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), exported.Localhost)
+ suite.Require().True(found)
+ expGenClients = append(expGenClients, types.NewIdentifiedClientState(exported.Localhost, localHostClient))
+
+ genClients := suite.chainA.App.IBCKeeper.ClientKeeper.GetAllGenesisClients(suite.chainA.GetContext())
+
+ suite.Require().Equal(expGenClients.Sort(), genClients)
+}
+
+func (suite KeeperTestSuite) TestGetAllGenesisMetadata() {
+ expectedGenMetadata := []types.IdentifiedGenesisMetadata{
+ types.NewIdentifiedGenesisMetadata(
+ "clientA",
+ []types.GenesisMetadata{
+ types.NewGenesisMetadata(ibctmtypes.ProcessedTimeKey(types.NewHeight(0, 1)), []byte("foo")),
+ types.NewGenesisMetadata(ibctmtypes.ProcessedTimeKey(types.NewHeight(0, 2)), []byte("bar")),
+ types.NewGenesisMetadata(ibctmtypes.ProcessedTimeKey(types.NewHeight(0, 3)), []byte("baz")),
+ },
+ ),
+ types.NewIdentifiedGenesisMetadata(
+ "clientB",
+ []types.GenesisMetadata{
+ types.NewGenesisMetadata(ibctmtypes.ProcessedTimeKey(types.NewHeight(1, 100)), []byte("val1")),
+ types.NewGenesisMetadata(ibctmtypes.ProcessedTimeKey(types.NewHeight(2, 300)), []byte("val2")),
+ },
+ ),
+ }
+
+ genClients := []types.IdentifiedClientState{
+ types.NewIdentifiedClientState("clientA", &ibctmtypes.ClientState{}), types.NewIdentifiedClientState("clientB", &ibctmtypes.ClientState{}),
+ types.NewIdentifiedClientState("clientC", &ibctmtypes.ClientState{}), types.NewIdentifiedClientState("clientD", &localhosttypes.ClientState{}),
+ }
+
+ suite.chainA.App.IBCKeeper.ClientKeeper.SetAllClientMetadata(suite.chainA.GetContext(), expectedGenMetadata)
+
+ actualGenMetadata, err := suite.chainA.App.IBCKeeper.ClientKeeper.GetAllClientMetadata(suite.chainA.GetContext(), genClients)
+ suite.Require().NoError(err, "get client metadata returned error unexpectedly")
+ suite.Require().Equal(expectedGenMetadata, actualGenMetadata, "retrieved metadata is unexpected")
+}
+
+func (suite KeeperTestSuite) TestGetConsensusState() {
+ suite.ctx = suite.ctx.WithBlockHeight(10)
+ cases := []struct {
+ name string
+ height types.Height
+ expPass bool
+ }{
+ {"zero height", types.ZeroHeight(), false},
+ {"height > latest height", types.NewHeight(0, uint64(suite.ctx.BlockHeight())+1), false},
+ {"latest height - 1", types.NewHeight(0, uint64(suite.ctx.BlockHeight())-1), true},
+ {"latest height", types.GetSelfHeight(suite.ctx), true},
+ }
+
+ for i, tc := range cases {
+ tc := tc
+ cs, found := suite.keeper.GetSelfConsensusState(suite.ctx, tc.height)
+ if tc.expPass {
+ suite.Require().True(found, "Case %d should have passed: %s", i, tc.name)
+ suite.Require().NotNil(cs, "Case %d should have passed: %s", i, tc.name)
+ } else {
+ suite.Require().False(found, "Case %d should have failed: %s", i, tc.name)
+ suite.Require().Nil(cs, "Case %d should have failed: %s", i, tc.name)
+ }
+ }
+}
+
+func (suite KeeperTestSuite) TestConsensusStateHelpers() {
+ // initial setup
+ clientState := ibctmtypes.NewClientState(testChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, testClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false)
+
+ suite.keeper.SetClientState(suite.ctx, testClientID, clientState)
+ suite.keeper.SetClientConsensusState(suite.ctx, testClientID, testClientHeight, suite.consensusState)
+
+ nextState := ibctmtypes.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot([]byte("next")), suite.valSetHash)
+
+ testClientHeightPlus5 := types.NewHeight(0, height+5)
+
+ header := suite.chainA.CreateTMClientHeader(testClientID, int64(testClientHeightPlus5.RevisionHeight), testClientHeight, suite.header.Header.Time.Add(time.Minute),
+ suite.valSet, suite.valSet, []tmtypes.PrivValidator{suite.privVal})
+
+ // mock update functionality
+ clientState.LatestHeight = header.GetHeight().(types.Height)
+ suite.keeper.SetClientConsensusState(suite.ctx, testClientID, header.GetHeight(), nextState)
+ suite.keeper.SetClientState(suite.ctx, testClientID, clientState)
+
+ latest, ok := suite.keeper.GetLatestClientConsensusState(suite.ctx, testClientID)
+ suite.Require().True(ok)
+ suite.Require().Equal(nextState, latest, "Latest client not returned correctly")
+}
+
+// 2 clients in total are created on chainA. The first client is updated so it contains an initial consensus state
+// and a consensus state at the update height.
+func (suite KeeperTestSuite) TestGetAllConsensusStates() {
+ clientA, _ := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+
+ clientState := suite.chainA.GetClientState(clientA)
+ expConsensusHeight0 := clientState.GetLatestHeight()
+ consensusState0, ok := suite.chainA.GetConsensusState(clientA, expConsensusHeight0)
+ suite.Require().True(ok)
+
+ // update client to create a second consensus state
+ err := suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ suite.Require().NoError(err)
+
+ clientState = suite.chainA.GetClientState(clientA)
+ expConsensusHeight1 := clientState.GetLatestHeight()
+ suite.Require().True(expConsensusHeight1.GT(expConsensusHeight0))
+ consensusState1, ok := suite.chainA.GetConsensusState(clientA, expConsensusHeight1)
+ suite.Require().True(ok)
+
+ expConsensus := []exported.ConsensusState{
+ consensusState0,
+ consensusState1,
+ }
+
+ // create second client on chainA
+ clientA2, _ := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+ clientState = suite.chainA.GetClientState(clientA2)
+
+ expConsensusHeight2 := clientState.GetLatestHeight()
+ consensusState2, ok := suite.chainA.GetConsensusState(clientA2, expConsensusHeight2)
+ suite.Require().True(ok)
+
+ expConsensus2 := []exported.ConsensusState{consensusState2}
+
+ expConsensusStates := types.ClientsConsensusStates{
+ types.NewClientConsensusStates(clientA, []types.ConsensusStateWithHeight{
+ types.NewConsensusStateWithHeight(expConsensusHeight0.(types.Height), expConsensus[0]),
+ types.NewConsensusStateWithHeight(expConsensusHeight1.(types.Height), expConsensus[1]),
+ }),
+ types.NewClientConsensusStates(clientA2, []types.ConsensusStateWithHeight{
+ types.NewConsensusStateWithHeight(expConsensusHeight2.(types.Height), expConsensus2[0]),
+ }),
+ }.Sort()
+
+ consStates := suite.chainA.App.IBCKeeper.ClientKeeper.GetAllConsensusStates(suite.chainA.GetContext())
+ suite.Require().Equal(expConsensusStates, consStates, "%s \n\n%s", expConsensusStates, consStates)
+}
diff --git a/core/02-client/keeper/params.go b/core/02-client/keeper/params.go
new file mode 100644
index 00000000..04f4a256
--- /dev/null
+++ b/core/02-client/keeper/params.go
@@ -0,0 +1,23 @@
+package keeper
+
+import (
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+)
+
+// GetAllowedClients retrieves the allowed client types from the paramstore
+func (k Keeper) GetAllowedClients(ctx sdk.Context) []string {
+ var res []string
+ k.paramSpace.Get(ctx, types.KeyAllowedClients, &res)
+ return res
+}
+
+// GetParams returns the total set of ibc client parameters.
+func (k Keeper) GetParams(ctx sdk.Context) types.Params {
+ return types.NewParams(k.GetAllowedClients(ctx)...)
+}
+
+// SetParams sets the total set of ibc client parameters.
+func (k Keeper) SetParams(ctx sdk.Context, params types.Params) {
+ k.paramSpace.SetParamSet(ctx, &params)
+}
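+
+// Example (illustrative): restricting the allowed client types via params,
+// assuming the param subspace was initialized with the client KeyTable:
+//
+//   k.SetParams(ctx, types.NewParams("07-tendermint"))
+//   k.GetAllowedClients(ctx) // ["07-tendermint"]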
diff --git a/core/02-client/keeper/params_test.go b/core/02-client/keeper/params_test.go
new file mode 100644
index 00000000..9df08597
--- /dev/null
+++ b/core/02-client/keeper/params_test.go
@@ -0,0 +1,17 @@
+package keeper_test
+
+import (
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+)
+
+func (suite *KeeperTestSuite) TestParams() {
+ expParams := types.DefaultParams()
+
+ params := suite.chainA.App.IBCKeeper.ClientKeeper.GetParams(suite.chainA.GetContext())
+ suite.Require().Equal(expParams, params)
+
+ expParams.AllowedClients = []string{}
+ suite.chainA.App.IBCKeeper.ClientKeeper.SetParams(suite.chainA.GetContext(), expParams)
+ params = suite.chainA.App.IBCKeeper.ClientKeeper.GetParams(suite.chainA.GetContext())
+ suite.Require().Empty(params.AllowedClients)
+}
diff --git a/core/02-client/keeper/proposal.go b/core/02-client/keeper/proposal.go
new file mode 100644
index 00000000..6d4ff350
--- /dev/null
+++ b/core/02-client/keeper/proposal.go
@@ -0,0 +1,72 @@
+package keeper
+
+import (
+ "github.com/armon/go-metrics"
+
+ "github.com/cosmos/cosmos-sdk/telemetry"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+)
+
+// ClientUpdateProposal will retrieve the subject and substitute client.
+// The initial height must be greater than the latest height of the subject
+// client. A callback will occur to the subject client state with the client
+// prefixed store being provided for both the subject and the substitute client.
+// The localhost client is not allowed to be modified with a proposal. The IBC
+// client implementations are responsible for validating the parameters of the
+// substitute (ensuring they match the subject's parameters) as well as copying
+// the necessary consensus states from the substitute to the subject client
+// store.
+func (k Keeper) ClientUpdateProposal(ctx sdk.Context, p *types.ClientUpdateProposal) error {
+ if p.SubjectClientId == exported.Localhost || p.SubstituteClientId == exported.Localhost {
+ return sdkerrors.Wrap(types.ErrInvalidUpdateClientProposal, "cannot update localhost client with proposal")
+ }
+
+ subjectClientState, found := k.GetClientState(ctx, p.SubjectClientId)
+ if !found {
+ return sdkerrors.Wrapf(types.ErrClientNotFound, "subject client with ID %s", p.SubjectClientId)
+ }
+
+ if subjectClientState.GetLatestHeight().GTE(p.InitialHeight) {
+ return sdkerrors.Wrapf(types.ErrInvalidHeight, "subject client state latest height is greater or equal to initial height (%s >= %s)", subjectClientState.GetLatestHeight(), p.InitialHeight)
+ }
+
+ substituteClientState, found := k.GetClientState(ctx, p.SubstituteClientId)
+ if !found {
+ return sdkerrors.Wrapf(types.ErrClientNotFound, "substitute client with ID %s", p.SubstituteClientId)
+ }
+
+ clientState, err := subjectClientState.CheckSubstituteAndUpdateState(ctx, k.cdc, k.ClientStore(ctx, p.SubjectClientId), k.ClientStore(ctx, p.SubstituteClientId), substituteClientState, p.InitialHeight)
+ if err != nil {
+ return err
+ }
+ k.SetClientState(ctx, p.SubjectClientId, clientState)
+
+ k.Logger(ctx).Info("client updated after governance proposal passed", "client-id", p.SubjectClientId, "height", clientState.GetLatestHeight().String())
+
+ defer func() {
+ telemetry.IncrCounterWithLabels(
+ []string{"ibc", "client", "update"},
+ 1,
+ []metrics.Label{
+ telemetry.NewLabel("client-type", clientState.ClientType()),
+ telemetry.NewLabel("client-id", p.SubjectClientId),
+ telemetry.NewLabel("update-type", "proposal"),
+ },
+ )
+ }()
+
+ // emitting events in the keeper for proposal updates to clients
+ ctx.EventManager().EmitEvent(
+ sdk.NewEvent(
+ types.EventTypeUpdateClientProposal,
+ sdk.NewAttribute(types.AttributeKeySubjectClientID, p.SubjectClientId),
+ sdk.NewAttribute(types.AttributeKeyClientType, clientState.ClientType()),
+ sdk.NewAttribute(types.AttributeKeyConsensusHeight, clientState.GetLatestHeight().String()),
+ ),
+ )
+
+ return nil
+}
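+
+// Example (illustrative): a proposal that substitutes a frozen subject client
+// with a healthy substitute. The identifiers and height are placeholders:
+//
+//   p := types.NewClientUpdateProposal(
+//       "update frozen client", "replace 07-tendermint-0 with 07-tendermint-1",
+//       "07-tendermint-0", "07-tendermint-1", types.NewHeight(0, 100),
+//   )
+//   err := k.ClientUpdateProposal(ctx, p)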
diff --git a/core/02-client/keeper/proposal_test.go b/core/02-client/keeper/proposal_test.go
new file mode 100644
index 00000000..8dbe43f7
--- /dev/null
+++ b/core/02-client/keeper/proposal_test.go
@@ -0,0 +1,130 @@
+package keeper_test
+
+import (
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
+ ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+)
+
+func (suite *KeeperTestSuite) TestClientUpdateProposal() {
+ var (
+ subject, substitute string
+ subjectClientState, substituteClientState exported.ClientState
+ initialHeight clienttypes.Height
+ content *types.ClientUpdateProposal
+ err error
+ )
+
+ testCases := []struct {
+ name string
+ malleate func()
+ expPass bool
+ }{
+ {
+ "valid update client proposal", func() {
+ content = clienttypes.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, subject, substitute, initialHeight)
+ }, true,
+ },
+ {
+ "subject and substitute use different revision numbers", func() {
+ tmClientState, ok := substituteClientState.(*ibctmtypes.ClientState)
+ suite.Require().True(ok)
+ consState, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientConsensusState(suite.chainA.GetContext(), substitute, tmClientState.LatestHeight)
+ suite.Require().True(found)
+ newRevisionNumber := tmClientState.GetLatestHeight().GetRevisionNumber() + 1
+
+ tmClientState.LatestHeight = clienttypes.NewHeight(newRevisionNumber, tmClientState.GetLatestHeight().GetRevisionHeight())
+ initialHeight = clienttypes.NewHeight(newRevisionNumber, initialHeight.GetRevisionHeight())
+ suite.chainA.App.IBCKeeper.ClientKeeper.SetClientConsensusState(suite.chainA.GetContext(), substitute, tmClientState.LatestHeight, consState)
+ suite.chainA.App.IBCKeeper.ClientKeeper.SetClientState(suite.chainA.GetContext(), substitute, tmClientState)
+
+ content = clienttypes.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, subject, substitute, initialHeight)
+ }, true,
+ },
+ {
+ "cannot use localhost as subject", func() {
+ content = clienttypes.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, exported.Localhost, substitute, initialHeight)
+ }, false,
+ },
+ {
+ "cannot use localhost as substitute", func() {
+ content = clienttypes.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, subject, exported.Localhost, initialHeight)
+ }, false,
+ },
+ {
+ "subject client does not exist", func() {
+ content = clienttypes.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, ibctesting.InvalidID, substitute, initialHeight)
+ }, false,
+ },
+ {
+ "substitute client does not exist", func() {
+ content = clienttypes.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, subject, ibctesting.InvalidID, initialHeight)
+ }, false,
+ },
+ {
+ "subject and substitute have equal latest height", func() {
+ tmClientState, ok := subjectClientState.(*ibctmtypes.ClientState)
+ suite.Require().True(ok)
+ tmClientState.LatestHeight = substituteClientState.GetLatestHeight().(clienttypes.Height)
+ suite.chainA.App.IBCKeeper.ClientKeeper.SetClientState(suite.chainA.GetContext(), subject, tmClientState)
+
+ content = clienttypes.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, subject, substitute, initialHeight)
+ }, false,
+ },
+ {
+ "update fails, client is not frozen or expired", func() {
+ tmClientState, ok := subjectClientState.(*ibctmtypes.ClientState)
+ suite.Require().True(ok)
+ tmClientState.FrozenHeight = clienttypes.ZeroHeight()
+ suite.chainA.App.IBCKeeper.ClientKeeper.SetClientState(suite.chainA.GetContext(), subject, tmClientState)
+
+ content = clienttypes.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, subject, substitute, initialHeight)
+ }, false,
+ },
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+
+ suite.Run(tc.name, func() {
+ suite.SetupTest() // reset
+
+ subject, _ = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+ subjectClientState = suite.chainA.GetClientState(subject)
+ substitute, _ = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+ initialHeight = clienttypes.NewHeight(subjectClientState.GetLatestHeight().GetRevisionNumber(), subjectClientState.GetLatestHeight().GetRevisionHeight()+1)
+
+ // update substitute twice
+ suite.coordinator.UpdateClient(suite.chainA, suite.chainB, substitute, exported.Tendermint)
+ suite.coordinator.UpdateClient(suite.chainA, suite.chainB, substitute, exported.Tendermint)
+ substituteClientState = suite.chainA.GetClientState(substitute)
+
+ tmClientState, ok := subjectClientState.(*ibctmtypes.ClientState)
+ suite.Require().True(ok)
+ tmClientState.AllowUpdateAfterMisbehaviour = true
+ tmClientState.AllowUpdateAfterExpiry = true
+ tmClientState.FrozenHeight = tmClientState.LatestHeight
+ suite.chainA.App.IBCKeeper.ClientKeeper.SetClientState(suite.chainA.GetContext(), subject, tmClientState)
+
+ tmClientState, ok = substituteClientState.(*ibctmtypes.ClientState)
+ suite.Require().True(ok)
+ tmClientState.AllowUpdateAfterMisbehaviour = true
+ tmClientState.AllowUpdateAfterExpiry = true
+ tmClientState.FrozenHeight = tmClientState.LatestHeight
+ suite.chainA.App.IBCKeeper.ClientKeeper.SetClientState(suite.chainA.GetContext(), substitute, tmClientState)
+
+ tc.malleate()
+
+ err = suite.chainA.App.IBCKeeper.ClientKeeper.ClientUpdateProposal(suite.chainA.GetContext(), content)
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+
+}
diff --git a/core/02-client/module.go b/core/02-client/module.go
new file mode 100644
index 00000000..08efee8b
--- /dev/null
+++ b/core/02-client/module.go
@@ -0,0 +1,29 @@
+package client
+
+import (
+ "github.com/gogo/protobuf/grpc"
+ "github.com/spf13/cobra"
+
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/client/cli"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+)
+
+// Name returns the IBC client name
+func Name() string {
+ return types.SubModuleName
+}
+
+// GetQueryCmd returns the root query command for the IBC client
+func GetQueryCmd() *cobra.Command {
+ return cli.GetQueryCmd()
+}
+
+// GetTxCmd returns the root tx command for 02-client.
+func GetTxCmd() *cobra.Command {
+ return cli.NewTxCmd()
+}
+
+// RegisterQueryService registers the gRPC query service for IBC client.
+func RegisterQueryService(server grpc.Server, queryServer types.QueryServer) {
+ types.RegisterQueryServer(server, queryServer)
+}
diff --git a/core/02-client/proposal_handler.go b/core/02-client/proposal_handler.go
new file mode 100644
index 00000000..befa95df
--- /dev/null
+++ b/core/02-client/proposal_handler.go
@@ -0,0 +1,22 @@
+package client
+
+import (
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ govtypes "github.com/cosmos/cosmos-sdk/x/gov/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/keeper"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+)
+
+// NewClientUpdateProposalHandler defines the client update proposal handler
+func NewClientUpdateProposalHandler(k keeper.Keeper) govtypes.Handler {
+ return func(ctx sdk.Context, content govtypes.Content) error {
+ switch c := content.(type) {
+ case *types.ClientUpdateProposal:
+ return k.ClientUpdateProposal(ctx, c)
+
+ default:
+ return sdkerrors.Wrapf(sdkerrors.ErrUnknownRequest, "unrecognized ibc proposal content type: %T", c)
+ }
+ }
+}
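+
+// Example (illustrative): wiring the handler into the governance proposal
+// router at app construction time. The route name is an assumption; use the
+// router key defined by the 02-client types package:
+//
+//   govRouter := govtypes.NewRouter()
+//   govRouter.AddRoute(types.RouterKey,
+//       NewClientUpdateProposalHandler(app.IBCKeeper.ClientKeeper))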
diff --git a/core/02-client/proposal_handler_test.go b/core/02-client/proposal_handler_test.go
new file mode 100644
index 00000000..41b89318
--- /dev/null
+++ b/core/02-client/proposal_handler_test.go
@@ -0,0 +1,84 @@
+package client_test
+
+import (
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ distributiontypes "github.com/cosmos/cosmos-sdk/x/distribution/types"
+ govtypes "github.com/cosmos/cosmos-sdk/x/gov/types"
+ client "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client"
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
+ ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+)
+
+func (suite *ClientTestSuite) TestNewClientUpdateProposalHandler() {
+ var (
+ content govtypes.Content
+ err error
+ )
+
+ testCases := []struct {
+ name string
+ malleate func()
+ expPass bool
+ }{
+ {
+ "valid update client proposal", func() {
+ subject, _ := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+ subjectClientState := suite.chainA.GetClientState(subject)
+ substitute, _ := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+ initialHeight := clienttypes.NewHeight(subjectClientState.GetLatestHeight().GetRevisionNumber(), subjectClientState.GetLatestHeight().GetRevisionHeight()+1)
+
+ // update substitute twice
+ suite.coordinator.UpdateClient(suite.chainA, suite.chainB, substitute, exported.Tendermint)
+ suite.coordinator.UpdateClient(suite.chainA, suite.chainB, substitute, exported.Tendermint)
+ substituteClientState := suite.chainA.GetClientState(substitute)
+
+ tmClientState, ok := subjectClientState.(*ibctmtypes.ClientState)
+ suite.Require().True(ok)
+ tmClientState.AllowUpdateAfterMisbehaviour = true
+ tmClientState.FrozenHeight = tmClientState.LatestHeight
+ suite.chainA.App.IBCKeeper.ClientKeeper.SetClientState(suite.chainA.GetContext(), subject, tmClientState)
+
+ // replicate changes to substitute (they must match)
+ tmClientState, ok = substituteClientState.(*ibctmtypes.ClientState)
+ suite.Require().True(ok)
+ tmClientState.AllowUpdateAfterMisbehaviour = true
+ suite.chainA.App.IBCKeeper.ClientKeeper.SetClientState(suite.chainA.GetContext(), substitute, tmClientState)
+
+ content = clienttypes.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, subject, substitute, initialHeight)
+ }, true,
+ },
+ {
+ "nil proposal", func() {
+ content = nil
+ }, false,
+ },
+ {
+ "unsupported proposal type", func() {
+ content = distributiontypes.NewCommunityPoolSpendProposal(ibctesting.Title, ibctesting.Description, suite.chainA.SenderAccount.GetAddress(), sdk.NewCoins(sdk.NewCoin("communityfunds", sdk.NewInt(10))))
+ }, false,
+ },
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+
+ suite.Run(tc.name, func() {
+ suite.SetupTest() // reset
+
+ tc.malleate()
+
+ proposalHandler := client.NewClientUpdateProposalHandler(suite.chainA.App.IBCKeeper.ClientKeeper)
+
+ err = proposalHandler(suite.chainA.GetContext(), content)
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+
+}
diff --git a/core/02-client/simulation/decoder.go b/core/02-client/simulation/decoder.go
new file mode 100644
index 00000000..03a803b1
--- /dev/null
+++ b/core/02-client/simulation/decoder.go
@@ -0,0 +1,38 @@
+package simulation
+
+import (
+ "bytes"
+ "fmt"
+
+ "github.com/cosmos/cosmos-sdk/types/kv"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/keeper"
+ host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+)
+
+var _ ClientUnmarshaler = (*keeper.Keeper)(nil)
+
+// ClientUnmarshaler defines an interface for unmarshaling ICS02 interfaces.
+type ClientUnmarshaler interface {
+ MustUnmarshalClientState([]byte) exported.ClientState
+ MustUnmarshalConsensusState([]byte) exported.ConsensusState
+}
+
+// NewDecodeStore returns a decoder function closure that unmarshals the KVPair's
+// Value to the corresponding client type.
+func NewDecodeStore(cdc ClientUnmarshaler, kvA, kvB kv.Pair) (string, bool) {
+ switch {
+ case bytes.HasPrefix(kvA.Key, host.KeyClientStorePrefix) && bytes.HasSuffix(kvA.Key, []byte(host.KeyClientState)):
+ clientStateA := cdc.MustUnmarshalClientState(kvA.Value)
+ clientStateB := cdc.MustUnmarshalClientState(kvB.Value)
+ return fmt.Sprintf("ClientState A: %v\nClientState B: %v", clientStateA, clientStateB), true
+
+ case bytes.HasPrefix(kvA.Key, host.KeyClientStorePrefix) && bytes.Contains(kvA.Key, []byte(host.KeyConsensusStatePrefix)):
+ consensusStateA := cdc.MustUnmarshalConsensusState(kvA.Value)
+ consensusStateB := cdc.MustUnmarshalConsensusState(kvB.Value)
+ return fmt.Sprintf("ConsensusState A: %v\nConsensusState B: %v", consensusStateA, consensusStateB), true
+
+ default:
+ return "", false
+ }
+}
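+
+// Example (illustrative): composing NewDecodeStore into a store decoder for
+// simulations, assuming the framework expects a func(kvA, kvB kv.Pair) string:
+//
+//   decoder := func(kvA, kvB kv.Pair) string {
+//       if res, found := NewDecodeStore(app.IBCKeeper.ClientKeeper, kvA, kvB); found {
+//           return res
+//       }
+//       panic(fmt.Sprintf("invalid %s key prefix: %s", host.ModuleName, kvA.Key))
+//   }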
diff --git a/core/02-client/simulation/decoder_test.go b/core/02-client/simulation/decoder_test.go
new file mode 100644
index 00000000..095834ba
--- /dev/null
+++ b/core/02-client/simulation/decoder_test.go
@@ -0,0 +1,70 @@
+package simulation_test
+
+import (
+ "fmt"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/cosmos/cosmos-sdk/simapp"
+ "github.com/cosmos/cosmos-sdk/types/kv"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/simulation"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+ ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
+)
+
+func TestDecodeStore(t *testing.T) {
+ app := simapp.Setup(false)
+ clientID := "clientidone"
+
+ height := types.NewHeight(0, 10)
+
+ clientState := &ibctmtypes.ClientState{
+ FrozenHeight: height,
+ }
+
+ consState := &ibctmtypes.ConsensusState{
+ Timestamp: time.Now().UTC(),
+ }
+
+ kvPairs := kv.Pairs{
+ Pairs: []kv.Pair{
+ {
+ Key: host.FullClientStateKey(clientID),
+ Value: app.IBCKeeper.ClientKeeper.MustMarshalClientState(clientState),
+ },
+ {
+ Key: host.FullConsensusStateKey(clientID, height),
+ Value: app.IBCKeeper.ClientKeeper.MustMarshalConsensusState(consState),
+ },
+ {
+ Key: []byte{0x99},
+ Value: []byte{0x99},
+ },
+ },
+ }
+ tests := []struct {
+ name string
+ expectedLog string
+ }{
+ {"ClientState", fmt.Sprintf("ClientState A: %v\nClientState B: %v", clientState, clientState)},
+ {"ConsensusState", fmt.Sprintf("ConsensusState A: %v\nConsensusState B: %v", consState, consState)},
+ {"other", ""},
+ }
+
+ for i, tt := range tests {
+ i, tt := i, tt
+ t.Run(tt.name, func(t *testing.T) {
+ res, found := simulation.NewDecodeStore(app.IBCKeeper.ClientKeeper, kvPairs.Pairs[i], kvPairs.Pairs[i])
+ if i == len(tests)-1 {
+ require.False(t, found, string(kvPairs.Pairs[i].Key))
+ require.Empty(t, res, string(kvPairs.Pairs[i].Key))
+ } else {
+ require.True(t, found, string(kvPairs.Pairs[i].Key))
+ require.Equal(t, tt.expectedLog, res, string(kvPairs.Pairs[i].Key))
+ }
+ })
+ }
+}
diff --git a/core/02-client/simulation/genesis.go b/core/02-client/simulation/genesis.go
new file mode 100644
index 00000000..2f231970
--- /dev/null
+++ b/core/02-client/simulation/genesis.go
@@ -0,0 +1,13 @@
+package simulation
+
+import (
+ "math/rand"
+
+ simtypes "github.com/cosmos/cosmos-sdk/types/simulation"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+)
+
+// GenClientGenesis returns the default client genesis state.
+func GenClientGenesis(_ *rand.Rand, _ []simtypes.Account) types.GenesisState {
+ return types.DefaultGenesisState()
+}
diff --git a/core/02-client/types/client.go b/core/02-client/types/client.go
new file mode 100644
index 00000000..6d51828a
--- /dev/null
+++ b/core/02-client/types/client.go
@@ -0,0 +1,111 @@
+package types
+
+import (
+ "fmt"
+ "math"
+ "sort"
+ "strings"
+
+ proto "github.com/gogo/protobuf/proto"
+
+ codectypes "github.com/cosmos/cosmos-sdk/codec/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+)
+
+var (
+ _ codectypes.UnpackInterfacesMessage = IdentifiedClientState{}
+ _ codectypes.UnpackInterfacesMessage = ConsensusStateWithHeight{}
+)
+
+// NewIdentifiedClientState creates a new IdentifiedClientState instance
+func NewIdentifiedClientState(clientID string, clientState exported.ClientState) IdentifiedClientState {
+ msg, ok := clientState.(proto.Message)
+ if !ok {
+ panic(fmt.Errorf("cannot proto marshal %T", clientState))
+ }
+
+ anyClientState, err := codectypes.NewAnyWithValue(msg)
+ if err != nil {
+ panic(err)
+ }
+
+ return IdentifiedClientState{
+ ClientId: clientID,
+ ClientState: anyClientState,
+ }
+}
+
+// UnpackInterfaces implements UnpackInterfacesMesssage.UnpackInterfaces
+func (ics IdentifiedClientState) UnpackInterfaces(unpacker codectypes.AnyUnpacker) error {
+ return unpacker.UnpackAny(ics.ClientState, new(exported.ClientState))
+}
+
+var _ sort.Interface = IdentifiedClientStates{}
+
+// IdentifiedClientStates defines a slice of IdentifiedClientState that supports the sort interface
+type IdentifiedClientStates []IdentifiedClientState
+
+// Len implements sort.Interface
+func (ics IdentifiedClientStates) Len() int { return len(ics) }
+
+// Less implements sort.Interface
+func (ics IdentifiedClientStates) Less(i, j int) bool { return ics[i].ClientId < ics[j].ClientId }
+
+// Swap implements sort.Interface
+func (ics IdentifiedClientStates) Swap(i, j int) { ics[i], ics[j] = ics[j], ics[i] }
+
+// Sort is a helper function to sort the set of IdentifiedClientStates in place
+func (ics IdentifiedClientStates) Sort() IdentifiedClientStates {
+ sort.Sort(ics)
+ return ics
+}
+
+// NewConsensusStateWithHeight creates a new ConsensusStateWithHeight instance
+func NewConsensusStateWithHeight(height Height, consensusState exported.ConsensusState) ConsensusStateWithHeight {
+ msg, ok := consensusState.(proto.Message)
+ if !ok {
+ panic(fmt.Errorf("cannot proto marshal %T", consensusState))
+ }
+
+ anyConsensusState, err := codectypes.NewAnyWithValue(msg)
+ if err != nil {
+ panic(err)
+ }
+
+ return ConsensusStateWithHeight{
+ Height: height,
+ ConsensusState: anyConsensusState,
+ }
+}
+
+// UnpackInterfaces implements UnpackInterfacesMesssage.UnpackInterfaces
+func (cswh ConsensusStateWithHeight) UnpackInterfaces(unpacker codectypes.AnyUnpacker) error {
+ return unpacker.UnpackAny(cswh.ConsensusState, new(exported.ConsensusState))
+}
+
+// ValidateClientType validates the client type. It cannot be blank or empty. It must be a valid
+// client identifier when used with '0' or the maximum uint64 as the sequence.
+func ValidateClientType(clientType string) error {
+ if strings.TrimSpace(clientType) == "" {
+ return sdkerrors.Wrap(ErrInvalidClientType, "client type cannot be blank")
+ }
+
+ smallestPossibleClientID := FormatClientIdentifier(clientType, 0)
+ largestPossibleClientID := FormatClientIdentifier(clientType, uint64(math.MaxUint64))
+
+ // IsValidClientID will check client type format and if the sequence is a uint64
+ if !IsValidClientID(smallestPossibleClientID) {
+ return sdkerrors.Wrap(ErrInvalidClientType, "invalid client type format")
+ }
+
+ if err := host.ClientIdentifierValidator(smallestPossibleClientID); err != nil {
+ return sdkerrors.Wrap(err, "client type results in smallest client identifier being invalid")
+ }
+ if err := host.ClientIdentifierValidator(largestPossibleClientID); err != nil {
+ return sdkerrors.Wrap(err, "client type results in largest client identifier being invalid")
+ }
+
+ return nil
+}
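A minimal usage sketch (not part of this patch), using the valid and invalid client types exercised by the tests further below; the import alias is an assumption:

package main

import (
	"fmt"

	clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
)

func main() {
	// A short client type passes: both "tendermint-0" and
	// "tendermint-18446744073709551615" are valid client identifiers.
	fmt.Println(clienttypes.ValidateClientType("tendermint")) // <nil>

	// An overly long type fails because appending "-<max uint64>" pushes the
	// generated identifier past the maximum identifier length.
	err := clienttypes.ValidateClientType("tenderminttenderminttenderminttenderminttendermintt")
	fmt.Println(err != nil) // true
}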
diff --git a/core/02-client/types/client.pb.go b/core/02-client/types/client.pb.go
new file mode 100644
index 00000000..b63fce16
--- /dev/null
+++ b/core/02-client/types/client.pb.go
@@ -0,0 +1,1598 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: ibcgo/core/client/v1/client.proto
+
+package types
+
+import (
+ fmt "fmt"
+ types "github.com/cosmos/cosmos-sdk/codec/types"
+ _ "github.com/gogo/protobuf/gogoproto"
+ proto "github.com/gogo/protobuf/proto"
+ io "io"
+ math "math"
+ math_bits "math/bits"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+// IdentifiedClientState defines a client state with an additional client
+// identifier field.
+type IdentifiedClientState struct {
+ // client identifier
+ ClientId string `protobuf:"bytes,1,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty" yaml:"client_id"`
+ // client state
+ ClientState *types.Any `protobuf:"bytes,2,opt,name=client_state,json=clientState,proto3" json:"client_state,omitempty" yaml:"client_state"`
+}
+
+func (m *IdentifiedClientState) Reset() { *m = IdentifiedClientState{} }
+func (m *IdentifiedClientState) String() string { return proto.CompactTextString(m) }
+func (*IdentifiedClientState) ProtoMessage() {}
+func (*IdentifiedClientState) Descriptor() ([]byte, []int) {
+ return fileDescriptor_3cc2cf764ecc47af, []int{0}
+}
+func (m *IdentifiedClientState) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *IdentifiedClientState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_IdentifiedClientState.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *IdentifiedClientState) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_IdentifiedClientState.Merge(m, src)
+}
+func (m *IdentifiedClientState) XXX_Size() int {
+ return m.Size()
+}
+func (m *IdentifiedClientState) XXX_DiscardUnknown() {
+ xxx_messageInfo_IdentifiedClientState.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_IdentifiedClientState proto.InternalMessageInfo
+
+func (m *IdentifiedClientState) GetClientId() string {
+ if m != nil {
+ return m.ClientId
+ }
+ return ""
+}
+
+func (m *IdentifiedClientState) GetClientState() *types.Any {
+ if m != nil {
+ return m.ClientState
+ }
+ return nil
+}
+
+// ConsensusStateWithHeight defines a consensus state with an additional height
+// field.
+type ConsensusStateWithHeight struct {
+ // consensus state height
+ Height Height `protobuf:"bytes,1,opt,name=height,proto3" json:"height"`
+ // consensus state
+	ConsensusState *types.Any `protobuf:"bytes,2,opt,name=consensus_state,json=consensusState,proto3" json:"consensus_state,omitempty" yaml:"consensus_state"`
+}
+
+func (m *ConsensusStateWithHeight) Reset() { *m = ConsensusStateWithHeight{} }
+func (m *ConsensusStateWithHeight) String() string { return proto.CompactTextString(m) }
+func (*ConsensusStateWithHeight) ProtoMessage() {}
+func (*ConsensusStateWithHeight) Descriptor() ([]byte, []int) {
+ return fileDescriptor_3cc2cf764ecc47af, []int{1}
+}
+func (m *ConsensusStateWithHeight) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ConsensusStateWithHeight) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_ConsensusStateWithHeight.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *ConsensusStateWithHeight) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ConsensusStateWithHeight.Merge(m, src)
+}
+func (m *ConsensusStateWithHeight) XXX_Size() int {
+ return m.Size()
+}
+func (m *ConsensusStateWithHeight) XXX_DiscardUnknown() {
+ xxx_messageInfo_ConsensusStateWithHeight.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ConsensusStateWithHeight proto.InternalMessageInfo
+
+func (m *ConsensusStateWithHeight) GetHeight() Height {
+ if m != nil {
+ return m.Height
+ }
+ return Height{}
+}
+
+func (m *ConsensusStateWithHeight) GetConsensusState() *types.Any {
+ if m != nil {
+ return m.ConsensusState
+ }
+ return nil
+}
+
+// ClientConsensusStates defines all the stored consensus states for a given
+// client.
+type ClientConsensusStates struct {
+ // client identifier
+ ClientId string `protobuf:"bytes,1,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty" yaml:"client_id"`
+ // consensus states and their heights associated with the client
+ ConsensusStates []ConsensusStateWithHeight `protobuf:"bytes,2,rep,name=consensus_states,json=consensusStates,proto3" json:"consensus_states" yaml:"consensus_states"`
+}
+
+func (m *ClientConsensusStates) Reset() { *m = ClientConsensusStates{} }
+func (m *ClientConsensusStates) String() string { return proto.CompactTextString(m) }
+func (*ClientConsensusStates) ProtoMessage() {}
+func (*ClientConsensusStates) Descriptor() ([]byte, []int) {
+ return fileDescriptor_3cc2cf764ecc47af, []int{2}
+}
+func (m *ClientConsensusStates) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ClientConsensusStates) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_ClientConsensusStates.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *ClientConsensusStates) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ClientConsensusStates.Merge(m, src)
+}
+func (m *ClientConsensusStates) XXX_Size() int {
+ return m.Size()
+}
+func (m *ClientConsensusStates) XXX_DiscardUnknown() {
+ xxx_messageInfo_ClientConsensusStates.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ClientConsensusStates proto.InternalMessageInfo
+
+func (m *ClientConsensusStates) GetClientId() string {
+ if m != nil {
+ return m.ClientId
+ }
+ return ""
+}
+
+func (m *ClientConsensusStates) GetConsensusStates() []ConsensusStateWithHeight {
+ if m != nil {
+ return m.ConsensusStates
+ }
+ return nil
+}
+
+// ClientUpdateProposal is a governance proposal. If it passes, the substitute
+// client's consensus states starting from the 'initial height' are copied over
+// to the subject client. The proposal handler may fail if the subject and the
+// substitute do not match in client and chain parameters (with the exception
+// of latest height, frozen height, and chain-id). The updated client must also
+// be valid (it cannot be expired).
+type ClientUpdateProposal struct {
+ // the title of the update proposal
+ Title string `protobuf:"bytes,1,opt,name=title,proto3" json:"title,omitempty"`
+ // the description of the proposal
+ Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+ // the client identifier for the client to be updated if the proposal passes
+ SubjectClientId string `protobuf:"bytes,3,opt,name=subject_client_id,json=subjectClientId,proto3" json:"subject_client_id,omitempty" yaml:"subject_client_id"`
+ // the substitute client identifier for the client standing in for the subject
+ // client
+	SubstituteClientId string `protobuf:"bytes,4,opt,name=substitute_client_id,json=substituteClientId,proto3" json:"substitute_client_id,omitempty" yaml:"substitute_client_id"`
+	// the initial height to copy consensus states from the substitute to the
+ // subject
+ InitialHeight Height `protobuf:"bytes,5,opt,name=initial_height,json=initialHeight,proto3" json:"initial_height" yaml:"initial_height"`
+}
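As a rough sketch (not part of this patch) of what such a proposal carries, assuming hypothetical client identifiers and heights; whether a constructor helper exists elsewhere in the package is not shown here:

package main

import (
	"fmt"

	clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
)

func main() {
	// Hypothetical identifiers; on a live chain these would name an expired
	// (subject) client and a healthy (substitute) client of the same type.
	proposal := clienttypes.ClientUpdateProposal{
		Title:              "Update expired tendermint client",
		Description:        "Copy consensus states from the substitute client",
		SubjectClientId:    "07-tendermint-0",
		SubstituteClientId: "07-tendermint-1",
		// Consensus states at or above this height are copied from the
		// substitute to the subject if the proposal passes.
		InitialHeight: clienttypes.Height{RevisionNumber: 1, RevisionHeight: 100},
	}
	fmt.Println(proposal.Title, proposal.SubjectClientId, proposal.SubstituteClientId)
}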
+
+func (m *ClientUpdateProposal) Reset() { *m = ClientUpdateProposal{} }
+func (m *ClientUpdateProposal) String() string { return proto.CompactTextString(m) }
+func (*ClientUpdateProposal) ProtoMessage() {}
+func (*ClientUpdateProposal) Descriptor() ([]byte, []int) {
+ return fileDescriptor_3cc2cf764ecc47af, []int{3}
+}
+func (m *ClientUpdateProposal) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ClientUpdateProposal) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_ClientUpdateProposal.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *ClientUpdateProposal) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ClientUpdateProposal.Merge(m, src)
+}
+func (m *ClientUpdateProposal) XXX_Size() int {
+ return m.Size()
+}
+func (m *ClientUpdateProposal) XXX_DiscardUnknown() {
+ xxx_messageInfo_ClientUpdateProposal.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ClientUpdateProposal proto.InternalMessageInfo
+
+// Height is a monotonically increasing data type
+// that can be compared against another Height for the purposes of updating and
+// freezing clients
+//
+// Normally the RevisionHeight is incremented at each height while keeping
+// RevisionNumber the same. However, some consensus algorithms may choose to
+// reset the height in certain conditions, e.g. hard forks or state-machine
+// breaking changes. In these cases, the RevisionNumber is incremented so that
+// height continues to be monotonically increasing even as the RevisionHeight
+// gets reset.
+type Height struct {
+ // the revision that the client is currently on
+ RevisionNumber uint64 `protobuf:"varint,1,opt,name=revision_number,json=revisionNumber,proto3" json:"revision_number,omitempty" yaml:"revision_number"`
+ // the height within the given revision
+ RevisionHeight uint64 `protobuf:"varint,2,opt,name=revision_height,json=revisionHeight,proto3" json:"revision_height,omitempty" yaml:"revision_height"`
+}
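To make the revision semantics concrete, a standalone sketch (not part of this patch) of the ordering the comment describes; any comparison helpers the package defines elsewhere are not shown here, so this is illustrative only:

package main

import "fmt"

// Height mirrors the generated struct above; reproduced here only so the
// sketch compiles on its own.
type Height struct {
	RevisionNumber uint64
	RevisionHeight uint64
}

// lessThan implements the ordering the comment describes: compare revision
// numbers first, then revision heights within the same revision.
func lessThan(a, b Height) bool {
	if a.RevisionNumber != b.RevisionNumber {
		return a.RevisionNumber < b.RevisionNumber
	}
	return a.RevisionHeight < b.RevisionHeight
}

func main() {
	preFork := Height{RevisionNumber: 0, RevisionHeight: 5000}
	postFork := Height{RevisionNumber: 1, RevisionHeight: 10}

	// Even though the raw height reset at the fork, the bumped revision
	// number keeps the ordering monotonic.
	fmt.Println(lessThan(preFork, postFork)) // true
}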
+
+func (m *Height) Reset() { *m = Height{} }
+func (*Height) ProtoMessage() {}
+func (*Height) Descriptor() ([]byte, []int) {
+ return fileDescriptor_3cc2cf764ecc47af, []int{4}
+}
+func (m *Height) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Height) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_Height.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *Height) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Height.Merge(m, src)
+}
+func (m *Height) XXX_Size() int {
+ return m.Size()
+}
+func (m *Height) XXX_DiscardUnknown() {
+ xxx_messageInfo_Height.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Height proto.InternalMessageInfo
+
+// Params defines the set of IBC light client parameters.
+type Params struct {
+ // allowed_clients defines the list of allowed client state types.
+ AllowedClients []string `protobuf:"bytes,1,rep,name=allowed_clients,json=allowedClients,proto3" json:"allowed_clients,omitempty" yaml:"allowed_clients"`
+}
+
+func (m *Params) Reset() { *m = Params{} }
+func (m *Params) String() string { return proto.CompactTextString(m) }
+func (*Params) ProtoMessage() {}
+func (*Params) Descriptor() ([]byte, []int) {
+ return fileDescriptor_3cc2cf764ecc47af, []int{5}
+}
+func (m *Params) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Params) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_Params.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *Params) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Params.Merge(m, src)
+}
+func (m *Params) XXX_Size() int {
+ return m.Size()
+}
+func (m *Params) XXX_DiscardUnknown() {
+ xxx_messageInfo_Params.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Params proto.InternalMessageInfo
+
+func (m *Params) GetAllowedClients() []string {
+ if m != nil {
+ return m.AllowedClients
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterType((*IdentifiedClientState)(nil), "ibcgo.core.client.v1.IdentifiedClientState")
+ proto.RegisterType((*ConsensusStateWithHeight)(nil), "ibcgo.core.client.v1.ConsensusStateWithHeight")
+ proto.RegisterType((*ClientConsensusStates)(nil), "ibcgo.core.client.v1.ClientConsensusStates")
+ proto.RegisterType((*ClientUpdateProposal)(nil), "ibcgo.core.client.v1.ClientUpdateProposal")
+ proto.RegisterType((*Height)(nil), "ibcgo.core.client.v1.Height")
+ proto.RegisterType((*Params)(nil), "ibcgo.core.client.v1.Params")
+}
+
+func init() { proto.RegisterFile("ibcgo/core/client/v1/client.proto", fileDescriptor_3cc2cf764ecc47af) }
+
+var fileDescriptor_3cc2cf764ecc47af = []byte{
+ // 636 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0x4f, 0x6f, 0xd3, 0x30,
+ 0x18, 0xc6, 0x9b, 0xae, 0xab, 0x56, 0x17, 0xda, 0x11, 0x52, 0xd6, 0x8d, 0xd1, 0x14, 0x9f, 0x7a,
+ 0x59, 0xc2, 0xca, 0x6d, 0x37, 0xda, 0x03, 0xdb, 0x01, 0x34, 0x8c, 0x10, 0x88, 0x4b, 0x95, 0x3f,
+ 0x5e, 0x6a, 0x94, 0xc6, 0x55, 0xec, 0x0c, 0x95, 0x4f, 0xc0, 0x91, 0x23, 0x07, 0x0e, 0x7c, 0x04,
+ 0x3e, 0x05, 0xda, 0x71, 0x17, 0x24, 0x4e, 0x11, 0xda, 0xbe, 0x41, 0x3e, 0x01, 0x8a, 0xed, 0x6c,
+ 0x6b, 0xd8, 0xa4, 0x89, 0x9b, 0xf3, 0xfa, 0xf1, 0xef, 0x7d, 0xde, 0x47, 0x8e, 0xc1, 0x63, 0xe2,
+ 0x7a, 0x01, 0xb5, 0x3d, 0x1a, 0x63, 0xdb, 0x0b, 0x09, 0x8e, 0xb8, 0x7d, 0xbc, 0xab, 0x56, 0xd6,
+ 0x3c, 0xa6, 0x9c, 0xea, 0x86, 0x90, 0x58, 0xb9, 0xc4, 0x52, 0x1b, 0xc7, 0xbb, 0x5b, 0x46, 0x40,
+ 0x03, 0x2a, 0x04, 0x76, 0xbe, 0x92, 0xda, 0xad, 0xcd, 0x80, 0xd2, 0x20, 0xc4, 0xb6, 0xf8, 0x72,
+ 0x93, 0x23, 0xdb, 0x89, 0x16, 0x72, 0x0b, 0x7e, 0xd3, 0x40, 0xe7, 0xc0, 0xc7, 0x11, 0x27, 0x47,
+ 0x04, 0xfb, 0x63, 0x01, 0x7a, 0xcd, 0x1d, 0x8e, 0xf5, 0x5d, 0xd0, 0x90, 0xdc, 0x09, 0xf1, 0xbb,
+ 0x5a, 0x5f, 0x1b, 0x34, 0x46, 0x46, 0x96, 0x9a, 0xeb, 0x0b, 0x67, 0x16, 0xee, 0xc1, 0x8b, 0x2d,
+ 0x88, 0xd6, 0xe4, 0xfa, 0xc0, 0xd7, 0x0f, 0xc1, 0x1d, 0x55, 0x67, 0x39, 0xa2, 0x5b, 0xed, 0x6b,
+ 0x83, 0xe6, 0xd0, 0xb0, 0x64, 0x7b, 0xab, 0x68, 0x6f, 0x3d, 0x8b, 0x16, 0xa3, 0x8d, 0x2c, 0x35,
+ 0xef, 0x2f, 0xb1, 0xc4, 0x19, 0x88, 0x9a, 0xde, 0xa5, 0x09, 0xf8, 0x43, 0x03, 0xdd, 0x31, 0x8d,
+ 0x18, 0x8e, 0x58, 0xc2, 0x44, 0xe9, 0x2d, 0xe1, 0xd3, 0x7d, 0x4c, 0x82, 0x29, 0xd7, 0xf7, 0x40,
+ 0x7d, 0x2a, 0x56, 0xc2, 0x5e, 0x73, 0xb8, 0x6d, 0x5d, 0x97, 0x89, 0x25, 0xd5, 0xa3, 0xda, 0x49,
+ 0x6a, 0x56, 0x90, 0x3a, 0xa1, 0xbf, 0x03, 0x6d, 0xaf, 0xe0, 0xde, 0xc2, 0xed, 0x66, 0x96, 0x9a,
+ 0x9d, 0xdc, 0x2d, 0x2c, 0x9d, 0x82, 0xa8, 0xe5, 0x2d, 0xf9, 0x83, 0x3f, 0x35, 0xd0, 0x91, 0x39,
+ 0x2e, 0x1b, 0x67, 0xff, 0x93, 0xe8, 0x27, 0xb0, 0x5e, 0x6a, 0xc8, 0xba, 0xd5, 0xfe, 0xca, 0xa0,
+ 0x39, 0xb4, 0xae, 0x1f, 0xf6, 0xa6, 0xb0, 0x46, 0x66, 0x3e, 0x7e, 0x96, 0x9a, 0x1b, 0xaa, 0x5b,
+ 0x89, 0x0a, 0x51, 0x7b, 0x79, 0x0e, 0x06, 0x7f, 0x55, 0x81, 0x21, 0x07, 0x79, 0x33, 0xf7, 0x1d,
+ 0x8e, 0x0f, 0x63, 0x3a, 0xa7, 0xcc, 0x09, 0x75, 0x03, 0xac, 0x72, 0xc2, 0x43, 0x2c, 0x67, 0x40,
+ 0xf2, 0x43, 0xef, 0x83, 0xa6, 0x8f, 0x99, 0x17, 0x93, 0x39, 0x27, 0x34, 0x12, 0x69, 0x36, 0xd0,
+ 0xd5, 0x92, 0xbe, 0x0f, 0xee, 0xb1, 0xc4, 0xfd, 0x80, 0x3d, 0x3e, 0xb9, 0xcc, 0x61, 0x45, 0xe4,
+ 0xb0, 0x9d, 0xa5, 0x66, 0x57, 0x3a, 0xfb, 0x47, 0x02, 0x51, 0x5b, 0xd5, 0xc6, 0x45, 0x2c, 0xaf,
+ 0x80, 0xc1, 0x12, 0x97, 0x71, 0xc2, 0x13, 0x8e, 0xaf, 0xc0, 0x6a, 0x02, 0x66, 0x66, 0xa9, 0xf9,
+ 0xb0, 0x80, 0x31, 0xb7, 0xac, 0x82, 0x48, 0xbf, 0x3c, 0x7c, 0x81, 0x74, 0x41, 0x8b, 0x44, 0x84,
+ 0x13, 0x27, 0x9c, 0xa8, 0x4b, 0xb5, 0x7a, 0x8b, 0x4b, 0xf5, 0x48, 0xa5, 0xda, 0x91, 0xed, 0x96,
+ 0x09, 0x10, 0xdd, 0x55, 0x05, 0xa9, 0xde, 0xab, 0x7d, 0xfe, 0x6e, 0x56, 0xf2, 0x5f, 0xae, 0xae,
+ 0x6e, 0xf0, 0x18, 0xb4, 0x63, 0x7c, 0x4c, 0x18, 0xa1, 0xd1, 0x24, 0x4a, 0x66, 0x2e, 0x8e, 0x45,
+ 0xa6, 0xb5, 0xd1, 0x56, 0x96, 0x9a, 0x0f, 0x24, 0xb3, 0x24, 0x80, 0xa8, 0x55, 0x54, 0x5e, 0x8a,
+ 0xc2, 0x12, 0x44, 0x59, 0xaf, 0xde, 0x08, 0x29, 0x9c, 0x5d, 0x40, 0x94, 0xb5, 0xb5, 0xdc, 0xda,
+ 0xd7, 0xdc, 0xde, 0x0b, 0x50, 0x3f, 0x74, 0x62, 0x67, 0xc6, 0x72, 0xb0, 0x13, 0x86, 0xf4, 0x23,
+ 0xf6, 0x55, 0x78, 0xac, 0xab, 0xf5, 0x57, 0x06, 0x8d, 0xab, 0xe0, 0x92, 0x00, 0xa2, 0x96, 0xaa,
+ 0xc8, 0x60, 0xd9, 0xe8, 0xf9, 0xc9, 0x59, 0x4f, 0x3b, 0x3d, 0xeb, 0x69, 0x7f, 0xce, 0x7a, 0xda,
+ 0x97, 0xf3, 0x5e, 0xe5, 0xf4, 0xbc, 0x57, 0xf9, 0x7d, 0xde, 0xab, 0xbc, 0xdf, 0x09, 0x08, 0x9f,
+ 0x26, 0xae, 0xe5, 0xd1, 0x99, 0xed, 0x51, 0x36, 0xa3, 0xcc, 0x26, 0xae, 0xb7, 0x53, 0xbc, 0x7b,
+ 0x4f, 0x86, 0x3b, 0xea, 0xe9, 0xe3, 0x8b, 0x39, 0x66, 0x6e, 0x5d, 0xfc, 0x90, 0x4f, 0xff, 0x06,
+ 0x00, 0x00, 0xff, 0xff, 0x47, 0x7f, 0x5c, 0x7c, 0x1c, 0x05, 0x00, 0x00,
+}
+
+func (m *IdentifiedClientState) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *IdentifiedClientState) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *IdentifiedClientState) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.ClientState != nil {
+ {
+ size, err := m.ClientState.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintClient(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.ClientId) > 0 {
+ i -= len(m.ClientId)
+ copy(dAtA[i:], m.ClientId)
+ i = encodeVarintClient(dAtA, i, uint64(len(m.ClientId)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ConsensusStateWithHeight) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ConsensusStateWithHeight) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ConsensusStateWithHeight) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.ConsensusState != nil {
+ {
+ size, err := m.ConsensusState.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintClient(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ {
+ size, err := m.Height.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintClient(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ClientConsensusStates) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ClientConsensusStates) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ClientConsensusStates) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.ConsensusStates) > 0 {
+ for iNdEx := len(m.ConsensusStates) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.ConsensusStates[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintClient(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if len(m.ClientId) > 0 {
+ i -= len(m.ClientId)
+ copy(dAtA[i:], m.ClientId)
+ i = encodeVarintClient(dAtA, i, uint64(len(m.ClientId)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ClientUpdateProposal) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ClientUpdateProposal) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ClientUpdateProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.InitialHeight.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintClient(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2a
+ if len(m.SubstituteClientId) > 0 {
+ i -= len(m.SubstituteClientId)
+ copy(dAtA[i:], m.SubstituteClientId)
+ i = encodeVarintClient(dAtA, i, uint64(len(m.SubstituteClientId)))
+ i--
+ dAtA[i] = 0x22
+ }
+ if len(m.SubjectClientId) > 0 {
+ i -= len(m.SubjectClientId)
+ copy(dAtA[i:], m.SubjectClientId)
+ i = encodeVarintClient(dAtA, i, uint64(len(m.SubjectClientId)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if len(m.Description) > 0 {
+ i -= len(m.Description)
+ copy(dAtA[i:], m.Description)
+ i = encodeVarintClient(dAtA, i, uint64(len(m.Description)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Title) > 0 {
+ i -= len(m.Title)
+ copy(dAtA[i:], m.Title)
+ i = encodeVarintClient(dAtA, i, uint64(len(m.Title)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Height) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Height) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Height) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.RevisionHeight != 0 {
+ i = encodeVarintClient(dAtA, i, uint64(m.RevisionHeight))
+ i--
+ dAtA[i] = 0x10
+ }
+ if m.RevisionNumber != 0 {
+ i = encodeVarintClient(dAtA, i, uint64(m.RevisionNumber))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Params) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Params) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Params) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.AllowedClients) > 0 {
+ for iNdEx := len(m.AllowedClients) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.AllowedClients[iNdEx])
+ copy(dAtA[i:], m.AllowedClients[iNdEx])
+ i = encodeVarintClient(dAtA, i, uint64(len(m.AllowedClients[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintClient(dAtA []byte, offset int, v uint64) int {
+ offset -= sovClient(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *IdentifiedClientState) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.ClientId)
+ if l > 0 {
+ n += 1 + l + sovClient(uint64(l))
+ }
+ if m.ClientState != nil {
+ l = m.ClientState.Size()
+ n += 1 + l + sovClient(uint64(l))
+ }
+ return n
+}
+
+func (m *ConsensusStateWithHeight) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.Height.Size()
+ n += 1 + l + sovClient(uint64(l))
+ if m.ConsensusState != nil {
+ l = m.ConsensusState.Size()
+ n += 1 + l + sovClient(uint64(l))
+ }
+ return n
+}
+
+func (m *ClientConsensusStates) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.ClientId)
+ if l > 0 {
+ n += 1 + l + sovClient(uint64(l))
+ }
+ if len(m.ConsensusStates) > 0 {
+ for _, e := range m.ConsensusStates {
+ l = e.Size()
+ n += 1 + l + sovClient(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ClientUpdateProposal) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Title)
+ if l > 0 {
+ n += 1 + l + sovClient(uint64(l))
+ }
+ l = len(m.Description)
+ if l > 0 {
+ n += 1 + l + sovClient(uint64(l))
+ }
+ l = len(m.SubjectClientId)
+ if l > 0 {
+ n += 1 + l + sovClient(uint64(l))
+ }
+ l = len(m.SubstituteClientId)
+ if l > 0 {
+ n += 1 + l + sovClient(uint64(l))
+ }
+ l = m.InitialHeight.Size()
+ n += 1 + l + sovClient(uint64(l))
+ return n
+}
+
+func (m *Height) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.RevisionNumber != 0 {
+ n += 1 + sovClient(uint64(m.RevisionNumber))
+ }
+ if m.RevisionHeight != 0 {
+ n += 1 + sovClient(uint64(m.RevisionHeight))
+ }
+ return n
+}
+
+func (m *Params) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.AllowedClients) > 0 {
+ for _, s := range m.AllowedClients {
+ l = len(s)
+ n += 1 + l + sovClient(uint64(l))
+ }
+ }
+ return n
+}
+
+func sovClient(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozClient(x uint64) (n int) {
+ return sovClient(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
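The two size helpers above compute how many bytes a value occupies as a plain protobuf varint (sovClient) and as a zigzag-encoded varint (sozClient). A standalone sketch (not part of this patch) of the same arithmetic:

package main

import (
	"fmt"
	"math/bits"
)

// sov mirrors the generated sovClient helper: the number of bytes a value
// occupies as a protobuf varint (7 payload bits per byte).
func sov(x uint64) int {
	return (bits.Len64(x|1) + 6) / 7
}

// soz mirrors sozClient for a signed input: varint size after zigzag
// encoding, so small negative numbers stay small on the wire.
func soz(x int64) int {
	return sov(uint64((x << 1) ^ (x >> 63)))
}

func main() {
	fmt.Println(sov(1))   // 1 byte
	fmt.Println(sov(300)) // 2 bytes: varint 300 encodes as 0xAC 0x02
	fmt.Println(soz(-1))  // 1 byte after zigzag (maps -1 to 1)
}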
+func (m *IdentifiedClientState) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowClient
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: IdentifiedClientState: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: IdentifiedClientState: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClientId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowClient
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthClient
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthClient
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ClientId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClientState", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowClient
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthClient
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthClient
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ClientState == nil {
+ m.ClientState = &types.Any{}
+ }
+ if err := m.ClientState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipClient(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthClient
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ConsensusStateWithHeight) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowClient
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ConsensusStateWithHeight: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ConsensusStateWithHeight: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowClient
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthClient
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthClient
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Height.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ConsensusState", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowClient
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthClient
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthClient
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ConsensusState == nil {
+ m.ConsensusState = &types.Any{}
+ }
+ if err := m.ConsensusState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipClient(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthClient
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ClientConsensusStates) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowClient
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ClientConsensusStates: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ClientConsensusStates: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClientId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowClient
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthClient
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthClient
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ClientId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ConsensusStates", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowClient
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthClient
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthClient
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ConsensusStates = append(m.ConsensusStates, ConsensusStateWithHeight{})
+ if err := m.ConsensusStates[len(m.ConsensusStates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipClient(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthClient
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ClientUpdateProposal) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowClient
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ClientUpdateProposal: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ClientUpdateProposal: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Title", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowClient
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthClient
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthClient
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Title = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowClient
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthClient
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthClient
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Description = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SubjectClientId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowClient
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthClient
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthClient
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.SubjectClientId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SubstituteClientId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowClient
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthClient
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthClient
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.SubstituteClientId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field InitialHeight", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowClient
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthClient
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthClient
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.InitialHeight.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipClient(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthClient
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Height) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowClient
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Height: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Height: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RevisionNumber", wireType)
+ }
+ m.RevisionNumber = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowClient
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.RevisionNumber |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RevisionHeight", wireType)
+ }
+ m.RevisionHeight = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowClient
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.RevisionHeight |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipClient(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthClient
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Params) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowClient
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Params: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Params: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AllowedClients", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowClient
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthClient
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthClient
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.AllowedClients = append(m.AllowedClients, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipClient(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthClient
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipClient(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowClient
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowClient
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowClient
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthClient
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupClient
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthClient
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthClient = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowClient = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupClient = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/core/02-client/types/client_test.go b/core/02-client/types/client_test.go
new file mode 100644
index 00000000..2dfd3967
--- /dev/null
+++ b/core/02-client/types/client_test.go
@@ -0,0 +1,87 @@
+package types_test
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+)
+
+func (suite *TypesTestSuite) TestMarshalConsensusStateWithHeight() {
+ var (
+ cswh types.ConsensusStateWithHeight
+ )
+
+ testCases := []struct {
+ name string
+ malleate func()
+ }{
+ {
+ "solo machine client", func() {
+ soloMachine := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "solomachine", "", 1)
+ cswh = types.NewConsensusStateWithHeight(types.NewHeight(0, soloMachine.Sequence), soloMachine.ConsensusState())
+ },
+ },
+ {
+ "tendermint client", func() {
+ clientA, _ := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+ clientState := suite.chainA.GetClientState(clientA)
+ consensusState, ok := suite.chainA.GetConsensusState(clientA, clientState.GetLatestHeight())
+ suite.Require().True(ok)
+
+ cswh = types.NewConsensusStateWithHeight(clientState.GetLatestHeight().(types.Height), consensusState)
+ },
+ },
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+
+ suite.Run(tc.name, func() {
+ suite.SetupTest()
+
+ tc.malleate()
+
+ cdc := suite.chainA.App.AppCodec()
+
+ // marshal message
+ bz, err := cdc.MarshalJSON(&cswh)
+ suite.Require().NoError(err)
+
+ // unmarshal message
+ newCswh := &types.ConsensusStateWithHeight{}
+ err = cdc.UnmarshalJSON(bz, newCswh)
+ suite.Require().NoError(err)
+ })
+ }
+}
+
+func TestValidateClientType(t *testing.T) {
+ testCases := []struct {
+ name string
+ clientType string
+ expPass bool
+ }{
+ {"valid", "tendermint", true},
+ {"valid solomachine", "solomachine-v1", true},
+ {"too large", "tenderminttenderminttenderminttenderminttendermintt", false},
+ {"too short", "t", false},
+ {"blank id", " ", false},
+ {"empty id", "", false},
+ {"ends with dash", "tendermint-", false},
+ }
+
+ for _, tc := range testCases {
+
+ err := types.ValidateClientType(tc.clientType)
+
+ if tc.expPass {
+ require.NoError(t, err, tc.name)
+ } else {
+ require.Error(t, err, tc.name)
+ }
+ }
+}
diff --git a/core/02-client/types/codec.go b/core/02-client/types/codec.go
new file mode 100644
index 00000000..59a15832
--- /dev/null
+++ b/core/02-client/types/codec.go
@@ -0,0 +1,188 @@
+package types
+
+import (
+ proto "github.com/gogo/protobuf/proto"
+
+ codectypes "github.com/cosmos/cosmos-sdk/codec/types"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ "github.com/cosmos/cosmos-sdk/types/msgservice"
+ govtypes "github.com/cosmos/cosmos-sdk/x/gov/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+)
+
+// RegisterInterfaces registers the client interfaces to protobuf Any.
+func RegisterInterfaces(registry codectypes.InterfaceRegistry) {
+ registry.RegisterInterface(
+ "ibc.core.client.v1.ClientState",
+ (*exported.ClientState)(nil),
+ )
+ registry.RegisterInterface(
+ "ibc.core.client.v1.ConsensusState",
+ (*exported.ConsensusState)(nil),
+ )
+ registry.RegisterInterface(
+ "ibc.core.client.v1.Header",
+ (*exported.Header)(nil),
+ )
+ registry.RegisterInterface(
+ "ibc.core.client.v1.Height",
+ (*exported.Height)(nil),
+ &Height{},
+ )
+ registry.RegisterInterface(
+ "ibc.core.client.v1.Misbehaviour",
+ (*exported.Misbehaviour)(nil),
+ )
+ registry.RegisterImplementations(
+ (*govtypes.Content)(nil),
+ &ClientUpdateProposal{},
+ )
+ registry.RegisterImplementations(
+ (*sdk.Msg)(nil),
+ &MsgCreateClient{},
+ &MsgUpdateClient{},
+ &MsgUpgradeClient{},
+ &MsgSubmitMisbehaviour{},
+ )
+
+ msgservice.RegisterMsgServiceDesc(registry, &_Msg_serviceDesc)
+}
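A minimal wiring sketch (not part of this patch); in an SDK app this registration normally happens as part of the app's encoding config, and the import paths mirror the ones used in this file:

package main

import (
	"fmt"

	"github.com/cosmos/cosmos-sdk/codec"
	codectypes "github.com/cosmos/cosmos-sdk/codec/types"
	clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
)

func main() {
	// Build an interface registry and register the 02-client interfaces so
	// that Any values carrying client/consensus states can be resolved.
	registry := codectypes.NewInterfaceRegistry()
	clienttypes.RegisterInterfaces(registry)

	// A proto codec backed by the registry can then marshal and unmarshal
	// the interface types registered above.
	cdc := codec.NewProtoCodec(registry)
	fmt.Println(cdc != nil)
}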
+
+// PackClientState constructs a new Any packed with the given client state value. It returns
+// an error if the client state can't be cast to a protobuf message or if the concrete
+// implementation is not registered to the protobuf codec.
+func PackClientState(clientState exported.ClientState) (*codectypes.Any, error) {
+ msg, ok := clientState.(proto.Message)
+ if !ok {
+ return nil, sdkerrors.Wrapf(sdkerrors.ErrPackAny, "cannot proto marshal %T", clientState)
+ }
+
+ anyClientState, err := codectypes.NewAnyWithValue(msg)
+ if err != nil {
+ return nil, sdkerrors.Wrap(sdkerrors.ErrPackAny, err.Error())
+ }
+
+ return anyClientState, nil
+}
+
+// UnpackClientState unpacks an Any into a ClientState. It returns an error if the
+// client state can't be unpacked into a ClientState.
+func UnpackClientState(any *codectypes.Any) (exported.ClientState, error) {
+ if any == nil {
+ return nil, sdkerrors.Wrap(sdkerrors.ErrUnpackAny, "protobuf Any message cannot be nil")
+ }
+
+ clientState, ok := any.GetCachedValue().(exported.ClientState)
+ if !ok {
+ return nil, sdkerrors.Wrapf(sdkerrors.ErrUnpackAny, "cannot unpack Any into ClientState %T", any)
+ }
+
+ return clientState, nil
+}
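A small sketch (not part of this patch) of the pack/unpack round trip; note that UnpackClientState relies on the Any's cached value, so it works for an Any built in-process via PackClientState, while an Any decoded from the wire must first be resolved through the interface registry:

package main

import (
	"fmt"

	clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
	"github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
)

// roundTrip packs a client state into an Any and immediately unpacks it.
func roundTrip(cs exported.ClientState) (exported.ClientState, error) {
	protoAny, err := clienttypes.PackClientState(cs)
	if err != nil {
		return nil, err
	}
	return clienttypes.UnpackClientState(protoAny)
}

func main() {
	// Passing nil exercises the error path: nil does not satisfy
	// proto.Message, so PackClientState rejects it.
	if _, err := roundTrip(nil); err != nil {
		fmt.Println("expected error:", err)
	}
}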
+
+// PackConsensusState constructs a new Any packed with the given consensus state value. It returns
+// an error if the consensus state can't be cast to a protobuf message or if the concrete
+// implementation is not registered to the protobuf codec.
+func PackConsensusState(consensusState exported.ConsensusState) (*codectypes.Any, error) {
+ msg, ok := consensusState.(proto.Message)
+ if !ok {
+ return nil, sdkerrors.Wrapf(sdkerrors.ErrPackAny, "cannot proto marshal %T", consensusState)
+ }
+
+ anyConsensusState, err := codectypes.NewAnyWithValue(msg)
+ if err != nil {
+ return nil, sdkerrors.Wrap(sdkerrors.ErrPackAny, err.Error())
+ }
+
+ return anyConsensusState, nil
+}
+
+// MustPackConsensusState calls PackConsensusState and panics on error.
+func MustPackConsensusState(consensusState exported.ConsensusState) *codectypes.Any {
+ anyConsensusState, err := PackConsensusState(consensusState)
+ if err != nil {
+ panic(err)
+ }
+
+ return anyConsensusState
+}
+
+// UnpackConsensusState unpacks an Any into a ConsensusState. It returns an error if the
+// consensus state can't be unpacked into a ConsensusState.
+func UnpackConsensusState(any *codectypes.Any) (exported.ConsensusState, error) {
+ if any == nil {
+ return nil, sdkerrors.Wrap(sdkerrors.ErrUnpackAny, "protobuf Any message cannot be nil")
+ }
+
+ consensusState, ok := any.GetCachedValue().(exported.ConsensusState)
+ if !ok {
+ return nil, sdkerrors.Wrapf(sdkerrors.ErrUnpackAny, "cannot unpack Any into ConsensusState %T", any)
+ }
+
+ return consensusState, nil
+}
+
+// PackHeader constructs a new Any packed with the given header value. It returns
+// an error if the header can't be cast to a protobuf message or if the concrete
+// implementation is not registered to the protobuf codec.
+func PackHeader(header exported.Header) (*codectypes.Any, error) {
+ msg, ok := header.(proto.Message)
+ if !ok {
+ return nil, sdkerrors.Wrapf(sdkerrors.ErrPackAny, "cannot proto marshal %T", header)
+ }
+
+ anyHeader, err := codectypes.NewAnyWithValue(msg)
+ if err != nil {
+ return nil, sdkerrors.Wrap(sdkerrors.ErrPackAny, err.Error())
+ }
+
+ return anyHeader, nil
+}
+
+// UnpackHeader unpacks an Any into a Header. It returns an error if the
+// Any can't be unpacked into a Header.
+func UnpackHeader(any *codectypes.Any) (exported.Header, error) {
+ if any == nil {
+ return nil, sdkerrors.Wrap(sdkerrors.ErrUnpackAny, "protobuf Any message cannot be nil")
+ }
+
+ header, ok := any.GetCachedValue().(exported.Header)
+ if !ok {
+ return nil, sdkerrors.Wrapf(sdkerrors.ErrUnpackAny, "cannot unpack Any into Header %T", any)
+ }
+
+ return header, nil
+}
+
+// PackMisbehaviour constructs a new Any packed with the given misbehaviour value. It returns
+// an error if the misbehaviour can't be cast to a protobuf message or if the concrete
+// implementation is not registered to the protobuf codec.
+func PackMisbehaviour(misbehaviour exported.Misbehaviour) (*codectypes.Any, error) {
+ msg, ok := misbehaviour.(proto.Message)
+ if !ok {
+ return nil, sdkerrors.Wrapf(sdkerrors.ErrPackAny, "cannot proto marshal %T", misbehaviour)
+ }
+
+	anyMisbehaviour, err := codectypes.NewAnyWithValue(msg)
+ if err != nil {
+ return nil, sdkerrors.Wrap(sdkerrors.ErrPackAny, err.Error())
+ }
+
+	return anyMisbehaviour, nil
+}
+
+// UnpackMisbehaviour unpacks an Any into a Misbehaviour. It returns an error if the
+// Any can't be unpacked into a Misbehaviour.
+func UnpackMisbehaviour(any *codectypes.Any) (exported.Misbehaviour, error) {
+ if any == nil {
+ return nil, sdkerrors.Wrap(sdkerrors.ErrUnpackAny, "protobuf Any message cannot be nil")
+ }
+
+ misbehaviour, ok := any.GetCachedValue().(exported.Misbehaviour)
+ if !ok {
+ return nil, sdkerrors.Wrapf(sdkerrors.ErrUnpackAny, "cannot unpack Any into Misbehaviour %T", any)
+ }
+
+ return misbehaviour, nil
+}
diff --git a/core/02-client/types/codec_test.go b/core/02-client/types/codec_test.go
new file mode 100644
index 00000000..75cfc97e
--- /dev/null
+++ b/core/02-client/types/codec_test.go
@@ -0,0 +1,210 @@
+package types_test
+
+import (
+ codectypes "github.com/cosmos/cosmos-sdk/codec/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
+ localhosttypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/09-localhost/types"
+ ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+)
+
+type caseAny struct {
+ name string
+ any *codectypes.Any
+ expPass bool
+}
+
+func (suite *TypesTestSuite) TestPackClientState() {
+
+ testCases := []struct {
+ name string
+ clientState exported.ClientState
+ expPass bool
+ }{
+ {
+ "solo machine client",
+ ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "solomachine", "", 2).ClientState(),
+ true,
+ },
+ {
+ "tendermint client",
+ ibctmtypes.NewClientState(chainID, ibctesting.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false),
+ true,
+ },
+ {
+ "localhost client",
+ localhosttypes.NewClientState(chainID, clientHeight),
+ true,
+ },
+ {
+ "nil",
+ nil,
+ false,
+ },
+ }
+
+ testCasesAny := []caseAny{}
+
+ for _, tc := range testCases {
+ clientAny, err := types.PackClientState(tc.clientState)
+ if tc.expPass {
+ suite.Require().NoError(err, tc.name)
+ } else {
+ suite.Require().Error(err, tc.name)
+ }
+
+ testCasesAny = append(testCasesAny, caseAny{tc.name, clientAny, tc.expPass})
+ }
+
+ for i, tc := range testCasesAny {
+ cs, err := types.UnpackClientState(tc.any)
+ if tc.expPass {
+ suite.Require().NoError(err, tc.name)
+ suite.Require().Equal(testCases[i].clientState, cs, tc.name)
+ } else {
+ suite.Require().Error(err, tc.name)
+ }
+ }
+}
+
+func (suite *TypesTestSuite) TestPackConsensusState() {
+ testCases := []struct {
+ name string
+ consensusState exported.ConsensusState
+ expPass bool
+ }{
+ {
+ "solo machine consensus",
+ ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "solomachine", "", 2).ConsensusState(),
+ true,
+ },
+ {
+ "tendermint consensus",
+ suite.chainA.LastHeader.ConsensusState(),
+ true,
+ },
+ {
+ "nil",
+ nil,
+ false,
+ },
+ }
+
+ testCasesAny := []caseAny{}
+
+ for _, tc := range testCases {
+ clientAny, err := types.PackConsensusState(tc.consensusState)
+ if tc.expPass {
+ suite.Require().NoError(err, tc.name)
+ } else {
+ suite.Require().Error(err, tc.name)
+ }
+ testCasesAny = append(testCasesAny, caseAny{tc.name, clientAny, tc.expPass})
+ }
+
+ for i, tc := range testCasesAny {
+ cs, err := types.UnpackConsensusState(tc.any)
+ if tc.expPass {
+ suite.Require().NoError(err, tc.name)
+ suite.Require().Equal(testCases[i].consensusState, cs, tc.name)
+ } else {
+ suite.Require().Error(err, tc.name)
+ }
+ }
+}
+
+func (suite *TypesTestSuite) TestPackHeader() {
+ testCases := []struct {
+ name string
+ header exported.Header
+ expPass bool
+ }{
+ {
+ "solo machine header",
+ ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "solomachine", "", 2).CreateHeader(),
+ true,
+ },
+ {
+ "tendermint header",
+ suite.chainA.LastHeader,
+ true,
+ },
+ {
+ "nil",
+ nil,
+ false,
+ },
+ }
+
+ testCasesAny := []caseAny{}
+
+ for _, tc := range testCases {
+ clientAny, err := types.PackHeader(tc.header)
+ if tc.expPass {
+ suite.Require().NoError(err, tc.name)
+ } else {
+ suite.Require().Error(err, tc.name)
+ }
+
+ testCasesAny = append(testCasesAny, caseAny{tc.name, clientAny, tc.expPass})
+ }
+
+ for i, tc := range testCasesAny {
+ cs, err := types.UnpackHeader(tc.any)
+ if tc.expPass {
+ suite.Require().NoError(err, tc.name)
+ suite.Require().Equal(testCases[i].header, cs, tc.name)
+ } else {
+ suite.Require().Error(err, tc.name)
+ }
+ }
+}
+
+func (suite *TypesTestSuite) TestPackMisbehaviour() {
+ testCases := []struct {
+ name string
+ misbehaviour exported.Misbehaviour
+ expPass bool
+ }{
+ {
+ "solo machine misbehaviour",
+ ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "solomachine", "", 2).CreateMisbehaviour(),
+ true,
+ },
+ {
+ "tendermint misbehaviour",
+ ibctmtypes.NewMisbehaviour("tendermint", suite.chainA.LastHeader, suite.chainA.LastHeader),
+ true,
+ },
+ {
+ "nil",
+ nil,
+ false,
+ },
+ }
+
+ testCasesAny := []caseAny{}
+
+ for _, tc := range testCases {
+ clientAny, err := types.PackMisbehaviour(tc.misbehaviour)
+ if tc.expPass {
+ suite.Require().NoError(err, tc.name)
+ } else {
+ suite.Require().Error(err, tc.name)
+ }
+
+ testCasesAny = append(testCasesAny, caseAny{tc.name, clientAny, tc.expPass})
+ }
+
+ for i, tc := range testCasesAny {
+ cs, err := types.UnpackMisbehaviour(tc.any)
+ if tc.expPass {
+ suite.Require().NoError(err, tc.name)
+ suite.Require().Equal(testCases[i].misbehaviour, cs, tc.name)
+ } else {
+ suite.Require().Error(err, tc.name)
+ }
+ }
+}
diff --git a/core/02-client/types/encoding.go b/core/02-client/types/encoding.go
new file mode 100644
index 00000000..a912b13a
--- /dev/null
+++ b/core/02-client/types/encoding.go
@@ -0,0 +1,86 @@
+package types
+
+import (
+ "fmt"
+
+ "github.com/cosmos/cosmos-sdk/codec"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+)
+
+// MustUnmarshalClientState attempts to decode and return a ClientState object from
+// raw encoded bytes. It panics on error.
+func MustUnmarshalClientState(cdc codec.BinaryMarshaler, bz []byte) exported.ClientState {
+ clientState, err := UnmarshalClientState(cdc, bz)
+ if err != nil {
+ panic(fmt.Errorf("failed to decode client state: %w", err))
+ }
+
+ return clientState
+}
+
+// MustMarshalClientState attempts to encode a ClientState object and returns the
+// raw encoded bytes. It panics on error.
+func MustMarshalClientState(cdc codec.BinaryMarshaler, clientState exported.ClientState) []byte {
+ bz, err := MarshalClientState(cdc, clientState)
+ if err != nil {
+ panic(fmt.Errorf("failed to encode client state: %w", err))
+ }
+
+ return bz
+}
+
+// MarshalClientState protobuf serializes a ClientState interface
+func MarshalClientState(cdc codec.BinaryMarshaler, clientStateI exported.ClientState) ([]byte, error) {
+ return cdc.MarshalInterface(clientStateI)
+}
+
+// UnmarshalClientState returns a ClientState interface from raw encoded client state
+// bytes of a Proto-based ClientState type. An error is returned upon decoding
+// failure.
+func UnmarshalClientState(cdc codec.BinaryMarshaler, bz []byte) (exported.ClientState, error) {
+ var clientState exported.ClientState
+ if err := cdc.UnmarshalInterface(bz, &clientState); err != nil {
+ return nil, err
+ }
+
+ return clientState, nil
+}
+
+// MustUnmarshalConsensusState attempts to decode and return a ConsensusState object from
+// raw encoded bytes. It panics on error.
+func MustUnmarshalConsensusState(cdc codec.BinaryMarshaler, bz []byte) exported.ConsensusState {
+ consensusState, err := UnmarshalConsensusState(cdc, bz)
+ if err != nil {
+ panic(fmt.Errorf("failed to decode consensus state: %w", err))
+ }
+
+ return consensusState
+}
+
+// MustMarshalConsensusState attempts to encode a ConsensusState object and returns the
+// raw encoded bytes. It panics on error.
+func MustMarshalConsensusState(cdc codec.BinaryMarshaler, consensusState exported.ConsensusState) []byte {
+ bz, err := MarshalConsensusState(cdc, consensusState)
+ if err != nil {
+ panic(fmt.Errorf("failed to encode consensus state: %w", err))
+ }
+
+ return bz
+}
+
+// MarshalConsensusState protobuf serializes a ConsensusState interface
+func MarshalConsensusState(cdc codec.BinaryMarshaler, cs exported.ConsensusState) ([]byte, error) {
+ return cdc.MarshalInterface(cs)
+}
+
+// UnmarshalConsensusState returns a ConsensusState interface from raw encoded consensus state
+// bytes of a Proto-based ConsensusState type. An error is returned upon decoding
+// failure.
+func UnmarshalConsensusState(cdc codec.BinaryMarshaler, bz []byte) (exported.ConsensusState, error) {
+ var consensusState exported.ConsensusState
+ if err := cdc.UnmarshalInterface(bz, &consensusState); err != nil {
+ return nil, err
+ }
+
+ return consensusState, nil
+}
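A minimal usage sketch for the helpers above, assuming `cdc` is a `codec.BinaryMarshaler` (such as the app's ProtoCodec) with the client state interface registered and `clientState` is some concrete `exported.ClientState`; both names are assumptions of the example.

```go
// Sketch: round-trip a client state through its store encoding.
bz := MustMarshalClientState(cdc, clientState) // panics if encoding fails
restored := MustUnmarshalClientState(cdc, bz)  // panics if decoding fails
_ = restored                                   // same client state, reconstructed from bytes
```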
diff --git a/core/02-client/types/errors.go b/core/02-client/types/errors.go
new file mode 100644
index 00000000..5b44cd52
--- /dev/null
+++ b/core/02-client/types/errors.go
@@ -0,0 +1,35 @@
+package types
+
+import (
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+)
+
+// IBC client sentinel errors
+var (
+ ErrClientExists = sdkerrors.Register(SubModuleName, 2, "light client already exists")
+ ErrInvalidClient = sdkerrors.Register(SubModuleName, 3, "light client is invalid")
+ ErrClientNotFound = sdkerrors.Register(SubModuleName, 4, "light client not found")
+ ErrClientFrozen = sdkerrors.Register(SubModuleName, 5, "light client is frozen due to misbehaviour")
+ ErrInvalidClientMetadata = sdkerrors.Register(SubModuleName, 6, "invalid client metadata")
+ ErrConsensusStateNotFound = sdkerrors.Register(SubModuleName, 7, "consensus state not found")
+ ErrInvalidConsensus = sdkerrors.Register(SubModuleName, 8, "invalid consensus state")
+ ErrClientTypeNotFound = sdkerrors.Register(SubModuleName, 9, "client type not found")
+ ErrInvalidClientType = sdkerrors.Register(SubModuleName, 10, "invalid client type")
+ ErrRootNotFound = sdkerrors.Register(SubModuleName, 11, "commitment root not found")
+ ErrInvalidHeader = sdkerrors.Register(SubModuleName, 12, "invalid client header")
+ ErrInvalidMisbehaviour = sdkerrors.Register(SubModuleName, 13, "invalid light client misbehaviour")
+ ErrFailedClientStateVerification = sdkerrors.Register(SubModuleName, 14, "client state verification failed")
+ ErrFailedClientConsensusStateVerification = sdkerrors.Register(SubModuleName, 15, "client consensus state verification failed")
+ ErrFailedConnectionStateVerification = sdkerrors.Register(SubModuleName, 16, "connection state verification failed")
+ ErrFailedChannelStateVerification = sdkerrors.Register(SubModuleName, 17, "channel state verification failed")
+ ErrFailedPacketCommitmentVerification = sdkerrors.Register(SubModuleName, 18, "packet commitment verification failed")
+ ErrFailedPacketAckVerification = sdkerrors.Register(SubModuleName, 19, "packet acknowledgement verification failed")
+ ErrFailedPacketReceiptVerification = sdkerrors.Register(SubModuleName, 20, "packet receipt verification failed")
+ ErrFailedNextSeqRecvVerification = sdkerrors.Register(SubModuleName, 21, "next sequence receive verification failed")
+ ErrSelfConsensusStateNotFound = sdkerrors.Register(SubModuleName, 22, "self consensus state not found")
+ ErrUpdateClientFailed = sdkerrors.Register(SubModuleName, 23, "unable to update light client")
+ ErrInvalidUpdateClientProposal = sdkerrors.Register(SubModuleName, 24, "invalid update client proposal")
+ ErrInvalidUpgradeClient = sdkerrors.Register(SubModuleName, 25, "invalid client upgrade")
+ ErrInvalidHeight = sdkerrors.Register(SubModuleName, 26, "invalid height")
+ ErrInvalidSubstitute = sdkerrors.Register(SubModuleName, 27, "invalid client state substitute")
+)
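Callers typically wrap these sentinels with request-specific context before returning them; a hedged sketch using the SDK's `sdkerrors.Wrapf` follows. The `requireClientFound` helper and its arguments are hypothetical and only illustrate the pattern.

```go
// Sketch: wrap a sentinel so callers can still match it with errors.Is
// while seeing which client the failure refers to.
func requireClientFound(clientID string, found bool) error {
	if !found {
		return sdkerrors.Wrapf(ErrClientNotFound, "client-id %s", clientID)
	}
	return nil
}
```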
diff --git a/core/02-client/types/events.go b/core/02-client/types/events.go
new file mode 100644
index 00000000..d0760ba8
--- /dev/null
+++ b/core/02-client/types/events.go
@@ -0,0 +1,26 @@
+package types
+
+import (
+ "fmt"
+
+ host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+)
+
+// IBC client events
+const (
+ AttributeKeyClientID = "client_id"
+ AttributeKeySubjectClientID = "subject_client_id"
+ AttributeKeyClientType = "client_type"
+ AttributeKeyConsensusHeight = "consensus_height"
+)
+
+// IBC client events vars
+var (
+ EventTypeCreateClient = "create_client"
+ EventTypeUpdateClient = "update_client"
+ EventTypeUpgradeClient = "upgrade_client"
+ EventTypeSubmitMisbehaviour = "client_misbehaviour"
+ EventTypeUpdateClientProposal = "update_client_proposal"
+
+ AttributeValueCategory = fmt.Sprintf("%s_%s", host.ModuleName, SubModuleName)
+)
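For context, a hedged sketch of how a keeper method might emit one of these events; `ctx`, `clientID`, and `clientState` are assumed to be supplied by the calling keeper and are not defined in this file.

```go
// Sketch: emit a create_client event using the event type and keys defined above.
ctx.EventManager().EmitEvent(
	sdk.NewEvent(
		EventTypeCreateClient,
		sdk.NewAttribute(AttributeKeyClientID, clientID),
		sdk.NewAttribute(AttributeKeyClientType, clientState.ClientType()),
		sdk.NewAttribute(AttributeKeyConsensusHeight, clientState.GetLatestHeight().String()),
	),
)
```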
diff --git a/core/02-client/types/expected_keepers.go b/core/02-client/types/expected_keepers.go
new file mode 100644
index 00000000..defc8150
--- /dev/null
+++ b/core/02-client/types/expected_keepers.go
@@ -0,0 +1,14 @@
+package types
+
+import (
+ "time"
+
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types"
+)
+
+// StakingKeeper defines the expected staking keeper
+type StakingKeeper interface {
+ GetHistoricalInfo(ctx sdk.Context, height int64) (stakingtypes.HistoricalInfo, bool)
+ UnbondingTime(ctx sdk.Context) time.Duration
+}
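A trivial stub satisfying this expectation can be useful in unit tests; the type name and returned values below are illustrative assumptions, not part of the patch.

```go
// Sketch: a no-op StakingKeeper stub for tests.
type stubStakingKeeper struct{}

func (stubStakingKeeper) GetHistoricalInfo(ctx sdk.Context, height int64) (stakingtypes.HistoricalInfo, bool) {
	return stakingtypes.HistoricalInfo{}, false // no historical info recorded
}

func (stubStakingKeeper) UnbondingTime(ctx sdk.Context) time.Duration {
	return 21 * 24 * time.Hour // three weeks, a common default
}
```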
diff --git a/core/02-client/types/genesis.go b/core/02-client/types/genesis.go
new file mode 100644
index 00000000..3f197208
--- /dev/null
+++ b/core/02-client/types/genesis.go
@@ -0,0 +1,250 @@
+package types
+
+import (
+ "fmt"
+ "sort"
+
+ codectypes "github.com/cosmos/cosmos-sdk/codec/types"
+ host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+)
+
+var (
+ _ codectypes.UnpackInterfacesMessage = IdentifiedClientState{}
+ _ codectypes.UnpackInterfacesMessage = ClientsConsensusStates{}
+ _ codectypes.UnpackInterfacesMessage = ClientConsensusStates{}
+ _ codectypes.UnpackInterfacesMessage = GenesisState{}
+)
+
+var (
+ _ sort.Interface = ClientsConsensusStates{}
+ _ exported.GenesisMetadata = GenesisMetadata{}
+)
+
+// ClientsConsensusStates defines a slice of ClientConsensusStates that supports the sort interface
+type ClientsConsensusStates []ClientConsensusStates
+
+// Len implements sort.Interface
+func (ccs ClientsConsensusStates) Len() int { return len(ccs) }
+
+// Less implements sort.Interface
+func (ccs ClientsConsensusStates) Less(i, j int) bool { return ccs[i].ClientId < ccs[j].ClientId }
+
+// Swap implements sort.Interface
+func (ccs ClientsConsensusStates) Swap(i, j int) { ccs[i], ccs[j] = ccs[j], ccs[i] }
+
+// Sort is a helper function to sort the set of ClientsConsensusStates in place
+func (ccs ClientsConsensusStates) Sort() ClientsConsensusStates {
+ sort.Sort(ccs)
+ return ccs
+}
+
+// UnpackInterfaces implements UnpackInterfacesMessage.UnpackInterfaces
+func (ccs ClientsConsensusStates) UnpackInterfaces(unpacker codectypes.AnyUnpacker) error {
+ for _, clientConsensus := range ccs {
+ if err := clientConsensus.UnpackInterfaces(unpacker); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// NewClientConsensusStates creates a new ClientConsensusStates instance.
+func NewClientConsensusStates(clientID string, consensusStates []ConsensusStateWithHeight) ClientConsensusStates {
+ return ClientConsensusStates{
+ ClientId: clientID,
+ ConsensusStates: consensusStates,
+ }
+}
+
+// UnpackInterfaces implements UnpackInterfacesMessage.UnpackInterfaces
+func (ccs ClientConsensusStates) UnpackInterfaces(unpacker codectypes.AnyUnpacker) error {
+ for _, consStateWithHeight := range ccs.ConsensusStates {
+ if err := consStateWithHeight.UnpackInterfaces(unpacker); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// NewGenesisState creates a GenesisState instance.
+func NewGenesisState(
+ clients []IdentifiedClientState, clientsConsensus ClientsConsensusStates, clientsMetadata []IdentifiedGenesisMetadata,
+ params Params, createLocalhost bool, nextClientSequence uint64,
+) GenesisState {
+ return GenesisState{
+ Clients: clients,
+ ClientsConsensus: clientsConsensus,
+ ClientsMetadata: clientsMetadata,
+ Params: params,
+ CreateLocalhost: createLocalhost,
+ NextClientSequence: nextClientSequence,
+ }
+}
+
+// DefaultGenesisState returns the ibc client submodule's default genesis state.
+func DefaultGenesisState() GenesisState {
+ return GenesisState{
+ Clients: []IdentifiedClientState{},
+ ClientsConsensus: ClientsConsensusStates{},
+ Params: DefaultParams(),
+ CreateLocalhost: false,
+ NextClientSequence: 0,
+ }
+}
+
+// UnpackInterfaces implements UnpackInterfacesMessage.UnpackInterfaces
+func (gs GenesisState) UnpackInterfaces(unpacker codectypes.AnyUnpacker) error {
+ for _, client := range gs.Clients {
+ if err := client.UnpackInterfaces(unpacker); err != nil {
+ return err
+ }
+ }
+
+ return gs.ClientsConsensus.UnpackInterfaces(unpacker)
+}
+
+// Validate performs basic genesis state validation returning an error upon any
+// failure.
+func (gs GenesisState) Validate() error {
+ // keep track of the max sequence to ensure it is less than
+	// the next sequence used in creating client identifiers.
+ var maxSequence uint64 = 0
+
+ if err := gs.Params.Validate(); err != nil {
+ return err
+ }
+
+ validClients := make(map[string]string)
+
+ for i, client := range gs.Clients {
+ if err := host.ClientIdentifierValidator(client.ClientId); err != nil {
+			return fmt.Errorf("invalid client identifier %s index %d: %w", client.ClientId, i, err)
+ }
+
+ clientState, ok := client.ClientState.GetCachedValue().(exported.ClientState)
+ if !ok {
+ return fmt.Errorf("invalid client state with ID %s", client.ClientId)
+ }
+
+ if !gs.Params.IsAllowedClient(clientState.ClientType()) {
+ return fmt.Errorf("client type %s not allowed by genesis params", clientState.ClientType())
+ }
+ if err := clientState.Validate(); err != nil {
+ return fmt.Errorf("invalid client %v index %d: %w", client, i, err)
+ }
+
+ clientType, sequence, err := ParseClientIdentifier(client.ClientId)
+ if err != nil {
+ return err
+ }
+
+ if clientType != clientState.ClientType() {
+ return fmt.Errorf("client state type %s does not equal client type in client identifier %s", clientState.ClientType(), clientType)
+ }
+
+ if err := ValidateClientType(clientType); err != nil {
+ return err
+ }
+
+ if sequence > maxSequence {
+ maxSequence = sequence
+ }
+
+ // add client id to validClients map
+ validClients[client.ClientId] = clientState.ClientType()
+ }
+
+ for _, cc := range gs.ClientsConsensus {
+ // check that consensus state is for a client in the genesis clients list
+ clientType, ok := validClients[cc.ClientId]
+ if !ok {
+ return fmt.Errorf("consensus state in genesis has a client id %s that does not map to a genesis client", cc.ClientId)
+ }
+
+ for i, consensusState := range cc.ConsensusStates {
+ if consensusState.Height.IsZero() {
+ return fmt.Errorf("consensus state height cannot be zero")
+ }
+
+ cs, ok := consensusState.ConsensusState.GetCachedValue().(exported.ConsensusState)
+ if !ok {
+ return fmt.Errorf("invalid consensus state with client ID %s at height %s", cc.ClientId, consensusState.Height)
+ }
+
+ if err := cs.ValidateBasic(); err != nil {
+ return fmt.Errorf("invalid client consensus state %v clientID %s index %d: %w", cs, cc.ClientId, i, err)
+ }
+
+ // ensure consensus state type matches client state type
+ if clientType != cs.ClientType() {
+ return fmt.Errorf("consensus state client type %s does not equal client state client type %s", cs.ClientType(), clientType)
+ }
+
+ }
+ }
+
+ for _, clientMetadata := range gs.ClientsMetadata {
+ // check that metadata is for a client in the genesis clients list
+ _, ok := validClients[clientMetadata.ClientId]
+ if !ok {
+ return fmt.Errorf("metadata in genesis has a client id %s that does not map to a genesis client", clientMetadata.ClientId)
+ }
+
+ for i, gm := range clientMetadata.ClientMetadata {
+ if err := gm.Validate(); err != nil {
+ return fmt.Errorf("invalid client metadata %v clientID %s index %d: %w", gm, clientMetadata.ClientId, i, err)
+ }
+
+ }
+
+ }
+
+ if gs.CreateLocalhost && !gs.Params.IsAllowedClient(exported.Localhost) {
+ return fmt.Errorf("localhost client is not registered on the allowlist")
+ }
+
+ if maxSequence != 0 && maxSequence >= gs.NextClientSequence {
+ return fmt.Errorf("next client identifier sequence %d must be greater than the maximum sequence used in the provided client identifiers %d", gs.NextClientSequence, maxSequence)
+ }
+
+ return nil
+}
+
+// NewGenesisMetadata is a constructor for GenesisMetadata
+func NewGenesisMetadata(key, val []byte) GenesisMetadata {
+ return GenesisMetadata{
+ Key: key,
+ Value: val,
+ }
+}
+
+// GetKey returns the key of metadata. Implements exported.GenesisMetadata interface.
+func (gm GenesisMetadata) GetKey() []byte {
+ return gm.Key
+}
+
+// GetValue returns the value of metadata. Implements exported.GenesisMetadata interface.
+func (gm GenesisMetadata) GetValue() []byte {
+ return gm.Value
+}
+
+// Validate ensures key and value of metadata are not empty
+func (gm GenesisMetadata) Validate() error {
+ if len(gm.Key) == 0 {
+ return fmt.Errorf("genesis metadata key cannot be empty")
+ }
+ if len(gm.Value) == 0 {
+ return fmt.Errorf("genesis metadata value cannot be empty")
+ }
+ return nil
+}
+
+// NewIdentifiedGenesisMetadata takes in a client ID and list of genesis metadata for that client
+// and constructs a new IdentifiedGenesisMetadata.
+func NewIdentifiedGenesisMetadata(clientID string, gms []GenesisMetadata) IdentifiedGenesisMetadata {
+ return IdentifiedGenesisMetadata{
+ ClientId: clientID,
+ ClientMetadata: gms,
+ }
+}
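Putting the constructors and Validate together, a hedged sketch of building and checking a genesis state; the empty slices mirror DefaultGenesisState and the zero sequence is an assumption of the example.

```go
// Sketch: an empty-but-valid client genesis state.
gs := NewGenesisState(
	[]IdentifiedClientState{},     // no clients
	ClientsConsensusStates{},      // no consensus states
	[]IdentifiedGenesisMetadata{}, // no client metadata
	DefaultParams(),               // default allowed client types
	false,                         // do not create localhost
	0,                             // next client sequence
)
if err := gs.Validate(); err != nil {
	panic(err) // unreachable for the values above
}
```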
diff --git a/core/02-client/types/genesis.pb.go b/core/02-client/types/genesis.pb.go
new file mode 100644
index 00000000..e4246f5c
--- /dev/null
+++ b/core/02-client/types/genesis.pb.go
@@ -0,0 +1,1060 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: ibcgo/core/client/v1/genesis.proto
+
+package types
+
+import (
+ fmt "fmt"
+ _ "github.com/gogo/protobuf/gogoproto"
+ proto "github.com/gogo/protobuf/proto"
+ io "io"
+ math "math"
+ math_bits "math/bits"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+// GenesisState defines the ibc client submodule's genesis state.
+type GenesisState struct {
+ // client states with their corresponding identifiers
+ Clients IdentifiedClientStates `protobuf:"bytes,1,rep,name=clients,proto3,castrepeated=IdentifiedClientStates" json:"clients"`
+ // consensus states from each client
+ ClientsConsensus ClientsConsensusStates `protobuf:"bytes,2,rep,name=clients_consensus,json=clientsConsensus,proto3,castrepeated=ClientsConsensusStates" json:"clients_consensus" yaml:"clients_consensus"`
+ // metadata from each client
+ ClientsMetadata []IdentifiedGenesisMetadata `protobuf:"bytes,3,rep,name=clients_metadata,json=clientsMetadata,proto3" json:"clients_metadata" yaml:"clients_metadata"`
+ Params Params `protobuf:"bytes,4,opt,name=params,proto3" json:"params"`
+ // create localhost on initialization
+ CreateLocalhost bool `protobuf:"varint,5,opt,name=create_localhost,json=createLocalhost,proto3" json:"create_localhost,omitempty" yaml:"create_localhost"`
+ // the sequence for the next generated client identifier
+ NextClientSequence uint64 `protobuf:"varint,6,opt,name=next_client_sequence,json=nextClientSequence,proto3" json:"next_client_sequence,omitempty" yaml:"next_client_sequence"`
+}
+
+func (m *GenesisState) Reset() { *m = GenesisState{} }
+func (m *GenesisState) String() string { return proto.CompactTextString(m) }
+func (*GenesisState) ProtoMessage() {}
+func (*GenesisState) Descriptor() ([]byte, []int) {
+ return fileDescriptor_a1110e97fc5e4abf, []int{0}
+}
+func (m *GenesisState) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *GenesisState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_GenesisState.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *GenesisState) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GenesisState.Merge(m, src)
+}
+func (m *GenesisState) XXX_Size() int {
+ return m.Size()
+}
+func (m *GenesisState) XXX_DiscardUnknown() {
+ xxx_messageInfo_GenesisState.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GenesisState proto.InternalMessageInfo
+
+func (m *GenesisState) GetClients() IdentifiedClientStates {
+ if m != nil {
+ return m.Clients
+ }
+ return nil
+}
+
+func (m *GenesisState) GetClientsConsensus() ClientsConsensusStates {
+ if m != nil {
+ return m.ClientsConsensus
+ }
+ return nil
+}
+
+func (m *GenesisState) GetClientsMetadata() []IdentifiedGenesisMetadata {
+ if m != nil {
+ return m.ClientsMetadata
+ }
+ return nil
+}
+
+func (m *GenesisState) GetParams() Params {
+ if m != nil {
+ return m.Params
+ }
+ return Params{}
+}
+
+func (m *GenesisState) GetCreateLocalhost() bool {
+ if m != nil {
+ return m.CreateLocalhost
+ }
+ return false
+}
+
+func (m *GenesisState) GetNextClientSequence() uint64 {
+ if m != nil {
+ return m.NextClientSequence
+ }
+ return 0
+}
+
+// GenesisMetadata defines the genesis type for metadata that clients may return
+// with ExportMetadata
+type GenesisMetadata struct {
+ // store key of metadata without clientID-prefix
+ Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+ // metadata value
+ Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (m *GenesisMetadata) Reset() { *m = GenesisMetadata{} }
+func (m *GenesisMetadata) String() string { return proto.CompactTextString(m) }
+func (*GenesisMetadata) ProtoMessage() {}
+func (*GenesisMetadata) Descriptor() ([]byte, []int) {
+ return fileDescriptor_a1110e97fc5e4abf, []int{1}
+}
+func (m *GenesisMetadata) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *GenesisMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_GenesisMetadata.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *GenesisMetadata) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GenesisMetadata.Merge(m, src)
+}
+func (m *GenesisMetadata) XXX_Size() int {
+ return m.Size()
+}
+func (m *GenesisMetadata) XXX_DiscardUnknown() {
+ xxx_messageInfo_GenesisMetadata.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GenesisMetadata proto.InternalMessageInfo
+
+// IdentifiedGenesisMetadata has the client metadata with the corresponding
+// client id.
+type IdentifiedGenesisMetadata struct {
+ ClientId string `protobuf:"bytes,1,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty" yaml:"client_id"`
+ ClientMetadata []GenesisMetadata `protobuf:"bytes,2,rep,name=client_metadata,json=clientMetadata,proto3" json:"client_metadata" yaml:"client_metadata"`
+}
+
+func (m *IdentifiedGenesisMetadata) Reset() { *m = IdentifiedGenesisMetadata{} }
+func (m *IdentifiedGenesisMetadata) String() string { return proto.CompactTextString(m) }
+func (*IdentifiedGenesisMetadata) ProtoMessage() {}
+func (*IdentifiedGenesisMetadata) Descriptor() ([]byte, []int) {
+ return fileDescriptor_a1110e97fc5e4abf, []int{2}
+}
+func (m *IdentifiedGenesisMetadata) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *IdentifiedGenesisMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_IdentifiedGenesisMetadata.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *IdentifiedGenesisMetadata) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_IdentifiedGenesisMetadata.Merge(m, src)
+}
+func (m *IdentifiedGenesisMetadata) XXX_Size() int {
+ return m.Size()
+}
+func (m *IdentifiedGenesisMetadata) XXX_DiscardUnknown() {
+ xxx_messageInfo_IdentifiedGenesisMetadata.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_IdentifiedGenesisMetadata proto.InternalMessageInfo
+
+func (m *IdentifiedGenesisMetadata) GetClientId() string {
+ if m != nil {
+ return m.ClientId
+ }
+ return ""
+}
+
+func (m *IdentifiedGenesisMetadata) GetClientMetadata() []GenesisMetadata {
+ if m != nil {
+ return m.ClientMetadata
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterType((*GenesisState)(nil), "ibcgo.core.client.v1.GenesisState")
+ proto.RegisterType((*GenesisMetadata)(nil), "ibcgo.core.client.v1.GenesisMetadata")
+ proto.RegisterType((*IdentifiedGenesisMetadata)(nil), "ibcgo.core.client.v1.IdentifiedGenesisMetadata")
+}
+
+func init() {
+ proto.RegisterFile("ibcgo/core/client/v1/genesis.proto", fileDescriptor_a1110e97fc5e4abf)
+}
+
+var fileDescriptor_a1110e97fc5e4abf = []byte{
+ // 531 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x53, 0xc1, 0x6e, 0xd3, 0x40,
+ 0x14, 0xcc, 0xb6, 0x69, 0x68, 0xb7, 0x15, 0x0d, 0xab, 0xa8, 0x98, 0x16, 0xd9, 0xc1, 0x12, 0x92,
+ 0x25, 0x14, 0x9b, 0x84, 0x5b, 0x2e, 0x48, 0xae, 0x44, 0x55, 0x09, 0x24, 0x30, 0x37, 0x2e, 0xd6,
+ 0x66, 0xbd, 0xb8, 0x16, 0xb6, 0x37, 0x64, 0x37, 0x11, 0x11, 0x3f, 0xc0, 0x91, 0x03, 0x1f, 0xc0,
+ 0x99, 0x8f, 0xe0, 0xdc, 0x63, 0x8f, 0x9c, 0x42, 0x95, 0xfc, 0x41, 0xbe, 0x00, 0x79, 0x77, 0x4d,
+ 0x5b, 0x63, 0xe0, 0xf6, 0x32, 0x9e, 0x99, 0x37, 0x9a, 0x97, 0x85, 0x76, 0x32, 0x22, 0x31, 0xf3,
+ 0x08, 0x9b, 0x50, 0x8f, 0xa4, 0x09, 0xcd, 0x85, 0x37, 0xeb, 0x7b, 0x31, 0xcd, 0x29, 0x4f, 0xb8,
+ 0x3b, 0x9e, 0x30, 0xc1, 0x50, 0x47, 0x72, 0xdc, 0x82, 0xe3, 0x2a, 0x8e, 0x3b, 0xeb, 0x1f, 0x3e,
+ 0xa8, 0x55, 0xea, 0xef, 0x52, 0x78, 0xd8, 0x89, 0x59, 0xcc, 0xe4, 0xe8, 0x15, 0x93, 0x42, 0xed,
+ 0xcb, 0x26, 0xdc, 0x3b, 0x51, 0x0b, 0x5e, 0x0b, 0x2c, 0x28, 0xa2, 0xf0, 0x96, 0x92, 0x71, 0x03,
+ 0x74, 0x37, 0x9d, 0xdd, 0xc1, 0x23, 0xb7, 0x6e, 0xa3, 0x7b, 0x1a, 0xd1, 0x5c, 0x24, 0x6f, 0x13,
+ 0x1a, 0x1d, 0x4b, 0x4c, 0xaa, 0x7d, 0xf3, 0x7c, 0x61, 0x35, 0xbe, 0xfd, 0xb4, 0x0e, 0x6a, 0x3f,
+ 0xf3, 0xa0, 0xf4, 0x46, 0x5f, 0x00, 0xbc, 0xa3, 0xe7, 0x90, 0xb0, 0x9c, 0xd3, 0x9c, 0x4f, 0xb9,
+ 0xb1, 0xf1, 0xaf, 0x8d, 0xca, 0xe8, 0xb8, 0x24, 0x2b, 0x47, 0x7f, 0x58, 0x6c, 0x5c, 0x2f, 0x2c,
+ 0x63, 0x8e, 0xb3, 0x74, 0x68, 0xff, 0xe1, 0x69, 0x17, 0x69, 0x94, 0x94, 0x57, 0xb4, 0x41, 0x9b,
+ 0x54, 0x70, 0xf4, 0x11, 0x96, 0x58, 0x98, 0x51, 0x81, 0x23, 0x2c, 0xb0, 0xb1, 0x29, 0x43, 0x79,
+ 0xff, 0xab, 0x41, 0xb7, 0xf8, 0x42, 0xcb, 0x7c, 0x4b, 0x07, 0xbb, 0x7b, 0x33, 0x58, 0x69, 0x6b,
+ 0x07, 0xfb, 0x1a, 0x2a, 0x15, 0x68, 0x08, 0x5b, 0x63, 0x3c, 0xc1, 0x19, 0x37, 0x9a, 0x5d, 0xe0,
+ 0xec, 0x0e, 0xee, 0xd7, 0xaf, 0x7c, 0x29, 0x39, 0x7e, 0xb3, 0xf0, 0x0f, 0xb4, 0x02, 0x3d, 0x83,
+ 0x6d, 0x32, 0xa1, 0x58, 0xd0, 0x30, 0x65, 0x04, 0xa7, 0x67, 0x8c, 0x0b, 0x63, 0xab, 0x0b, 0x9c,
+ 0x6d, 0xff, 0xe8, 0x5a, 0x86, 0x0a, 0xa3, 0xc8, 0x20, 0xa1, 0xe7, 0x25, 0x82, 0x5e, 0xc1, 0x4e,
+ 0x4e, 0x3f, 0x88, 0x50, 0xad, 0x0b, 0x39, 0x7d, 0x3f, 0xa5, 0x39, 0xa1, 0x46, 0xab, 0x0b, 0x9c,
+ 0xa6, 0x6f, 0xad, 0x17, 0xd6, 0x91, 0xf2, 0xaa, 0x63, 0xd9, 0x01, 0x2a, 0x60, 0x7d, 0xf1, 0x12,
+ 0x7c, 0x0a, 0xf7, 0x2b, 0xdd, 0xa0, 0x36, 0xdc, 0x7c, 0x47, 0xe7, 0x06, 0xe8, 0x02, 0x67, 0x2f,
+ 0x28, 0x46, 0xd4, 0x81, 0x5b, 0x33, 0x9c, 0x4e, 0xa9, 0xb1, 0x21, 0x31, 0xf5, 0x63, 0xd8, 0xfc,
+ 0xf4, 0xd5, 0x6a, 0xd8, 0xdf, 0x01, 0xbc, 0xf7, 0xd7, 0x9e, 0x51, 0x1f, 0xee, 0xe8, 0x18, 0x49,
+ 0x24, 0x1d, 0x77, 0xfc, 0xce, 0x7a, 0x61, 0xb5, 0xaf, 0xd7, 0x1e, 0x26, 0x91, 0x1d, 0x6c, 0xab,
+ 0xf9, 0x34, 0x42, 0x39, 0xd4, 0xdd, 0x5f, 0x1d, 0x59, 0xfd, 0xf3, 0x1e, 0xd6, 0x37, 0x5e, 0x3d,
+ 0xad, 0xa9, 0x4f, 0x7b, 0x70, 0x63, 0xc7, 0xd5, 0x65, 0x6f, 0x2b, 0xe4, 0x37, 0xff, 0xe4, 0x7c,
+ 0x69, 0x82, 0x8b, 0xa5, 0x09, 0x2e, 0x97, 0x26, 0xf8, 0xbc, 0x32, 0x1b, 0x17, 0x2b, 0xb3, 0xf1,
+ 0x63, 0x65, 0x36, 0xde, 0xf4, 0xe2, 0x44, 0x9c, 0x4d, 0x47, 0x2e, 0x61, 0x99, 0x47, 0x18, 0xcf,
+ 0x18, 0xf7, 0x92, 0x11, 0xe9, 0x95, 0x4f, 0xf9, 0xf1, 0xa0, 0xa7, 0x5f, 0xb3, 0x98, 0x8f, 0x29,
+ 0x1f, 0xb5, 0xe4, 0xa3, 0x7d, 0xf2, 0x2b, 0x00, 0x00, 0xff, 0xff, 0x7e, 0x8f, 0x24, 0x27, 0x29,
+ 0x04, 0x00, 0x00,
+}
+
+func (m *GenesisState) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *GenesisState) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.NextClientSequence != 0 {
+ i = encodeVarintGenesis(dAtA, i, uint64(m.NextClientSequence))
+ i--
+ dAtA[i] = 0x30
+ }
+ if m.CreateLocalhost {
+ i--
+ if m.CreateLocalhost {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x28
+ }
+ {
+ size, err := m.Params.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenesis(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ if len(m.ClientsMetadata) > 0 {
+ for iNdEx := len(m.ClientsMetadata) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.ClientsMetadata[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenesis(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ if len(m.ClientsConsensus) > 0 {
+ for iNdEx := len(m.ClientsConsensus) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.ClientsConsensus[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenesis(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if len(m.Clients) > 0 {
+ for iNdEx := len(m.Clients) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Clients[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenesis(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *GenesisMetadata) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *GenesisMetadata) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *GenesisMetadata) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Value) > 0 {
+ i -= len(m.Value)
+ copy(dAtA[i:], m.Value)
+ i = encodeVarintGenesis(dAtA, i, uint64(len(m.Value)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Key) > 0 {
+ i -= len(m.Key)
+ copy(dAtA[i:], m.Key)
+ i = encodeVarintGenesis(dAtA, i, uint64(len(m.Key)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *IdentifiedGenesisMetadata) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *IdentifiedGenesisMetadata) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *IdentifiedGenesisMetadata) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.ClientMetadata) > 0 {
+ for iNdEx := len(m.ClientMetadata) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.ClientMetadata[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenesis(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if len(m.ClientId) > 0 {
+ i -= len(m.ClientId)
+ copy(dAtA[i:], m.ClientId)
+ i = encodeVarintGenesis(dAtA, i, uint64(len(m.ClientId)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintGenesis(dAtA []byte, offset int, v uint64) int {
+ offset -= sovGenesis(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *GenesisState) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Clients) > 0 {
+ for _, e := range m.Clients {
+ l = e.Size()
+ n += 1 + l + sovGenesis(uint64(l))
+ }
+ }
+ if len(m.ClientsConsensus) > 0 {
+ for _, e := range m.ClientsConsensus {
+ l = e.Size()
+ n += 1 + l + sovGenesis(uint64(l))
+ }
+ }
+ if len(m.ClientsMetadata) > 0 {
+ for _, e := range m.ClientsMetadata {
+ l = e.Size()
+ n += 1 + l + sovGenesis(uint64(l))
+ }
+ }
+ l = m.Params.Size()
+ n += 1 + l + sovGenesis(uint64(l))
+ if m.CreateLocalhost {
+ n += 2
+ }
+ if m.NextClientSequence != 0 {
+ n += 1 + sovGenesis(uint64(m.NextClientSequence))
+ }
+ return n
+}
+
+func (m *GenesisMetadata) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Key)
+ if l > 0 {
+ n += 1 + l + sovGenesis(uint64(l))
+ }
+ l = len(m.Value)
+ if l > 0 {
+ n += 1 + l + sovGenesis(uint64(l))
+ }
+ return n
+}
+
+func (m *IdentifiedGenesisMetadata) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.ClientId)
+ if l > 0 {
+ n += 1 + l + sovGenesis(uint64(l))
+ }
+ if len(m.ClientMetadata) > 0 {
+ for _, e := range m.ClientMetadata {
+ l = e.Size()
+ n += 1 + l + sovGenesis(uint64(l))
+ }
+ }
+ return n
+}
+
+func sovGenesis(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozGenesis(x uint64) (n int) {
+ return sovGenesis(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *GenesisState) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenesis
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: GenesisState: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: GenesisState: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Clients", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenesis
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenesis
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenesis
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Clients = append(m.Clients, IdentifiedClientState{})
+ if err := m.Clients[len(m.Clients)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClientsConsensus", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenesis
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenesis
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenesis
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ClientsConsensus = append(m.ClientsConsensus, ClientConsensusStates{})
+ if err := m.ClientsConsensus[len(m.ClientsConsensus)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClientsMetadata", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenesis
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenesis
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenesis
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ClientsMetadata = append(m.ClientsMetadata, IdentifiedGenesisMetadata{})
+ if err := m.ClientsMetadata[len(m.ClientsMetadata)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenesis
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenesis
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenesis
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CreateLocalhost", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenesis
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.CreateLocalhost = bool(v != 0)
+ case 6:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NextClientSequence", wireType)
+ }
+ m.NextClientSequence = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenesis
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.NextClientSequence |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenesis(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenesis
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *GenesisMetadata) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenesis
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: GenesisMetadata: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: GenesisMetadata: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenesis
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthGenesis
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenesis
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
+ if m.Key == nil {
+ m.Key = []byte{}
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenesis
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthGenesis
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenesis
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...)
+ if m.Value == nil {
+ m.Value = []byte{}
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenesis(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenesis
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *IdentifiedGenesisMetadata) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenesis
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: IdentifiedGenesisMetadata: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: IdentifiedGenesisMetadata: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClientId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenesis
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenesis
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenesis
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ClientId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClientMetadata", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenesis
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenesis
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenesis
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ClientMetadata = append(m.ClientMetadata, GenesisMetadata{})
+ if err := m.ClientMetadata[len(m.ClientMetadata)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenesis(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenesis
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipGenesis(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenesis
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenesis
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenesis
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthGenesis
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupGenesis
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthGenesis
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthGenesis = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowGenesis = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupGenesis = fmt.Errorf("proto: unexpected end of group")
+)
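As a side note on the generated varint helpers: `sovGenesis` returns how many bytes a value occupies as a protobuf varint (7 payload bits per byte), which the `Size` methods above use to precompute buffer lengths. A tiny worked sketch, purely illustrative:

```go
// Sketch: sovGenesis(x) == ceil(bits.Len64(x|1) / 7).
_ = sovGenesis(1)       // 1 byte: values 0..127 fit in a single byte
_ = sovGenesis(300)     // 2 bytes: 300 needs 9 bits, so two 7-bit groups
_ = sovGenesis(1 << 21) // 4 bytes: 22 bits -> four 7-bit groups
```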
diff --git a/core/02-client/types/genesis_test.go b/core/02-client/types/genesis_test.go
new file mode 100644
index 00000000..d57b8d1b
--- /dev/null
+++ b/core/02-client/types/genesis_test.go
@@ -0,0 +1,549 @@
+package types_test
+
+import (
+ "time"
+
+ tmtypes "github.com/tendermint/tendermint/types"
+
+ client "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+ commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
+ localhosttypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/09-localhost/types"
+ ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+ ibctestingmock "github.com/cosmos/cosmos-sdk/x/ibc/testing/mock"
+)
+
+const (
+ chainID = "chainID"
+ tmClientID0 = "07-tendermint-0"
+ tmClientID1 = "07-tendermint-1"
+ invalidClientID = "myclient-0"
+ clientID = tmClientID0
+
+ height = 10
+)
+
+var clientHeight = types.NewHeight(0, 10)
+
+func (suite *TypesTestSuite) TestMarshalGenesisState() {
+ cdc := suite.chainA.App.AppCodec()
+ clientA, _, _, _, _, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.ORDERED)
+ suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+
+ genesis := client.ExportGenesis(suite.chainA.GetContext(), suite.chainA.App.IBCKeeper.ClientKeeper)
+
+ bz, err := cdc.MarshalJSON(&genesis)
+ suite.Require().NoError(err)
+ suite.Require().NotNil(bz)
+
+ var gs types.GenesisState
+ err = cdc.UnmarshalJSON(bz, &gs)
+ suite.Require().NoError(err)
+}
+
+func (suite *TypesTestSuite) TestValidateGenesis() {
+ privVal := ibctestingmock.NewPV()
+ pubKey, err := privVal.GetPubKey()
+ suite.Require().NoError(err)
+
+ now := time.Now().UTC()
+
+ val := tmtypes.NewValidator(pubKey, 10)
+ valSet := tmtypes.NewValidatorSet([]*tmtypes.Validator{val})
+
+ heightMinus1 := types.NewHeight(0, height-1)
+ header := suite.chainA.CreateTMClientHeader(chainID, int64(clientHeight.RevisionHeight), heightMinus1, now, valSet, valSet, []tmtypes.PrivValidator{privVal})
+
+ testCases := []struct {
+ name string
+ genState types.GenesisState
+ expPass bool
+ }{
+ {
+ name: "default",
+ genState: types.DefaultGenesisState(),
+ expPass: true,
+ },
+ {
+ name: "valid custom genesis",
+ genState: types.NewGenesisState(
+ []types.IdentifiedClientState{
+ types.NewIdentifiedClientState(
+ tmClientID0, ibctmtypes.NewClientState(chainID, ibctesting.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false),
+ ),
+ types.NewIdentifiedClientState(
+ exported.Localhost+"-1", localhosttypes.NewClientState("chainID", clientHeight),
+ ),
+ },
+ []types.ClientConsensusStates{
+ types.NewClientConsensusStates(
+ tmClientID0,
+ []types.ConsensusStateWithHeight{
+ types.NewConsensusStateWithHeight(
+ header.GetHeight().(types.Height),
+ ibctmtypes.NewConsensusState(
+ header.GetTime(), commitmenttypes.NewMerkleRoot(header.Header.GetAppHash()), header.Header.NextValidatorsHash,
+ ),
+ ),
+ },
+ ),
+ },
+ []types.IdentifiedGenesisMetadata{
+ types.NewIdentifiedGenesisMetadata(
+ clientID,
+ []types.GenesisMetadata{
+ types.NewGenesisMetadata([]byte("key1"), []byte("val1")),
+ types.NewGenesisMetadata([]byte("key2"), []byte("val2")),
+ },
+ ),
+ },
+ types.NewParams(exported.Tendermint, exported.Localhost),
+ false,
+ 2,
+ ),
+ expPass: true,
+ },
+ {
+ name: "invalid clientid",
+ genState: types.NewGenesisState(
+ []types.IdentifiedClientState{
+ types.NewIdentifiedClientState(
+ invalidClientID, ibctmtypes.NewClientState(chainID, ibctmtypes.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false),
+ ),
+ types.NewIdentifiedClientState(
+ exported.Localhost, localhosttypes.NewClientState("chainID", clientHeight),
+ ),
+ },
+ []types.ClientConsensusStates{
+ types.NewClientConsensusStates(
+ invalidClientID,
+ []types.ConsensusStateWithHeight{
+ types.NewConsensusStateWithHeight(
+ header.GetHeight().(types.Height),
+ ibctmtypes.NewConsensusState(
+ header.GetTime(), commitmenttypes.NewMerkleRoot(header.Header.GetAppHash()), header.Header.NextValidatorsHash,
+ ),
+ ),
+ },
+ ),
+ },
+ nil,
+ types.NewParams(exported.Tendermint),
+ false,
+ 0,
+ ),
+ expPass: false,
+ },
+ {
+ name: "invalid client",
+ genState: types.NewGenesisState(
+ []types.IdentifiedClientState{
+ types.NewIdentifiedClientState(
+ tmClientID0, ibctmtypes.NewClientState(chainID, ibctmtypes.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false),
+ ),
+ types.NewIdentifiedClientState(exported.Localhost, localhosttypes.NewClientState("chaindID", types.ZeroHeight())),
+ },
+ nil,
+ nil,
+ types.NewParams(exported.Tendermint),
+ false,
+ 0,
+ ),
+ expPass: false,
+ },
+ {
+ name: "consensus state client id does not match client id in genesis clients",
+ genState: types.NewGenesisState(
+ []types.IdentifiedClientState{
+ types.NewIdentifiedClientState(
+ tmClientID0, ibctmtypes.NewClientState(chainID, ibctmtypes.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false),
+ ),
+ types.NewIdentifiedClientState(
+ exported.Localhost, localhosttypes.NewClientState("chaindID", clientHeight),
+ ),
+ },
+ []types.ClientConsensusStates{
+ types.NewClientConsensusStates(
+ tmClientID1,
+ []types.ConsensusStateWithHeight{
+ types.NewConsensusStateWithHeight(
+ types.NewHeight(0, 1),
+ ibctmtypes.NewConsensusState(
+ header.GetTime(), commitmenttypes.NewMerkleRoot(header.Header.GetAppHash()), header.Header.NextValidatorsHash,
+ ),
+ ),
+ },
+ ),
+ },
+ nil,
+ types.NewParams(exported.Tendermint),
+ false,
+ 0,
+ ),
+ expPass: false,
+ },
+ {
+ name: "invalid consensus state height",
+ genState: types.NewGenesisState(
+ []types.IdentifiedClientState{
+ types.NewIdentifiedClientState(
+ tmClientID0, ibctmtypes.NewClientState(chainID, ibctmtypes.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false),
+ ),
+ types.NewIdentifiedClientState(
+ exported.Localhost, localhosttypes.NewClientState("chaindID", clientHeight),
+ ),
+ },
+ []types.ClientConsensusStates{
+ types.NewClientConsensusStates(
+ tmClientID0,
+ []types.ConsensusStateWithHeight{
+ types.NewConsensusStateWithHeight(
+ types.ZeroHeight(),
+ ibctmtypes.NewConsensusState(
+ header.GetTime(), commitmenttypes.NewMerkleRoot(header.Header.GetAppHash()), header.Header.NextValidatorsHash,
+ ),
+ ),
+ },
+ ),
+ },
+ nil,
+ types.NewParams(exported.Tendermint),
+ false,
+ 0,
+ ),
+ expPass: false,
+ },
+ {
+ name: "invalid consensus state",
+ genState: types.NewGenesisState(
+ []types.IdentifiedClientState{
+ types.NewIdentifiedClientState(
+ tmClientID0, ibctmtypes.NewClientState(chainID, ibctmtypes.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false),
+ ),
+ types.NewIdentifiedClientState(
+ exported.Localhost, localhosttypes.NewClientState("chaindID", clientHeight),
+ ),
+ },
+ []types.ClientConsensusStates{
+ types.NewClientConsensusStates(
+ tmClientID0,
+ []types.ConsensusStateWithHeight{
+ types.NewConsensusStateWithHeight(
+ types.NewHeight(0, 1),
+ ibctmtypes.NewConsensusState(
+ time.Time{}, commitmenttypes.NewMerkleRoot(header.Header.GetAppHash()), header.Header.NextValidatorsHash,
+ ),
+ ),
+ },
+ ),
+ },
+ nil,
+ types.NewParams(exported.Tendermint),
+ false,
+ 0,
+ ),
+ expPass: false,
+ },
+ {
+ name: "client in genesis clients is disallowed by params",
+ genState: types.NewGenesisState(
+ []types.IdentifiedClientState{
+ types.NewIdentifiedClientState(
+ tmClientID0, ibctmtypes.NewClientState(chainID, ibctesting.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false),
+ ),
+ types.NewIdentifiedClientState(
+ exported.Localhost, localhosttypes.NewClientState("chainID", clientHeight),
+ ),
+ },
+ []types.ClientConsensusStates{
+ types.NewClientConsensusStates(
+ tmClientID0,
+ []types.ConsensusStateWithHeight{
+ types.NewConsensusStateWithHeight(
+ header.GetHeight().(types.Height),
+ ibctmtypes.NewConsensusState(
+ header.GetTime(), commitmenttypes.NewMerkleRoot(header.Header.GetAppHash()), header.Header.NextValidatorsHash,
+ ),
+ ),
+ },
+ ),
+ },
+ nil,
+ types.NewParams(exported.Solomachine),
+ false,
+ 0,
+ ),
+ expPass: false,
+ },
+ {
+ name: "metadata client-id does not match a genesis client",
+ genState: types.NewGenesisState(
+ []types.IdentifiedClientState{
+ types.NewIdentifiedClientState(
+ clientID, ibctmtypes.NewClientState(chainID, ibctesting.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false),
+ ),
+ types.NewIdentifiedClientState(
+ exported.Localhost, localhosttypes.NewClientState("chainID", clientHeight),
+ ),
+ },
+ []types.ClientConsensusStates{
+ types.NewClientConsensusStates(
+ clientID,
+ []types.ConsensusStateWithHeight{
+ types.NewConsensusStateWithHeight(
+ header.GetHeight().(types.Height),
+ ibctmtypes.NewConsensusState(
+ header.GetTime(), commitmenttypes.NewMerkleRoot(header.Header.GetAppHash()), header.Header.NextValidatorsHash,
+ ),
+ ),
+ },
+ ),
+ },
+ []types.IdentifiedGenesisMetadata{
+ types.NewIdentifiedGenesisMetadata(
+ "wrongclientid",
+ []types.GenesisMetadata{
+ types.NewGenesisMetadata([]byte("key1"), []byte("val1")),
+ types.NewGenesisMetadata([]byte("key2"), []byte("val2")),
+ },
+ ),
+ },
+ types.NewParams(exported.Tendermint, exported.Localhost),
+ false,
+ 0,
+ ),
+ expPass: false,
+ },
+ {
+ name: "invalid metadata",
+ genState: types.NewGenesisState(
+ []types.IdentifiedClientState{
+ types.NewIdentifiedClientState(
+ clientID, ibctmtypes.NewClientState(chainID, ibctmtypes.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false),
+ ),
+ },
+ []types.ClientConsensusStates{
+ types.NewClientConsensusStates(
+ clientID,
+ []types.ConsensusStateWithHeight{
+ types.NewConsensusStateWithHeight(
+ header.GetHeight().(types.Height),
+ ibctmtypes.NewConsensusState(
+ header.GetTime(), commitmenttypes.NewMerkleRoot(header.Header.GetAppHash()), header.Header.NextValidatorsHash,
+ ),
+ ),
+ },
+ ),
+ },
+ []types.IdentifiedGenesisMetadata{
+ types.NewIdentifiedGenesisMetadata(
+ clientID,
+ []types.GenesisMetadata{
+ types.NewGenesisMetadata([]byte(""), []byte("val1")),
+ types.NewGenesisMetadata([]byte("key2"), []byte("val2")),
+ },
+ ),
+ },
+ types.NewParams(exported.Tendermint),
+ false,
+ 0,
+ ),
+			expPass: false,
+		},
+ {
+ name: "invalid params",
+ genState: types.NewGenesisState(
+ []types.IdentifiedClientState{
+ types.NewIdentifiedClientState(
+ tmClientID0, ibctmtypes.NewClientState(chainID, ibctesting.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false),
+ ),
+ types.NewIdentifiedClientState(
+ exported.Localhost, localhosttypes.NewClientState("chainID", clientHeight),
+ ),
+ },
+ []types.ClientConsensusStates{
+ types.NewClientConsensusStates(
+ tmClientID0,
+ []types.ConsensusStateWithHeight{
+ types.NewConsensusStateWithHeight(
+ header.GetHeight().(types.Height),
+ ibctmtypes.NewConsensusState(
+ header.GetTime(), commitmenttypes.NewMerkleRoot(header.Header.GetAppHash()), header.Header.NextValidatorsHash,
+ ),
+ ),
+ },
+ ),
+ },
+ nil,
+ types.NewParams(" "),
+ false,
+ 0,
+ ),
+ expPass: false,
+ },
+ {
+ name: "invalid param",
+ genState: types.NewGenesisState(
+ []types.IdentifiedClientState{
+ types.NewIdentifiedClientState(
+ tmClientID0, ibctmtypes.NewClientState(chainID, ibctesting.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false),
+ ),
+ types.NewIdentifiedClientState(
+ exported.Localhost, localhosttypes.NewClientState("chainID", clientHeight),
+ ),
+ },
+ []types.ClientConsensusStates{
+ types.NewClientConsensusStates(
+ tmClientID0,
+ []types.ConsensusStateWithHeight{
+ types.NewConsensusStateWithHeight(
+ header.GetHeight().(types.Height),
+ ibctmtypes.NewConsensusState(
+ header.GetTime(), commitmenttypes.NewMerkleRoot(header.Header.GetAppHash()), header.Header.NextValidatorsHash,
+ ),
+ ),
+ },
+ ),
+ },
+ nil,
+ types.NewParams(" "),
+ true,
+ 0,
+ ),
+ expPass: false,
+ },
+ {
+ name: "localhost client not registered on allowlist",
+ genState: types.NewGenesisState(
+ []types.IdentifiedClientState{
+ types.NewIdentifiedClientState(
+ tmClientID1, ibctmtypes.NewClientState(chainID, ibctesting.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false),
+ ),
+ types.NewIdentifiedClientState(
+ exported.Localhost+"-0", localhosttypes.NewClientState("chainID", clientHeight),
+ ),
+ },
+ []types.ClientConsensusStates{
+ types.NewClientConsensusStates(
+ tmClientID1,
+ []types.ConsensusStateWithHeight{
+ types.NewConsensusStateWithHeight(
+ header.GetHeight().(types.Height),
+ ibctmtypes.NewConsensusState(
+ header.GetTime(), commitmenttypes.NewMerkleRoot(header.Header.GetAppHash()), header.Header.NextValidatorsHash,
+ ),
+ ),
+ },
+ ),
+ },
+ nil,
+ types.NewParams(exported.Tendermint),
+ true,
+ 2,
+ ),
+ expPass: false,
+ },
+ {
+ name: "next sequence too small",
+ genState: types.NewGenesisState(
+ []types.IdentifiedClientState{
+ types.NewIdentifiedClientState(
+ tmClientID0, ibctmtypes.NewClientState(chainID, ibctesting.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false),
+ ),
+ types.NewIdentifiedClientState(
+ exported.Localhost+"-1", localhosttypes.NewClientState("chainID", clientHeight),
+ ),
+ },
+ []types.ClientConsensusStates{
+ types.NewClientConsensusStates(
+ tmClientID0,
+ []types.ConsensusStateWithHeight{
+ types.NewConsensusStateWithHeight(
+ header.GetHeight().(types.Height),
+ ibctmtypes.NewConsensusState(
+ header.GetTime(), commitmenttypes.NewMerkleRoot(header.Header.GetAppHash()), header.Header.NextValidatorsHash,
+ ),
+ ),
+ },
+ ),
+ },
+ nil,
+ types.NewParams(exported.Tendermint, exported.Localhost),
+ false,
+ 0,
+ ),
+ expPass: false,
+ },
+ {
+ name: "failed to parse client identifier in client state loop",
+ genState: types.NewGenesisState(
+ []types.IdentifiedClientState{
+ types.NewIdentifiedClientState(
+ "my-client", ibctmtypes.NewClientState(chainID, ibctesting.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false),
+ ),
+ types.NewIdentifiedClientState(
+ exported.Localhost+"-1", localhosttypes.NewClientState("chainID", clientHeight),
+ ),
+ },
+ []types.ClientConsensusStates{
+ types.NewClientConsensusStates(
+ tmClientID0,
+ []types.ConsensusStateWithHeight{
+ types.NewConsensusStateWithHeight(
+ header.GetHeight().(types.Height),
+ ibctmtypes.NewConsensusState(
+ header.GetTime(), commitmenttypes.NewMerkleRoot(header.Header.GetAppHash()), header.Header.NextValidatorsHash,
+ ),
+ ),
+ },
+ ),
+ },
+ nil,
+ types.NewParams(exported.Tendermint, exported.Localhost),
+ false,
+ 5,
+ ),
+ expPass: false,
+ },
+ {
+ name: "consensus state different than client state type",
+ genState: types.NewGenesisState(
+ []types.IdentifiedClientState{
+ types.NewIdentifiedClientState(
+ exported.Localhost+"-1", localhosttypes.NewClientState("chainID", clientHeight),
+ ),
+ },
+ []types.ClientConsensusStates{
+ types.NewClientConsensusStates(
+ exported.Localhost+"-1",
+ []types.ConsensusStateWithHeight{
+ types.NewConsensusStateWithHeight(
+ header.GetHeight().(types.Height),
+ ibctmtypes.NewConsensusState(
+ header.GetTime(), commitmenttypes.NewMerkleRoot(header.Header.GetAppHash()), header.Header.NextValidatorsHash,
+ ),
+ ),
+ },
+ ),
+ },
+ nil,
+ types.NewParams(exported.Tendermint, exported.Localhost),
+ false,
+ 5,
+ ),
+ expPass: false,
+ },
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+ err := tc.genState.Validate()
+ if tc.expPass {
+ suite.Require().NoError(err, tc.name)
+ } else {
+ suite.Require().Error(err, tc.name)
+ }
+ }
+}
diff --git a/core/02-client/types/height.go b/core/02-client/types/height.go
new file mode 100644
index 00000000..4216d54e
--- /dev/null
+++ b/core/02-client/types/height.go
@@ -0,0 +1,188 @@
+package types
+
+import (
+ "fmt"
+ "math/big"
+ "regexp"
+ "strconv"
+ "strings"
+
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+)
+
+var _ exported.Height = (*Height)(nil)
+
+// IsRevisionFormat checks if a chainID is in the format required for parsing revisions
+// The chainID must be in the form: `{chainID}-{revision}`
+// 24-host may enforce stricter checks on chainID
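+// For example, "gaiamainnet-3" is in revision format with revision 3, while
+// "gaiamainnet" and "gaiamainnet-03" are not (see TestParseChainID).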
+var IsRevisionFormat = regexp.MustCompile(`^.*[^-]-{1}[1-9][0-9]*$`).MatchString
+
+// ZeroHeight is a helper function which returns an uninitialized height.
+func ZeroHeight() Height {
+ return Height{}
+}
+
+// NewHeight is a constructor for the IBC height type
+func NewHeight(revisionNumber, revisionHeight uint64) Height {
+ return Height{
+ RevisionNumber: revisionNumber,
+ RevisionHeight: revisionHeight,
+ }
+}
+
+// GetRevisionNumber returns the revision-number of the height
+func (h Height) GetRevisionNumber() uint64 {
+ return h.RevisionNumber
+}
+
+// GetRevisionHeight returns the revision-height of the height
+func (h Height) GetRevisionHeight() uint64 {
+ return h.RevisionHeight
+}
+
+// Compare implements a method to compare two heights. When comparing two heights a, b
+// we can call a.Compare(b) which will return
+// -1 if a < b
+// 0 if a = b
+// 1 if a > b
+//
+// It first compares based on revision numbers: whichever has the higher revision number is the greater height.
+// If the revision numbers are equal, then the revision heights are compared.
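+//
+// For example, NewHeight(1, 3).Compare(NewHeight(3, 4)) returns -1, since the first
+// revision number is lower regardless of the revision heights (see TestCompareHeights).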
+func (h Height) Compare(other exported.Height) int64 {
+ height, ok := other.(Height)
+ if !ok {
+ panic(fmt.Sprintf("cannot compare against invalid height type: %T. expected height type: %T", other, h))
+ }
+ var a, b big.Int
+ if h.RevisionNumber != height.RevisionNumber {
+ a.SetUint64(h.RevisionNumber)
+ b.SetUint64(height.RevisionNumber)
+ } else {
+ a.SetUint64(h.RevisionHeight)
+ b.SetUint64(height.RevisionHeight)
+ }
+ return int64(a.Cmp(&b))
+}
+
+// LT Helper comparison function returns true if h < other
+func (h Height) LT(other exported.Height) bool {
+ return h.Compare(other) == -1
+}
+
+// LTE Helper comparison function returns true if h <= other
+func (h Height) LTE(other exported.Height) bool {
+ cmp := h.Compare(other)
+ return cmp <= 0
+}
+
+// GT Helper comparison function returns true if h > other
+func (h Height) GT(other exported.Height) bool {
+ return h.Compare(other) == 1
+}
+
+// GTE Helper comparison function returns true if h >= other
+func (h Height) GTE(other exported.Height) bool {
+ cmp := h.Compare(other)
+ return cmp >= 0
+}
+
+// EQ Helper comparison function returns true if h == other
+func (h Height) EQ(other exported.Height) bool {
+ return h.Compare(other) == 0
+}
+
+// String returns a string representation of Height
+func (h Height) String() string {
+ return fmt.Sprintf("%d-%d", h.RevisionNumber, h.RevisionHeight)
+}
+
+// Decrement will return a new height with the RevisionHeight decremented.
+// If the RevisionHeight is already at its lowest value (0), a zero height and a false success flag are returned.
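+// For example, NewHeight(3, 3).Decrement() returns (NewHeight(3, 2), true), while
+// NewHeight(3, 0).Decrement() returns (Height{}, false) (see TestDecrement).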
+func (h Height) Decrement() (decremented exported.Height, success bool) {
+ if h.RevisionHeight == 0 {
+ return Height{}, false
+ }
+ return NewHeight(h.RevisionNumber, h.RevisionHeight-1), true
+}
+
+// Increment will return a height with the same revision number but an
+// incremented revision height
+func (h Height) Increment() exported.Height {
+ return NewHeight(h.RevisionNumber, h.RevisionHeight+1)
+}
+
+// IsZero returns true if the revision number and revision height are both 0
+func (h Height) IsZero() bool {
+ return h.RevisionNumber == 0 && h.RevisionHeight == 0
+}
+
+// MustParseHeight will attempt to parse a string representation of a height and panic if
+// parsing fails.
+func MustParseHeight(heightStr string) Height {
+ height, err := ParseHeight(heightStr)
+ if err != nil {
+ panic(err)
+ }
+
+ return height
+}
+
+// ParseHeight is a utility function that takes a string representation of the height
+// and returns a Height struct
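+// For example, ParseHeight("3-10") returns NewHeight(3, 10), while "height" and
+// "3-height" are rejected as malformed (see TestString).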
+func ParseHeight(heightStr string) (Height, error) {
+ splitStr := strings.Split(heightStr, "-")
+ if len(splitStr) != 2 {
+ return Height{}, sdkerrors.Wrapf(sdkerrors.ErrInvalidHeight, "expected height string format: {revision}-{height}. Got: %s", heightStr)
+ }
+ revisionNumber, err := strconv.ParseUint(splitStr[0], 10, 64)
+ if err != nil {
+ return Height{}, sdkerrors.Wrapf(sdkerrors.ErrInvalidHeight, "invalid revision number. parse err: %s", err)
+ }
+ revisionHeight, err := strconv.ParseUint(splitStr[1], 10, 64)
+ if err != nil {
+ return Height{}, sdkerrors.Wrapf(sdkerrors.ErrInvalidHeight, "invalid revision height. parse err: %s", err)
+ }
+ return NewHeight(revisionNumber, revisionHeight), nil
+}
+
+// SetRevisionNumber takes a chainID in valid revision format and swaps the revision number
+// in the chainID with the given revision number.
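+// For example, SetRevisionNumber("gaiamainnet-3", 4) returns "gaiamainnet-4", while
+// "gaiamainnet" is rejected because it is not in revision format (see TestSetRevisionNumber).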
+func SetRevisionNumber(chainID string, revision uint64) (string, error) {
+ if !IsRevisionFormat(chainID) {
+ return "", sdkerrors.Wrapf(
+ sdkerrors.ErrInvalidChainID, "chainID is not in revision format: %s", chainID,
+ )
+ }
+
+ splitStr := strings.Split(chainID, "-")
+ // swap out revision number with given revision
+	splitStr[len(splitStr)-1] = strconv.FormatUint(revision, 10)
+ return strings.Join(splitStr, "-"), nil
+}
+
+// ParseChainID is a utility function that returns the revision number from the given chainID.
+// ParseChainID attempts to parse a chain id in the format: `{chainID}-{revision}`
+// and return the revision number as a uint64.
+// If the chainID is not in the expected format, a default revision value of 0 is returned.
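+// For example, ParseChainID("gaiamainnet-3") returns 3 and ParseChainID("gaiamainnet")
+// returns 0 (see TestParseChainID).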
+func ParseChainID(chainID string) uint64 {
+ if !IsRevisionFormat(chainID) {
+ // chainID is not in revision format, return 0 as default
+ return 0
+ }
+ splitStr := strings.Split(chainID, "-")
+ revision, err := strconv.ParseUint(splitStr[len(splitStr)-1], 10, 64)
+ // sanity check: error should always be nil since regex only allows numbers in last element
+ if err != nil {
+ panic(fmt.Sprintf("regex allowed non-number value as last split element for chainID: %s", chainID))
+ }
+ return revision
+}
+
+// GetSelfHeight is a utility function that returns self height given context
+// Revision number is retrieved from ctx.ChainID()
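+// For example, with chain ID "gaiamainnet-3" at block height 18, the self height
+// is NewHeight(3, 18) (see TestSelfHeight).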
+func GetSelfHeight(ctx sdk.Context) Height {
+ revision := ParseChainID(ctx.ChainID())
+ return NewHeight(revision, uint64(ctx.BlockHeight()))
+}
diff --git a/core/02-client/types/height_test.go b/core/02-client/types/height_test.go
new file mode 100644
index 00000000..a455b7f5
--- /dev/null
+++ b/core/02-client/types/height_test.go
@@ -0,0 +1,155 @@
+package types_test
+
+import (
+ "math"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+)
+
+func TestZeroHeight(t *testing.T) {
+ require.Equal(t, types.Height{}, types.ZeroHeight())
+}
+
+func TestCompareHeights(t *testing.T) {
+ testCases := []struct {
+ name string
+ height1 types.Height
+ height2 types.Height
+ compareSign int64
+ }{
+ {"revision number 1 is lesser", types.NewHeight(1, 3), types.NewHeight(3, 4), -1},
+ {"revision number 1 is greater", types.NewHeight(7, 5), types.NewHeight(4, 5), 1},
+ {"revision height 1 is lesser", types.NewHeight(3, 4), types.NewHeight(3, 9), -1},
+ {"revision height 1 is greater", types.NewHeight(3, 8), types.NewHeight(3, 3), 1},
+ {"revision number is MaxUint64", types.NewHeight(math.MaxUint64, 1), types.NewHeight(0, 1), 1},
+ {"revision height is MaxUint64", types.NewHeight(1, math.MaxUint64), types.NewHeight(1, 0), 1},
+ {"height is equal", types.NewHeight(4, 4), types.NewHeight(4, 4), 0},
+ }
+
+ for i, tc := range testCases {
+ compare := tc.height1.Compare(tc.height2)
+
+ switch tc.compareSign {
+ case -1:
+ require.True(t, compare == -1, "case %d: %s should return negative value on comparison, got: %d",
+ i, tc.name, compare)
+ case 0:
+ require.True(t, compare == 0, "case %d: %s should return zero on comparison, got: %d",
+ i, tc.name, compare)
+ case 1:
+ require.True(t, compare == 1, "case %d: %s should return positive value on comparison, got: %d",
+ i, tc.name, compare)
+ }
+ }
+}
+
+func TestDecrement(t *testing.T) {
+ validDecrement := types.NewHeight(3, 3)
+ expected := types.NewHeight(3, 2)
+
+ actual, success := validDecrement.Decrement()
+ require.Equal(t, expected, actual, "decrementing %s did not return expected height: %s. got %s",
+ validDecrement, expected, actual)
+ require.True(t, success, "decrement failed unexpectedly")
+
+ invalidDecrement := types.NewHeight(3, 0)
+ actual, success = invalidDecrement.Decrement()
+
+ require.Equal(t, types.ZeroHeight(), actual, "invalid decrement returned non-zero height: %s", actual)
+ require.False(t, success, "invalid decrement passed")
+}
+
+func TestString(t *testing.T) {
+ _, err := types.ParseHeight("height")
+ require.Error(t, err, "invalid height string passed")
+
+ _, err = types.ParseHeight("revision-10")
+ require.Error(t, err, "invalid revision string passed")
+
+ _, err = types.ParseHeight("3-height")
+ require.Error(t, err, "invalid revision-height string passed")
+
+ height := types.NewHeight(3, 4)
+ recovered, err := types.ParseHeight(height.String())
+
+ require.NoError(t, err, "valid height string could not be parsed")
+ require.Equal(t, height, recovered, "recovered height not equal to original height")
+
+ parse, err := types.ParseHeight("3-10")
+ require.NoError(t, err, "parse err")
+ require.Equal(t, types.NewHeight(3, 10), parse, "parse height returns wrong height")
+}
+
+func (suite *TypesTestSuite) TestMustParseHeight() {
+ suite.Require().Panics(func() {
+ types.MustParseHeight("height")
+ })
+
+ suite.Require().NotPanics(func() {
+ types.MustParseHeight("111-1")
+ })
+
+ suite.Require().NotPanics(func() {
+ types.MustParseHeight("0-0")
+ })
+}
+
+func TestParseChainID(t *testing.T) {
+ cases := []struct {
+ chainID string
+ revision uint64
+ formatted bool
+ }{
+ {"gaiamainnet-3", 3, true},
+ {"a-1", 1, true},
+ {"gaia-mainnet-40", 40, true},
+ {"gaiamainnet-3-39", 39, true},
+ {"gaiamainnet--", 0, false},
+ {"gaiamainnet-03", 0, false},
+ {"gaiamainnet--4", 0, false},
+ {"gaiamainnet-3.4", 0, false},
+ {"gaiamainnet", 0, false},
+ {"a--1", 0, false},
+ {"-1", 0, false},
+ {"--1", 0, false},
+ }
+
+ for i, tc := range cases {
+ require.Equal(t, tc.formatted, types.IsRevisionFormat(tc.chainID), "id %s does not match expected format", tc.chainID)
+
+ revision := types.ParseChainID(tc.chainID)
+ require.Equal(t, tc.revision, revision, "case %d returns incorrect revision", i)
+ }
+
+}
+
+func TestSetRevisionNumber(t *testing.T) {
+ // Test SetRevisionNumber
+ chainID, err := types.SetRevisionNumber("gaiamainnet", 3)
+ require.Error(t, err, "invalid revision format passed SetRevisionNumber")
+ require.Equal(t, "", chainID, "invalid revision format returned non-empty string on SetRevisionNumber")
+ chainID = "gaiamainnet-3"
+
+ chainID, err = types.SetRevisionNumber(chainID, 4)
+ require.NoError(t, err, "valid revision format failed SetRevisionNumber")
+ require.Equal(t, "gaiamainnet-4", chainID, "valid revision format returned incorrect string on SetRevisionNumber")
+}
+
+func (suite *TypesTestSuite) TestSelfHeight() {
+ ctx := suite.chainA.GetContext()
+
+ // Test default revision
+ ctx = ctx.WithChainID("gaiamainnet")
+ ctx = ctx.WithBlockHeight(10)
+ height := types.GetSelfHeight(ctx)
+ suite.Require().Equal(types.NewHeight(0, 10), height, "default self height failed")
+
+ // Test successful revision format
+ ctx = ctx.WithChainID("gaiamainnet-3")
+ ctx = ctx.WithBlockHeight(18)
+ height = types.GetSelfHeight(ctx)
+ suite.Require().Equal(types.NewHeight(3, 18), height, "valid self height failed")
+}
diff --git a/core/02-client/types/keys.go b/core/02-client/types/keys.go
new file mode 100644
index 00000000..321f5e3f
--- /dev/null
+++ b/core/02-client/types/keys.go
@@ -0,0 +1,65 @@
+package types
+
+import (
+ "fmt"
+ "regexp"
+ "strconv"
+ "strings"
+
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+)
+
+const (
+ // SubModuleName defines the IBC client name
+ SubModuleName string = "client"
+
+ // RouterKey is the message route for IBC client
+ RouterKey string = SubModuleName
+
+ // QuerierRoute is the querier route for IBC client
+ QuerierRoute string = SubModuleName
+
+ // KeyNextClientSequence is the key used to store the next client sequence in
+ // the keeper.
+ KeyNextClientSequence = "nextClientSequence"
+)
+
+// FormatClientIdentifier returns the client identifier with the sequence appended.
+// This is an SDK-specific format not enforced by the IBC protocol.
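+// For example, FormatClientIdentifier("tendermint", 0) returns "tendermint-0"
+// (see TestParseClientIdentifier).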
+func FormatClientIdentifier(clientType string, sequence uint64) string {
+ return fmt.Sprintf("%s-%d", clientType, sequence)
+}
+
+// IsClientIDFormat checks if a clientID is in the format required by the SDK for
+// parsing client identifiers. The client identifier must be in the form: `{client-type}-{N}`
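+// For example, "tendermint-0" and "t-0" match the format, while "tendermint0"
+// (missing dash) and a blank identifier do not (see TestParseClientIdentifier).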
+var IsClientIDFormat = regexp.MustCompile(`^.*[^-]-[0-9]{1,20}$`).MatchString
+
+// IsValidClientID checks if the clientID is valid and can be parsed into the client
+// identifier format.
+func IsValidClientID(clientID string) bool {
+ _, _, err := ParseClientIdentifier(clientID)
+ return err == nil
+}
+
+// ParseClientIdentifier parses the client type and sequence from the client identifier.
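+// For example, "solomachine-v1-1" parses to client type "solomachine-v1" with sequence 1,
+// while "tendermint0" is rejected because it lacks the trailing "-{N}" sequence (see TestParseClientIdentifier).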
+func ParseClientIdentifier(clientID string) (string, uint64, error) {
+ if !IsClientIDFormat(clientID) {
+ return "", 0, sdkerrors.Wrapf(host.ErrInvalidID, "invalid client identifier %s is not in format: `{client-type}-{N}`", clientID)
+ }
+
+ splitStr := strings.Split(clientID, "-")
+ lastIndex := len(splitStr) - 1
+
+ clientType := strings.Join(splitStr[:lastIndex], "-")
+ if strings.TrimSpace(clientType) == "" {
+ return "", 0, sdkerrors.Wrap(host.ErrInvalidID, "client identifier must be in format: `{client-type}-{N}` and client type cannot be blank")
+ }
+
+ sequence, err := strconv.ParseUint(splitStr[lastIndex], 10, 64)
+ if err != nil {
+ return "", 0, sdkerrors.Wrap(err, "failed to parse client identifier sequence")
+ }
+
+ return clientType, sequence, nil
+}
diff --git a/core/02-client/types/keys_test.go b/core/02-client/types/keys_test.go
new file mode 100644
index 00000000..49381452
--- /dev/null
+++ b/core/02-client/types/keys_test.go
@@ -0,0 +1,54 @@
+package types_test
+
+import (
+ "math"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+)
+
+// tests ParseClientIdentifier and IsValidClientID
+func TestParseClientIdentifier(t *testing.T) {
+ testCases := []struct {
+ name string
+ clientID string
+ clientType string
+ expSeq uint64
+ expPass bool
+ }{
+ {"valid 0", "tendermint-0", "tendermint", 0, true},
+ {"valid 1", "tendermint-1", "tendermint", 1, true},
+		{"valid solomachine", "solomachine-v1-1", "solomachine-v1", 1, true},
+ {"valid large sequence", types.FormatClientIdentifier("tendermint", math.MaxUint64), "tendermint", math.MaxUint64, true},
+ {"valid short client type", "t-0", "t", 0, true},
+ // one above uint64 max
+ {"invalid uint64", "tendermint-18446744073709551616", "tendermint", 0, false},
+		// max uint64 has 20 digits; this sequence exceeds the clientID format limit
+ {"invalid large sequence", "tendermint-2345682193567182931243", "tendermint", 0, false},
+ {"missing dash", "tendermint0", "tendermint", 0, false},
+ {"blank id", " ", " ", 0, false},
+ {"empty id", "", "", 0, false},
+ {"negative sequence", "tendermint--1", "tendermint", 0, false},
+ {"invalid format", "tendermint-tm", "tendermint", 0, false},
+		{"empty client type", " -100", "tendermint", 0, false},
+ }
+
+ for _, tc := range testCases {
+
+ clientType, seq, err := types.ParseClientIdentifier(tc.clientID)
+ valid := types.IsValidClientID(tc.clientID)
+ require.Equal(t, tc.expSeq, seq, tc.clientID)
+
+ if tc.expPass {
+ require.NoError(t, err, tc.name)
+ require.True(t, valid)
+ require.Equal(t, tc.clientType, clientType)
+ } else {
+ require.Error(t, err, tc.name, tc.clientID)
+ require.False(t, valid)
+ require.Equal(t, "", clientType)
+ }
+ }
+}
diff --git a/core/02-client/types/msgs.go b/core/02-client/types/msgs.go
new file mode 100644
index 00000000..1e884123
--- /dev/null
+++ b/core/02-client/types/msgs.go
@@ -0,0 +1,343 @@
+package types
+
+import (
+ codectypes "github.com/cosmos/cosmos-sdk/codec/types"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+)
+
+// message types for the IBC client
+const (
+ TypeMsgCreateClient string = "create_client"
+ TypeMsgUpdateClient string = "update_client"
+ TypeMsgUpgradeClient string = "upgrade_client"
+ TypeMsgSubmitMisbehaviour string = "submit_misbehaviour"
+)
+
+var (
+ _ sdk.Msg = &MsgCreateClient{}
+ _ sdk.Msg = &MsgUpdateClient{}
+ _ sdk.Msg = &MsgSubmitMisbehaviour{}
+ _ sdk.Msg = &MsgUpgradeClient{}
+
+ _ codectypes.UnpackInterfacesMessage = MsgCreateClient{}
+ _ codectypes.UnpackInterfacesMessage = MsgUpdateClient{}
+ _ codectypes.UnpackInterfacesMessage = MsgSubmitMisbehaviour{}
+ _ codectypes.UnpackInterfacesMessage = MsgUpgradeClient{}
+)
+
+// NewMsgCreateClient creates a new MsgCreateClient instance
+//nolint:interfacer
+func NewMsgCreateClient(
+ clientState exported.ClientState, consensusState exported.ConsensusState, signer sdk.AccAddress,
+) (*MsgCreateClient, error) {
+
+ anyClientState, err := PackClientState(clientState)
+ if err != nil {
+ return nil, err
+ }
+
+ anyConsensusState, err := PackConsensusState(consensusState)
+ if err != nil {
+ return nil, err
+ }
+
+ return &MsgCreateClient{
+ ClientState: anyClientState,
+ ConsensusState: anyConsensusState,
+ Signer: signer.String(),
+ }, nil
+}
+
+// Route implements sdk.Msg
+func (msg MsgCreateClient) Route() string {
+ return host.RouterKey
+}
+
+// Type implements sdk.Msg
+func (msg MsgCreateClient) Type() string {
+ return TypeMsgCreateClient
+}
+
+// ValidateBasic implements sdk.Msg
+func (msg MsgCreateClient) ValidateBasic() error {
+ _, err := sdk.AccAddressFromBech32(msg.Signer)
+ if err != nil {
+ return sdkerrors.Wrapf(sdkerrors.ErrInvalidAddress, "string could not be parsed as address: %v", err)
+ }
+ clientState, err := UnpackClientState(msg.ClientState)
+ if err != nil {
+ return err
+ }
+ if err := clientState.Validate(); err != nil {
+ return err
+ }
+ if clientState.ClientType() == exported.Localhost {
+ return sdkerrors.Wrap(ErrInvalidClient, "localhost client can only be created on chain initialization")
+ }
+ consensusState, err := UnpackConsensusState(msg.ConsensusState)
+ if err != nil {
+ return err
+ }
+ if clientState.ClientType() != consensusState.ClientType() {
+ return sdkerrors.Wrap(ErrInvalidClientType, "client type for client state and consensus state do not match")
+ }
+ if err := ValidateClientType(clientState.ClientType()); err != nil {
+ return sdkerrors.Wrap(err, "client type does not meet naming constraints")
+ }
+ return consensusState.ValidateBasic()
+}
+
+// GetSignBytes implements sdk.Msg. The function will panic since it is used
+// for amino transaction verification which IBC does not support.
+func (msg MsgCreateClient) GetSignBytes() []byte {
+ panic("IBC messages do not support amino")
+}
+
+// GetSigners implements sdk.Msg
+func (msg MsgCreateClient) GetSigners() []sdk.AccAddress {
+ accAddr, err := sdk.AccAddressFromBech32(msg.Signer)
+ if err != nil {
+ panic(err)
+ }
+ return []sdk.AccAddress{accAddr}
+}
+
+// UnpackInterfaces implements UnpackInterfacesMessage.UnpackInterfaces
+func (msg MsgCreateClient) UnpackInterfaces(unpacker codectypes.AnyUnpacker) error {
+ var clientState exported.ClientState
+ err := unpacker.UnpackAny(msg.ClientState, &clientState)
+ if err != nil {
+ return err
+ }
+
+ var consensusState exported.ConsensusState
+ return unpacker.UnpackAny(msg.ConsensusState, &consensusState)
+}
+
+// NewMsgUpdateClient creates a new MsgUpdateClient instance
+//nolint:interfacer
+func NewMsgUpdateClient(id string, header exported.Header, signer sdk.AccAddress) (*MsgUpdateClient, error) {
+ anyHeader, err := PackHeader(header)
+ if err != nil {
+ return nil, err
+ }
+
+ return &MsgUpdateClient{
+ ClientId: id,
+ Header: anyHeader,
+ Signer: signer.String(),
+ }, nil
+}
+
+// Route implements sdk.Msg
+func (msg MsgUpdateClient) Route() string {
+ return host.RouterKey
+}
+
+// Type implements sdk.Msg
+func (msg MsgUpdateClient) Type() string {
+ return TypeMsgUpdateClient
+}
+
+// ValidateBasic implements sdk.Msg
+func (msg MsgUpdateClient) ValidateBasic() error {
+ _, err := sdk.AccAddressFromBech32(msg.Signer)
+ if err != nil {
+ return sdkerrors.Wrapf(sdkerrors.ErrInvalidAddress, "string could not be parsed as address: %v", err)
+ }
+ header, err := UnpackHeader(msg.Header)
+ if err != nil {
+ return err
+ }
+ if err := header.ValidateBasic(); err != nil {
+ return err
+ }
+ if msg.ClientId == exported.Localhost {
+ return sdkerrors.Wrap(ErrInvalidClient, "localhost client is only updated on ABCI BeginBlock")
+ }
+ return host.ClientIdentifierValidator(msg.ClientId)
+}
+
+// GetSignBytes implements sdk.Msg. The function will panic since it is used
+// for amino transaction verification which IBC does not support.
+func (msg MsgUpdateClient) GetSignBytes() []byte {
+ panic("IBC messages do not support amino")
+}
+
+// GetSigners implements sdk.Msg
+func (msg MsgUpdateClient) GetSigners() []sdk.AccAddress {
+ accAddr, err := sdk.AccAddressFromBech32(msg.Signer)
+ if err != nil {
+ panic(err)
+ }
+ return []sdk.AccAddress{accAddr}
+}
+
+// UnpackInterfaces implements UnpackInterfacesMessage.UnpackInterfaces
+func (msg MsgUpdateClient) UnpackInterfaces(unpacker codectypes.AnyUnpacker) error {
+ var header exported.Header
+ return unpacker.UnpackAny(msg.Header, &header)
+}
+
+// NewMsgUpgradeClient creates a new MsgUpgradeClient instance
+// nolint: interfacer
+func NewMsgUpgradeClient(clientID string, clientState exported.ClientState, consState exported.ConsensusState,
+ proofUpgradeClient, proofUpgradeConsState []byte, signer sdk.AccAddress) (*MsgUpgradeClient, error) {
+ anyClient, err := PackClientState(clientState)
+ if err != nil {
+ return nil, err
+ }
+ anyConsState, err := PackConsensusState(consState)
+ if err != nil {
+ return nil, err
+ }
+
+ return &MsgUpgradeClient{
+ ClientId: clientID,
+ ClientState: anyClient,
+ ConsensusState: anyConsState,
+ ProofUpgradeClient: proofUpgradeClient,
+ ProofUpgradeConsensusState: proofUpgradeConsState,
+ Signer: signer.String(),
+ }, nil
+}
+
+// Route implements sdk.Msg
+func (msg MsgUpgradeClient) Route() string {
+ return host.RouterKey
+}
+
+// Type implements sdk.Msg
+func (msg MsgUpgradeClient) Type() string {
+ return TypeMsgUpgradeClient
+}
+
+// ValidateBasic implements sdk.Msg
+func (msg MsgUpgradeClient) ValidateBasic() error {
+	// will not validate the client state as the committed client may not form a valid client state.
+ // client implementations are responsible for ensuring final upgraded client is valid.
+ clientState, err := UnpackClientState(msg.ClientState)
+ if err != nil {
+ return err
+ }
+	// will not validate consensus state here since the trusted kernel may not form a valid consensus state.
+ // client implementations are responsible for ensuring client can submit new headers against this consensus state.
+ consensusState, err := UnpackConsensusState(msg.ConsensusState)
+ if err != nil {
+ return err
+ }
+
+ if clientState.ClientType() != consensusState.ClientType() {
+ return sdkerrors.Wrapf(ErrInvalidUpgradeClient, "consensus state's client-type does not match client. expected: %s, got: %s",
+ clientState.ClientType(), consensusState.ClientType())
+ }
+ if len(msg.ProofUpgradeClient) == 0 {
+ return sdkerrors.Wrap(ErrInvalidUpgradeClient, "proof of upgrade client cannot be empty")
+ }
+ if len(msg.ProofUpgradeConsensusState) == 0 {
+ return sdkerrors.Wrap(ErrInvalidUpgradeClient, "proof of upgrade consensus state cannot be empty")
+ }
+ _, err = sdk.AccAddressFromBech32(msg.Signer)
+ if err != nil {
+ return sdkerrors.Wrapf(sdkerrors.ErrInvalidAddress, "string could not be parsed as address: %v", err)
+ }
+ return host.ClientIdentifierValidator(msg.ClientId)
+}
+
+// GetSignBytes implements sdk.Msg. The function will panic since it is used
+// for amino transaction verification which IBC does not support.
+func (msg MsgUpgradeClient) GetSignBytes() []byte {
+ panic("IBC messages do not support amino")
+}
+
+// GetSigners implements sdk.Msg
+func (msg MsgUpgradeClient) GetSigners() []sdk.AccAddress {
+ accAddr, err := sdk.AccAddressFromBech32(msg.Signer)
+ if err != nil {
+ panic(err)
+ }
+ return []sdk.AccAddress{accAddr}
+}
+
+// UnpackInterfaces implements UnpackInterfacesMessage.UnpackInterfaces
+func (msg MsgUpgradeClient) UnpackInterfaces(unpacker codectypes.AnyUnpacker) error {
+ var (
+ clientState exported.ClientState
+ consState exported.ConsensusState
+ )
+ if err := unpacker.UnpackAny(msg.ClientState, &clientState); err != nil {
+ return err
+ }
+ return unpacker.UnpackAny(msg.ConsensusState, &consState)
+}
+
+// NewMsgSubmitMisbehaviour creates a new MsgSubmitMisbehaviour instance.
+//nolint:interfacer
+func NewMsgSubmitMisbehaviour(clientID string, misbehaviour exported.Misbehaviour, signer sdk.AccAddress) (*MsgSubmitMisbehaviour, error) {
+ anyMisbehaviour, err := PackMisbehaviour(misbehaviour)
+ if err != nil {
+ return nil, err
+ }
+
+ return &MsgSubmitMisbehaviour{
+ ClientId: clientID,
+ Misbehaviour: anyMisbehaviour,
+ Signer: signer.String(),
+ }, nil
+}
+
+// Route returns the MsgSubmitMisbehaviour's route.
+func (msg MsgSubmitMisbehaviour) Route() string { return host.RouterKey }
+
+// Type returns the MsgSubmitMisbehaviour's type.
+func (msg MsgSubmitMisbehaviour) Type() string {
+ return TypeMsgSubmitMisbehaviour
+}
+
+// ValidateBasic performs basic (non-state-dependent) validation on a MsgSubmitMisbehaviour.
+func (msg MsgSubmitMisbehaviour) ValidateBasic() error {
+ _, err := sdk.AccAddressFromBech32(msg.Signer)
+ if err != nil {
+ return sdkerrors.Wrapf(sdkerrors.ErrInvalidAddress, "string could not be parsed as address: %v", err)
+ }
+ misbehaviour, err := UnpackMisbehaviour(msg.Misbehaviour)
+ if err != nil {
+ return err
+ }
+ if err := misbehaviour.ValidateBasic(); err != nil {
+ return err
+ }
+ if misbehaviour.GetClientID() != msg.ClientId {
+ return sdkerrors.Wrapf(
+ ErrInvalidMisbehaviour,
+ "misbehaviour client-id doesn't match client-id from message (%s ≠ %s)",
+ misbehaviour.GetClientID(), msg.ClientId,
+ )
+ }
+
+ return host.ClientIdentifierValidator(msg.ClientId)
+}
+
+// GetSignBytes implements sdk.Msg. The function will panic since it is used
+// for amino transaction verification which IBC does not support.
+func (msg MsgSubmitMisbehaviour) GetSignBytes() []byte {
+ panic("IBC messages do not support amino")
+}
+
+// GetSigners returns the single expected signer for a MsgSubmitMisbehaviour.
+func (msg MsgSubmitMisbehaviour) GetSigners() []sdk.AccAddress {
+ accAddr, err := sdk.AccAddressFromBech32(msg.Signer)
+ if err != nil {
+ panic(err)
+ }
+ return []sdk.AccAddress{accAddr}
+}
+
+// UnpackInterfaces implements UnpackInterfacesMessage.UnpackInterfaces
+func (msg MsgSubmitMisbehaviour) UnpackInterfaces(unpacker codectypes.AnyUnpacker) error {
+ var misbehaviour exported.Misbehaviour
+ return unpacker.UnpackAny(msg.Misbehaviour, &misbehaviour)
+}
diff --git a/core/02-client/types/msgs_test.go b/core/02-client/types/msgs_test.go
new file mode 100644
index 00000000..e42725ba
--- /dev/null
+++ b/core/02-client/types/msgs_test.go
@@ -0,0 +1,619 @@
+package types_test
+
+import (
+ "testing"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/stretchr/testify/suite"
+
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ solomachinetypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/06-solomachine/types"
+ ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
+ ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+)
+
+type TypesTestSuite struct {
+ suite.Suite
+
+ coordinator *ibctesting.Coordinator
+
+ chainA *ibctesting.TestChain
+ chainB *ibctesting.TestChain
+}
+
+func (suite *TypesTestSuite) SetupTest() {
+ suite.coordinator = ibctesting.NewCoordinator(suite.T(), 2)
+ suite.chainA = suite.coordinator.GetChain(ibctesting.GetChainID(0))
+ suite.chainB = suite.coordinator.GetChain(ibctesting.GetChainID(1))
+}
+
+func TestTypesTestSuite(t *testing.T) {
+ suite.Run(t, new(TypesTestSuite))
+}
+
+// tests that different clients within MsgCreateClient can be marshaled
+// and unmarshaled.
+func (suite *TypesTestSuite) TestMarshalMsgCreateClient() {
+ var (
+ msg *types.MsgCreateClient
+ err error
+ )
+
+ testCases := []struct {
+ name string
+ malleate func()
+ }{
+ {
+ "solo machine client", func() {
+ soloMachine := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "solomachine", "", 2)
+ msg, err = types.NewMsgCreateClient(soloMachine.ClientState(), soloMachine.ConsensusState(), suite.chainA.SenderAccount.GetAddress())
+ suite.Require().NoError(err)
+ },
+ },
+ {
+ "tendermint client", func() {
+ tendermintClient := ibctmtypes.NewClientState(suite.chainA.ChainID, ibctesting.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false)
+ msg, err = types.NewMsgCreateClient(tendermintClient, suite.chainA.CurrentTMClientHeader().ConsensusState(), suite.chainA.SenderAccount.GetAddress())
+ suite.Require().NoError(err)
+ },
+ },
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+
+ suite.Run(tc.name, func() {
+ suite.SetupTest()
+
+ tc.malleate()
+
+ cdc := suite.chainA.App.AppCodec()
+
+ // marshal message
+ bz, err := cdc.MarshalJSON(msg)
+ suite.Require().NoError(err)
+
+ // unmarshal message
+ newMsg := &types.MsgCreateClient{}
+ err = cdc.UnmarshalJSON(bz, newMsg)
+ suite.Require().NoError(err)
+
+ suite.Require().True(proto.Equal(msg, newMsg))
+ })
+ }
+}
+
+func (suite *TypesTestSuite) TestMsgCreateClient_ValidateBasic() {
+ var (
+ msg = &types.MsgCreateClient{}
+ err error
+ )
+
+ cases := []struct {
+ name string
+ malleate func()
+ expPass bool
+ }{
+ {
+ "valid - tendermint client",
+ func() {
+ tendermintClient := ibctmtypes.NewClientState(suite.chainA.ChainID, ibctesting.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false)
+ msg, err = types.NewMsgCreateClient(tendermintClient, suite.chainA.CurrentTMClientHeader().ConsensusState(), suite.chainA.SenderAccount.GetAddress())
+ suite.Require().NoError(err)
+ },
+ true,
+ },
+ {
+ "invalid tendermint client",
+ func() {
+ msg, err = types.NewMsgCreateClient(&ibctmtypes.ClientState{}, suite.chainA.CurrentTMClientHeader().ConsensusState(), suite.chainA.SenderAccount.GetAddress())
+ suite.Require().NoError(err)
+ },
+ false,
+ },
+ {
+ "failed to unpack client",
+ func() {
+ msg.ClientState = nil
+ },
+ false,
+ },
+ {
+ "failed to unpack consensus state",
+ func() {
+ tendermintClient := ibctmtypes.NewClientState(suite.chainA.ChainID, ibctesting.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false)
+ msg, err = types.NewMsgCreateClient(tendermintClient, suite.chainA.CurrentTMClientHeader().ConsensusState(), suite.chainA.SenderAccount.GetAddress())
+ suite.Require().NoError(err)
+ msg.ConsensusState = nil
+ },
+ false,
+ },
+ {
+ "invalid signer",
+ func() {
+ msg.Signer = ""
+ },
+ false,
+ },
+ {
+ "valid - solomachine client",
+ func() {
+ soloMachine := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "solomachine", "", 2)
+ msg, err = types.NewMsgCreateClient(soloMachine.ClientState(), soloMachine.ConsensusState(), suite.chainA.SenderAccount.GetAddress())
+ suite.Require().NoError(err)
+ },
+ true,
+ },
+ {
+ "invalid solomachine client",
+ func() {
+ soloMachine := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "solomachine", "", 2)
+ msg, err = types.NewMsgCreateClient(&solomachinetypes.ClientState{}, soloMachine.ConsensusState(), suite.chainA.SenderAccount.GetAddress())
+ suite.Require().NoError(err)
+ },
+ false,
+ },
+ {
+ "invalid solomachine consensus state",
+ func() {
+ soloMachine := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "solomachine", "", 2)
+ msg, err = types.NewMsgCreateClient(soloMachine.ClientState(), &solomachinetypes.ConsensusState{}, suite.chainA.SenderAccount.GetAddress())
+ suite.Require().NoError(err)
+ },
+ false,
+ },
+ {
+ "invalid - client state and consensus state client types do not match",
+ func() {
+ tendermintClient := ibctmtypes.NewClientState(suite.chainA.ChainID, ibctesting.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false)
+ soloMachine := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "solomachine", "", 2)
+ msg, err = types.NewMsgCreateClient(tendermintClient, soloMachine.ConsensusState(), suite.chainA.SenderAccount.GetAddress())
+ suite.Require().NoError(err)
+ },
+ false,
+ },
+ }
+
+ for _, tc := range cases {
+ tc.malleate()
+ err = msg.ValidateBasic()
+ if tc.expPass {
+ suite.Require().NoError(err, tc.name)
+ } else {
+ suite.Require().Error(err, tc.name)
+ }
+ }
+}
+
+// tests that different header within MsgUpdateClient can be marshaled
+// and unmarshaled.
+func (suite *TypesTestSuite) TestMarshalMsgUpdateClient() {
+ var (
+ msg *types.MsgUpdateClient
+ err error
+ )
+
+ testCases := []struct {
+ name string
+ malleate func()
+ }{
+ {
+ "solo machine client", func() {
+ soloMachine := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "solomachine", "", 2)
+ msg, err = types.NewMsgUpdateClient(soloMachine.ClientID, soloMachine.CreateHeader(), suite.chainA.SenderAccount.GetAddress())
+ suite.Require().NoError(err)
+ },
+ },
+ {
+ "tendermint client", func() {
+ msg, err = types.NewMsgUpdateClient("tendermint", suite.chainA.CurrentTMClientHeader(), suite.chainA.SenderAccount.GetAddress())
+ suite.Require().NoError(err)
+
+ },
+ },
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+
+ suite.Run(tc.name, func() {
+ suite.SetupTest()
+
+ tc.malleate()
+
+ cdc := suite.chainA.App.AppCodec()
+
+ // marshal message
+ bz, err := cdc.MarshalJSON(msg)
+ suite.Require().NoError(err)
+
+ // unmarshal message
+ newMsg := &types.MsgUpdateClient{}
+ err = cdc.UnmarshalJSON(bz, newMsg)
+ suite.Require().NoError(err)
+
+ suite.Require().True(proto.Equal(msg, newMsg))
+ })
+ }
+}
+
+func (suite *TypesTestSuite) TestMsgUpdateClient_ValidateBasic() {
+ var (
+ msg = &types.MsgUpdateClient{}
+ err error
+ )
+
+ cases := []struct {
+ name string
+ malleate func()
+ expPass bool
+ }{
+ {
+ "invalid client-id",
+ func() {
+ msg.ClientId = ""
+ },
+ false,
+ },
+ {
+ "valid - tendermint header",
+ func() {
+ msg, err = types.NewMsgUpdateClient("tendermint", suite.chainA.CurrentTMClientHeader(), suite.chainA.SenderAccount.GetAddress())
+ suite.Require().NoError(err)
+ },
+ true,
+ },
+ {
+ "invalid tendermint header",
+ func() {
+ msg, err = types.NewMsgUpdateClient("tendermint", &ibctmtypes.Header{}, suite.chainA.SenderAccount.GetAddress())
+ suite.Require().NoError(err)
+ },
+ false,
+ },
+ {
+ "failed to unpack header",
+ func() {
+ msg.Header = nil
+ },
+ false,
+ },
+ {
+ "invalid signer",
+ func() {
+ msg.Signer = ""
+ },
+ false,
+ },
+ {
+ "valid - solomachine header",
+ func() {
+ soloMachine := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "solomachine", "", 2)
+ msg, err = types.NewMsgUpdateClient(soloMachine.ClientID, soloMachine.CreateHeader(), suite.chainA.SenderAccount.GetAddress())
+ suite.Require().NoError(err)
+ },
+ true,
+ },
+ {
+ "invalid solomachine header",
+ func() {
+ msg, err = types.NewMsgUpdateClient("solomachine", &solomachinetypes.Header{}, suite.chainA.SenderAccount.GetAddress())
+ suite.Require().NoError(err)
+ },
+ false,
+ },
+ {
+ "unsupported - localhost",
+ func() {
+ msg, err = types.NewMsgUpdateClient(exported.Localhost, suite.chainA.CurrentTMClientHeader(), suite.chainA.SenderAccount.GetAddress())
+ suite.Require().NoError(err)
+ },
+ false,
+ },
+ }
+
+ for _, tc := range cases {
+ tc.malleate()
+ err = msg.ValidateBasic()
+ if tc.expPass {
+ suite.Require().NoError(err, tc.name)
+ } else {
+ suite.Require().Error(err, tc.name)
+ }
+ }
+}
+
+func (suite *TypesTestSuite) TestMarshalMsgUpgradeClient() {
+ var (
+ msg *types.MsgUpgradeClient
+ err error
+ )
+
+ testCases := []struct {
+ name string
+ malleate func()
+ }{
+ {
+ "client upgrades to new tendermint client",
+ func() {
+ tendermintClient := ibctmtypes.NewClientState(suite.chainA.ChainID, ibctesting.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false)
+ tendermintConsState := &ibctmtypes.ConsensusState{NextValidatorsHash: []byte("nextValsHash")}
+ msg, err = types.NewMsgUpgradeClient("clientid", tendermintClient, tendermintConsState, []byte("proofUpgradeClient"), []byte("proofUpgradeConsState"), suite.chainA.SenderAccount.GetAddress())
+ suite.Require().NoError(err)
+ },
+ },
+ {
+ "client upgrades to new solomachine client",
+ func() {
+ soloMachine := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "solomachine", "", 1)
+ msg, err = types.NewMsgUpgradeClient("clientid", soloMachine.ClientState(), soloMachine.ConsensusState(), []byte("proofUpgradeClient"), []byte("proofUpgradeConsState"), suite.chainA.SenderAccount.GetAddress())
+ suite.Require().NoError(err)
+ },
+ },
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+
+ suite.Run(tc.name, func() {
+ suite.SetupTest()
+
+ tc.malleate()
+
+ cdc := suite.chainA.App.AppCodec()
+
+ // marshal message
+ bz, err := cdc.MarshalJSON(msg)
+ suite.Require().NoError(err)
+
+ // unmarshal message
+ newMsg := &types.MsgUpgradeClient{}
+ err = cdc.UnmarshalJSON(bz, newMsg)
+ suite.Require().NoError(err)
+ })
+ }
+}
+
+func (suite *TypesTestSuite) TestMsgUpgradeClient_ValidateBasic() {
+ cases := []struct {
+ name string
+ malleate func(*types.MsgUpgradeClient)
+ expPass bool
+ }{
+ {
+ name: "success",
+ malleate: func(msg *types.MsgUpgradeClient) {},
+ expPass: true,
+ },
+ {
+ name: "client id empty",
+ malleate: func(msg *types.MsgUpgradeClient) {
+ msg.ClientId = ""
+ },
+ expPass: false,
+ },
+ {
+ name: "invalid client id",
+ malleate: func(msg *types.MsgUpgradeClient) {
+ msg.ClientId = "invalid~chain/id"
+ },
+ expPass: false,
+ },
+ {
+ name: "unpacking clientstate fails",
+ malleate: func(msg *types.MsgUpgradeClient) {
+ msg.ClientState = nil
+ },
+ expPass: false,
+ },
+ {
+ name: "unpacking consensus state fails",
+ malleate: func(msg *types.MsgUpgradeClient) {
+ msg.ConsensusState = nil
+ },
+ expPass: false,
+ },
+ {
+ name: "client and consensus type does not match",
+ malleate: func(msg *types.MsgUpgradeClient) {
+ soloMachine := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "solomachine", "", 2)
+ soloConsensus, err := types.PackConsensusState(soloMachine.ConsensusState())
+ suite.Require().NoError(err)
+ msg.ConsensusState = soloConsensus
+ },
+ expPass: false,
+ },
+ {
+ name: "empty client proof",
+ malleate: func(msg *types.MsgUpgradeClient) {
+ msg.ProofUpgradeClient = nil
+ },
+ expPass: false,
+ },
+ {
+ name: "empty consensus state proof",
+ malleate: func(msg *types.MsgUpgradeClient) {
+ msg.ProofUpgradeConsensusState = nil
+ },
+ expPass: false,
+ },
+ {
+ name: "empty signer",
+ malleate: func(msg *types.MsgUpgradeClient) {
+ msg.Signer = " "
+ },
+ expPass: false,
+ },
+ }
+
+ for _, tc := range cases {
+ tc := tc
+
+ clientState := ibctmtypes.NewClientState(suite.chainA.ChainID, ibctesting.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false)
+ consState := &ibctmtypes.ConsensusState{NextValidatorsHash: []byte("nextValsHash")}
+ msg, err := types.NewMsgUpgradeClient("testclientid", clientState, consState, []byte("proofUpgradeClient"), []byte("proofUpgradeConsState"), suite.chainA.SenderAccount.GetAddress())
+ suite.Require().NoError(err)
+
+ tc.malleate(msg)
+ err = msg.ValidateBasic()
+ if tc.expPass {
+ suite.Require().NoError(err, "valid case %s failed", tc.name)
+ } else {
+ suite.Require().Error(err, "invalid case %s passed", tc.name)
+ }
+ }
+}
+
+// tests that different misbehaviours within MsgSubmitMisbehaviour can be marshaled
+// and unmarshaled.
+func (suite *TypesTestSuite) TestMarshalMsgSubmitMisbehaviour() {
+ var (
+ msg *types.MsgSubmitMisbehaviour
+ err error
+ )
+
+ testCases := []struct {
+ name string
+ malleate func()
+ }{
+ {
+ "solo machine client", func() {
+ soloMachine := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "solomachine", "", 2)
+ msg, err = types.NewMsgSubmitMisbehaviour(soloMachine.ClientID, soloMachine.CreateMisbehaviour(), suite.chainA.SenderAccount.GetAddress())
+ suite.Require().NoError(err)
+ },
+ },
+ {
+ "tendermint client", func() {
+ height := types.NewHeight(0, uint64(suite.chainA.CurrentHeader.Height))
+ heightMinus1 := types.NewHeight(0, uint64(suite.chainA.CurrentHeader.Height)-1)
+ header1 := suite.chainA.CreateTMClientHeader(suite.chainA.ChainID, int64(height.RevisionHeight), heightMinus1, suite.chainA.CurrentHeader.Time, suite.chainA.Vals, suite.chainA.Vals, suite.chainA.Signers)
+ header2 := suite.chainA.CreateTMClientHeader(suite.chainA.ChainID, int64(height.RevisionHeight), heightMinus1, suite.chainA.CurrentHeader.Time.Add(time.Minute), suite.chainA.Vals, suite.chainA.Vals, suite.chainA.Signers)
+
+ misbehaviour := ibctmtypes.NewMisbehaviour("tendermint", header1, header2)
+ msg, err = types.NewMsgSubmitMisbehaviour("tendermint", misbehaviour, suite.chainA.SenderAccount.GetAddress())
+ suite.Require().NoError(err)
+
+ },
+ },
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+
+ suite.Run(tc.name, func() {
+ suite.SetupTest()
+
+ tc.malleate()
+
+ cdc := suite.chainA.App.AppCodec()
+
+ // marshal message
+ bz, err := cdc.MarshalJSON(msg)
+ suite.Require().NoError(err)
+
+ // unmarshal message
+ newMsg := &types.MsgSubmitMisbehaviour{}
+ err = cdc.UnmarshalJSON(bz, newMsg)
+ suite.Require().NoError(err)
+
+ suite.Require().True(proto.Equal(msg, newMsg))
+ })
+ }
+}
+
+func (suite *TypesTestSuite) TestMsgSubmitMisbehaviour_ValidateBasic() {
+ var (
+ msg = &types.MsgSubmitMisbehaviour{}
+ err error
+ )
+
+ cases := []struct {
+ name string
+ malleate func()
+ expPass bool
+ }{
+ {
+ "invalid client-id",
+ func() {
+ msg.ClientId = ""
+ },
+ false,
+ },
+ {
+ "valid - tendermint misbehaviour",
+ func() {
+ height := types.NewHeight(0, uint64(suite.chainA.CurrentHeader.Height))
+ heightMinus1 := types.NewHeight(0, uint64(suite.chainA.CurrentHeader.Height)-1)
+ header1 := suite.chainA.CreateTMClientHeader(suite.chainA.ChainID, int64(height.RevisionHeight), heightMinus1, suite.chainA.CurrentHeader.Time, suite.chainA.Vals, suite.chainA.Vals, suite.chainA.Signers)
+ header2 := suite.chainA.CreateTMClientHeader(suite.chainA.ChainID, int64(height.RevisionHeight), heightMinus1, suite.chainA.CurrentHeader.Time.Add(time.Minute), suite.chainA.Vals, suite.chainA.Vals, suite.chainA.Signers)
+
+ misbehaviour := ibctmtypes.NewMisbehaviour("tendermint", header1, header2)
+ msg, err = types.NewMsgSubmitMisbehaviour("tendermint", misbehaviour, suite.chainA.SenderAccount.GetAddress())
+ suite.Require().NoError(err)
+ },
+ true,
+ },
+ {
+ "invalid tendermint misbehaviour",
+ func() {
+ msg, err = types.NewMsgSubmitMisbehaviour("tendermint", &ibctmtypes.Misbehaviour{}, suite.chainA.SenderAccount.GetAddress())
+ suite.Require().NoError(err)
+ },
+ false,
+ },
+ {
+			"failed to unpack misbehaviour",
+ func() {
+ msg.Misbehaviour = nil
+ },
+ false,
+ },
+ {
+ "invalid signer",
+ func() {
+ msg.Signer = ""
+ },
+ false,
+ },
+ {
+ "valid - solomachine misbehaviour",
+ func() {
+ soloMachine := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "solomachine", "", 2)
+ msg, err = types.NewMsgSubmitMisbehaviour(soloMachine.ClientID, soloMachine.CreateMisbehaviour(), suite.chainA.SenderAccount.GetAddress())
+ suite.Require().NoError(err)
+ },
+ true,
+ },
+ {
+ "invalid solomachine misbehaviour",
+ func() {
+ msg, err = types.NewMsgSubmitMisbehaviour("solomachine", &solomachinetypes.Misbehaviour{}, suite.chainA.SenderAccount.GetAddress())
+ suite.Require().NoError(err)
+ },
+ false,
+ },
+ {
+ "client-id mismatch",
+ func() {
+ soloMachineMisbehaviour := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "solomachine", "", 2).CreateMisbehaviour()
+ msg, err = types.NewMsgSubmitMisbehaviour("external", soloMachineMisbehaviour, suite.chainA.SenderAccount.GetAddress())
+ suite.Require().NoError(err)
+ },
+ false,
+ },
+ }
+
+ for _, tc := range cases {
+ tc.malleate()
+ err = msg.ValidateBasic()
+ if tc.expPass {
+ suite.Require().NoError(err, tc.name)
+ } else {
+ suite.Require().Error(err, tc.name)
+ }
+ }
+}
diff --git a/core/02-client/types/params.go b/core/02-client/types/params.go
new file mode 100644
index 00000000..6477e3f6
--- /dev/null
+++ b/core/02-client/types/params.go
@@ -0,0 +1,71 @@
+package types
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ paramtypes "github.com/cosmos/cosmos-sdk/x/params/types"
+)
+
+var (
+ // DefaultAllowedClients are "06-solomachine" and "07-tendermint"
+ DefaultAllowedClients = []string{exported.Solomachine, exported.Tendermint}
+
+ // KeyAllowedClients is store's key for AllowedClients Params
+ KeyAllowedClients = []byte("AllowedClients")
+)
+
+// ParamKeyTable type declaration for parameters
+func ParamKeyTable() paramtypes.KeyTable {
+ return paramtypes.NewKeyTable().RegisterParamSet(&Params{})
+}
+
+// NewParams creates a new parameter configuration for the ibc client module
+func NewParams(allowedClients ...string) Params {
+ return Params{
+ AllowedClients: allowedClients,
+ }
+}
+
+// DefaultParams is the default parameter configuration for the ibc client module
+func DefaultParams() Params {
+ return NewParams(DefaultAllowedClients...)
+}
+
+// Validate all ibc client module parameters
+func (p Params) Validate() error {
+ return validateClients(p.AllowedClients)
+}
+
+// ParamSetPairs implements params.ParamSet
+func (p *Params) ParamSetPairs() paramtypes.ParamSetPairs {
+ return paramtypes.ParamSetPairs{
+ paramtypes.NewParamSetPair(KeyAllowedClients, p.AllowedClients, validateClients),
+ }
+}
+
+// IsAllowedClient checks if the given client type is registered on the allowlist.
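+// For example, DefaultParams().IsAllowedClient(exported.Tendermint) returns true, while
+// exported.Localhost is only allowed if explicitly included in the allowed clients list.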
+func (p Params) IsAllowedClient(clientType string) bool {
+ for _, allowedClient := range p.AllowedClients {
+ if allowedClient == clientType {
+ return true
+ }
+ }
+ return false
+}
+
+func validateClients(i interface{}) error {
+ clients, ok := i.([]string)
+ if !ok {
+ return fmt.Errorf("invalid parameter type: %T", i)
+ }
+
+ for i, clientType := range clients {
+ if strings.TrimSpace(clientType) == "" {
+ return fmt.Errorf("client type %d cannot be blank", i)
+ }
+ }
+
+ return nil
+}
diff --git a/core/02-client/types/params_test.go b/core/02-client/types/params_test.go
new file mode 100644
index 00000000..dac80a4b
--- /dev/null
+++ b/core/02-client/types/params_test.go
@@ -0,0 +1,30 @@
+package types
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+)
+
+func TestValidateParams(t *testing.T) {
+ testCases := []struct {
+ name string
+ params Params
+ expPass bool
+ }{
+ {"default params", DefaultParams(), true},
+ {"custom params", NewParams(exported.Tendermint), true},
+ {"blank client", NewParams(" "), false},
+ }
+
+ for _, tc := range testCases {
+ err := tc.params.Validate()
+ if tc.expPass {
+ require.NoError(t, err, tc.name)
+ } else {
+ require.Error(t, err, tc.name)
+ }
+ }
+}
diff --git a/core/02-client/types/proposal.go b/core/02-client/types/proposal.go
new file mode 100644
index 00000000..95b10aaf
--- /dev/null
+++ b/core/02-client/types/proposal.go
@@ -0,0 +1,64 @@
+package types
+
+import (
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ govtypes "github.com/cosmos/cosmos-sdk/x/gov/types"
+)
+
+const (
+ // ProposalTypeClientUpdate defines the type for a ClientUpdateProposal
+ ProposalTypeClientUpdate = "ClientUpdate"
+)
+
+var _ govtypes.Content = &ClientUpdateProposal{}
+
+func init() {
+ govtypes.RegisterProposalType(ProposalTypeClientUpdate)
+}
+
+// NewClientUpdateProposal creates a new client update proposal.
+func NewClientUpdateProposal(title, description, subjectClientID, substituteClientID string, initialHeight Height) *ClientUpdateProposal {
+ return &ClientUpdateProposal{
+ Title: title,
+ Description: description,
+ SubjectClientId: subjectClientID,
+ SubstituteClientId: substituteClientID,
+ InitialHeight: initialHeight,
+ }
+}
+
+// GetTitle returns the title of a client update proposal.
+func (cup *ClientUpdateProposal) GetTitle() string { return cup.Title }
+
+// GetDescription returns the description of a client update proposal.
+func (cup *ClientUpdateProposal) GetDescription() string { return cup.Description }
+
+// ProposalRoute returns the routing key of a client update proposal.
+func (cup *ClientUpdateProposal) ProposalRoute() string { return RouterKey }
+
+// ProposalType returns the type of a client update proposal.
+func (cup *ClientUpdateProposal) ProposalType() string { return ProposalTypeClientUpdate }
+
+// ValidateBasic runs basic stateless validity checks
+func (cup *ClientUpdateProposal) ValidateBasic() error {
+ err := govtypes.ValidateAbstract(cup)
+ if err != nil {
+ return err
+ }
+
+ if cup.SubjectClientId == cup.SubstituteClientId {
+ return sdkerrors.Wrap(ErrInvalidSubstitute, "subject and substitute client identifiers are equal")
+ }
+ if _, _, err := ParseClientIdentifier(cup.SubjectClientId); err != nil {
+ return err
+ }
+ if _, _, err := ParseClientIdentifier(cup.SubstituteClientId); err != nil {
+ return err
+ }
+
+ if cup.InitialHeight.IsZero() {
+ return sdkerrors.Wrap(ErrInvalidHeight, "initial height cannot be zero height")
+ }
+
+ return nil
+}
diff --git a/core/02-client/types/proposal_test.go b/core/02-client/types/proposal_test.go
new file mode 100644
index 00000000..597e5cf8
--- /dev/null
+++ b/core/02-client/types/proposal_test.go
@@ -0,0 +1,86 @@
+package types_test
+
+import (
+ "github.com/cosmos/cosmos-sdk/codec"
+ codectypes "github.com/cosmos/cosmos-sdk/codec/types"
+ govtypes "github.com/cosmos/cosmos-sdk/x/gov/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+)
+
+func (suite *TypesTestSuite) TestValidateBasic() {
+ subject, _ := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+ subjectClientState := suite.chainA.GetClientState(subject)
+ substitute, _ := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+ initialHeight := types.NewHeight(subjectClientState.GetLatestHeight().GetRevisionNumber(), subjectClientState.GetLatestHeight().GetRevisionHeight()+1)
+
+ testCases := []struct {
+ name string
+ proposal govtypes.Content
+ expPass bool
+ }{
+ {
+ "success",
+ types.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, subject, substitute, initialHeight),
+ true,
+ },
+ {
+ "fails validate abstract - empty title",
+ types.NewClientUpdateProposal("", ibctesting.Description, subject, substitute, initialHeight),
+ false,
+ },
+ {
+ "subject and substitute use the same identifier",
+ types.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, subject, subject, initialHeight),
+ false,
+ },
+ {
+ "invalid subject clientID",
+ types.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, ibctesting.InvalidID, substitute, initialHeight),
+ false,
+ },
+ {
+ "invalid substitute clientID",
+ types.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, subject, ibctesting.InvalidID, initialHeight),
+ false,
+ },
+ {
+ "initial height is zero",
+ types.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, subject, substitute, types.ZeroHeight()),
+ false,
+ },
+ }
+
+ for _, tc := range testCases {
+
+ err := tc.proposal.ValidateBasic()
+
+ if tc.expPass {
+ suite.Require().NoError(err, tc.name)
+ } else {
+ suite.Require().Error(err, tc.name)
+ }
+ }
+}
+
+// TestMarshalClientUpdateProposalProposal tests that a client update proposal can be marshaled and unmarshaled.
+func (suite *TypesTestSuite) TestMarshalClientUpdateProposalProposal() {
+ // create proposal
+ proposal := types.NewClientUpdateProposal("update IBC client", "description", "subject", "substitute", types.NewHeight(1, 0))
+
+ // create codec
+ ir := codectypes.NewInterfaceRegistry()
+ types.RegisterInterfaces(ir)
+ govtypes.RegisterInterfaces(ir)
+ cdc := codec.NewProtoCodec(ir)
+
+ // marshal message
+ bz, err := cdc.MarshalJSON(proposal)
+ suite.Require().NoError(err)
+
+ // unmarshal proposal
+ newProposal := &types.ClientUpdateProposal{}
+ err = cdc.UnmarshalJSON(bz, newProposal)
+ suite.Require().NoError(err)
+}
diff --git a/core/02-client/types/query.go b/core/02-client/types/query.go
new file mode 100644
index 00000000..c46bbfcf
--- /dev/null
+++ b/core/02-client/types/query.go
@@ -0,0 +1,65 @@
+package types
+
+import (
+ codectypes "github.com/cosmos/cosmos-sdk/codec/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+)
+
+var (
+ _ codectypes.UnpackInterfacesMessage = QueryClientStateResponse{}
+ _ codectypes.UnpackInterfacesMessage = QueryClientStatesResponse{}
+ _ codectypes.UnpackInterfacesMessage = QueryConsensusStateResponse{}
+ _ codectypes.UnpackInterfacesMessage = QueryConsensusStatesResponse{}
+)
+
+// UnpackInterfaces implements UnpackInterfacesMessage.UnpackInterfaces
+func (qcsr QueryClientStatesResponse) UnpackInterfaces(unpacker codectypes.AnyUnpacker) error {
+ for _, cs := range qcsr.ClientStates {
+ if err := cs.UnpackInterfaces(unpacker); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// NewQueryClientStateResponse creates a new QueryClientStateResponse instance.
+func NewQueryClientStateResponse(
+ clientStateAny *codectypes.Any, proof []byte, height Height,
+) *QueryClientStateResponse {
+ return &QueryClientStateResponse{
+ ClientState: clientStateAny,
+ Proof: proof,
+ ProofHeight: height,
+ }
+}
+
+// UnpackInterfaces implements UnpackInterfacesMessage.UnpackInterfaces
+func (qcsr QueryClientStateResponse) UnpackInterfaces(unpacker codectypes.AnyUnpacker) error {
+ return unpacker.UnpackAny(qcsr.ClientState, new(exported.ClientState))
+}
+
+// UnpackInterfaces implements UnpackInterfacesMessage.UnpackInterfaces
+func (qcsr QueryConsensusStatesResponse) UnpackInterfaces(unpacker codectypes.AnyUnpacker) error {
+ for _, cs := range qcsr.ConsensusStates {
+ if err := cs.UnpackInterfaces(unpacker); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// NewQueryConsensusStateResponse creates a new QueryConsensusStateResponse instance.
+func NewQueryConsensusStateResponse(
+ consensusStateAny *codectypes.Any, proof []byte, height Height,
+) *QueryConsensusStateResponse {
+ return &QueryConsensusStateResponse{
+ ConsensusState: consensusStateAny,
+ Proof: proof,
+ ProofHeight: height,
+ }
+}
+
+// UnpackInterfaces implements UnpackInterfacesMessage.UnpackInterfaces
+func (qcsr QueryConsensusStateResponse) UnpackInterfaces(unpacker codectypes.AnyUnpacker) error {
+ return unpacker.UnpackAny(qcsr.ConsensusState, new(exported.ConsensusState))
+}
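As a hedged illustration of how these constructors and UnpackInterfaces hooks fit together (the packed Any and proof are assumed to come from elsewhere, e.g. a keeper query):

package main

import (
	codectypes "github.com/cosmos/cosmos-sdk/codec/types"

	"github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
)

// buildResponse wraps an already-packed client state in a query response and
// resolves the Any so callers can later retrieve it as an exported.ClientState.
func buildResponse(clientStateAny *codectypes.Any, proof []byte) (*types.QueryClientStateResponse, error) {
	resp := types.NewQueryClientStateResponse(clientStateAny, proof, types.NewHeight(1, 10))

	// A ProtoCodec performs this step automatically while decoding; calling it
	// directly here caches the concrete value inside the Any. In a real app the
	// concrete client state type (e.g. the Tendermint client) is assumed to be
	// registered on the interface registry as well.
	ir := codectypes.NewInterfaceRegistry()
	types.RegisterInterfaces(ir)
	if err := resp.UnpackInterfaces(ir); err != nil {
		return nil, err
	}
	return resp, nil
}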
diff --git a/core/02-client/types/query.pb.go b/core/02-client/types/query.pb.go
new file mode 100644
index 00000000..bf74f2eb
--- /dev/null
+++ b/core/02-client/types/query.pb.go
@@ -0,0 +1,2685 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: ibcgo/core/client/v1/query.proto
+
+package types
+
+import (
+ context "context"
+ fmt "fmt"
+ types "github.com/cosmos/cosmos-sdk/codec/types"
+ query "github.com/cosmos/cosmos-sdk/types/query"
+ _ "github.com/gogo/protobuf/gogoproto"
+ grpc1 "github.com/gogo/protobuf/grpc"
+ proto "github.com/gogo/protobuf/proto"
+ _ "google.golang.org/genproto/googleapis/api/annotations"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+ io "io"
+ math "math"
+ math_bits "math/bits"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+// QueryClientStateRequest is the request type for the Query/ClientState RPC
+// method
+type QueryClientStateRequest struct {
+ // client state unique identifier
+ ClientId string `protobuf:"bytes,1,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty"`
+}
+
+func (m *QueryClientStateRequest) Reset() { *m = QueryClientStateRequest{} }
+func (m *QueryClientStateRequest) String() string { return proto.CompactTextString(m) }
+func (*QueryClientStateRequest) ProtoMessage() {}
+func (*QueryClientStateRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_833c7bc6da1addd1, []int{0}
+}
+func (m *QueryClientStateRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *QueryClientStateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_QueryClientStateRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *QueryClientStateRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QueryClientStateRequest.Merge(m, src)
+}
+func (m *QueryClientStateRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *QueryClientStateRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_QueryClientStateRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QueryClientStateRequest proto.InternalMessageInfo
+
+func (m *QueryClientStateRequest) GetClientId() string {
+ if m != nil {
+ return m.ClientId
+ }
+ return ""
+}
+
+// QueryClientStateResponse is the response type for the Query/ClientState RPC
+// method. Besides the client state, it includes a proof and the height from
+// which the proof was retrieved.
+type QueryClientStateResponse struct {
+ // client state associated with the request identifier
+ ClientState *types.Any `protobuf:"bytes,1,opt,name=client_state,json=clientState,proto3" json:"client_state,omitempty"`
+ // merkle proof of existence
+ Proof []byte `protobuf:"bytes,2,opt,name=proof,proto3" json:"proof,omitempty"`
+ // height at which the proof was retrieved
+ ProofHeight Height `protobuf:"bytes,3,opt,name=proof_height,json=proofHeight,proto3" json:"proof_height"`
+}
+
+func (m *QueryClientStateResponse) Reset() { *m = QueryClientStateResponse{} }
+func (m *QueryClientStateResponse) String() string { return proto.CompactTextString(m) }
+func (*QueryClientStateResponse) ProtoMessage() {}
+func (*QueryClientStateResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_833c7bc6da1addd1, []int{1}
+}
+func (m *QueryClientStateResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *QueryClientStateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_QueryClientStateResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *QueryClientStateResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QueryClientStateResponse.Merge(m, src)
+}
+func (m *QueryClientStateResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *QueryClientStateResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_QueryClientStateResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QueryClientStateResponse proto.InternalMessageInfo
+
+func (m *QueryClientStateResponse) GetClientState() *types.Any {
+ if m != nil {
+ return m.ClientState
+ }
+ return nil
+}
+
+func (m *QueryClientStateResponse) GetProof() []byte {
+ if m != nil {
+ return m.Proof
+ }
+ return nil
+}
+
+func (m *QueryClientStateResponse) GetProofHeight() Height {
+ if m != nil {
+ return m.ProofHeight
+ }
+ return Height{}
+}
+
+// QueryClientStatesRequest is the request type for the Query/ClientStates RPC
+// method
+type QueryClientStatesRequest struct {
+ // pagination request
+ Pagination *query.PageRequest `protobuf:"bytes,1,opt,name=pagination,proto3" json:"pagination,omitempty"`
+}
+
+func (m *QueryClientStatesRequest) Reset() { *m = QueryClientStatesRequest{} }
+func (m *QueryClientStatesRequest) String() string { return proto.CompactTextString(m) }
+func (*QueryClientStatesRequest) ProtoMessage() {}
+func (*QueryClientStatesRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_833c7bc6da1addd1, []int{2}
+}
+func (m *QueryClientStatesRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *QueryClientStatesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_QueryClientStatesRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *QueryClientStatesRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QueryClientStatesRequest.Merge(m, src)
+}
+func (m *QueryClientStatesRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *QueryClientStatesRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_QueryClientStatesRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QueryClientStatesRequest proto.InternalMessageInfo
+
+func (m *QueryClientStatesRequest) GetPagination() *query.PageRequest {
+ if m != nil {
+ return m.Pagination
+ }
+ return nil
+}
+
+// QueryClientStatesResponse is the response type for the Query/ClientStates RPC
+// method.
+type QueryClientStatesResponse struct {
+ // list of stored ClientStates of the chain.
+ ClientStates IdentifiedClientStates `protobuf:"bytes,1,rep,name=client_states,json=clientStates,proto3,castrepeated=IdentifiedClientStates" json:"client_states"`
+ // pagination response
+ Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"`
+}
+
+func (m *QueryClientStatesResponse) Reset() { *m = QueryClientStatesResponse{} }
+func (m *QueryClientStatesResponse) String() string { return proto.CompactTextString(m) }
+func (*QueryClientStatesResponse) ProtoMessage() {}
+func (*QueryClientStatesResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_833c7bc6da1addd1, []int{3}
+}
+func (m *QueryClientStatesResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *QueryClientStatesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_QueryClientStatesResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *QueryClientStatesResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QueryClientStatesResponse.Merge(m, src)
+}
+func (m *QueryClientStatesResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *QueryClientStatesResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_QueryClientStatesResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QueryClientStatesResponse proto.InternalMessageInfo
+
+func (m *QueryClientStatesResponse) GetClientStates() IdentifiedClientStates {
+ if m != nil {
+ return m.ClientStates
+ }
+ return nil
+}
+
+func (m *QueryClientStatesResponse) GetPagination() *query.PageResponse {
+ if m != nil {
+ return m.Pagination
+ }
+ return nil
+}
+
+// QueryConsensusStateRequest is the request type for the Query/ConsensusState
+// RPC method. It identifies the consensus state to query by client identifier
+// and revision, or requests the latest stored consensus state when
+// latest_height is set.
+type QueryConsensusStateRequest struct {
+ // client identifier
+ ClientId string `protobuf:"bytes,1,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty"`
+ // consensus state revision number
+ RevisionNumber uint64 `protobuf:"varint,2,opt,name=revision_number,json=revisionNumber,proto3" json:"revision_number,omitempty"`
+ // consensus state revision height
+ RevisionHeight uint64 `protobuf:"varint,3,opt,name=revision_height,json=revisionHeight,proto3" json:"revision_height,omitempty"`
+	// latest_height overrides the height field and queries the latest stored
+ // ConsensusState
+ LatestHeight bool `protobuf:"varint,4,opt,name=latest_height,json=latestHeight,proto3" json:"latest_height,omitempty"`
+}
+
+func (m *QueryConsensusStateRequest) Reset() { *m = QueryConsensusStateRequest{} }
+func (m *QueryConsensusStateRequest) String() string { return proto.CompactTextString(m) }
+func (*QueryConsensusStateRequest) ProtoMessage() {}
+func (*QueryConsensusStateRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_833c7bc6da1addd1, []int{4}
+}
+func (m *QueryConsensusStateRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *QueryConsensusStateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_QueryConsensusStateRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *QueryConsensusStateRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QueryConsensusStateRequest.Merge(m, src)
+}
+func (m *QueryConsensusStateRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *QueryConsensusStateRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_QueryConsensusStateRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QueryConsensusStateRequest proto.InternalMessageInfo
+
+func (m *QueryConsensusStateRequest) GetClientId() string {
+ if m != nil {
+ return m.ClientId
+ }
+ return ""
+}
+
+func (m *QueryConsensusStateRequest) GetRevisionNumber() uint64 {
+ if m != nil {
+ return m.RevisionNumber
+ }
+ return 0
+}
+
+func (m *QueryConsensusStateRequest) GetRevisionHeight() uint64 {
+ if m != nil {
+ return m.RevisionHeight
+ }
+ return 0
+}
+
+func (m *QueryConsensusStateRequest) GetLatestHeight() bool {
+ if m != nil {
+ return m.LatestHeight
+ }
+ return false
+}
+
+// QueryConsensusStateResponse is the response type for the Query/ConsensusState
+// RPC method. Besides the consensus state, it includes a proof and the height
+// from which the proof was retrieved.
+type QueryConsensusStateResponse struct {
+ // consensus state associated with the client identifier at the given height
+ ConsensusState *types.Any `protobuf:"bytes,1,opt,name=consensus_state,json=consensusState,proto3" json:"consensus_state,omitempty"`
+ // merkle proof of existence
+ Proof []byte `protobuf:"bytes,2,opt,name=proof,proto3" json:"proof,omitempty"`
+ // height at which the proof was retrieved
+ ProofHeight Height `protobuf:"bytes,3,opt,name=proof_height,json=proofHeight,proto3" json:"proof_height"`
+}
+
+func (m *QueryConsensusStateResponse) Reset() { *m = QueryConsensusStateResponse{} }
+func (m *QueryConsensusStateResponse) String() string { return proto.CompactTextString(m) }
+func (*QueryConsensusStateResponse) ProtoMessage() {}
+func (*QueryConsensusStateResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_833c7bc6da1addd1, []int{5}
+}
+func (m *QueryConsensusStateResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *QueryConsensusStateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_QueryConsensusStateResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *QueryConsensusStateResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QueryConsensusStateResponse.Merge(m, src)
+}
+func (m *QueryConsensusStateResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *QueryConsensusStateResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_QueryConsensusStateResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QueryConsensusStateResponse proto.InternalMessageInfo
+
+func (m *QueryConsensusStateResponse) GetConsensusState() *types.Any {
+ if m != nil {
+ return m.ConsensusState
+ }
+ return nil
+}
+
+func (m *QueryConsensusStateResponse) GetProof() []byte {
+ if m != nil {
+ return m.Proof
+ }
+ return nil
+}
+
+func (m *QueryConsensusStateResponse) GetProofHeight() Height {
+ if m != nil {
+ return m.ProofHeight
+ }
+ return Height{}
+}
+
+// QueryConsensusStatesRequest is the request type for the Query/ConsensusStates
+// RPC method.
+type QueryConsensusStatesRequest struct {
+ // client identifier
+ ClientId string `protobuf:"bytes,1,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty"`
+ // pagination request
+ Pagination *query.PageRequest `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"`
+}
+
+func (m *QueryConsensusStatesRequest) Reset() { *m = QueryConsensusStatesRequest{} }
+func (m *QueryConsensusStatesRequest) String() string { return proto.CompactTextString(m) }
+func (*QueryConsensusStatesRequest) ProtoMessage() {}
+func (*QueryConsensusStatesRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_833c7bc6da1addd1, []int{6}
+}
+func (m *QueryConsensusStatesRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *QueryConsensusStatesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_QueryConsensusStatesRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *QueryConsensusStatesRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QueryConsensusStatesRequest.Merge(m, src)
+}
+func (m *QueryConsensusStatesRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *QueryConsensusStatesRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_QueryConsensusStatesRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QueryConsensusStatesRequest proto.InternalMessageInfo
+
+func (m *QueryConsensusStatesRequest) GetClientId() string {
+ if m != nil {
+ return m.ClientId
+ }
+ return ""
+}
+
+func (m *QueryConsensusStatesRequest) GetPagination() *query.PageRequest {
+ if m != nil {
+ return m.Pagination
+ }
+ return nil
+}
+
+// QueryConsensusStatesResponse is the response type for the
+// Query/ConsensusStates RPC method
+type QueryConsensusStatesResponse struct {
+ // consensus states associated with the identifier
+ ConsensusStates []ConsensusStateWithHeight `protobuf:"bytes,1,rep,name=consensus_states,json=consensusStates,proto3" json:"consensus_states"`
+ // pagination response
+ Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"`
+}
+
+func (m *QueryConsensusStatesResponse) Reset() { *m = QueryConsensusStatesResponse{} }
+func (m *QueryConsensusStatesResponse) String() string { return proto.CompactTextString(m) }
+func (*QueryConsensusStatesResponse) ProtoMessage() {}
+func (*QueryConsensusStatesResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_833c7bc6da1addd1, []int{7}
+}
+func (m *QueryConsensusStatesResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *QueryConsensusStatesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_QueryConsensusStatesResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *QueryConsensusStatesResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QueryConsensusStatesResponse.Merge(m, src)
+}
+func (m *QueryConsensusStatesResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *QueryConsensusStatesResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_QueryConsensusStatesResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QueryConsensusStatesResponse proto.InternalMessageInfo
+
+func (m *QueryConsensusStatesResponse) GetConsensusStates() []ConsensusStateWithHeight {
+ if m != nil {
+ return m.ConsensusStates
+ }
+ return nil
+}
+
+func (m *QueryConsensusStatesResponse) GetPagination() *query.PageResponse {
+ if m != nil {
+ return m.Pagination
+ }
+ return nil
+}
+
+// QueryClientParamsRequest is the request type for the Query/ClientParams RPC
+// method.
+type QueryClientParamsRequest struct {
+}
+
+func (m *QueryClientParamsRequest) Reset() { *m = QueryClientParamsRequest{} }
+func (m *QueryClientParamsRequest) String() string { return proto.CompactTextString(m) }
+func (*QueryClientParamsRequest) ProtoMessage() {}
+func (*QueryClientParamsRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_833c7bc6da1addd1, []int{8}
+}
+func (m *QueryClientParamsRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *QueryClientParamsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_QueryClientParamsRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *QueryClientParamsRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QueryClientParamsRequest.Merge(m, src)
+}
+func (m *QueryClientParamsRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *QueryClientParamsRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_QueryClientParamsRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QueryClientParamsRequest proto.InternalMessageInfo
+
+// QueryClientParamsResponse is the response type for the Query/ClientParams RPC
+// method.
+type QueryClientParamsResponse struct {
+ // params defines the parameters of the module.
+ Params *Params `protobuf:"bytes,1,opt,name=params,proto3" json:"params,omitempty"`
+}
+
+func (m *QueryClientParamsResponse) Reset() { *m = QueryClientParamsResponse{} }
+func (m *QueryClientParamsResponse) String() string { return proto.CompactTextString(m) }
+func (*QueryClientParamsResponse) ProtoMessage() {}
+func (*QueryClientParamsResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_833c7bc6da1addd1, []int{9}
+}
+func (m *QueryClientParamsResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *QueryClientParamsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_QueryClientParamsResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *QueryClientParamsResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QueryClientParamsResponse.Merge(m, src)
+}
+func (m *QueryClientParamsResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *QueryClientParamsResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_QueryClientParamsResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QueryClientParamsResponse proto.InternalMessageInfo
+
+func (m *QueryClientParamsResponse) GetParams() *Params {
+ if m != nil {
+ return m.Params
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterType((*QueryClientStateRequest)(nil), "ibcgo.core.client.v1.QueryClientStateRequest")
+ proto.RegisterType((*QueryClientStateResponse)(nil), "ibcgo.core.client.v1.QueryClientStateResponse")
+ proto.RegisterType((*QueryClientStatesRequest)(nil), "ibcgo.core.client.v1.QueryClientStatesRequest")
+ proto.RegisterType((*QueryClientStatesResponse)(nil), "ibcgo.core.client.v1.QueryClientStatesResponse")
+ proto.RegisterType((*QueryConsensusStateRequest)(nil), "ibcgo.core.client.v1.QueryConsensusStateRequest")
+ proto.RegisterType((*QueryConsensusStateResponse)(nil), "ibcgo.core.client.v1.QueryConsensusStateResponse")
+ proto.RegisterType((*QueryConsensusStatesRequest)(nil), "ibcgo.core.client.v1.QueryConsensusStatesRequest")
+ proto.RegisterType((*QueryConsensusStatesResponse)(nil), "ibcgo.core.client.v1.QueryConsensusStatesResponse")
+ proto.RegisterType((*QueryClientParamsRequest)(nil), "ibcgo.core.client.v1.QueryClientParamsRequest")
+ proto.RegisterType((*QueryClientParamsResponse)(nil), "ibcgo.core.client.v1.QueryClientParamsResponse")
+}
+
+func init() { proto.RegisterFile("ibcgo/core/client/v1/query.proto", fileDescriptor_833c7bc6da1addd1) }
+
+var fileDescriptor_833c7bc6da1addd1 = []byte{
+ // 817 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0x4b, 0x4f, 0x1b, 0x47,
+ 0x1c, 0xf7, 0xf0, 0x12, 0x8c, 0x0d, 0xae, 0x46, 0x6e, 0x31, 0x0b, 0x35, 0xc6, 0x48, 0xc5, 0x6d,
+ 0xe5, 0x19, 0xec, 0x3e, 0x2f, 0x3d, 0x94, 0xaa, 0x50, 0x2e, 0x15, 0x6c, 0x0f, 0x95, 0x7a, 0xb1,
+ 0x76, 0xd7, 0xe3, 0xf5, 0x4a, 0xf6, 0xce, 0xe2, 0x59, 0x5b, 0x42, 0x88, 0x0b, 0x1f, 0xa0, 0x8a,
+ 0x94, 0x5b, 0xae, 0xb9, 0xe5, 0x84, 0x72, 0xcb, 0x37, 0xe0, 0x88, 0x14, 0x29, 0xca, 0x29, 0x89,
+ 0x70, 0x3e, 0x43, 0xce, 0xd1, 0xce, 0xcc, 0x9a, 0x5d, 0xb3, 0x84, 0x25, 0x4a, 0x6e, 0xe3, 0xff,
+ 0xf3, 0xf7, 0xfb, 0xbf, 0xbc, 0xb0, 0xec, 0x98, 0x96, 0xcd, 0x88, 0xc5, 0xfa, 0x94, 0x58, 0x5d,
+ 0x87, 0xba, 0x3e, 0x19, 0xd6, 0xc9, 0xd1, 0x80, 0xf6, 0x8f, 0xb1, 0xd7, 0x67, 0x3e, 0x43, 0x05,
+ 0x61, 0x81, 0x03, 0x0b, 0x2c, 0x2d, 0xf0, 0xb0, 0xae, 0x7d, 0x67, 0x31, 0xde, 0x63, 0x9c, 0x98,
+ 0x06, 0xa7, 0xd2, 0x9c, 0x0c, 0xeb, 0x26, 0xf5, 0x8d, 0x3a, 0xf1, 0x0c, 0xdb, 0x71, 0x0d, 0xdf,
+ 0x61, 0xae, 0x8c, 0xa0, 0x6d, 0x24, 0xe6, 0x50, 0xb1, 0xa4, 0xc9, 0x8a, 0xcd, 0x98, 0xdd, 0xa5,
+ 0x44, 0xfc, 0x32, 0x07, 0x6d, 0x62, 0xb8, 0x2a, 0xbf, 0xb6, 0xa6, 0x54, 0x86, 0xe7, 0x10, 0xc3,
+ 0x75, 0x99, 0x2f, 0x42, 0x73, 0xa5, 0x2d, 0xd8, 0xcc, 0x66, 0xe2, 0x49, 0x82, 0x97, 0x94, 0x56,
+ 0x7e, 0x86, 0xcb, 0x87, 0x01, 0xa6, 0x3f, 0x44, 0x8e, 0x7f, 0x7c, 0xc3, 0xa7, 0x3a, 0x3d, 0x1a,
+ 0x50, 0xee, 0xa3, 0x55, 0xb8, 0x20, 0x33, 0x37, 0x9d, 0x56, 0x11, 0x94, 0x41, 0x75, 0x41, 0x9f,
+ 0x97, 0x82, 0xfd, 0x56, 0xe5, 0x1c, 0xc0, 0xe2, 0x4d, 0x47, 0xee, 0x31, 0x97, 0x53, 0xf4, 0x0b,
+ 0xcc, 0x29, 0x4f, 0x1e, 0xc8, 0x85, 0x73, 0xb6, 0x51, 0xc0, 0x12, 0x1f, 0x0e, 0xa1, 0xe3, 0xdf,
+ 0xdd, 0x63, 0x3d, 0x6b, 0x5d, 0x07, 0x40, 0x05, 0x38, 0xeb, 0xf5, 0x19, 0x6b, 0x17, 0xa7, 0xca,
+ 0xa0, 0x9a, 0xd3, 0xe5, 0x0f, 0xf4, 0x27, 0xcc, 0x89, 0x47, 0xb3, 0x43, 0x1d, 0xbb, 0xe3, 0x17,
+ 0xa7, 0x45, 0xb8, 0x35, 0x9c, 0x54, 0x6e, 0xfc, 0x97, 0xb0, 0xd9, 0x99, 0xb9, 0x78, 0xb5, 0x9e,
+ 0xd1, 0xb3, 0xc2, 0x4f, 0x8a, 0x2a, 0xe6, 0x4d, 0xc4, 0x3c, 0xe4, 0xba, 0x0b, 0xe1, 0x75, 0x33,
+ 0x14, 0xde, 0x6f, 0xb0, 0xec, 0x1c, 0x0e, 0x3a, 0x87, 0x65, 0xa3, 0x55, 0xe7, 0xf0, 0x81, 0x61,
+ 0x87, 0x75, 0xd2, 0x23, 0x9e, 0x95, 0x17, 0x00, 0xae, 0x24, 0x24, 0x51, 0x75, 0xf1, 0xe0, 0x62,
+ 0xb4, 0x2e, 0xbc, 0x08, 0xca, 0xd3, 0xd5, 0x6c, 0xe3, 0xfb, 0x64, 0x26, 0xfb, 0x2d, 0xea, 0xfa,
+ 0x4e, 0xdb, 0xa1, 0xad, 0x48, 0xb0, 0x9d, 0x52, 0x40, 0xec, 0xc9, 0xeb, 0xf5, 0xaf, 0x12, 0xd5,
+ 0x5c, 0xcf, 0x45, 0xea, 0xc9, 0xd1, 0x5e, 0x8c, 0xd7, 0x94, 0xe0, 0xb5, 0x75, 0x27, 0x2f, 0x09,
+ 0x37, 0x46, 0xec, 0x1c, 0x40, 0x4d, 0x12, 0x0b, 0x54, 0x2e, 0x1f, 0xf0, 0xd4, 0xb3, 0x82, 0xb6,
+ 0x60, 0xbe, 0x4f, 0x87, 0x0e, 0x77, 0x98, 0xdb, 0x74, 0x07, 0x3d, 0x93, 0xf6, 0x05, 0x92, 0x19,
+ 0x7d, 0x29, 0x14, 0xff, 0x2d, 0xa4, 0x31, 0xc3, 0x48, 0xaf, 0x23, 0x86, 0xb2, 0x95, 0x68, 0x13,
+ 0x2e, 0x76, 0x03, 0x7e, 0x7e, 0x68, 0x36, 0x53, 0x06, 0xd5, 0x79, 0x3d, 0x27, 0x85, 0xaa, 0xdf,
+ 0xcf, 0x00, 0x5c, 0x4d, 0x84, 0xac, 0xba, 0xf1, 0x1b, 0xcc, 0x5b, 0xa1, 0x26, 0xc5, 0xa0, 0x2e,
+ 0x59, 0xb1, 0x30, 0x9f, 0x77, 0x56, 0xcf, 0x92, 0xb1, 0xf3, 0x54, 0xf5, 0xde, 0x4d, 0x68, 0xfa,
+ 0xc7, 0x0c, 0xf3, 0x05, 0x80, 0x6b, 0xc9, 0x20, 0x54, 0x05, 0x9b, 0xf0, 0x8b, 0x89, 0x0a, 0x86,
+ 0x23, 0x8d, 0x93, 0x09, 0xc7, 0x03, 0xfd, 0xeb, 0xf8, 0x9d, 0x58, 0x09, 0xf2, 0xf1, 0x12, 0x7f,
+ 0xc2, 0xf1, 0xd5, 0x62, 0xbb, 0x7f, 0x60, 0xf4, 0x8d, 0x5e, 0x58, 0xcb, 0xca, 0x61, 0x6c, 0x65,
+ 0x43, 0x9d, 0xa2, 0xf8, 0x23, 0x9c, 0xf3, 0x84, 0x44, 0xcd, 0xc6, 0x2d, 0x9d, 0x54, 0x5e, 0xca,
+ 0xb6, 0xf1, 0x6e, 0x0e, 0xce, 0x8a, 0x98, 0xe8, 0x31, 0x80, 0xd9, 0xc8, 0x7e, 0xa2, 0x5a, 0xb2,
+ 0xff, 0x2d, 0x37, 0x58, 0xc3, 0x69, 0xcd, 0x25, 0xdc, 0xca, 0x4f, 0x67, 0xcf, 0xdf, 0x3e, 0x9c,
+ 0x22, 0xa8, 0x46, 0x1c, 0xd3, 0x4a, 0xfe, 0x1f, 0x51, 0x8d, 0x22, 0x27, 0xe3, 0x01, 0x3a, 0x45,
+ 0x8f, 0x00, 0xcc, 0x45, 0xaf, 0x08, 0x4a, 0x99, 0x37, 0xac, 0xa1, 0x46, 0x52, 0xdb, 0x2b, 0xa0,
+ 0xdf, 0x0a, 0xa0, 0x9b, 0x68, 0xe3, 0x4e, 0xa0, 0x68, 0x04, 0xe0, 0x52, 0x7c, 0x70, 0xd0, 0xf6,
+ 0x87, 0xd2, 0x25, 0x1d, 0x28, 0xad, 0x7e, 0x0f, 0x0f, 0x05, 0xb1, 0x2b, 0x20, 0xb6, 0x51, 0x2b,
+ 0x11, 0xe2, 0xc4, 0xdc, 0x47, 0xcb, 0x49, 0xc2, 0x6b, 0x45, 0x4e, 0x26, 0xee, 0xde, 0x29, 0x91,
+ 0x67, 0x21, 0xa2, 0x90, 0x82, 0x53, 0xf4, 0x14, 0xc0, 0xfc, 0xc4, 0x9e, 0xa1, 0xf4, 0xa0, 0xc7,
+ 0x8d, 0x68, 0xdc, 0xc7, 0x45, 0x11, 0xfd, 0x55, 0x10, 0x6d, 0xa0, 0xed, 0xfb, 0x12, 0x45, 0xff,
+ 0x8f, 0xe7, 0x46, 0x2e, 0x40, 0x8a, 0xb9, 0x89, 0xed, 0x5e, 0x8a, 0xb9, 0x89, 0xef, 0x63, 0xe5,
+ 0x6b, 0x81, 0x75, 0x19, 0x7d, 0x29, 0xb1, 0x8e, 0x61, 0xca, 0xc5, 0xdb, 0xd9, 0xbb, 0xb8, 0x2a,
+ 0x81, 0xcb, 0xab, 0x12, 0x78, 0x73, 0x55, 0x02, 0x0f, 0x46, 0xa5, 0xcc, 0xe5, 0xa8, 0x94, 0x79,
+ 0x39, 0x2a, 0x65, 0xfe, 0xab, 0xd9, 0x8e, 0xdf, 0x19, 0x98, 0xd8, 0x62, 0x3d, 0xa2, 0xbe, 0xc8,
+ 0x1c, 0xd3, 0xaa, 0x85, 0x5f, 0x5b, 0xdb, 0x8d, 0x9a, 0x0a, 0xe6, 0x1f, 0x7b, 0x94, 0x9b, 0x73,
+ 0xe2, 0xf6, 0xff, 0xf0, 0x3e, 0x00, 0x00, 0xff, 0xff, 0x3e, 0x73, 0xec, 0x7c, 0xf6, 0x09, 0x00,
+ 0x00,
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// QueryClient is the client API for Query service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type QueryClient interface {
+ // ClientState queries an IBC light client.
+ ClientState(ctx context.Context, in *QueryClientStateRequest, opts ...grpc.CallOption) (*QueryClientStateResponse, error)
+ // ClientStates queries all the IBC light clients of a chain.
+ ClientStates(ctx context.Context, in *QueryClientStatesRequest, opts ...grpc.CallOption) (*QueryClientStatesResponse, error)
+ // ConsensusState queries a consensus state associated with a client state at
+ // a given height.
+ ConsensusState(ctx context.Context, in *QueryConsensusStateRequest, opts ...grpc.CallOption) (*QueryConsensusStateResponse, error)
+	// ConsensusStates queries all the consensus states associated with a given
+	// client.
+ ConsensusStates(ctx context.Context, in *QueryConsensusStatesRequest, opts ...grpc.CallOption) (*QueryConsensusStatesResponse, error)
+ // ClientParams queries all parameters of the ibc client.
+ ClientParams(ctx context.Context, in *QueryClientParamsRequest, opts ...grpc.CallOption) (*QueryClientParamsResponse, error)
+}
+
+type queryClient struct {
+ cc grpc1.ClientConn
+}
+
+func NewQueryClient(cc grpc1.ClientConn) QueryClient {
+ return &queryClient{cc}
+}
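An illustrative sketch (not part of the generated file) of calling the Query service through this client; the endpoint and client identifier are assumed values:

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc"

	"github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
)

func main() {
	// *grpc.ClientConn satisfies the grpc1.ClientConn interface expected by NewQueryClient.
	conn, err := grpc.Dial("localhost:9090", grpc.WithInsecure())
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	queryClient := types.NewQueryClient(conn)
	res, err := queryClient.ClientState(context.Background(), &types.QueryClientStateRequest{
		ClientId: "07-tendermint-0", // assumed client identifier
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(res.ProofHeight)
}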
+
+func (c *queryClient) ClientState(ctx context.Context, in *QueryClientStateRequest, opts ...grpc.CallOption) (*QueryClientStateResponse, error) {
+ out := new(QueryClientStateResponse)
+ err := c.cc.Invoke(ctx, "/ibcgo.core.client.v1.Query/ClientState", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *queryClient) ClientStates(ctx context.Context, in *QueryClientStatesRequest, opts ...grpc.CallOption) (*QueryClientStatesResponse, error) {
+ out := new(QueryClientStatesResponse)
+ err := c.cc.Invoke(ctx, "/ibcgo.core.client.v1.Query/ClientStates", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *queryClient) ConsensusState(ctx context.Context, in *QueryConsensusStateRequest, opts ...grpc.CallOption) (*QueryConsensusStateResponse, error) {
+ out := new(QueryConsensusStateResponse)
+ err := c.cc.Invoke(ctx, "/ibcgo.core.client.v1.Query/ConsensusState", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *queryClient) ConsensusStates(ctx context.Context, in *QueryConsensusStatesRequest, opts ...grpc.CallOption) (*QueryConsensusStatesResponse, error) {
+ out := new(QueryConsensusStatesResponse)
+ err := c.cc.Invoke(ctx, "/ibcgo.core.client.v1.Query/ConsensusStates", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *queryClient) ClientParams(ctx context.Context, in *QueryClientParamsRequest, opts ...grpc.CallOption) (*QueryClientParamsResponse, error) {
+ out := new(QueryClientParamsResponse)
+ err := c.cc.Invoke(ctx, "/ibcgo.core.client.v1.Query/ClientParams", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// QueryServer is the server API for Query service.
+type QueryServer interface {
+ // ClientState queries an IBC light client.
+ ClientState(context.Context, *QueryClientStateRequest) (*QueryClientStateResponse, error)
+ // ClientStates queries all the IBC light clients of a chain.
+ ClientStates(context.Context, *QueryClientStatesRequest) (*QueryClientStatesResponse, error)
+ // ConsensusState queries a consensus state associated with a client state at
+ // a given height.
+ ConsensusState(context.Context, *QueryConsensusStateRequest) (*QueryConsensusStateResponse, error)
+	// ConsensusStates queries all the consensus states associated with a given
+	// client.
+ ConsensusStates(context.Context, *QueryConsensusStatesRequest) (*QueryConsensusStatesResponse, error)
+ // ClientParams queries all parameters of the ibc client.
+ ClientParams(context.Context, *QueryClientParamsRequest) (*QueryClientParamsResponse, error)
+}
+
+// UnimplementedQueryServer can be embedded to have forward compatible implementations.
+type UnimplementedQueryServer struct {
+}
+
+func (*UnimplementedQueryServer) ClientState(ctx context.Context, req *QueryClientStateRequest) (*QueryClientStateResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ClientState not implemented")
+}
+func (*UnimplementedQueryServer) ClientStates(ctx context.Context, req *QueryClientStatesRequest) (*QueryClientStatesResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ClientStates not implemented")
+}
+func (*UnimplementedQueryServer) ConsensusState(ctx context.Context, req *QueryConsensusStateRequest) (*QueryConsensusStateResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ConsensusState not implemented")
+}
+func (*UnimplementedQueryServer) ConsensusStates(ctx context.Context, req *QueryConsensusStatesRequest) (*QueryConsensusStatesResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ConsensusStates not implemented")
+}
+func (*UnimplementedQueryServer) ClientParams(ctx context.Context, req *QueryClientParamsRequest) (*QueryClientParamsResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ClientParams not implemented")
+}
+
+func RegisterQueryServer(s grpc1.Server, srv QueryServer) {
+ s.RegisterService(&_Query_serviceDesc, srv)
+}
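Conversely, a brief sketch of wiring a hypothetical QueryServer implementation into a gRPC server; embedding UnimplementedQueryServer keeps the type forward compatible because every RPC that is not overridden returns codes.Unimplemented:

package main

import (
	"google.golang.org/grpc"

	"github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
)

// queryServer is a placeholder implementation for illustration only.
type queryServer struct {
	types.UnimplementedQueryServer
}

func main() {
	s := grpc.NewServer()
	types.RegisterQueryServer(s, &queryServer{})
	// Serving on a network listener is omitted from this sketch.
	_ = s
}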
+
+func _Query_ClientState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(QueryClientStateRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(QueryServer).ClientState(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/ibcgo.core.client.v1.Query/ClientState",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(QueryServer).ClientState(ctx, req.(*QueryClientStateRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Query_ClientStates_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(QueryClientStatesRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(QueryServer).ClientStates(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/ibcgo.core.client.v1.Query/ClientStates",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(QueryServer).ClientStates(ctx, req.(*QueryClientStatesRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Query_ConsensusState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(QueryConsensusStateRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(QueryServer).ConsensusState(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/ibcgo.core.client.v1.Query/ConsensusState",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(QueryServer).ConsensusState(ctx, req.(*QueryConsensusStateRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Query_ConsensusStates_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(QueryConsensusStatesRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(QueryServer).ConsensusStates(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/ibcgo.core.client.v1.Query/ConsensusStates",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(QueryServer).ConsensusStates(ctx, req.(*QueryConsensusStatesRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Query_ClientParams_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(QueryClientParamsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(QueryServer).ClientParams(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/ibcgo.core.client.v1.Query/ClientParams",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(QueryServer).ClientParams(ctx, req.(*QueryClientParamsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _Query_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "ibcgo.core.client.v1.Query",
+ HandlerType: (*QueryServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "ClientState",
+ Handler: _Query_ClientState_Handler,
+ },
+ {
+ MethodName: "ClientStates",
+ Handler: _Query_ClientStates_Handler,
+ },
+ {
+ MethodName: "ConsensusState",
+ Handler: _Query_ConsensusState_Handler,
+ },
+ {
+ MethodName: "ConsensusStates",
+ Handler: _Query_ConsensusStates_Handler,
+ },
+ {
+ MethodName: "ClientParams",
+ Handler: _Query_ClientParams_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "ibcgo/core/client/v1/query.proto",
+}
+
+func (m *QueryClientStateRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *QueryClientStateRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *QueryClientStateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.ClientId) > 0 {
+ i -= len(m.ClientId)
+ copy(dAtA[i:], m.ClientId)
+ i = encodeVarintQuery(dAtA, i, uint64(len(m.ClientId)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *QueryClientStateResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *QueryClientStateResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *QueryClientStateResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.ProofHeight.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintQuery(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ if len(m.Proof) > 0 {
+ i -= len(m.Proof)
+ copy(dAtA[i:], m.Proof)
+ i = encodeVarintQuery(dAtA, i, uint64(len(m.Proof)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.ClientState != nil {
+ {
+ size, err := m.ClientState.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintQuery(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *QueryClientStatesRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *QueryClientStatesRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *QueryClientStatesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Pagination != nil {
+ {
+ size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintQuery(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *QueryClientStatesResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *QueryClientStatesResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *QueryClientStatesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Pagination != nil {
+ {
+ size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintQuery(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.ClientStates) > 0 {
+ for iNdEx := len(m.ClientStates) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.ClientStates[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintQuery(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *QueryConsensusStateRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *QueryConsensusStateRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *QueryConsensusStateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.LatestHeight {
+ i--
+ if m.LatestHeight {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x20
+ }
+ if m.RevisionHeight != 0 {
+ i = encodeVarintQuery(dAtA, i, uint64(m.RevisionHeight))
+ i--
+ dAtA[i] = 0x18
+ }
+ if m.RevisionNumber != 0 {
+ i = encodeVarintQuery(dAtA, i, uint64(m.RevisionNumber))
+ i--
+ dAtA[i] = 0x10
+ }
+ if len(m.ClientId) > 0 {
+ i -= len(m.ClientId)
+ copy(dAtA[i:], m.ClientId)
+ i = encodeVarintQuery(dAtA, i, uint64(len(m.ClientId)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *QueryConsensusStateResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *QueryConsensusStateResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *QueryConsensusStateResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.ProofHeight.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintQuery(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ if len(m.Proof) > 0 {
+ i -= len(m.Proof)
+ copy(dAtA[i:], m.Proof)
+ i = encodeVarintQuery(dAtA, i, uint64(len(m.Proof)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.ConsensusState != nil {
+ {
+ size, err := m.ConsensusState.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintQuery(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *QueryConsensusStatesRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *QueryConsensusStatesRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *QueryConsensusStatesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Pagination != nil {
+ {
+ size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintQuery(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.ClientId) > 0 {
+ i -= len(m.ClientId)
+ copy(dAtA[i:], m.ClientId)
+ i = encodeVarintQuery(dAtA, i, uint64(len(m.ClientId)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *QueryConsensusStatesResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *QueryConsensusStatesResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *QueryConsensusStatesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Pagination != nil {
+ {
+ size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintQuery(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.ConsensusStates) > 0 {
+ for iNdEx := len(m.ConsensusStates) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.ConsensusStates[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintQuery(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *QueryClientParamsRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *QueryClientParamsRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *QueryClientParamsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ return len(dAtA) - i, nil
+}
+
+func (m *QueryClientParamsResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *QueryClientParamsResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *QueryClientParamsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Params != nil {
+ {
+ size, err := m.Params.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintQuery(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintQuery(dAtA []byte, offset int, v uint64) int {
+ offset -= sovQuery(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
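To make the varint layout concrete, a hypothetical in-package test of encodeVarintQuery: the value is written seven low-order bits per byte, least significant group first, with the continuation bit set on every byte except the last.

package types

import "testing"

// TestEncodeVarintQueryExample is illustrative only: 300 encodes to 0xAC 0x02.
func TestEncodeVarintQueryExample(t *testing.T) {
	buf := make([]byte, 2)
	// encodeVarintQuery writes the varint so that it ends at the given offset
	// and returns the offset at which the encoded value begins.
	if start := encodeVarintQuery(buf, 2, 300); start != 0 {
		t.Fatalf("unexpected start offset %d", start)
	}
	// 300 = 0b10_0101100: low seven bits 0x2C with the continuation bit -> 0xAC,
	// remaining bits 0b10 -> 0x02.
	if buf[0] != 0xAC || buf[1] != 0x02 {
		t.Fatalf("unexpected encoding % x", buf)
	}
}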
+func (m *QueryClientStateRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.ClientId)
+ if l > 0 {
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ return n
+}
+
+func (m *QueryClientStateResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.ClientState != nil {
+ l = m.ClientState.Size()
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ l = len(m.Proof)
+ if l > 0 {
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ l = m.ProofHeight.Size()
+ n += 1 + l + sovQuery(uint64(l))
+ return n
+}
+
+func (m *QueryClientStatesRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Pagination != nil {
+ l = m.Pagination.Size()
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ return n
+}
+
+func (m *QueryClientStatesResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.ClientStates) > 0 {
+ for _, e := range m.ClientStates {
+ l = e.Size()
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ }
+ if m.Pagination != nil {
+ l = m.Pagination.Size()
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ return n
+}
+
+func (m *QueryConsensusStateRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.ClientId)
+ if l > 0 {
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ if m.RevisionNumber != 0 {
+ n += 1 + sovQuery(uint64(m.RevisionNumber))
+ }
+ if m.RevisionHeight != 0 {
+ n += 1 + sovQuery(uint64(m.RevisionHeight))
+ }
+ if m.LatestHeight {
+ n += 2
+ }
+ return n
+}
+
+func (m *QueryConsensusStateResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.ConsensusState != nil {
+ l = m.ConsensusState.Size()
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ l = len(m.Proof)
+ if l > 0 {
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ l = m.ProofHeight.Size()
+ n += 1 + l + sovQuery(uint64(l))
+ return n
+}
+
+func (m *QueryConsensusStatesRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.ClientId)
+ if l > 0 {
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ if m.Pagination != nil {
+ l = m.Pagination.Size()
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ return n
+}
+
+func (m *QueryConsensusStatesResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.ConsensusStates) > 0 {
+ for _, e := range m.ConsensusStates {
+ l = e.Size()
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ }
+ if m.Pagination != nil {
+ l = m.Pagination.Size()
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ return n
+}
+
+func (m *QueryClientParamsRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ return n
+}
+
+func (m *QueryClientParamsResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Params != nil {
+ l = m.Params.Size()
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ return n
+}
+
+func sovQuery(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozQuery(x uint64) (n int) {
+ return sovQuery(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *QueryClientStateRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: QueryClientStateRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: QueryClientStateRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClientId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ClientId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipQuery(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *QueryClientStateResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: QueryClientStateResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: QueryClientStateResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClientState", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ClientState == nil {
+ m.ClientState = &types.Any{}
+ }
+ if err := m.ClientState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Proof", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Proof = append(m.Proof[:0], dAtA[iNdEx:postIndex]...)
+ if m.Proof == nil {
+ m.Proof = []byte{}
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProofHeight", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ProofHeight.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipQuery(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *QueryClientStatesRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: QueryClientStatesRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: QueryClientStatesRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Pagination == nil {
+ m.Pagination = &query.PageRequest{}
+ }
+ if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipQuery(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *QueryClientStatesResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: QueryClientStatesResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: QueryClientStatesResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClientStates", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ClientStates = append(m.ClientStates, IdentifiedClientState{})
+ if err := m.ClientStates[len(m.ClientStates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Pagination == nil {
+ m.Pagination = &query.PageResponse{}
+ }
+ if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipQuery(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *QueryConsensusStateRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: QueryConsensusStateRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: QueryConsensusStateRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClientId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ClientId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RevisionNumber", wireType)
+ }
+ m.RevisionNumber = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.RevisionNumber |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RevisionHeight", wireType)
+ }
+ m.RevisionHeight = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.RevisionHeight |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LatestHeight", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.LatestHeight = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipQuery(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *QueryConsensusStateResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: QueryConsensusStateResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: QueryConsensusStateResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ConsensusState", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ConsensusState == nil {
+ m.ConsensusState = &types.Any{}
+ }
+ if err := m.ConsensusState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Proof", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Proof = append(m.Proof[:0], dAtA[iNdEx:postIndex]...)
+ if m.Proof == nil {
+ m.Proof = []byte{}
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProofHeight", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ProofHeight.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipQuery(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *QueryConsensusStatesRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: QueryConsensusStatesRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: QueryConsensusStatesRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClientId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ClientId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Pagination == nil {
+ m.Pagination = &query.PageRequest{}
+ }
+ if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipQuery(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *QueryConsensusStatesResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: QueryConsensusStatesResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: QueryConsensusStatesResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ConsensusStates", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ConsensusStates = append(m.ConsensusStates, ConsensusStateWithHeight{})
+ if err := m.ConsensusStates[len(m.ConsensusStates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Pagination == nil {
+ m.Pagination = &query.PageResponse{}
+ }
+ if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipQuery(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *QueryClientParamsRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: QueryClientParamsRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: QueryClientParamsRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipQuery(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *QueryClientParamsResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: QueryClientParamsResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: QueryClientParamsResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Params == nil {
+ m.Params = &Params{}
+ }
+ if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipQuery(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipQuery(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthQuery
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupQuery
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthQuery
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthQuery = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowQuery = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupQuery = fmt.Errorf("proto: unexpected end of group")
+)
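
Every hand-rolled Unmarshal method above decodes protobuf fields with the same base-128 varint loop: accumulate seven bits per byte until a byte below 0x80 terminates the value, then split the resulting key into a field number and wire type. A minimal standalone sketch of that step follows; it is illustrative only, not part of the generated file, the helper name is hypothetical, and it reuses the fmt and io imports already present in query.pb.go.

func decodeVarint(dAtA []byte, iNdEx int) (v uint64, next int, err error) {
    for shift := uint(0); ; shift += 7 {
        if shift >= 64 {
            // mirrors ErrIntOverflowQuery in the generated code
            return 0, 0, fmt.Errorf("proto: integer overflow")
        }
        if iNdEx >= len(dAtA) {
            return 0, 0, io.ErrUnexpectedEOF
        }
        b := dAtA[iNdEx]
        iNdEx++
        v |= uint64(b&0x7F) << shift
        if b < 0x80 { // high bit clear: last byte of the varint
            return v, iNdEx, nil
        }
    }
}

// A field key is itself a varint: fieldNum := int32(key >> 3); wireType := int(key & 0x7),
// exactly as in the switch statements of the Unmarshal methods above.
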
diff --git a/core/02-client/types/query.pb.gw.go b/core/02-client/types/query.pb.gw.go
new file mode 100644
index 00000000..ceef5c32
--- /dev/null
+++ b/core/02-client/types/query.pb.gw.go
@@ -0,0 +1,602 @@
+// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
+// source: ibcgo/core/client/v1/query.proto
+
+/*
+Package types is a reverse proxy.
+
+It translates gRPC into RESTful JSON APIs.
+*/
+package types
+
+import (
+ "context"
+ "io"
+ "net/http"
+
+ "github.com/golang/protobuf/descriptor"
+ "github.com/golang/protobuf/proto"
+ "github.com/grpc-ecosystem/grpc-gateway/runtime"
+ "github.com/grpc-ecosystem/grpc-gateway/utilities"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/status"
+)
+
+// Suppress "imported and not used" errors
+var _ codes.Code
+var _ io.Reader
+var _ status.Status
+var _ = runtime.String
+var _ = utilities.NewDoubleArray
+var _ = descriptor.ForMessage
+
+func request_Query_ClientState_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq QueryClientStateRequest
+ var metadata runtime.ServerMetadata
+
+ var (
+ val string
+ ok bool
+ err error
+ _ = err
+ )
+
+ val, ok = pathParams["client_id"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "client_id")
+ }
+
+ protoReq.ClientId, err = runtime.String(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "client_id", err)
+ }
+
+ msg, err := client.ClientState(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func local_request_Query_ClientState_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq QueryClientStateRequest
+ var metadata runtime.ServerMetadata
+
+ var (
+ val string
+ ok bool
+ err error
+ _ = err
+ )
+
+ val, ok = pathParams["client_id"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "client_id")
+ }
+
+ protoReq.ClientId, err = runtime.String(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "client_id", err)
+ }
+
+ msg, err := server.ClientState(ctx, &protoReq)
+ return msg, metadata, err
+
+}
+
+var (
+ filter_Query_ClientStates_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)}
+)
+
+func request_Query_ClientStates_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq QueryClientStatesRequest
+ var metadata runtime.ServerMetadata
+
+ if err := req.ParseForm(); err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_ClientStates_0); err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := client.ClientStates(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func local_request_Query_ClientStates_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq QueryClientStatesRequest
+ var metadata runtime.ServerMetadata
+
+ if err := req.ParseForm(); err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_ClientStates_0); err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := server.ClientStates(ctx, &protoReq)
+ return msg, metadata, err
+
+}
+
+var (
+ filter_Query_ConsensusState_0 = &utilities.DoubleArray{Encoding: map[string]int{"client_id": 0, "revision_number": 1, "revision_height": 2}, Base: []int{1, 1, 2, 3, 0, 0, 0}, Check: []int{0, 1, 1, 1, 2, 3, 4}}
+)
+
+func request_Query_ConsensusState_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq QueryConsensusStateRequest
+ var metadata runtime.ServerMetadata
+
+ var (
+ val string
+ ok bool
+ err error
+ _ = err
+ )
+
+ val, ok = pathParams["client_id"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "client_id")
+ }
+
+ protoReq.ClientId, err = runtime.String(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "client_id", err)
+ }
+
+ val, ok = pathParams["revision_number"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "revision_number")
+ }
+
+ protoReq.RevisionNumber, err = runtime.Uint64(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "revision_number", err)
+ }
+
+ val, ok = pathParams["revision_height"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "revision_height")
+ }
+
+ protoReq.RevisionHeight, err = runtime.Uint64(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "revision_height", err)
+ }
+
+ if err := req.ParseForm(); err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_ConsensusState_0); err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := client.ConsensusState(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func local_request_Query_ConsensusState_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq QueryConsensusStateRequest
+ var metadata runtime.ServerMetadata
+
+ var (
+ val string
+ ok bool
+ err error
+ _ = err
+ )
+
+ val, ok = pathParams["client_id"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "client_id")
+ }
+
+ protoReq.ClientId, err = runtime.String(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "client_id", err)
+ }
+
+ val, ok = pathParams["revision_number"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "revision_number")
+ }
+
+ protoReq.RevisionNumber, err = runtime.Uint64(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "revision_number", err)
+ }
+
+ val, ok = pathParams["revision_height"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "revision_height")
+ }
+
+ protoReq.RevisionHeight, err = runtime.Uint64(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "revision_height", err)
+ }
+
+ if err := req.ParseForm(); err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_ConsensusState_0); err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := server.ConsensusState(ctx, &protoReq)
+ return msg, metadata, err
+
+}
+
+var (
+ filter_Query_ConsensusStates_0 = &utilities.DoubleArray{Encoding: map[string]int{"client_id": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}}
+)
+
+func request_Query_ConsensusStates_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq QueryConsensusStatesRequest
+ var metadata runtime.ServerMetadata
+
+ var (
+ val string
+ ok bool
+ err error
+ _ = err
+ )
+
+ val, ok = pathParams["client_id"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "client_id")
+ }
+
+ protoReq.ClientId, err = runtime.String(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "client_id", err)
+ }
+
+ if err := req.ParseForm(); err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_ConsensusStates_0); err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := client.ConsensusStates(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func local_request_Query_ConsensusStates_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq QueryConsensusStatesRequest
+ var metadata runtime.ServerMetadata
+
+ var (
+ val string
+ ok bool
+ err error
+ _ = err
+ )
+
+ val, ok = pathParams["client_id"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "client_id")
+ }
+
+ protoReq.ClientId, err = runtime.String(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "client_id", err)
+ }
+
+ if err := req.ParseForm(); err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_ConsensusStates_0); err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := server.ConsensusStates(ctx, &protoReq)
+ return msg, metadata, err
+
+}
+
+func request_Query_ClientParams_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq QueryClientParamsRequest
+ var metadata runtime.ServerMetadata
+
+ msg, err := client.ClientParams(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func local_request_Query_ClientParams_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq QueryClientParamsRequest
+ var metadata runtime.ServerMetadata
+
+ msg, err := server.ClientParams(ctx, &protoReq)
+ return msg, metadata, err
+
+}
+
+// RegisterQueryHandlerServer registers the http handlers for service Query to "mux".
+// UnaryRPC :call QueryServer directly.
+// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
+// Note that using this registration option will cause many gRPC library features (such as grpc.SendHeader, etc) to stop working. Consider using RegisterQueryHandlerFromEndpoint instead.
+func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryServer) error {
+
+ mux.Handle("GET", pattern_Query_ClientState_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Query_ClientState_0(rctx, inboundMarshaler, server, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Query_ClientState_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("GET", pattern_Query_ClientStates_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Query_ClientStates_0(rctx, inboundMarshaler, server, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Query_ClientStates_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("GET", pattern_Query_ConsensusState_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Query_ConsensusState_0(rctx, inboundMarshaler, server, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Query_ConsensusState_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("GET", pattern_Query_ConsensusStates_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Query_ConsensusStates_0(rctx, inboundMarshaler, server, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Query_ConsensusStates_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("GET", pattern_Query_ClientParams_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Query_ClientParams_0(rctx, inboundMarshaler, server, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Query_ClientParams_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ return nil
+}
+
+// RegisterQueryHandlerFromEndpoint is the same as RegisterQueryHandler but
+// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
+func RegisterQueryHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
+ conn, err := grpc.Dial(endpoint, opts...)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err != nil {
+ if cerr := conn.Close(); cerr != nil {
+ grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
+ }
+ return
+ }
+ go func() {
+ <-ctx.Done()
+ if cerr := conn.Close(); cerr != nil {
+ grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
+ }
+ }()
+ }()
+
+ return RegisterQueryHandler(ctx, mux, conn)
+}
+
+// RegisterQueryHandler registers the http handlers for service Query to "mux".
+// The handlers forward requests to the grpc endpoint over "conn".
+func RegisterQueryHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
+ return RegisterQueryHandlerClient(ctx, mux, NewQueryClient(conn))
+}
+
+// RegisterQueryHandlerClient registers the http handlers for service Query
+// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "QueryClient".
+// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "QueryClient"
+// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
+// "QueryClient" to call the correct interceptors.
+func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, client QueryClient) error {
+
+ mux.Handle("GET", pattern_Query_ClientState_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Query_ClientState_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Query_ClientState_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("GET", pattern_Query_ClientStates_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Query_ClientStates_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Query_ClientStates_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("GET", pattern_Query_ConsensusState_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Query_ConsensusState_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Query_ConsensusState_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("GET", pattern_Query_ConsensusStates_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Query_ConsensusStates_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Query_ConsensusStates_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("GET", pattern_Query_ClientParams_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Query_ClientParams_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Query_ClientParams_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ return nil
+}
+
+var (
+ pattern_Query_ClientState_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 1, 0, 4, 1, 5, 5}, []string{"ibc", "core", "client", "v1", "client_states", "client_id"}, "", runtime.AssumeColonVerbOpt(true)))
+
+ pattern_Query_ClientStates_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"ibc", "core", "client", "v1", "client_states"}, "", runtime.AssumeColonVerbOpt(true)))
+
+ pattern_Query_ConsensusState_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 1, 0, 4, 1, 5, 5, 2, 6, 1, 0, 4, 1, 5, 7, 2, 8, 1, 0, 4, 1, 5, 9}, []string{"ibc", "core", "client", "v1", "consensus_states", "client_id", "revision", "revision_number", "height", "revision_height"}, "", runtime.AssumeColonVerbOpt(true)))
+
+ pattern_Query_ConsensusStates_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 1, 0, 4, 1, 5, 5}, []string{"ibc", "core", "client", "v1", "consensus_states", "client_id"}, "", runtime.AssumeColonVerbOpt(true)))
+
+ pattern_Query_ClientParams_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"ibc", "client", "v1", "params"}, "", runtime.AssumeColonVerbOpt(true)))
+)
+
+var (
+ forward_Query_ClientState_0 = runtime.ForwardResponseMessage
+
+ forward_Query_ClientStates_0 = runtime.ForwardResponseMessage
+
+ forward_Query_ConsensusState_0 = runtime.ForwardResponseMessage
+
+ forward_Query_ConsensusStates_0 = runtime.ForwardResponseMessage
+
+ forward_Query_ClientParams_0 = runtime.ForwardResponseMessage
+)
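
The gateway file above only defines the per-route handlers and URL patterns; it still has to be mounted on an HTTP server. A minimal usage sketch under stated assumptions (the module import path, addresses, and ports are placeholders, not part of the patch):

package main

import (
    "context"
    "log"
    "net/http"

    "github.com/grpc-ecosystem/grpc-gateway/runtime"
    "google.golang.org/grpc"

    clienttypes "github.com/cosmos/ibc-go/core/02-client/types" // assumed import path
)

func main() {
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    mux := runtime.NewServeMux()
    opts := []grpc.DialOption{grpc.WithInsecure()}
    // Dials the gRPC endpoint and mounts the GET routes defined by the
    // pattern_Query_* variables above (client states, consensus states, params).
    if err := clienttypes.RegisterQueryHandlerFromEndpoint(ctx, mux, "localhost:9090", opts); err != nil {
        log.Fatal(err)
    }
    log.Fatal(http.ListenAndServe(":8080", mux))
}
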
diff --git a/core/02-client/types/tx.pb.go b/core/02-client/types/tx.pb.go
new file mode 100644
index 00000000..1adac387
--- /dev/null
+++ b/core/02-client/types/tx.pb.go
@@ -0,0 +1,2074 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: ibcgo/core/client/v1/tx.proto
+
+package types
+
+import (
+ context "context"
+ fmt "fmt"
+ types "github.com/cosmos/cosmos-sdk/codec/types"
+ _ "github.com/gogo/protobuf/gogoproto"
+ grpc1 "github.com/gogo/protobuf/grpc"
+ proto "github.com/gogo/protobuf/proto"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+ io "io"
+ math "math"
+ math_bits "math/bits"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+// MsgCreateClient defines a message to create an IBC client
+type MsgCreateClient struct {
+ // light client state
+ ClientState *types.Any `protobuf:"bytes,1,opt,name=client_state,json=clientState,proto3" json:"client_state,omitempty" yaml:"client_state"`
+ // consensus state associated with the client that corresponds to a given
+ // height.
+ ConsensusState *types.Any `protobuf:"bytes,2,opt,name=consensus_state,json=consensusState,proto3" json:"consensus_state,omitempty" yaml:"consensus_state"`
+ // signer address
+ Signer string `protobuf:"bytes,3,opt,name=signer,proto3" json:"signer,omitempty"`
+}
+
+func (m *MsgCreateClient) Reset() { *m = MsgCreateClient{} }
+func (m *MsgCreateClient) String() string { return proto.CompactTextString(m) }
+func (*MsgCreateClient) ProtoMessage() {}
+func (*MsgCreateClient) Descriptor() ([]byte, []int) {
+ return fileDescriptor_3848774a44f81317, []int{0}
+}
+func (m *MsgCreateClient) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MsgCreateClient) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_MsgCreateClient.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *MsgCreateClient) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MsgCreateClient.Merge(m, src)
+}
+func (m *MsgCreateClient) XXX_Size() int {
+ return m.Size()
+}
+func (m *MsgCreateClient) XXX_DiscardUnknown() {
+ xxx_messageInfo_MsgCreateClient.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MsgCreateClient proto.InternalMessageInfo
+
+// MsgCreateClientResponse defines the Msg/CreateClient response type.
+type MsgCreateClientResponse struct {
+}
+
+func (m *MsgCreateClientResponse) Reset() { *m = MsgCreateClientResponse{} }
+func (m *MsgCreateClientResponse) String() string { return proto.CompactTextString(m) }
+func (*MsgCreateClientResponse) ProtoMessage() {}
+func (*MsgCreateClientResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_3848774a44f81317, []int{1}
+}
+func (m *MsgCreateClientResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MsgCreateClientResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_MsgCreateClientResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *MsgCreateClientResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MsgCreateClientResponse.Merge(m, src)
+}
+func (m *MsgCreateClientResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *MsgCreateClientResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_MsgCreateClientResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MsgCreateClientResponse proto.InternalMessageInfo
+
+// MsgUpdateClient defines an sdk.Msg to update an IBC client state using
+// the given header.
+type MsgUpdateClient struct {
+ // client unique identifier
+ ClientId string `protobuf:"bytes,1,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty" yaml:"client_id"`
+ // header to update the light client
+ Header *types.Any `protobuf:"bytes,2,opt,name=header,proto3" json:"header,omitempty"`
+ // signer address
+ Signer string `protobuf:"bytes,3,opt,name=signer,proto3" json:"signer,omitempty"`
+}
+
+func (m *MsgUpdateClient) Reset() { *m = MsgUpdateClient{} }
+func (m *MsgUpdateClient) String() string { return proto.CompactTextString(m) }
+func (*MsgUpdateClient) ProtoMessage() {}
+func (*MsgUpdateClient) Descriptor() ([]byte, []int) {
+ return fileDescriptor_3848774a44f81317, []int{2}
+}
+func (m *MsgUpdateClient) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MsgUpdateClient) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_MsgUpdateClient.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *MsgUpdateClient) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MsgUpdateClient.Merge(m, src)
+}
+func (m *MsgUpdateClient) XXX_Size() int {
+ return m.Size()
+}
+func (m *MsgUpdateClient) XXX_DiscardUnknown() {
+ xxx_messageInfo_MsgUpdateClient.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MsgUpdateClient proto.InternalMessageInfo
+
+// MsgUpdateClientResponse defines the Msg/UpdateClient response type.
+type MsgUpdateClientResponse struct {
+}
+
+func (m *MsgUpdateClientResponse) Reset() { *m = MsgUpdateClientResponse{} }
+func (m *MsgUpdateClientResponse) String() string { return proto.CompactTextString(m) }
+func (*MsgUpdateClientResponse) ProtoMessage() {}
+func (*MsgUpdateClientResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_3848774a44f81317, []int{3}
+}
+func (m *MsgUpdateClientResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MsgUpdateClientResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_MsgUpdateClientResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *MsgUpdateClientResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MsgUpdateClientResponse.Merge(m, src)
+}
+func (m *MsgUpdateClientResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *MsgUpdateClientResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_MsgUpdateClientResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MsgUpdateClientResponse proto.InternalMessageInfo
+
+// MsgUpgradeClient defines an sdk.Msg to upgrade an IBC client to a new client
+// state
+type MsgUpgradeClient struct {
+ // client unique identifier
+ ClientId string `protobuf:"bytes,1,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty" yaml:"client_id"`
+ // upgraded client state
+ ClientState *types.Any `protobuf:"bytes,2,opt,name=client_state,json=clientState,proto3" json:"client_state,omitempty" yaml:"client_state"`
+ // upgraded consensus state, only contains enough information to serve as a
+ // basis of trust in update logic
+ ConsensusState *types.Any `protobuf:"bytes,3,opt,name=consensus_state,json=consensusState,proto3" json:"consensus_state,omitempty" yaml:"consensus_state"`
+ // proof that old chain committed to new client
+ ProofUpgradeClient []byte `protobuf:"bytes,4,opt,name=proof_upgrade_client,json=proofUpgradeClient,proto3" json:"proof_upgrade_client,omitempty" yaml:"proof_upgrade_client"`
+ // proof that old chain committed to new consensus state
+ ProofUpgradeConsensusState []byte `protobuf:"bytes,5,opt,name=proof_upgrade_consensus_state,json=proofUpgradeConsensusState,proto3" json:"proof_upgrade_consensus_state,omitempty" yaml:"proof_upgrade_consensus_state"`
+ // signer address
+ Signer string `protobuf:"bytes,6,opt,name=signer,proto3" json:"signer,omitempty"`
+}
+
+func (m *MsgUpgradeClient) Reset() { *m = MsgUpgradeClient{} }
+func (m *MsgUpgradeClient) String() string { return proto.CompactTextString(m) }
+func (*MsgUpgradeClient) ProtoMessage() {}
+func (*MsgUpgradeClient) Descriptor() ([]byte, []int) {
+ return fileDescriptor_3848774a44f81317, []int{4}
+}
+func (m *MsgUpgradeClient) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MsgUpgradeClient) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_MsgUpgradeClient.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *MsgUpgradeClient) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MsgUpgradeClient.Merge(m, src)
+}
+func (m *MsgUpgradeClient) XXX_Size() int {
+ return m.Size()
+}
+func (m *MsgUpgradeClient) XXX_DiscardUnknown() {
+ xxx_messageInfo_MsgUpgradeClient.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MsgUpgradeClient proto.InternalMessageInfo
+
+// MsgUpgradeClientResponse defines the Msg/UpgradeClient response type.
+type MsgUpgradeClientResponse struct {
+}
+
+func (m *MsgUpgradeClientResponse) Reset() { *m = MsgUpgradeClientResponse{} }
+func (m *MsgUpgradeClientResponse) String() string { return proto.CompactTextString(m) }
+func (*MsgUpgradeClientResponse) ProtoMessage() {}
+func (*MsgUpgradeClientResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_3848774a44f81317, []int{5}
+}
+func (m *MsgUpgradeClientResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MsgUpgradeClientResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_MsgUpgradeClientResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *MsgUpgradeClientResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MsgUpgradeClientResponse.Merge(m, src)
+}
+func (m *MsgUpgradeClientResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *MsgUpgradeClientResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_MsgUpgradeClientResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MsgUpgradeClientResponse proto.InternalMessageInfo
+
+// MsgSubmitMisbehaviour defines an sdk.Msg type that submits Evidence for
+// light client misbehaviour.
+type MsgSubmitMisbehaviour struct {
+ // client unique identifier
+ ClientId string `protobuf:"bytes,1,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty" yaml:"client_id"`
+ // misbehaviour used for freezing the light client
+ Misbehaviour *types.Any `protobuf:"bytes,2,opt,name=misbehaviour,proto3" json:"misbehaviour,omitempty"`
+ // signer address
+ Signer string `protobuf:"bytes,3,opt,name=signer,proto3" json:"signer,omitempty"`
+}
+
+func (m *MsgSubmitMisbehaviour) Reset() { *m = MsgSubmitMisbehaviour{} }
+func (m *MsgSubmitMisbehaviour) String() string { return proto.CompactTextString(m) }
+func (*MsgSubmitMisbehaviour) ProtoMessage() {}
+func (*MsgSubmitMisbehaviour) Descriptor() ([]byte, []int) {
+ return fileDescriptor_3848774a44f81317, []int{6}
+}
+func (m *MsgSubmitMisbehaviour) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MsgSubmitMisbehaviour) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_MsgSubmitMisbehaviour.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *MsgSubmitMisbehaviour) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MsgSubmitMisbehaviour.Merge(m, src)
+}
+func (m *MsgSubmitMisbehaviour) XXX_Size() int {
+ return m.Size()
+}
+func (m *MsgSubmitMisbehaviour) XXX_DiscardUnknown() {
+ xxx_messageInfo_MsgSubmitMisbehaviour.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MsgSubmitMisbehaviour proto.InternalMessageInfo
+
+// MsgSubmitMisbehaviourResponse defines the Msg/SubmitMisbehaviour response
+// type.
+type MsgSubmitMisbehaviourResponse struct {
+}
+
+func (m *MsgSubmitMisbehaviourResponse) Reset() { *m = MsgSubmitMisbehaviourResponse{} }
+func (m *MsgSubmitMisbehaviourResponse) String() string { return proto.CompactTextString(m) }
+func (*MsgSubmitMisbehaviourResponse) ProtoMessage() {}
+func (*MsgSubmitMisbehaviourResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_3848774a44f81317, []int{7}
+}
+func (m *MsgSubmitMisbehaviourResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MsgSubmitMisbehaviourResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_MsgSubmitMisbehaviourResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *MsgSubmitMisbehaviourResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MsgSubmitMisbehaviourResponse.Merge(m, src)
+}
+func (m *MsgSubmitMisbehaviourResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *MsgSubmitMisbehaviourResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_MsgSubmitMisbehaviourResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MsgSubmitMisbehaviourResponse proto.InternalMessageInfo
+
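
The Msg types above carry their client and consensus states as protobuf Any values. A hedged construction sketch, assuming the inputs implement proto.Message and reusing the cosmos-sdk codec/types package imported above as `types`; the helper name and signer variable are hypothetical:

func newMsgCreateClient(clientState, consensusState proto.Message, signer string) (*MsgCreateClient, error) {
    // Pack the concrete client state into an Any so it can travel in the message.
    anyClient, err := types.NewAnyWithValue(clientState)
    if err != nil {
        return nil, err
    }
    anyConsensus, err := types.NewAnyWithValue(consensusState)
    if err != nil {
        return nil, err
    }
    return &MsgCreateClient{
        ClientState:    anyClient,
        ConsensusState: anyConsensus,
        Signer:         signer,
    }, nil
}
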
+func init() {
+ proto.RegisterType((*MsgCreateClient)(nil), "ibcgo.core.client.v1.MsgCreateClient")
+ proto.RegisterType((*MsgCreateClientResponse)(nil), "ibcgo.core.client.v1.MsgCreateClientResponse")
+ proto.RegisterType((*MsgUpdateClient)(nil), "ibcgo.core.client.v1.MsgUpdateClient")
+ proto.RegisterType((*MsgUpdateClientResponse)(nil), "ibcgo.core.client.v1.MsgUpdateClientResponse")
+ proto.RegisterType((*MsgUpgradeClient)(nil), "ibcgo.core.client.v1.MsgUpgradeClient")
+ proto.RegisterType((*MsgUpgradeClientResponse)(nil), "ibcgo.core.client.v1.MsgUpgradeClientResponse")
+ proto.RegisterType((*MsgSubmitMisbehaviour)(nil), "ibcgo.core.client.v1.MsgSubmitMisbehaviour")
+ proto.RegisterType((*MsgSubmitMisbehaviourResponse)(nil), "ibcgo.core.client.v1.MsgSubmitMisbehaviourResponse")
+}
+
+func init() { proto.RegisterFile("ibcgo/core/client/v1/tx.proto", fileDescriptor_3848774a44f81317) }
+
+var fileDescriptor_3848774a44f81317 = []byte{
+ // 600 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x55, 0x3d, 0x6f, 0xd3, 0x40,
+ 0x18, 0x8e, 0x1b, 0x88, 0xda, 0x6b, 0xa0, 0x95, 0x09, 0x6d, 0x6a, 0x14, 0x3b, 0x58, 0x80, 0x22,
+ 0x41, 0xce, 0x24, 0x5d, 0x50, 0x37, 0xd2, 0x01, 0x31, 0x44, 0x02, 0x57, 0x0c, 0xb0, 0x44, 0xfe,
+ 0xb8, 0x5e, 0x4e, 0x24, 0xbe, 0xc8, 0x67, 0x47, 0xe4, 0x1f, 0x30, 0x32, 0xf0, 0x03, 0x3a, 0xf1,
+ 0x03, 0xf8, 0x15, 0x8c, 0x1d, 0x18, 0x98, 0xa2, 0x2a, 0x59, 0x98, 0xf3, 0x0b, 0x50, 0xee, 0x9c,
+ 0x10, 0x1b, 0xc7, 0x0a, 0x5f, 0x9b, 0xdf, 0x7b, 0x9f, 0x7b, 0x9e, 0xf7, 0xf1, 0xfb, 0xde, 0x1d,
+ 0xa8, 0x10, 0xdb, 0xc1, 0xd4, 0x70, 0xa8, 0x8f, 0x0c, 0xa7, 0x47, 0x90, 0x17, 0x18, 0xc3, 0x86,
+ 0x11, 0xbc, 0x83, 0x03, 0x9f, 0x06, 0x54, 0x2e, 0xf1, 0x34, 0x9c, 0xa7, 0xa1, 0x48, 0xc3, 0x61,
+ 0x43, 0x29, 0x61, 0x8a, 0x29, 0x07, 0x18, 0xf3, 0x2f, 0x81, 0x55, 0x8e, 0x30, 0xa5, 0xb8, 0x87,
+ 0x0c, 0x1e, 0xd9, 0xe1, 0xb9, 0x61, 0x79, 0xa3, 0x28, 0x75, 0x37, 0x55, 0x25, 0x22, 0xe4, 0x10,
+ 0xfd, 0x4a, 0x02, 0x7b, 0x6d, 0x86, 0x4f, 0x7d, 0x64, 0x05, 0xe8, 0x94, 0x67, 0xe4, 0x17, 0xa0,
+ 0x28, 0x30, 0x1d, 0x16, 0x58, 0x01, 0x2a, 0x4b, 0x55, 0xa9, 0xb6, 0xdb, 0x2c, 0x41, 0x21, 0x04,
+ 0x17, 0x42, 0xf0, 0xa9, 0x37, 0x6a, 0x1d, 0xce, 0xc6, 0xda, 0xad, 0x91, 0xd5, 0xef, 0x9d, 0xe8,
+ 0xab, 0x7b, 0x74, 0x73, 0x57, 0x84, 0x67, 0xf3, 0x48, 0x7e, 0x0d, 0xf6, 0x1c, 0xea, 0x31, 0xe4,
+ 0xb1, 0x90, 0x45, 0xa4, 0x5b, 0x19, 0xa4, 0xca, 0x6c, 0xac, 0x1d, 0x44, 0xa4, 0xf1, 0x6d, 0xba,
+ 0x79, 0x73, 0xb9, 0x22, 0xa8, 0x0f, 0x40, 0x81, 0x11, 0xec, 0x21, 0xbf, 0x9c, 0xaf, 0x4a, 0xb5,
+ 0x1d, 0x33, 0x8a, 0x4e, 0xb6, 0xdf, 0x5f, 0x68, 0xb9, 0xef, 0x17, 0x5a, 0x4e, 0x3f, 0x02, 0x87,
+ 0x09, 0x87, 0x26, 0x62, 0x83, 0x39, 0x8b, 0xfe, 0x51, 0xb8, 0x7f, 0x35, 0x70, 0x7f, 0xba, 0x6f,
+ 0x80, 0x9d, 0xc8, 0x09, 0x71, 0xb9, 0xf5, 0x9d, 0x56, 0x69, 0x36, 0xd6, 0xf6, 0x63, 0x26, 0x89,
+ 0xab, 0x9b, 0xdb, 0xe2, 0xfb, 0xb9, 0x2b, 0x3f, 0x02, 0x85, 0x2e, 0xb2, 0x5c, 0xe4, 0x67, 0xb9,
+ 0x32, 0x23, 0xcc, 0xc6, 0x15, 0xaf, 0x56, 0xb5, 0xac, 0xf8, 0x6b, 0x1e, 0xec, 0xf3, 0x1c, 0xf6,
+ 0x2d, 0xf7, 0x2f, 0x4a, 0x4e, 0xf6, 0x78, 0xeb, 0x7f, 0xf4, 0x38, 0xff, 0x8f, 0x7a, 0xfc, 0x12,
+ 0x94, 0x06, 0x3e, 0xa5, 0xe7, 0x9d, 0x50, 0xd8, 0xee, 0x08, 0xdd, 0xf2, 0xb5, 0xaa, 0x54, 0x2b,
+ 0xb6, 0xb4, 0xd9, 0x58, 0xbb, 0x23, 0x98, 0xd2, 0x50, 0xba, 0x29, 0xf3, 0xe5, 0xf8, 0x2f, 0x7b,
+ 0x0b, 0x2a, 0x09, 0x70, 0xa2, 0xf6, 0xeb, 0x9c, 0xbb, 0x36, 0x1b, 0x6b, 0xf7, 0x52, 0xb9, 0x93,
+ 0x35, 0x2b, 0x31, 0x91, 0x75, 0x33, 0x5a, 0x58, 0xd3, 0x71, 0x05, 0x94, 0x93, 0x5d, 0x5d, 0xb6,
+ 0xfc, 0x93, 0x04, 0x6e, 0xb7, 0x19, 0x3e, 0x0b, 0xed, 0x3e, 0x09, 0xda, 0x84, 0xd9, 0xa8, 0x6b,
+ 0x0d, 0x09, 0x0d, 0xfd, 0x3f, 0xe9, 0xfb, 0x13, 0x50, 0xec, 0xaf, 0x50, 0x64, 0x0e, 0x6c, 0x0c,
+ 0xb9, 0xc1, 0xd8, 0x6a, 0xa0, 0x92, 0x5a, 0xe7, 0xc2, 0x49, 0xf3, 0x73, 0x1e, 0xe4, 0xdb, 0x0c,
+ 0xcb, 0x2e, 0x28, 0xc6, 0x2e, 0x9c, 0xfb, 0x30, 0xed, 0xbe, 0x83, 0x89, 0x53, 0xab, 0xd4, 0x37,
+ 0x82, 0x2d, 0xd4, 0xe6, 0x2a, 0xb1, 0x83, 0xbd, 0x5e, 0x65, 0x15, 0x96, 0xa1, 0x92, 0x76, 0x20,
+ 0x65, 0x0c, 0x6e, 0xc4, 0x27, 0xeb, 0x41, 0xc6, 0xfe, 0x15, 0x9c, 0x02, 0x37, 0xc3, 0x2d, 0x85,
+ 0x86, 0x40, 0x4e, 0x19, 0x81, 0x87, 0x6b, 0x59, 0x7e, 0x05, 0x2b, 0xc7, 0xbf, 0x01, 0x5e, 0xe8,
+ 0xb6, 0x9e, 0x7d, 0x99, 0xa8, 0xd2, 0xe5, 0x44, 0x95, 0xae, 0x26, 0xaa, 0xf4, 0x61, 0xaa, 0xe6,
+ 0x2e, 0xa7, 0x6a, 0xee, 0xdb, 0x54, 0xcd, 0xbd, 0xa9, 0x63, 0x12, 0x74, 0x43, 0x1b, 0x3a, 0xb4,
+ 0x6f, 0x38, 0x94, 0xf5, 0x29, 0x33, 0x88, 0xed, 0xd4, 0x17, 0x2f, 0xce, 0xe3, 0x66, 0x3d, 0x7a,
+ 0x74, 0x82, 0xd1, 0x00, 0x31, 0xbb, 0xc0, 0x87, 0xeb, 0xf8, 0x47, 0x00, 0x00, 0x00, 0xff, 0xff,
+ 0xb4, 0xb9, 0x0f, 0xae, 0xfc, 0x06, 0x00, 0x00,
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// MsgClient is the client API for Msg service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type MsgClient interface {
+ // CreateClient defines a rpc handler method for MsgCreateClient.
+ CreateClient(ctx context.Context, in *MsgCreateClient, opts ...grpc.CallOption) (*MsgCreateClientResponse, error)
+ // UpdateClient defines a rpc handler method for MsgUpdateClient.
+ UpdateClient(ctx context.Context, in *MsgUpdateClient, opts ...grpc.CallOption) (*MsgUpdateClientResponse, error)
+ // UpgradeClient defines a rpc handler method for MsgUpgradeClient.
+ UpgradeClient(ctx context.Context, in *MsgUpgradeClient, opts ...grpc.CallOption) (*MsgUpgradeClientResponse, error)
+ // SubmitMisbehaviour defines a rpc handler method for MsgSubmitMisbehaviour.
+ SubmitMisbehaviour(ctx context.Context, in *MsgSubmitMisbehaviour, opts ...grpc.CallOption) (*MsgSubmitMisbehaviourResponse, error)
+}
+
+type msgClient struct {
+ cc grpc1.ClientConn
+}
+
+func NewMsgClient(cc grpc1.ClientConn) MsgClient {
+ return &msgClient{cc}
+}
+
+func (c *msgClient) CreateClient(ctx context.Context, in *MsgCreateClient, opts ...grpc.CallOption) (*MsgCreateClientResponse, error) {
+ out := new(MsgCreateClientResponse)
+ err := c.cc.Invoke(ctx, "/ibcgo.core.client.v1.Msg/CreateClient", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *msgClient) UpdateClient(ctx context.Context, in *MsgUpdateClient, opts ...grpc.CallOption) (*MsgUpdateClientResponse, error) {
+ out := new(MsgUpdateClientResponse)
+ err := c.cc.Invoke(ctx, "/ibcgo.core.client.v1.Msg/UpdateClient", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *msgClient) UpgradeClient(ctx context.Context, in *MsgUpgradeClient, opts ...grpc.CallOption) (*MsgUpgradeClientResponse, error) {
+ out := new(MsgUpgradeClientResponse)
+ err := c.cc.Invoke(ctx, "/ibcgo.core.client.v1.Msg/UpgradeClient", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *msgClient) SubmitMisbehaviour(ctx context.Context, in *MsgSubmitMisbehaviour, opts ...grpc.CallOption) (*MsgSubmitMisbehaviourResponse, error) {
+ out := new(MsgSubmitMisbehaviourResponse)
+ err := c.cc.Invoke(ctx, "/ibcgo.core.client.v1.Msg/SubmitMisbehaviour", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
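
The four client methods above do nothing beyond invoking the fully qualified RPC name on the wrapped connection, so any grpc1.ClientConn implementation can back them. A minimal sketch of a caller, assuming it lives in this package; the helper name is hypothetical, not part of the generated API:

```go
// Hypothetical helper: submits misbehaviour through the generated MsgClient.
// Any grpc1.ClientConn (a live connection or an in-process recorder) works here.
func submitMisbehaviour(ctx context.Context, conn grpc1.ClientConn, clientID, signer string, misbehaviour *types.Any) error {
	msgClient := NewMsgClient(conn)
	_, err := msgClient.SubmitMisbehaviour(ctx, &MsgSubmitMisbehaviour{
		ClientId:     clientID,
		Misbehaviour: misbehaviour,
		Signer:       signer,
	})
	return err
}
```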
+
+// MsgServer is the server API for Msg service.
+type MsgServer interface {
+ // CreateClient defines a rpc handler method for MsgCreateClient.
+ CreateClient(context.Context, *MsgCreateClient) (*MsgCreateClientResponse, error)
+ // UpdateClient defines a rpc handler method for MsgUpdateClient.
+ UpdateClient(context.Context, *MsgUpdateClient) (*MsgUpdateClientResponse, error)
+ // UpgradeClient defines a rpc handler method for MsgUpgradeClient.
+ UpgradeClient(context.Context, *MsgUpgradeClient) (*MsgUpgradeClientResponse, error)
+ // SubmitMisbehaviour defines a rpc handler method for MsgSubmitMisbehaviour.
+ SubmitMisbehaviour(context.Context, *MsgSubmitMisbehaviour) (*MsgSubmitMisbehaviourResponse, error)
+}
+
+// UnimplementedMsgServer can be embedded to have forward compatible implementations.
+type UnimplementedMsgServer struct {
+}
+
+func (*UnimplementedMsgServer) CreateClient(ctx context.Context, req *MsgCreateClient) (*MsgCreateClientResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method CreateClient not implemented")
+}
+func (*UnimplementedMsgServer) UpdateClient(ctx context.Context, req *MsgUpdateClient) (*MsgUpdateClientResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method UpdateClient not implemented")
+}
+func (*UnimplementedMsgServer) UpgradeClient(ctx context.Context, req *MsgUpgradeClient) (*MsgUpgradeClientResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method UpgradeClient not implemented")
+}
+func (*UnimplementedMsgServer) SubmitMisbehaviour(ctx context.Context, req *MsgSubmitMisbehaviour) (*MsgSubmitMisbehaviourResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method SubmitMisbehaviour not implemented")
+}
+
+func RegisterMsgServer(s grpc1.Server, srv MsgServer) {
+ s.RegisterService(&_Msg_serviceDesc, srv)
+}
+
+func _Msg_CreateClient_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(MsgCreateClient)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(MsgServer).CreateClient(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/ibcgo.core.client.v1.Msg/CreateClient",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(MsgServer).CreateClient(ctx, req.(*MsgCreateClient))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Msg_UpdateClient_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(MsgUpdateClient)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(MsgServer).UpdateClient(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/ibcgo.core.client.v1.Msg/UpdateClient",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(MsgServer).UpdateClient(ctx, req.(*MsgUpdateClient))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Msg_UpgradeClient_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(MsgUpgradeClient)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(MsgServer).UpgradeClient(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/ibcgo.core.client.v1.Msg/UpgradeClient",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(MsgServer).UpgradeClient(ctx, req.(*MsgUpgradeClient))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Msg_SubmitMisbehaviour_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(MsgSubmitMisbehaviour)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(MsgServer).SubmitMisbehaviour(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/ibcgo.core.client.v1.Msg/SubmitMisbehaviour",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(MsgServer).SubmitMisbehaviour(ctx, req.(*MsgSubmitMisbehaviour))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _Msg_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "ibcgo.core.client.v1.Msg",
+ HandlerType: (*MsgServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "CreateClient",
+ Handler: _Msg_CreateClient_Handler,
+ },
+ {
+ MethodName: "UpdateClient",
+ Handler: _Msg_UpdateClient_Handler,
+ },
+ {
+ MethodName: "UpgradeClient",
+ Handler: _Msg_UpgradeClient_Handler,
+ },
+ {
+ MethodName: "SubmitMisbehaviour",
+ Handler: _Msg_SubmitMisbehaviour_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "ibcgo/core/client/v1/tx.proto",
+}
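
On the server side, implementations can embed UnimplementedMsgServer so they stay forward compatible when RPCs are added, and they are registered against the service descriptor above. A sketch, assuming a grpc1.Server (for example a *grpc.Server) is available; the stub type is hypothetical:

```go
// Hypothetical stub: embedding UnimplementedMsgServer satisfies MsgServer and
// returns codes.Unimplemented for anything not overridden below.
type stubMsgServer struct {
	UnimplementedMsgServer
}

// CreateClient overrides the unimplemented default; a real keeper would
// validate the message and persist the client and consensus states here.
func (s *stubMsgServer) CreateClient(ctx context.Context, msg *MsgCreateClient) (*MsgCreateClientResponse, error) {
	return &MsgCreateClientResponse{}, nil
}

func registerStub(s grpc1.Server) {
	RegisterMsgServer(s, &stubMsgServer{})
}
```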
+
+func (m *MsgCreateClient) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MsgCreateClient) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MsgCreateClient) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Signer) > 0 {
+ i -= len(m.Signer)
+ copy(dAtA[i:], m.Signer)
+ i = encodeVarintTx(dAtA, i, uint64(len(m.Signer)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.ConsensusState != nil {
+ {
+ size, err := m.ConsensusState.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTx(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.ClientState != nil {
+ {
+ size, err := m.ClientState.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTx(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *MsgCreateClientResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MsgCreateClientResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MsgCreateClientResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ return len(dAtA) - i, nil
+}
+
+func (m *MsgUpdateClient) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MsgUpdateClient) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MsgUpdateClient) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Signer) > 0 {
+ i -= len(m.Signer)
+ copy(dAtA[i:], m.Signer)
+ i = encodeVarintTx(dAtA, i, uint64(len(m.Signer)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.Header != nil {
+ {
+ size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTx(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.ClientId) > 0 {
+ i -= len(m.ClientId)
+ copy(dAtA[i:], m.ClientId)
+ i = encodeVarintTx(dAtA, i, uint64(len(m.ClientId)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *MsgUpdateClientResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MsgUpdateClientResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MsgUpdateClientResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ return len(dAtA) - i, nil
+}
+
+func (m *MsgUpgradeClient) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MsgUpgradeClient) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MsgUpgradeClient) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Signer) > 0 {
+ i -= len(m.Signer)
+ copy(dAtA[i:], m.Signer)
+ i = encodeVarintTx(dAtA, i, uint64(len(m.Signer)))
+ i--
+ dAtA[i] = 0x32
+ }
+ if len(m.ProofUpgradeConsensusState) > 0 {
+ i -= len(m.ProofUpgradeConsensusState)
+ copy(dAtA[i:], m.ProofUpgradeConsensusState)
+ i = encodeVarintTx(dAtA, i, uint64(len(m.ProofUpgradeConsensusState)))
+ i--
+ dAtA[i] = 0x2a
+ }
+ if len(m.ProofUpgradeClient) > 0 {
+ i -= len(m.ProofUpgradeClient)
+ copy(dAtA[i:], m.ProofUpgradeClient)
+ i = encodeVarintTx(dAtA, i, uint64(len(m.ProofUpgradeClient)))
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.ConsensusState != nil {
+ {
+ size, err := m.ConsensusState.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTx(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.ClientState != nil {
+ {
+ size, err := m.ClientState.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTx(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.ClientId) > 0 {
+ i -= len(m.ClientId)
+ copy(dAtA[i:], m.ClientId)
+ i = encodeVarintTx(dAtA, i, uint64(len(m.ClientId)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *MsgUpgradeClientResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MsgUpgradeClientResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MsgUpgradeClientResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ return len(dAtA) - i, nil
+}
+
+func (m *MsgSubmitMisbehaviour) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MsgSubmitMisbehaviour) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MsgSubmitMisbehaviour) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Signer) > 0 {
+ i -= len(m.Signer)
+ copy(dAtA[i:], m.Signer)
+ i = encodeVarintTx(dAtA, i, uint64(len(m.Signer)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.Misbehaviour != nil {
+ {
+ size, err := m.Misbehaviour.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTx(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.ClientId) > 0 {
+ i -= len(m.ClientId)
+ copy(dAtA[i:], m.ClientId)
+ i = encodeVarintTx(dAtA, i, uint64(len(m.ClientId)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *MsgSubmitMisbehaviourResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MsgSubmitMisbehaviourResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MsgSubmitMisbehaviourResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintTx(dAtA []byte, offset int, v uint64) int {
+ offset -= sovTx(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
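
encodeVarintTx supports the MarshalToSizedBuffer methods above, which fill the buffer back to front: the varint is written immediately before the supplied offset and the new start index is returned. An illustrative check with arbitrary values:

```go
// Illustrative only: 300 needs two varint bytes (0xAC 0x02), so encodeVarintTx
// writes them into buf[6:8] and returns 6 as the new write position.
func exampleEncodeVarint() {
	buf := make([]byte, 8)
	i := encodeVarintTx(buf, len(buf), 300)
	fmt.Printf("start=%d bytes=% x\n", i, buf[i:]) // start=6 bytes=ac 02
}
```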
+func (m *MsgCreateClient) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.ClientState != nil {
+ l = m.ClientState.Size()
+ n += 1 + l + sovTx(uint64(l))
+ }
+ if m.ConsensusState != nil {
+ l = m.ConsensusState.Size()
+ n += 1 + l + sovTx(uint64(l))
+ }
+ l = len(m.Signer)
+ if l > 0 {
+ n += 1 + l + sovTx(uint64(l))
+ }
+ return n
+}
+
+func (m *MsgCreateClientResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ return n
+}
+
+func (m *MsgUpdateClient) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.ClientId)
+ if l > 0 {
+ n += 1 + l + sovTx(uint64(l))
+ }
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovTx(uint64(l))
+ }
+ l = len(m.Signer)
+ if l > 0 {
+ n += 1 + l + sovTx(uint64(l))
+ }
+ return n
+}
+
+func (m *MsgUpdateClientResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ return n
+}
+
+func (m *MsgUpgradeClient) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.ClientId)
+ if l > 0 {
+ n += 1 + l + sovTx(uint64(l))
+ }
+ if m.ClientState != nil {
+ l = m.ClientState.Size()
+ n += 1 + l + sovTx(uint64(l))
+ }
+ if m.ConsensusState != nil {
+ l = m.ConsensusState.Size()
+ n += 1 + l + sovTx(uint64(l))
+ }
+ l = len(m.ProofUpgradeClient)
+ if l > 0 {
+ n += 1 + l + sovTx(uint64(l))
+ }
+ l = len(m.ProofUpgradeConsensusState)
+ if l > 0 {
+ n += 1 + l + sovTx(uint64(l))
+ }
+ l = len(m.Signer)
+ if l > 0 {
+ n += 1 + l + sovTx(uint64(l))
+ }
+ return n
+}
+
+func (m *MsgUpgradeClientResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ return n
+}
+
+func (m *MsgSubmitMisbehaviour) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.ClientId)
+ if l > 0 {
+ n += 1 + l + sovTx(uint64(l))
+ }
+ if m.Misbehaviour != nil {
+ l = m.Misbehaviour.Size()
+ n += 1 + l + sovTx(uint64(l))
+ }
+ l = len(m.Signer)
+ if l > 0 {
+ n += 1 + l + sovTx(uint64(l))
+ }
+ return n
+}
+
+func (m *MsgSubmitMisbehaviourResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ return n
+}
+
+func sovTx(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozTx(x uint64) (n int) {
+ return sovTx(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
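
sovTx reports how many bytes a value needs as an unsigned varint, while sozTx zig-zag encodes first so small negative signed values stay short; the Size methods above sum these per populated field. A tiny illustration, with the expected counts in comments:

```go
// Illustrative values only.
func exampleVarintSizes() {
	_ = sovTx(1)                 // 1 byte
	_ = sovTx(300)               // 2 bytes
	_ = sozTx(uint64(int64(-1))) // zig-zags to 1, so also 1 byte
}
```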
+func (m *MsgCreateClient) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MsgCreateClient: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MsgCreateClient: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClientState", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ClientState == nil {
+ m.ClientState = &types.Any{}
+ }
+ if err := m.ClientState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ConsensusState", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ConsensusState == nil {
+ m.ConsensusState = &types.Any{}
+ }
+ if err := m.ConsensusState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Signer", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Signer = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTx(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthTx
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
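
Each decoder reads a varint key and splits it into a field number and a wire type before dispatching, mirroring the keys the marshaller wrote. A worked example using the 0x1a key emitted for Signer above:

```go
// Illustrative tag arithmetic: 0x1a >> 3 == 3 (field number) and
// 0x1a & 0x7 == 2 (length-delimited wire type), i.e. the Signer field.
func exampleTagDecode() (fieldNum int32, wireType int) {
	var wire uint64 = 0x1a
	return int32(wire >> 3), int(wire & 0x7)
}
```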
+func (m *MsgCreateClientResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MsgCreateClientResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MsgCreateClientResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTx(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthTx
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MsgUpdateClient) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MsgUpdateClient: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MsgUpdateClient: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClientId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ClientId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &types.Any{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Signer", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Signer = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTx(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthTx
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MsgUpdateClientResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MsgUpdateClientResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MsgUpdateClientResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTx(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthTx
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MsgUpgradeClient) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MsgUpgradeClient: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MsgUpgradeClient: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClientId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ClientId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClientState", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ClientState == nil {
+ m.ClientState = &types.Any{}
+ }
+ if err := m.ClientState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ConsensusState", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ConsensusState == nil {
+ m.ConsensusState = &types.Any{}
+ }
+ if err := m.ConsensusState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProofUpgradeClient", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ProofUpgradeClient = append(m.ProofUpgradeClient[:0], dAtA[iNdEx:postIndex]...)
+ if m.ProofUpgradeClient == nil {
+ m.ProofUpgradeClient = []byte{}
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProofUpgradeConsensusState", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ProofUpgradeConsensusState = append(m.ProofUpgradeConsensusState[:0], dAtA[iNdEx:postIndex]...)
+ if m.ProofUpgradeConsensusState == nil {
+ m.ProofUpgradeConsensusState = []byte{}
+ }
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Signer", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Signer = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTx(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthTx
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MsgUpgradeClientResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MsgUpgradeClientResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MsgUpgradeClientResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTx(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthTx
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MsgSubmitMisbehaviour) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MsgSubmitMisbehaviour: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MsgSubmitMisbehaviour: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClientId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ClientId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Misbehaviour", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Misbehaviour == nil {
+ m.Misbehaviour = &types.Any{}
+ }
+ if err := m.Misbehaviour.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Signer", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Signer = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTx(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthTx
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MsgSubmitMisbehaviourResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MsgSubmitMisbehaviourResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MsgSubmitMisbehaviourResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTx(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthTx
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipTx(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthTx
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupTx
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthTx
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthTx = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowTx = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupTx = fmt.Errorf("proto: unexpected end of group")
+)
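
Taken together, the generated Marshal and Unmarshal methods round-trip a message losslessly. A small sketch with placeholder identifiers (the Any-typed Header is simply left nil):

```go
// Sketch only; "07-tendermint-0" and the signer string are placeholders.
func exampleRoundTrip() error {
	in := &MsgUpdateClient{ClientId: "07-tendermint-0", Signer: "cosmos1exampleaddress"}
	bz, err := in.Marshal()
	if err != nil {
		return err
	}
	out := &MsgUpdateClient{}
	if err := out.Unmarshal(bz); err != nil {
		return err
	}
	if out.ClientId != in.ClientId || out.Signer != in.Signer {
		return fmt.Errorf("round trip mismatch")
	}
	return nil
}
```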
diff --git a/core/03-connection/client/cli/cli.go b/core/03-connection/client/cli/cli.go
new file mode 100644
index 00000000..01bb6f9b
--- /dev/null
+++ b/core/03-connection/client/cli/cli.go
@@ -0,0 +1,46 @@
+package cli
+
+import (
+ "github.com/spf13/cobra"
+
+ "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
+)
+
+// GetQueryCmd returns the query commands for IBC connections
+func GetQueryCmd() *cobra.Command {
+ queryCmd := &cobra.Command{
+ Use: types.SubModuleName,
+ Short: "IBC connection query subcommands",
+ DisableFlagParsing: true,
+ SuggestionsMinimumDistance: 2,
+ }
+
+ queryCmd.AddCommand(
+ GetCmdQueryConnections(),
+ GetCmdQueryConnection(),
+ GetCmdQueryClientConnections(),
+ )
+
+ return queryCmd
+}
+
+// NewTxCmd returns a CLI command handler for all x/ibc connection transaction commands.
+func NewTxCmd() *cobra.Command {
+ txCmd := &cobra.Command{
+ Use: types.SubModuleName,
+ Short: "IBC connection transaction subcommands",
+ DisableFlagParsing: true,
+ SuggestionsMinimumDistance: 2,
+ RunE: client.ValidateCmd,
+ }
+
+ txCmd.AddCommand(
+ NewConnectionOpenInitCmd(),
+ NewConnectionOpenTryCmd(),
+ NewConnectionOpenAckCmd(),
+ NewConnectionOpenConfirmCmd(),
+ )
+
+ return txCmd
+}
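
GetQueryCmd and NewTxCmd are meant to be mounted under the broader IBC command tree rather than used standalone. A hypothetical wiring sketch, assuming this package is imported as cli and that parent IBC query/tx commands already exist:

```go
// Hypothetical wiring; the parent commands are placeholders supplied by the caller.
func mountConnectionCommands(ibcQueryCmd, ibcTxCmd *cobra.Command) {
	ibcQueryCmd.AddCommand(cli.GetQueryCmd()) // adds the "connection" query subcommands
	ibcTxCmd.AddCommand(cli.NewTxCmd())       // adds the "connection" tx subcommands
}
```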
diff --git a/core/03-connection/client/cli/query.go b/core/03-connection/client/cli/query.go
new file mode 100644
index 00000000..21c4bd8f
--- /dev/null
+++ b/core/03-connection/client/cli/query.go
@@ -0,0 +1,118 @@
+package cli
+
+import (
+ "fmt"
+
+ "github.com/spf13/cobra"
+
+ "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/client/flags"
+ "github.com/cosmos/cosmos-sdk/version"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/client/utils"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
+ host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+)
+
+// GetCmdQueryConnections defines the command to query all the connection ends
+// that this chain maintains.
+func GetCmdQueryConnections() *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "connections",
+ Short: "Query all connections",
+ Long: "Query all connection ends from a chain",
+ Example: fmt.Sprintf("%s query %s %s connections", version.AppName, host.ModuleName, types.SubModuleName),
+ Args: cobra.NoArgs,
+ RunE: func(cmd *cobra.Command, _ []string) error {
+ clientCtx, err := client.GetClientQueryContext(cmd)
+ if err != nil {
+ return err
+ }
+ queryClient := types.NewQueryClient(clientCtx)
+
+ pageReq, err := client.ReadPageRequest(cmd.Flags())
+ if err != nil {
+ return err
+ }
+
+ req := &types.QueryConnectionsRequest{
+ Pagination: pageReq,
+ }
+
+ res, err := queryClient.Connections(cmd.Context(), req)
+ if err != nil {
+ return err
+ }
+
+ return clientCtx.PrintProto(res)
+ },
+ }
+
+ flags.AddQueryFlagsToCmd(cmd)
+ flags.AddPaginationFlagsToCmd(cmd, "connection ends")
+
+ return cmd
+}
+
+// GetCmdQueryConnection defines the command to query a connection end
+func GetCmdQueryConnection() *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "end [connection-id]",
+ Short: "Query stored connection end",
+ Long: "Query stored connection end",
+ Example: fmt.Sprintf("%s query %s %s end [connection-id]", version.AppName, host.ModuleName, types.SubModuleName),
+ Args: cobra.ExactArgs(1),
+ RunE: func(cmd *cobra.Command, args []string) error {
+ clientCtx, err := client.GetClientQueryContext(cmd)
+ if err != nil {
+ return err
+ }
+ connectionID := args[0]
+ prove, _ := cmd.Flags().GetBool(flags.FlagProve)
+
+ connRes, err := utils.QueryConnection(clientCtx, connectionID, prove)
+ if err != nil {
+ return err
+ }
+
+ clientCtx = clientCtx.WithHeight(int64(connRes.ProofHeight.RevisionHeight))
+ return clientCtx.PrintProto(connRes)
+ },
+ }
+
+ cmd.Flags().Bool(flags.FlagProve, true, "show proofs for the query results")
+ flags.AddQueryFlagsToCmd(cmd)
+
+ return cmd
+}
+
+// GetCmdQueryClientConnections defines the command to query the connection paths associated with a client
+func GetCmdQueryClientConnections() *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "path [client-id]",
+ Short: "Query stored client connection paths",
+ Long: "Query stored client connection paths",
+ Example: fmt.Sprintf("%s query %s %s path [client-id]", version.AppName, host.ModuleName, types.SubModuleName),
+ Args: cobra.ExactArgs(1),
+ RunE: func(cmd *cobra.Command, args []string) error {
+ clientCtx, err := client.GetClientQueryContext(cmd)
+ if err != nil {
+ return err
+ }
+ clientID := args[0]
+ prove, _ := cmd.Flags().GetBool(flags.FlagProve)
+
+ connPathsRes, err := utils.QueryClientConnections(clientCtx, clientID, prove)
+ if err != nil {
+ return err
+ }
+
+ clientCtx = clientCtx.WithHeight(int64(connPathsRes.ProofHeight.RevisionHeight))
+ return clientCtx.PrintProto(connPathsRes)
+ },
+ }
+
+ cmd.Flags().Bool(flags.FlagProve, true, "show proofs for the query results")
+ flags.AddQueryFlagsToCmd(cmd)
+
+ return cmd
+}
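
The same information is reachable programmatically through the generated QueryClient, bypassing cobra entirely. A sketch under the assumption that a node exposes its gRPC query service on localhost:9090; the context and google.golang.org/grpc imports are implied:

```go
// Sketch only: the endpoint address is an assumption, not part of this package.
func queryAllConnections(ctx context.Context) (*types.QueryConnectionsResponse, error) {
	conn, err := grpc.Dial("localhost:9090", grpc.WithInsecure())
	if err != nil {
		return nil, err
	}
	defer conn.Close()

	queryClient := types.NewQueryClient(conn)
	return queryClient.Connections(ctx, &types.QueryConnectionsRequest{})
}
```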
diff --git a/core/03-connection/client/cli/tx.go b/core/03-connection/client/cli/tx.go
new file mode 100644
index 00000000..68b1a620
--- /dev/null
+++ b/core/03-connection/client/cli/tx.go
@@ -0,0 +1,348 @@
+package cli
+
+import (
+ "fmt"
+ "io/ioutil"
+ "strings"
+
+ "github.com/pkg/errors"
+ "github.com/spf13/cobra"
+
+ "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/client/flags"
+ "github.com/cosmos/cosmos-sdk/client/tx"
+ "github.com/cosmos/cosmos-sdk/codec"
+ "github.com/cosmos/cosmos-sdk/types/msgservice"
+ "github.com/cosmos/cosmos-sdk/version"
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/client/utils"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
+ host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+)
+
+const (
+ flagVersionIdentifier = "version-identifier"
+ flagVersionFeatures = "version-features"
+ flagDelayPeriod = "delay-period"
+)
+
+// NewConnectionOpenInitCmd defines the command to initialize a connection on
+// chain A with a given counterparty chain B
+func NewConnectionOpenInitCmd() *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "open-init [client-id] [counterparty-client-id] [path/to/counterparty_prefix.json]",
+ Short: "Initialize connection on chain A",
+ Long: `Initialize a connection on chain A with a given counterparty chain B.
+ - 'version-identifier' flag can be a single pre-selected version identifier to be used in the handshake.
+ - 'version-features' flag can be a list of features separated by commas to accompany the version identifier.`,
+ Example: fmt.Sprintf(
+ "%s tx %s %s open-init [client-id] [counterparty-client-id] [path/to/counterparty_prefix.json] --version-identifier=\"1.0\" --version-features=\"ORDER_UNORDERED\" --delay-period=500",
+ version.AppName, host.ModuleName, types.SubModuleName,
+ ),
+ Args: cobra.ExactArgs(3),
+ RunE: func(cmd *cobra.Command, args []string) error {
+ clientCtx, err := client.GetClientTxContext(cmd)
+ if err != nil {
+ return err
+ }
+ clientID := args[0]
+ counterpartyClientID := args[1]
+
+ counterpartyPrefix, err := utils.ParsePrefix(clientCtx.LegacyAmino, args[2])
+ if err != nil {
+ return err
+ }
+
+ var version *types.Version
+ versionIdentifier, _ := cmd.Flags().GetString(flagVersionIdentifier)
+
+ if versionIdentifier != "" {
+ var features []string
+
+ versionFeatures, _ := cmd.Flags().GetString(flagVersionFeatures)
+ if versionFeatures != "" {
+ features = strings.Split(versionFeatures, ",")
+ }
+
+ version = types.NewVersion(versionIdentifier, features)
+ }
+
+ delayPeriod, err := cmd.Flags().GetUint64(flagDelayPeriod)
+ if err != nil {
+ return err
+ }
+
+ msg := types.NewMsgConnectionOpenInit(
+ clientID, counterpartyClientID,
+ counterpartyPrefix, version, delayPeriod, clientCtx.GetFromAddress(),
+ )
+
+ svcMsgClientConn := &msgservice.ServiceMsgClientConn{}
+ msgClient := types.NewMsgClient(svcMsgClientConn)
+ _, err = msgClient.ConnectionOpenInit(cmd.Context(), msg)
+ if err != nil {
+ return err
+ }
+
+ return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), svcMsgClientConn.GetMsgs()...)
+ },
+ }
+
+ // NOTE: we should use empty default values since the user may not want to select a version
+ // at this step in the handshake.
+ cmd.Flags().String(flagVersionIdentifier, "", "version identifier to be used in the connection handshake version negotiation")
+ cmd.Flags().String(flagVersionFeatures, "", "version features list separated by commas without spaces. The features must function with the version identifier.")
+ cmd.Flags().Uint64(flagDelayPeriod, 0, "delay period that must pass before packet verification can pass against a consensus state")
+ flags.AddTxFlagsToCmd(cmd)
+
+ return cmd
+}
+
+// NewConnectionOpenTryCmd defines the command to relay a connection open attempt (try)
+// on chain B
+func NewConnectionOpenTryCmd() *cobra.Command {
+ cmd := &cobra.Command{
+ Use: strings.TrimSpace(`open-try [connection-id] [client-id]
+[counterparty-connection-id] [counterparty-client-id] [path/to/counterparty_prefix.json] [path/to/client_state.json]
+[path/to/counterparty_version1.json,path/to/counterparty_version2.json...] [consensus-height] [proof-height] [path/to/proof_init.json] [path/to/proof_client.json] [path/to/proof_consensus.json]`),
+ Short: "initiate connection handshake between two chains",
+ Long: "Relay a connection open attempt on chain B for a connection initialized on chain A. Provide counterparty versions separated by commas",
+ Example: fmt.Sprintf(
+ `%s tx %s %s open-try [connection-id] [client-id] \
+[counterparty-connection-id] [counterparty-client-id] [path/to/counterparty_prefix.json] [path/to/client_state.json]\
+[counterparty-versions] [consensus-height] [proof-height] [path/to/proof_init.json] [path/to/proof_client.json] [path/to/proof_consensus.json]`,
+ version.AppName, host.ModuleName, types.SubModuleName,
+ ),
+ Args: cobra.ExactArgs(12),
+ RunE: func(cmd *cobra.Command, args []string) error {
+ clientCtx, err := client.GetClientTxContext(cmd)
+ if err != nil {
+ return err
+ }
+ connectionID := args[0]
+ clientID := args[1]
+ counterpartyConnectionID := args[2]
+ counterpartyClientID := args[3]
+
+ counterpartyPrefix, err := utils.ParsePrefix(clientCtx.LegacyAmino, args[4])
+ if err != nil {
+ return err
+ }
+
+ counterpartyClient, err := utils.ParseClientState(clientCtx.LegacyAmino, args[5])
+ if err != nil {
+ return err
+ }
+
+ cdc := codec.NewProtoCodec(clientCtx.InterfaceRegistry)
+
+ versionsStr := strings.Split(args[6], ",")
+ counterpartyVersions := make([]*types.Version, len(versionsStr))
+
+ for i, ver := range versionsStr {
+
+ // attempt to unmarshal version
+ version := &types.Version{}
+ if err := cdc.UnmarshalJSON([]byte(ver), version); err != nil {
+
+ // check for file path if JSON input is not provided
+ contents, err := ioutil.ReadFile(ver)
+ if err != nil {
+ return errors.Wrap(err, "neither JSON input nor path to .json file for version were provided")
+ }
+
+ if err := cdc.UnmarshalJSON(contents, version); err != nil {
+ return errors.Wrap(err, "error unmarshalling version file")
+ }
+ }
+
+ counterpartyVersions[i] = version
+ }
+
+ consensusHeight, err := clienttypes.ParseHeight(args[7])
+ if err != nil {
+ return err
+ }
+ proofHeight, err := clienttypes.ParseHeight(args[8])
+ if err != nil {
+ return err
+ }
+
+ proofInit, err := utils.ParseProof(clientCtx.LegacyAmino, args[9])
+ if err != nil {
+ return err
+ }
+
+ proofClient, err := utils.ParseProof(clientCtx.LegacyAmino, args[10])
+ if err != nil {
+ return err
+ }
+
+ proofConsensus, err := utils.ParseProof(clientCtx.LegacyAmino, args[11])
+ if err != nil {
+ return err
+ }
+
+ delayPeriod, err := cmd.Flags().GetUint64(flagDelayPeriod)
+ if err != nil {
+ return err
+ }
+
+ msg := types.NewMsgConnectionOpenTry(
+ connectionID, clientID, counterpartyConnectionID, counterpartyClientID,
+ counterpartyClient, counterpartyPrefix, counterpartyVersions, delayPeriod,
+ proofInit, proofClient, proofConsensus, proofHeight,
+ consensusHeight, clientCtx.GetFromAddress(),
+ )
+
+ svcMsgClientConn := &msgservice.ServiceMsgClientConn{}
+ msgClient := types.NewMsgClient(svcMsgClientConn)
+ _, err = msgClient.ConnectionOpenTry(cmd.Context(), msg)
+ if err != nil {
+ return err
+ }
+
+ return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), svcMsgClientConn.GetMsgs()...)
+ },
+ }
+
+ cmd.Flags().Uint64(flagDelayPeriod, 0, "delay period that must pass before packet verification can pass against a consensus state")
+ flags.AddTxFlagsToCmd(cmd)
+
+ return cmd
+}
+
+// NewConnectionOpenAckCmd defines the command to relay the acceptance of a
+// connection open attempt from chain B to chain A
+func NewConnectionOpenAckCmd() *cobra.Command {
+ cmd := &cobra.Command{
+ Use: `open-ack [connection-id] [counterparty-connection-id] [path/to/client_state.json] [consensus-height] [proof-height]
+ [path/to/proof_try.json] [path/to/proof_client.json] [path/to/proof_consensus.json] [version]`,
+ Short: "relay the acceptance of a connection open attempt",
+ Long: "Relay the acceptance of a connection open attempt from chain B to chain A",
+ Example: fmt.Sprintf(
+ `%s tx %s %s open-ack [connection-id] [counterparty-connection-id] [path/to/client_state.json] [consensus-height] [proof-height]
+ [path/to/proof_try.json] [path/to/proof_client.json] [path/to/proof_consensus.json] [version]`,
+ version.AppName, host.ModuleName, types.SubModuleName,
+ ),
+ Args: cobra.ExactArgs(9),
+ RunE: func(cmd *cobra.Command, args []string) error {
+ clientCtx, err := client.GetClientTxContext(cmd)
+ if err != nil {
+ return err
+ }
+ connectionID := args[0]
+ counterpartyConnectionID := args[1]
+
+ counterpartyClient, err := utils.ParseClientState(clientCtx.LegacyAmino, args[2])
+ if err != nil {
+ return err
+ }
+
+ consensusHeight, err := clienttypes.ParseHeight(args[3])
+ if err != nil {
+ return err
+ }
+ proofHeight, err := clienttypes.ParseHeight(args[4])
+ if err != nil {
+ return err
+ }
+
+ proofTry, err := utils.ParseProof(clientCtx.LegacyAmino, args[5])
+ if err != nil {
+ return err
+ }
+
+ proofClient, err := utils.ParseProof(clientCtx.LegacyAmino, args[6])
+ if err != nil {
+ return err
+ }
+
+ proofConsensus, err := utils.ParseProof(clientCtx.LegacyAmino, args[7])
+ if err != nil {
+ return err
+ }
+
+ cdc := codec.NewProtoCodec(clientCtx.InterfaceRegistry)
+
+ // attempt to unmarshal version
+ version := &types.Version{}
+ if err := cdc.UnmarshalJSON([]byte(args[8]), version); err != nil {
+
+ // check for file path if JSON input is not provided
+ contents, err := ioutil.ReadFile(args[8])
+ if err != nil {
+ return errors.Wrap(err, "neither JSON input nor path to .json file for version were provided")
+ }
+
+ if err := cdc.UnmarshalJSON(contents, version); err != nil {
+ return errors.Wrap(err, "error unmarshalling version file")
+ }
+ }
+
+ msg := types.NewMsgConnectionOpenAck(
+ connectionID, counterpartyConnectionID, counterpartyClient, proofTry, proofClient, proofConsensus, proofHeight,
+ consensusHeight, version, clientCtx.GetFromAddress(),
+ )
+
+ svcMsgClientConn := &msgservice.ServiceMsgClientConn{}
+ msgClient := types.NewMsgClient(svcMsgClientConn)
+ _, err = msgClient.ConnectionOpenAck(cmd.Context(), msg)
+ if err != nil {
+ return err
+ }
+
+ return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), svcMsgClientConn.GetMsgs()...)
+ },
+ }
+
+ flags.AddTxFlagsToCmd(cmd)
+
+ return cmd
+}
+
+// NewConnectionOpenConfirmCmd defines the command to confirm to chain B that
+// the connection is open on chain A
+func NewConnectionOpenConfirmCmd() *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "open-confirm [connection-id] [proof-height] [path/to/proof_ack.json]",
+ Short: "confirm to chain B that connection is open on chain A",
+ Long: "Confirm to chain B that connection is open on chain A",
+ Example: fmt.Sprintf(
+ "%s tx %s %s open-confirm [connection-id] [proof-height] [path/to/proof_ack.json]",
+ version.AppName, host.ModuleName, types.SubModuleName,
+ ),
+ Args: cobra.ExactArgs(3),
+ RunE: func(cmd *cobra.Command, args []string) error {
+ clientCtx, err := client.GetClientTxContext(cmd)
+ if err != nil {
+ return err
+ }
+ connectionID := args[0]
+ proofHeight, err := clienttypes.ParseHeight(args[1])
+ if err != nil {
+ return err
+ }
+
+ proofAck, err := utils.ParseProof(clientCtx.LegacyAmino, args[2])
+ if err != nil {
+ return err
+ }
+
+ msg := types.NewMsgConnectionOpenConfirm(
+ connectionID, proofAck, proofHeight, clientCtx.GetFromAddress(),
+ )
+
+ svcMsgClientConn := &msgservice.ServiceMsgClientConn{}
+ msgClient := types.NewMsgClient(svcMsgClientConn)
+ _, err = msgClient.ConnectionOpenConfirm(cmd.Context(), msg)
+ if err != nil {
+ return err
+ }
+
+ return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), svcMsgClientConn.GetMsgs()...)
+ },
+ }
+
+ flags.AddTxFlagsToCmd(cmd)
+
+ return cmd
+}
diff --git a/core/03-connection/client/utils/utils.go b/core/03-connection/client/utils/utils.go
new file mode 100644
index 00000000..e1eb1ce0
--- /dev/null
+++ b/core/03-connection/client/utils/utils.go
@@ -0,0 +1,219 @@
+package utils
+
+import (
+ "context"
+ "fmt"
+ "io/ioutil"
+
+ "github.com/pkg/errors"
+
+ "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/codec"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ clientutils "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/client/utils"
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
+ commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
+ host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+ ibcclient "github.com/cosmos/cosmos-sdk/x/ibc/core/client"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+)
+
+// QueryConnection returns a connection end.
+// If prove is true, it performs an ABCI store query in order to retrieve the merkle proof. Otherwise,
+// it uses the gRPC query client.
+func QueryConnection(
+ clientCtx client.Context, connectionID string, prove bool,
+) (*types.QueryConnectionResponse, error) {
+ if prove {
+ return queryConnectionABCI(clientCtx, connectionID)
+ }
+
+ queryClient := types.NewQueryClient(clientCtx)
+ req := &types.QueryConnectionRequest{
+ ConnectionId: connectionID,
+ }
+
+ return queryClient.Connection(context.Background(), req)
+}
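+
+// Illustrative usage sketch (not part of the package API; the connection
+// identifier is a placeholder): a caller such as a CLI query handler can
+// request the merkle proof by setting prove to true.
+//
+//	res, err := QueryConnection(clientCtx, "connection-0", true)
+//	if err != nil {
+//		return err
+//	}
+//	// res.Connection, res.Proof and res.ProofHeight are now populated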
+
+func queryConnectionABCI(clientCtx client.Context, connectionID string) (*types.QueryConnectionResponse, error) {
+ key := host.ConnectionKey(connectionID)
+
+ value, proofBz, proofHeight, err := ibcclient.QueryTendermintProof(clientCtx, key)
+ if err != nil {
+ return nil, err
+ }
+
+ // check if connection exists
+ if len(value) == 0 {
+ return nil, sdkerrors.Wrap(types.ErrConnectionNotFound, connectionID)
+ }
+
+ cdc := codec.NewProtoCodec(clientCtx.InterfaceRegistry)
+
+ var connection types.ConnectionEnd
+ if err := cdc.UnmarshalBinaryBare(value, &connection); err != nil {
+ return nil, err
+ }
+
+ return types.NewQueryConnectionResponse(connection, proofBz, proofHeight), nil
+}
+
+// QueryClientConnections queries the connection paths registered for a particular client.
+// If prove is true, it performs an ABCI store query in order to retrieve the merkle proof. Otherwise,
+// it uses the gRPC query client.
+func QueryClientConnections(
+ clientCtx client.Context, clientID string, prove bool,
+) (*types.QueryClientConnectionsResponse, error) {
+ if prove {
+ return queryClientConnectionsABCI(clientCtx, clientID)
+ }
+
+ queryClient := types.NewQueryClient(clientCtx)
+ req := &types.QueryClientConnectionsRequest{
+ ClientId: clientID,
+ }
+
+ return queryClient.ClientConnections(context.Background(), req)
+}
+
+func queryClientConnectionsABCI(clientCtx client.Context, clientID string) (*types.QueryClientConnectionsResponse, error) {
+ key := host.ClientConnectionsKey(clientID)
+
+ value, proofBz, proofHeight, err := ibcclient.QueryTendermintProof(clientCtx, key)
+ if err != nil {
+ return nil, err
+ }
+
+ // check if connection paths exist
+ if len(value) == 0 {
+ return nil, sdkerrors.Wrap(types.ErrClientConnectionPathsNotFound, clientID)
+ }
+
+ var paths []string
+ if err := clientCtx.LegacyAmino.UnmarshalBinaryBare(value, &paths); err != nil {
+ return nil, err
+ }
+
+ return types.NewQueryClientConnectionsResponse(paths, proofBz, proofHeight), nil
+}
+
+// QueryConnectionClientState returns the ClientState of a connection end. If
+// prove is true, it performs an ABCI store query in order to retrieve the
+// merkle proof. Otherwise, it uses the gRPC query client.
+func QueryConnectionClientState(
+ clientCtx client.Context, connectionID string, prove bool,
+) (*types.QueryConnectionClientStateResponse, error) {
+
+ queryClient := types.NewQueryClient(clientCtx)
+ req := &types.QueryConnectionClientStateRequest{
+ ConnectionId: connectionID,
+ }
+
+ res, err := queryClient.ConnectionClientState(context.Background(), req)
+ if err != nil {
+ return nil, err
+ }
+
+ if prove {
+ clientStateRes, err := clientutils.QueryClientStateABCI(clientCtx, res.IdentifiedClientState.ClientId)
+ if err != nil {
+ return nil, err
+ }
+
+ // use client state returned from ABCI query in case query height differs
+ identifiedClientState := clienttypes.IdentifiedClientState{
+ ClientId: res.IdentifiedClientState.ClientId,
+ ClientState: clientStateRes.ClientState,
+ }
+
+ res = types.NewQueryConnectionClientStateResponse(identifiedClientState, clientStateRes.Proof, clientStateRes.ProofHeight)
+ }
+
+ return res, nil
+}
+
+// QueryConnectionConsensusState returns the ConsensusState of a connection end. If
+// prove is true, it performs an ABCI store query in order to retrieve the
+// merkle proof. Otherwise, it uses the gRPC query client.
+func QueryConnectionConsensusState(
+ clientCtx client.Context, connectionID string, height clienttypes.Height, prove bool,
+) (*types.QueryConnectionConsensusStateResponse, error) {
+
+ queryClient := types.NewQueryClient(clientCtx)
+ req := &types.QueryConnectionConsensusStateRequest{
+ ConnectionId: connectionID,
+ RevisionNumber: height.RevisionNumber,
+ RevisionHeight: height.RevisionHeight,
+ }
+
+ res, err := queryClient.ConnectionConsensusState(context.Background(), req)
+ if err != nil {
+ return nil, err
+ }
+
+ if prove {
+ consensusStateRes, err := clientutils.QueryConsensusStateABCI(clientCtx, res.ClientId, height)
+ if err != nil {
+ return nil, err
+ }
+
+ res = types.NewQueryConnectionConsensusStateResponse(res.ClientId, consensusStateRes.ConsensusState, height, consensusStateRes.Proof, consensusStateRes.ProofHeight)
+ }
+
+ return res, nil
+}
+
+// ParseClientState unmarshals a cmd input argument from a JSON string to a client state.
+// If the input is not JSON, it looks for a path to the JSON file.
+func ParseClientState(cdc *codec.LegacyAmino, arg string) (exported.ClientState, error) {
+ var clientState exported.ClientState
+ if err := cdc.UnmarshalJSON([]byte(arg), &clientState); err != nil {
+ // check for file path if JSON input is not provided
+ contents, err := ioutil.ReadFile(arg)
+ if err != nil {
+			return nil, errors.New("neither JSON input nor path to .json file were provided")
+ }
+ if err := cdc.UnmarshalJSON(contents, &clientState); err != nil {
+ return nil, errors.Wrap(err, "error unmarshalling client state")
+ }
+ }
+ return clientState, nil
+}
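+
+// Illustrative sketch: the argument may either be the JSON document itself or a
+// path to a .json file, mirroring how the tx commands in this submodule pass
+// their file-path arguments. The path below is a placeholder.
+//
+//	counterpartyClient, err := ParseClientState(clientCtx.LegacyAmino, "path/to/client_state.json")
+//	if err != nil {
+//		return err
+//	}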
+
+// ParsePrefix unmarshals a cmd input argument from a JSON string to a commitment
+// Prefix. If the input is not a JSON, it looks for a path to the JSON file.
+func ParsePrefix(cdc *codec.LegacyAmino, arg string) (commitmenttypes.MerklePrefix, error) {
+ var prefix commitmenttypes.MerklePrefix
+ if err := cdc.UnmarshalJSON([]byte(arg), &prefix); err != nil {
+ // check for file path if JSON input is not provided
+ contents, err := ioutil.ReadFile(arg)
+ if err != nil {
+ return commitmenttypes.MerklePrefix{}, errors.New("neither JSON input nor path to .json file were provided")
+ }
+ if err := cdc.UnmarshalJSON(contents, &prefix); err != nil {
+ return commitmenttypes.MerklePrefix{}, errors.Wrap(err, "error unmarshalling commitment prefix")
+ }
+ }
+ return prefix, nil
+}
+
+// ParseProof unmarshals a cmd input argument from a JSON string to a commitment
+// Proof. If the input is not a JSON, it looks for a path to the JSON file. It
+// then marshals the commitment proof into a proto encoded byte array.
+func ParseProof(cdc *codec.LegacyAmino, arg string) ([]byte, error) {
+ var merkleProof commitmenttypes.MerkleProof
+ if err := cdc.UnmarshalJSON([]byte(arg), &merkleProof); err != nil {
+ // check for file path if JSON input is not provided
+ contents, err := ioutil.ReadFile(arg)
+ if err != nil {
+ return nil, errors.New("neither JSON input nor path to .json file were provided")
+ }
+ if err := cdc.UnmarshalJSON(contents, &merkleProof); err != nil {
+ return nil, fmt.Errorf("error unmarshalling commitment proof: %w", err)
+ }
+ }
+
+ return cdc.MarshalBinaryBare(&merkleProof)
+}
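+
+// Illustrative sketch: ParseProof accepts either inline JSON or a file path and
+// returns proto-encoded proof bytes, which the handshake tx commands then pass
+// into messages such as MsgConnectionOpenTry. The path below is a placeholder.
+//
+//	proofInit, err := ParseProof(clientCtx.LegacyAmino, "path/to/proof_init.json")
+//	if err != nil {
+//		return err
+//	}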
diff --git a/core/03-connection/genesis.go b/core/03-connection/genesis.go
new file mode 100644
index 00000000..a1bb30f1
--- /dev/null
+++ b/core/03-connection/genesis.go
@@ -0,0 +1,28 @@
+package connection
+
+import (
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/keeper"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
+)
+
+// InitGenesis initializes the ibc connection submodule's state from a provided genesis
+// state.
+func InitGenesis(ctx sdk.Context, k keeper.Keeper, gs types.GenesisState) {
+ for _, connection := range gs.Connections {
+ conn := types.NewConnectionEnd(connection.State, connection.ClientId, connection.Counterparty, connection.Versions, connection.DelayPeriod)
+ k.SetConnection(ctx, connection.Id, conn)
+ }
+ for _, connPaths := range gs.ClientConnectionPaths {
+ k.SetClientConnectionPaths(ctx, connPaths.ClientId, connPaths.Paths)
+ }
+ k.SetNextConnectionSequence(ctx, gs.NextConnectionSequence)
+}
+
+// ExportGenesis returns the ibc connection submodule's exported genesis.
+func ExportGenesis(ctx sdk.Context, k keeper.Keeper) types.GenesisState {
+ return types.GenesisState{
+		Connections:            k.GetAllConnections(ctx),
+		ClientConnectionPaths:  k.GetAllClientConnectionPaths(ctx),
+		NextConnectionSequence: k.GetNextConnectionSequence(ctx),
+ }
+}
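+
+// Rough round-trip sketch (assumes an sdk.Context ctx and a connection keeper k
+// are already available): exporting and re-importing the submodule state should
+// preserve connections, client connection paths and the next connection sequence.
+//
+//	gs := ExportGenesis(ctx, k)
+//	InitGenesis(ctx, k, gs)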
diff --git a/core/03-connection/keeper/grpc_query.go b/core/03-connection/keeper/grpc_query.go
new file mode 100644
index 00000000..62b1c00a
--- /dev/null
+++ b/core/03-connection/keeper/grpc_query.go
@@ -0,0 +1,179 @@
+package keeper
+
+import (
+ "context"
+
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+
+ "github.com/cosmos/cosmos-sdk/store/prefix"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ "github.com/cosmos/cosmos-sdk/types/query"
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
+ host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+)
+
+var _ types.QueryServer = Keeper{}
+
+// Connection implements the Query/Connection gRPC method
+func (q Keeper) Connection(c context.Context, req *types.QueryConnectionRequest) (*types.QueryConnectionResponse, error) {
+ if req == nil {
+ return nil, status.Error(codes.InvalidArgument, "empty request")
+ }
+
+ if err := host.ConnectionIdentifierValidator(req.ConnectionId); err != nil {
+ return nil, status.Error(codes.InvalidArgument, err.Error())
+ }
+
+ ctx := sdk.UnwrapSDKContext(c)
+ connection, found := q.GetConnection(ctx, req.ConnectionId)
+ if !found {
+ return nil, status.Error(
+ codes.NotFound,
+ sdkerrors.Wrap(types.ErrConnectionNotFound, req.ConnectionId).Error(),
+ )
+ }
+
+ return &types.QueryConnectionResponse{
+ Connection: &connection,
+ ProofHeight: clienttypes.GetSelfHeight(ctx),
+ }, nil
+}
+
+// Connections implements the Query/Connections gRPC method
+func (q Keeper) Connections(c context.Context, req *types.QueryConnectionsRequest) (*types.QueryConnectionsResponse, error) {
+ if req == nil {
+ return nil, status.Error(codes.InvalidArgument, "empty request")
+ }
+
+ ctx := sdk.UnwrapSDKContext(c)
+
+ connections := []*types.IdentifiedConnection{}
+ store := prefix.NewStore(ctx.KVStore(q.storeKey), []byte(host.KeyConnectionPrefix))
+
+ pageRes, err := query.Paginate(store, req.Pagination, func(key, value []byte) error {
+ var result types.ConnectionEnd
+ if err := q.cdc.UnmarshalBinaryBare(value, &result); err != nil {
+ return err
+ }
+
+ connectionID, err := host.ParseConnectionPath(string(key))
+ if err != nil {
+ return err
+ }
+
+ identifiedConnection := types.NewIdentifiedConnection(connectionID, result)
+ connections = append(connections, &identifiedConnection)
+ return nil
+ })
+
+ if err != nil {
+ return nil, err
+ }
+
+ return &types.QueryConnectionsResponse{
+ Connections: connections,
+ Pagination: pageRes,
+ Height: clienttypes.GetSelfHeight(ctx),
+ }, nil
+}
+
+// ClientConnections implements the Query/ClientConnections gRPC method
+func (q Keeper) ClientConnections(c context.Context, req *types.QueryClientConnectionsRequest) (*types.QueryClientConnectionsResponse, error) {
+ if req == nil {
+ return nil, status.Error(codes.InvalidArgument, "empty request")
+ }
+
+ if err := host.ClientIdentifierValidator(req.ClientId); err != nil {
+ return nil, status.Error(codes.InvalidArgument, err.Error())
+ }
+
+ ctx := sdk.UnwrapSDKContext(c)
+ clientConnectionPaths, found := q.GetClientConnectionPaths(ctx, req.ClientId)
+ if !found {
+ return nil, status.Error(
+ codes.NotFound,
+ sdkerrors.Wrap(types.ErrClientConnectionPathsNotFound, req.ClientId).Error(),
+ )
+ }
+
+ return &types.QueryClientConnectionsResponse{
+ ConnectionPaths: clientConnectionPaths,
+ ProofHeight: clienttypes.GetSelfHeight(ctx),
+ }, nil
+}
+
+// ConnectionClientState implements the Query/ConnectionClientState gRPC method
+func (q Keeper) ConnectionClientState(c context.Context, req *types.QueryConnectionClientStateRequest) (*types.QueryConnectionClientStateResponse, error) {
+ if req == nil {
+ return nil, status.Error(codes.InvalidArgument, "empty request")
+ }
+
+ if err := host.ConnectionIdentifierValidator(req.ConnectionId); err != nil {
+ return nil, status.Error(codes.InvalidArgument, err.Error())
+ }
+
+ ctx := sdk.UnwrapSDKContext(c)
+
+ connection, found := q.GetConnection(ctx, req.ConnectionId)
+ if !found {
+ return nil, status.Error(
+ codes.NotFound,
+ sdkerrors.Wrapf(types.ErrConnectionNotFound, "connection-id: %s", req.ConnectionId).Error(),
+ )
+ }
+
+ clientState, found := q.clientKeeper.GetClientState(ctx, connection.ClientId)
+ if !found {
+ return nil, status.Error(
+ codes.NotFound,
+ sdkerrors.Wrapf(clienttypes.ErrClientNotFound, "client-id: %s", connection.ClientId).Error(),
+ )
+ }
+
+ identifiedClientState := clienttypes.NewIdentifiedClientState(connection.ClientId, clientState)
+
+ height := clienttypes.GetSelfHeight(ctx)
+ return types.NewQueryConnectionClientStateResponse(identifiedClientState, nil, height), nil
+}
+
+// ConnectionConsensusState implements the Query/ConnectionConsensusState gRPC method
+func (q Keeper) ConnectionConsensusState(c context.Context, req *types.QueryConnectionConsensusStateRequest) (*types.QueryConnectionConsensusStateResponse, error) {
+ if req == nil {
+ return nil, status.Error(codes.InvalidArgument, "empty request")
+ }
+
+ if err := host.ConnectionIdentifierValidator(req.ConnectionId); err != nil {
+ return nil, status.Error(codes.InvalidArgument, err.Error())
+ }
+
+ ctx := sdk.UnwrapSDKContext(c)
+
+ connection, found := q.GetConnection(ctx, req.ConnectionId)
+ if !found {
+ return nil, status.Error(
+ codes.NotFound,
+ sdkerrors.Wrapf(types.ErrConnectionNotFound, "connection-id: %s", req.ConnectionId).Error(),
+ )
+ }
+
+ height := clienttypes.NewHeight(req.RevisionNumber, req.RevisionHeight)
+ consensusState, found := q.clientKeeper.GetClientConsensusState(ctx, connection.ClientId, height)
+ if !found {
+ return nil, status.Error(
+ codes.NotFound,
+ sdkerrors.Wrapf(clienttypes.ErrConsensusStateNotFound, "client-id: %s", connection.ClientId).Error(),
+ )
+ }
+
+ anyConsensusState, err := clienttypes.PackConsensusState(consensusState)
+ if err != nil {
+ return nil, status.Error(codes.Internal, err.Error())
+ }
+
+ proofHeight := clienttypes.GetSelfHeight(ctx)
+ return types.NewQueryConnectionConsensusStateResponse(connection.ClientId, anyConsensusState, height, nil, proofHeight), nil
+}
diff --git a/core/03-connection/keeper/grpc_query_test.go b/core/03-connection/keeper/grpc_query_test.go
new file mode 100644
index 00000000..14fdb425
--- /dev/null
+++ b/core/03-connection/keeper/grpc_query_test.go
@@ -0,0 +1,420 @@
+package keeper_test
+
+import (
+ "fmt"
+
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/types/query"
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
+ channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+)
+
+func (suite *KeeperTestSuite) TestQueryConnection() {
+ var (
+ req *types.QueryConnectionRequest
+ expConnection types.ConnectionEnd
+ )
+
+ testCases := []struct {
+ msg string
+ malleate func()
+ expPass bool
+ }{
+ {
+ "empty request",
+ func() {
+ req = nil
+ },
+ false,
+ },
+ {"invalid connectionID",
+ func() {
+ req = &types.QueryConnectionRequest{}
+ },
+ false,
+ },
+ {"connection not found",
+ func() {
+ req = &types.QueryConnectionRequest{
+ ConnectionId: ibctesting.InvalidID,
+ }
+ },
+ false,
+ },
+ {
+ "success",
+ func() {
+ clientA, clientB := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+ connA := suite.chainA.GetFirstTestConnection(clientA, clientB)
+ connB := suite.chainB.GetFirstTestConnection(clientB, clientA)
+
+ counterparty := types.NewCounterparty(clientB, connB.ID, suite.chainB.GetPrefix())
+ expConnection = types.NewConnectionEnd(types.INIT, clientA, counterparty, types.ExportedVersionsToProto(types.GetCompatibleVersions()), 500)
+ suite.chainA.App.IBCKeeper.ConnectionKeeper.SetConnection(suite.chainA.GetContext(), connA.ID, expConnection)
+
+ req = &types.QueryConnectionRequest{
+ ConnectionId: connA.ID,
+ }
+ },
+ true,
+ },
+ }
+
+ for _, tc := range testCases {
+ suite.Run(fmt.Sprintf("Case %s", tc.msg), func() {
+ suite.SetupTest() // reset
+
+ tc.malleate()
+ ctx := sdk.WrapSDKContext(suite.chainA.GetContext())
+
+ res, err := suite.chainA.QueryServer.Connection(ctx, req)
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ suite.Require().NotNil(res)
+ suite.Require().Equal(&expConnection, res.Connection)
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+}
+
+func (suite *KeeperTestSuite) TestQueryConnections() {
+ var (
+ req *types.QueryConnectionsRequest
+ expConnections = []*types.IdentifiedConnection{}
+ )
+
+ testCases := []struct {
+ msg string
+ malleate func()
+ expPass bool
+ }{
+ {
+ "empty request",
+ func() {
+ req = nil
+ },
+ false,
+ },
+ {
+ "empty pagination",
+ func() {
+ req = &types.QueryConnectionsRequest{}
+ },
+ true,
+ },
+ {
+ "success",
+ func() {
+ clientA, clientB, connA0, connB0 := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
+ clientA1, clientB1, connA1, connB1 := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
+ connA2, _, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB)
+ suite.Require().NoError(err)
+
+ counterparty1 := types.NewCounterparty(clientB, connB0.ID, suite.chainB.GetPrefix())
+ counterparty2 := types.NewCounterparty(clientB1, connB1.ID, suite.chainB.GetPrefix())
+ // counterparty connection id is blank after open init
+ counterparty3 := types.NewCounterparty(clientB, "", suite.chainB.GetPrefix())
+
+ conn1 := types.NewConnectionEnd(types.OPEN, clientA, counterparty1, types.ExportedVersionsToProto(types.GetCompatibleVersions()), 0)
+ conn2 := types.NewConnectionEnd(types.OPEN, clientA1, counterparty2, types.ExportedVersionsToProto(types.GetCompatibleVersions()), 0)
+ conn3 := types.NewConnectionEnd(types.INIT, clientA, counterparty3, types.ExportedVersionsToProto(types.GetCompatibleVersions()), 0)
+
+ iconn1 := types.NewIdentifiedConnection(connA0.ID, conn1)
+ iconn2 := types.NewIdentifiedConnection(connA1.ID, conn2)
+ iconn3 := types.NewIdentifiedConnection(connA2.ID, conn3)
+
+ expConnections = []*types.IdentifiedConnection{&iconn1, &iconn2, &iconn3}
+
+ req = &types.QueryConnectionsRequest{
+ Pagination: &query.PageRequest{
+ Limit: 3,
+ CountTotal: true,
+ },
+ }
+ },
+ true,
+ },
+ }
+
+ for _, tc := range testCases {
+ suite.Run(fmt.Sprintf("Case %s", tc.msg), func() {
+ suite.SetupTest() // reset
+
+ tc.malleate()
+ ctx := sdk.WrapSDKContext(suite.chainA.GetContext())
+
+ res, err := suite.chainA.QueryServer.Connections(ctx, req)
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ suite.Require().NotNil(res)
+ suite.Require().Equal(expConnections, res.Connections)
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+}
+
+func (suite *KeeperTestSuite) TestQueryClientConnections() {
+ var (
+ req *types.QueryClientConnectionsRequest
+ expPaths []string
+ )
+
+ testCases := []struct {
+ msg string
+ malleate func()
+ expPass bool
+ }{
+ {
+ "empty request",
+ func() {
+ req = nil
+ },
+ false,
+ },
+		{"invalid clientID",
+ func() {
+ req = &types.QueryClientConnectionsRequest{}
+ },
+ false,
+ },
+		{"client connection paths not found",
+ func() {
+ req = &types.QueryClientConnectionsRequest{
+ ClientId: ibctesting.InvalidID,
+ }
+ },
+ false,
+ },
+ {
+ "success",
+ func() {
+ clientA, clientB, connA0, _ := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
+ connA1, _ := suite.coordinator.CreateConnection(suite.chainA, suite.chainB, clientA, clientB)
+ expPaths = []string{connA0.ID, connA1.ID}
+ suite.chainA.App.IBCKeeper.ConnectionKeeper.SetClientConnectionPaths(suite.chainA.GetContext(), clientA, expPaths)
+
+ req = &types.QueryClientConnectionsRequest{
+ ClientId: clientA,
+ }
+ },
+ true,
+ },
+ }
+
+ for _, tc := range testCases {
+ suite.Run(fmt.Sprintf("Case %s", tc.msg), func() {
+ suite.SetupTest() // reset
+
+ tc.malleate()
+ ctx := sdk.WrapSDKContext(suite.chainA.GetContext())
+
+ res, err := suite.chainA.QueryServer.ClientConnections(ctx, req)
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ suite.Require().NotNil(res)
+ suite.Require().Equal(expPaths, res.ConnectionPaths)
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+}
+
+func (suite *KeeperTestSuite) TestQueryConnectionClientState() {
+ var (
+ req *types.QueryConnectionClientStateRequest
+ expIdentifiedClientState clienttypes.IdentifiedClientState
+ )
+
+ testCases := []struct {
+ msg string
+ malleate func()
+ expPass bool
+ }{
+ {
+ "empty request",
+ func() {
+ req = nil
+ },
+ false,
+ },
+ {
+ "invalid connection ID",
+ func() {
+ req = &types.QueryConnectionClientStateRequest{
+ ConnectionId: "",
+ }
+ },
+ false,
+ },
+ {
+ "connection not found",
+ func() {
+ req = &types.QueryConnectionClientStateRequest{
+ ConnectionId: "test-connection-id",
+ }
+ },
+ false,
+ },
+ {
+ "client state not found",
+ func() {
+ _, _, connA, _, _, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
+
+ // set connection to empty so clientID is empty
+ suite.chainA.App.IBCKeeper.ConnectionKeeper.SetConnection(suite.chainA.GetContext(), connA.ID, types.ConnectionEnd{})
+
+ req = &types.QueryConnectionClientStateRequest{
+ ConnectionId: connA.ID,
+ }
+ }, false,
+ },
+ {
+ "success",
+ func() {
+ clientA, _, connA, _ := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
+
+ expClientState := suite.chainA.GetClientState(clientA)
+ expIdentifiedClientState = clienttypes.NewIdentifiedClientState(clientA, expClientState)
+
+ req = &types.QueryConnectionClientStateRequest{
+ ConnectionId: connA.ID,
+ }
+ },
+ true,
+ },
+ }
+
+ for _, tc := range testCases {
+ suite.Run(fmt.Sprintf("Case %s", tc.msg), func() {
+ suite.SetupTest() // reset
+
+ tc.malleate()
+ ctx := sdk.WrapSDKContext(suite.chainA.GetContext())
+
+ res, err := suite.chainA.QueryServer.ConnectionClientState(ctx, req)
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ suite.Require().NotNil(res)
+ suite.Require().Equal(&expIdentifiedClientState, res.IdentifiedClientState)
+
+ // ensure UnpackInterfaces is defined
+ cachedValue := res.IdentifiedClientState.ClientState.GetCachedValue()
+ suite.Require().NotNil(cachedValue)
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+}
+
+func (suite *KeeperTestSuite) TestQueryConnectionConsensusState() {
+ var (
+ req *types.QueryConnectionConsensusStateRequest
+ expConsensusState exported.ConsensusState
+ expClientID string
+ )
+
+ testCases := []struct {
+ msg string
+ malleate func()
+ expPass bool
+ }{
+ {
+ "empty request",
+ func() {
+ req = nil
+ },
+ false,
+ },
+ {
+ "invalid connection ID",
+ func() {
+ req = &types.QueryConnectionConsensusStateRequest{
+ ConnectionId: "",
+ RevisionNumber: 0,
+ RevisionHeight: 1,
+ }
+ },
+ false,
+ },
+ {
+ "connection not found",
+ func() {
+ req = &types.QueryConnectionConsensusStateRequest{
+ ConnectionId: "test-connection-id",
+ RevisionNumber: 0,
+ RevisionHeight: 1,
+ }
+ },
+ false,
+ },
+ {
+ "consensus state not found",
+ func() {
+ _, _, connA, _, _, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
+
+ req = &types.QueryConnectionConsensusStateRequest{
+ ConnectionId: connA.ID,
+ RevisionNumber: 0,
+ RevisionHeight: uint64(suite.chainA.GetContext().BlockHeight()), // use current height
+ }
+ }, false,
+ },
+ {
+ "success",
+ func() {
+ clientA, _, connA, _ := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
+
+ clientState := suite.chainA.GetClientState(clientA)
+ expConsensusState, _ = suite.chainA.GetConsensusState(clientA, clientState.GetLatestHeight())
+ suite.Require().NotNil(expConsensusState)
+ expClientID = clientA
+
+ req = &types.QueryConnectionConsensusStateRequest{
+ ConnectionId: connA.ID,
+ RevisionNumber: clientState.GetLatestHeight().GetRevisionNumber(),
+ RevisionHeight: clientState.GetLatestHeight().GetRevisionHeight(),
+ }
+ },
+ true,
+ },
+ }
+
+ for _, tc := range testCases {
+ suite.Run(fmt.Sprintf("Case %s", tc.msg), func() {
+ suite.SetupTest() // reset
+
+ tc.malleate()
+ ctx := sdk.WrapSDKContext(suite.chainA.GetContext())
+
+ res, err := suite.chainA.QueryServer.ConnectionConsensusState(ctx, req)
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ suite.Require().NotNil(res)
+ consensusState, err := clienttypes.UnpackConsensusState(res.ConsensusState)
+ suite.Require().NoError(err)
+ suite.Require().Equal(expConsensusState, consensusState)
+ suite.Require().Equal(expClientID, res.ClientId)
+
+ // ensure UnpackInterfaces is defined
+ cachedValue := res.ConsensusState.GetCachedValue()
+ suite.Require().NotNil(cachedValue)
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+}
diff --git a/core/03-connection/keeper/handshake.go b/core/03-connection/keeper/handshake.go
new file mode 100644
index 00000000..b8f7466f
--- /dev/null
+++ b/core/03-connection/keeper/handshake.go
@@ -0,0 +1,342 @@
+package keeper
+
+import (
+ "bytes"
+
+ "github.com/gogo/protobuf/proto"
+
+ "github.com/cosmos/cosmos-sdk/telemetry"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
+ commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+)
+
+// ConnOpenInit initialises a connection attempt on chain A. The generated connection identifier
+// is returned.
+//
+// NOTE: Msg validation verifies the supplied identifiers and ensures that the counterparty
+// connection identifier is empty.
+func (k Keeper) ConnOpenInit(
+ ctx sdk.Context,
+ clientID string,
+ counterparty types.Counterparty, // counterpartyPrefix, counterpartyClientIdentifier
+ version *types.Version,
+ delayPeriod uint64,
+) (string, error) {
+ versions := types.GetCompatibleVersions()
+ if version != nil {
+ if !types.IsSupportedVersion(version) {
+ return "", sdkerrors.Wrap(types.ErrInvalidVersion, "version is not supported")
+ }
+
+ versions = []exported.Version{version}
+ }
+
+ // connection defines chain A's ConnectionEnd
+ connectionID := k.GenerateConnectionIdentifier(ctx)
+ connection := types.NewConnectionEnd(types.INIT, clientID, counterparty, types.ExportedVersionsToProto(versions), delayPeriod)
+ k.SetConnection(ctx, connectionID, connection)
+
+ if err := k.addConnectionToClient(ctx, clientID, connectionID); err != nil {
+ return "", err
+ }
+
+ k.Logger(ctx).Info("connection state updated", "connection-id", connectionID, "previous-state", "NONE", "new-state", "INIT")
+
+ defer func() {
+ telemetry.IncrCounter(1, "ibc", "connection", "open-init")
+ }()
+
+ return connectionID, nil
+}
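+
+// Illustrative keeper-level sketch (mirrors the keeper test suite; identifiers
+// and the commitment prefix are placeholders). A nil version selects the
+// compatible versions and a zero delay period disables the packet delay
+// requirement.
+//
+//	counterparty := types.NewCounterparty(counterpartyClientID, "", counterpartyPrefix)
+//	connectionID, err := k.ConnOpenInit(ctx, clientID, counterparty, nil, 0)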
+
+// ConnOpenTry relays notice of a connection attempt on chain A to chain B (this
+// code is executed on chain B).
+//
+// NOTE:
+// - Here chain A acts as the counterparty
+// - Identifiers are checked on msg validation
+func (k Keeper) ConnOpenTry(
+ ctx sdk.Context,
+ previousConnectionID string, // previousIdentifier
+ counterparty types.Counterparty, // counterpartyConnectionIdentifier, counterpartyPrefix and counterpartyClientIdentifier
+ delayPeriod uint64,
+ clientID string, // clientID of chainA
+ clientState exported.ClientState, // clientState that chainA has for chainB
+ counterpartyVersions []exported.Version, // supported versions of chain A
+ proofInit []byte, // proof that chainA stored connectionEnd in state (on ConnOpenInit)
+ proofClient []byte, // proof that chainA stored a light client of chainB
+ proofConsensus []byte, // proof that chainA stored chainB's consensus state at consensus height
+ proofHeight exported.Height, // height at which relayer constructs proof of A storing connectionEnd in state
+ consensusHeight exported.Height, // latest height of chain B which chain A has stored in its chain B client
+) (string, error) {
+ var (
+ connectionID string
+ previousConnection types.ConnectionEnd
+ found bool
+ )
+
+	// a non-empty previous connection identifier indicates continuing a previous connection handshake
+ if previousConnectionID != "" {
+ // ensure that the previous connection exists
+ previousConnection, found = k.GetConnection(ctx, previousConnectionID)
+ if !found {
+ return "", sdkerrors.Wrapf(types.ErrConnectionNotFound, "previous connection does not exist for supplied previous connectionID %s", previousConnectionID)
+ }
+
+		// Ensure that the existing connection's counterparty is chainA and that the
+		// connection is in the INIT state.
+		// Check that the versions of the initialized connection match the compatible
+		// versions for this chain.
+		// Ensure that the existing connection's delay period matches the desired delay period.
+ if !(previousConnection.Counterparty.ConnectionId == "" &&
+ bytes.Equal(previousConnection.Counterparty.Prefix.Bytes(), counterparty.Prefix.Bytes()) &&
+ previousConnection.ClientId == clientID &&
+ previousConnection.Counterparty.ClientId == counterparty.ClientId &&
+ previousConnection.DelayPeriod == delayPeriod) {
+ return "", sdkerrors.Wrap(types.ErrInvalidConnection, "connection fields mismatch previous connection fields")
+ }
+
+		if previousConnection.State != types.INIT {
+ return "", sdkerrors.Wrapf(types.ErrInvalidConnectionState, "previous connection state is in state %s, expected INIT", previousConnection.State)
+ }
+
+ // continue with previous connection
+ connectionID = previousConnectionID
+
+ } else {
+ // generate a new connection
+ connectionID = k.GenerateConnectionIdentifier(ctx)
+ }
+
+ selfHeight := clienttypes.GetSelfHeight(ctx)
+ if consensusHeight.GTE(selfHeight) {
+ return "", sdkerrors.Wrapf(
+ sdkerrors.ErrInvalidHeight,
+ "consensus height is greater than or equal to the current block height (%s >= %s)", consensusHeight, selfHeight,
+ )
+ }
+
+ // validate client parameters of a chainB client stored on chainA
+ if err := k.clientKeeper.ValidateSelfClient(ctx, clientState); err != nil {
+ return "", err
+ }
+
+ expectedConsensusState, found := k.clientKeeper.GetSelfConsensusState(ctx, consensusHeight)
+ if !found {
+ return "", sdkerrors.Wrap(clienttypes.ErrSelfConsensusStateNotFound, consensusHeight.String())
+ }
+
+ // expectedConnection defines Chain A's ConnectionEnd
+	// NOTE: chain A's counterparty is chain B (i.e. where this code is executed)
+ // NOTE: chainA and chainB must have the same delay period
+ prefix := k.GetCommitmentPrefix()
+ expectedCounterparty := types.NewCounterparty(clientID, "", commitmenttypes.NewMerklePrefix(prefix.Bytes()))
+ expectedConnection := types.NewConnectionEnd(types.INIT, counterparty.ClientId, expectedCounterparty, types.ExportedVersionsToProto(counterpartyVersions), delayPeriod)
+
+ supportedVersions := types.GetCompatibleVersions()
+ if len(previousConnection.Versions) != 0 {
+ supportedVersions = previousConnection.GetVersions()
+ }
+
+ // chain B picks a version from Chain A's available versions that is compatible
+ // with Chain B's supported IBC versions. PickVersion will select the intersection
+ // of the supported versions and the counterparty versions.
+ version, err := types.PickVersion(supportedVersions, counterpartyVersions)
+ if err != nil {
+ return "", err
+ }
+
+ // connection defines chain B's ConnectionEnd
+ connection := types.NewConnectionEnd(types.TRYOPEN, clientID, counterparty, []*types.Version{version}, delayPeriod)
+
+ // Check that ChainA committed expectedConnectionEnd to its state
+ if err := k.VerifyConnectionState(
+ ctx, connection, proofHeight, proofInit, counterparty.ConnectionId,
+ expectedConnection,
+ ); err != nil {
+ return "", err
+ }
+
+ // Check that ChainA stored the clientState provided in the msg
+ if err := k.VerifyClientState(ctx, connection, proofHeight, proofClient, clientState); err != nil {
+ return "", err
+ }
+
+ // Check that ChainA stored the correct ConsensusState of chainB at the given consensusHeight
+ if err := k.VerifyClientConsensusState(
+ ctx, connection, proofHeight, consensusHeight, proofConsensus, expectedConsensusState,
+ ); err != nil {
+ return "", err
+ }
+
+ // store connection in chainB state
+ if err := k.addConnectionToClient(ctx, clientID, connectionID); err != nil {
+ return "", sdkerrors.Wrapf(err, "failed to add connection with ID %s to client with ID %s", connectionID, clientID)
+ }
+
+ k.SetConnection(ctx, connectionID, connection)
+ k.Logger(ctx).Info("connection state updated", "connection-id", connectionID, "previous-state", previousConnection.State.String(), "new-state", "TRYOPEN")
+
+ defer func() {
+ telemetry.IncrCounter(1, "ibc", "connection", "open-try")
+ }()
+
+ return connectionID, nil
+}
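+
+// Illustrative sketch of the chainB-side call (argument names follow the
+// parameter comments above; the proofs and heights would be produced by a
+// relayer querying chainA). An empty previous connection identifier starts a
+// fresh handshake rather than continuing a crossing-hello one.
+//
+//	connectionID, err := k.ConnOpenTry(
+//		ctx, "", counterparty, delayPeriod, clientID, counterpartyClientState,
+//		counterpartyVersions, proofInit, proofClient, proofConsensus,
+//		proofHeight, consensusHeight,
+//	)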
+
+// ConnOpenAck relays acceptance of a connection open attempt from chain B back
+// to chain A (this code is executed on chain A).
+//
+// NOTE: Identifiers are checked on msg validation.
+func (k Keeper) ConnOpenAck(
+ ctx sdk.Context,
+ connectionID string,
+ clientState exported.ClientState, // client state for chainA on chainB
+ version *types.Version, // version that ChainB chose in ConnOpenTry
+ counterpartyConnectionID string,
+ proofTry []byte, // proof that connectionEnd was added to ChainB state in ConnOpenTry
+ proofClient []byte, // proof of client state on chainB for chainA
+ proofConsensus []byte, // proof that chainB has stored ConsensusState of chainA on its client
+ proofHeight exported.Height, // height that relayer constructed proofTry
+ consensusHeight exported.Height, // latest height of chainA that chainB has stored on its chainA client
+) error {
+ // Check that chainB client hasn't stored invalid height
+ selfHeight := clienttypes.GetSelfHeight(ctx)
+ if consensusHeight.GTE(selfHeight) {
+ return sdkerrors.Wrapf(
+ sdkerrors.ErrInvalidHeight,
+ "consensus height is greater than or equal to the current block height (%s >= %s)", consensusHeight, selfHeight,
+ )
+ }
+
+ // Retrieve connection
+ connection, found := k.GetConnection(ctx, connectionID)
+ if !found {
+ return sdkerrors.Wrap(types.ErrConnectionNotFound, connectionID)
+ }
+
+ // Verify the provided version against the previously set connection state
+ switch {
+ // connection on ChainA must be in INIT or TRYOPEN
+ case connection.State != types.INIT && connection.State != types.TRYOPEN:
+ return sdkerrors.Wrapf(
+ types.ErrInvalidConnectionState,
+ "connection state is not INIT or TRYOPEN (got %s)", connection.State.String(),
+ )
+
+	// if the connection is in INIT then the provided version must be supported
+ case connection.State == types.INIT && !types.IsSupportedVersion(version):
+ return sdkerrors.Wrapf(
+ types.ErrInvalidConnectionState,
+ "connection state is in INIT but the provided version is not supported %s", version,
+ )
+
+ // if the connection is in TRYOPEN then the version must be the only set version in the
+	// retrieved connection state.
+ case connection.State == types.TRYOPEN && (len(connection.Versions) != 1 || !proto.Equal(connection.Versions[0], version)):
+ return sdkerrors.Wrapf(
+ types.ErrInvalidConnectionState,
+ "connection state is in TRYOPEN but the provided version (%s) is not set in the previous connection versions %s", version, connection.Versions,
+ )
+ }
+
+ // validate client parameters of a chainA client stored on chainB
+ if err := k.clientKeeper.ValidateSelfClient(ctx, clientState); err != nil {
+ return err
+ }
+
+	// Retrieve chainA's consensus state at consensusHeight
+ expectedConsensusState, found := k.clientKeeper.GetSelfConsensusState(ctx, consensusHeight)
+ if !found {
+ return clienttypes.ErrSelfConsensusStateNotFound
+ }
+
+ prefix := k.GetCommitmentPrefix()
+ expectedCounterparty := types.NewCounterparty(connection.ClientId, connectionID, commitmenttypes.NewMerklePrefix(prefix.Bytes()))
+ expectedConnection := types.NewConnectionEnd(types.TRYOPEN, connection.Counterparty.ClientId, expectedCounterparty, []*types.Version{version}, connection.DelayPeriod)
+
+ // Ensure that ChainB stored expected connectionEnd in its state during ConnOpenTry
+ if err := k.VerifyConnectionState(
+ ctx, connection, proofHeight, proofTry, counterpartyConnectionID,
+ expectedConnection,
+ ); err != nil {
+ return err
+ }
+
+ // Check that ChainB stored the clientState provided in the msg
+ if err := k.VerifyClientState(ctx, connection, proofHeight, proofClient, clientState); err != nil {
+ return err
+ }
+
+ // Ensure that ChainB has stored the correct ConsensusState for chainA at the consensusHeight
+ if err := k.VerifyClientConsensusState(
+ ctx, connection, proofHeight, consensusHeight, proofConsensus, expectedConsensusState,
+ ); err != nil {
+ return err
+ }
+
+ k.Logger(ctx).Info("connection state updated", "connection-id", connectionID, "previous-state", connection.State.String(), "new-state", "OPEN")
+
+ defer func() {
+ telemetry.IncrCounter(1, "ibc", "connection", "open-ack")
+ }()
+
+ // Update connection state to Open
+ connection.State = types.OPEN
+ connection.Versions = []*types.Version{version}
+ connection.Counterparty.ConnectionId = counterpartyConnectionID
+ k.SetConnection(ctx, connectionID, connection)
+ return nil
+}
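+
+// Illustrative sketch of the chainA-side call (the version must be the single
+// version chainB selected during ConnOpenTry; proofs are relayer-supplied):
+//
+//	err := k.ConnOpenAck(
+//		ctx, connectionID, counterpartyClientState, version, counterpartyConnectionID,
+//		proofTry, proofClient, proofConsensus, proofHeight, consensusHeight,
+//	)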
+
+// ConnOpenConfirm confirms opening of a connection on chain A to chain B, after
+// which the connection is open on both chains (this code is executed on chain B).
+//
+// NOTE: Identifiers are checked on msg validation.
+func (k Keeper) ConnOpenConfirm(
+ ctx sdk.Context,
+ connectionID string,
+ proofAck []byte, // proof that connection opened on ChainA during ConnOpenAck
+ proofHeight exported.Height, // height that relayer constructed proofAck
+) error {
+ // Retrieve connection
+ connection, found := k.GetConnection(ctx, connectionID)
+ if !found {
+ return sdkerrors.Wrap(types.ErrConnectionNotFound, connectionID)
+ }
+
+	// Check that the connection state on ChainB is TRYOPEN
+ if connection.State != types.TRYOPEN {
+ return sdkerrors.Wrapf(
+ types.ErrInvalidConnectionState,
+ "connection state is not TRYOPEN (got %s)", connection.State.String(),
+ )
+ }
+
+ prefix := k.GetCommitmentPrefix()
+ expectedCounterparty := types.NewCounterparty(connection.ClientId, connectionID, commitmenttypes.NewMerklePrefix(prefix.Bytes()))
+ expectedConnection := types.NewConnectionEnd(types.OPEN, connection.Counterparty.ClientId, expectedCounterparty, connection.Versions, connection.DelayPeriod)
+
+ // Check that connection on ChainA is open
+ if err := k.VerifyConnectionState(
+ ctx, connection, proofHeight, proofAck, connection.Counterparty.ConnectionId,
+ expectedConnection,
+ ); err != nil {
+ return err
+ }
+
+ // Update ChainB's connection to Open
+ connection.State = types.OPEN
+ k.SetConnection(ctx, connectionID, connection)
+ k.Logger(ctx).Info("connection state updated", "connection-id", connectionID, "previous-state", "TRYOPEN", "new-state", "OPEN")
+
+ defer func() {
+ telemetry.IncrCounter(1, "ibc", "connection", "open-confirm")
+ }()
+
+ return nil
+}
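+
+// Illustrative sketch of the final chainB-side call:
+//
+//	err := k.ConnOpenConfirm(ctx, connectionID, proofAck, proofHeight)
+//
+// Taken together, a successful handshake (happy path, each step relayed between
+// the chains) moves the two connection ends through the following states:
+//
+//	chainA: UNINITIALIZED -> INIT (ConnOpenInit)    -> OPEN (ConnOpenAck)
+//	chainB: UNINITIALIZED -> TRYOPEN (ConnOpenTry)  -> OPEN (ConnOpenConfirm)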
diff --git a/core/03-connection/keeper/handshake_test.go b/core/03-connection/keeper/handshake_test.go
new file mode 100644
index 00000000..101c061a
--- /dev/null
+++ b/core/03-connection/keeper/handshake_test.go
@@ -0,0 +1,701 @@
+package keeper_test
+
+import (
+ "time"
+
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
+ host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
+)
+
+// TestConnOpenInit - chainA initializes (INIT state) a connection with
+// chainB, which is still UNINITIALIZED
+func (suite *KeeperTestSuite) TestConnOpenInit() {
+ var (
+ clientA string
+ clientB string
+ version *types.Version
+ delayPeriod uint64
+ emptyConnBID bool
+ )
+
+ testCases := []struct {
+ msg string
+ malleate func()
+ expPass bool
+ }{
+ {"success", func() {
+ clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+ }, true},
+ {"success with empty counterparty identifier", func() {
+ clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+ emptyConnBID = true
+ }, true},
+ {"success with non empty version", func() {
+ clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+ version = types.ExportedVersionsToProto(types.GetCompatibleVersions())[0]
+ }, true},
+ {"success with non zero delayPeriod", func() {
+ clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+ delayPeriod = uint64(time.Hour.Nanoseconds())
+ }, true},
+
+ {"invalid version", func() {
+ clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+ version = &types.Version{}
+ }, false},
+ {"couldn't add connection to client", func() {
+ clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+ // set clientA to invalid client identifier
+ clientA = "clientidentifier"
+ }, false},
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+ suite.Run(tc.msg, func() {
+ suite.SetupTest() // reset
+ emptyConnBID = false // must be explicitly changed
+ version = nil // must be explicitly changed
+
+ tc.malleate()
+
+ connB := suite.chainB.GetFirstTestConnection(clientB, clientA)
+ if emptyConnBID {
+ connB.ID = ""
+ }
+ counterparty := types.NewCounterparty(clientB, connB.ID, suite.chainB.GetPrefix())
+
+ connectionID, err := suite.chainA.App.IBCKeeper.ConnectionKeeper.ConnOpenInit(suite.chainA.GetContext(), clientA, counterparty, version, delayPeriod)
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ suite.Require().Equal(types.FormatConnectionIdentifier(0), connectionID)
+ } else {
+ suite.Require().Error(err)
+ suite.Require().Equal("", connectionID)
+ }
+ })
+ }
+}
+
+// TestConnOpenTry - chainB calls ConnOpenTry to verify the state of
+// connection on chainA is INIT
+func (suite *KeeperTestSuite) TestConnOpenTry() {
+ var (
+ clientA string
+ clientB string
+ delayPeriod uint64
+ previousConnectionID string
+ versions []exported.Version
+ consensusHeight exported.Height
+ counterpartyClient exported.ClientState
+ )
+
+ testCases := []struct {
+ msg string
+ malleate func()
+ expPass bool
+ }{
+ {"success", func() {
+ clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+ _, _, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB)
+ suite.Require().NoError(err)
+
+ // retrieve client state of chainA to pass as counterpartyClient
+ counterpartyClient = suite.chainA.GetClientState(clientA)
+ }, true},
+ {"success with crossing hellos", func() {
+ clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+ _, connB, err := suite.coordinator.ConnOpenInitOnBothChains(suite.chainA, suite.chainB, clientA, clientB)
+ suite.Require().NoError(err)
+
+ // retrieve client state of chainA to pass as counterpartyClient
+ counterpartyClient = suite.chainA.GetClientState(clientA)
+
+ previousConnectionID = connB.ID
+ }, true},
+ {"success with delay period", func() {
+ clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+ connA, _, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB)
+ suite.Require().NoError(err)
+
+ delayPeriod = uint64(time.Hour.Nanoseconds())
+
+ // set delay period on counterparty to non-zero value
+ conn := suite.chainA.GetConnection(connA)
+ conn.DelayPeriod = delayPeriod
+ suite.chainA.App.IBCKeeper.ConnectionKeeper.SetConnection(suite.chainA.GetContext(), connA.ID, conn)
+
+ // commit in order for proof to return correct value
+ suite.coordinator.CommitBlock(suite.chainA)
+ suite.coordinator.UpdateClient(suite.chainB, suite.chainA, clientB, exported.Tendermint)
+
+ // retrieve client state of chainA to pass as counterpartyClient
+ counterpartyClient = suite.chainA.GetClientState(clientA)
+ }, true},
+ {"invalid counterparty client", func() {
+ clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+ _, _, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB)
+ suite.Require().NoError(err)
+
+			// retrieve client state of chainA to pass as counterpartyClient
+ counterpartyClient = suite.chainA.GetClientState(clientA)
+
+			// Set an invalid client for chainB on chainA
+ tmClient, ok := counterpartyClient.(*ibctmtypes.ClientState)
+ suite.Require().True(ok)
+ tmClient.ChainId = "wrongchainid"
+
+ suite.chainA.App.IBCKeeper.ClientKeeper.SetClientState(suite.chainA.GetContext(), clientA, tmClient)
+ }, false},
+ {"consensus height >= latest height", func() {
+ clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+ _, _, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB)
+ suite.Require().NoError(err)
+
+ // retrieve client state of chainA to pass as counterpartyClient
+ counterpartyClient = suite.chainA.GetClientState(clientA)
+
+ consensusHeight = clienttypes.GetSelfHeight(suite.chainB.GetContext())
+ }, false},
+ {"self consensus state not found", func() {
+ clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+ _, _, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB)
+ suite.Require().NoError(err)
+
+ // retrieve client state of chainA to pass as counterpartyClient
+ counterpartyClient = suite.chainA.GetClientState(clientA)
+
+ consensusHeight = clienttypes.NewHeight(0, 1)
+ }, false},
+ {"counterparty versions is empty", func() {
+ clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+ _, _, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB)
+ suite.Require().NoError(err)
+
+ // retrieve client state of chainA to pass as counterpartyClient
+ counterpartyClient = suite.chainA.GetClientState(clientA)
+
+ versions = nil
+ }, false},
+ {"counterparty versions don't have a match", func() {
+ clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+ _, _, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB)
+ suite.Require().NoError(err)
+
+ // retrieve client state of chainA to pass as counterpartyClient
+ counterpartyClient = suite.chainA.GetClientState(clientA)
+
+ version := types.NewVersion("0.0", nil)
+ versions = []exported.Version{version}
+ }, false},
+ {"connection state verification failed", func() {
+ clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+ // chainA connection not created
+
+ // retrieve client state of chainA to pass as counterpartyClient
+ counterpartyClient = suite.chainA.GetClientState(clientA)
+ }, false},
+ {"client state verification failed", func() {
+ clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+ _, _, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB)
+ suite.Require().NoError(err)
+
+ // retrieve client state of chainA to pass as counterpartyClient
+ counterpartyClient = suite.chainA.GetClientState(clientA)
+
+ // modify counterparty client without setting in store so it still passes validate but fails proof verification
+ tmClient, ok := counterpartyClient.(*ibctmtypes.ClientState)
+ suite.Require().True(ok)
+ tmClient.LatestHeight = tmClient.LatestHeight.Increment().(clienttypes.Height)
+ }, false},
+ {"consensus state verification failed", func() {
+ clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+
+ // retrieve client state of chainA to pass as counterpartyClient
+ counterpartyClient = suite.chainA.GetClientState(clientA)
+
+ // give chainA wrong consensus state for chainB
+ consState, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetLatestClientConsensusState(suite.chainA.GetContext(), clientA)
+ suite.Require().True(found)
+
+ tmConsState, ok := consState.(*ibctmtypes.ConsensusState)
+ suite.Require().True(ok)
+
+ tmConsState.Timestamp = time.Now()
+ suite.chainA.App.IBCKeeper.ClientKeeper.SetClientConsensusState(suite.chainA.GetContext(), clientA, counterpartyClient.GetLatestHeight(), tmConsState)
+
+ _, _, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB)
+ suite.Require().NoError(err)
+ }, false},
+ {"invalid previous connection is in TRYOPEN", func() {
+ clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+
+ // open init chainA
+ connA, connB, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB)
+ suite.Require().NoError(err)
+
+ // open try chainB
+ err = suite.coordinator.ConnOpenTry(suite.chainB, suite.chainA, connB, connA)
+ suite.Require().NoError(err)
+
+ err = suite.coordinator.UpdateClient(suite.chainB, suite.chainA, clientB, exported.Tendermint)
+ suite.Require().NoError(err)
+
+ // retrieve client state of chainA to pass as counterpartyClient
+ counterpartyClient = suite.chainA.GetClientState(clientA)
+
+ previousConnectionID = connB.ID
+ }, false},
+ {"invalid previous connection has invalid versions", func() {
+ clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+
+ // open init chainA
+ connA, connB, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB)
+ suite.Require().NoError(err)
+
+ // open try chainB
+ err = suite.coordinator.ConnOpenTry(suite.chainB, suite.chainA, connB, connA)
+ suite.Require().NoError(err)
+
+ // modify connB to be in INIT with incorrect versions
+ connection, found := suite.chainB.App.IBCKeeper.ConnectionKeeper.GetConnection(suite.chainB.GetContext(), connB.ID)
+ suite.Require().True(found)
+
+ connection.State = types.INIT
+ connection.Versions = []*types.Version{{}}
+
+ suite.chainB.App.IBCKeeper.ConnectionKeeper.SetConnection(suite.chainB.GetContext(), connB.ID, connection)
+
+ err = suite.coordinator.UpdateClient(suite.chainB, suite.chainA, clientB, exported.Tendermint)
+ suite.Require().NoError(err)
+
+ // retrieve client state of chainA to pass as counterpartyClient
+ counterpartyClient = suite.chainA.GetClientState(clientA)
+
+ previousConnectionID = connB.ID
+ }, false},
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+
+ suite.Run(tc.msg, func() {
+ suite.SetupTest() // reset
+ consensusHeight = clienttypes.ZeroHeight() // must be explicitly changed in malleate
+ versions = types.GetCompatibleVersions() // must be explicitly changed in malleate
+ previousConnectionID = ""
+
+ tc.malleate()
+
+ connA := suite.chainA.GetFirstTestConnection(clientA, clientB)
+ counterparty := types.NewCounterparty(clientA, connA.ID, suite.chainA.GetPrefix())
+
+ connectionKey := host.ConnectionKey(connA.ID)
+ proofInit, proofHeight := suite.chainA.QueryProof(connectionKey)
+
+ if consensusHeight.IsZero() {
+ // retrieve consensus state height to provide proof for
+ consensusHeight = counterpartyClient.GetLatestHeight()
+ }
+ consensusKey := host.FullConsensusStateKey(clientA, consensusHeight)
+ proofConsensus, _ := suite.chainA.QueryProof(consensusKey)
+
+ // retrieve proof of counterparty clientstate on chainA
+ clientKey := host.FullClientStateKey(clientA)
+ proofClient, _ := suite.chainA.QueryProof(clientKey)
+
+ connectionID, err := suite.chainB.App.IBCKeeper.ConnectionKeeper.ConnOpenTry(
+ suite.chainB.GetContext(), previousConnectionID, counterparty, delayPeriod, clientB, counterpartyClient,
+ versions, proofInit, proofClient, proofConsensus,
+ proofHeight, consensusHeight,
+ )
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ suite.Require().Equal(types.FormatConnectionIdentifier(0), connectionID)
+ } else {
+ suite.Require().Error(err)
+ suite.Require().Equal("", connectionID)
+ }
+ })
+ }
+}
+
+// TestConnOpenAck - chainA calls ConnOpenAck to acknowledge (move to OPEN)
+// the connection opened on chainB, which is in the TRYOPEN state.
+func (suite *KeeperTestSuite) TestConnOpenAck() {
+ var (
+ clientA string
+ clientB string
+ consensusHeight exported.Height
+ version *types.Version
+ counterpartyClient exported.ClientState
+ )
+
+ testCases := []struct {
+ msg string
+ malleate func()
+ expPass bool
+ }{
+ {"success", func() {
+ clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+ connA, connB, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB)
+ suite.Require().NoError(err)
+
+ err = suite.coordinator.ConnOpenTry(suite.chainB, suite.chainA, connB, connA)
+ suite.Require().NoError(err)
+
+ // retrieve client state of chainB to pass as counterpartyClient
+ counterpartyClient = suite.chainB.GetClientState(clientB)
+ }, true},
+ {"success from tryopen", func() {
+ // chainA is in TRYOPEN, chainB is in TRYOPEN
+ clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+ connB, connA, err := suite.coordinator.ConnOpenInit(suite.chainB, suite.chainA, clientB, clientA)
+ suite.Require().NoError(err)
+
+ err = suite.coordinator.ConnOpenTry(suite.chainA, suite.chainB, connA, connB)
+ suite.Require().NoError(err)
+
+ // set chainB to TRYOPEN
+ connection := suite.chainB.GetConnection(connB)
+ connection.State = types.TRYOPEN
+ connection.Counterparty.ConnectionId = connA.ID
+ suite.chainB.App.IBCKeeper.ConnectionKeeper.SetConnection(suite.chainB.GetContext(), connB.ID, connection)
+ // update clientB so state change is committed
+ suite.coordinator.UpdateClient(suite.chainB, suite.chainA, clientB, exported.Tendermint)
+
+ suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+
+ // retrieve client state of chainB to pass as counterpartyClient
+ counterpartyClient = suite.chainB.GetClientState(clientB)
+ }, true},
+ {"invalid counterparty client", func() {
+ clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+ connA, connB, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB)
+ suite.Require().NoError(err)
+
+ // retrieve client state of chainB to pass as counterpartyClient
+ counterpartyClient = suite.chainB.GetClientState(clientB)
+
+ // Set an invalid client of chainA on chainB
+ tmClient, ok := counterpartyClient.(*ibctmtypes.ClientState)
+ suite.Require().True(ok)
+ tmClient.ChainId = "wrongchainid"
+
+ suite.chainB.App.IBCKeeper.ClientKeeper.SetClientState(suite.chainB.GetContext(), clientB, tmClient)
+
+ err = suite.coordinator.ConnOpenTry(suite.chainB, suite.chainA, connB, connA)
+ suite.Require().NoError(err)
+ }, false},
+ {"consensus height >= latest height", func() {
+ clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+ connA, connB, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB)
+ suite.Require().NoError(err)
+
+ // retrieve client state of chainB to pass as counterpartyClient
+ counterpartyClient = suite.chainB.GetClientState(clientB)
+
+ err = suite.coordinator.ConnOpenTry(suite.chainB, suite.chainA, connB, connA)
+ suite.Require().NoError(err)
+
+ consensusHeight = clienttypes.GetSelfHeight(suite.chainA.GetContext())
+ }, false},
+ {"connection not found", func() {
+ // connections are never created
+ clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+
+ // retrieve client state of chainB to pass as counterpartyClient
+ counterpartyClient = suite.chainB.GetClientState(clientB)
+ }, false},
+ {"invalid counterparty connection ID", func() {
+ clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+ connA, connB, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB)
+ suite.Require().NoError(err)
+
+ // retrieve client state of chainB to pass as counterpartyClient
+ counterpartyClient = suite.chainB.GetClientState(clientB)
+
+ err = suite.coordinator.ConnOpenTry(suite.chainB, suite.chainA, connB, connA)
+ suite.Require().NoError(err)
+
+ // modify connB to set counterparty connection identifier to wrong identifier
+ connection, found := suite.chainA.App.IBCKeeper.ConnectionKeeper.GetConnection(suite.chainA.GetContext(), connA.ID)
+ suite.Require().True(found)
+
+ connection.Counterparty.ConnectionId = "badconnectionid"
+
+ suite.chainA.App.IBCKeeper.ConnectionKeeper.SetConnection(suite.chainA.GetContext(), connA.ID, connection)
+
+ err = suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ suite.Require().NoError(err)
+
+ err = suite.coordinator.UpdateClient(suite.chainB, suite.chainA, clientB, exported.Tendermint)
+ suite.Require().NoError(err)
+ }, false},
+ {"connection state is not INIT", func() {
+ // connection state is already OPEN on chainA
+ clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+ connA, connB, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB)
+ suite.Require().NoError(err)
+
+ // retrieve client state of chainB to pass as counterpartyClient
+ counterpartyClient = suite.chainB.GetClientState(clientB)
+
+ err = suite.coordinator.ConnOpenTry(suite.chainB, suite.chainA, connB, connA)
+ suite.Require().NoError(err)
+
+ err = suite.coordinator.ConnOpenAck(suite.chainA, suite.chainB, connA, connB)
+ suite.Require().NoError(err)
+ }, false},
+ {"connection is in INIT but the proposed version is invalid", func() {
+ // chainA is in INIT, chainB is in TRYOPEN
+ clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+ connA, connB, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB)
+ suite.Require().NoError(err)
+
+ // retrieve client state of chainB to pass as counterpartyClient
+ counterpartyClient = suite.chainB.GetClientState(clientB)
+
+ err = suite.coordinator.ConnOpenTry(suite.chainB, suite.chainA, connB, connA)
+ suite.Require().NoError(err)
+
+ version = types.NewVersion("2.0", nil)
+ }, false},
+ {"connection is in TRYOPEN but the set version in the connection is invalid", func() {
+ // chainA is in TRYOPEN, chainB is in TRYOPEN
+ clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+ connB, connA, err := suite.coordinator.ConnOpenInit(suite.chainB, suite.chainA, clientB, clientA)
+ suite.Require().NoError(err)
+
+ err = suite.coordinator.ConnOpenTry(suite.chainA, suite.chainB, connA, connB)
+ suite.Require().NoError(err)
+
+ // set chainB to TRYOPEN
+ connection := suite.chainB.GetConnection(connB)
+ connection.State = types.TRYOPEN
+ suite.chainB.App.IBCKeeper.ConnectionKeeper.SetConnection(suite.chainB.GetContext(), connB.ID, connection)
+
+ // update clientB so state change is committed
+ suite.coordinator.UpdateClient(suite.chainB, suite.chainA, clientB, exported.Tendermint)
+ suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+
+ // retrieve client state of chainB to pass as counterpartyClient
+ counterpartyClient = suite.chainB.GetClientState(clientB)
+
+ version = types.NewVersion("2.0", nil)
+ }, false},
+ {"incompatible IBC versions", func() {
+ clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+ connA, connB, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB)
+ suite.Require().NoError(err)
+
+ // retrieve client state of chainB to pass as counterpartyClient
+ counterpartyClient = suite.chainB.GetClientState(clientB)
+
+ err = suite.coordinator.ConnOpenTry(suite.chainB, suite.chainA, connB, connA)
+ suite.Require().NoError(err)
+
+ // set version to a non-compatible version
+ version = types.NewVersion("2.0", nil)
+ }, false},
+ {"empty version", func() {
+ clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+ connA, connB, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB)
+ suite.Require().NoError(err)
+
+ // retrieve client state of chainB to pass as counterpartyClient
+ counterpartyClient = suite.chainB.GetClientState(clientB)
+
+ err = suite.coordinator.ConnOpenTry(suite.chainB, suite.chainA, connB, connA)
+ suite.Require().NoError(err)
+
+ version = &types.Version{}
+ }, false},
+ {"feature set verification failed - unsupported feature", func() {
+ clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+ connA, connB, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB)
+ suite.Require().NoError(err)
+
+ // retrieve client state of chainB to pass as counterpartyClient
+ counterpartyClient = suite.chainB.GetClientState(clientB)
+
+ err = suite.coordinator.ConnOpenTry(suite.chainB, suite.chainA, connB, connA)
+ suite.Require().NoError(err)
+
+ version = types.NewVersion(types.DefaultIBCVersionIdentifier, []string{"ORDER_ORDERED", "ORDER_UNORDERED", "ORDER_DAG"})
+ }, false},
+ {"self consensus state not found", func() {
+ clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+ connA, connB, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB)
+ suite.Require().NoError(err)
+
+ // retrieve client state of chainB to pass as counterpartyClient
+ counterpartyClient = suite.chainB.GetClientState(clientB)
+
+ err = suite.coordinator.ConnOpenTry(suite.chainB, suite.chainA, connB, connA)
+ suite.Require().NoError(err)
+
+ consensusHeight = clienttypes.NewHeight(0, 1)
+ }, false},
+ {"connection state verification failed", func() {
+ // chainB connection is not in INIT
+ clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+ _, _, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB)
+ suite.Require().NoError(err)
+
+ // retrieve client state of chainB to pass as counterpartyClient
+ counterpartyClient = suite.chainB.GetClientState(clientB)
+ }, false},
+ {"client state verification failed", func() {
+ clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+ connA, connB, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB)
+ suite.Require().NoError(err)
+
+ // retrieve client state of chainB to pass as counterpartyClient
+ counterpartyClient = suite.chainB.GetClientState(clientB)
+
+ // modify counterparty client without setting in store so it still passes validate but fails proof verification
+ tmClient, ok := counterpartyClient.(*ibctmtypes.ClientState)
+ suite.Require().True(ok)
+ tmClient.LatestHeight = tmClient.LatestHeight.Increment().(clienttypes.Height)
+
+ err = suite.coordinator.ConnOpenTry(suite.chainB, suite.chainA, connB, connA)
+ suite.Require().NoError(err)
+ }, false},
+ {"consensus state verification failed", func() {
+ clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+ connA, connB, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB)
+ suite.Require().NoError(err)
+
+ // retrieve client state of chainB to pass as counterpartyClient
+ counterpartyClient = suite.chainB.GetClientState(clientB)
+
+ // give chainB wrong consensus state for chainA
+ consState, found := suite.chainB.App.IBCKeeper.ClientKeeper.GetLatestClientConsensusState(suite.chainB.GetContext(), clientB)
+ suite.Require().True(found)
+
+ tmConsState, ok := consState.(*ibctmtypes.ConsensusState)
+ suite.Require().True(ok)
+
+ tmConsState.Timestamp = time.Now()
+ suite.chainB.App.IBCKeeper.ClientKeeper.SetClientConsensusState(suite.chainB.GetContext(), clientB, counterpartyClient.GetLatestHeight(), tmConsState)
+
+ err = suite.coordinator.ConnOpenTry(suite.chainB, suite.chainA, connB, connA)
+ suite.Require().NoError(err)
+ }, false},
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+ suite.Run(tc.msg, func() {
+ suite.SetupTest() // reset
+ version = types.ExportedVersionsToProto(types.GetCompatibleVersions())[0] // must be explicitly changed in malleate
+ consensusHeight = clienttypes.ZeroHeight() // must be explicitly changed in malleate
+
+ tc.malleate()
+
+ connA := suite.chainA.GetFirstTestConnection(clientA, clientB)
+ connB := suite.chainB.GetFirstTestConnection(clientB, clientA)
+
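+			// query chainB for the proof that its connection end is in TRYOPEN; this
+			// proofTry is what chainA's ConnOpenAck below verifies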
+ connectionKey := host.ConnectionKey(connB.ID)
+ proofTry, proofHeight := suite.chainB.QueryProof(connectionKey)
+
+ if consensusHeight.IsZero() {
+ // retrieve consensus state height to provide proof for
+ clientState := suite.chainB.GetClientState(clientB)
+ consensusHeight = clientState.GetLatestHeight()
+ }
+ consensusKey := host.FullConsensusStateKey(clientB, consensusHeight)
+ proofConsensus, _ := suite.chainB.QueryProof(consensusKey)
+
+			// retrieve proof of counterparty client state stored on chainB
+ clientKey := host.FullClientStateKey(clientB)
+ proofClient, _ := suite.chainB.QueryProof(clientKey)
+
+ err := suite.chainA.App.IBCKeeper.ConnectionKeeper.ConnOpenAck(
+ suite.chainA.GetContext(), connA.ID, counterpartyClient, version, connB.ID,
+ proofTry, proofClient, proofConsensus, proofHeight, consensusHeight,
+ )
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+}
+
+// TestConnOpenConfirm - chainB calls ConnOpenConfirm to confirm that the
+// connection end on chainA is now OPEN, opening the connection end on chainB.
+func (suite *KeeperTestSuite) TestConnOpenConfirm() {
+ var (
+ clientA string
+ clientB string
+ )
+ testCases := []struct {
+ msg string
+ malleate func()
+ expPass bool
+ }{
+ {"success", func() {
+ clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+ connA, connB, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB)
+ suite.Require().NoError(err)
+
+ err = suite.coordinator.ConnOpenTry(suite.chainB, suite.chainA, connB, connA)
+ suite.Require().NoError(err)
+
+ err = suite.coordinator.ConnOpenAck(suite.chainA, suite.chainB, connA, connB)
+ suite.Require().NoError(err)
+ }, true},
+ {"connection not found", func() {
+ // connections are never created
+ clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+ }, false},
+ {"chain B's connection state is not TRYOPEN", func() {
+ // connections are OPEN
+ clientA, clientB, _, _ = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
+ }, false},
+ {"connection state verification failed", func() {
+ // chainA is in INIT
+ clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+ connA, connB, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB)
+ suite.Require().NoError(err)
+
+ err = suite.coordinator.ConnOpenTry(suite.chainB, suite.chainA, connB, connA)
+ suite.Require().NoError(err)
+ }, false},
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+
+ suite.Run(tc.msg, func() {
+ suite.SetupTest() // reset
+
+ tc.malleate()
+
+ connA := suite.chainA.GetFirstTestConnection(clientA, clientB)
+ connB := suite.chainB.GetFirstTestConnection(clientB, clientA)
+
+ connectionKey := host.ConnectionKey(connA.ID)
+ proofAck, proofHeight := suite.chainA.QueryProof(connectionKey)
+
+ err := suite.chainB.App.IBCKeeper.ConnectionKeeper.ConnOpenConfirm(
+ suite.chainB.GetContext(), connB.ID, proofAck, proofHeight,
+ )
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+}
diff --git a/core/03-connection/keeper/keeper.go b/core/03-connection/keeper/keeper.go
new file mode 100644
index 00000000..66372686
--- /dev/null
+++ b/core/03-connection/keeper/keeper.go
@@ -0,0 +1,198 @@
+package keeper
+
+import (
+ "github.com/tendermint/tendermint/libs/log"
+
+ "github.com/cosmos/cosmos-sdk/codec"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
+ commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
+ host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+)
+
+// Keeper defines the IBC connection keeper
+type Keeper struct {
+ // implements gRPC QueryServer interface
+ types.QueryServer
+
+ storeKey sdk.StoreKey
+ cdc codec.BinaryMarshaler
+ clientKeeper types.ClientKeeper
+}
+
+// NewKeeper creates a new IBC connection Keeper instance
+func NewKeeper(cdc codec.BinaryMarshaler, key sdk.StoreKey, ck types.ClientKeeper) Keeper {
+ return Keeper{
+ storeKey: key,
+ cdc: cdc,
+ clientKeeper: ck,
+ }
+}
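+
+// A minimal wiring sketch (illustrative; appCodec, keys and clientKeeper are
+// placeholder names for an app-level codec, the IBC store key and the 02-client
+// keeper, not part of this file):
+//
+//   connectionKeeper := keeper.NewKeeper(appCodec, keys[host.StoreKey], clientKeeper)
+//   prefix := connectionKeeper.GetCommitmentPrefix() // Merkle prefix over the IBC store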
+
+// Logger returns a module-specific logger.
+func (k Keeper) Logger(ctx sdk.Context) log.Logger {
+ return ctx.Logger().With("module", "x/"+host.ModuleName+"/"+types.SubModuleName)
+}
+
+// GetCommitmentPrefix returns the IBC connection store prefix as a commitment
+// prefix.
+func (k Keeper) GetCommitmentPrefix() exported.Prefix {
+ return commitmenttypes.NewMerklePrefix([]byte(k.storeKey.Name()))
+}
+
+// GenerateConnectionIdentifier returns the next connection identifier.
+func (k Keeper) GenerateConnectionIdentifier(ctx sdk.Context) string {
+ nextConnSeq := k.GetNextConnectionSequence(ctx)
+ connectionID := types.FormatConnectionIdentifier(nextConnSeq)
+
+ nextConnSeq++
+ k.SetNextConnectionSequence(ctx, nextConnSeq)
+ return connectionID
+}
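+
+// For reference, types.FormatConnectionIdentifier produces identifiers of the form
+// "connection-<sequence>", so successive calls yield sequential IDs (assuming the
+// sequence was initialized to 0 at genesis):
+//
+//   id0 := k.GenerateConnectionIdentifier(ctx) // "connection-0"
+//   id1 := k.GenerateConnectionIdentifier(ctx) // "connection-1"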
+
+// GetConnection returns a connection with a particular identifier
+func (k Keeper) GetConnection(ctx sdk.Context, connectionID string) (types.ConnectionEnd, bool) {
+ store := ctx.KVStore(k.storeKey)
+ bz := store.Get(host.ConnectionKey(connectionID))
+ if bz == nil {
+ return types.ConnectionEnd{}, false
+ }
+
+ var connection types.ConnectionEnd
+ k.cdc.MustUnmarshalBinaryBare(bz, &connection)
+
+ return connection, true
+}
+
+// SetConnection sets a connection to the store
+func (k Keeper) SetConnection(ctx sdk.Context, connectionID string, connection types.ConnectionEnd) {
+ store := ctx.KVStore(k.storeKey)
+ bz := k.cdc.MustMarshalBinaryBare(&connection)
+ store.Set(host.ConnectionKey(connectionID), bz)
+}
+
+// GetTimestampAtHeight returns the timestamp in nanoseconds of the consensus state at the
+// given height.
+func (k Keeper) GetTimestampAtHeight(ctx sdk.Context, connection types.ConnectionEnd, height exported.Height) (uint64, error) {
+ consensusState, found := k.clientKeeper.GetClientConsensusState(
+ ctx, connection.GetClientID(), height,
+ )
+
+ if !found {
+ return 0, sdkerrors.Wrapf(
+ clienttypes.ErrConsensusStateNotFound,
+ "clientID (%s), height (%s)", connection.GetClientID(), height,
+ )
+ }
+
+ return consensusState.GetTimestamp(), nil
+}
+
+// GetClientConnectionPaths returns all the connection paths stored under a
+// particular client
+func (k Keeper) GetClientConnectionPaths(ctx sdk.Context, clientID string) ([]string, bool) {
+ store := ctx.KVStore(k.storeKey)
+ bz := store.Get(host.ClientConnectionsKey(clientID))
+ if bz == nil {
+ return nil, false
+ }
+
+ var clientPaths types.ClientPaths
+ k.cdc.MustUnmarshalBinaryBare(bz, &clientPaths)
+ return clientPaths.Paths, true
+}
+
+// SetClientConnectionPaths sets the connection paths for the given client.
+func (k Keeper) SetClientConnectionPaths(ctx sdk.Context, clientID string, paths []string) {
+ store := ctx.KVStore(k.storeKey)
+ clientPaths := types.ClientPaths{Paths: paths}
+ bz := k.cdc.MustMarshalBinaryBare(&clientPaths)
+ store.Set(host.ClientConnectionsKey(clientID), bz)
+}
+
+// GetNextConnectionSequence gets the next connection sequence from the store.
+func (k Keeper) GetNextConnectionSequence(ctx sdk.Context) uint64 {
+ store := ctx.KVStore(k.storeKey)
+ bz := store.Get([]byte(types.KeyNextConnectionSequence))
+ if bz == nil {
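+		// the sequence is expected to be set during genesis initialization; reaching
+		// this branch indicates uninitialized or corrupted state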
+ panic("next connection sequence is nil")
+ }
+
+ return sdk.BigEndianToUint64(bz)
+}
+
+// SetNextConnectionSequence sets the next connection sequence to the store.
+func (k Keeper) SetNextConnectionSequence(ctx sdk.Context, sequence uint64) {
+ store := ctx.KVStore(k.storeKey)
+ bz := sdk.Uint64ToBigEndian(sequence)
+ store.Set([]byte(types.KeyNextConnectionSequence), bz)
+}
+
+// GetAllClientConnectionPaths returns all stored client connection paths. Clients
+// that haven't initialized a connection handshake are ignored, since no paths are
+// stored for them.
+func (k Keeper) GetAllClientConnectionPaths(ctx sdk.Context) []types.ConnectionPaths {
+ var allConnectionPaths []types.ConnectionPaths
+ k.clientKeeper.IterateClients(ctx, func(clientID string, cs exported.ClientState) bool {
+ paths, found := k.GetClientConnectionPaths(ctx, clientID)
+ if !found {
+ // continue when connection handshake is not initialized
+ return false
+ }
+ connPaths := types.NewConnectionPaths(clientID, paths)
+ allConnectionPaths = append(allConnectionPaths, connPaths)
+ return false
+ })
+
+ return allConnectionPaths
+}
+
+// IterateConnections provides an iterator over all ConnectionEnd objects.
+// For each ConnectionEnd, cb will be called. If the cb returns true, the
+// iterator will close and stop.
+func (k Keeper) IterateConnections(ctx sdk.Context, cb func(types.IdentifiedConnection) bool) {
+ store := ctx.KVStore(k.storeKey)
+ iterator := sdk.KVStorePrefixIterator(store, []byte(host.KeyConnectionPrefix))
+
+ defer iterator.Close()
+ for ; iterator.Valid(); iterator.Next() {
+ var connection types.ConnectionEnd
+ k.cdc.MustUnmarshalBinaryBare(iterator.Value(), &connection)
+
+ connectionID := host.MustParseConnectionPath(string(iterator.Key()))
+ identifiedConnection := types.NewIdentifiedConnection(connectionID, connection)
+ if cb(identifiedConnection) {
+ break
+ }
+ }
+}
+
+// GetAllConnections returns all stored ConnectionEnd objects.
+func (k Keeper) GetAllConnections(ctx sdk.Context) (connections []types.IdentifiedConnection) {
+ k.IterateConnections(ctx, func(connection types.IdentifiedConnection) bool {
+ connections = append(connections, connection)
+ return false
+ })
+ return connections
+}
+
+// addConnectionToClient is used to add a connection identifier to the set of
+// connections associated with a client.
+func (k Keeper) addConnectionToClient(ctx sdk.Context, clientID, connectionID string) error {
+ _, found := k.clientKeeper.GetClientState(ctx, clientID)
+ if !found {
+ return sdkerrors.Wrap(clienttypes.ErrClientNotFound, clientID)
+ }
+
+ conns, found := k.GetClientConnectionPaths(ctx, clientID)
+ if !found {
+ conns = []string{}
+ }
+
+ conns = append(conns, connectionID)
+ k.SetClientConnectionPaths(ctx, clientID, conns)
+ return nil
+}
diff --git a/core/03-connection/keeper/keeper_test.go b/core/03-connection/keeper/keeper_test.go
new file mode 100644
index 00000000..f2a1124b
--- /dev/null
+++ b/core/03-connection/keeper/keeper_test.go
@@ -0,0 +1,133 @@
+package keeper_test
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/suite"
+
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+)
+
+type KeeperTestSuite struct {
+ suite.Suite
+
+ coordinator *ibctesting.Coordinator
+
+ // testing chains used for convenience and readability
+ chainA *ibctesting.TestChain
+ chainB *ibctesting.TestChain
+}
+
+func (suite *KeeperTestSuite) SetupTest() {
+ suite.coordinator = ibctesting.NewCoordinator(suite.T(), 2)
+ suite.chainA = suite.coordinator.GetChain(ibctesting.GetChainID(0))
+ suite.chainB = suite.coordinator.GetChain(ibctesting.GetChainID(1))
+}
+
+func TestKeeperTestSuite(t *testing.T) {
+ suite.Run(t, new(KeeperTestSuite))
+}
+
+func (suite *KeeperTestSuite) TestSetAndGetConnection() {
+ clientA, clientB := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+ connA := suite.chainA.GetFirstTestConnection(clientA, clientB)
+ _, existed := suite.chainA.App.IBCKeeper.ConnectionKeeper.GetConnection(suite.chainA.GetContext(), connA.ID)
+ suite.Require().False(existed)
+
+ suite.coordinator.CreateConnection(suite.chainA, suite.chainB, clientA, clientB)
+ _, existed = suite.chainA.App.IBCKeeper.ConnectionKeeper.GetConnection(suite.chainA.GetContext(), connA.ID)
+ suite.Require().True(existed)
+}
+
+func (suite *KeeperTestSuite) TestSetAndGetClientConnectionPaths() {
+ clientA, _ := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+ _, existed := suite.chainA.App.IBCKeeper.ConnectionKeeper.GetClientConnectionPaths(suite.chainA.GetContext(), clientA)
+ suite.False(existed)
+
+ connections := []string{"connectionA", "connectionB"}
+ suite.chainA.App.IBCKeeper.ConnectionKeeper.SetClientConnectionPaths(suite.chainA.GetContext(), clientA, connections)
+ paths, existed := suite.chainA.App.IBCKeeper.ConnectionKeeper.GetClientConnectionPaths(suite.chainA.GetContext(), clientA)
+ suite.True(existed)
+ suite.EqualValues(connections, paths)
+}
+
+// create 2 connections: A0 - B0, A1 - B1
+func (suite KeeperTestSuite) TestGetAllConnections() {
+ clientA, clientB, connA0, connB0 := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
+ connA1, connB1 := suite.coordinator.CreateConnection(suite.chainA, suite.chainB, clientA, clientB)
+
+ counterpartyB0 := types.NewCounterparty(clientB, connB0.ID, suite.chainB.GetPrefix()) // connection B0
+ counterpartyB1 := types.NewCounterparty(clientB, connB1.ID, suite.chainB.GetPrefix()) // connection B1
+
+ conn1 := types.NewConnectionEnd(types.OPEN, clientA, counterpartyB0, types.ExportedVersionsToProto(types.GetCompatibleVersions()), 0) // A0 - B0
+ conn2 := types.NewConnectionEnd(types.OPEN, clientA, counterpartyB1, types.ExportedVersionsToProto(types.GetCompatibleVersions()), 0) // A1 - B1
+
+ iconn1 := types.NewIdentifiedConnection(connA0.ID, conn1)
+ iconn2 := types.NewIdentifiedConnection(connA1.ID, conn2)
+
+ expConnections := []types.IdentifiedConnection{iconn1, iconn2}
+
+ connections := suite.chainA.App.IBCKeeper.ConnectionKeeper.GetAllConnections(suite.chainA.GetContext())
+ suite.Require().Len(connections, len(expConnections))
+ suite.Require().Equal(expConnections, connections)
+}
+
+// The test creates 2 clients, clientA0 and clientA1. clientA0 has a single
+// connection and clientA1 has 2 connections.
+func (suite KeeperTestSuite) TestGetAllClientConnectionPaths() {
+ clientA0, _, connA0, _ := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
+ clientA1, clientB1, connA1, _ := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
+ connA2, _ := suite.coordinator.CreateConnection(suite.chainA, suite.chainB, clientA1, clientB1)
+
+ expPaths := []types.ConnectionPaths{
+ types.NewConnectionPaths(clientA0, []string{connA0.ID}),
+ types.NewConnectionPaths(clientA1, []string{connA1.ID, connA2.ID}),
+ }
+
+ connPaths := suite.chainA.App.IBCKeeper.ConnectionKeeper.GetAllClientConnectionPaths(suite.chainA.GetContext())
+ suite.Require().Len(connPaths, 2)
+ suite.Require().Equal(expPaths, connPaths)
+}
+
+// TestGetTimestampAtHeight verifies that the clients on each chain return the
+// correct timestamp for the other chain.
+func (suite *KeeperTestSuite) TestGetTimestampAtHeight() {
+ var connection types.ConnectionEnd
+
+ cases := []struct {
+ msg string
+ malleate func()
+ expPass bool
+ }{
+ {"verification success", func() {
+ _, _, connA, _ := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
+ connection = suite.chainA.GetConnection(connA)
+ }, true},
+ {"consensus state not found", func() {
+ // any non-nil value of connection is valid
+ suite.Require().NotNil(connection)
+ }, false},
+ }
+
+ for _, tc := range cases {
+ suite.Run(fmt.Sprintf("Case %s", tc.msg), func() {
+ suite.SetupTest() // reset
+
+ tc.malleate()
+
+ actualTimestamp, err := suite.chainA.App.IBCKeeper.ConnectionKeeper.GetTimestampAtHeight(
+ suite.chainA.GetContext(), connection, suite.chainB.LastHeader.GetHeight(),
+ )
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ suite.Require().EqualValues(uint64(suite.chainB.LastHeader.GetTime().UnixNano()), actualTimestamp)
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+}
diff --git a/core/03-connection/keeper/verify.go b/core/03-connection/keeper/verify.go
new file mode 100644
index 00000000..ddb1ea6b
--- /dev/null
+++ b/core/03-connection/keeper/verify.go
@@ -0,0 +1,225 @@
+package keeper
+
+import (
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+)
+
+// VerifyClientState verifies a proof of a client state of the running machine
+// stored on the target machine.
+func (k Keeper) VerifyClientState(
+ ctx sdk.Context,
+ connection exported.ConnectionI,
+ height exported.Height,
+ proof []byte,
+ clientState exported.ClientState,
+) error {
+ clientID := connection.GetClientID()
+ targetClient, found := k.clientKeeper.GetClientState(ctx, clientID)
+ if !found {
+ return sdkerrors.Wrap(clienttypes.ErrClientNotFound, clientID)
+ }
+
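+	// verification runs against the path under which the counterparty stores the
+	// client state for this chain: the counterparty's prefix and client identifier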
+ if err := targetClient.VerifyClientState(
+ k.clientKeeper.ClientStore(ctx, clientID), k.cdc, height,
+ connection.GetCounterparty().GetPrefix(), connection.GetCounterparty().GetClientID(), proof, clientState); err != nil {
+ return sdkerrors.Wrapf(err, "failed client state verification for target client: %s", connection.GetClientID())
+ }
+
+ return nil
+}
+
+// VerifyClientConsensusState verifies a proof of the consensus state of the
+// specified client stored on the target machine.
+func (k Keeper) VerifyClientConsensusState(
+ ctx sdk.Context,
+ connection exported.ConnectionI,
+ height exported.Height,
+ consensusHeight exported.Height,
+ proof []byte,
+ consensusState exported.ConsensusState,
+) error {
+ clientID := connection.GetClientID()
+ clientState, found := k.clientKeeper.GetClientState(ctx, clientID)
+ if !found {
+ return sdkerrors.Wrap(clienttypes.ErrClientNotFound, clientID)
+ }
+
+ if err := clientState.VerifyClientConsensusState(
+ k.clientKeeper.ClientStore(ctx, clientID), k.cdc, height,
+ connection.GetCounterparty().GetClientID(), consensusHeight, connection.GetCounterparty().GetPrefix(), proof, consensusState,
+ ); err != nil {
+ return sdkerrors.Wrapf(err, "failed consensus state verification for client (%s)", connection.GetClientID())
+ }
+
+ return nil
+}
+
+// VerifyConnectionState verifies a proof of the connection state of the
+// specified connection end stored on the target machine.
+func (k Keeper) VerifyConnectionState(
+ ctx sdk.Context,
+ connection exported.ConnectionI,
+ height exported.Height,
+ proof []byte,
+ connectionID string,
+ connectionEnd exported.ConnectionI, // opposite connection
+) error {
+ clientState, found := k.clientKeeper.GetClientState(ctx, connection.GetClientID())
+ if !found {
+ return sdkerrors.Wrap(clienttypes.ErrClientNotFound, connection.GetClientID())
+ }
+
+ if err := clientState.VerifyConnectionState(
+ k.clientKeeper.ClientStore(ctx, connection.GetClientID()), k.cdc, height,
+ connection.GetCounterparty().GetPrefix(), proof, connectionID, connectionEnd,
+ ); err != nil {
+ return sdkerrors.Wrapf(err, "failed connection state verification for client (%s)", connection.GetClientID())
+ }
+
+ return nil
+}
+
+// VerifyChannelState verifies a proof of the channel state of the specified
+// channel end, under the specified port, stored on the target machine.
+func (k Keeper) VerifyChannelState(
+ ctx sdk.Context,
+ connection exported.ConnectionI,
+ height exported.Height,
+ proof []byte,
+ portID,
+ channelID string,
+ channel exported.ChannelI,
+) error {
+ clientState, found := k.clientKeeper.GetClientState(ctx, connection.GetClientID())
+ if !found {
+ return sdkerrors.Wrap(clienttypes.ErrClientNotFound, connection.GetClientID())
+ }
+
+ if err := clientState.VerifyChannelState(
+ k.clientKeeper.ClientStore(ctx, connection.GetClientID()), k.cdc, height,
+ connection.GetCounterparty().GetPrefix(), proof,
+ portID, channelID, channel,
+ ); err != nil {
+ return sdkerrors.Wrapf(err, "failed channel state verification for client (%s)", connection.GetClientID())
+ }
+
+ return nil
+}
+
+// VerifyPacketCommitment verifies a proof of an outgoing packet commitment at
+// the specified port, specified channel, and specified sequence.
+func (k Keeper) VerifyPacketCommitment(
+ ctx sdk.Context,
+ connection exported.ConnectionI,
+ height exported.Height,
+ proof []byte,
+ portID,
+ channelID string,
+ sequence uint64,
+ commitmentBytes []byte,
+) error {
+ clientState, found := k.clientKeeper.GetClientState(ctx, connection.GetClientID())
+ if !found {
+ return sdkerrors.Wrap(clienttypes.ErrClientNotFound, connection.GetClientID())
+ }
+
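+	// the current block time (in nanoseconds) and the connection's delay period are
+	// forwarded so the light client can reject the proof until the delay has elapsed
+	// since the relevant consensus state was stored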
+ if err := clientState.VerifyPacketCommitment(
+ k.clientKeeper.ClientStore(ctx, connection.GetClientID()), k.cdc, height,
+ uint64(ctx.BlockTime().UnixNano()), connection.GetDelayPeriod(),
+ connection.GetCounterparty().GetPrefix(), proof, portID, channelID,
+ sequence, commitmentBytes,
+ ); err != nil {
+ return sdkerrors.Wrapf(err, "failed packet commitment verification for client (%s)", connection.GetClientID())
+ }
+
+ return nil
+}
+
+// VerifyPacketAcknowledgement verifies a proof of an incoming packet
+// acknowledgement at the specified port, specified channel, and specified sequence.
+func (k Keeper) VerifyPacketAcknowledgement(
+ ctx sdk.Context,
+ connection exported.ConnectionI,
+ height exported.Height,
+ proof []byte,
+ portID,
+ channelID string,
+ sequence uint64,
+ acknowledgement []byte,
+) error {
+ clientState, found := k.clientKeeper.GetClientState(ctx, connection.GetClientID())
+ if !found {
+ return sdkerrors.Wrap(clienttypes.ErrClientNotFound, connection.GetClientID())
+ }
+
+ if err := clientState.VerifyPacketAcknowledgement(
+ k.clientKeeper.ClientStore(ctx, connection.GetClientID()), k.cdc, height,
+ uint64(ctx.BlockTime().UnixNano()), connection.GetDelayPeriod(),
+ connection.GetCounterparty().GetPrefix(), proof, portID, channelID,
+ sequence, acknowledgement,
+ ); err != nil {
+ return sdkerrors.Wrapf(err, "failed packet acknowledgement verification for client (%s)", connection.GetClientID())
+ }
+
+ return nil
+}
+
+// VerifyPacketReceiptAbsence verifies a proof of the absence of an
+// incoming packet receipt at the specified port, specified channel, and
+// specified sequence.
+func (k Keeper) VerifyPacketReceiptAbsence(
+ ctx sdk.Context,
+ connection exported.ConnectionI,
+ height exported.Height,
+ proof []byte,
+ portID,
+ channelID string,
+ sequence uint64,
+) error {
+ clientState, found := k.clientKeeper.GetClientState(ctx, connection.GetClientID())
+ if !found {
+ return sdkerrors.Wrap(clienttypes.ErrClientNotFound, connection.GetClientID())
+ }
+
+ if err := clientState.VerifyPacketReceiptAbsence(
+ k.clientKeeper.ClientStore(ctx, connection.GetClientID()), k.cdc, height,
+ uint64(ctx.BlockTime().UnixNano()), connection.GetDelayPeriod(),
+ connection.GetCounterparty().GetPrefix(), proof, portID, channelID,
+ sequence,
+ ); err != nil {
+ return sdkerrors.Wrapf(err, "failed packet receipt absence verification for client (%s)", connection.GetClientID())
+ }
+
+ return nil
+}
+
+// VerifyNextSequenceRecv verifies a proof of the next sequence number to be
+// received of the specified channel at the specified port.
+func (k Keeper) VerifyNextSequenceRecv(
+ ctx sdk.Context,
+ connection exported.ConnectionI,
+ height exported.Height,
+ proof []byte,
+ portID,
+ channelID string,
+ nextSequenceRecv uint64,
+) error {
+ clientState, found := k.clientKeeper.GetClientState(ctx, connection.GetClientID())
+ if !found {
+ return sdkerrors.Wrap(clienttypes.ErrClientNotFound, connection.GetClientID())
+ }
+
+ if err := clientState.VerifyNextSequenceRecv(
+ k.clientKeeper.ClientStore(ctx, connection.GetClientID()), k.cdc, height,
+ uint64(ctx.BlockTime().UnixNano()), connection.GetDelayPeriod(),
+ connection.GetCounterparty().GetPrefix(), proof, portID, channelID,
+ nextSequenceRecv,
+ ); err != nil {
+ return sdkerrors.Wrapf(err, "failed next sequence receive verification for client (%s)", connection.GetClientID())
+ }
+
+ return nil
+}
diff --git a/core/03-connection/keeper/verify_test.go b/core/03-connection/keeper/verify_test.go
new file mode 100644
index 00000000..2d94955d
--- /dev/null
+++ b/core/03-connection/keeper/verify_test.go
@@ -0,0 +1,514 @@
+package keeper_test
+
+import (
+ "fmt"
+ "time"
+
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
+ channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+ host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
+ ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+ ibcmock "github.com/cosmos/cosmos-sdk/x/ibc/testing/mock"
+)
+
+var defaultTimeoutHeight = clienttypes.NewHeight(0, 100000)
+
+// TestVerifyClientState verifies a client state of chainA
+// stored on clientB (which is on chainB)
+func (suite *KeeperTestSuite) TestVerifyClientState() {
+ cases := []struct {
+ msg string
+ changeClientID bool
+ heightDiff uint64
+ malleateCounterparty bool
+ expPass bool
+ }{
+ {"verification success", false, 0, false, true},
+ {"client state not found", true, 0, false, false},
+ {"consensus state for proof height not found", false, 5, false, false},
+ {"verification failed", false, 0, true, false},
+ }
+
+ for _, tc := range cases {
+ tc := tc
+
+ suite.Run(tc.msg, func() {
+ suite.SetupTest() // reset
+
+ _, clientB, connA, _ := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
+
+ counterpartyClient, clientProof := suite.chainB.QueryClientStateProof(clientB)
+ proofHeight := clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()-1))
+
+ if tc.malleateCounterparty {
+ tmClient, _ := counterpartyClient.(*ibctmtypes.ClientState)
+ tmClient.ChainId = "wrongChainID"
+ }
+
+ connection := suite.chainA.GetConnection(connA)
+ if tc.changeClientID {
+ connection.ClientId = ibctesting.InvalidID
+ }
+
+ err := suite.chainA.App.IBCKeeper.ConnectionKeeper.VerifyClientState(
+ suite.chainA.GetContext(), connection,
+ malleateHeight(proofHeight, tc.heightDiff), clientProof, counterpartyClient,
+ )
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+}
+
+// TestVerifyClientConsensusState verifies that the consensus state of
+// chainA stored on clientB (which is on chainB) matches the consensus
+// state for chainA at that height.
+func (suite *KeeperTestSuite) TestVerifyClientConsensusState() {
+ var (
+ connA *ibctesting.TestConnection
+ connB *ibctesting.TestConnection
+ changeClientID bool
+ heightDiff uint64
+ )
+ cases := []struct {
+ msg string
+ malleate func()
+ expPass bool
+ }{
+ {"verification success", func() {
+ _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
+ }, true},
+ {"client state not found", func() {
+ _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
+
+ changeClientID = true
+ }, false},
+ {"consensus state not found", func() {
+ _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
+
+ heightDiff = 5
+ }, false},
+ {"verification failed", func() {
+ _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
+ clientB := connB.ClientID
+ clientState := suite.chainB.GetClientState(clientB)
+
+ // give chainB wrong consensus state for chainA
+ consState, found := suite.chainB.App.IBCKeeper.ClientKeeper.GetLatestClientConsensusState(suite.chainB.GetContext(), clientB)
+ suite.Require().True(found)
+
+ tmConsState, ok := consState.(*ibctmtypes.ConsensusState)
+ suite.Require().True(ok)
+
+ tmConsState.Timestamp = time.Now()
+ suite.chainB.App.IBCKeeper.ClientKeeper.SetClientConsensusState(suite.chainB.GetContext(), clientB, clientState.GetLatestHeight(), tmConsState)
+
+ suite.coordinator.CommitBlock(suite.chainB)
+ }, false},
+ }
+
+ for _, tc := range cases {
+ tc := tc
+
+ suite.Run(tc.msg, func() {
+ suite.SetupTest() // reset
+ heightDiff = 0 // must be explicitly changed in malleate
+ changeClientID = false // must be explicitly changed in malleate
+
+ tc.malleate()
+
+ connection := suite.chainA.GetConnection(connA)
+ if changeClientID {
+ connection.ClientId = ibctesting.InvalidID
+ }
+
+ proof, consensusHeight := suite.chainB.QueryConsensusStateProof(connB.ClientID)
+ proofHeight := clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()-1))
+ consensusState, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetSelfConsensusState(suite.chainA.GetContext(), consensusHeight)
+ suite.Require().True(found)
+
+ err := suite.chainA.App.IBCKeeper.ConnectionKeeper.VerifyClientConsensusState(
+ suite.chainA.GetContext(), connection,
+ malleateHeight(proofHeight, heightDiff), consensusHeight, proof, consensusState,
+ )
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+}
+
+// TestVerifyConnectionState verifies the connection state of the connection
+// on chainB. The connections on chainA and chainB are fully opened.
+func (suite *KeeperTestSuite) TestVerifyConnectionState() {
+ cases := []struct {
+ msg string
+ changeClientID bool
+ changeConnectionState bool
+ heightDiff uint64
+ expPass bool
+ }{
+ {"verification success", false, false, 0, true},
+ {"client state not found - changed client ID", true, false, 0, false},
+ {"consensus state not found - increased proof height", false, false, 5, false},
+ {"verification failed - connection state is different than proof", false, true, 0, false},
+ }
+
+ for _, tc := range cases {
+ tc := tc
+
+ suite.Run(tc.msg, func() {
+ suite.SetupTest() // reset
+
+ _, _, connA, connB := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
+
+ connection := suite.chainA.GetConnection(connA)
+ if tc.changeClientID {
+ connection.ClientId = ibctesting.InvalidID
+ }
+ expectedConnection := suite.chainB.GetConnection(connB)
+
+ connectionKey := host.ConnectionKey(connB.ID)
+ proof, proofHeight := suite.chainB.QueryProof(connectionKey)
+
+ if tc.changeConnectionState {
+ expectedConnection.State = types.TRYOPEN
+ }
+
+ err := suite.chainA.App.IBCKeeper.ConnectionKeeper.VerifyConnectionState(
+ suite.chainA.GetContext(), connection,
+ malleateHeight(proofHeight, tc.heightDiff), proof, connB.ID, expectedConnection,
+ )
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+}
+
+// TestVerifyChannelState verifies the channel state of the channel on
+// chainB. The channels on chainA and chainB are fully opened.
+func (suite *KeeperTestSuite) TestVerifyChannelState() {
+ cases := []struct {
+ msg string
+ changeClientID bool
+ changeChannelState bool
+ heightDiff uint64
+ expPass bool
+ }{
+ {"verification success", false, false, 0, true},
+		{"client state not found - changed client ID", true, false, 0, false},
+ {"consensus state not found - increased proof height", false, false, 5, false},
+ {"verification failed - changed channel state", false, true, 0, false},
+ }
+
+ for _, tc := range cases {
+ tc := tc
+
+ suite.Run(fmt.Sprintf("Case %s", tc.msg), func() {
+ suite.SetupTest() // reset
+
+ _, _, connA, _, _, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
+ connection := suite.chainA.GetConnection(connA)
+ if tc.changeClientID {
+ connection.ClientId = ibctesting.InvalidID
+ }
+
+ channelKey := host.ChannelKey(channelB.PortID, channelB.ID)
+ proof, proofHeight := suite.chainB.QueryProof(channelKey)
+
+ channel := suite.chainB.GetChannel(channelB)
+ if tc.changeChannelState {
+ channel.State = channeltypes.TRYOPEN
+ }
+
+ err := suite.chainA.App.IBCKeeper.ConnectionKeeper.VerifyChannelState(
+ suite.chainA.GetContext(), connection, malleateHeight(proofHeight, tc.heightDiff), proof,
+ channelB.PortID, channelB.ID, channel,
+ )
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+}
+
+// TestVerifyPacketCommitment has chainB verify the packet commitment
+// on channelA. The channels on chainA and chainB are fully opened and a
+// packet is sent from chainA to chainB, but has not been received.
+func (suite *KeeperTestSuite) TestVerifyPacketCommitment() {
+ cases := []struct {
+ msg string
+ changeClientID bool
+ changePacketCommitmentState bool
+ heightDiff uint64
+ delayPeriod uint64
+ expPass bool
+ }{
+ {"verification success", false, false, 0, 0, true},
+ {"verification success: delay period passed", false, false, 0, uint64(1 * time.Second.Nanoseconds()), true},
+ {"delay period has not passed", false, false, 0, uint64(1 * time.Hour.Nanoseconds()), false},
+		{"client state not found - changed client ID", true, false, 0, 0, false},
+ {"consensus state not found - increased proof height", false, false, 5, 0, false},
+ {"verification failed - changed packet commitment state", false, true, 0, 0, false},
+ }
+
+ for _, tc := range cases {
+ tc := tc
+
+ suite.Run(tc.msg, func() {
+ suite.SetupTest() // reset
+
+ _, clientB, _, connB, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
+
+ connection := suite.chainB.GetConnection(connB)
+ connection.DelayPeriod = tc.delayPeriod
+ if tc.changeClientID {
+ connection.ClientId = ibctesting.InvalidID
+ }
+
+ packet := channeltypes.NewPacket(ibctesting.TestHash, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, defaultTimeoutHeight, 0)
+ err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ suite.Require().NoError(err)
+
+ commitmentKey := host.PacketCommitmentKey(packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence())
+ proof, proofHeight := suite.chainA.QueryProof(commitmentKey)
+
+ if tc.changePacketCommitmentState {
+ packet.Data = []byte(ibctesting.InvalidID)
+ }
+
+ commitment := channeltypes.CommitPacket(suite.chainB.App.IBCKeeper.Codec(), packet)
+ err = suite.chainB.App.IBCKeeper.ConnectionKeeper.VerifyPacketCommitment(
+ suite.chainB.GetContext(), connection, malleateHeight(proofHeight, tc.heightDiff), proof,
+ packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence(), commitment,
+ )
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+}
+
+// TestVerifyPacketAcknowledgement has chainA verify the acknowledgement on
+// channelB. The channels on chainA and chainB are fully opened and a packet
+// is sent from chainA to chainB and received.
+func (suite *KeeperTestSuite) TestVerifyPacketAcknowledgement() {
+ cases := []struct {
+ msg string
+ changeClientID bool
+ changeAcknowledgement bool
+ heightDiff uint64
+ delayPeriod uint64
+ expPass bool
+ }{
+ {"verification success", false, false, 0, 0, true},
+ {"verification success: delay period passed", false, false, 0, uint64(1 * time.Second.Nanoseconds()), true},
+ {"delay period has not passed", false, false, 0, uint64(1 * time.Hour.Nanoseconds()), false},
+		{"client state not found - changed client ID", true, false, 0, 0, false},
+ {"consensus state not found - increased proof height", false, false, 5, 0, false},
+ {"verification failed - changed acknowledgement", false, true, 0, 0, false},
+ }
+
+ for _, tc := range cases {
+ tc := tc
+
+ suite.Run(tc.msg, func() {
+ suite.SetupTest() // reset
+
+ clientA, clientB, connA, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
+
+ connection := suite.chainA.GetConnection(connA)
+ connection.DelayPeriod = tc.delayPeriod
+ if tc.changeClientID {
+ connection.ClientId = ibctesting.InvalidID
+ }
+
+ // send and receive packet
+ packet := channeltypes.NewPacket(ibctesting.TestHash, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, defaultTimeoutHeight, 0)
+ err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ suite.Require().NoError(err)
+
+			// increment receiving chain's (chainB) time by 2 hours to always pass receive
+ suite.coordinator.IncrementTimeBy(time.Hour * 2)
+ suite.coordinator.CommitBlock(suite.chainB)
+
+ err = suite.coordinator.RecvPacket(suite.chainA, suite.chainB, clientA, packet)
+ suite.Require().NoError(err)
+
+ packetAckKey := host.PacketAcknowledgementKey(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence())
+ proof, proofHeight := suite.chainB.QueryProof(packetAckKey)
+
+ ack := ibcmock.MockAcknowledgement
+ if tc.changeAcknowledgement {
+ ack = []byte(ibctesting.InvalidID)
+ }
+
+ err = suite.chainA.App.IBCKeeper.ConnectionKeeper.VerifyPacketAcknowledgement(
+ suite.chainA.GetContext(), connection, malleateHeight(proofHeight, tc.heightDiff), proof,
+ packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence(), ack,
+ )
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+}
+
+// TestVerifyPacketReceiptAbsence has chainA verify the receipt
+// absence on channelB. The channels on chainA and chainB are fully opened and
+// a packet is sent from chainA to chainB and not received.
+func (suite *KeeperTestSuite) TestVerifyPacketReceiptAbsence() {
+ cases := []struct {
+ msg string
+ changeClientID bool
+ recvAck bool
+ heightDiff uint64
+ delayPeriod uint64
+ expPass bool
+ }{
+ {"verification success", false, false, 0, 0, true},
+ {"verification success: delay period passed", false, false, 0, uint64(1 * time.Second.Nanoseconds()), true},
+ {"delay period has not passed", false, false, 0, uint64(1 * time.Hour.Nanoseconds()), false},
+ {"client state not found - changed client ID", true, false, 0, 0, false},
+ {"consensus state not found - increased proof height", false, false, 5, 0, false},
+		{"verification failed - packet was received", false, true, 0, 0, false},
+ }
+
+ for _, tc := range cases {
+ tc := tc
+
+ suite.Run(tc.msg, func() {
+ suite.SetupTest() // reset
+
+ clientA, clientB, connA, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
+
+ connection := suite.chainA.GetConnection(connA)
+ connection.DelayPeriod = tc.delayPeriod
+ if tc.changeClientID {
+ connection.ClientId = ibctesting.InvalidID
+ }
+
+ // send, only receive if specified
+ packet := channeltypes.NewPacket(ibctesting.TestHash, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, defaultTimeoutHeight, 0)
+ err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ suite.Require().NoError(err)
+
+ if tc.recvAck {
+				// increment receiving chain's (chainB) time by 2 hours to always pass receive
+ suite.coordinator.IncrementTimeBy(time.Hour * 2)
+ suite.coordinator.CommitBlock(suite.chainB)
+
+ err = suite.coordinator.RecvPacket(suite.chainA, suite.chainB, clientA, packet)
+ suite.Require().NoError(err)
+ } else {
+ // need to update height to prove absence
+ suite.coordinator.CommitBlock(suite.chainA, suite.chainB)
+ suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ }
+
+ packetReceiptKey := host.PacketReceiptKey(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence())
+ proof, proofHeight := suite.chainB.QueryProof(packetReceiptKey)
+
+ err = suite.chainA.App.IBCKeeper.ConnectionKeeper.VerifyPacketReceiptAbsence(
+ suite.chainA.GetContext(), connection, malleateHeight(proofHeight, tc.heightDiff), proof,
+ packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence(),
+ )
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+}
+
+// TestVerifyNextSequenceRecv has chainA verify the next sequence receive on
+// channelB. The channels on chainA and chainB are fully opened and a packet
+// is sent from chainA to chainB and received.
+func (suite *KeeperTestSuite) TestVerifyNextSequenceRecv() {
+ cases := []struct {
+ msg string
+ changeClientID bool
+ offsetSeq uint64
+ heightDiff uint64
+ delayPeriod uint64
+ expPass bool
+ }{
+ {"verification success", false, 0, 0, 0, true},
+ {"verification success: delay period passed", false, 0, 0, uint64(1 * time.Second.Nanoseconds()), true},
+ {"delay period has not passed", false, 0, 0, uint64(1 * time.Hour.Nanoseconds()), false},
+		{"client state not found - changed client ID", true, 0, 0, 0, false},
+ {"consensus state not found - increased proof height", false, 0, 5, 0, false},
+ {"verification failed - wrong expected next seq recv", false, 1, 0, 0, false},
+ }
+
+ for _, tc := range cases {
+ tc := tc
+
+ suite.Run(tc.msg, func() {
+ suite.SetupTest() // reset
+
+ clientA, clientB, connA, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
+
+ connection := suite.chainA.GetConnection(connA)
+ connection.DelayPeriod = tc.delayPeriod
+ if tc.changeClientID {
+ connection.ClientId = ibctesting.InvalidID
+ }
+
+ // send and receive packet
+ packet := channeltypes.NewPacket(ibctesting.TestHash, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, defaultTimeoutHeight, 0)
+ err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ suite.Require().NoError(err)
+
+			// increment receiving chain's (chainB) time by 2 hours to always pass receive
+ suite.coordinator.IncrementTimeBy(time.Hour * 2)
+ suite.coordinator.CommitBlock(suite.chainB)
+
+ err = suite.coordinator.RecvPacket(suite.chainA, suite.chainB, clientA, packet)
+ suite.Require().NoError(err)
+
+ nextSeqRecvKey := host.NextSequenceRecvKey(packet.GetDestPort(), packet.GetDestChannel())
+ proof, proofHeight := suite.chainB.QueryProof(nextSeqRecvKey)
+
+ err = suite.chainA.App.IBCKeeper.ConnectionKeeper.VerifyNextSequenceRecv(
+ suite.chainA.GetContext(), connection, malleateHeight(proofHeight, tc.heightDiff), proof,
+ packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence()+tc.offsetSeq,
+ )
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+}
+
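+// malleateHeight returns the given height increased by diff. A non-zero diff makes
+// the proof height reference a consensus state that was never stored, exercising the
+// "consensus state not found" cases above.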
+func malleateHeight(height exported.Height, diff uint64) exported.Height {
+ return clienttypes.NewHeight(height.GetRevisionNumber(), height.GetRevisionHeight()+diff)
+}
diff --git a/core/03-connection/module.go b/core/03-connection/module.go
new file mode 100644
index 00000000..6100caa4
--- /dev/null
+++ b/core/03-connection/module.go
@@ -0,0 +1,29 @@
+package connection
+
+import (
+ "github.com/gogo/protobuf/grpc"
+ "github.com/spf13/cobra"
+
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/client/cli"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
+)
+
+// Name returns the IBC connection ICS name.
+func Name() string {
+ return types.SubModuleName
+}
+
+// GetTxCmd returns the root tx command for the IBC connections.
+func GetTxCmd() *cobra.Command {
+ return cli.NewTxCmd()
+}
+
+// GetQueryCmd returns the root query command for the IBC connections.
+func GetQueryCmd() *cobra.Command {
+ return cli.GetQueryCmd()
+}
+
+// RegisterQueryService registers the gRPC query service for IBC connections.
+func RegisterQueryService(server grpc.Server, queryServer types.QueryServer) {
+ types.RegisterQueryServer(server, queryServer)
+}
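+
+// Illustrative only: an application is expected to attach these commands to its
+// parent IBC commands, e.g. (ibcQueryCmd and ibcTxCmd are placeholder names):
+//
+//   ibcQueryCmd.AddCommand(connection.GetQueryCmd())
+//   ibcTxCmd.AddCommand(connection.GetTxCmd())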
diff --git a/core/03-connection/simulation/decoder.go b/core/03-connection/simulation/decoder.go
new file mode 100644
index 00000000..ef988a10
--- /dev/null
+++ b/core/03-connection/simulation/decoder.go
@@ -0,0 +1,32 @@
+package simulation
+
+import (
+ "bytes"
+ "fmt"
+
+ "github.com/cosmos/cosmos-sdk/codec"
+ "github.com/cosmos/cosmos-sdk/types/kv"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
+ host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+)
+
+// NewDecodeStore unmarshals the KVPair's Value to the corresponding connection type
+// and returns a string describing the decoded values of both pairs.
+func NewDecodeStore(cdc codec.BinaryMarshaler, kvA, kvB kv.Pair) (string, bool) {
+ switch {
+ case bytes.HasPrefix(kvA.Key, host.KeyClientStorePrefix) && bytes.HasSuffix(kvA.Key, []byte(host.KeyConnectionPrefix)):
+ var clientConnectionsA, clientConnectionsB types.ClientPaths
+ cdc.MustUnmarshalBinaryBare(kvA.Value, &clientConnectionsA)
+ cdc.MustUnmarshalBinaryBare(kvB.Value, &clientConnectionsB)
+ return fmt.Sprintf("ClientPaths A: %v\nClientPaths B: %v", clientConnectionsA, clientConnectionsB), true
+
+ case bytes.HasPrefix(kvA.Key, []byte(host.KeyConnectionPrefix)):
+ var connectionA, connectionB types.ConnectionEnd
+ cdc.MustUnmarshalBinaryBare(kvA.Value, &connectionA)
+ cdc.MustUnmarshalBinaryBare(kvB.Value, &connectionB)
+ return fmt.Sprintf("ConnectionEnd A: %v\nConnectionEnd B: %v", connectionA, connectionB), true
+
+ default:
+ return "", false
+ }
+}
diff --git a/core/03-connection/simulation/decoder_test.go b/core/03-connection/simulation/decoder_test.go
new file mode 100644
index 00000000..673bf640
--- /dev/null
+++ b/core/03-connection/simulation/decoder_test.go
@@ -0,0 +1,69 @@
+package simulation_test
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/cosmos/cosmos-sdk/simapp"
+ "github.com/cosmos/cosmos-sdk/types/kv"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/simulation"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
+ host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+)
+
+func TestDecodeStore(t *testing.T) {
+ app := simapp.Setup(false)
+ cdc := app.AppCodec()
+
+ connectionID := "connectionidone"
+
+ connection := types.ConnectionEnd{
+ ClientId: "clientidone",
+ Versions: types.ExportedVersionsToProto(types.GetCompatibleVersions()),
+ }
+
+ paths := types.ClientPaths{
+ Paths: []string{connectionID},
+ }
+
+ kvPairs := kv.Pairs{
+ Pairs: []kv.Pair{
+ {
+ Key: host.ClientConnectionsKey(connection.ClientId),
+ Value: cdc.MustMarshalBinaryBare(&paths),
+ },
+ {
+ Key: host.ConnectionKey(connectionID),
+ Value: cdc.MustMarshalBinaryBare(&connection),
+ },
+ {
+ Key: []byte{0x99},
+ Value: []byte{0x99},
+ },
+ },
+ }
+ tests := []struct {
+ name string
+ expectedLog string
+ }{
+ {"ClientPaths", fmt.Sprintf("ClientPaths A: %v\nClientPaths B: %v", paths, paths)},
+ {"ConnectionEnd", fmt.Sprintf("ConnectionEnd A: %v\nConnectionEnd B: %v", connection, connection)},
+ {"other", ""},
+ }
+
+ for i, tt := range tests {
+ i, tt := i, tt
+ t.Run(tt.name, func(t *testing.T) {
+ res, found := simulation.NewDecodeStore(cdc, kvPairs.Pairs[i], kvPairs.Pairs[i])
+ if i == len(tests)-1 {
+ require.False(t, found, string(kvPairs.Pairs[i].Key))
+ require.Empty(t, res, string(kvPairs.Pairs[i].Key))
+ } else {
+ require.True(t, found, string(kvPairs.Pairs[i].Key))
+ require.Equal(t, tt.expectedLog, res, string(kvPairs.Pairs[i].Key))
+ }
+ })
+ }
+}
diff --git a/core/03-connection/simulation/genesis.go b/core/03-connection/simulation/genesis.go
new file mode 100644
index 00000000..43b08237
--- /dev/null
+++ b/core/03-connection/simulation/genesis.go
@@ -0,0 +1,13 @@
+package simulation
+
+import (
+ "math/rand"
+
+ simtypes "github.com/cosmos/cosmos-sdk/types/simulation"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
+)
+
+// GenConnectionGenesis returns the default connection genesis state.
+func GenConnectionGenesis(_ *rand.Rand, _ []simtypes.Account) types.GenesisState {
+ return types.DefaultGenesisState()
+}
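
A quick sketch of invoking the generator: it ignores both the randomness source and the simulation accounts and always returns the default genesis state (the seed below is arbitrary):

package main

import (
	"fmt"
	"math/rand"

	simtypes "github.com/cosmos/cosmos-sdk/types/simulation"
	"github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/simulation"
)

func main() {
	r := rand.New(rand.NewSource(1))
	var accs []simtypes.Account // unused by the generator

	gs := simulation.GenConnectionGenesis(r, accs)
	fmt.Println(gs)
}
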
diff --git a/core/03-connection/types/codec.go b/core/03-connection/types/codec.go
new file mode 100644
index 00000000..6105fa9e
--- /dev/null
+++ b/core/03-connection/types/codec.go
@@ -0,0 +1,47 @@
+package types
+
+import (
+ "github.com/cosmos/cosmos-sdk/codec"
+ codectypes "github.com/cosmos/cosmos-sdk/codec/types"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/types/msgservice"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+)
+
+// RegisterInterfaces registers the ibc connection submodule interface implementations
+// to protobuf Any.
+func RegisterInterfaces(registry codectypes.InterfaceRegistry) {
+ registry.RegisterInterface(
+ "ibc.core.connection.v1.ConnectionI",
+ (*exported.ConnectionI)(nil),
+ &ConnectionEnd{},
+ )
+ registry.RegisterInterface(
+ "ibc.core.connection.v1.CounterpartyConnectionI",
+ (*exported.CounterpartyConnectionI)(nil),
+ &Counterparty{},
+ )
+ registry.RegisterInterface(
+ "ibc.core.connection.v1.Version",
+ (*exported.Version)(nil),
+ &Version{},
+ )
+ registry.RegisterImplementations(
+ (*sdk.Msg)(nil),
+ &MsgConnectionOpenInit{},
+ &MsgConnectionOpenTry{},
+ &MsgConnectionOpenAck{},
+ &MsgConnectionOpenConfirm{},
+ )
+
+ msgservice.RegisterMsgServiceDesc(registry, &_Msg_serviceDesc)
+}
+
+var (
+ // SubModuleCdc references the global x/ibc/core/03-connection module codec. Note, the codec should
+ // ONLY be used in certain instances of tests and for JSON encoding.
+ //
+ // The actual codec used for serialization should be provided to x/ibc/core/03-connection and
+ // defined at the application level.
+ SubModuleCdc = codec.NewProtoCodec(codectypes.NewInterfaceRegistry())
+)
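
In an application, RegisterInterfaces is normally invoked by the parent IBC module against the app-level interface registry. A minimal sketch on a standalone registry, packing a Version into a protobuf Any and unpacking it back through the registered interface (relies only on the registrations above; the identifier and feature strings are illustrative):

package main

import (
	"fmt"

	codectypes "github.com/cosmos/cosmos-sdk/codec/types"
	"github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
	"github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
)

func main() {
	registry := codectypes.NewInterfaceRegistry()
	types.RegisterInterfaces(registry)

	v := &types.Version{Identifier: "1", Features: []string{"ORDER_ORDERED", "ORDER_UNORDERED"}}

	// Pack the concrete Version into an Any, then unpack it through the
	// exported.Version interface registered above.
	protoAny, err := codectypes.NewAnyWithValue(v)
	if err != nil {
		panic(err)
	}

	var unpacked exported.Version
	if err := registry.UnpackAny(protoAny, &unpacked); err != nil {
		panic(err)
	}
	fmt.Println(unpacked.GetIdentifier(), unpacked.GetFeatures())
}
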
diff --git a/core/03-connection/types/connection.go b/core/03-connection/types/connection.go
new file mode 100644
index 00000000..197af83c
--- /dev/null
+++ b/core/03-connection/types/connection.go
@@ -0,0 +1,127 @@
+package types
+
+import (
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
+ host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+)
+
+var _ exported.ConnectionI = (*ConnectionEnd)(nil)
+
+// NewConnectionEnd creates a new ConnectionEnd instance.
+func NewConnectionEnd(state State, clientID string, counterparty Counterparty, versions []*Version, delayPeriod uint64) ConnectionEnd {
+ return ConnectionEnd{
+ ClientId: clientID,
+ Versions: versions,
+ State: state,
+ Counterparty: counterparty,
+ DelayPeriod: delayPeriod,
+ }
+}
+
+// GetState implements the Connection interface
+func (c ConnectionEnd) GetState() int32 {
+ return int32(c.State)
+}
+
+// GetClientID implements the Connection interface
+func (c ConnectionEnd) GetClientID() string {
+ return c.ClientId
+}
+
+// GetCounterparty implements the Connection interface
+func (c ConnectionEnd) GetCounterparty() exported.CounterpartyConnectionI {
+ return c.Counterparty
+}
+
+// GetVersions implements the Connection interface
+func (c ConnectionEnd) GetVersions() []exported.Version {
+ return ProtoVersionsToExported(c.Versions)
+}
+
+// GetDelayPeriod implements the Connection interface
+func (c ConnectionEnd) GetDelayPeriod() uint64 {
+ return c.DelayPeriod
+}
+
+// ValidateBasic implements the Connection interface.
+// NOTE: the protocol allows the connection and client IDs to match the
+// counterparty's.
+func (c ConnectionEnd) ValidateBasic() error {
+ if err := host.ClientIdentifierValidator(c.ClientId); err != nil {
+ return sdkerrors.Wrap(err, "invalid client ID")
+ }
+ if len(c.Versions) == 0 {
+ return sdkerrors.Wrap(sdkerrors.ErrInvalidVersion, "empty connection versions")
+ }
+ for _, version := range c.Versions {
+ if err := ValidateVersion(version); err != nil {
+ return err
+ }
+ }
+ return c.Counterparty.ValidateBasic()
+}
+
+var _ exported.CounterpartyConnectionI = (*Counterparty)(nil)
+
+// NewCounterparty creates a new Counterparty instance.
+func NewCounterparty(clientID, connectionID string, prefix commitmenttypes.MerklePrefix) Counterparty {
+ return Counterparty{
+ ClientId: clientID,
+ ConnectionId: connectionID,
+ Prefix: prefix,
+ }
+}
+
+// GetClientID implements the CounterpartyConnectionI interface
+func (c Counterparty) GetClientID() string {
+ return c.ClientId
+}
+
+// GetConnectionID implements the CounterpartyConnectionI interface
+func (c Counterparty) GetConnectionID() string {
+ return c.ConnectionId
+}
+
+// GetPrefix implements the CounterpartyConnectionI interface
+func (c Counterparty) GetPrefix() exported.Prefix {
+ return &c.Prefix
+}
+
+// ValidateBasic performs a basic validation check of the identifiers and prefix
+func (c Counterparty) ValidateBasic() error {
+ if c.ConnectionId != "" {
+ if err := host.ConnectionIdentifierValidator(c.ConnectionId); err != nil {
+ return sdkerrors.Wrap(err, "invalid counterparty connection ID")
+ }
+ }
+ if err := host.ClientIdentifierValidator(c.ClientId); err != nil {
+ return sdkerrors.Wrap(err, "invalid counterparty client ID")
+ }
+ if c.Prefix.Empty() {
+ return sdkerrors.Wrap(ErrInvalidCounterparty, "counterparty prefix cannot be empty")
+ }
+ return nil
+}
+
+// NewIdentifiedConnection creates a new IdentifiedConnection instance
+func NewIdentifiedConnection(connectionID string, conn ConnectionEnd) IdentifiedConnection {
+ return IdentifiedConnection{
+ Id: connectionID,
+ ClientId: conn.ClientId,
+ Versions: conn.Versions,
+ State: conn.State,
+ Counterparty: conn.Counterparty,
+ DelayPeriod: conn.DelayPeriod,
+ }
+}
+
+// ValidateBasic performs a basic validation of the connection identifier and connection fields.
+func (ic IdentifiedConnection) ValidateBasic() error {
+ if err := host.ConnectionIdentifierValidator(ic.Id); err != nil {
+ return sdkerrors.Wrap(err, "invalid connection ID")
+ }
+ connection := NewConnectionEnd(ic.State, ic.ClientId, ic.Counterparty, ic.Versions, ic.DelayPeriod)
+ return connection.ValidateBasic()
+}
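
A short sketch of how these constructors and the ValidateBasic checks compose (client and connection identifiers below are illustrative but well-formed):

package main

import (
	"fmt"

	"github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
	commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
)

func main() {
	prefix := commitmenttypes.NewMerklePrefix([]byte("ibc"))
	counterparty := types.NewCounterparty("07-tendermint-0", "connection-0", prefix)

	// INIT-state end with the default compatible versions and no delay period.
	conn := types.NewConnectionEnd(
		types.INIT,
		"07-tendermint-1",
		counterparty,
		types.ExportedVersionsToProto(types.GetCompatibleVersions()),
		0,
	)
	if err := conn.ValidateBasic(); err != nil {
		fmt.Println("invalid connection:", err)
		return
	}

	identified := types.NewIdentifiedConnection("connection-1", conn)
	fmt.Println(identified.ValidateBasic()) // <nil> when all identifiers are well-formed
}
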
diff --git a/core/03-connection/types/connection.pb.go b/core/03-connection/types/connection.pb.go
new file mode 100644
index 00000000..ec417b75
--- /dev/null
+++ b/core/03-connection/types/connection.pb.go
@@ -0,0 +1,1801 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: ibcgo/core/connection/v1/connection.proto
+
+package types
+
+import (
+ fmt "fmt"
+ types "github.com/cosmos/ibc-go/core/23-commitment/types"
+ _ "github.com/gogo/protobuf/gogoproto"
+ proto "github.com/gogo/protobuf/proto"
+ io "io"
+ math "math"
+ math_bits "math/bits"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+// State defines if a connection is in one of the following states:
+// INIT, TRYOPEN, OPEN or UNINITIALIZED.
+type State int32
+
+const (
+ // Default State
+ UNINITIALIZED State = 0
+ // A connection end has just started the opening handshake.
+ INIT State = 1
+ // A connection end has acknowledged the handshake step on the counterparty
+ // chain.
+ TRYOPEN State = 2
+ // A connection end has completed the handshake.
+ OPEN State = 3
+)
+
+var State_name = map[int32]string{
+ 0: "STATE_UNINITIALIZED_UNSPECIFIED",
+ 1: "STATE_INIT",
+ 2: "STATE_TRYOPEN",
+ 3: "STATE_OPEN",
+}
+
+var State_value = map[string]int32{
+ "STATE_UNINITIALIZED_UNSPECIFIED": 0,
+ "STATE_INIT": 1,
+ "STATE_TRYOPEN": 2,
+ "STATE_OPEN": 3,
+}
+
+func (x State) String() string {
+ return proto.EnumName(State_name, int32(x))
+}
+
+func (State) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_278e9c8044b4f86b, []int{0}
+}
+
+// ConnectionEnd defines a stateful object on a chain connected to another
+// separate one.
+// NOTE: there must only be 2 defined ConnectionEnds to establish
+// a connection between two chains.
+type ConnectionEnd struct {
+ // client associated with this connection.
+ ClientId string `protobuf:"bytes,1,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty" yaml:"client_id"`
+ // IBC version which can be utilised to determine encodings or protocols for
+ // channels or packets utilising this connection.
+ Versions []*Version `protobuf:"bytes,2,rep,name=versions,proto3" json:"versions,omitempty"`
+ // current state of the connection end.
+ State State `protobuf:"varint,3,opt,name=state,proto3,enum=ibcgo.core.connection.v1.State" json:"state,omitempty"`
+ // counterparty chain associated with this connection.
+ Counterparty Counterparty `protobuf:"bytes,4,opt,name=counterparty,proto3" json:"counterparty"`
+ // delay period that must pass before a consensus state can be used for
+ // packet-verification NOTE: delay period logic is only implemented by some
+ // clients.
+ DelayPeriod uint64 `protobuf:"varint,5,opt,name=delay_period,json=delayPeriod,proto3" json:"delay_period,omitempty" yaml:"delay_period"`
+}
+
+func (m *ConnectionEnd) Reset() { *m = ConnectionEnd{} }
+func (m *ConnectionEnd) String() string { return proto.CompactTextString(m) }
+func (*ConnectionEnd) ProtoMessage() {}
+func (*ConnectionEnd) Descriptor() ([]byte, []int) {
+ return fileDescriptor_278e9c8044b4f86b, []int{0}
+}
+func (m *ConnectionEnd) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ConnectionEnd) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_ConnectionEnd.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *ConnectionEnd) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ConnectionEnd.Merge(m, src)
+}
+func (m *ConnectionEnd) XXX_Size() int {
+ return m.Size()
+}
+func (m *ConnectionEnd) XXX_DiscardUnknown() {
+ xxx_messageInfo_ConnectionEnd.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ConnectionEnd proto.InternalMessageInfo
+
+// IdentifiedConnection defines a connection with additional connection
+// identifier field.
+type IdentifiedConnection struct {
+ // connection identifier.
+ Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty" yaml:"id"`
+ // client associated with this connection.
+ ClientId string `protobuf:"bytes,2,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty" yaml:"client_id"`
+ // IBC version which can be utilised to determine encodings or protocols for
+ // channels or packets utilising this connection
+ Versions []*Version `protobuf:"bytes,3,rep,name=versions,proto3" json:"versions,omitempty"`
+ // current state of the connection end.
+ State State `protobuf:"varint,4,opt,name=state,proto3,enum=ibcgo.core.connection.v1.State" json:"state,omitempty"`
+ // counterparty chain associated with this connection.
+ Counterparty Counterparty `protobuf:"bytes,5,opt,name=counterparty,proto3" json:"counterparty"`
+ // delay period associated with this connection.
+ DelayPeriod uint64 `protobuf:"varint,6,opt,name=delay_period,json=delayPeriod,proto3" json:"delay_period,omitempty" yaml:"delay_period"`
+}
+
+func (m *IdentifiedConnection) Reset() { *m = IdentifiedConnection{} }
+func (m *IdentifiedConnection) String() string { return proto.CompactTextString(m) }
+func (*IdentifiedConnection) ProtoMessage() {}
+func (*IdentifiedConnection) Descriptor() ([]byte, []int) {
+ return fileDescriptor_278e9c8044b4f86b, []int{1}
+}
+func (m *IdentifiedConnection) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *IdentifiedConnection) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_IdentifiedConnection.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *IdentifiedConnection) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_IdentifiedConnection.Merge(m, src)
+}
+func (m *IdentifiedConnection) XXX_Size() int {
+ return m.Size()
+}
+func (m *IdentifiedConnection) XXX_DiscardUnknown() {
+ xxx_messageInfo_IdentifiedConnection.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_IdentifiedConnection proto.InternalMessageInfo
+
+// Counterparty defines the counterparty chain associated with a connection end.
+type Counterparty struct {
+ // identifies the client on the counterparty chain associated with a given
+ // connection.
+ ClientId string `protobuf:"bytes,1,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty" yaml:"client_id"`
+ // identifies the connection end on the counterparty chain associated with a
+ // given connection.
+ ConnectionId string `protobuf:"bytes,2,opt,name=connection_id,json=connectionId,proto3" json:"connection_id,omitempty" yaml:"connection_id"`
+ // commitment merkle prefix of the counterparty chain.
+ Prefix types.MerklePrefix `protobuf:"bytes,3,opt,name=prefix,proto3" json:"prefix"`
+}
+
+func (m *Counterparty) Reset() { *m = Counterparty{} }
+func (m *Counterparty) String() string { return proto.CompactTextString(m) }
+func (*Counterparty) ProtoMessage() {}
+func (*Counterparty) Descriptor() ([]byte, []int) {
+ return fileDescriptor_278e9c8044b4f86b, []int{2}
+}
+func (m *Counterparty) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Counterparty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_Counterparty.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *Counterparty) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Counterparty.Merge(m, src)
+}
+func (m *Counterparty) XXX_Size() int {
+ return m.Size()
+}
+func (m *Counterparty) XXX_DiscardUnknown() {
+ xxx_messageInfo_Counterparty.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Counterparty proto.InternalMessageInfo
+
+// ClientPaths defines all the connection paths for a client state.
+type ClientPaths struct {
+ // list of connection paths
+ Paths []string `protobuf:"bytes,1,rep,name=paths,proto3" json:"paths,omitempty"`
+}
+
+func (m *ClientPaths) Reset() { *m = ClientPaths{} }
+func (m *ClientPaths) String() string { return proto.CompactTextString(m) }
+func (*ClientPaths) ProtoMessage() {}
+func (*ClientPaths) Descriptor() ([]byte, []int) {
+ return fileDescriptor_278e9c8044b4f86b, []int{3}
+}
+func (m *ClientPaths) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ClientPaths) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_ClientPaths.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *ClientPaths) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ClientPaths.Merge(m, src)
+}
+func (m *ClientPaths) XXX_Size() int {
+ return m.Size()
+}
+func (m *ClientPaths) XXX_DiscardUnknown() {
+ xxx_messageInfo_ClientPaths.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ClientPaths proto.InternalMessageInfo
+
+func (m *ClientPaths) GetPaths() []string {
+ if m != nil {
+ return m.Paths
+ }
+ return nil
+}
+
+// ConnectionPaths defines all the connection paths for a given client state.
+type ConnectionPaths struct {
+ // client state unique identifier
+ ClientId string `protobuf:"bytes,1,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty" yaml:"client_id"`
+ // list of connection paths
+ Paths []string `protobuf:"bytes,2,rep,name=paths,proto3" json:"paths,omitempty"`
+}
+
+func (m *ConnectionPaths) Reset() { *m = ConnectionPaths{} }
+func (m *ConnectionPaths) String() string { return proto.CompactTextString(m) }
+func (*ConnectionPaths) ProtoMessage() {}
+func (*ConnectionPaths) Descriptor() ([]byte, []int) {
+ return fileDescriptor_278e9c8044b4f86b, []int{4}
+}
+func (m *ConnectionPaths) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ConnectionPaths) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_ConnectionPaths.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *ConnectionPaths) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ConnectionPaths.Merge(m, src)
+}
+func (m *ConnectionPaths) XXX_Size() int {
+ return m.Size()
+}
+func (m *ConnectionPaths) XXX_DiscardUnknown() {
+ xxx_messageInfo_ConnectionPaths.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ConnectionPaths proto.InternalMessageInfo
+
+func (m *ConnectionPaths) GetClientId() string {
+ if m != nil {
+ return m.ClientId
+ }
+ return ""
+}
+
+func (m *ConnectionPaths) GetPaths() []string {
+ if m != nil {
+ return m.Paths
+ }
+ return nil
+}
+
+// Version defines the versioning scheme used to negotiate the IBC version in
+// the connection handshake.
+type Version struct {
+ // unique version identifier
+ Identifier string `protobuf:"bytes,1,opt,name=identifier,proto3" json:"identifier,omitempty"`
+ // list of features compatible with the specified identifier
+ Features []string `protobuf:"bytes,2,rep,name=features,proto3" json:"features,omitempty"`
+}
+
+func (m *Version) Reset() { *m = Version{} }
+func (m *Version) String() string { return proto.CompactTextString(m) }
+func (*Version) ProtoMessage() {}
+func (*Version) Descriptor() ([]byte, []int) {
+ return fileDescriptor_278e9c8044b4f86b, []int{5}
+}
+func (m *Version) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Version) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_Version.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *Version) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Version.Merge(m, src)
+}
+func (m *Version) XXX_Size() int {
+ return m.Size()
+}
+func (m *Version) XXX_DiscardUnknown() {
+ xxx_messageInfo_Version.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Version proto.InternalMessageInfo
+
+func init() {
+ proto.RegisterEnum("ibcgo.core.connection.v1.State", State_name, State_value)
+ proto.RegisterType((*ConnectionEnd)(nil), "ibcgo.core.connection.v1.ConnectionEnd")
+ proto.RegisterType((*IdentifiedConnection)(nil), "ibcgo.core.connection.v1.IdentifiedConnection")
+ proto.RegisterType((*Counterparty)(nil), "ibcgo.core.connection.v1.Counterparty")
+ proto.RegisterType((*ClientPaths)(nil), "ibcgo.core.connection.v1.ClientPaths")
+ proto.RegisterType((*ConnectionPaths)(nil), "ibcgo.core.connection.v1.ConnectionPaths")
+ proto.RegisterType((*Version)(nil), "ibcgo.core.connection.v1.Version")
+}
+
+func init() {
+ proto.RegisterFile("ibcgo/core/connection/v1/connection.proto", fileDescriptor_278e9c8044b4f86b)
+}
+
+var fileDescriptor_278e9c8044b4f86b = []byte{
+ // 648 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0xc1, 0x6e, 0xda, 0x4a,
+ 0x14, 0xf5, 0x18, 0x93, 0xc0, 0x10, 0xde, 0xa3, 0x53, 0xa4, 0x5a, 0x96, 0x6a, 0xbb, 0xae, 0x54,
+ 0xd1, 0x4a, 0x81, 0x92, 0xa8, 0x5d, 0x44, 0xea, 0x22, 0x10, 0x2a, 0x59, 0x69, 0x29, 0x72, 0x48,
+ 0xa5, 0x66, 0x83, 0xc0, 0x9e, 0x90, 0x51, 0xc1, 0x83, 0xec, 0x09, 0x2a, 0x7f, 0x10, 0x65, 0xd5,
+ 0x6d, 0x17, 0x91, 0x2a, 0xf5, 0x67, 0xb2, 0xc8, 0x22, 0xcb, 0xae, 0x50, 0x95, 0xfc, 0x01, 0x5f,
+ 0x50, 0xd9, 0x63, 0x8c, 0xd3, 0x8a, 0x45, 0x92, 0xee, 0xee, 0xf1, 0x3d, 0xe7, 0x30, 0xf7, 0xcc,
+ 0x65, 0xe0, 0x73, 0xd2, 0xb3, 0xfb, 0xb4, 0x62, 0x53, 0x0f, 0x57, 0x6c, 0xea, 0xba, 0xd8, 0x66,
+ 0x84, 0xba, 0x95, 0x71, 0x35, 0x81, 0xca, 0x23, 0x8f, 0x32, 0x8a, 0xe4, 0x90, 0x5a, 0x0e, 0xa8,
+ 0xe5, 0x44, 0x73, 0x5c, 0x55, 0x8a, 0x7d, 0xda, 0xa7, 0x21, 0xa9, 0x12, 0x54, 0x9c, 0xaf, 0xdc,
+ 0xb4, 0x1e, 0x0e, 0x09, 0x1b, 0x62, 0x97, 0x71, 0xeb, 0x39, 0xe2, 0x54, 0xe3, 0x42, 0x84, 0xf9,
+ 0x7a, 0x6c, 0xd9, 0x70, 0x1d, 0x54, 0x85, 0x59, 0x7b, 0x40, 0xb0, 0xcb, 0x3a, 0xc4, 0x91, 0x81,
+ 0x0e, 0x4a, 0xd9, 0x5a, 0x71, 0x36, 0xd5, 0x0a, 0x93, 0xee, 0x70, 0xb0, 0x65, 0xc4, 0x2d, 0xc3,
+ 0xca, 0xf0, 0xda, 0x74, 0xd0, 0x1b, 0x98, 0x19, 0x63, 0xcf, 0x27, 0xd4, 0xf5, 0x65, 0x51, 0x4f,
+ 0x95, 0x72, 0x1b, 0x4f, 0xca, 0xcb, 0x8e, 0x5c, 0xfe, 0xc8, 0x99, 0x56, 0x2c, 0x41, 0xaf, 0x60,
+ 0xda, 0x67, 0x5d, 0x86, 0xe5, 0x94, 0x0e, 0x4a, 0xff, 0x6d, 0x68, 0xcb, 0xb5, 0x7b, 0x01, 0xcd,
+ 0xe2, 0x6c, 0xd4, 0x82, 0x6b, 0x36, 0x3d, 0x76, 0x19, 0xf6, 0x46, 0x5d, 0x8f, 0x4d, 0x64, 0x49,
+ 0x07, 0xa5, 0xdc, 0xc6, 0xb3, 0xe5, 0xea, 0x7a, 0x82, 0x5d, 0x93, 0xce, 0xa7, 0x9a, 0x60, 0xdd,
+ 0x70, 0x40, 0x5b, 0x70, 0xcd, 0xc1, 0x83, 0xee, 0xa4, 0x33, 0xc2, 0x1e, 0xa1, 0x8e, 0x9c, 0xd6,
+ 0x41, 0x49, 0xaa, 0x3d, 0x9a, 0x4d, 0xb5, 0x87, 0x7c, 0xfa, 0x64, 0xd7, 0xb0, 0x72, 0x21, 0x6c,
+ 0x85, 0x68, 0x4b, 0x3a, 0xf9, 0xae, 0x09, 0xc6, 0x4c, 0x84, 0x45, 0xd3, 0xc1, 0x2e, 0x23, 0x87,
+ 0x04, 0x3b, 0x8b, 0x60, 0xd1, 0x63, 0x28, 0xc6, 0x71, 0xe6, 0x67, 0x53, 0x2d, 0xcb, 0x0d, 0x83,
+ 0x1c, 0x45, 0xf2, 0x47, 0xe8, 0xe2, 0xad, 0x43, 0x4f, 0xdd, 0x23, 0x74, 0xe9, 0x5e, 0xa1, 0xa7,
+ 0xff, 0x79, 0xe8, 0x2b, 0xb7, 0x0e, 0xfd, 0x02, 0xc0, 0xb5, 0xe4, 0xcf, 0xdc, 0x6d, 0x85, 0xf3,
+ 0x8b, 0x73, 0x2f, 0x2e, 0x41, 0x9e, 0x4d, 0xb5, 0x62, 0x24, 0x4b, 0xb6, 0x8d, 0x60, 0x88, 0x39,
+ 0x36, 0x1d, 0xb4, 0x03, 0x57, 0x46, 0x1e, 0x3e, 0x24, 0x5f, 0xc2, 0x1d, 0xfe, 0x2b, 0x90, 0xf8,
+ 0x4f, 0x37, 0xae, 0x96, 0xdf, 0x63, 0xef, 0xf3, 0x00, 0xb7, 0x42, 0x76, 0x14, 0x48, 0xa4, 0x8d,
+ 0xc6, 0x79, 0x0a, 0x73, 0xf5, 0xf0, 0x58, 0xad, 0x2e, 0x3b, 0xf2, 0x51, 0x11, 0xa6, 0x47, 0x41,
+ 0x21, 0x03, 0x3d, 0x55, 0xca, 0x5a, 0x1c, 0x18, 0x07, 0xf0, 0xff, 0xc5, 0x76, 0x71, 0xe2, 0x1d,
+ 0xa6, 0x8e, 0xbd, 0xc5, 0xa4, 0xf7, 0x2e, 0x5c, 0x8d, 0xf6, 0x05, 0xa9, 0x10, 0x92, 0xf9, 0x3a,
+ 0x7b, 0xdc, 0xd4, 0x4a, 0x7c, 0x41, 0x0a, 0xcc, 0x1c, 0xe2, 0x2e, 0x3b, 0xf6, 0xf0, 0xdc, 0x23,
+ 0xc6, 0x7c, 0x9a, 0x17, 0xdf, 0x00, 0x4c, 0x87, 0x1b, 0x84, 0x5e, 0x43, 0x6d, 0xaf, 0xbd, 0xdd,
+ 0x6e, 0x74, 0xf6, 0x9b, 0x66, 0xd3, 0x6c, 0x9b, 0xdb, 0xef, 0xcc, 0x83, 0xc6, 0x4e, 0x67, 0xbf,
+ 0xb9, 0xd7, 0x6a, 0xd4, 0xcd, 0xb7, 0x66, 0x63, 0xa7, 0x20, 0x28, 0x0f, 0x4e, 0xcf, 0xf4, 0xfc,
+ 0x0d, 0x02, 0x92, 0x21, 0xe4, 0xba, 0xe0, 0x63, 0x01, 0x28, 0x99, 0xd3, 0x33, 0x5d, 0x0a, 0x6a,
+ 0xa4, 0xc2, 0x3c, 0xef, 0xb4, 0xad, 0x4f, 0x1f, 0x5a, 0x8d, 0x66, 0x41, 0x54, 0x72, 0xa7, 0x67,
+ 0xfa, 0x6a, 0x04, 0x17, 0xca, 0xb0, 0x99, 0xe2, 0xca, 0xa0, 0x56, 0xa4, 0x93, 0x1f, 0xaa, 0x50,
+ 0xdb, 0x3d, 0xbf, 0x52, 0xc1, 0xe5, 0x95, 0x0a, 0x7e, 0x5d, 0xa9, 0xe0, 0xeb, 0xb5, 0x2a, 0x5c,
+ 0x5e, 0xab, 0xc2, 0xcf, 0x6b, 0x55, 0x38, 0xa8, 0xf6, 0x09, 0x3b, 0x3a, 0xee, 0x05, 0x57, 0x57,
+ 0xb1, 0xa9, 0x3f, 0xa4, 0x7e, 0x85, 0xf4, 0xec, 0xf5, 0xf9, 0xa3, 0xfa, 0x72, 0x73, 0x3d, 0xf1,
+ 0x64, 0xb3, 0xc9, 0x08, 0xfb, 0xbd, 0x95, 0xf0, 0x41, 0xdd, 0xfc, 0x1d, 0x00, 0x00, 0xff, 0xff,
+ 0x7d, 0x5b, 0xa0, 0xa3, 0xd8, 0x05, 0x00, 0x00,
+}
+
+func (m *ConnectionEnd) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ConnectionEnd) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ConnectionEnd) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.DelayPeriod != 0 {
+ i = encodeVarintConnection(dAtA, i, uint64(m.DelayPeriod))
+ i--
+ dAtA[i] = 0x28
+ }
+ {
+ size, err := m.Counterparty.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintConnection(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ if m.State != 0 {
+ i = encodeVarintConnection(dAtA, i, uint64(m.State))
+ i--
+ dAtA[i] = 0x18
+ }
+ if len(m.Versions) > 0 {
+ for iNdEx := len(m.Versions) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Versions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintConnection(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if len(m.ClientId) > 0 {
+ i -= len(m.ClientId)
+ copy(dAtA[i:], m.ClientId)
+ i = encodeVarintConnection(dAtA, i, uint64(len(m.ClientId)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *IdentifiedConnection) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *IdentifiedConnection) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *IdentifiedConnection) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.DelayPeriod != 0 {
+ i = encodeVarintConnection(dAtA, i, uint64(m.DelayPeriod))
+ i--
+ dAtA[i] = 0x30
+ }
+ {
+ size, err := m.Counterparty.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintConnection(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2a
+ if m.State != 0 {
+ i = encodeVarintConnection(dAtA, i, uint64(m.State))
+ i--
+ dAtA[i] = 0x20
+ }
+ if len(m.Versions) > 0 {
+ for iNdEx := len(m.Versions) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Versions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintConnection(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ if len(m.ClientId) > 0 {
+ i -= len(m.ClientId)
+ copy(dAtA[i:], m.ClientId)
+ i = encodeVarintConnection(dAtA, i, uint64(len(m.ClientId)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Id) > 0 {
+ i -= len(m.Id)
+ copy(dAtA[i:], m.Id)
+ i = encodeVarintConnection(dAtA, i, uint64(len(m.Id)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Counterparty) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Counterparty) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Counterparty) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Prefix.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintConnection(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ if len(m.ConnectionId) > 0 {
+ i -= len(m.ConnectionId)
+ copy(dAtA[i:], m.ConnectionId)
+ i = encodeVarintConnection(dAtA, i, uint64(len(m.ConnectionId)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.ClientId) > 0 {
+ i -= len(m.ClientId)
+ copy(dAtA[i:], m.ClientId)
+ i = encodeVarintConnection(dAtA, i, uint64(len(m.ClientId)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ClientPaths) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ClientPaths) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ClientPaths) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Paths) > 0 {
+ for iNdEx := len(m.Paths) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Paths[iNdEx])
+ copy(dAtA[i:], m.Paths[iNdEx])
+ i = encodeVarintConnection(dAtA, i, uint64(len(m.Paths[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ConnectionPaths) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ConnectionPaths) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ConnectionPaths) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Paths) > 0 {
+ for iNdEx := len(m.Paths) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Paths[iNdEx])
+ copy(dAtA[i:], m.Paths[iNdEx])
+ i = encodeVarintConnection(dAtA, i, uint64(len(m.Paths[iNdEx])))
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if len(m.ClientId) > 0 {
+ i -= len(m.ClientId)
+ copy(dAtA[i:], m.ClientId)
+ i = encodeVarintConnection(dAtA, i, uint64(len(m.ClientId)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Version) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Version) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Version) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Features) > 0 {
+ for iNdEx := len(m.Features) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Features[iNdEx])
+ copy(dAtA[i:], m.Features[iNdEx])
+ i = encodeVarintConnection(dAtA, i, uint64(len(m.Features[iNdEx])))
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if len(m.Identifier) > 0 {
+ i -= len(m.Identifier)
+ copy(dAtA[i:], m.Identifier)
+ i = encodeVarintConnection(dAtA, i, uint64(len(m.Identifier)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintConnection(dAtA []byte, offset int, v uint64) int {
+ offset -= sovConnection(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *ConnectionEnd) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.ClientId)
+ if l > 0 {
+ n += 1 + l + sovConnection(uint64(l))
+ }
+ if len(m.Versions) > 0 {
+ for _, e := range m.Versions {
+ l = e.Size()
+ n += 1 + l + sovConnection(uint64(l))
+ }
+ }
+ if m.State != 0 {
+ n += 1 + sovConnection(uint64(m.State))
+ }
+ l = m.Counterparty.Size()
+ n += 1 + l + sovConnection(uint64(l))
+ if m.DelayPeriod != 0 {
+ n += 1 + sovConnection(uint64(m.DelayPeriod))
+ }
+ return n
+}
+
+func (m *IdentifiedConnection) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Id)
+ if l > 0 {
+ n += 1 + l + sovConnection(uint64(l))
+ }
+ l = len(m.ClientId)
+ if l > 0 {
+ n += 1 + l + sovConnection(uint64(l))
+ }
+ if len(m.Versions) > 0 {
+ for _, e := range m.Versions {
+ l = e.Size()
+ n += 1 + l + sovConnection(uint64(l))
+ }
+ }
+ if m.State != 0 {
+ n += 1 + sovConnection(uint64(m.State))
+ }
+ l = m.Counterparty.Size()
+ n += 1 + l + sovConnection(uint64(l))
+ if m.DelayPeriod != 0 {
+ n += 1 + sovConnection(uint64(m.DelayPeriod))
+ }
+ return n
+}
+
+func (m *Counterparty) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.ClientId)
+ if l > 0 {
+ n += 1 + l + sovConnection(uint64(l))
+ }
+ l = len(m.ConnectionId)
+ if l > 0 {
+ n += 1 + l + sovConnection(uint64(l))
+ }
+ l = m.Prefix.Size()
+ n += 1 + l + sovConnection(uint64(l))
+ return n
+}
+
+func (m *ClientPaths) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Paths) > 0 {
+ for _, s := range m.Paths {
+ l = len(s)
+ n += 1 + l + sovConnection(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ConnectionPaths) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.ClientId)
+ if l > 0 {
+ n += 1 + l + sovConnection(uint64(l))
+ }
+ if len(m.Paths) > 0 {
+ for _, s := range m.Paths {
+ l = len(s)
+ n += 1 + l + sovConnection(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *Version) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Identifier)
+ if l > 0 {
+ n += 1 + l + sovConnection(uint64(l))
+ }
+ if len(m.Features) > 0 {
+ for _, s := range m.Features {
+ l = len(s)
+ n += 1 + l + sovConnection(uint64(l))
+ }
+ }
+ return n
+}
+
+func sovConnection(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozConnection(x uint64) (n int) {
+ return sovConnection(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *ConnectionEnd) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowConnection
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ConnectionEnd: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ConnectionEnd: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClientId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowConnection
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthConnection
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthConnection
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ClientId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Versions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowConnection
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthConnection
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthConnection
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Versions = append(m.Versions, &Version{})
+ if err := m.Versions[len(m.Versions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field State", wireType)
+ }
+ m.State = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowConnection
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.State |= State(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Counterparty", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowConnection
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthConnection
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthConnection
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Counterparty.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DelayPeriod", wireType)
+ }
+ m.DelayPeriod = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowConnection
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.DelayPeriod |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipConnection(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthConnection
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *IdentifiedConnection) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowConnection
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: IdentifiedConnection: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: IdentifiedConnection: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowConnection
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthConnection
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthConnection
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Id = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClientId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowConnection
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthConnection
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthConnection
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ClientId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Versions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowConnection
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthConnection
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthConnection
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Versions = append(m.Versions, &Version{})
+ if err := m.Versions[len(m.Versions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field State", wireType)
+ }
+ m.State = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowConnection
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.State |= State(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Counterparty", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowConnection
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthConnection
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthConnection
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Counterparty.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 6:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DelayPeriod", wireType)
+ }
+ m.DelayPeriod = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowConnection
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.DelayPeriod |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipConnection(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthConnection
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Counterparty) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowConnection
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Counterparty: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Counterparty: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClientId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowConnection
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthConnection
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthConnection
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ClientId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ConnectionId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowConnection
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthConnection
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthConnection
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ConnectionId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Prefix", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowConnection
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthConnection
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthConnection
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Prefix.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipConnection(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthConnection
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ClientPaths) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowConnection
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ClientPaths: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ClientPaths: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowConnection
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthConnection
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthConnection
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Paths = append(m.Paths, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipConnection(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthConnection
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ConnectionPaths) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowConnection
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ConnectionPaths: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ConnectionPaths: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClientId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowConnection
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthConnection
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthConnection
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ClientId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowConnection
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthConnection
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthConnection
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Paths = append(m.Paths, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipConnection(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthConnection
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Version) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowConnection
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Version: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Version: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Identifier", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowConnection
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthConnection
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthConnection
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Identifier = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Features", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowConnection
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthConnection
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthConnection
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Features = append(m.Features, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipConnection(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthConnection
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipConnection(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowConnection
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowConnection
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowConnection
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthConnection
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupConnection
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthConnection
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthConnection = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowConnection = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupConnection = fmt.Errorf("proto: unexpected end of group")
+)
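
The varint helpers at the bottom of the generated file size protobuf fields: sovConnection(x) is (bits.Len64(x|1) + 6) / 7, i.e. the number of 7-bit groups (bytes) a value needs on the wire. A standalone sketch of the same arithmetic:

package main

import (
	"fmt"
	"math/bits"
)

// varintSize mirrors the generated sovConnection helper: the number of bytes a
// uint64 occupies as a protobuf varint (7 payload bits per byte).
func varintSize(x uint64) int {
	return (bits.Len64(x|1) + 6) / 7
}

func main() {
	for _, x := range []uint64{0, 1, 127, 128, 16383, 16384} {
		fmt.Printf("varint(%d) = %d byte(s)\n", x, varintSize(x))
	}
	// 0..127 -> 1 byte, 128..16383 -> 2 bytes, 16384.. -> 3 bytes, and so on.
}
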
diff --git a/core/03-connection/types/connection_test.go b/core/03-connection/types/connection_test.go
new file mode 100644
index 00000000..e7e91538
--- /dev/null
+++ b/core/03-connection/types/connection_test.go
@@ -0,0 +1,121 @@
+package types_test
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
+ commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
+ ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+)
+
+var (
+ chainID = "gaiamainnet"
+ connectionID = "connection-0"
+ clientID = "clientidone"
+ connectionID2 = "connectionidtwo"
+ clientID2 = "clientidtwo"
+ invalidConnectionID = "(invalidConnectionID)"
+ clientHeight = clienttypes.NewHeight(0, 6)
+)
+
+func TestConnectionValidateBasic(t *testing.T) {
+ testCases := []struct {
+ name string
+ connection types.ConnectionEnd
+ expPass bool
+ }{
+ {
+ "valid connection",
+ types.ConnectionEnd{clientID, []*types.Version{ibctesting.ConnectionVersion}, types.INIT, types.Counterparty{clientID2, connectionID2, commitmenttypes.NewMerklePrefix([]byte("prefix"))}, 500},
+ true,
+ },
+ {
+ "invalid client id",
+ types.ConnectionEnd{"(clientID1)", []*types.Version{ibctesting.ConnectionVersion}, types.INIT, types.Counterparty{clientID2, connectionID2, commitmenttypes.NewMerklePrefix([]byte("prefix"))}, 500},
+ false,
+ },
+ {
+ "empty versions",
+ types.ConnectionEnd{clientID, nil, types.INIT, types.Counterparty{clientID2, connectionID2, commitmenttypes.NewMerklePrefix([]byte("prefix"))}, 500},
+ false,
+ },
+ {
+ "invalid version",
+ types.ConnectionEnd{clientID, []*types.Version{{}}, types.INIT, types.Counterparty{clientID2, connectionID2, commitmenttypes.NewMerklePrefix([]byte("prefix"))}, 500},
+ false,
+ },
+ {
+ "invalid counterparty",
+ types.ConnectionEnd{clientID, []*types.Version{ibctesting.ConnectionVersion}, types.INIT, types.Counterparty{clientID2, connectionID2, emptyPrefix}, 500},
+ false,
+ },
+ }
+
+ for i, tc := range testCases {
+ tc := tc
+
+ err := tc.connection.ValidateBasic()
+ if tc.expPass {
+ require.NoError(t, err, "valid test case %d failed: %s", i, tc.name)
+ } else {
+ require.Error(t, err, "invalid test case %d passed: %s", i, tc.name)
+ }
+ }
+}
+
+func TestCounterpartyValidateBasic(t *testing.T) {
+ testCases := []struct {
+ name string
+ counterparty types.Counterparty
+ expPass bool
+ }{
+ {"valid counterparty", types.Counterparty{clientID, connectionID2, commitmenttypes.NewMerklePrefix([]byte("prefix"))}, true},
+ {"invalid client id", types.Counterparty{"(InvalidClient)", connectionID2, commitmenttypes.NewMerklePrefix([]byte("prefix"))}, false},
+ {"invalid connection id", types.Counterparty{clientID, "(InvalidConnection)", commitmenttypes.NewMerklePrefix([]byte("prefix"))}, false},
+ {"invalid prefix", types.Counterparty{clientID, connectionID2, emptyPrefix}, false},
+ }
+
+ for i, tc := range testCases {
+ tc := tc
+
+ err := tc.counterparty.ValidateBasic()
+ if tc.expPass {
+ require.NoError(t, err, "valid test case %d failed: %s", i, tc.name)
+ } else {
+ require.Error(t, err, "invalid test case %d passed: %s", i, tc.name)
+ }
+ }
+}
+
+func TestIdentifiedConnectionValidateBasic(t *testing.T) {
+ testCases := []struct {
+ name string
+ connection types.IdentifiedConnection
+ expPass bool
+ }{
+ {
+ "valid connection",
+ types.NewIdentifiedConnection(clientID, types.ConnectionEnd{clientID, []*types.Version{ibctesting.ConnectionVersion}, types.INIT, types.Counterparty{clientID2, connectionID2, commitmenttypes.NewMerklePrefix([]byte("prefix"))}, 500}),
+ true,
+ },
+ {
+ "invalid connection id",
+ types.NewIdentifiedConnection("(connectionIDONE)", types.ConnectionEnd{clientID, []*types.Version{ibctesting.ConnectionVersion}, types.INIT, types.Counterparty{clientID2, connectionID2, commitmenttypes.NewMerklePrefix([]byte("prefix"))}, 500}),
+ false,
+ },
+ }
+
+ for i, tc := range testCases {
+ tc := tc
+
+ err := tc.connection.ValidateBasic()
+ if tc.expPass {
+ require.NoError(t, err, "valid test case %d failed: %s", i, tc.name)
+ } else {
+ require.Error(t, err, "invalid test case %d passed: %s", i, tc.name)
+ }
+ }
+}
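The tables above build ConnectionEnd values with positional struct literals; the same value can be produced with the NewConnectionEnd and NewCounterparty constructors used elsewhere in this patch. A hedged sketch, assuming it lives in the same types_test package and reuses the variables declared above (the test name is hypothetical):

func TestConnectionEndConstructorSketch(t *testing.T) {
	// Equivalent to the "valid connection" table entry, built via constructors.
	counterparty := types.NewCounterparty(clientID2, connectionID2, commitmenttypes.NewMerklePrefix([]byte("prefix")))
	connection := types.NewConnectionEnd(types.INIT, clientID, counterparty, []*types.Version{ibctesting.ConnectionVersion}, 500)
	require.NoError(t, connection.ValidateBasic())
}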
diff --git a/core/03-connection/types/errors.go b/core/03-connection/types/errors.go
new file mode 100644
index 00000000..107a0e08
--- /dev/null
+++ b/core/03-connection/types/errors.go
@@ -0,0 +1,19 @@
+package types
+
+import (
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+)
+
+// IBC connection sentinel errors
+var (
+ ErrConnectionExists = sdkerrors.Register(SubModuleName, 2, "connection already exists")
+ ErrConnectionNotFound = sdkerrors.Register(SubModuleName, 3, "connection not found")
+ ErrClientConnectionPathsNotFound = sdkerrors.Register(SubModuleName, 4, "light client connection paths not found")
+ ErrConnectionPath = sdkerrors.Register(SubModuleName, 5, "connection path is not associated to the given light client")
+ ErrInvalidConnectionState = sdkerrors.Register(SubModuleName, 6, "invalid connection state")
+ ErrInvalidCounterparty = sdkerrors.Register(SubModuleName, 7, "invalid counterparty connection")
+ ErrInvalidConnection = sdkerrors.Register(SubModuleName, 8, "invalid connection")
+ ErrInvalidVersion = sdkerrors.Register(SubModuleName, 9, "invalid connection version")
+ ErrVersionNegotiationFailed = sdkerrors.Register(SubModuleName, 10, "connection version negotiation failed")
+ ErrInvalidConnectionIdentifier = sdkerrors.Register(SubModuleName, 11, "invalid connection identifier")
+)
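These sentinel errors are intended to be wrapped with call-site context so callers can still match on them, as msgs.go later in this patch does with ErrInvalidCounterparty. A minimal sketch with a hypothetical helper, assuming the same sdkerrors import:

// requireOpen is a hypothetical helper that wraps the sentinel error with the
// offending state, so callers can still match on ErrInvalidConnectionState.
func requireOpen(state State) error {
	if state != OPEN {
		return sdkerrors.Wrapf(ErrInvalidConnectionState, "connection state is %s, expected %s", state, OPEN)
	}
	return nil
}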
diff --git a/core/03-connection/types/events.go b/core/03-connection/types/events.go
new file mode 100644
index 00000000..3cb5997b
--- /dev/null
+++ b/core/03-connection/types/events.go
@@ -0,0 +1,25 @@
+package types
+
+import (
+ "fmt"
+
+ host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+)
+
+// IBC connection events
+const (
+ AttributeKeyConnectionID = "connection_id"
+ AttributeKeyClientID = "client_id"
+ AttributeKeyCounterpartyClientID = "counterparty_client_id"
+ AttributeKeyCounterpartyConnectionID = "counterparty_connection_id"
+)
+
+// IBC connection events vars
+var (
+ EventTypeConnectionOpenInit = MsgConnectionOpenInit{}.Type()
+ EventTypeConnectionOpenTry = MsgConnectionOpenTry{}.Type()
+ EventTypeConnectionOpenAck = MsgConnectionOpenAck{}.Type()
+ EventTypeConnectionOpenConfirm = MsgConnectionOpenConfirm{}.Type()
+
+ AttributeValueCategory = fmt.Sprintf("%s_%s", host.ModuleName, SubModuleName)
+)
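These event types and attribute keys are meant for the handshake handlers (not part of this file). A hedged sketch of such an emit, assuming sdk ("github.com/cosmos/cosmos-sdk/types") is imported and ctx, connectionID, clientID and counterpartyClientID are in scope:

// Hypothetical emit after a successful ConnOpenInit; the event type and
// attribute keys come from the constants above.
ctx.EventManager().EmitEvent(
	sdk.NewEvent(
		EventTypeConnectionOpenInit,
		sdk.NewAttribute(AttributeKeyConnectionID, connectionID),
		sdk.NewAttribute(AttributeKeyClientID, clientID),
		sdk.NewAttribute(AttributeKeyCounterpartyClientID, counterpartyClientID),
		sdk.NewAttribute(sdk.AttributeKeyModule, AttributeValueCategory),
	),
)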
diff --git a/core/03-connection/types/expected_keepers.go b/core/03-connection/types/expected_keepers.go
new file mode 100644
index 00000000..9fc99586
--- /dev/null
+++ b/core/03-connection/types/expected_keepers.go
@@ -0,0 +1,16 @@
+package types
+
+import (
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+)
+
+// ClientKeeper defines the expected IBC client keeper used by the connection submodule
+type ClientKeeper interface {
+ GetClientState(ctx sdk.Context, clientID string) (exported.ClientState, bool)
+ GetClientConsensusState(ctx sdk.Context, clientID string, height exported.Height) (exported.ConsensusState, bool)
+ GetSelfConsensusState(ctx sdk.Context, height exported.Height) (exported.ConsensusState, bool)
+ ValidateSelfClient(ctx sdk.Context, clientState exported.ClientState) error
+ IterateClients(ctx sdk.Context, cb func(string, exported.ClientState) bool)
+ ClientStore(ctx sdk.Context, clientID string) sdk.KVStore
+}
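The connection keeper depends on this interface rather than on the concrete client keeper. A hedged sketch of a consumer, assuming a hypothetical Keeper struct with a clientKeeper ClientKeeper field plus the sdkerrors and 02-client clienttypes imports used elsewhere in this patch:

// ensureClientExists is a hypothetical guard a handshake handler might run
// before writing a new connection for the given client.
func (k Keeper) ensureClientExists(ctx sdk.Context, clientID string) error {
	if _, found := k.clientKeeper.GetClientState(ctx, clientID); !found {
		return sdkerrors.Wrapf(clienttypes.ErrClientNotFound, "client %s not found", clientID)
	}
	return nil
}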
diff --git a/core/03-connection/types/genesis.go b/core/03-connection/types/genesis.go
new file mode 100644
index 00000000..b10c300a
--- /dev/null
+++ b/core/03-connection/types/genesis.go
@@ -0,0 +1,76 @@
+package types
+
+import (
+ "fmt"
+
+ host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+)
+
+// NewConnectionPaths creates a ConnectionPaths instance.
+func NewConnectionPaths(id string, paths []string) ConnectionPaths {
+ return ConnectionPaths{
+ ClientId: id,
+ Paths: paths,
+ }
+}
+
+// NewGenesisState creates a GenesisState instance.
+func NewGenesisState(
+ connections []IdentifiedConnection, connPaths []ConnectionPaths,
+ nextConnectionSequence uint64,
+) GenesisState {
+ return GenesisState{
+ Connections: connections,
+ ClientConnectionPaths: connPaths,
+ NextConnectionSequence: nextConnectionSequence,
+ }
+}
+
+// DefaultGenesisState returns the ibc connection submodule's default genesis state.
+func DefaultGenesisState() GenesisState {
+ return GenesisState{
+ Connections: []IdentifiedConnection{},
+ ClientConnectionPaths: []ConnectionPaths{},
+ NextConnectionSequence: 0,
+ }
+}
+
+// Validate performs basic genesis state validation returning an error upon any
+// failure.
+func (gs GenesisState) Validate() error {
+ // keep track of the max sequence to ensure it is less than
+	// the next sequence used in creating connection identifiers.
+ var maxSequence uint64 = 0
+
+ for i, conn := range gs.Connections {
+ sequence, err := ParseConnectionSequence(conn.Id)
+ if err != nil {
+ return err
+ }
+
+ if sequence > maxSequence {
+ maxSequence = sequence
+ }
+
+ if err := conn.ValidateBasic(); err != nil {
+ return fmt.Errorf("invalid connection %v index %d: %w", conn, i, err)
+ }
+ }
+
+ for i, conPaths := range gs.ClientConnectionPaths {
+ if err := host.ClientIdentifierValidator(conPaths.ClientId); err != nil {
+ return fmt.Errorf("invalid client connection path %d: %w", i, err)
+ }
+ for _, connectionID := range conPaths.Paths {
+ if err := host.ConnectionIdentifierValidator(connectionID); err != nil {
+ return fmt.Errorf("invalid client connection ID (%s) in connection paths %d: %w", connectionID, i, err)
+ }
+ }
+ }
+
+ if maxSequence != 0 && maxSequence >= gs.NextConnectionSequence {
+ return fmt.Errorf("next connection sequence %d must be greater than maximum sequence used in connection identifier %d", gs.NextConnectionSequence, maxSequence)
+ }
+
+ return nil
+}
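Concretely, Validate rejects a genesis whose highest connection sequence already in use is not below NextConnectionSequence. A small sketch mirroring the "next connection sequence" case in genesis_test.go later in this patch, assuming the commitmenttypes import used in the tests and that DefaultIBCVersion is the package's default version:

counterparty := NewCounterparty("clienttotest", "connection-1", commitmenttypes.NewMerklePrefix([]byte("prefix")))
end := NewConnectionEnd(INIT, "clienttotesta", counterparty, []*Version{DefaultIBCVersion}, 500)
gs := NewGenesisState(
	[]IdentifiedConnection{NewIdentifiedConnection(FormatConnectionIdentifier(10), end)},
	[]ConnectionPaths{},
	0, // not greater than the max sequence (10) used above
)
err := gs.Validate() // err != nil: next sequence must exceed the max used sequence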
diff --git a/core/03-connection/types/genesis.pb.go b/core/03-connection/types/genesis.pb.go
new file mode 100644
index 00000000..5dce20ca
--- /dev/null
+++ b/core/03-connection/types/genesis.pb.go
@@ -0,0 +1,438 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: ibcgo/core/connection/v1/genesis.proto
+
+package types
+
+import (
+ fmt "fmt"
+ _ "github.com/gogo/protobuf/gogoproto"
+ proto "github.com/gogo/protobuf/proto"
+ io "io"
+ math "math"
+ math_bits "math/bits"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+// GenesisState defines the ibc connection submodule's genesis state.
+type GenesisState struct {
+ Connections []IdentifiedConnection `protobuf:"bytes,1,rep,name=connections,proto3" json:"connections"`
+ ClientConnectionPaths []ConnectionPaths `protobuf:"bytes,2,rep,name=client_connection_paths,json=clientConnectionPaths,proto3" json:"client_connection_paths" yaml:"client_connection_paths"`
+ // the sequence for the next generated connection identifier
+ NextConnectionSequence uint64 `protobuf:"varint,3,opt,name=next_connection_sequence,json=nextConnectionSequence,proto3" json:"next_connection_sequence,omitempty" yaml:"next_connection_sequence"`
+}
+
+func (m *GenesisState) Reset() { *m = GenesisState{} }
+func (m *GenesisState) String() string { return proto.CompactTextString(m) }
+func (*GenesisState) ProtoMessage() {}
+func (*GenesisState) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1d3565a164ba596e, []int{0}
+}
+func (m *GenesisState) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *GenesisState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_GenesisState.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *GenesisState) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GenesisState.Merge(m, src)
+}
+func (m *GenesisState) XXX_Size() int {
+ return m.Size()
+}
+func (m *GenesisState) XXX_DiscardUnknown() {
+ xxx_messageInfo_GenesisState.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GenesisState proto.InternalMessageInfo
+
+func (m *GenesisState) GetConnections() []IdentifiedConnection {
+ if m != nil {
+ return m.Connections
+ }
+ return nil
+}
+
+func (m *GenesisState) GetClientConnectionPaths() []ConnectionPaths {
+ if m != nil {
+ return m.ClientConnectionPaths
+ }
+ return nil
+}
+
+func (m *GenesisState) GetNextConnectionSequence() uint64 {
+ if m != nil {
+ return m.NextConnectionSequence
+ }
+ return 0
+}
+
+func init() {
+ proto.RegisterType((*GenesisState)(nil), "ibcgo.core.connection.v1.GenesisState")
+}
+
+func init() {
+ proto.RegisterFile("ibcgo/core/connection/v1/genesis.proto", fileDescriptor_1d3565a164ba596e)
+}
+
+var fileDescriptor_1d3565a164ba596e = []byte{
+ // 322 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0x3f, 0x4f, 0xc2, 0x40,
+ 0x18, 0xc6, 0x7b, 0x40, 0x1c, 0x8a, 0x53, 0xe3, 0x9f, 0x86, 0xe1, 0x4a, 0x6a, 0x42, 0x60, 0xe0,
+ 0x4e, 0x64, 0x73, 0xac, 0x83, 0x31, 0x2e, 0x06, 0x12, 0x07, 0x13, 0x43, 0xe8, 0xf1, 0x5a, 0x2e,
+ 0x81, 0x3b, 0xe4, 0x0e, 0x22, 0x1f, 0xc1, 0xcd, 0x8f, 0xc5, 0x88, 0x9b, 0x53, 0x63, 0xda, 0x6f,
+ 0xc0, 0x27, 0x30, 0x6d, 0xd5, 0x56, 0x93, 0x6e, 0x97, 0xf7, 0xfd, 0x3d, 0xbf, 0xf7, 0x92, 0xc7,
+ 0x6c, 0x71, 0x9f, 0x05, 0x92, 0x32, 0xb9, 0x04, 0xca, 0xa4, 0x10, 0xc0, 0x34, 0x97, 0x82, 0xae,
+ 0x7b, 0x34, 0x00, 0x01, 0x8a, 0x2b, 0xb2, 0x58, 0x4a, 0x2d, 0x2d, 0x3b, 0xe5, 0x48, 0xc2, 0x91,
+ 0x9c, 0x23, 0xeb, 0x5e, 0xe3, 0x28, 0x90, 0x81, 0x4c, 0x21, 0x9a, 0xbc, 0x32, 0xbe, 0xd1, 0x29,
+ 0xf5, 0x16, 0xd2, 0x29, 0xea, 0xbe, 0x57, 0xcc, 0xc3, 0xeb, 0xec, 0xd8, 0x50, 0x8f, 0x35, 0x58,
+ 0xf7, 0x66, 0x3d, 0x87, 0x94, 0x8d, 0x9a, 0xd5, 0x76, 0xfd, 0x82, 0x90, 0xb2, 0x1f, 0x90, 0x9b,
+ 0x09, 0x08, 0xcd, 0x9f, 0x38, 0x4c, 0xae, 0x7e, 0xe7, 0x5e, 0x6d, 0x1b, 0x3a, 0xc6, 0xa0, 0x28,
+ 0xb2, 0x5e, 0x91, 0x79, 0xca, 0x66, 0x1c, 0x84, 0x1e, 0xe5, 0xe3, 0xd1, 0x62, 0xac, 0xa7, 0xca,
+ 0xae, 0xa4, 0x47, 0x3a, 0xe5, 0x47, 0x72, 0xf5, 0x5d, 0x12, 0xf0, 0x5a, 0x89, 0x7f, 0x1f, 0x3a,
+ 0x78, 0x33, 0x9e, 0xcf, 0x2e, 0xdd, 0x12, 0xaf, 0x3b, 0x38, 0xce, 0x36, 0xff, 0xe2, 0xd6, 0xa3,
+ 0x69, 0x0b, 0x78, 0xf9, 0x13, 0x50, 0xf0, 0xbc, 0x02, 0xc1, 0xc0, 0xae, 0x36, 0x51, 0xbb, 0xe6,
+ 0x9d, 0xed, 0x43, 0xc7, 0xc9, 0xe4, 0x65, 0xa4, 0x3b, 0x38, 0x49, 0x56, 0xb9, 0x7b, 0xf8, 0xbd,
+ 0xf0, 0x6e, 0xb7, 0x11, 0x46, 0xbb, 0x08, 0xa3, 0xcf, 0x08, 0xa3, 0xb7, 0x18, 0x1b, 0xbb, 0x18,
+ 0x1b, 0x1f, 0x31, 0x36, 0x1e, 0x7a, 0x01, 0xd7, 0xd3, 0x95, 0x4f, 0x98, 0x9c, 0x53, 0x26, 0xd5,
+ 0x5c, 0x2a, 0xca, 0x7d, 0xd6, 0xfd, 0xe9, 0xea, 0xbc, 0xdf, 0x2d, 0xd4, 0xa5, 0x37, 0x0b, 0x50,
+ 0xfe, 0x41, 0xda, 0x53, 0xff, 0x2b, 0x00, 0x00, 0xff, 0xff, 0x31, 0x41, 0xfb, 0xcb, 0x2c, 0x02,
+ 0x00, 0x00,
+}
+
+func (m *GenesisState) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *GenesisState) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.NextConnectionSequence != 0 {
+ i = encodeVarintGenesis(dAtA, i, uint64(m.NextConnectionSequence))
+ i--
+ dAtA[i] = 0x18
+ }
+ if len(m.ClientConnectionPaths) > 0 {
+ for iNdEx := len(m.ClientConnectionPaths) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.ClientConnectionPaths[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenesis(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if len(m.Connections) > 0 {
+ for iNdEx := len(m.Connections) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Connections[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenesis(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintGenesis(dAtA []byte, offset int, v uint64) int {
+ offset -= sovGenesis(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *GenesisState) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Connections) > 0 {
+ for _, e := range m.Connections {
+ l = e.Size()
+ n += 1 + l + sovGenesis(uint64(l))
+ }
+ }
+ if len(m.ClientConnectionPaths) > 0 {
+ for _, e := range m.ClientConnectionPaths {
+ l = e.Size()
+ n += 1 + l + sovGenesis(uint64(l))
+ }
+ }
+ if m.NextConnectionSequence != 0 {
+ n += 1 + sovGenesis(uint64(m.NextConnectionSequence))
+ }
+ return n
+}
+
+func sovGenesis(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozGenesis(x uint64) (n int) {
+ return sovGenesis(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *GenesisState) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenesis
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: GenesisState: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: GenesisState: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Connections", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenesis
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenesis
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenesis
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Connections = append(m.Connections, IdentifiedConnection{})
+ if err := m.Connections[len(m.Connections)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClientConnectionPaths", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenesis
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenesis
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenesis
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ClientConnectionPaths = append(m.ClientConnectionPaths, ConnectionPaths{})
+ if err := m.ClientConnectionPaths[len(m.ClientConnectionPaths)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NextConnectionSequence", wireType)
+ }
+ m.NextConnectionSequence = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenesis
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.NextConnectionSequence |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenesis(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenesis
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipGenesis(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenesis
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenesis
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenesis
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthGenesis
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupGenesis
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthGenesis
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthGenesis = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowGenesis = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupGenesis = fmt.Errorf("proto: unexpected end of group")
+)
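The hard-coded key bytes that MarshalToSizedBuffer writes above (0xa, 0x12, 0x18) are protobuf field keys, computed as (field_number << 3) | wire_type. A tiny standalone check:

package main

import "fmt"

// fieldKey reproduces the protobuf field-key formula used by the generated marshaler.
func fieldKey(fieldNumber, wireType uint8) uint8 { return fieldNumber<<3 | wireType }

func main() {
	fmt.Printf("%#x %#x %#x\n",
		fieldKey(1, 2), // connections: length-delimited             -> 0xa
		fieldKey(2, 2), // client_connection_paths: length-delimited -> 0x12
		fieldKey(3, 0), // next_connection_sequence: varint           -> 0x18
	)
}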
diff --git a/core/03-connection/types/genesis_test.go b/core/03-connection/types/genesis_test.go
new file mode 100644
index 00000000..846837f9
--- /dev/null
+++ b/core/03-connection/types/genesis_test.go
@@ -0,0 +1,114 @@
+package types_test
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
+ commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
+ ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+)
+
+func TestValidateGenesis(t *testing.T) {
+
+ testCases := []struct {
+ name string
+ genState types.GenesisState
+ expPass bool
+ }{
+ {
+ name: "default",
+ genState: types.DefaultGenesisState(),
+ expPass: true,
+ },
+ {
+ name: "valid genesis",
+ genState: types.NewGenesisState(
+ []types.IdentifiedConnection{
+ types.NewIdentifiedConnection(connectionID, types.NewConnectionEnd(types.INIT, clientID, types.Counterparty{clientID2, connectionID2, commitmenttypes.NewMerklePrefix([]byte("prefix"))}, []*types.Version{ibctesting.ConnectionVersion}, 500)),
+ },
+ []types.ConnectionPaths{
+ {clientID, []string{connectionID}},
+ },
+ 0,
+ ),
+ expPass: true,
+ },
+ {
+ name: "invalid connection",
+ genState: types.NewGenesisState(
+ []types.IdentifiedConnection{
+ types.NewIdentifiedConnection(connectionID, types.NewConnectionEnd(types.INIT, "(CLIENTIDONE)", types.Counterparty{clientID, connectionID, commitmenttypes.NewMerklePrefix([]byte("prefix"))}, []*types.Version{ibctesting.ConnectionVersion}, 500)),
+ },
+ []types.ConnectionPaths{
+ {clientID, []string{connectionID}},
+ },
+ 0,
+ ),
+ expPass: false,
+ },
+ {
+ name: "invalid client id",
+ genState: types.NewGenesisState(
+ []types.IdentifiedConnection{
+ types.NewIdentifiedConnection(connectionID, types.NewConnectionEnd(types.INIT, clientID, types.Counterparty{clientID2, connectionID2, commitmenttypes.NewMerklePrefix([]byte("prefix"))}, []*types.Version{ibctesting.ConnectionVersion}, 500)),
+ },
+ []types.ConnectionPaths{
+ {"(CLIENTIDONE)", []string{connectionID}},
+ },
+ 0,
+ ),
+ expPass: false,
+ },
+ {
+ name: "invalid path",
+ genState: types.NewGenesisState(
+ []types.IdentifiedConnection{
+ types.NewIdentifiedConnection(connectionID, types.NewConnectionEnd(types.INIT, clientID, types.Counterparty{clientID2, connectionID2, commitmenttypes.NewMerklePrefix([]byte("prefix"))}, []*types.Version{ibctesting.ConnectionVersion}, 500)),
+ },
+ []types.ConnectionPaths{
+ {clientID, []string{invalidConnectionID}},
+ },
+ 0,
+ ),
+ expPass: false,
+ },
+ {
+ name: "invalid connection identifier",
+ genState: types.NewGenesisState(
+ []types.IdentifiedConnection{
+ types.NewIdentifiedConnection("conn-0", types.NewConnectionEnd(types.INIT, clientID, types.Counterparty{clientID2, connectionID2, commitmenttypes.NewMerklePrefix([]byte("prefix"))}, []*types.Version{ibctesting.ConnectionVersion}, 500)),
+ },
+ []types.ConnectionPaths{
+ {clientID, []string{connectionID}},
+ },
+ 0,
+ ),
+ expPass: false,
+ },
+ {
+ name: "next connection sequence is not greater than maximum connection identifier sequence provided",
+ genState: types.NewGenesisState(
+ []types.IdentifiedConnection{
+ types.NewIdentifiedConnection(types.FormatConnectionIdentifier(10), types.NewConnectionEnd(types.INIT, clientID, types.Counterparty{clientID2, connectionID2, commitmenttypes.NewMerklePrefix([]byte("prefix"))}, []*types.Version{ibctesting.ConnectionVersion}, 500)),
+ },
+ []types.ConnectionPaths{
+ {clientID, []string{connectionID}},
+ },
+ 0,
+ ),
+ expPass: false,
+ },
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+ err := tc.genState.Validate()
+ if tc.expPass {
+ require.NoError(t, err, tc.name)
+ } else {
+ require.Error(t, err, tc.name)
+ }
+ }
+}
diff --git a/core/03-connection/types/keys.go b/core/03-connection/types/keys.go
new file mode 100644
index 00000000..65af565c
--- /dev/null
+++ b/core/03-connection/types/keys.go
@@ -0,0 +1,61 @@
+package types
+
+import (
+ "fmt"
+ "regexp"
+
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+)
+
+const (
+ // SubModuleName defines the IBC connection name
+ SubModuleName = "connection"
+
+ // StoreKey is the store key string for IBC connections
+ StoreKey = SubModuleName
+
+ // RouterKey is the message route for IBC connections
+ RouterKey = SubModuleName
+
+ // QuerierRoute is the querier route for IBC connections
+ QuerierRoute = SubModuleName
+
+ // KeyNextConnectionSequence is the key used to store the next connection sequence in
+ // the keeper.
+ KeyNextConnectionSequence = "nextConnectionSequence"
+
+ // ConnectionPrefix is the prefix used when creating a connection identifier
+ ConnectionPrefix = "connection-"
+)
+
+// FormatConnectionIdentifier returns the connection identifier with the sequence appended.
+// This is an SDK-specific format not enforced by the IBC protocol.
+func FormatConnectionIdentifier(sequence uint64) string {
+ return fmt.Sprintf("%s%d", ConnectionPrefix, sequence)
+}
+
+// IsConnectionIDFormat checks if a connectionID is in the format required by the SDK for
+// parsing connection identifiers. The connection identifier must be in the form: `connection-{N}`.
+var IsConnectionIDFormat = regexp.MustCompile(`^connection-[0-9]{1,20}$`).MatchString
+
+// IsValidConnectionID checks if the connection identifier is valid and can be parsed to
+// the connection identifier format.
+func IsValidConnectionID(connectionID string) bool {
+ _, err := ParseConnectionSequence(connectionID)
+ return err == nil
+}
+
+// ParseConnectionSequence parses the connection sequence from the connection identifier.
+func ParseConnectionSequence(connectionID string) (uint64, error) {
+ if !IsConnectionIDFormat(connectionID) {
+ return 0, sdkerrors.Wrap(host.ErrInvalidID, "connection identifier is not in the format: `connection-{N}`")
+ }
+
+ sequence, err := host.ParseIdentifier(connectionID, ConnectionPrefix)
+ if err != nil {
+ return 0, sdkerrors.Wrap(err, "invalid connection identifier")
+ }
+
+ return sequence, nil
+}
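A quick usage sketch of the helpers above, mirroring keys_test.go below; illustrateConnectionIDs is a hypothetical function placed in this package (fmt is already imported here):

// illustrateConnectionIDs shows the round trip between sequences and identifiers.
func illustrateConnectionIDs() {
	id := FormatConnectionIdentifier(7) // "connection-7"
	if seq, err := ParseConnectionSequence(id); err == nil {
		fmt.Println(seq) // 7
	}
	fmt.Println(IsValidConnectionID("connection0")) // false: missing dash, fails the regexp
}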
diff --git a/core/03-connection/types/keys_test.go b/core/03-connection/types/keys_test.go
new file mode 100644
index 00000000..6adb8090
--- /dev/null
+++ b/core/03-connection/types/keys_test.go
@@ -0,0 +1,49 @@
+package types_test
+
+import (
+ "math"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
+)
+
+// tests ParseConnectionSequence and IsValidConnectionID
+func TestParseConnectionSequence(t *testing.T) {
+ testCases := []struct {
+ name string
+ connectionID string
+ expSeq uint64
+ expPass bool
+ }{
+ {"valid 0", "connection-0", 0, true},
+ {"valid 1", "connection-1", 1, true},
+ {"valid large sequence", types.FormatConnectionIdentifier(math.MaxUint64), math.MaxUint64, true},
+ // one above uint64 max
+ {"invalid uint64", "connection-18446744073709551616", 0, false},
+		// max uint64 is 20 digits; this sequence is longer
+ {"invalid large sequence", "connection-2345682193567182931243", 0, false},
+ {"capital prefix", "Connection-0", 0, false},
+ {"double prefix", "connection-connection-0", 0, false},
+ {"missing dash", "connection0", 0, false},
+ {"blank id", " ", 0, false},
+ {"empty id", "", 0, false},
+ {"negative sequence", "connection--1", 0, false},
+ }
+
+ for _, tc := range testCases {
+
+ seq, err := types.ParseConnectionSequence(tc.connectionID)
+ valid := types.IsValidConnectionID(tc.connectionID)
+ require.Equal(t, tc.expSeq, seq)
+
+ if tc.expPass {
+ require.NoError(t, err, tc.name)
+ require.True(t, valid)
+ } else {
+ require.Error(t, err, tc.name)
+ require.False(t, valid)
+ }
+ }
+}
diff --git a/core/03-connection/types/msgs.go b/core/03-connection/types/msgs.go
new file mode 100644
index 00000000..3ba1aed8
--- /dev/null
+++ b/core/03-connection/types/msgs.go
@@ -0,0 +1,354 @@
+package types
+
+import (
+ codectypes "github.com/cosmos/cosmos-sdk/codec/types"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
+ host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+)
+
+var (
+ _ sdk.Msg = &MsgConnectionOpenInit{}
+ _ sdk.Msg = &MsgConnectionOpenConfirm{}
+ _ sdk.Msg = &MsgConnectionOpenAck{}
+ _ sdk.Msg = &MsgConnectionOpenTry{}
+
+ _ codectypes.UnpackInterfacesMessage = MsgConnectionOpenTry{}
+ _ codectypes.UnpackInterfacesMessage = MsgConnectionOpenAck{}
+)
+
+// NewMsgConnectionOpenInit creates a new MsgConnectionOpenInit instance. It sets the
+// counterparty connection identifier to be empty.
+//nolint:interfacer
+func NewMsgConnectionOpenInit(
+ clientID, counterpartyClientID string,
+ counterpartyPrefix commitmenttypes.MerklePrefix,
+ version *Version, delayPeriod uint64, signer sdk.AccAddress,
+) *MsgConnectionOpenInit {
+ // counterparty must have the same delay period
+ counterparty := NewCounterparty(counterpartyClientID, "", counterpartyPrefix)
+ return &MsgConnectionOpenInit{
+ ClientId: clientID,
+ Counterparty: counterparty,
+ Version: version,
+ DelayPeriod: delayPeriod,
+ Signer: signer.String(),
+ }
+}
+
+// Route implements sdk.Msg
+func (msg MsgConnectionOpenInit) Route() string {
+ return host.RouterKey
+}
+
+// Type implements sdk.Msg
+func (msg MsgConnectionOpenInit) Type() string {
+ return "connection_open_init"
+}
+
+// ValidateBasic implements sdk.Msg.
+func (msg MsgConnectionOpenInit) ValidateBasic() error {
+ if err := host.ClientIdentifierValidator(msg.ClientId); err != nil {
+ return sdkerrors.Wrap(err, "invalid client ID")
+ }
+ if msg.Counterparty.ConnectionId != "" {
+ return sdkerrors.Wrap(ErrInvalidCounterparty, "counterparty connection identifier must be empty")
+ }
+
+ // NOTE: Version can be nil on MsgConnectionOpenInit
+ if msg.Version != nil {
+ if err := ValidateVersion(msg.Version); err != nil {
+ return sdkerrors.Wrap(err, "basic validation of the provided version failed")
+ }
+ }
+ _, err := sdk.AccAddressFromBech32(msg.Signer)
+ if err != nil {
+ return sdkerrors.Wrapf(sdkerrors.ErrInvalidAddress, "string could not be parsed as address: %v", err)
+ }
+ return msg.Counterparty.ValidateBasic()
+}
+
+// GetSignBytes implements sdk.Msg. The function will panic since it is used
+// for amino transaction verification which IBC does not support.
+func (msg MsgConnectionOpenInit) GetSignBytes() []byte {
+ panic("IBC messages do not support amino")
+}
+
+// GetSigners implements sdk.Msg
+func (msg MsgConnectionOpenInit) GetSigners() []sdk.AccAddress {
+ accAddr, err := sdk.AccAddressFromBech32(msg.Signer)
+ if err != nil {
+ panic(err)
+ }
+ return []sdk.AccAddress{accAddr}
+}
+
+// NewMsgConnectionOpenTry creates a new MsgConnectionOpenTry instance
+//nolint:interfacer
+func NewMsgConnectionOpenTry(
+ previousConnectionID, clientID, counterpartyConnectionID,
+ counterpartyClientID string, counterpartyClient exported.ClientState,
+ counterpartyPrefix commitmenttypes.MerklePrefix,
+ counterpartyVersions []*Version, delayPeriod uint64,
+ proofInit, proofClient, proofConsensus []byte,
+ proofHeight, consensusHeight clienttypes.Height, signer sdk.AccAddress,
+) *MsgConnectionOpenTry {
+ counterparty := NewCounterparty(counterpartyClientID, counterpartyConnectionID, counterpartyPrefix)
+ csAny, _ := clienttypes.PackClientState(counterpartyClient)
+ return &MsgConnectionOpenTry{
+ PreviousConnectionId: previousConnectionID,
+ ClientId: clientID,
+ ClientState: csAny,
+ Counterparty: counterparty,
+ CounterpartyVersions: counterpartyVersions,
+ DelayPeriod: delayPeriod,
+ ProofInit: proofInit,
+ ProofClient: proofClient,
+ ProofConsensus: proofConsensus,
+ ProofHeight: proofHeight,
+ ConsensusHeight: consensusHeight,
+ Signer: signer.String(),
+ }
+}
+
+// Route implements sdk.Msg
+func (msg MsgConnectionOpenTry) Route() string {
+ return host.RouterKey
+}
+
+// Type implements sdk.Msg
+func (msg MsgConnectionOpenTry) Type() string {
+ return "connection_open_try"
+}
+
+// ValidateBasic implements sdk.Msg
+func (msg MsgConnectionOpenTry) ValidateBasic() error {
+ // an empty connection identifier indicates that a connection identifier should be generated
+ if msg.PreviousConnectionId != "" {
+ if !IsValidConnectionID(msg.PreviousConnectionId) {
+ return sdkerrors.Wrap(ErrInvalidConnectionIdentifier, "invalid previous connection ID")
+ }
+ }
+ if err := host.ClientIdentifierValidator(msg.ClientId); err != nil {
+ return sdkerrors.Wrap(err, "invalid client ID")
+ }
+ // counterparty validate basic allows empty counterparty connection identifiers
+ if err := host.ConnectionIdentifierValidator(msg.Counterparty.ConnectionId); err != nil {
+ return sdkerrors.Wrap(err, "invalid counterparty connection ID")
+ }
+ if msg.ClientState == nil {
+ return sdkerrors.Wrap(clienttypes.ErrInvalidClient, "counterparty client is nil")
+ }
+ clientState, err := clienttypes.UnpackClientState(msg.ClientState)
+ if err != nil {
+ return sdkerrors.Wrapf(clienttypes.ErrInvalidClient, "unpack err: %v", err)
+ }
+ if err := clientState.Validate(); err != nil {
+ return sdkerrors.Wrap(err, "counterparty client is invalid")
+ }
+ if len(msg.CounterpartyVersions) == 0 {
+ return sdkerrors.Wrap(sdkerrors.ErrInvalidVersion, "empty counterparty versions")
+ }
+ for i, version := range msg.CounterpartyVersions {
+ if err := ValidateVersion(version); err != nil {
+ return sdkerrors.Wrapf(err, "basic validation failed on version with index %d", i)
+ }
+ }
+ if len(msg.ProofInit) == 0 {
+ return sdkerrors.Wrap(commitmenttypes.ErrInvalidProof, "cannot submit an empty proof init")
+ }
+ if len(msg.ProofClient) == 0 {
+ return sdkerrors.Wrap(commitmenttypes.ErrInvalidProof, "cannot submit empty proof client")
+ }
+ if len(msg.ProofConsensus) == 0 {
+ return sdkerrors.Wrap(commitmenttypes.ErrInvalidProof, "cannot submit an empty proof of consensus state")
+ }
+ if msg.ProofHeight.IsZero() {
+ return sdkerrors.Wrap(sdkerrors.ErrInvalidHeight, "proof height must be non-zero")
+ }
+ if msg.ConsensusHeight.IsZero() {
+ return sdkerrors.Wrap(sdkerrors.ErrInvalidHeight, "consensus height must be non-zero")
+ }
+ _, err = sdk.AccAddressFromBech32(msg.Signer)
+ if err != nil {
+ return sdkerrors.Wrapf(sdkerrors.ErrInvalidAddress, "string could not be parsed as address: %v", err)
+ }
+ return msg.Counterparty.ValidateBasic()
+}
+
+// UnpackInterfaces implements UnpackInterfacesMessage.UnpackInterfaces
+func (msg MsgConnectionOpenTry) UnpackInterfaces(unpacker codectypes.AnyUnpacker) error {
+ return unpacker.UnpackAny(msg.ClientState, new(exported.ClientState))
+}
+
+// GetSignBytes implements sdk.Msg. The function will panic since it is used
+// for amino transaction verification which IBC does not support.
+func (msg MsgConnectionOpenTry) GetSignBytes() []byte {
+ panic("IBC messages do not support amino")
+}
+
+// GetSigners implements sdk.Msg
+func (msg MsgConnectionOpenTry) GetSigners() []sdk.AccAddress {
+ accAddr, err := sdk.AccAddressFromBech32(msg.Signer)
+ if err != nil {
+ panic(err)
+ }
+ return []sdk.AccAddress{accAddr}
+}
+
+// NewMsgConnectionOpenAck creates a new MsgConnectionOpenAck instance
+//nolint:interfacer
+func NewMsgConnectionOpenAck(
+ connectionID, counterpartyConnectionID string, counterpartyClient exported.ClientState,
+ proofTry, proofClient, proofConsensus []byte,
+ proofHeight, consensusHeight clienttypes.Height,
+ version *Version,
+ signer sdk.AccAddress,
+) *MsgConnectionOpenAck {
+ csAny, _ := clienttypes.PackClientState(counterpartyClient)
+ return &MsgConnectionOpenAck{
+ ConnectionId: connectionID,
+ CounterpartyConnectionId: counterpartyConnectionID,
+ ClientState: csAny,
+ ProofTry: proofTry,
+ ProofClient: proofClient,
+ ProofConsensus: proofConsensus,
+ ProofHeight: proofHeight,
+ ConsensusHeight: consensusHeight,
+ Version: version,
+ Signer: signer.String(),
+ }
+}
+
+// UnpackInterfaces implements UnpackInterfacesMessage.UnpackInterfaces
+func (msg MsgConnectionOpenAck) UnpackInterfaces(unpacker codectypes.AnyUnpacker) error {
+ return unpacker.UnpackAny(msg.ClientState, new(exported.ClientState))
+}
+
+// Route implements sdk.Msg
+func (msg MsgConnectionOpenAck) Route() string {
+ return host.RouterKey
+}
+
+// Type implements sdk.Msg
+func (msg MsgConnectionOpenAck) Type() string {
+ return "connection_open_ack"
+}
+
+// ValidateBasic implements sdk.Msg
+func (msg MsgConnectionOpenAck) ValidateBasic() error {
+ if !IsValidConnectionID(msg.ConnectionId) {
+ return ErrInvalidConnectionIdentifier
+ }
+ if err := host.ConnectionIdentifierValidator(msg.CounterpartyConnectionId); err != nil {
+ return sdkerrors.Wrap(err, "invalid counterparty connection ID")
+ }
+ if err := ValidateVersion(msg.Version); err != nil {
+ return err
+ }
+ if msg.ClientState == nil {
+ return sdkerrors.Wrap(clienttypes.ErrInvalidClient, "counterparty client is nil")
+ }
+ clientState, err := clienttypes.UnpackClientState(msg.ClientState)
+ if err != nil {
+ return sdkerrors.Wrapf(clienttypes.ErrInvalidClient, "unpack err: %v", err)
+ }
+ if err := clientState.Validate(); err != nil {
+ return sdkerrors.Wrap(err, "counterparty client is invalid")
+ }
+ if len(msg.ProofTry) == 0 {
+ return sdkerrors.Wrap(commitmenttypes.ErrInvalidProof, "cannot submit an empty proof try")
+ }
+ if len(msg.ProofClient) == 0 {
+ return sdkerrors.Wrap(commitmenttypes.ErrInvalidProof, "cannot submit empty proof client")
+ }
+ if len(msg.ProofConsensus) == 0 {
+ return sdkerrors.Wrap(commitmenttypes.ErrInvalidProof, "cannot submit an empty proof of consensus state")
+ }
+ if msg.ProofHeight.IsZero() {
+ return sdkerrors.Wrap(sdkerrors.ErrInvalidHeight, "proof height must be non-zero")
+ }
+ if msg.ConsensusHeight.IsZero() {
+ return sdkerrors.Wrap(sdkerrors.ErrInvalidHeight, "consensus height must be non-zero")
+ }
+ _, err = sdk.AccAddressFromBech32(msg.Signer)
+ if err != nil {
+ return sdkerrors.Wrapf(sdkerrors.ErrInvalidAddress, "string could not be parsed as address: %v", err)
+ }
+ return nil
+}
+
+// GetSignBytes implements sdk.Msg. The function will panic since it is used
+// for amino transaction verification which IBC does not support.
+func (msg MsgConnectionOpenAck) GetSignBytes() []byte {
+ panic("IBC messages do not support amino")
+}
+
+// GetSigners implements sdk.Msg
+func (msg MsgConnectionOpenAck) GetSigners() []sdk.AccAddress {
+ accAddr, err := sdk.AccAddressFromBech32(msg.Signer)
+ if err != nil {
+ panic(err)
+ }
+ return []sdk.AccAddress{accAddr}
+}
+
+// NewMsgConnectionOpenConfirm creates a new MsgConnectionOpenConfirm instance
+//nolint:interfacer
+func NewMsgConnectionOpenConfirm(
+ connectionID string, proofAck []byte, proofHeight clienttypes.Height,
+ signer sdk.AccAddress,
+) *MsgConnectionOpenConfirm {
+ return &MsgConnectionOpenConfirm{
+ ConnectionId: connectionID,
+ ProofAck: proofAck,
+ ProofHeight: proofHeight,
+ Signer: signer.String(),
+ }
+}
+
+// Route implements sdk.Msg
+func (msg MsgConnectionOpenConfirm) Route() string {
+ return host.RouterKey
+}
+
+// Type implements sdk.Msg
+func (msg MsgConnectionOpenConfirm) Type() string {
+ return "connection_open_confirm"
+}
+
+// ValidateBasic implements sdk.Msg
+func (msg MsgConnectionOpenConfirm) ValidateBasic() error {
+ if !IsValidConnectionID(msg.ConnectionId) {
+ return ErrInvalidConnectionIdentifier
+ }
+ if len(msg.ProofAck) == 0 {
+ return sdkerrors.Wrap(commitmenttypes.ErrInvalidProof, "cannot submit an empty proof ack")
+ }
+ if msg.ProofHeight.IsZero() {
+ return sdkerrors.Wrap(sdkerrors.ErrInvalidHeight, "proof height must be non-zero")
+ }
+ _, err := sdk.AccAddressFromBech32(msg.Signer)
+ if err != nil {
+ return sdkerrors.Wrapf(sdkerrors.ErrInvalidAddress, "string could not be parsed as address: %v", err)
+ }
+ return nil
+}
+
+// GetSignBytes implements sdk.Msg. The function will panic since it is used
+// for amino transaction verification which IBC does not support.
+func (msg MsgConnectionOpenConfirm) GetSignBytes() []byte {
+ panic("IBC messages do not support amino")
+}
+
+// GetSigners implements sdk.Msg
+func (msg MsgConnectionOpenConfirm) GetSigners() []sdk.AccAddress {
+ accAddr, err := sdk.AccAddressFromBech32(msg.Signer)
+ if err != nil {
+ panic(err)
+ }
+ return []sdk.AccAddress{accAddr}
+}
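A hedged sketch of building and stateless-validating the first handshake message with the constructor above, written as it would appear inside a hypothetical caller; the bech32 address and client identifiers are placeholders borrowed from msgs_test.go below:

signer, _ := sdk.AccAddressFromBech32("cosmos1ckgw5d7jfj7wwxjzs9fdrdev9vc8dzcw3n2lht")
prefix := commitmenttypes.NewMerklePrefix([]byte("ibc"))
msg := NewMsgConnectionOpenInit(
	"clienttotest",  // client ID on this chain
	"clienttotesta", // counterparty client ID
	prefix,
	nil, // nil version is allowed; defaults are negotiated during the handshake
	500, // delay period
	signer,
)
if err := msg.ValidateBasic(); err != nil {
	// reject the message before it reaches the handler
}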
diff --git a/core/03-connection/types/msgs_test.go b/core/03-connection/types/msgs_test.go
new file mode 100644
index 00000000..6aff3b09
--- /dev/null
+++ b/core/03-connection/types/msgs_test.go
@@ -0,0 +1,243 @@
+package types_test
+
+import (
+ "fmt"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/suite"
+
+ abci "github.com/tendermint/tendermint/abci/types"
+ dbm "github.com/tendermint/tm-db"
+
+ "github.com/cosmos/cosmos-sdk/simapp"
+ "github.com/cosmos/cosmos-sdk/store/iavl"
+ "github.com/cosmos/cosmos-sdk/store/rootmulti"
+ storetypes "github.com/cosmos/cosmos-sdk/store/types"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
+ commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
+ ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
+ ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+)
+
+var (
+ emptyPrefix = commitmenttypes.MerklePrefix{}
+ emptyProof = []byte{}
+)
+
+type MsgTestSuite struct {
+ suite.Suite
+
+ coordinator *ibctesting.Coordinator
+
+ chainA *ibctesting.TestChain
+ chainB *ibctesting.TestChain
+
+ proof []byte
+}
+
+func (suite *MsgTestSuite) SetupTest() {
+ suite.coordinator = ibctesting.NewCoordinator(suite.T(), 2)
+
+ suite.chainA = suite.coordinator.GetChain(ibctesting.GetChainID(0))
+ suite.chainB = suite.coordinator.GetChain(ibctesting.GetChainID(1))
+
+ app := simapp.Setup(false)
+ db := dbm.NewMemDB()
+ store := rootmulti.NewStore(db)
+ storeKey := storetypes.NewKVStoreKey("iavlStoreKey")
+
+ store.MountStoreWithDB(storeKey, storetypes.StoreTypeIAVL, nil)
+ store.LoadVersion(0)
+ iavlStore := store.GetCommitStore(storeKey).(*iavl.Store)
+
+ iavlStore.Set([]byte("KEY"), []byte("VALUE"))
+ _ = store.Commit()
+
+ res := store.Query(abci.RequestQuery{
+ Path: fmt.Sprintf("/%s/key", storeKey.Name()), // required path to get key/value+proof
+ Data: []byte("KEY"),
+ Prove: true,
+ })
+
+ merkleProof, err := commitmenttypes.ConvertProofs(res.ProofOps)
+ suite.Require().NoError(err)
+ proof, err := app.AppCodec().MarshalBinaryBare(&merkleProof)
+ suite.Require().NoError(err)
+
+ suite.proof = proof
+}
+
+func TestMsgTestSuite(t *testing.T) {
+ suite.Run(t, new(MsgTestSuite))
+}
+
+func (suite *MsgTestSuite) TestNewMsgConnectionOpenInit() {
+ prefix := commitmenttypes.NewMerklePrefix([]byte("storePrefixKey"))
+ signer, _ := sdk.AccAddressFromBech32("cosmos1ckgw5d7jfj7wwxjzs9fdrdev9vc8dzcw3n2lht")
+	// empty versions are considered valid; the default compatible versions
+	// will be used by the protocol.
+ var version *types.Version
+
+ var testCases = []struct {
+ name string
+ msg *types.MsgConnectionOpenInit
+ expPass bool
+ }{
+ {"invalid client ID", types.NewMsgConnectionOpenInit("test/iris", "clienttotest", prefix, version, 500, signer), false},
+ {"invalid counterparty client ID", types.NewMsgConnectionOpenInit("clienttotest", "(clienttotest)", prefix, version, 500, signer), false},
+ {"invalid counterparty connection ID", &types.MsgConnectionOpenInit{connectionID, types.NewCounterparty("clienttotest", "connectiontotest", prefix), version, 500, signer.String()}, false},
+ {"empty counterparty prefix", types.NewMsgConnectionOpenInit("clienttotest", "clienttotest", emptyPrefix, version, 500, signer), false},
+ {"supplied version fails basic validation", types.NewMsgConnectionOpenInit("clienttotest", "clienttotest", prefix, &types.Version{}, 500, signer), false},
+		{"empty signer", types.NewMsgConnectionOpenInit("clienttotest", "clienttotest", prefix, version, 500, nil), false},
+ {"success", types.NewMsgConnectionOpenInit("clienttotest", "clienttotest", prefix, version, 500, signer), true},
+ }
+
+ for _, tc := range testCases {
+ err := tc.msg.ValidateBasic()
+ if tc.expPass {
+ suite.Require().NoError(err, tc.name)
+ } else {
+ suite.Require().Error(err, tc.name)
+ }
+ }
+}
+
+func (suite *MsgTestSuite) TestNewMsgConnectionOpenTry() {
+ prefix := commitmenttypes.NewMerklePrefix([]byte("storePrefixKey"))
+ signer, _ := sdk.AccAddressFromBech32("cosmos1ckgw5d7jfj7wwxjzs9fdrdev9vc8dzcw3n2lht")
+
+ clientState := ibctmtypes.NewClientState(
+ chainID, ibctmtypes.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false,
+ )
+
+ // Pack consensus state into any to test unpacking error
+ consState := ibctmtypes.NewConsensusState(
+ time.Now(), commitmenttypes.NewMerkleRoot([]byte("root")), []byte("nextValsHash"),
+ )
+ invalidAny := clienttypes.MustPackConsensusState(consState)
+ counterparty := types.NewCounterparty("connectiontotest", "clienttotest", prefix)
+
+ // invalidClientState fails validateBasic
+ invalidClient := ibctmtypes.NewClientState(
+ chainID, ibctmtypes.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clienttypes.ZeroHeight(), commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false,
+ )
+
+ var testCases = []struct {
+ name string
+ msg *types.MsgConnectionOpenTry
+ expPass bool
+ }{
+ {"invalid connection ID", types.NewMsgConnectionOpenTry("test/conn1", "clienttotesta", "connectiontotest", "clienttotest", clientState, prefix, []*types.Version{ibctesting.ConnectionVersion}, 500, suite.proof, suite.proof, suite.proof, clientHeight, clientHeight, signer), false},
+ {"invalid connection ID", types.NewMsgConnectionOpenTry("(invalidconnection)", "clienttotesta", "connectiontotest", "clienttotest", clientState, prefix, []*types.Version{ibctesting.ConnectionVersion}, 500, suite.proof, suite.proof, suite.proof, clientHeight, clientHeight, signer), false},
+ {"invalid client ID", types.NewMsgConnectionOpenTry(connectionID, "test/iris", "connectiontotest", "clienttotest", clientState, prefix, []*types.Version{ibctesting.ConnectionVersion}, 500, suite.proof, suite.proof, suite.proof, clientHeight, clientHeight, signer), false},
+ {"invalid counterparty connection ID", types.NewMsgConnectionOpenTry(connectionID, "clienttotesta", "ibc/test", "clienttotest", clientState, prefix, []*types.Version{ibctesting.ConnectionVersion}, 500, suite.proof, suite.proof, suite.proof, clientHeight, clientHeight, signer), false},
+ {"invalid counterparty client ID", types.NewMsgConnectionOpenTry(connectionID, "clienttotesta", "connectiontotest", "test/conn1", clientState, prefix, []*types.Version{ibctesting.ConnectionVersion}, 500, suite.proof, suite.proof, suite.proof, clientHeight, clientHeight, signer), false},
+ {"invalid nil counterparty client", types.NewMsgConnectionOpenTry(connectionID, "clienttotesta", "connectiontotest", "clienttotest", nil, prefix, []*types.Version{ibctesting.ConnectionVersion}, 500, suite.proof, suite.proof, suite.proof, clientHeight, clientHeight, signer), false},
+ {"invalid client unpacking", &types.MsgConnectionOpenTry{connectionID, "clienttotesta", invalidAny, counterparty, 500, []*types.Version{ibctesting.ConnectionVersion}, clientHeight, suite.proof, suite.proof, suite.proof, clientHeight, signer.String()}, false},
+ {"counterparty failed validate", types.NewMsgConnectionOpenTry(connectionID, "clienttotesta", "connectiontotest", "clienttotest", invalidClient, prefix, []*types.Version{ibctesting.ConnectionVersion}, 500, suite.proof, suite.proof, suite.proof, clientHeight, clientHeight, signer), false},
+ {"empty counterparty prefix", types.NewMsgConnectionOpenTry(connectionID, "clienttotesta", "connectiontotest", "clienttotest", clientState, emptyPrefix, []*types.Version{ibctesting.ConnectionVersion}, 500, suite.proof, suite.proof, suite.proof, clientHeight, clientHeight, signer), false},
+ {"empty counterpartyVersions", types.NewMsgConnectionOpenTry(connectionID, "clienttotesta", "connectiontotest", "clienttotest", clientState, prefix, []*types.Version{}, 500, suite.proof, suite.proof, suite.proof, clientHeight, clientHeight, signer), false},
+ {"empty proofInit", types.NewMsgConnectionOpenTry(connectionID, "clienttotesta", "connectiontotest", "clienttotest", clientState, prefix, []*types.Version{ibctesting.ConnectionVersion}, 500, emptyProof, suite.proof, suite.proof, clientHeight, clientHeight, signer), false},
+ {"empty proofClient", types.NewMsgConnectionOpenTry(connectionID, "clienttotesta", "connectiontotest", "clienttotest", clientState, prefix, []*types.Version{ibctesting.ConnectionVersion}, 500, suite.proof, emptyProof, suite.proof, clientHeight, clientHeight, signer), false},
+ {"empty proofConsensus", types.NewMsgConnectionOpenTry(connectionID, "clienttotesta", "connectiontotest", "clienttotest", clientState, prefix, []*types.Version{ibctesting.ConnectionVersion}, 500, suite.proof, suite.proof, emptyProof, clientHeight, clientHeight, signer), false},
+ {"invalid proofHeight", types.NewMsgConnectionOpenTry(connectionID, "clienttotesta", "connectiontotest", "clienttotest", clientState, prefix, []*types.Version{ibctesting.ConnectionVersion}, 500, suite.proof, suite.proof, suite.proof, clienttypes.ZeroHeight(), clientHeight, signer), false},
+ {"invalid consensusHeight", types.NewMsgConnectionOpenTry(connectionID, "clienttotesta", "connectiontotest", "clienttotest", clientState, prefix, []*types.Version{ibctesting.ConnectionVersion}, 500, suite.proof, suite.proof, suite.proof, clientHeight, clienttypes.ZeroHeight(), signer), false},
+		{"empty signer", types.NewMsgConnectionOpenTry(connectionID, "clienttotesta", "connectiontotest", "clienttotest", clientState, prefix, []*types.Version{ibctesting.ConnectionVersion}, 500, suite.proof, suite.proof, suite.proof, clientHeight, clientHeight, nil), false},
+ {"success", types.NewMsgConnectionOpenTry(connectionID, "clienttotesta", "connectiontotest", "clienttotest", clientState, prefix, []*types.Version{ibctesting.ConnectionVersion}, 500, suite.proof, suite.proof, suite.proof, clientHeight, clientHeight, signer), true},
+ {"invalid version", types.NewMsgConnectionOpenTry(connectionID, "clienttotesta", "connectiontotest", "clienttotest", clientState, prefix, []*types.Version{{}}, 500, suite.proof, suite.proof, suite.proof, clientHeight, clientHeight, signer), false},
+ }
+
+ for _, tc := range testCases {
+ err := tc.msg.ValidateBasic()
+ if tc.expPass {
+ suite.Require().NoError(err, tc.name)
+ } else {
+ suite.Require().Error(err, tc.name)
+ }
+ }
+}
+
+func (suite *MsgTestSuite) TestNewMsgConnectionOpenAck() {
+ signer, _ := sdk.AccAddressFromBech32("cosmos1ckgw5d7jfj7wwxjzs9fdrdev9vc8dzcw3n2lht")
+ clientState := ibctmtypes.NewClientState(
+ chainID, ibctmtypes.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false,
+ )
+
+ // Pack consensus state into any to test unpacking error
+ consState := ibctmtypes.NewConsensusState(
+ time.Now(), commitmenttypes.NewMerkleRoot([]byte("root")), []byte("nextValsHash"),
+ )
+ invalidAny := clienttypes.MustPackConsensusState(consState)
+
+ // invalidClientState fails validateBasic
+ invalidClient := ibctmtypes.NewClientState(
+ chainID, ibctmtypes.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clienttypes.ZeroHeight(), commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false,
+ )
+ connectionID := "connection-0"
+
+ var testCases = []struct {
+ name string
+ msg *types.MsgConnectionOpenAck
+ expPass bool
+ }{
+ {"invalid connection ID", types.NewMsgConnectionOpenAck("test/conn1", connectionID, clientState, suite.proof, suite.proof, suite.proof, clientHeight, clientHeight, ibctesting.ConnectionVersion, signer), false},
+ {"invalid counterparty connection ID", types.NewMsgConnectionOpenAck(connectionID, "test/conn1", clientState, suite.proof, suite.proof, suite.proof, clientHeight, clientHeight, ibctesting.ConnectionVersion, signer), false},
+ {"invalid nil counterparty client", types.NewMsgConnectionOpenAck(connectionID, connectionID, nil, suite.proof, suite.proof, suite.proof, clientHeight, clientHeight, ibctesting.ConnectionVersion, signer), false},
+ {"invalid unpacking counterparty client", &types.MsgConnectionOpenAck{connectionID, connectionID, ibctesting.ConnectionVersion, invalidAny, clientHeight, suite.proof, suite.proof, suite.proof, clientHeight, signer.String()}, false},
+ {"counterparty client failed validate", types.NewMsgConnectionOpenAck(connectionID, connectionID, invalidClient, suite.proof, suite.proof, suite.proof, clientHeight, clientHeight, ibctesting.ConnectionVersion, signer), false},
+ {"empty proofTry", types.NewMsgConnectionOpenAck(connectionID, connectionID, clientState, emptyProof, suite.proof, suite.proof, clientHeight, clientHeight, ibctesting.ConnectionVersion, signer), false},
+ {"empty proofClient", types.NewMsgConnectionOpenAck(connectionID, connectionID, clientState, suite.proof, emptyProof, suite.proof, clientHeight, clientHeight, ibctesting.ConnectionVersion, signer), false},
+ {"empty proofConsensus", types.NewMsgConnectionOpenAck(connectionID, connectionID, clientState, suite.proof, suite.proof, emptyProof, clientHeight, clientHeight, ibctesting.ConnectionVersion, signer), false},
+ {"invalid proofHeight", types.NewMsgConnectionOpenAck(connectionID, connectionID, clientState, suite.proof, suite.proof, suite.proof, clienttypes.ZeroHeight(), clientHeight, ibctesting.ConnectionVersion, signer), false},
+ {"invalid consensusHeight", types.NewMsgConnectionOpenAck(connectionID, connectionID, clientState, suite.proof, suite.proof, suite.proof, clientHeight, clienttypes.ZeroHeight(), ibctesting.ConnectionVersion, signer), false},
+ {"invalid version", types.NewMsgConnectionOpenAck(connectionID, connectionID, clientState, suite.proof, suite.proof, suite.proof, clientHeight, clientHeight, &types.Version{}, signer), false},
+ {"empty signer", types.NewMsgConnectionOpenAck(connectionID, connectionID, clientState, suite.proof, suite.proof, suite.proof, clientHeight, clientHeight, ibctesting.ConnectionVersion, nil), false},
+ {"success", types.NewMsgConnectionOpenAck(connectionID, connectionID, clientState, suite.proof, suite.proof, suite.proof, clientHeight, clientHeight, ibctesting.ConnectionVersion, signer), true},
+ }
+
+ for _, tc := range testCases {
+ err := tc.msg.ValidateBasic()
+ if tc.expPass {
+ suite.Require().NoError(err, tc.name)
+ } else {
+ suite.Require().Error(err, tc.name)
+ }
+ }
+}
+
+func (suite *MsgTestSuite) TestNewMsgConnectionOpenConfirm() {
+ signer, _ := sdk.AccAddressFromBech32("cosmos1ckgw5d7jfj7wwxjzs9fdrdev9vc8dzcw3n2lht")
+
+ testMsgs := []*types.MsgConnectionOpenConfirm{
+ types.NewMsgConnectionOpenConfirm("test/conn1", suite.proof, clientHeight, signer),
+ types.NewMsgConnectionOpenConfirm(connectionID, emptyProof, clientHeight, signer),
+ types.NewMsgConnectionOpenConfirm(connectionID, suite.proof, clienttypes.ZeroHeight(), signer),
+ types.NewMsgConnectionOpenConfirm(connectionID, suite.proof, clientHeight, nil),
+ types.NewMsgConnectionOpenConfirm(connectionID, suite.proof, clientHeight, signer),
+ }
+
+ var testCases = []struct {
+ msg *types.MsgConnectionOpenConfirm
+ expPass bool
+ errMsg string
+ }{
+ {testMsgs[0], false, "invalid connection ID"},
+		{testMsgs[1], false, "empty proofAck"},
+ {testMsgs[2], false, "invalid proofHeight"},
+ {testMsgs[3], false, "empty signer"},
+ {testMsgs[4], true, "success"},
+ }
+
+ for i, tc := range testCases {
+ err := tc.msg.ValidateBasic()
+ if tc.expPass {
+ suite.Require().NoError(err, "Msg %d failed: %s", i, tc.errMsg)
+ } else {
+ suite.Require().Error(err, "Invalid Msg %d passed: %s", i, tc.errMsg)
+ }
+ }
+}
diff --git a/core/03-connection/types/query.go b/core/03-connection/types/query.go
new file mode 100644
index 00000000..7661b38d
--- /dev/null
+++ b/core/03-connection/types/query.go
@@ -0,0 +1,70 @@
+package types
+
+import (
+ codectypes "github.com/cosmos/cosmos-sdk/codec/types"
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+)
+
+var (
+ _ codectypes.UnpackInterfacesMessage = QueryConnectionClientStateResponse{}
+ _ codectypes.UnpackInterfacesMessage = QueryConnectionConsensusStateResponse{}
+)
+
+// NewQueryConnectionResponse creates a new QueryConnectionResponse instance
+func NewQueryConnectionResponse(
+ connection ConnectionEnd, proof []byte, height clienttypes.Height,
+) *QueryConnectionResponse {
+ return &QueryConnectionResponse{
+ Connection: &connection,
+ Proof: proof,
+ ProofHeight: height,
+ }
+}
+
+// NewQueryClientConnectionsResponse creates a new QueryClientConnectionsResponse instance
+func NewQueryClientConnectionsResponse(
+ connectionPaths []string, proof []byte, height clienttypes.Height,
+) *QueryClientConnectionsResponse {
+ return &QueryClientConnectionsResponse{
+ ConnectionPaths: connectionPaths,
+ Proof: proof,
+ ProofHeight: height,
+ }
+}
+
+// NewQueryClientConnectionsRequest creates a new QueryClientConnectionsRequest instance
+func NewQueryClientConnectionsRequest(clientID string) *QueryClientConnectionsRequest {
+ return &QueryClientConnectionsRequest{
+ ClientId: clientID,
+ }
+}
+
+// NewQueryConnectionClientStateResponse creates a new QueryConnectionClientStateResponse instance
+func NewQueryConnectionClientStateResponse(identifiedClientState clienttypes.IdentifiedClientState, proof []byte, height clienttypes.Height) *QueryConnectionClientStateResponse {
+ return &QueryConnectionClientStateResponse{
+ IdentifiedClientState: &identifiedClientState,
+ Proof: proof,
+ ProofHeight: height,
+ }
+}
+
+// UnpackInterfaces implements UnpackInterfacesMessage.UnpackInterfaces
+func (qccsr QueryConnectionClientStateResponse) UnpackInterfaces(unpacker codectypes.AnyUnpacker) error {
+ return qccsr.IdentifiedClientState.UnpackInterfaces(unpacker)
+}
+
+// NewQueryConnectionConsensusStateResponse creates a new QueryConnectionConsensusStateResponse instance
+func NewQueryConnectionConsensusStateResponse(clientID string, anyConsensusState *codectypes.Any, consensusStateHeight exported.Height, proof []byte, height clienttypes.Height) *QueryConnectionConsensusStateResponse {
+ return &QueryConnectionConsensusStateResponse{
+ ConsensusState: anyConsensusState,
+ ClientId: clientID,
+ Proof: proof,
+ ProofHeight: height,
+ }
+}
+
+// UnpackInterfaces implements UnpackInterfacesMessage.UnpackInterfaces
+func (qccsr QueryConnectionConsensusStateResponse) UnpackInterfaces(unpacker codectypes.AnyUnpacker) error {
+ return unpacker.UnpackAny(qccsr.ConsensusState, new(exported.ConsensusState))
+}
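
The client-state and consensus-state responses above carry their payloads as protobuf Any values, so a caller has to run UnpackInterfaces against an interface registry before the concrete type is usable. Below is a short illustrative helper, not part of the patch: the package name and import paths are assumptions, and the registry is expected to have the relevant light-client types registered, as the app-level InterfaceRegistry normally does.

package connquery // hypothetical helper package, shown only for illustration

import (
	"fmt"

	codectypes "github.com/cosmos/cosmos-sdk/codec/types"
	conntypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
	"github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
)

// ResolveConsensusState unpacks the Any-encoded consensus state carried by a
// QueryConnectionConsensusStateResponse and returns it as the
// exported.ConsensusState interface.
func ResolveConsensusState(registry codectypes.InterfaceRegistry, resp *conntypes.QueryConnectionConsensusStateResponse) (exported.ConsensusState, error) {
	// UnpackInterfaces caches the decoded value inside the Any.
	if err := resp.UnpackInterfaces(registry); err != nil {
		return nil, err
	}
	cs, ok := resp.ConsensusState.GetCachedValue().(exported.ConsensusState)
	if !ok {
		return nil, fmt.Errorf("unpacked value does not implement exported.ConsensusState")
	}
	return cs, nil
}
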
diff --git a/core/03-connection/types/query.pb.go b/core/03-connection/types/query.pb.go
new file mode 100644
index 00000000..a03441a1
--- /dev/null
+++ b/core/03-connection/types/query.pb.go
@@ -0,0 +1,2892 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: ibcgo/core/connection/v1/query.proto
+
+package types
+
+import (
+ context "context"
+ fmt "fmt"
+ types1 "github.com/cosmos/cosmos-sdk/codec/types"
+ query "github.com/cosmos/cosmos-sdk/types/query"
+ types "github.com/cosmos/ibc-go/core/02-client/types"
+ _ "github.com/gogo/protobuf/gogoproto"
+ grpc1 "github.com/gogo/protobuf/grpc"
+ proto "github.com/gogo/protobuf/proto"
+ _ "google.golang.org/genproto/googleapis/api/annotations"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+ io "io"
+ math "math"
+ math_bits "math/bits"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+// QueryConnectionRequest is the request type for the Query/Connection RPC
+// method
+type QueryConnectionRequest struct {
+ // connection unique identifier
+ ConnectionId string `protobuf:"bytes,1,opt,name=connection_id,json=connectionId,proto3" json:"connection_id,omitempty"`
+}
+
+func (m *QueryConnectionRequest) Reset() { *m = QueryConnectionRequest{} }
+func (m *QueryConnectionRequest) String() string { return proto.CompactTextString(m) }
+func (*QueryConnectionRequest) ProtoMessage() {}
+func (*QueryConnectionRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_eaccf9805ea75291, []int{0}
+}
+func (m *QueryConnectionRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *QueryConnectionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_QueryConnectionRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *QueryConnectionRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QueryConnectionRequest.Merge(m, src)
+}
+func (m *QueryConnectionRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *QueryConnectionRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_QueryConnectionRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QueryConnectionRequest proto.InternalMessageInfo
+
+func (m *QueryConnectionRequest) GetConnectionId() string {
+ if m != nil {
+ return m.ConnectionId
+ }
+ return ""
+}
+
+// QueryConnectionResponse is the response type for the Query/Connection RPC
+// method. Besides the connection end, it includes a proof and the height from
+// which the proof was retrieved.
+type QueryConnectionResponse struct {
+ // connection associated with the request identifier
+ Connection *ConnectionEnd `protobuf:"bytes,1,opt,name=connection,proto3" json:"connection,omitempty"`
+ // merkle proof of existence
+ Proof []byte `protobuf:"bytes,2,opt,name=proof,proto3" json:"proof,omitempty"`
+ // height at which the proof was retrieved
+ ProofHeight types.Height `protobuf:"bytes,3,opt,name=proof_height,json=proofHeight,proto3" json:"proof_height"`
+}
+
+func (m *QueryConnectionResponse) Reset() { *m = QueryConnectionResponse{} }
+func (m *QueryConnectionResponse) String() string { return proto.CompactTextString(m) }
+func (*QueryConnectionResponse) ProtoMessage() {}
+func (*QueryConnectionResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_eaccf9805ea75291, []int{1}
+}
+func (m *QueryConnectionResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *QueryConnectionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_QueryConnectionResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *QueryConnectionResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QueryConnectionResponse.Merge(m, src)
+}
+func (m *QueryConnectionResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *QueryConnectionResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_QueryConnectionResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QueryConnectionResponse proto.InternalMessageInfo
+
+func (m *QueryConnectionResponse) GetConnection() *ConnectionEnd {
+ if m != nil {
+ return m.Connection
+ }
+ return nil
+}
+
+func (m *QueryConnectionResponse) GetProof() []byte {
+ if m != nil {
+ return m.Proof
+ }
+ return nil
+}
+
+func (m *QueryConnectionResponse) GetProofHeight() types.Height {
+ if m != nil {
+ return m.ProofHeight
+ }
+ return types.Height{}
+}
+
+// QueryConnectionsRequest is the request type for the Query/Connections RPC
+// method
+type QueryConnectionsRequest struct {
+ Pagination *query.PageRequest `protobuf:"bytes,1,opt,name=pagination,proto3" json:"pagination,omitempty"`
+}
+
+func (m *QueryConnectionsRequest) Reset() { *m = QueryConnectionsRequest{} }
+func (m *QueryConnectionsRequest) String() string { return proto.CompactTextString(m) }
+func (*QueryConnectionsRequest) ProtoMessage() {}
+func (*QueryConnectionsRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_eaccf9805ea75291, []int{2}
+}
+func (m *QueryConnectionsRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *QueryConnectionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_QueryConnectionsRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *QueryConnectionsRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QueryConnectionsRequest.Merge(m, src)
+}
+func (m *QueryConnectionsRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *QueryConnectionsRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_QueryConnectionsRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QueryConnectionsRequest proto.InternalMessageInfo
+
+func (m *QueryConnectionsRequest) GetPagination() *query.PageRequest {
+ if m != nil {
+ return m.Pagination
+ }
+ return nil
+}
+
+// QueryConnectionsResponse is the response type for the Query/Connections RPC
+// method.
+type QueryConnectionsResponse struct {
+ // list of stored connections of the chain.
+ Connections []*IdentifiedConnection `protobuf:"bytes,1,rep,name=connections,proto3" json:"connections,omitempty"`
+ // pagination response
+ Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"`
+ // query block height
+ Height types.Height `protobuf:"bytes,3,opt,name=height,proto3" json:"height"`
+}
+
+func (m *QueryConnectionsResponse) Reset() { *m = QueryConnectionsResponse{} }
+func (m *QueryConnectionsResponse) String() string { return proto.CompactTextString(m) }
+func (*QueryConnectionsResponse) ProtoMessage() {}
+func (*QueryConnectionsResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_eaccf9805ea75291, []int{3}
+}
+func (m *QueryConnectionsResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *QueryConnectionsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_QueryConnectionsResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *QueryConnectionsResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QueryConnectionsResponse.Merge(m, src)
+}
+func (m *QueryConnectionsResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *QueryConnectionsResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_QueryConnectionsResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QueryConnectionsResponse proto.InternalMessageInfo
+
+func (m *QueryConnectionsResponse) GetConnections() []*IdentifiedConnection {
+ if m != nil {
+ return m.Connections
+ }
+ return nil
+}
+
+func (m *QueryConnectionsResponse) GetPagination() *query.PageResponse {
+ if m != nil {
+ return m.Pagination
+ }
+ return nil
+}
+
+func (m *QueryConnectionsResponse) GetHeight() types.Height {
+ if m != nil {
+ return m.Height
+ }
+ return types.Height{}
+}
+
+// QueryClientConnectionsRequest is the request type for the
+// Query/ClientConnections RPC method
+type QueryClientConnectionsRequest struct {
+ // client identifier associated with a connection
+ ClientId string `protobuf:"bytes,1,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty"`
+}
+
+func (m *QueryClientConnectionsRequest) Reset() { *m = QueryClientConnectionsRequest{} }
+func (m *QueryClientConnectionsRequest) String() string { return proto.CompactTextString(m) }
+func (*QueryClientConnectionsRequest) ProtoMessage() {}
+func (*QueryClientConnectionsRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_eaccf9805ea75291, []int{4}
+}
+func (m *QueryClientConnectionsRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *QueryClientConnectionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_QueryClientConnectionsRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *QueryClientConnectionsRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QueryClientConnectionsRequest.Merge(m, src)
+}
+func (m *QueryClientConnectionsRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *QueryClientConnectionsRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_QueryClientConnectionsRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QueryClientConnectionsRequest proto.InternalMessageInfo
+
+func (m *QueryClientConnectionsRequest) GetClientId() string {
+ if m != nil {
+ return m.ClientId
+ }
+ return ""
+}
+
+// QueryClientConnectionsResponse is the response type for the
+// Query/ClientConnections RPC method
+type QueryClientConnectionsResponse struct {
+ // slice of all the connection paths associated with a client.
+ ConnectionPaths []string `protobuf:"bytes,1,rep,name=connection_paths,json=connectionPaths,proto3" json:"connection_paths,omitempty"`
+ // merkle proof of existence
+ Proof []byte `protobuf:"bytes,2,opt,name=proof,proto3" json:"proof,omitempty"`
+ // height at which the proof was generated
+ ProofHeight types.Height `protobuf:"bytes,3,opt,name=proof_height,json=proofHeight,proto3" json:"proof_height"`
+}
+
+func (m *QueryClientConnectionsResponse) Reset() { *m = QueryClientConnectionsResponse{} }
+func (m *QueryClientConnectionsResponse) String() string { return proto.CompactTextString(m) }
+func (*QueryClientConnectionsResponse) ProtoMessage() {}
+func (*QueryClientConnectionsResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_eaccf9805ea75291, []int{5}
+}
+func (m *QueryClientConnectionsResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *QueryClientConnectionsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_QueryClientConnectionsResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *QueryClientConnectionsResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QueryClientConnectionsResponse.Merge(m, src)
+}
+func (m *QueryClientConnectionsResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *QueryClientConnectionsResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_QueryClientConnectionsResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QueryClientConnectionsResponse proto.InternalMessageInfo
+
+func (m *QueryClientConnectionsResponse) GetConnectionPaths() []string {
+ if m != nil {
+ return m.ConnectionPaths
+ }
+ return nil
+}
+
+func (m *QueryClientConnectionsResponse) GetProof() []byte {
+ if m != nil {
+ return m.Proof
+ }
+ return nil
+}
+
+func (m *QueryClientConnectionsResponse) GetProofHeight() types.Height {
+ if m != nil {
+ return m.ProofHeight
+ }
+ return types.Height{}
+}
+
+// QueryConnectionClientStateRequest is the request type for the
+// Query/ConnectionClientState RPC method
+type QueryConnectionClientStateRequest struct {
+ // connection identifier
+ ConnectionId string `protobuf:"bytes,1,opt,name=connection_id,json=connectionId,proto3" json:"connection_id,omitempty" yaml:"connection_id"`
+}
+
+func (m *QueryConnectionClientStateRequest) Reset() { *m = QueryConnectionClientStateRequest{} }
+func (m *QueryConnectionClientStateRequest) String() string { return proto.CompactTextString(m) }
+func (*QueryConnectionClientStateRequest) ProtoMessage() {}
+func (*QueryConnectionClientStateRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_eaccf9805ea75291, []int{6}
+}
+func (m *QueryConnectionClientStateRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *QueryConnectionClientStateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_QueryConnectionClientStateRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *QueryConnectionClientStateRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QueryConnectionClientStateRequest.Merge(m, src)
+}
+func (m *QueryConnectionClientStateRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *QueryConnectionClientStateRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_QueryConnectionClientStateRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QueryConnectionClientStateRequest proto.InternalMessageInfo
+
+func (m *QueryConnectionClientStateRequest) GetConnectionId() string {
+ if m != nil {
+ return m.ConnectionId
+ }
+ return ""
+}
+
+// QueryConnectionClientStateResponse is the response type for the
+// Query/ConnectionClientState RPC method
+type QueryConnectionClientStateResponse struct {
+	// client state associated with the connection
+ IdentifiedClientState *types.IdentifiedClientState `protobuf:"bytes,1,opt,name=identified_client_state,json=identifiedClientState,proto3" json:"identified_client_state,omitempty"`
+ // merkle proof of existence
+ Proof []byte `protobuf:"bytes,2,opt,name=proof,proto3" json:"proof,omitempty"`
+ // height at which the proof was retrieved
+ ProofHeight types.Height `protobuf:"bytes,3,opt,name=proof_height,json=proofHeight,proto3" json:"proof_height"`
+}
+
+func (m *QueryConnectionClientStateResponse) Reset() { *m = QueryConnectionClientStateResponse{} }
+func (m *QueryConnectionClientStateResponse) String() string { return proto.CompactTextString(m) }
+func (*QueryConnectionClientStateResponse) ProtoMessage() {}
+func (*QueryConnectionClientStateResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_eaccf9805ea75291, []int{7}
+}
+func (m *QueryConnectionClientStateResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *QueryConnectionClientStateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_QueryConnectionClientStateResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *QueryConnectionClientStateResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QueryConnectionClientStateResponse.Merge(m, src)
+}
+func (m *QueryConnectionClientStateResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *QueryConnectionClientStateResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_QueryConnectionClientStateResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QueryConnectionClientStateResponse proto.InternalMessageInfo
+
+func (m *QueryConnectionClientStateResponse) GetIdentifiedClientState() *types.IdentifiedClientState {
+ if m != nil {
+ return m.IdentifiedClientState
+ }
+ return nil
+}
+
+func (m *QueryConnectionClientStateResponse) GetProof() []byte {
+ if m != nil {
+ return m.Proof
+ }
+ return nil
+}
+
+func (m *QueryConnectionClientStateResponse) GetProofHeight() types.Height {
+ if m != nil {
+ return m.ProofHeight
+ }
+ return types.Height{}
+}
+
+// QueryConnectionConsensusStateRequest is the request type for the
+// Query/ConnectionConsensusState RPC method
+type QueryConnectionConsensusStateRequest struct {
+ // connection identifier
+ ConnectionId string `protobuf:"bytes,1,opt,name=connection_id,json=connectionId,proto3" json:"connection_id,omitempty" yaml:"connection_id"`
+ RevisionNumber uint64 `protobuf:"varint,2,opt,name=revision_number,json=revisionNumber,proto3" json:"revision_number,omitempty"`
+ RevisionHeight uint64 `protobuf:"varint,3,opt,name=revision_height,json=revisionHeight,proto3" json:"revision_height,omitempty"`
+}
+
+func (m *QueryConnectionConsensusStateRequest) Reset() { *m = QueryConnectionConsensusStateRequest{} }
+func (m *QueryConnectionConsensusStateRequest) String() string { return proto.CompactTextString(m) }
+func (*QueryConnectionConsensusStateRequest) ProtoMessage() {}
+func (*QueryConnectionConsensusStateRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_eaccf9805ea75291, []int{8}
+}
+func (m *QueryConnectionConsensusStateRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *QueryConnectionConsensusStateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_QueryConnectionConsensusStateRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *QueryConnectionConsensusStateRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QueryConnectionConsensusStateRequest.Merge(m, src)
+}
+func (m *QueryConnectionConsensusStateRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *QueryConnectionConsensusStateRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_QueryConnectionConsensusStateRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QueryConnectionConsensusStateRequest proto.InternalMessageInfo
+
+func (m *QueryConnectionConsensusStateRequest) GetConnectionId() string {
+ if m != nil {
+ return m.ConnectionId
+ }
+ return ""
+}
+
+func (m *QueryConnectionConsensusStateRequest) GetRevisionNumber() uint64 {
+ if m != nil {
+ return m.RevisionNumber
+ }
+ return 0
+}
+
+func (m *QueryConnectionConsensusStateRequest) GetRevisionHeight() uint64 {
+ if m != nil {
+ return m.RevisionHeight
+ }
+ return 0
+}
+
+// QueryConnectionConsensusStateResponse is the response type for the
+// Query/ConnectionConsensusState RPC method
+type QueryConnectionConsensusStateResponse struct {
+	// consensus state associated with the connection
+ ConsensusState *types1.Any `protobuf:"bytes,1,opt,name=consensus_state,json=consensusState,proto3" json:"consensus_state,omitempty"`
+ // client ID associated with the consensus state
+ ClientId string `protobuf:"bytes,2,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty"`
+ // merkle proof of existence
+ Proof []byte `protobuf:"bytes,3,opt,name=proof,proto3" json:"proof,omitempty"`
+ // height at which the proof was retrieved
+ ProofHeight types.Height `protobuf:"bytes,4,opt,name=proof_height,json=proofHeight,proto3" json:"proof_height"`
+}
+
+func (m *QueryConnectionConsensusStateResponse) Reset() { *m = QueryConnectionConsensusStateResponse{} }
+func (m *QueryConnectionConsensusStateResponse) String() string { return proto.CompactTextString(m) }
+func (*QueryConnectionConsensusStateResponse) ProtoMessage() {}
+func (*QueryConnectionConsensusStateResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_eaccf9805ea75291, []int{9}
+}
+func (m *QueryConnectionConsensusStateResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *QueryConnectionConsensusStateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_QueryConnectionConsensusStateResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *QueryConnectionConsensusStateResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QueryConnectionConsensusStateResponse.Merge(m, src)
+}
+func (m *QueryConnectionConsensusStateResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *QueryConnectionConsensusStateResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_QueryConnectionConsensusStateResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QueryConnectionConsensusStateResponse proto.InternalMessageInfo
+
+func (m *QueryConnectionConsensusStateResponse) GetConsensusState() *types1.Any {
+ if m != nil {
+ return m.ConsensusState
+ }
+ return nil
+}
+
+func (m *QueryConnectionConsensusStateResponse) GetClientId() string {
+ if m != nil {
+ return m.ClientId
+ }
+ return ""
+}
+
+func (m *QueryConnectionConsensusStateResponse) GetProof() []byte {
+ if m != nil {
+ return m.Proof
+ }
+ return nil
+}
+
+func (m *QueryConnectionConsensusStateResponse) GetProofHeight() types.Height {
+ if m != nil {
+ return m.ProofHeight
+ }
+ return types.Height{}
+}
+
+func init() {
+ proto.RegisterType((*QueryConnectionRequest)(nil), "ibcgo.core.connection.v1.QueryConnectionRequest")
+ proto.RegisterType((*QueryConnectionResponse)(nil), "ibcgo.core.connection.v1.QueryConnectionResponse")
+ proto.RegisterType((*QueryConnectionsRequest)(nil), "ibcgo.core.connection.v1.QueryConnectionsRequest")
+ proto.RegisterType((*QueryConnectionsResponse)(nil), "ibcgo.core.connection.v1.QueryConnectionsResponse")
+ proto.RegisterType((*QueryClientConnectionsRequest)(nil), "ibcgo.core.connection.v1.QueryClientConnectionsRequest")
+ proto.RegisterType((*QueryClientConnectionsResponse)(nil), "ibcgo.core.connection.v1.QueryClientConnectionsResponse")
+ proto.RegisterType((*QueryConnectionClientStateRequest)(nil), "ibcgo.core.connection.v1.QueryConnectionClientStateRequest")
+ proto.RegisterType((*QueryConnectionClientStateResponse)(nil), "ibcgo.core.connection.v1.QueryConnectionClientStateResponse")
+ proto.RegisterType((*QueryConnectionConsensusStateRequest)(nil), "ibcgo.core.connection.v1.QueryConnectionConsensusStateRequest")
+ proto.RegisterType((*QueryConnectionConsensusStateResponse)(nil), "ibcgo.core.connection.v1.QueryConnectionConsensusStateResponse")
+}
+
+func init() {
+ proto.RegisterFile("ibcgo/core/connection/v1/query.proto", fileDescriptor_eaccf9805ea75291)
+}
+
+var fileDescriptor_eaccf9805ea75291 = []byte{
+ // 889 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0xcf, 0x6f, 0xe3, 0x44,
+ 0x14, 0xce, 0xa4, 0xdd, 0xd5, 0x76, 0x52, 0x76, 0x61, 0x94, 0x65, 0x4d, 0x58, 0xb2, 0x5d, 0x6f,
+ 0x4b, 0x5b, 0xaa, 0xce, 0x34, 0xa9, 0x80, 0xaa, 0xbf, 0x80, 0xa0, 0x42, 0x2b, 0x24, 0x54, 0xcc,
+ 0x8d, 0x4b, 0x65, 0x3b, 0x53, 0xc7, 0x52, 0xe3, 0x49, 0x63, 0x27, 0x28, 0xaa, 0x22, 0x24, 0xfe,
+ 0x02, 0x24, 0xae, 0x5c, 0x10, 0x17, 0x4e, 0x5c, 0x39, 0x72, 0x43, 0x3d, 0x56, 0xe2, 0xc2, 0x01,
+ 0x55, 0x55, 0x8b, 0xb8, 0xc3, 0x5f, 0x80, 0x3c, 0x33, 0xae, 0xc7, 0x49, 0xdc, 0x26, 0xd1, 0xf6,
+ 0x96, 0xbc, 0x79, 0x6f, 0xde, 0xf7, 0x7d, 0xef, 0xcd, 0x97, 0xc0, 0x59, 0xd7, 0xb2, 0x1d, 0x46,
+ 0x6c, 0xd6, 0xa4, 0xc4, 0x66, 0x9e, 0x47, 0xed, 0xc0, 0x65, 0x1e, 0x69, 0x97, 0xc8, 0x71, 0x8b,
+ 0x36, 0x3b, 0xb8, 0xd1, 0x64, 0x01, 0x43, 0x1a, 0xcf, 0xc2, 0x61, 0x16, 0x8e, 0xb3, 0x70, 0xbb,
+ 0x54, 0xc8, 0x3b, 0xcc, 0x61, 0x3c, 0x89, 0x84, 0x9f, 0x44, 0x7e, 0xe1, 0x1d, 0x9b, 0xf9, 0x75,
+ 0xe6, 0x13, 0xcb, 0xf4, 0xa9, 0xb8, 0x88, 0xb4, 0x4b, 0x16, 0x0d, 0xcc, 0x12, 0x69, 0x98, 0x8e,
+ 0xeb, 0x99, 0xbc, 0x5c, 0xe4, 0x3e, 0x57, 0x11, 0x1c, 0xb9, 0xd4, 0x0b, 0xc2, 0xee, 0xe2, 0x93,
+ 0x4c, 0x59, 0x4c, 0x05, 0xa9, 0x80, 0x11, 0xa9, 0x4f, 0x1d, 0xc6, 0x9c, 0x23, 0x4a, 0xcc, 0x86,
+ 0x4b, 0x4c, 0xcf, 0x63, 0x01, 0x6f, 0xe5, 0xcb, 0xd3, 0x37, 0xe4, 0x29, 0xff, 0x66, 0xb5, 0x0e,
+ 0x89, 0xe9, 0x49, 0x8a, 0xfa, 0x16, 0x7c, 0xfd, 0x8b, 0x10, 0xe8, 0xc7, 0xd7, 0x37, 0x1a, 0xf4,
+ 0xb8, 0x45, 0xfd, 0x00, 0xbd, 0x80, 0xaf, 0xc4, 0x6d, 0x0e, 0xdc, 0xaa, 0x06, 0x66, 0xc0, 0xc2,
+ 0x94, 0x31, 0x1d, 0x07, 0xf7, 0xaa, 0xfa, 0x6f, 0x00, 0x3e, 0xe9, 0xab, 0xf7, 0x1b, 0xcc, 0xf3,
+ 0x29, 0xfa, 0x14, 0xc2, 0x38, 0x97, 0x57, 0xe7, 0xca, 0xf3, 0x38, 0x4d, 0x52, 0x1c, 0xdf, 0xb0,
+ 0xe3, 0x55, 0x0d, 0xa5, 0x14, 0xe5, 0xe1, 0xbd, 0x46, 0x93, 0xb1, 0x43, 0x2d, 0x3b, 0x03, 0x16,
+ 0xa6, 0x0d, 0xf1, 0x05, 0xed, 0xc0, 0x69, 0xfe, 0xe1, 0xa0, 0x46, 0x5d, 0xa7, 0x16, 0x68, 0x13,
+ 0xbc, 0xc1, 0xd3, 0x44, 0x03, 0xa1, 0x66, 0xbb, 0x84, 0x77, 0x79, 0x4e, 0x65, 0xf2, 0xf4, 0xfc,
+ 0x59, 0xc6, 0xc8, 0xf1, 0x3a, 0x11, 0xd2, 0xcd, 0x3e, 0x02, 0x7e, 0xa4, 0xc0, 0x27, 0x10, 0xc6,
+ 0x63, 0x93, 0x04, 0xde, 0xc6, 0x62, 0xc6, 0x38, 0x9c, 0x31, 0x16, 0xcb, 0x22, 0x67, 0x8c, 0xf7,
+ 0x4d, 0x87, 0xca, 0x5a, 0x43, 0xa9, 0xd4, 0xff, 0x05, 0x50, 0xeb, 0xef, 0x21, 0x55, 0xda, 0x87,
+ 0xb9, 0x98, 0xaa, 0xaf, 0x81, 0x99, 0x89, 0x85, 0x5c, 0x19, 0xa7, 0xcb, 0xb4, 0x57, 0xa5, 0x5e,
+ 0xe0, 0x1e, 0xba, 0xb4, 0xaa, 0x48, 0xae, 0x5e, 0x11, 0xea, 0xae, 0xc0, 0xce, 0x4a, 0xdd, 0x6f,
+ 0x83, 0x2d, 0xe0, 0xa8, 0xb8, 0xd1, 0x3a, 0xbc, 0x3f, 0xb2, 0xb6, 0xb2, 0x42, 0xdf, 0x84, 0x6f,
+ 0x09, 0xca, 0x3c, 0x6d, 0x80, 0xb8, 0x6f, 0xc2, 0x29, 0x71, 0x45, 0xbc, 0x5a, 0x0f, 0x44, 0x60,
+ 0xaf, 0xaa, 0xff, 0x0c, 0x60, 0x31, 0xad, 0x5c, 0xea, 0xb6, 0x08, 0x5f, 0x55, 0xd6, 0xb3, 0x61,
+ 0x06, 0x35, 0x21, 0xde, 0x94, 0xf1, 0x28, 0x8e, 0xef, 0x87, 0xe1, 0xbb, 0xdd, 0x1f, 0x0b, 0x3e,
+ 0xef, 0x99, 0xad, 0xc0, 0xfc, 0x65, 0x60, 0x06, 0xd1, 0x36, 0xa0, 0xad, 0x81, 0x6f, 0xa9, 0xa2,
+ 0xfd, 0x77, 0xfe, 0x2c, 0xdf, 0x31, 0xeb, 0x47, 0xeb, 0x7a, 0xe2, 0x58, 0xef, 0x79, 0x65, 0xff,
+ 0x00, 0xa8, 0xdf, 0xd4, 0x44, 0x4a, 0x62, 0xc3, 0x27, 0xee, 0xf5, 0x76, 0x1c, 0x48, 0x75, 0xfd,
+ 0x30, 0x45, 0x2e, 0xef, 0xd2, 0x60, 0x72, 0xca, 0x4a, 0x29, 0xb7, 0x3e, 0x76, 0x07, 0x85, 0xef,
+ 0x56, 0xcc, 0x5f, 0x01, 0x9c, 0xed, 0x25, 0x1a, 0x52, 0xf3, 0xfc, 0x96, 0xff, 0x12, 0x05, 0x45,
+ 0xf3, 0xf0, 0x51, 0x93, 0xb6, 0x5d, 0x3f, 0x3c, 0xf5, 0x5a, 0x75, 0x8b, 0x36, 0x39, 0x9d, 0x49,
+ 0xe3, 0x61, 0x14, 0xfe, 0x9c, 0x47, 0x13, 0x89, 0x0a, 0x35, 0x25, 0x51, 0x22, 0xbf, 0x00, 0x70,
+ 0xee, 0x16, 0xe4, 0x72, 0x4a, 0x5b, 0x30, 0x5c, 0x50, 0x71, 0x92, 0x98, 0x4e, 0x1e, 0x0b, 0x9b,
+ 0xc6, 0x91, 0x4d, 0xe3, 0x8f, 0xbc, 0x8e, 0xf1, 0xd0, 0x4e, 0x5c, 0x93, 0x7c, 0x37, 0xd9, 0xe4,
+ 0xbb, 0x89, 0x87, 0x33, 0x71, 0xd3, 0x70, 0x26, 0xc7, 0x1a, 0x4e, 0xf9, 0xa7, 0x07, 0xf0, 0x1e,
+ 0xa7, 0x88, 0x7e, 0x01, 0x10, 0xc6, 0x3c, 0xd1, 0x4a, 0xba, 0x5b, 0x0d, 0xfe, 0x6d, 0x29, 0x94,
+ 0x46, 0xa8, 0x10, 0xb2, 0xe9, 0x1b, 0xdf, 0xfe, 0xf1, 0xf7, 0xf7, 0xd9, 0x77, 0xd1, 0x2a, 0x71,
+ 0x2d, 0xfb, 0xe6, 0xdf, 0x44, 0x9f, 0x9c, 0x24, 0xa6, 0xdf, 0x45, 0x3f, 0x02, 0x98, 0x53, 0x4c,
+ 0x04, 0x0d, 0xdf, 0x3f, 0xf2, 0xab, 0x42, 0x79, 0x94, 0x12, 0x89, 0x79, 0x89, 0x63, 0x9e, 0x43,
+ 0x2f, 0x86, 0xc0, 0x8c, 0x7e, 0x07, 0xf0, 0xb5, 0x3e, 0xbb, 0x43, 0xef, 0xdf, 0xd6, 0x36, 0xc5,
+ 0x5f, 0x0b, 0x6b, 0xa3, 0x17, 0x4a, 0xd4, 0xdb, 0x1c, 0xf5, 0x1a, 0x7a, 0x2f, 0x15, 0xb5, 0xd8,
+ 0xbf, 0xa4, 0xe0, 0xd1, 0x4e, 0x76, 0xd1, 0x5f, 0x00, 0x3e, 0x1e, 0x68, 0x54, 0x68, 0x63, 0x68,
+ 0x0d, 0xfb, 0x3d, 0xb4, 0xb0, 0x39, 0x5e, 0xb1, 0x24, 0xb5, 0xcb, 0x49, 0x55, 0xd0, 0x87, 0x63,
+ 0xac, 0x0f, 0x51, 0xad, 0x14, 0xfd, 0x90, 0x85, 0x5a, 0xda, 0x23, 0x47, 0xdb, 0xc3, 0x83, 0x1c,
+ 0xe4, 0x6b, 0x85, 0x0f, 0xc6, 0xae, 0x97, 0x3c, 0xbf, 0xe1, 0x3c, 0x3b, 0xe8, 0xeb, 0xb1, 0x78,
+ 0x26, 0x7d, 0x89, 0x44, 0x1e, 0x47, 0x4e, 0x7a, 0xdc, 0xb2, 0x4b, 0x84, 0x95, 0x28, 0x07, 0x22,
+ 0xd0, 0xad, 0x7c, 0x76, 0x7a, 0x59, 0x04, 0x67, 0x97, 0x45, 0x70, 0x71, 0x59, 0x04, 0xdf, 0x5d,
+ 0x15, 0x33, 0x67, 0x57, 0xc5, 0xcc, 0x9f, 0x57, 0xc5, 0xcc, 0x57, 0x25, 0xc7, 0x0d, 0x6a, 0x2d,
+ 0x0b, 0xdb, 0xac, 0x4e, 0xe4, 0x1f, 0x65, 0xd7, 0xb2, 0x97, 0xa3, 0x7f, 0xb8, 0x2b, 0xab, 0xcb,
+ 0x0a, 0xd2, 0xa0, 0xd3, 0xa0, 0xbe, 0x75, 0x9f, 0x5b, 0xe1, 0xea, 0xff, 0x01, 0x00, 0x00, 0xff,
+ 0xff, 0x02, 0x85, 0x22, 0x9e, 0xaf, 0x0b, 0x00, 0x00,
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// QueryClient is the client API for Query service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type QueryClient interface {
+ // Connection queries an IBC connection end.
+ Connection(ctx context.Context, in *QueryConnectionRequest, opts ...grpc.CallOption) (*QueryConnectionResponse, error)
+ // Connections queries all the IBC connections of a chain.
+ Connections(ctx context.Context, in *QueryConnectionsRequest, opts ...grpc.CallOption) (*QueryConnectionsResponse, error)
+ // ClientConnections queries the connection paths associated with a client
+ // state.
+ ClientConnections(ctx context.Context, in *QueryClientConnectionsRequest, opts ...grpc.CallOption) (*QueryClientConnectionsResponse, error)
+ // ConnectionClientState queries the client state associated with the
+ // connection.
+ ConnectionClientState(ctx context.Context, in *QueryConnectionClientStateRequest, opts ...grpc.CallOption) (*QueryConnectionClientStateResponse, error)
+ // ConnectionConsensusState queries the consensus state associated with the
+ // connection.
+ ConnectionConsensusState(ctx context.Context, in *QueryConnectionConsensusStateRequest, opts ...grpc.CallOption) (*QueryConnectionConsensusStateResponse, error)
+}
+
+type queryClient struct {
+ cc grpc1.ClientConn
+}
+
+func NewQueryClient(cc grpc1.ClientConn) QueryClient {
+ return &queryClient{cc}
+}
+
+func (c *queryClient) Connection(ctx context.Context, in *QueryConnectionRequest, opts ...grpc.CallOption) (*QueryConnectionResponse, error) {
+ out := new(QueryConnectionResponse)
+ err := c.cc.Invoke(ctx, "/ibcgo.core.connection.v1.Query/Connection", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *queryClient) Connections(ctx context.Context, in *QueryConnectionsRequest, opts ...grpc.CallOption) (*QueryConnectionsResponse, error) {
+ out := new(QueryConnectionsResponse)
+ err := c.cc.Invoke(ctx, "/ibcgo.core.connection.v1.Query/Connections", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *queryClient) ClientConnections(ctx context.Context, in *QueryClientConnectionsRequest, opts ...grpc.CallOption) (*QueryClientConnectionsResponse, error) {
+ out := new(QueryClientConnectionsResponse)
+ err := c.cc.Invoke(ctx, "/ibcgo.core.connection.v1.Query/ClientConnections", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *queryClient) ConnectionClientState(ctx context.Context, in *QueryConnectionClientStateRequest, opts ...grpc.CallOption) (*QueryConnectionClientStateResponse, error) {
+ out := new(QueryConnectionClientStateResponse)
+ err := c.cc.Invoke(ctx, "/ibcgo.core.connection.v1.Query/ConnectionClientState", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *queryClient) ConnectionConsensusState(ctx context.Context, in *QueryConnectionConsensusStateRequest, opts ...grpc.CallOption) (*QueryConnectionConsensusStateResponse, error) {
+ out := new(QueryConnectionConsensusStateResponse)
+ err := c.cc.Invoke(ctx, "/ibcgo.core.connection.v1.Query/ConnectionConsensusState", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// QueryServer is the server API for Query service.
+type QueryServer interface {
+ // Connection queries an IBC connection end.
+ Connection(context.Context, *QueryConnectionRequest) (*QueryConnectionResponse, error)
+ // Connections queries all the IBC connections of a chain.
+ Connections(context.Context, *QueryConnectionsRequest) (*QueryConnectionsResponse, error)
+ // ClientConnections queries the connection paths associated with a client
+ // state.
+ ClientConnections(context.Context, *QueryClientConnectionsRequest) (*QueryClientConnectionsResponse, error)
+ // ConnectionClientState queries the client state associated with the
+ // connection.
+ ConnectionClientState(context.Context, *QueryConnectionClientStateRequest) (*QueryConnectionClientStateResponse, error)
+ // ConnectionConsensusState queries the consensus state associated with the
+ // connection.
+ ConnectionConsensusState(context.Context, *QueryConnectionConsensusStateRequest) (*QueryConnectionConsensusStateResponse, error)
+}
+
+// UnimplementedQueryServer can be embedded to have forward compatible implementations.
+type UnimplementedQueryServer struct {
+}
+
+func (*UnimplementedQueryServer) Connection(ctx context.Context, req *QueryConnectionRequest) (*QueryConnectionResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Connection not implemented")
+}
+func (*UnimplementedQueryServer) Connections(ctx context.Context, req *QueryConnectionsRequest) (*QueryConnectionsResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Connections not implemented")
+}
+func (*UnimplementedQueryServer) ClientConnections(ctx context.Context, req *QueryClientConnectionsRequest) (*QueryClientConnectionsResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ClientConnections not implemented")
+}
+func (*UnimplementedQueryServer) ConnectionClientState(ctx context.Context, req *QueryConnectionClientStateRequest) (*QueryConnectionClientStateResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ConnectionClientState not implemented")
+}
+func (*UnimplementedQueryServer) ConnectionConsensusState(ctx context.Context, req *QueryConnectionConsensusStateRequest) (*QueryConnectionConsensusStateResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ConnectionConsensusState not implemented")
+}
+
+func RegisterQueryServer(s grpc1.Server, srv QueryServer) {
+ s.RegisterService(&_Query_serviceDesc, srv)
+}
+
+func _Query_Connection_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(QueryConnectionRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(QueryServer).Connection(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/ibcgo.core.connection.v1.Query/Connection",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(QueryServer).Connection(ctx, req.(*QueryConnectionRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Query_Connections_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(QueryConnectionsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(QueryServer).Connections(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/ibcgo.core.connection.v1.Query/Connections",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(QueryServer).Connections(ctx, req.(*QueryConnectionsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Query_ClientConnections_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(QueryClientConnectionsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(QueryServer).ClientConnections(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/ibcgo.core.connection.v1.Query/ClientConnections",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(QueryServer).ClientConnections(ctx, req.(*QueryClientConnectionsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Query_ConnectionClientState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(QueryConnectionClientStateRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(QueryServer).ConnectionClientState(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/ibcgo.core.connection.v1.Query/ConnectionClientState",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(QueryServer).ConnectionClientState(ctx, req.(*QueryConnectionClientStateRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Query_ConnectionConsensusState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(QueryConnectionConsensusStateRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(QueryServer).ConnectionConsensusState(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/ibcgo.core.connection.v1.Query/ConnectionConsensusState",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(QueryServer).ConnectionConsensusState(ctx, req.(*QueryConnectionConsensusStateRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _Query_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "ibcgo.core.connection.v1.Query",
+ HandlerType: (*QueryServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "Connection",
+ Handler: _Query_Connection_Handler,
+ },
+ {
+ MethodName: "Connections",
+ Handler: _Query_Connections_Handler,
+ },
+ {
+ MethodName: "ClientConnections",
+ Handler: _Query_ClientConnections_Handler,
+ },
+ {
+ MethodName: "ConnectionClientState",
+ Handler: _Query_ConnectionClientState_Handler,
+ },
+ {
+ MethodName: "ConnectionConsensusState",
+ Handler: _Query_ConnectionConsensusState_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "ibcgo/core/connection/v1/query.proto",
+}
+
+func (m *QueryConnectionRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *QueryConnectionRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *QueryConnectionRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.ConnectionId) > 0 {
+ i -= len(m.ConnectionId)
+ copy(dAtA[i:], m.ConnectionId)
+ i = encodeVarintQuery(dAtA, i, uint64(len(m.ConnectionId)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *QueryConnectionResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *QueryConnectionResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *QueryConnectionResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.ProofHeight.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintQuery(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ if len(m.Proof) > 0 {
+ i -= len(m.Proof)
+ copy(dAtA[i:], m.Proof)
+ i = encodeVarintQuery(dAtA, i, uint64(len(m.Proof)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.Connection != nil {
+ {
+ size, err := m.Connection.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintQuery(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *QueryConnectionsRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *QueryConnectionsRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *QueryConnectionsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Pagination != nil {
+ {
+ size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintQuery(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *QueryConnectionsResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *QueryConnectionsResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *QueryConnectionsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Height.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintQuery(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ if m.Pagination != nil {
+ {
+ size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintQuery(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Connections) > 0 {
+ for iNdEx := len(m.Connections) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Connections[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintQuery(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *QueryClientConnectionsRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *QueryClientConnectionsRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *QueryClientConnectionsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.ClientId) > 0 {
+ i -= len(m.ClientId)
+ copy(dAtA[i:], m.ClientId)
+ i = encodeVarintQuery(dAtA, i, uint64(len(m.ClientId)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *QueryClientConnectionsResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *QueryClientConnectionsResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *QueryClientConnectionsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.ProofHeight.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintQuery(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ if len(m.Proof) > 0 {
+ i -= len(m.Proof)
+ copy(dAtA[i:], m.Proof)
+ i = encodeVarintQuery(dAtA, i, uint64(len(m.Proof)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.ConnectionPaths) > 0 {
+ for iNdEx := len(m.ConnectionPaths) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.ConnectionPaths[iNdEx])
+ copy(dAtA[i:], m.ConnectionPaths[iNdEx])
+ i = encodeVarintQuery(dAtA, i, uint64(len(m.ConnectionPaths[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *QueryConnectionClientStateRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *QueryConnectionClientStateRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *QueryConnectionClientStateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.ConnectionId) > 0 {
+ i -= len(m.ConnectionId)
+ copy(dAtA[i:], m.ConnectionId)
+ i = encodeVarintQuery(dAtA, i, uint64(len(m.ConnectionId)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *QueryConnectionClientStateResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *QueryConnectionClientStateResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *QueryConnectionClientStateResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.ProofHeight.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintQuery(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ if len(m.Proof) > 0 {
+ i -= len(m.Proof)
+ copy(dAtA[i:], m.Proof)
+ i = encodeVarintQuery(dAtA, i, uint64(len(m.Proof)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.IdentifiedClientState != nil {
+ {
+ size, err := m.IdentifiedClientState.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintQuery(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *QueryConnectionConsensusStateRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *QueryConnectionConsensusStateRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *QueryConnectionConsensusStateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.RevisionHeight != 0 {
+ i = encodeVarintQuery(dAtA, i, uint64(m.RevisionHeight))
+ i--
+ dAtA[i] = 0x18
+ }
+ if m.RevisionNumber != 0 {
+ i = encodeVarintQuery(dAtA, i, uint64(m.RevisionNumber))
+ i--
+ dAtA[i] = 0x10
+ }
+ if len(m.ConnectionId) > 0 {
+ i -= len(m.ConnectionId)
+ copy(dAtA[i:], m.ConnectionId)
+ i = encodeVarintQuery(dAtA, i, uint64(len(m.ConnectionId)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *QueryConnectionConsensusStateResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *QueryConnectionConsensusStateResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *QueryConnectionConsensusStateResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.ProofHeight.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintQuery(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ if len(m.Proof) > 0 {
+ i -= len(m.Proof)
+ copy(dAtA[i:], m.Proof)
+ i = encodeVarintQuery(dAtA, i, uint64(len(m.Proof)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if len(m.ClientId) > 0 {
+ i -= len(m.ClientId)
+ copy(dAtA[i:], m.ClientId)
+ i = encodeVarintQuery(dAtA, i, uint64(len(m.ClientId)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.ConsensusState != nil {
+ {
+ size, err := m.ConsensusState.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintQuery(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintQuery(dAtA []byte, offset int, v uint64) int {
+ offset -= sovQuery(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *QueryConnectionRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.ConnectionId)
+ if l > 0 {
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ return n
+}
+
+func (m *QueryConnectionResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Connection != nil {
+ l = m.Connection.Size()
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ l = len(m.Proof)
+ if l > 0 {
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ l = m.ProofHeight.Size()
+ n += 1 + l + sovQuery(uint64(l))
+ return n
+}
+
+func (m *QueryConnectionsRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Pagination != nil {
+ l = m.Pagination.Size()
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ return n
+}
+
+func (m *QueryConnectionsResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Connections) > 0 {
+ for _, e := range m.Connections {
+ l = e.Size()
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ }
+ if m.Pagination != nil {
+ l = m.Pagination.Size()
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ l = m.Height.Size()
+ n += 1 + l + sovQuery(uint64(l))
+ return n
+}
+
+func (m *QueryClientConnectionsRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.ClientId)
+ if l > 0 {
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ return n
+}
+
+func (m *QueryClientConnectionsResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.ConnectionPaths) > 0 {
+ for _, s := range m.ConnectionPaths {
+ l = len(s)
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ }
+ l = len(m.Proof)
+ if l > 0 {
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ l = m.ProofHeight.Size()
+ n += 1 + l + sovQuery(uint64(l))
+ return n
+}
+
+func (m *QueryConnectionClientStateRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.ConnectionId)
+ if l > 0 {
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ return n
+}
+
+func (m *QueryConnectionClientStateResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.IdentifiedClientState != nil {
+ l = m.IdentifiedClientState.Size()
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ l = len(m.Proof)
+ if l > 0 {
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ l = m.ProofHeight.Size()
+ n += 1 + l + sovQuery(uint64(l))
+ return n
+}
+
+func (m *QueryConnectionConsensusStateRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.ConnectionId)
+ if l > 0 {
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ if m.RevisionNumber != 0 {
+ n += 1 + sovQuery(uint64(m.RevisionNumber))
+ }
+ if m.RevisionHeight != 0 {
+ n += 1 + sovQuery(uint64(m.RevisionHeight))
+ }
+ return n
+}
+
+func (m *QueryConnectionConsensusStateResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.ConsensusState != nil {
+ l = m.ConsensusState.Size()
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ l = len(m.ClientId)
+ if l > 0 {
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ l = len(m.Proof)
+ if l > 0 {
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ l = m.ProofHeight.Size()
+ n += 1 + l + sovQuery(uint64(l))
+ return n
+}
+
+func sovQuery(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozQuery(x uint64) (n int) {
+ return sovQuery(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *QueryConnectionRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: QueryConnectionRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: QueryConnectionRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ConnectionId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ConnectionId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipQuery(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *QueryConnectionResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: QueryConnectionResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: QueryConnectionResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Connection", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Connection == nil {
+ m.Connection = &ConnectionEnd{}
+ }
+ if err := m.Connection.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Proof", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Proof = append(m.Proof[:0], dAtA[iNdEx:postIndex]...)
+ if m.Proof == nil {
+ m.Proof = []byte{}
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProofHeight", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ProofHeight.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipQuery(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *QueryConnectionsRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: QueryConnectionsRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: QueryConnectionsRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Pagination == nil {
+ m.Pagination = &query.PageRequest{}
+ }
+ if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipQuery(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *QueryConnectionsResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: QueryConnectionsResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: QueryConnectionsResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Connections", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Connections = append(m.Connections, &IdentifiedConnection{})
+ if err := m.Connections[len(m.Connections)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Pagination == nil {
+ m.Pagination = &query.PageResponse{}
+ }
+ if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Height.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipQuery(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *QueryClientConnectionsRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: QueryClientConnectionsRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: QueryClientConnectionsRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClientId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ClientId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipQuery(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *QueryClientConnectionsResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: QueryClientConnectionsResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: QueryClientConnectionsResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ConnectionPaths", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ConnectionPaths = append(m.ConnectionPaths, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Proof", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Proof = append(m.Proof[:0], dAtA[iNdEx:postIndex]...)
+ if m.Proof == nil {
+ m.Proof = []byte{}
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProofHeight", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ProofHeight.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipQuery(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *QueryConnectionClientStateRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: QueryConnectionClientStateRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: QueryConnectionClientStateRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ConnectionId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ConnectionId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipQuery(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *QueryConnectionClientStateResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: QueryConnectionClientStateResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: QueryConnectionClientStateResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field IdentifiedClientState", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.IdentifiedClientState == nil {
+ m.IdentifiedClientState = &types.IdentifiedClientState{}
+ }
+ if err := m.IdentifiedClientState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Proof", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Proof = append(m.Proof[:0], dAtA[iNdEx:postIndex]...)
+ if m.Proof == nil {
+ m.Proof = []byte{}
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProofHeight", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ProofHeight.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipQuery(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *QueryConnectionConsensusStateRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: QueryConnectionConsensusStateRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: QueryConnectionConsensusStateRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ConnectionId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ConnectionId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RevisionNumber", wireType)
+ }
+ m.RevisionNumber = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.RevisionNumber |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RevisionHeight", wireType)
+ }
+ m.RevisionHeight = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.RevisionHeight |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipQuery(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *QueryConnectionConsensusStateResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: QueryConnectionConsensusStateResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: QueryConnectionConsensusStateResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ConsensusState", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ConsensusState == nil {
+ m.ConsensusState = &types1.Any{}
+ }
+ if err := m.ConsensusState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClientId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ClientId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Proof", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Proof = append(m.Proof[:0], dAtA[iNdEx:postIndex]...)
+ if m.Proof == nil {
+ m.Proof = []byte{}
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProofHeight", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ProofHeight.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipQuery(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipQuery(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthQuery
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupQuery
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthQuery
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthQuery = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowQuery = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupQuery = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/core/03-connection/types/query.pb.gw.go b/core/03-connection/types/query.pb.gw.go
new file mode 100644
index 00000000..e597cbeb
--- /dev/null
+++ b/core/03-connection/types/query.pb.gw.go
@@ -0,0 +1,602 @@
+// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
+// source: ibcgo/core/connection/v1/query.proto
+
+/*
+Package types is a reverse proxy.
+
+It translates gRPC into RESTful JSON APIs.
+*/
+package types
+
+import (
+ "context"
+ "io"
+ "net/http"
+
+ "github.com/golang/protobuf/descriptor"
+ "github.com/golang/protobuf/proto"
+ "github.com/grpc-ecosystem/grpc-gateway/runtime"
+ "github.com/grpc-ecosystem/grpc-gateway/utilities"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/status"
+)
+
+// Suppress "imported and not used" errors
+var _ codes.Code
+var _ io.Reader
+var _ status.Status
+var _ = runtime.String
+var _ = utilities.NewDoubleArray
+var _ = descriptor.ForMessage
+
+func request_Query_Connection_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq QueryConnectionRequest
+ var metadata runtime.ServerMetadata
+
+ var (
+ val string
+ ok bool
+ err error
+ _ = err
+ )
+
+ val, ok = pathParams["connection_id"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "connection_id")
+ }
+
+ protoReq.ConnectionId, err = runtime.String(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "connection_id", err)
+ }
+
+ msg, err := client.Connection(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func local_request_Query_Connection_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq QueryConnectionRequest
+ var metadata runtime.ServerMetadata
+
+ var (
+ val string
+ ok bool
+ err error
+ _ = err
+ )
+
+ val, ok = pathParams["connection_id"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "connection_id")
+ }
+
+ protoReq.ConnectionId, err = runtime.String(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "connection_id", err)
+ }
+
+ msg, err := server.Connection(ctx, &protoReq)
+ return msg, metadata, err
+
+}
+
+var (
+ filter_Query_Connections_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)}
+)
+
+func request_Query_Connections_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq QueryConnectionsRequest
+ var metadata runtime.ServerMetadata
+
+ if err := req.ParseForm(); err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Connections_0); err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := client.Connections(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func local_request_Query_Connections_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq QueryConnectionsRequest
+ var metadata runtime.ServerMetadata
+
+ if err := req.ParseForm(); err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Connections_0); err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := server.Connections(ctx, &protoReq)
+ return msg, metadata, err
+
+}
+
+func request_Query_ClientConnections_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq QueryClientConnectionsRequest
+ var metadata runtime.ServerMetadata
+
+ var (
+ val string
+ ok bool
+ err error
+ _ = err
+ )
+
+ val, ok = pathParams["client_id"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "client_id")
+ }
+
+ protoReq.ClientId, err = runtime.String(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "client_id", err)
+ }
+
+ msg, err := client.ClientConnections(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func local_request_Query_ClientConnections_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq QueryClientConnectionsRequest
+ var metadata runtime.ServerMetadata
+
+ var (
+ val string
+ ok bool
+ err error
+ _ = err
+ )
+
+ val, ok = pathParams["client_id"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "client_id")
+ }
+
+ protoReq.ClientId, err = runtime.String(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "client_id", err)
+ }
+
+ msg, err := server.ClientConnections(ctx, &protoReq)
+ return msg, metadata, err
+
+}
+
+func request_Query_ConnectionClientState_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq QueryConnectionClientStateRequest
+ var metadata runtime.ServerMetadata
+
+ var (
+ val string
+ ok bool
+ err error
+ _ = err
+ )
+
+ val, ok = pathParams["connection_id"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "connection_id")
+ }
+
+ protoReq.ConnectionId, err = runtime.String(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "connection_id", err)
+ }
+
+ msg, err := client.ConnectionClientState(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func local_request_Query_ConnectionClientState_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq QueryConnectionClientStateRequest
+ var metadata runtime.ServerMetadata
+
+ var (
+ val string
+ ok bool
+ err error
+ _ = err
+ )
+
+ val, ok = pathParams["connection_id"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "connection_id")
+ }
+
+ protoReq.ConnectionId, err = runtime.String(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "connection_id", err)
+ }
+
+ msg, err := server.ConnectionClientState(ctx, &protoReq)
+ return msg, metadata, err
+
+}
+
+func request_Query_ConnectionConsensusState_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq QueryConnectionConsensusStateRequest
+ var metadata runtime.ServerMetadata
+
+ var (
+ val string
+ ok bool
+ err error
+ _ = err
+ )
+
+ val, ok = pathParams["connection_id"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "connection_id")
+ }
+
+ protoReq.ConnectionId, err = runtime.String(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "connection_id", err)
+ }
+
+ val, ok = pathParams["revision_number"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "revision_number")
+ }
+
+ protoReq.RevisionNumber, err = runtime.Uint64(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "revision_number", err)
+ }
+
+ val, ok = pathParams["revision_height"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "revision_height")
+ }
+
+ protoReq.RevisionHeight, err = runtime.Uint64(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "revision_height", err)
+ }
+
+ msg, err := client.ConnectionConsensusState(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func local_request_Query_ConnectionConsensusState_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq QueryConnectionConsensusStateRequest
+ var metadata runtime.ServerMetadata
+
+ var (
+ val string
+ ok bool
+ err error
+ _ = err
+ )
+
+ val, ok = pathParams["connection_id"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "connection_id")
+ }
+
+ protoReq.ConnectionId, err = runtime.String(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "connection_id", err)
+ }
+
+ val, ok = pathParams["revision_number"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "revision_number")
+ }
+
+ protoReq.RevisionNumber, err = runtime.Uint64(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "revision_number", err)
+ }
+
+ val, ok = pathParams["revision_height"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "revision_height")
+ }
+
+ protoReq.RevisionHeight, err = runtime.Uint64(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "revision_height", err)
+ }
+
+ msg, err := server.ConnectionConsensusState(ctx, &protoReq)
+ return msg, metadata, err
+
+}
+
+// RegisterQueryHandlerServer registers the http handlers for service Query to "mux".
+// UnaryRPC :call QueryServer directly.
+// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
+// Note that using this registration option will cause many gRPC library features (such as grpc.SendHeader, etc) to stop working. Consider using RegisterQueryHandlerFromEndpoint instead.
+func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryServer) error {
+
+ mux.Handle("GET", pattern_Query_Connection_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Query_Connection_0(rctx, inboundMarshaler, server, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Query_Connection_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("GET", pattern_Query_Connections_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Query_Connections_0(rctx, inboundMarshaler, server, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Query_Connections_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("GET", pattern_Query_ClientConnections_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Query_ClientConnections_0(rctx, inboundMarshaler, server, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Query_ClientConnections_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("GET", pattern_Query_ConnectionClientState_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Query_ConnectionClientState_0(rctx, inboundMarshaler, server, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Query_ConnectionClientState_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("GET", pattern_Query_ConnectionConsensusState_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Query_ConnectionConsensusState_0(rctx, inboundMarshaler, server, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Query_ConnectionConsensusState_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ return nil
+}
+
+// RegisterQueryHandlerFromEndpoint is same as RegisterQueryHandler but
+// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
+func RegisterQueryHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
+ conn, err := grpc.Dial(endpoint, opts...)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err != nil {
+ if cerr := conn.Close(); cerr != nil {
+ grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
+ }
+ return
+ }
+ go func() {
+ <-ctx.Done()
+ if cerr := conn.Close(); cerr != nil {
+ grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
+ }
+ }()
+ }()
+
+ return RegisterQueryHandler(ctx, mux, conn)
+}
+
+// RegisterQueryHandler registers the http handlers for service Query to "mux".
+// The handlers forward requests to the grpc endpoint over "conn".
+func RegisterQueryHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
+ return RegisterQueryHandlerClient(ctx, mux, NewQueryClient(conn))
+}
+
+// RegisterQueryHandlerClient registers the http handlers for service Query
+// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "QueryClient".
+// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "QueryClient"
+// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
+// "QueryClient" to call the correct interceptors.
+func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, client QueryClient) error {
+
+ mux.Handle("GET", pattern_Query_Connection_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Query_Connection_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Query_Connection_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("GET", pattern_Query_Connections_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Query_Connections_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Query_Connections_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("GET", pattern_Query_ClientConnections_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Query_ClientConnections_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Query_ClientConnections_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("GET", pattern_Query_ConnectionClientState_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Query_ConnectionClientState_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Query_ConnectionClientState_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("GET", pattern_Query_ConnectionConsensusState_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Query_ConnectionConsensusState_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Query_ConnectionConsensusState_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ return nil
+}
+
+var (
+ pattern_Query_Connection_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 1, 0, 4, 1, 5, 5}, []string{"ibc", "core", "connection", "v1", "connections", "connection_id"}, "", runtime.AssumeColonVerbOpt(true)))
+
+ pattern_Query_Connections_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"ibc", "core", "connection", "v1", "connections"}, "", runtime.AssumeColonVerbOpt(true)))
+
+ pattern_Query_ClientConnections_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 1, 0, 4, 1, 5, 5}, []string{"ibc", "core", "connection", "v1", "client_connections", "client_id"}, "", runtime.AssumeColonVerbOpt(true)))
+
+ pattern_Query_ConnectionClientState_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 1, 0, 4, 1, 5, 5, 2, 6}, []string{"ibc", "core", "connection", "v1", "connections", "connection_id", "client_state"}, "", runtime.AssumeColonVerbOpt(true)))
+
+ pattern_Query_ConnectionConsensusState_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 1, 0, 4, 1, 5, 5, 2, 6, 2, 7, 1, 0, 4, 1, 5, 8, 2, 9, 1, 0, 4, 1, 5, 10}, []string{"ibc", "core", "connection", "v1", "connections", "connection_id", "consensus_state", "revision", "revision_number", "height", "revision_height"}, "", runtime.AssumeColonVerbOpt(true)))
+)
+
+var (
+ forward_Query_Connection_0 = runtime.ForwardResponseMessage
+
+ forward_Query_Connections_0 = runtime.ForwardResponseMessage
+
+ forward_Query_ClientConnections_0 = runtime.ForwardResponseMessage
+
+ forward_Query_ConnectionClientState_0 = runtime.ForwardResponseMessage
+
+ forward_Query_ConnectionConsensusState_0 = runtime.ForwardResponseMessage
+)
diff --git a/core/03-connection/types/tx.pb.go b/core/03-connection/types/tx.pb.go
new file mode 100644
index 00000000..ca9b87f5
--- /dev/null
+++ b/core/03-connection/types/tx.pb.go
@@ -0,0 +1,2782 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: ibcgo/core/connection/v1/tx.proto
+
+package types
+
+import (
+ context "context"
+ fmt "fmt"
+ types "github.com/cosmos/cosmos-sdk/codec/types"
+ types1 "github.com/cosmos/ibc-go/core/02-client/types"
+ _ "github.com/gogo/protobuf/gogoproto"
+ grpc1 "github.com/gogo/protobuf/grpc"
+ proto "github.com/gogo/protobuf/proto"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+ io "io"
+ math "math"
+ math_bits "math/bits"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+// MsgConnectionOpenInit defines the msg sent by an account on Chain A to
+// initialize a connection with Chain B.
+type MsgConnectionOpenInit struct {
+ ClientId string `protobuf:"bytes,1,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty" yaml:"client_id"`
+ Counterparty Counterparty `protobuf:"bytes,2,opt,name=counterparty,proto3" json:"counterparty"`
+ Version *Version `protobuf:"bytes,3,opt,name=version,proto3" json:"version,omitempty"`
+ DelayPeriod uint64 `protobuf:"varint,4,opt,name=delay_period,json=delayPeriod,proto3" json:"delay_period,omitempty" yaml:"delay_period"`
+ Signer string `protobuf:"bytes,5,opt,name=signer,proto3" json:"signer,omitempty"`
+}
+
+func (m *MsgConnectionOpenInit) Reset() { *m = MsgConnectionOpenInit{} }
+func (m *MsgConnectionOpenInit) String() string { return proto.CompactTextString(m) }
+func (*MsgConnectionOpenInit) ProtoMessage() {}
+func (*MsgConnectionOpenInit) Descriptor() ([]byte, []int) {
+ return fileDescriptor_296ab31199620d78, []int{0}
+}
+func (m *MsgConnectionOpenInit) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MsgConnectionOpenInit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_MsgConnectionOpenInit.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *MsgConnectionOpenInit) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MsgConnectionOpenInit.Merge(m, src)
+}
+func (m *MsgConnectionOpenInit) XXX_Size() int {
+ return m.Size()
+}
+func (m *MsgConnectionOpenInit) XXX_DiscardUnknown() {
+ xxx_messageInfo_MsgConnectionOpenInit.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MsgConnectionOpenInit proto.InternalMessageInfo
+
+// MsgConnectionOpenInitResponse defines the Msg/ConnectionOpenInit response
+// type.
+type MsgConnectionOpenInitResponse struct {
+}
+
+func (m *MsgConnectionOpenInitResponse) Reset() { *m = MsgConnectionOpenInitResponse{} }
+func (m *MsgConnectionOpenInitResponse) String() string { return proto.CompactTextString(m) }
+func (*MsgConnectionOpenInitResponse) ProtoMessage() {}
+func (*MsgConnectionOpenInitResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_296ab31199620d78, []int{1}
+}
+func (m *MsgConnectionOpenInitResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MsgConnectionOpenInitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_MsgConnectionOpenInitResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *MsgConnectionOpenInitResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MsgConnectionOpenInitResponse.Merge(m, src)
+}
+func (m *MsgConnectionOpenInitResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *MsgConnectionOpenInitResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_MsgConnectionOpenInitResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MsgConnectionOpenInitResponse proto.InternalMessageInfo
+
+// MsgConnectionOpenTry defines a msg sent by a Relayer to try to open a
+// connection on Chain B.
+type MsgConnectionOpenTry struct {
+ ClientId string `protobuf:"bytes,1,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty" yaml:"client_id"`
+	// in the case of crossing hellos, when both chains call OpenInit, we need
+	// the connection identifier of the previous connection in state INIT
+ PreviousConnectionId string `protobuf:"bytes,2,opt,name=previous_connection_id,json=previousConnectionId,proto3" json:"previous_connection_id,omitempty" yaml:"previous_connection_id"`
+ ClientState *types.Any `protobuf:"bytes,3,opt,name=client_state,json=clientState,proto3" json:"client_state,omitempty" yaml:"client_state"`
+ Counterparty Counterparty `protobuf:"bytes,4,opt,name=counterparty,proto3" json:"counterparty"`
+ DelayPeriod uint64 `protobuf:"varint,5,opt,name=delay_period,json=delayPeriod,proto3" json:"delay_period,omitempty" yaml:"delay_period"`
+ CounterpartyVersions []*Version `protobuf:"bytes,6,rep,name=counterparty_versions,json=counterpartyVersions,proto3" json:"counterparty_versions,omitempty" yaml:"counterparty_versions"`
+ ProofHeight types1.Height `protobuf:"bytes,7,opt,name=proof_height,json=proofHeight,proto3" json:"proof_height" yaml:"proof_height"`
+	// proof of the initialization of the connection on Chain A: `UNINITIALIZED ->
+	// INIT`
+ ProofInit []byte `protobuf:"bytes,8,opt,name=proof_init,json=proofInit,proto3" json:"proof_init,omitempty" yaml:"proof_init"`
+ // proof of client state included in message
+ ProofClient []byte `protobuf:"bytes,9,opt,name=proof_client,json=proofClient,proto3" json:"proof_client,omitempty" yaml:"proof_client"`
+ // proof of client consensus state
+ ProofConsensus []byte `protobuf:"bytes,10,opt,name=proof_consensus,json=proofConsensus,proto3" json:"proof_consensus,omitempty" yaml:"proof_consensus"`
+ ConsensusHeight types1.Height `protobuf:"bytes,11,opt,name=consensus_height,json=consensusHeight,proto3" json:"consensus_height" yaml:"consensus_height"`
+ Signer string `protobuf:"bytes,12,opt,name=signer,proto3" json:"signer,omitempty"`
+}
+
+func (m *MsgConnectionOpenTry) Reset() { *m = MsgConnectionOpenTry{} }
+func (m *MsgConnectionOpenTry) String() string { return proto.CompactTextString(m) }
+func (*MsgConnectionOpenTry) ProtoMessage() {}
+func (*MsgConnectionOpenTry) Descriptor() ([]byte, []int) {
+ return fileDescriptor_296ab31199620d78, []int{2}
+}
+func (m *MsgConnectionOpenTry) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MsgConnectionOpenTry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_MsgConnectionOpenTry.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *MsgConnectionOpenTry) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MsgConnectionOpenTry.Merge(m, src)
+}
+func (m *MsgConnectionOpenTry) XXX_Size() int {
+ return m.Size()
+}
+func (m *MsgConnectionOpenTry) XXX_DiscardUnknown() {
+ xxx_messageInfo_MsgConnectionOpenTry.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MsgConnectionOpenTry proto.InternalMessageInfo
+
+// MsgConnectionOpenTryResponse defines the Msg/ConnectionOpenTry response type.
+type MsgConnectionOpenTryResponse struct {
+}
+
+func (m *MsgConnectionOpenTryResponse) Reset() { *m = MsgConnectionOpenTryResponse{} }
+func (m *MsgConnectionOpenTryResponse) String() string { return proto.CompactTextString(m) }
+func (*MsgConnectionOpenTryResponse) ProtoMessage() {}
+func (*MsgConnectionOpenTryResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_296ab31199620d78, []int{3}
+}
+func (m *MsgConnectionOpenTryResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MsgConnectionOpenTryResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_MsgConnectionOpenTryResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *MsgConnectionOpenTryResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MsgConnectionOpenTryResponse.Merge(m, src)
+}
+func (m *MsgConnectionOpenTryResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *MsgConnectionOpenTryResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_MsgConnectionOpenTryResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MsgConnectionOpenTryResponse proto.InternalMessageInfo
+
+// MsgConnectionOpenAck defines a msg sent by a Relayer to Chain A to
+// acknowledge the change of connection state to TRYOPEN on Chain B.
+type MsgConnectionOpenAck struct {
+ ConnectionId string `protobuf:"bytes,1,opt,name=connection_id,json=connectionId,proto3" json:"connection_id,omitempty" yaml:"connection_id"`
+ CounterpartyConnectionId string `protobuf:"bytes,2,opt,name=counterparty_connection_id,json=counterpartyConnectionId,proto3" json:"counterparty_connection_id,omitempty" yaml:"counterparty_connection_id"`
+ Version *Version `protobuf:"bytes,3,opt,name=version,proto3" json:"version,omitempty"`
+ ClientState *types.Any `protobuf:"bytes,4,opt,name=client_state,json=clientState,proto3" json:"client_state,omitempty" yaml:"client_state"`
+ ProofHeight types1.Height `protobuf:"bytes,5,opt,name=proof_height,json=proofHeight,proto3" json:"proof_height" yaml:"proof_height"`
+	// proof of the initialization of the connection on Chain B:
+	// `UNINITIALIZED -> TRYOPEN`
+ ProofTry []byte `protobuf:"bytes,6,opt,name=proof_try,json=proofTry,proto3" json:"proof_try,omitempty" yaml:"proof_try"`
+ // proof of client state included in message
+ ProofClient []byte `protobuf:"bytes,7,opt,name=proof_client,json=proofClient,proto3" json:"proof_client,omitempty" yaml:"proof_client"`
+ // proof of client consensus state
+ ProofConsensus []byte `protobuf:"bytes,8,opt,name=proof_consensus,json=proofConsensus,proto3" json:"proof_consensus,omitempty" yaml:"proof_consensus"`
+ ConsensusHeight types1.Height `protobuf:"bytes,9,opt,name=consensus_height,json=consensusHeight,proto3" json:"consensus_height" yaml:"consensus_height"`
+ Signer string `protobuf:"bytes,10,opt,name=signer,proto3" json:"signer,omitempty"`
+}
+
+func (m *MsgConnectionOpenAck) Reset() { *m = MsgConnectionOpenAck{} }
+func (m *MsgConnectionOpenAck) String() string { return proto.CompactTextString(m) }
+func (*MsgConnectionOpenAck) ProtoMessage() {}
+func (*MsgConnectionOpenAck) Descriptor() ([]byte, []int) {
+ return fileDescriptor_296ab31199620d78, []int{4}
+}
+func (m *MsgConnectionOpenAck) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MsgConnectionOpenAck) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_MsgConnectionOpenAck.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *MsgConnectionOpenAck) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MsgConnectionOpenAck.Merge(m, src)
+}
+func (m *MsgConnectionOpenAck) XXX_Size() int {
+ return m.Size()
+}
+func (m *MsgConnectionOpenAck) XXX_DiscardUnknown() {
+ xxx_messageInfo_MsgConnectionOpenAck.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MsgConnectionOpenAck proto.InternalMessageInfo
+
+// MsgConnectionOpenAckResponse defines the Msg/ConnectionOpenAck response type.
+type MsgConnectionOpenAckResponse struct {
+}
+
+func (m *MsgConnectionOpenAckResponse) Reset() { *m = MsgConnectionOpenAckResponse{} }
+func (m *MsgConnectionOpenAckResponse) String() string { return proto.CompactTextString(m) }
+func (*MsgConnectionOpenAckResponse) ProtoMessage() {}
+func (*MsgConnectionOpenAckResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_296ab31199620d78, []int{5}
+}
+func (m *MsgConnectionOpenAckResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MsgConnectionOpenAckResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_MsgConnectionOpenAckResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *MsgConnectionOpenAckResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MsgConnectionOpenAckResponse.Merge(m, src)
+}
+func (m *MsgConnectionOpenAckResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *MsgConnectionOpenAckResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_MsgConnectionOpenAckResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MsgConnectionOpenAckResponse proto.InternalMessageInfo
+
+// MsgConnectionOpenConfirm defines a msg sent by a Relayer to Chain B to
+// acknowledge the change of connection state to OPEN on Chain A.
+type MsgConnectionOpenConfirm struct {
+ ConnectionId string `protobuf:"bytes,1,opt,name=connection_id,json=connectionId,proto3" json:"connection_id,omitempty" yaml:"connection_id"`
+ // proof for the change of the connection state on Chain A: `INIT -> OPEN`
+ ProofAck []byte `protobuf:"bytes,2,opt,name=proof_ack,json=proofAck,proto3" json:"proof_ack,omitempty" yaml:"proof_ack"`
+ ProofHeight types1.Height `protobuf:"bytes,3,opt,name=proof_height,json=proofHeight,proto3" json:"proof_height" yaml:"proof_height"`
+ Signer string `protobuf:"bytes,4,opt,name=signer,proto3" json:"signer,omitempty"`
+}
+
+func (m *MsgConnectionOpenConfirm) Reset() { *m = MsgConnectionOpenConfirm{} }
+func (m *MsgConnectionOpenConfirm) String() string { return proto.CompactTextString(m) }
+func (*MsgConnectionOpenConfirm) ProtoMessage() {}
+func (*MsgConnectionOpenConfirm) Descriptor() ([]byte, []int) {
+ return fileDescriptor_296ab31199620d78, []int{6}
+}
+func (m *MsgConnectionOpenConfirm) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MsgConnectionOpenConfirm) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_MsgConnectionOpenConfirm.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *MsgConnectionOpenConfirm) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MsgConnectionOpenConfirm.Merge(m, src)
+}
+func (m *MsgConnectionOpenConfirm) XXX_Size() int {
+ return m.Size()
+}
+func (m *MsgConnectionOpenConfirm) XXX_DiscardUnknown() {
+ xxx_messageInfo_MsgConnectionOpenConfirm.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MsgConnectionOpenConfirm proto.InternalMessageInfo
+
+// MsgConnectionOpenConfirmResponse defines the Msg/ConnectionOpenConfirm
+// response type.
+type MsgConnectionOpenConfirmResponse struct {
+}
+
+func (m *MsgConnectionOpenConfirmResponse) Reset() { *m = MsgConnectionOpenConfirmResponse{} }
+func (m *MsgConnectionOpenConfirmResponse) String() string { return proto.CompactTextString(m) }
+func (*MsgConnectionOpenConfirmResponse) ProtoMessage() {}
+func (*MsgConnectionOpenConfirmResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_296ab31199620d78, []int{7}
+}
+func (m *MsgConnectionOpenConfirmResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MsgConnectionOpenConfirmResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_MsgConnectionOpenConfirmResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *MsgConnectionOpenConfirmResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MsgConnectionOpenConfirmResponse.Merge(m, src)
+}
+func (m *MsgConnectionOpenConfirmResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *MsgConnectionOpenConfirmResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_MsgConnectionOpenConfirmResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MsgConnectionOpenConfirmResponse proto.InternalMessageInfo
+
+func init() {
+ proto.RegisterType((*MsgConnectionOpenInit)(nil), "ibcgo.core.connection.v1.MsgConnectionOpenInit")
+ proto.RegisterType((*MsgConnectionOpenInitResponse)(nil), "ibcgo.core.connection.v1.MsgConnectionOpenInitResponse")
+ proto.RegisterType((*MsgConnectionOpenTry)(nil), "ibcgo.core.connection.v1.MsgConnectionOpenTry")
+ proto.RegisterType((*MsgConnectionOpenTryResponse)(nil), "ibcgo.core.connection.v1.MsgConnectionOpenTryResponse")
+ proto.RegisterType((*MsgConnectionOpenAck)(nil), "ibcgo.core.connection.v1.MsgConnectionOpenAck")
+ proto.RegisterType((*MsgConnectionOpenAckResponse)(nil), "ibcgo.core.connection.v1.MsgConnectionOpenAckResponse")
+ proto.RegisterType((*MsgConnectionOpenConfirm)(nil), "ibcgo.core.connection.v1.MsgConnectionOpenConfirm")
+ proto.RegisterType((*MsgConnectionOpenConfirmResponse)(nil), "ibcgo.core.connection.v1.MsgConnectionOpenConfirmResponse")
+}
+
+func init() { proto.RegisterFile("ibcgo/core/connection/v1/tx.proto", fileDescriptor_296ab31199620d78) }
+
+var fileDescriptor_296ab31199620d78 = []byte{
+ // 913 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0xcd, 0x6e, 0xeb, 0x44,
+ 0x14, 0x8e, 0xf3, 0x9f, 0x49, 0xe0, 0xde, 0x6b, 0x92, 0xd6, 0x84, 0xde, 0x38, 0xb1, 0x04, 0x0a,
+ 0x8b, 0x6b, 0x93, 0x16, 0x81, 0x14, 0xc4, 0x22, 0xc9, 0x86, 0x0a, 0x55, 0x54, 0xa6, 0x02, 0x09,
+ 0x21, 0x45, 0x89, 0x33, 0x75, 0xac, 0x24, 0x1e, 0xcb, 0x76, 0xa2, 0x5a, 0x48, 0x6c, 0x01, 0x89,
+ 0x05, 0x2f, 0x80, 0xd4, 0xb7, 0xe0, 0x15, 0xba, 0xec, 0x92, 0x95, 0x85, 0xda, 0x05, 0xac, 0xfd,
+ 0x04, 0xc8, 0x33, 0xb6, 0x63, 0x27, 0xb6, 0x54, 0x93, 0xb2, 0x9b, 0x33, 0xe7, 0x3b, 0xe7, 0xcc,
+ 0x9c, 0xf3, 0x7d, 0xa3, 0x01, 0x1d, 0x65, 0x2a, 0xc9, 0x48, 0x90, 0x90, 0x0e, 0x05, 0x09, 0xa9,
+ 0x2a, 0x94, 0x4c, 0x05, 0xa9, 0xc2, 0xa6, 0x27, 0x98, 0x37, 0xbc, 0xa6, 0x23, 0x13, 0xd1, 0x0c,
+ 0x86, 0xf0, 0x2e, 0x84, 0xdf, 0x42, 0xf8, 0x4d, 0xaf, 0x59, 0x97, 0x91, 0x8c, 0x30, 0x48, 0x70,
+ 0x57, 0x04, 0xdf, 0x7c, 0x57, 0x46, 0x48, 0x5e, 0x42, 0x01, 0x5b, 0xd3, 0xf5, 0xb5, 0x30, 0x51,
+ 0x2d, 0xcf, 0x15, 0xa9, 0xb6, 0x54, 0xa0, 0x6a, 0xba, 0x95, 0xc8, 0xca, 0x83, 0x7c, 0x98, 0x78,
+ 0xa0, 0x50, 0x6d, 0x0c, 0xe5, 0xfe, 0xc8, 0x82, 0xc6, 0x85, 0x21, 0x8f, 0x82, 0xfd, 0xaf, 0x34,
+ 0xa8, 0x9e, 0xab, 0x8a, 0x49, 0xf7, 0x40, 0x85, 0x24, 0x1d, 0x2b, 0x33, 0x86, 0x6a, 0x53, 0xdd,
+ 0xca, 0xb0, 0xee, 0xd8, 0xec, 0x4b, 0x6b, 0xb2, 0x5a, 0xf6, 0xb9, 0xc0, 0xc5, 0x89, 0x65, 0xb2,
+ 0x3e, 0x9f, 0xd1, 0x97, 0xa0, 0x26, 0xa1, 0xb5, 0x6a, 0x42, 0x5d, 0x9b, 0xe8, 0xa6, 0xc5, 0x64,
+ 0xdb, 0x54, 0xb7, 0x7a, 0xfa, 0x01, 0x9f, 0x74, 0x79, 0x7e, 0x14, 0x42, 0x0f, 0xf3, 0x77, 0x36,
+ 0x9b, 0x11, 0x23, 0x19, 0xe8, 0xcf, 0x40, 0x69, 0x03, 0x75, 0x43, 0x41, 0x2a, 0x93, 0xc3, 0xc9,
+ 0x3a, 0xc9, 0xc9, 0xbe, 0x21, 0x40, 0xd1, 0x8f, 0xa0, 0xfb, 0xa0, 0x36, 0x83, 0xcb, 0x89, 0x35,
+ 0xd6, 0xa0, 0xae, 0xa0, 0x19, 0x93, 0x6f, 0x53, 0xdd, 0xfc, 0xf0, 0xd8, 0xb1, 0xd9, 0x77, 0xc8,
+ 0x25, 0xc2, 0x5e, 0x4e, 0xac, 0x62, 0xf3, 0x12, 0x5b, 0xf4, 0x11, 0x28, 0x1a, 0x8a, 0xac, 0x42,
+ 0x9d, 0x29, 0xb8, 0x57, 0x17, 0x3d, 0xab, 0x5f, 0xfe, 0xf9, 0x96, 0xcd, 0xfc, 0x73, 0xcb, 0x66,
+ 0x38, 0x16, 0xbc, 0x8e, 0x6d, 0x9c, 0x08, 0x0d, 0x0d, 0xa9, 0x06, 0xe4, 0x7e, 0x2f, 0x81, 0xfa,
+ 0x1e, 0xe2, 0x4a, 0xb7, 0xfe, 0x4b, 0x67, 0xbf, 0x05, 0x47, 0x9a, 0x0e, 0x37, 0x0a, 0x5a, 0x1b,
+ 0xe3, 0xed, 0xad, 0xdd, 0xf8, 0x2c, 0x8e, 0xef, 0x38, 0x36, 0xfb, 0x9a, 0xc4, 0xc7, 0xe3, 0x38,
+ 0xb1, 0xee, 0x3b, 0xb6, 0x07, 0xf2, 0x46, 0x46, 0x0a, 0x1a, 0xe6, 0xc4, 0x84, 0x5e, 0x97, 0xeb,
+ 0x3c, 0xe1, 0x1f, 0xef, 0xf3, 0x8f, 0x1f, 0xa8, 0x56, 0xb8, 0x73, 0xe1, 0x18, 0x4e, 0xac, 0x12,
+ 0xf3, 0x6b, 0xd7, 0xda, 0x23, 0x41, 0xfe, 0x60, 0x12, 0xec, 0xce, 0xb1, 0x90, 0x62, 0x8e, 0x37,
+ 0xa0, 0x11, 0xce, 0x35, 0xf6, 0xb8, 0x61, 0x30, 0xc5, 0x76, 0xee, 0x49, 0x74, 0x1a, 0xb6, 0x1d,
+ 0x9b, 0x3d, 0xf1, 0x6e, 0x1d, 0x97, 0x89, 0x13, 0xeb, 0xe1, 0x7d, 0x2f, 0xcc, 0xa0, 0xbf, 0x07,
+ 0x35, 0x4d, 0x47, 0xe8, 0x7a, 0x3c, 0x87, 0x8a, 0x3c, 0x37, 0x99, 0x12, 0xee, 0xc3, 0x49, 0xa4,
+ 0x20, 0x11, 0xed, 0xa6, 0xc7, 0x7f, 0x81, 0x31, 0xc3, 0xf7, 0xdc, 0xdb, 0x6f, 0xef, 0x15, 0x8e,
+ 0xe7, 0xc4, 0x2a, 0x36, 0x09, 0x92, 0xfe, 0x18, 0x00, 0xe2, 0x55, 0x54, 0xc5, 0x64, 0xca, 0x6d,
+ 0xaa, 0x5b, 0x1b, 0x36, 0x1c, 0x9b, 0x7d, 0x15, 0x8e, 0x74, 0x7d, 0x9c, 0x58, 0xc1, 0x06, 0xd6,
+ 0x74, 0xdf, 0x3f, 0x13, 0xa9, 0xcc, 0x54, 0x70, 0xdc, 0xf1, 0x6e, 0x45, 0xe2, 0xf5, 0x2b, 0x8e,
+ 0xb0, 0x45, 0x8f, 0xc0, 0x0b, 0xcf, 0xeb, 0xb2, 0x5b, 0x35, 0xd6, 0x06, 0x03, 0x70, 0x78, 0xd3,
+ 0xb1, 0xd9, 0xa3, 0x48, 0xb8, 0x0f, 0xe0, 0xc4, 0xb7, 0x49, 0x06, 0x7f, 0x83, 0x9e, 0x83, 0x97,
+ 0x81, 0xd7, 0x6f, 0x4c, 0xf5, 0x09, 0x8d, 0x61, 0xbd, 0xc6, 0x1c, 0xfb, 0x83, 0x88, 0xe6, 0xe0,
+ 0xc4, 0x17, 0xc1, 0x96, 0xd7, 0xa0, 0xad, 0x80, 0x6b, 0x09, 0x02, 0x6e, 0x81, 0x93, 0x38, 0x79,
+ 0x06, 0xfa, 0xfd, 0xbb, 0x10, 0xa3, 0xdf, 0x81, 0xb4, 0xa0, 0x3f, 0x07, 0x6f, 0x45, 0x35, 0x48,
+ 0x34, 0xcc, 0x38, 0x36, 0x5b, 0x0f, 0xce, 0x17, 0x96, 0x5e, 0x4d, 0x0a, 0x4b, 0x4e, 0x02, 0xcd,
+ 0x08, 0x91, 0xe2, 0xf4, 0xfc, 0xbe, 0x63, 0xb3, 0x9d, 0x18, 0xd2, 0xed, 0x24, 0x66, 0xc2, 0xce,
+ 0x88, 0xae, 0x0f, 0x7a, 0x38, 0x77, 0x1f, 0x85, 0xfc, 0xc1, 0x8f, 0xc2, 0xae, 0x18, 0x0a, 0xcf,
+ 0x2a, 0x86, 0x1e, 0x20, 0x1c, 0x1f, 0x9b, 0xba, 0xc5, 0x14, 0x31, 0x29, 0x43, 0x0f, 0x6a, 0xe0,
+ 0xe2, 0xc4, 0x32, 0x5e, 0xbb, 0x6f, 0xf0, 0xae, 0x12, 0x4a, 0x87, 0x29, 0xa1, 0xfc, 0x2c, 0x4a,
+ 0xa8, 0xfc, 0xcf, 0x4a, 0x00, 0x29, 0x94, 0x30, 0x90, 0x16, 0x81, 0x12, 0x7e, 0xcd, 0x02, 0x66,
+ 0x0f, 0x30, 0x42, 0xea, 0xb5, 0xa2, 0xaf, 0x0e, 0x55, 0x43, 0x30, 0xbb, 0x89, 0xb4, 0xc0, 0xe4,
+ 0x8f, 0x99, 0xdd, 0x44, 0x5a, 0xf8, 0xb3, 0x73, 0xf5, 0xb7, 0x4b, 0xa6, 0xdc, 0xb3, 0x92, 0x69,
+ 0xdb, 0xae, 0x7c, 0x42, 0xbb, 0x38, 0xd0, 0x4e, 0xea, 0x86, 0xdf, 0xb2, 0xd3, 0x5f, 0xf2, 0x20,
+ 0x77, 0x61, 0xc8, 0xf4, 0x8f, 0x80, 0x8e, 0xf9, 0x5b, 0x09, 0xc9, 0x62, 0x8c, 0xfd, 0x53, 0x34,
+ 0x3f, 0x4d, 0x19, 0xe0, 0x9f, 0x83, 0xfe, 0x01, 0xbc, 0xda, 0xff, 0x80, 0xf0, 0x29, 0xb2, 0x5d,
+ 0xe9, 0x56, 0xf3, 0x93, 0x74, 0xf8, 0xe4, 0xe2, 0xee, 0xf4, 0xd2, 0x14, 0x1f, 0x48, 0x8b, 0x54,
+ 0xc5, 0x43, 0xa4, 0xa5, 0x7f, 0xa2, 0x40, 0x23, 0x9e, 0xb1, 0xa7, 0x29, 0x32, 0x7a, 0x31, 0xcd,
+ 0x7e, 0xfa, 0x18, 0xff, 0x24, 0xc3, 0x2f, 0xef, 0x1e, 0x5a, 0xd4, 0xfd, 0x43, 0x8b, 0xfa, 0xeb,
+ 0xa1, 0x45, 0xfd, 0xf6, 0xd8, 0xca, 0xdc, 0x3f, 0xb6, 0x32, 0x7f, 0x3e, 0xb6, 0x32, 0xdf, 0xf5,
+ 0x64, 0xc5, 0x9c, 0xaf, 0xa7, 0xbc, 0x84, 0x56, 0x82, 0x84, 0x8c, 0x15, 0x32, 0x04, 0x65, 0x2a,
+ 0xbd, 0xf1, 0xff, 0xee, 0x1f, 0x9d, 0xbd, 0x09, 0x7d, 0xdf, 0x4d, 0x4b, 0x83, 0xc6, 0xb4, 0x88,
+ 0x5f, 0xdf, 0xb3, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x8d, 0x0e, 0x60, 0x2e, 0x75, 0x0c, 0x00,
+ 0x00,
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// MsgClient is the client API for Msg service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type MsgClient interface {
+	// ConnectionOpenInit defines an RPC handler method for MsgConnectionOpenInit.
+ ConnectionOpenInit(ctx context.Context, in *MsgConnectionOpenInit, opts ...grpc.CallOption) (*MsgConnectionOpenInitResponse, error)
+	// ConnectionOpenTry defines an RPC handler method for MsgConnectionOpenTry.
+ ConnectionOpenTry(ctx context.Context, in *MsgConnectionOpenTry, opts ...grpc.CallOption) (*MsgConnectionOpenTryResponse, error)
+	// ConnectionOpenAck defines an RPC handler method for MsgConnectionOpenAck.
+ ConnectionOpenAck(ctx context.Context, in *MsgConnectionOpenAck, opts ...grpc.CallOption) (*MsgConnectionOpenAckResponse, error)
+	// ConnectionOpenConfirm defines an RPC handler method for
+ // MsgConnectionOpenConfirm.
+ ConnectionOpenConfirm(ctx context.Context, in *MsgConnectionOpenConfirm, opts ...grpc.CallOption) (*MsgConnectionOpenConfirmResponse, error)
+}
+
+type msgClient struct {
+ cc grpc1.ClientConn
+}
+
+func NewMsgClient(cc grpc1.ClientConn) MsgClient {
+ return &msgClient{cc}
+}
+
+func (c *msgClient) ConnectionOpenInit(ctx context.Context, in *MsgConnectionOpenInit, opts ...grpc.CallOption) (*MsgConnectionOpenInitResponse, error) {
+ out := new(MsgConnectionOpenInitResponse)
+ err := c.cc.Invoke(ctx, "/ibcgo.core.connection.v1.Msg/ConnectionOpenInit", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *msgClient) ConnectionOpenTry(ctx context.Context, in *MsgConnectionOpenTry, opts ...grpc.CallOption) (*MsgConnectionOpenTryResponse, error) {
+ out := new(MsgConnectionOpenTryResponse)
+ err := c.cc.Invoke(ctx, "/ibcgo.core.connection.v1.Msg/ConnectionOpenTry", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *msgClient) ConnectionOpenAck(ctx context.Context, in *MsgConnectionOpenAck, opts ...grpc.CallOption) (*MsgConnectionOpenAckResponse, error) {
+ out := new(MsgConnectionOpenAckResponse)
+ err := c.cc.Invoke(ctx, "/ibcgo.core.connection.v1.Msg/ConnectionOpenAck", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *msgClient) ConnectionOpenConfirm(ctx context.Context, in *MsgConnectionOpenConfirm, opts ...grpc.CallOption) (*MsgConnectionOpenConfirmResponse, error) {
+ out := new(MsgConnectionOpenConfirmResponse)
+ err := c.cc.Invoke(ctx, "/ibcgo.core.connection.v1.Msg/ConnectionOpenConfirm", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
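+
+// Illustrative sketch, not part of the generated code: one way a caller might
+// use the generated MsgClient to submit a ConnectionOpenInit message over an
+// existing gRPC connection. The function and its parameters (conn, clientID,
+// signer) are hypothetical placeholders, not part of the ibc-go API.
+func exampleConnectionOpenInit(ctx context.Context, conn grpc1.ClientConn, clientID, signer string) error {
+	txClient := NewMsgClient(conn)
+	msg := &MsgConnectionOpenInit{
+		ClientId: clientID,
+		Signer:   signer,
+	}
+	// The generated response type is currently empty, so only the error is used.
+	_, err := txClient.ConnectionOpenInit(ctx, msg)
+	return err
+}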
+
+// MsgServer is the server API for Msg service.
+type MsgServer interface {
+	// ConnectionOpenInit defines an RPC handler method for MsgConnectionOpenInit.
+ ConnectionOpenInit(context.Context, *MsgConnectionOpenInit) (*MsgConnectionOpenInitResponse, error)
+	// ConnectionOpenTry defines an RPC handler method for MsgConnectionOpenTry.
+ ConnectionOpenTry(context.Context, *MsgConnectionOpenTry) (*MsgConnectionOpenTryResponse, error)
+	// ConnectionOpenAck defines an RPC handler method for MsgConnectionOpenAck.
+ ConnectionOpenAck(context.Context, *MsgConnectionOpenAck) (*MsgConnectionOpenAckResponse, error)
+	// ConnectionOpenConfirm defines an RPC handler method for
+ // MsgConnectionOpenConfirm.
+ ConnectionOpenConfirm(context.Context, *MsgConnectionOpenConfirm) (*MsgConnectionOpenConfirmResponse, error)
+}
+
+// UnimplementedMsgServer can be embedded to have forward compatible implementations.
+type UnimplementedMsgServer struct {
+}
+
+func (*UnimplementedMsgServer) ConnectionOpenInit(ctx context.Context, req *MsgConnectionOpenInit) (*MsgConnectionOpenInitResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ConnectionOpenInit not implemented")
+}
+func (*UnimplementedMsgServer) ConnectionOpenTry(ctx context.Context, req *MsgConnectionOpenTry) (*MsgConnectionOpenTryResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ConnectionOpenTry not implemented")
+}
+func (*UnimplementedMsgServer) ConnectionOpenAck(ctx context.Context, req *MsgConnectionOpenAck) (*MsgConnectionOpenAckResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ConnectionOpenAck not implemented")
+}
+func (*UnimplementedMsgServer) ConnectionOpenConfirm(ctx context.Context, req *MsgConnectionOpenConfirm) (*MsgConnectionOpenConfirmResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ConnectionOpenConfirm not implemented")
+}
+
+func RegisterMsgServer(s grpc1.Server, srv MsgServer) {
+ s.RegisterService(&_Msg_serviceDesc, srv)
+}
+
+func _Msg_ConnectionOpenInit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(MsgConnectionOpenInit)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(MsgServer).ConnectionOpenInit(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/ibcgo.core.connection.v1.Msg/ConnectionOpenInit",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(MsgServer).ConnectionOpenInit(ctx, req.(*MsgConnectionOpenInit))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Msg_ConnectionOpenTry_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(MsgConnectionOpenTry)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(MsgServer).ConnectionOpenTry(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/ibcgo.core.connection.v1.Msg/ConnectionOpenTry",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(MsgServer).ConnectionOpenTry(ctx, req.(*MsgConnectionOpenTry))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Msg_ConnectionOpenAck_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(MsgConnectionOpenAck)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(MsgServer).ConnectionOpenAck(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/ibcgo.core.connection.v1.Msg/ConnectionOpenAck",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(MsgServer).ConnectionOpenAck(ctx, req.(*MsgConnectionOpenAck))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Msg_ConnectionOpenConfirm_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(MsgConnectionOpenConfirm)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(MsgServer).ConnectionOpenConfirm(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/ibcgo.core.connection.v1.Msg/ConnectionOpenConfirm",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(MsgServer).ConnectionOpenConfirm(ctx, req.(*MsgConnectionOpenConfirm))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _Msg_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "ibcgo.core.connection.v1.Msg",
+ HandlerType: (*MsgServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "ConnectionOpenInit",
+ Handler: _Msg_ConnectionOpenInit_Handler,
+ },
+ {
+ MethodName: "ConnectionOpenTry",
+ Handler: _Msg_ConnectionOpenTry_Handler,
+ },
+ {
+ MethodName: "ConnectionOpenAck",
+ Handler: _Msg_ConnectionOpenAck_Handler,
+ },
+ {
+ MethodName: "ConnectionOpenConfirm",
+ Handler: _Msg_ConnectionOpenConfirm_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "ibcgo/core/connection/v1/tx.proto",
+}
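+
+// Illustrative sketch, not part of the generated code: a concrete MsgServer can
+// embed UnimplementedMsgServer for forward compatibility and override only the
+// handlers it supports; the embedded methods return codes.Unimplemented for the
+// rest. The type exampleMsgServer and its handler body are hypothetical.
+type exampleMsgServer struct {
+	UnimplementedMsgServer
+}
+
+func (exampleMsgServer) ConnectionOpenInit(ctx context.Context, msg *MsgConnectionOpenInit) (*MsgConnectionOpenInitResponse, error) {
+	// A real handler would validate msg and write the INIT connection end to state.
+	return &MsgConnectionOpenInitResponse{}, nil
+}
+
+// Registration against a grpc1.Server would then be:
+// RegisterMsgServer(grpcServer, &exampleMsgServer{})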
+
+func (m *MsgConnectionOpenInit) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MsgConnectionOpenInit) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MsgConnectionOpenInit) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Signer) > 0 {
+ i -= len(m.Signer)
+ copy(dAtA[i:], m.Signer)
+ i = encodeVarintTx(dAtA, i, uint64(len(m.Signer)))
+ i--
+ dAtA[i] = 0x2a
+ }
+ if m.DelayPeriod != 0 {
+ i = encodeVarintTx(dAtA, i, uint64(m.DelayPeriod))
+ i--
+ dAtA[i] = 0x20
+ }
+ if m.Version != nil {
+ {
+ size, err := m.Version.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTx(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ {
+ size, err := m.Counterparty.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTx(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ if len(m.ClientId) > 0 {
+ i -= len(m.ClientId)
+ copy(dAtA[i:], m.ClientId)
+ i = encodeVarintTx(dAtA, i, uint64(len(m.ClientId)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
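+
+// Illustrative sketch, not part of the generated code: the literal bytes written
+// above are protobuf field keys, computed as fieldNumber<<3 | wireType. For
+// MsgConnectionOpenInit, signer is field 5 with wire type 2 (length-delimited),
+// so its key is 5<<3|2 = 0x2a; delay_period is field 4 with wire type 0
+// (varint), so its key is 4<<3|0 = 0x20. The helper below is hypothetical.
+func exampleFieldKeys() (signerKey, delayPeriodKey byte) {
+	const wireVarint, wireBytes = 0, 2
+	return byte(5<<3 | wireBytes), byte(4<<3 | wireVarint) // 0x2a, 0x20
+}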
+
+func (m *MsgConnectionOpenInitResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MsgConnectionOpenInitResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MsgConnectionOpenInitResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ return len(dAtA) - i, nil
+}
+
+func (m *MsgConnectionOpenTry) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MsgConnectionOpenTry) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MsgConnectionOpenTry) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Signer) > 0 {
+ i -= len(m.Signer)
+ copy(dAtA[i:], m.Signer)
+ i = encodeVarintTx(dAtA, i, uint64(len(m.Signer)))
+ i--
+ dAtA[i] = 0x62
+ }
+ {
+ size, err := m.ConsensusHeight.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTx(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x5a
+ if len(m.ProofConsensus) > 0 {
+ i -= len(m.ProofConsensus)
+ copy(dAtA[i:], m.ProofConsensus)
+ i = encodeVarintTx(dAtA, i, uint64(len(m.ProofConsensus)))
+ i--
+ dAtA[i] = 0x52
+ }
+ if len(m.ProofClient) > 0 {
+ i -= len(m.ProofClient)
+ copy(dAtA[i:], m.ProofClient)
+ i = encodeVarintTx(dAtA, i, uint64(len(m.ProofClient)))
+ i--
+ dAtA[i] = 0x4a
+ }
+ if len(m.ProofInit) > 0 {
+ i -= len(m.ProofInit)
+ copy(dAtA[i:], m.ProofInit)
+ i = encodeVarintTx(dAtA, i, uint64(len(m.ProofInit)))
+ i--
+ dAtA[i] = 0x42
+ }
+ {
+ size, err := m.ProofHeight.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTx(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x3a
+ if len(m.CounterpartyVersions) > 0 {
+ for iNdEx := len(m.CounterpartyVersions) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.CounterpartyVersions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTx(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x32
+ }
+ }
+ if m.DelayPeriod != 0 {
+ i = encodeVarintTx(dAtA, i, uint64(m.DelayPeriod))
+ i--
+ dAtA[i] = 0x28
+ }
+ {
+ size, err := m.Counterparty.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTx(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ if m.ClientState != nil {
+ {
+ size, err := m.ClientState.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTx(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ if len(m.PreviousConnectionId) > 0 {
+ i -= len(m.PreviousConnectionId)
+ copy(dAtA[i:], m.PreviousConnectionId)
+ i = encodeVarintTx(dAtA, i, uint64(len(m.PreviousConnectionId)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.ClientId) > 0 {
+ i -= len(m.ClientId)
+ copy(dAtA[i:], m.ClientId)
+ i = encodeVarintTx(dAtA, i, uint64(len(m.ClientId)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *MsgConnectionOpenTryResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MsgConnectionOpenTryResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MsgConnectionOpenTryResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ return len(dAtA) - i, nil
+}
+
+func (m *MsgConnectionOpenAck) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MsgConnectionOpenAck) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MsgConnectionOpenAck) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Signer) > 0 {
+ i -= len(m.Signer)
+ copy(dAtA[i:], m.Signer)
+ i = encodeVarintTx(dAtA, i, uint64(len(m.Signer)))
+ i--
+ dAtA[i] = 0x52
+ }
+ {
+ size, err := m.ConsensusHeight.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTx(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x4a
+ if len(m.ProofConsensus) > 0 {
+ i -= len(m.ProofConsensus)
+ copy(dAtA[i:], m.ProofConsensus)
+ i = encodeVarintTx(dAtA, i, uint64(len(m.ProofConsensus)))
+ i--
+ dAtA[i] = 0x42
+ }
+ if len(m.ProofClient) > 0 {
+ i -= len(m.ProofClient)
+ copy(dAtA[i:], m.ProofClient)
+ i = encodeVarintTx(dAtA, i, uint64(len(m.ProofClient)))
+ i--
+ dAtA[i] = 0x3a
+ }
+ if len(m.ProofTry) > 0 {
+ i -= len(m.ProofTry)
+ copy(dAtA[i:], m.ProofTry)
+ i = encodeVarintTx(dAtA, i, uint64(len(m.ProofTry)))
+ i--
+ dAtA[i] = 0x32
+ }
+ {
+ size, err := m.ProofHeight.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTx(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2a
+ if m.ClientState != nil {
+ {
+ size, err := m.ClientState.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTx(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.Version != nil {
+ {
+ size, err := m.Version.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTx(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ if len(m.CounterpartyConnectionId) > 0 {
+ i -= len(m.CounterpartyConnectionId)
+ copy(dAtA[i:], m.CounterpartyConnectionId)
+ i = encodeVarintTx(dAtA, i, uint64(len(m.CounterpartyConnectionId)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.ConnectionId) > 0 {
+ i -= len(m.ConnectionId)
+ copy(dAtA[i:], m.ConnectionId)
+ i = encodeVarintTx(dAtA, i, uint64(len(m.ConnectionId)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *MsgConnectionOpenAckResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MsgConnectionOpenAckResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MsgConnectionOpenAckResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ return len(dAtA) - i, nil
+}
+
+func (m *MsgConnectionOpenConfirm) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MsgConnectionOpenConfirm) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MsgConnectionOpenConfirm) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Signer) > 0 {
+ i -= len(m.Signer)
+ copy(dAtA[i:], m.Signer)
+ i = encodeVarintTx(dAtA, i, uint64(len(m.Signer)))
+ i--
+ dAtA[i] = 0x22
+ }
+ {
+ size, err := m.ProofHeight.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTx(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ if len(m.ProofAck) > 0 {
+ i -= len(m.ProofAck)
+ copy(dAtA[i:], m.ProofAck)
+ i = encodeVarintTx(dAtA, i, uint64(len(m.ProofAck)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.ConnectionId) > 0 {
+ i -= len(m.ConnectionId)
+ copy(dAtA[i:], m.ConnectionId)
+ i = encodeVarintTx(dAtA, i, uint64(len(m.ConnectionId)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *MsgConnectionOpenConfirmResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MsgConnectionOpenConfirmResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MsgConnectionOpenConfirmResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintTx(dAtA []byte, offset int, v uint64) int {
+ offset -= sovTx(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
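+
+// Illustrative sketch, not part of the generated code: encodeVarintTx writes a
+// base-128 varint ending just before the given offset and returns the index of
+// its first byte, which is why the Marshal functions above fill the buffer from
+// the end backwards. For example, the value 300 encodes to the two bytes
+// 0xAC 0x02. The helper below is hypothetical.
+func exampleEncodeVarint() []byte {
+	buf := make([]byte, sovTx(300))
+	_ = encodeVarintTx(buf, len(buf), 300)
+	return buf // [0xAC, 0x02]
+}
+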
+func (m *MsgConnectionOpenInit) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.ClientId)
+ if l > 0 {
+ n += 1 + l + sovTx(uint64(l))
+ }
+ l = m.Counterparty.Size()
+ n += 1 + l + sovTx(uint64(l))
+ if m.Version != nil {
+ l = m.Version.Size()
+ n += 1 + l + sovTx(uint64(l))
+ }
+ if m.DelayPeriod != 0 {
+ n += 1 + sovTx(uint64(m.DelayPeriod))
+ }
+ l = len(m.Signer)
+ if l > 0 {
+ n += 1 + l + sovTx(uint64(l))
+ }
+ return n
+}
+
+func (m *MsgConnectionOpenInitResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ return n
+}
+
+func (m *MsgConnectionOpenTry) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.ClientId)
+ if l > 0 {
+ n += 1 + l + sovTx(uint64(l))
+ }
+ l = len(m.PreviousConnectionId)
+ if l > 0 {
+ n += 1 + l + sovTx(uint64(l))
+ }
+ if m.ClientState != nil {
+ l = m.ClientState.Size()
+ n += 1 + l + sovTx(uint64(l))
+ }
+ l = m.Counterparty.Size()
+ n += 1 + l + sovTx(uint64(l))
+ if m.DelayPeriod != 0 {
+ n += 1 + sovTx(uint64(m.DelayPeriod))
+ }
+ if len(m.CounterpartyVersions) > 0 {
+ for _, e := range m.CounterpartyVersions {
+ l = e.Size()
+ n += 1 + l + sovTx(uint64(l))
+ }
+ }
+ l = m.ProofHeight.Size()
+ n += 1 + l + sovTx(uint64(l))
+ l = len(m.ProofInit)
+ if l > 0 {
+ n += 1 + l + sovTx(uint64(l))
+ }
+ l = len(m.ProofClient)
+ if l > 0 {
+ n += 1 + l + sovTx(uint64(l))
+ }
+ l = len(m.ProofConsensus)
+ if l > 0 {
+ n += 1 + l + sovTx(uint64(l))
+ }
+ l = m.ConsensusHeight.Size()
+ n += 1 + l + sovTx(uint64(l))
+ l = len(m.Signer)
+ if l > 0 {
+ n += 1 + l + sovTx(uint64(l))
+ }
+ return n
+}
+
+func (m *MsgConnectionOpenTryResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ return n
+}
+
+func (m *MsgConnectionOpenAck) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.ConnectionId)
+ if l > 0 {
+ n += 1 + l + sovTx(uint64(l))
+ }
+ l = len(m.CounterpartyConnectionId)
+ if l > 0 {
+ n += 1 + l + sovTx(uint64(l))
+ }
+ if m.Version != nil {
+ l = m.Version.Size()
+ n += 1 + l + sovTx(uint64(l))
+ }
+ if m.ClientState != nil {
+ l = m.ClientState.Size()
+ n += 1 + l + sovTx(uint64(l))
+ }
+ l = m.ProofHeight.Size()
+ n += 1 + l + sovTx(uint64(l))
+ l = len(m.ProofTry)
+ if l > 0 {
+ n += 1 + l + sovTx(uint64(l))
+ }
+ l = len(m.ProofClient)
+ if l > 0 {
+ n += 1 + l + sovTx(uint64(l))
+ }
+ l = len(m.ProofConsensus)
+ if l > 0 {
+ n += 1 + l + sovTx(uint64(l))
+ }
+ l = m.ConsensusHeight.Size()
+ n += 1 + l + sovTx(uint64(l))
+ l = len(m.Signer)
+ if l > 0 {
+ n += 1 + l + sovTx(uint64(l))
+ }
+ return n
+}
+
+func (m *MsgConnectionOpenAckResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ return n
+}
+
+func (m *MsgConnectionOpenConfirm) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.ConnectionId)
+ if l > 0 {
+ n += 1 + l + sovTx(uint64(l))
+ }
+ l = len(m.ProofAck)
+ if l > 0 {
+ n += 1 + l + sovTx(uint64(l))
+ }
+ l = m.ProofHeight.Size()
+ n += 1 + l + sovTx(uint64(l))
+ l = len(m.Signer)
+ if l > 0 {
+ n += 1 + l + sovTx(uint64(l))
+ }
+ return n
+}
+
+func (m *MsgConnectionOpenConfirmResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ return n
+}
+
+func sovTx(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozTx(x uint64) (n int) {
+ return sovTx(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
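+
+// Illustrative sketch, not part of the generated code: sovTx is the byte length
+// of x encoded as a varint, i.e. ceil(bitlen(x|1)/7), and sozTx applies zigzag
+// encoding first so that small negative values also stay small on the wire.
+// A hypothetical self-check:
+func exampleVarintSizes() bool {
+	return sovTx(0) == 1 && sovTx(127) == 1 && sovTx(128) == 2 && sovTx(1<<63) == 10
+}
+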
+func (m *MsgConnectionOpenInit) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MsgConnectionOpenInit: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MsgConnectionOpenInit: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClientId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ClientId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Counterparty", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Counterparty.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Version == nil {
+ m.Version = &Version{}
+ }
+ if err := m.Version.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DelayPeriod", wireType)
+ }
+ m.DelayPeriod = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.DelayPeriod |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Signer", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Signer = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTx(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthTx
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
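+
+// Illustrative sketch, not part of the generated code: Marshal and Unmarshal are
+// inverses, so a message survives a wire round trip. The client identifier and
+// signer values below are hypothetical placeholders.
+func exampleRoundTrip() error {
+	in := &MsgConnectionOpenInit{ClientId: "07-tendermint-0", DelayPeriod: 10, Signer: "signer"}
+	bz, err := in.Marshal()
+	if err != nil {
+		return err
+	}
+	out := &MsgConnectionOpenInit{}
+	if err := out.Unmarshal(bz); err != nil {
+		return err
+	}
+	if out.ClientId != in.ClientId || out.DelayPeriod != in.DelayPeriod || out.Signer != in.Signer {
+		return fmt.Errorf("round trip mismatch")
+	}
+	return nil
+}
+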
+func (m *MsgConnectionOpenInitResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MsgConnectionOpenInitResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MsgConnectionOpenInitResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTx(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthTx
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MsgConnectionOpenTry) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MsgConnectionOpenTry: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MsgConnectionOpenTry: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClientId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ClientId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PreviousConnectionId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.PreviousConnectionId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClientState", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ClientState == nil {
+ m.ClientState = &types.Any{}
+ }
+ if err := m.ClientState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Counterparty", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Counterparty.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DelayPeriod", wireType)
+ }
+ m.DelayPeriod = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.DelayPeriod |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CounterpartyVersions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.CounterpartyVersions = append(m.CounterpartyVersions, &Version{})
+ if err := m.CounterpartyVersions[len(m.CounterpartyVersions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProofHeight", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ProofHeight.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProofInit", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ProofInit = append(m.ProofInit[:0], dAtA[iNdEx:postIndex]...)
+ if m.ProofInit == nil {
+ m.ProofInit = []byte{}
+ }
+ iNdEx = postIndex
+ case 9:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProofClient", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ProofClient = append(m.ProofClient[:0], dAtA[iNdEx:postIndex]...)
+ if m.ProofClient == nil {
+ m.ProofClient = []byte{}
+ }
+ iNdEx = postIndex
+ case 10:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProofConsensus", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ProofConsensus = append(m.ProofConsensus[:0], dAtA[iNdEx:postIndex]...)
+ if m.ProofConsensus == nil {
+ m.ProofConsensus = []byte{}
+ }
+ iNdEx = postIndex
+ case 11:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ConsensusHeight", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ConsensusHeight.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 12:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Signer", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Signer = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTx(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthTx
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MsgConnectionOpenTryResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MsgConnectionOpenTryResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MsgConnectionOpenTryResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTx(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthTx
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MsgConnectionOpenAck) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MsgConnectionOpenAck: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MsgConnectionOpenAck: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ConnectionId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ConnectionId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CounterpartyConnectionId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.CounterpartyConnectionId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Version == nil {
+ m.Version = &Version{}
+ }
+ if err := m.Version.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClientState", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ClientState == nil {
+ m.ClientState = &types.Any{}
+ }
+ if err := m.ClientState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProofHeight", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ProofHeight.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProofTry", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ProofTry = append(m.ProofTry[:0], dAtA[iNdEx:postIndex]...)
+ if m.ProofTry == nil {
+ m.ProofTry = []byte{}
+ }
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProofClient", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ProofClient = append(m.ProofClient[:0], dAtA[iNdEx:postIndex]...)
+ if m.ProofClient == nil {
+ m.ProofClient = []byte{}
+ }
+ iNdEx = postIndex
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProofConsensus", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ProofConsensus = append(m.ProofConsensus[:0], dAtA[iNdEx:postIndex]...)
+ if m.ProofConsensus == nil {
+ m.ProofConsensus = []byte{}
+ }
+ iNdEx = postIndex
+ case 9:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ConsensusHeight", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ConsensusHeight.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 10:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Signer", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Signer = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTx(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthTx
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MsgConnectionOpenAckResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MsgConnectionOpenAckResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MsgConnectionOpenAckResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTx(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthTx
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MsgConnectionOpenConfirm) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MsgConnectionOpenConfirm: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MsgConnectionOpenConfirm: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ConnectionId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ConnectionId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProofAck", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ProofAck = append(m.ProofAck[:0], dAtA[iNdEx:postIndex]...)
+ if m.ProofAck == nil {
+ m.ProofAck = []byte{}
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProofHeight", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ProofHeight.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Signer", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Signer = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTx(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthTx
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MsgConnectionOpenConfirmResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MsgConnectionOpenConfirmResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MsgConnectionOpenConfirmResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTx(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthTx
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipTx(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthTx
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupTx
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthTx
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
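+
+// decodeVarintSketch is an illustrative helper (an assumption for exposition,
+// not emitted by the code generator) showing the base-128 varint scheme the
+// Unmarshal methods above rely on: each byte contributes its low 7 bits, and a
+// byte below 0x80 marks the end of the encoded integer.
+func decodeVarintSketch(dAtA []byte) (value uint64, n int) {
+	for shift := uint(0); ; shift += 7 {
+		b := dAtA[n]
+		n++
+		value |= uint64(b&0x7F) << shift
+		if b < 0x80 {
+			break
+		}
+	}
+	return value, n
+}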
+
+var (
+ ErrInvalidLengthTx = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowTx = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupTx = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/core/03-connection/types/version.go b/core/03-connection/types/version.go
new file mode 100644
index 00000000..10c5b33d
--- /dev/null
+++ b/core/03-connection/types/version.go
@@ -0,0 +1,220 @@
+package types
+
+import (
+ "strings"
+
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+)
+
+var (
+ // DefaultIBCVersion represents the latest supported version of IBC used
+ // in connection version negotiation. The current version supports only
+ // ORDERED and UNORDERED channels and requires at least one channel type
+ // to be agreed upon.
+ DefaultIBCVersion = NewVersion(DefaultIBCVersionIdentifier, []string{"ORDER_ORDERED", "ORDER_UNORDERED"})
+
+ // DefaultIBCVersionIdentifier is the IBC v1.0.0 protocol version identifier
+ DefaultIBCVersionIdentifier = "1"
+
+	// allowNilFeatureSet is a helper map to indicate if a specified version
+	// identifier is allowed to have a nil feature set. Any versions supported
+	// but not included in the map default to not supporting nil feature sets.
+ allowNilFeatureSet = map[string]bool{
+ DefaultIBCVersionIdentifier: false,
+ }
+)
+
+var _ exported.Version = &Version{}
+
+// NewVersion returns a new instance of Version.
+func NewVersion(identifier string, features []string) *Version {
+ return &Version{
+ Identifier: identifier,
+ Features: features,
+ }
+}
+
+// GetIdentifier implements the exported.Version interface
+func (version Version) GetIdentifier() string {
+ return version.Identifier
+}
+
+// GetFeatures implements the exported.Version interface
+func (version Version) GetFeatures() []string {
+ return version.Features
+}
+
+// ValidateVersion does basic validation of the version identifier and
+// features. It returns an error if the version is nil, the identifier is
+// blank, or any feature is blank.
+func ValidateVersion(version *Version) error {
+ if version == nil {
+ return sdkerrors.Wrap(ErrInvalidVersion, "version cannot be nil")
+ }
+ if strings.TrimSpace(version.Identifier) == "" {
+ return sdkerrors.Wrap(ErrInvalidVersion, "version identifier cannot be blank")
+ }
+ for i, feature := range version.Features {
+ if strings.TrimSpace(feature) == "" {
+ return sdkerrors.Wrapf(ErrInvalidVersion, "feature cannot be blank, index %d", i)
+ }
+ }
+
+ return nil
+}
+
+// VerifyProposedVersion verifies that the entire feature set in the
+// proposed version is supported by this chain. If the feature set is
+// empty it verifies that this is allowed for the specified version
+// identifier.
+func (version Version) VerifyProposedVersion(proposedVersion exported.Version) error {
+ if proposedVersion.GetIdentifier() != version.GetIdentifier() {
+ return sdkerrors.Wrapf(
+ ErrVersionNegotiationFailed,
+ "proposed version identifier does not equal supported version identifier (%s != %s)", proposedVersion.GetIdentifier(), version.GetIdentifier(),
+ )
+ }
+
+ if len(proposedVersion.GetFeatures()) == 0 && !allowNilFeatureSet[proposedVersion.GetIdentifier()] {
+ return sdkerrors.Wrapf(
+ ErrVersionNegotiationFailed,
+ "nil feature sets are not supported for version identifier (%s)", proposedVersion.GetIdentifier(),
+ )
+ }
+
+ for _, proposedFeature := range proposedVersion.GetFeatures() {
+ if !contains(proposedFeature, version.GetFeatures()) {
+ return sdkerrors.Wrapf(
+ ErrVersionNegotiationFailed,
+ "proposed feature (%s) is not a supported feature set (%s)", proposedFeature, version.GetFeatures(),
+ )
+ }
+ }
+
+ return nil
+}
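+
+// verifyProposedVersionSketch is a minimal illustrative sketch (the helper is
+// an assumption for exposition and is not called by the handshake code): a
+// proposed feature set that is a subset of the supported one is accepted,
+// while a proposal containing an unsupported feature is rejected.
+func verifyProposedVersionSketch() (subsetErr, unsupportedErr error) {
+	supported := NewVersion(DefaultIBCVersionIdentifier, []string{"ORDER_ORDERED", "ORDER_UNORDERED"})
+
+	// subset of the supported features: returns nil
+	subsetErr = supported.VerifyProposedVersion(NewVersion(DefaultIBCVersionIdentifier, []string{"ORDER_ORDERED"}))
+
+	// ORDER_DAG is not supported by this chain: returns a version negotiation error
+	unsupportedErr = supported.VerifyProposedVersion(NewVersion(DefaultIBCVersionIdentifier, []string{"ORDER_DAG"}))
+
+	return subsetErr, unsupportedErr
+}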
+
+// VerifySupportedFeature takes in a version and feature string and returns
+// true if the feature is supported by the version and false otherwise.
+func VerifySupportedFeature(version exported.Version, feature string) bool {
+ for _, f := range version.GetFeatures() {
+ if f == feature {
+ return true
+ }
+ }
+ return false
+}
+
+// GetCompatibleVersions returns a descending ordered set of compatible IBC
+// versions for the caller chain's connection end. The latest supported
+// version should be the first element and the set should descend to the oldest
+// supported version.
+func GetCompatibleVersions() []exported.Version {
+ return []exported.Version{DefaultIBCVersion}
+}
+
+// IsSupportedVersion returns true if the proposed version has a matching version
+// identifier and its entire feature set is supported or the version identifier
+// supports an empty feature set.
+func IsSupportedVersion(proposedVersion *Version) bool {
+ supportedVersion, found := FindSupportedVersion(proposedVersion, GetCompatibleVersions())
+ if !found {
+ return false
+ }
+
+ if err := supportedVersion.VerifyProposedVersion(proposedVersion); err != nil {
+ return false
+ }
+
+ return true
+}
+
+// FindSupportedVersion returns the version with a matching version identifier
+// if it exists. The returned boolean is true if the version is found and
+// false otherwise.
+func FindSupportedVersion(version exported.Version, supportedVersions []exported.Version) (exported.Version, bool) {
+ for _, supportedVersion := range supportedVersions {
+ if version.GetIdentifier() == supportedVersion.GetIdentifier() {
+ return supportedVersion, true
+ }
+ }
+ return nil, false
+}
+
+// PickVersion iterates over the descending ordered set of compatible IBC
+// versions and selects the first version with a version identifier that is
+// supported by the counterparty. The returned version contains a feature
+// set with the intersection of the features supported by the source and
+// counterparty chains. If the feature set intersection is nil and this is
+// not allowed for the chosen version identifier then the search for a
+// compatible version continues. This function is called in the ConnOpenTry
+// handshake procedure.
+//
+// CONTRACT: PickVersion must only provide a version that is in the
+// intersection of the supported versions and the counterparty versions.
+func PickVersion(supportedVersions, counterpartyVersions []exported.Version) (*Version, error) {
+ for _, supportedVersion := range supportedVersions {
+ // check if the source version is supported by the counterparty
+ if counterpartyVersion, found := FindSupportedVersion(supportedVersion, counterpartyVersions); found {
+ featureSet := GetFeatureSetIntersection(supportedVersion.GetFeatures(), counterpartyVersion.GetFeatures())
+ if len(featureSet) == 0 && !allowNilFeatureSet[supportedVersion.GetIdentifier()] {
+ continue
+ }
+
+ return NewVersion(supportedVersion.GetIdentifier(), featureSet), nil
+ }
+ }
+
+ return nil, sdkerrors.Wrapf(
+ ErrVersionNegotiationFailed,
+ "failed to find a matching counterparty version (%v) from the supported version list (%v)", counterpartyVersions, supportedVersions,
+ )
+}
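+
+// pickVersionSketch is a minimal illustrative sketch (the helper and its
+// hard-coded counterparty list are assumptions for exposition, not part of
+// the ConnOpenTry flow): against a counterparty that only supports ordered
+// channels, PickVersion keeps the default identifier and negotiates the
+// feature set down to ORDER_ORDERED.
+func pickVersionSketch() (*Version, error) {
+	counterpartyVersions := []exported.Version{
+		NewVersion(DefaultIBCVersionIdentifier, []string{"ORDER_ORDERED"}),
+	}
+	// expected: &Version{Identifier: "1", Features: []string{"ORDER_ORDERED"}}, nil
+	return PickVersion(GetCompatibleVersions(), counterpartyVersions)
+}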
+
+// GetFeatureSetIntersection returns the intersection of the source feature
+// set and the counterparty feature set. This is done by iterating over all the
+// features in the source version and seeing if they exist in the feature
+// set for the counterparty version.
+func GetFeatureSetIntersection(sourceFeatureSet, counterpartyFeatureSet []string) (featureSet []string) {
+ for _, feature := range sourceFeatureSet {
+ if contains(feature, counterpartyFeatureSet) {
+ featureSet = append(featureSet, feature)
+ }
+ }
+
+ return featureSet
+}
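+
+// featureIntersectionSketch is a minimal illustrative call (the literal
+// feature lists are assumptions for exposition): only features present on
+// both sides survive, so the result here is []string{"ORDER_ORDERED"}.
+func featureIntersectionSketch() []string {
+	return GetFeatureSetIntersection(
+		[]string{"ORDER_ORDERED", "ORDER_UNORDERED"},
+		[]string{"ORDER_ORDERED", "ORDER_DAG"},
+	)
+}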
+
+// ExportedVersionsToProto casts a slice of the Version interface to a slice
+// of the Version proto definition.
+func ExportedVersionsToProto(exportedVersions []exported.Version) []*Version {
+ versions := make([]*Version, len(exportedVersions))
+ for i := range exportedVersions {
+ versions[i] = exportedVersions[i].(*Version)
+ }
+
+ return versions
+}
+
+// ProtoVersionsToExported converts a slice of the Version proto definition to
+// the Version interface.
+func ProtoVersionsToExported(versions []*Version) []exported.Version {
+ exportedVersions := make([]exported.Version, len(versions))
+ for i := range versions {
+ exportedVersions[i] = versions[i]
+ }
+
+ return exportedVersions
+}
+
+// contains returns true if the provided string element exists within the
+// string set.
+func contains(elem string, set []string) bool {
+ for _, element := range set {
+ if elem == element {
+ return true
+ }
+ }
+
+ return false
+}
diff --git a/core/03-connection/types/version_test.go b/core/03-connection/types/version_test.go
new file mode 100644
index 00000000..8f882dd3
--- /dev/null
+++ b/core/03-connection/types/version_test.go
@@ -0,0 +1,167 @@
+package types_test
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+)
+
+func TestValidateVersion(t *testing.T) {
+ testCases := []struct {
+ name string
+ version *types.Version
+ expPass bool
+ }{
+ {"valid version", types.DefaultIBCVersion, true},
+ {"valid empty feature set", types.NewVersion(types.DefaultIBCVersionIdentifier, []string{}), true},
+ {"empty version identifier", types.NewVersion(" ", []string{"ORDER_UNORDERED"}), false},
+ {"empty feature", types.NewVersion(types.DefaultIBCVersionIdentifier, []string{"ORDER_UNORDERED", " "}), false},
+ }
+
+ for i, tc := range testCases {
+ err := types.ValidateVersion(tc.version)
+
+ if tc.expPass {
+ require.NoError(t, err, "valid test case %d failed: %s", i, tc.name)
+ } else {
+ require.Error(t, err, "invalid test case %d passed: %s", i, tc.name)
+ }
+ }
+}
+
+func TestIsSupportedVersion(t *testing.T) {
+ testCases := []struct {
+ name string
+ version *types.Version
+ expPass bool
+ }{
+ {
+ "version is supported",
+ types.ExportedVersionsToProto(types.GetCompatibleVersions())[0],
+ true,
+ },
+ {
+ "version is not supported",
+ &types.Version{},
+ false,
+ },
+ {
+ "version feature is not supported",
+ types.NewVersion(types.DefaultIBCVersionIdentifier, []string{"ORDER_DAG"}),
+ false,
+ },
+ }
+
+ for _, tc := range testCases {
+ require.Equal(t, tc.expPass, types.IsSupportedVersion(tc.version))
+ }
+}
+
+func TestFindSupportedVersion(t *testing.T) {
+ testCases := []struct {
+ name string
+ version *types.Version
+ supportedVersions []exported.Version
+ expVersion *types.Version
+ expFound bool
+ }{
+ {"valid supported version", types.DefaultIBCVersion, types.GetCompatibleVersions(), types.DefaultIBCVersion, true},
+ {"empty (invalid) version", &types.Version{}, types.GetCompatibleVersions(), &types.Version{}, false},
+ {"empty supported versions", types.DefaultIBCVersion, []exported.Version{}, &types.Version{}, false},
+ {"desired version is last", types.DefaultIBCVersion, []exported.Version{types.NewVersion("1.1", nil), types.NewVersion("2", []string{"ORDER_UNORDERED"}), types.NewVersion("3", nil), types.DefaultIBCVersion}, types.DefaultIBCVersion, true},
+ {"desired version identifier with different feature set", types.NewVersion(types.DefaultIBCVersionIdentifier, []string{"ORDER_DAG"}), types.GetCompatibleVersions(), types.DefaultIBCVersion, true},
+ {"version not supported", types.NewVersion("2", []string{"ORDER_DAG"}), types.GetCompatibleVersions(), &types.Version{}, false},
+ }
+
+ for i, tc := range testCases {
+ version, found := types.FindSupportedVersion(tc.version, tc.supportedVersions)
+ if tc.expFound {
+ require.Equal(t, tc.expVersion.GetIdentifier(), version.GetIdentifier(), "test case %d: %s", i, tc.name)
+ require.True(t, found, "test case %d: %s", i, tc.name)
+ } else {
+ require.False(t, found, "test case: %s", tc.name)
+ require.Nil(t, version, "test case: %s", tc.name)
+ }
+ }
+}
+
+func TestPickVersion(t *testing.T) {
+ testCases := []struct {
+ name string
+ supportedVersions []exported.Version
+ counterpartyVersions []exported.Version
+ expVer *types.Version
+ expPass bool
+ }{
+ {"valid default ibc version", types.GetCompatibleVersions(), types.GetCompatibleVersions(), types.DefaultIBCVersion, true},
+ {"valid version in counterparty versions", types.GetCompatibleVersions(), []exported.Version{types.NewVersion("version1", nil), types.NewVersion("2.0.0", []string{"ORDER_UNORDERED-ZK"}), types.DefaultIBCVersion}, types.DefaultIBCVersion, true},
+ {"valid identifier match but empty feature set not allowed", types.GetCompatibleVersions(), []exported.Version{types.NewVersion(types.DefaultIBCVersionIdentifier, []string{"DAG", "ORDERED-ZK", "UNORDERED-zk]"})}, types.NewVersion(types.DefaultIBCVersionIdentifier, nil), false},
+ {"empty counterparty versions", types.GetCompatibleVersions(), []exported.Version{}, &types.Version{}, false},
+ {"non-matching counterparty versions", types.GetCompatibleVersions(), []exported.Version{types.NewVersion("2.0.0", nil)}, &types.Version{}, false},
+ {"non-matching counterparty versions (uses ordered channels only) contained in supported versions (uses unordered channels only)", []exported.Version{types.NewVersion(types.DefaultIBCVersionIdentifier, []string{"ORDER_UNORDERED"})}, []exported.Version{types.NewVersion(types.DefaultIBCVersionIdentifier, []string{"ORDER_ORDERED"})}, &types.Version{}, false},
+ }
+
+ for i, tc := range testCases {
+ version, err := types.PickVersion(tc.supportedVersions, tc.counterpartyVersions)
+
+ if tc.expPass {
+ require.NoError(t, err, "valid test case %d failed: %s", i, tc.name)
+ } else {
+ require.Error(t, err, "invalid test case %d passed: %s", i, tc.name)
+ var emptyVersion *types.Version
+ require.Equal(t, emptyVersion, version, "invalid test case %d passed: %s", i, tc.name)
+ }
+ }
+}
+
+func TestVerifyProposedVersion(t *testing.T) {
+ testCases := []struct {
+ name string
+ proposedVersion *types.Version
+ supportedVersion *types.Version
+ expPass bool
+ }{
+ {"entire feature set supported", types.DefaultIBCVersion, types.NewVersion("1", []string{"ORDER_ORDERED", "ORDER_UNORDERED", "ORDER_DAG"}), true},
+ {"empty feature sets not supported", types.NewVersion("1", []string{}), types.DefaultIBCVersion, false},
+ {"one feature missing", types.DefaultIBCVersion, types.NewVersion("1", []string{"ORDER_UNORDERED", "ORDER_DAG"}), false},
+ {"both features missing", types.DefaultIBCVersion, types.NewVersion("1", []string{"ORDER_DAG"}), false},
+ {"identifiers do not match", types.NewVersion("2", []string{"ORDER_UNORDERED", "ORDER_ORDERED"}), types.DefaultIBCVersion, false},
+ }
+
+ for i, tc := range testCases {
+ err := tc.supportedVersion.VerifyProposedVersion(tc.proposedVersion)
+
+ if tc.expPass {
+ require.NoError(t, err, "test case %d: %s", i, tc.name)
+ } else {
+ require.Error(t, err, "test case %d: %s", i, tc.name)
+ }
+ }
+
+}
+
+func TestVerifySupportedFeature(t *testing.T) {
+ nilFeatures := types.NewVersion(types.DefaultIBCVersionIdentifier, nil)
+
+ testCases := []struct {
+ name string
+ version *types.Version
+ feature string
+ expPass bool
+ }{
+ {"check ORDERED supported", ibctesting.ConnectionVersion, "ORDER_ORDERED", true},
+ {"check UNORDERED supported", ibctesting.ConnectionVersion, "ORDER_UNORDERED", true},
+ {"check DAG unsupported", ibctesting.ConnectionVersion, "ORDER_DAG", false},
+ {"check empty feature set returns false", nilFeatures, "ORDER_ORDERED", false},
+ }
+
+ for i, tc := range testCases {
+ supported := types.VerifySupportedFeature(tc.version, tc.feature)
+
+ require.Equal(t, tc.expPass, supported, "test case %d: %s", i, tc.name)
+ }
+}
diff --git a/core/04-channel/client/cli/cli.go b/core/04-channel/client/cli/cli.go
new file mode 100644
index 00000000..baf386fe
--- /dev/null
+++ b/core/04-channel/client/cli/cli.go
@@ -0,0 +1,58 @@
+package cli
+
+import (
+ "github.com/spf13/cobra"
+
+ "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+)
+
+// GetQueryCmd returns the query commands for IBC channels
+func GetQueryCmd() *cobra.Command {
+ queryCmd := &cobra.Command{
+ Use: types.SubModuleName,
+ Short: "IBC channel query subcommands",
+ DisableFlagParsing: true,
+ SuggestionsMinimumDistance: 2,
+ RunE: client.ValidateCmd,
+ }
+
+ queryCmd.AddCommand(
+ GetCmdQueryChannels(),
+ GetCmdQueryChannel(),
+ GetCmdQueryConnectionChannels(),
+ GetCmdQueryChannelClientState(),
+ GetCmdQueryPacketCommitment(),
+ GetCmdQueryPacketCommitments(),
+ GetCmdQueryPacketReceipt(),
+ GetCmdQueryPacketAcknowledgement(),
+ GetCmdQueryUnreceivedPackets(),
+ GetCmdQueryUnreceivedAcks(),
+ GetCmdQueryNextSequenceReceive(),
+ // TODO: next sequence Send ?
+ )
+
+ return queryCmd
+}
+
+// NewTxCmd returns a CLI command handler for all x/ibc channel transaction commands.
+func NewTxCmd() *cobra.Command {
+ txCmd := &cobra.Command{
+ Use: types.SubModuleName,
+ Short: "IBC channel transaction subcommands",
+ DisableFlagParsing: true,
+ SuggestionsMinimumDistance: 2,
+ RunE: client.ValidateCmd,
+ }
+
+ txCmd.AddCommand(
+ NewChannelOpenInitCmd(),
+ NewChannelOpenTryCmd(),
+ NewChannelOpenAckCmd(),
+ NewChannelOpenConfirmCmd(),
+ NewChannelCloseInitCmd(),
+ NewChannelCloseConfirmCmd(),
+ )
+
+ return txCmd
+}
diff --git a/core/04-channel/client/cli/query.go b/core/04-channel/client/cli/query.go
new file mode 100644
index 00000000..03df474f
--- /dev/null
+++ b/core/04-channel/client/cli/query.go
@@ -0,0 +1,457 @@
+package cli
+
+import (
+ "fmt"
+ "strconv"
+
+ "github.com/spf13/cobra"
+
+ "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/client/flags"
+ "github.com/cosmos/cosmos-sdk/version"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/client/utils"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+ host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+)
+
+const (
+ flagSequences = "sequences"
+)
+
+// GetCmdQueryChannels defines the command to query all the channel ends
+// that this chain maintains.
+func GetCmdQueryChannels() *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "channels",
+ Short: "Query all channels",
+ Long: "Query all channels from a chain",
+ Example: fmt.Sprintf("%s query %s %s channels", version.AppName, host.ModuleName, types.SubModuleName),
+ Args: cobra.NoArgs,
+ RunE: func(cmd *cobra.Command, _ []string) error {
+ clientCtx, err := client.GetClientQueryContext(cmd)
+ if err != nil {
+ return err
+ }
+ queryClient := types.NewQueryClient(clientCtx)
+
+ pageReq, err := client.ReadPageRequest(cmd.Flags())
+ if err != nil {
+ return err
+ }
+
+ req := &types.QueryChannelsRequest{
+ Pagination: pageReq,
+ }
+
+ res, err := queryClient.Channels(cmd.Context(), req)
+ if err != nil {
+ return err
+ }
+
+ return clientCtx.PrintProto(res)
+ },
+ }
+
+ flags.AddQueryFlagsToCmd(cmd)
+ flags.AddPaginationFlagsToCmd(cmd, "channels")
+
+ return cmd
+}
+
+// GetCmdQueryChannel defines the command to query a channel end
+func GetCmdQueryChannel() *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "end [port-id] [channel-id]",
+ Short: "Query a channel end",
+ Long: "Query an IBC channel end from a port and channel identifiers",
+ Example: fmt.Sprintf(
+ "%s query %s %s end [port-id] [channel-id]", version.AppName, host.ModuleName, types.SubModuleName,
+ ),
+ Args: cobra.ExactArgs(2),
+ RunE: func(cmd *cobra.Command, args []string) error {
+ clientCtx, err := client.GetClientQueryContext(cmd)
+ if err != nil {
+ return err
+ }
+ portID := args[0]
+ channelID := args[1]
+ prove, _ := cmd.Flags().GetBool(flags.FlagProve)
+
+ channelRes, err := utils.QueryChannel(clientCtx, portID, channelID, prove)
+ if err != nil {
+ return err
+ }
+
+ return clientCtx.PrintProto(channelRes)
+ },
+ }
+
+ cmd.Flags().Bool(flags.FlagProve, true, "show proofs for the query results")
+ flags.AddQueryFlagsToCmd(cmd)
+
+ return cmd
+}
+
+// GetCmdQueryConnectionChannels defines the command to query all the channels associated with a
+// connection
+func GetCmdQueryConnectionChannels() *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "connections [connection-id]",
+ Short: "Query all channels associated with a connection",
+ Long: "Query all channels associated with a connection",
+ Example: fmt.Sprintf("%s query %s %s connections [connection-id]", version.AppName, host.ModuleName, types.SubModuleName),
+ Args: cobra.ExactArgs(1),
+ RunE: func(cmd *cobra.Command, args []string) error {
+ clientCtx, err := client.GetClientQueryContext(cmd)
+ if err != nil {
+ return err
+ }
+ queryClient := types.NewQueryClient(clientCtx)
+ pageReq, err := client.ReadPageRequest(cmd.Flags())
+ if err != nil {
+ return err
+ }
+
+ req := &types.QueryConnectionChannelsRequest{
+ Connection: args[0],
+ Pagination: pageReq,
+ }
+
+ res, err := queryClient.ConnectionChannels(cmd.Context(), req)
+ if err != nil {
+ return err
+ }
+
+ return clientCtx.PrintProto(res)
+ },
+ }
+
+ flags.AddQueryFlagsToCmd(cmd)
+ flags.AddPaginationFlagsToCmd(cmd, "channels associated with a connection")
+
+ return cmd
+}
+
+// GetCmdQueryChannelClientState defines the command to query a client state from a channel
+func GetCmdQueryChannelClientState() *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "client-state [port-id] [channel-id]",
+ Short: "Query the client state associated with a channel",
+ Long: "Query the client state associated with a channel, by providing its port and channel identifiers.",
+ Example: fmt.Sprintf("%s query ibc channel client-state [port-id] [channel-id]", version.AppName),
+ Args: cobra.ExactArgs(2),
+ RunE: func(cmd *cobra.Command, args []string) error {
+ clientCtx, err := client.GetClientQueryContext(cmd)
+ if err != nil {
+ return err
+ }
+ portID := args[0]
+ channelID := args[1]
+
+ res, err := utils.QueryChannelClientState(clientCtx, portID, channelID, false)
+ if err != nil {
+ return err
+ }
+
+ return clientCtx.PrintProto(res.IdentifiedClientState)
+ },
+ }
+
+ flags.AddQueryFlagsToCmd(cmd)
+
+ return cmd
+}
+
+// GetCmdQueryPacketCommitments defines the command to query all packet commitments associated with
+// a channel
+func GetCmdQueryPacketCommitments() *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "packet-commitments [port-id] [channel-id]",
+ Short: "Query all packet commitments associated with a channel",
+ Long: "Query all packet commitments associated with a channel",
+ Example: fmt.Sprintf("%s query %s %s packet-commitments [port-id] [channel-id]", version.AppName, host.ModuleName, types.SubModuleName),
+ Args: cobra.ExactArgs(2),
+ RunE: func(cmd *cobra.Command, args []string) error {
+ clientCtx, err := client.GetClientQueryContext(cmd)
+ if err != nil {
+ return err
+ }
+ queryClient := types.NewQueryClient(clientCtx)
+ pageReq, err := client.ReadPageRequest(cmd.Flags())
+ if err != nil {
+ return err
+ }
+
+ req := &types.QueryPacketCommitmentsRequest{
+ PortId: args[0],
+ ChannelId: args[1],
+ Pagination: pageReq,
+ }
+
+ res, err := queryClient.PacketCommitments(cmd.Context(), req)
+ if err != nil {
+ return err
+ }
+
+ return clientCtx.PrintProto(res)
+ },
+ }
+
+ flags.AddQueryFlagsToCmd(cmd)
+ flags.AddPaginationFlagsToCmd(cmd, "packet commitments associated with a channel")
+
+ return cmd
+}
+
+// GetCmdQueryPacketCommitment defines the command to query a packet commitment
+func GetCmdQueryPacketCommitment() *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "packet-commitment [port-id] [channel-id] [sequence]",
+ Short: "Query a packet commitment",
+ Long: "Query a packet commitment",
+ Example: fmt.Sprintf(
+ "%s query %s %s packet-commitment [port-id] [channel-id] [sequence]", version.AppName, host.ModuleName, types.SubModuleName,
+ ),
+ Args: cobra.ExactArgs(3),
+ RunE: func(cmd *cobra.Command, args []string) error {
+ clientCtx, err := client.GetClientQueryContext(cmd)
+ if err != nil {
+ return err
+ }
+ portID := args[0]
+ channelID := args[1]
+ prove, _ := cmd.Flags().GetBool(flags.FlagProve)
+
+ seq, err := strconv.ParseUint(args[2], 10, 64)
+ if err != nil {
+ return err
+ }
+
+ res, err := utils.QueryPacketCommitment(clientCtx, portID, channelID, seq, prove)
+ if err != nil {
+ return err
+ }
+
+ return clientCtx.PrintProto(res)
+ },
+ }
+
+ cmd.Flags().Bool(flags.FlagProve, true, "show proofs for the query results")
+ flags.AddQueryFlagsToCmd(cmd)
+
+ return cmd
+}
+
+// GetCmdQueryPacketReceipt defines the command to query a packet receipt
+func GetCmdQueryPacketReceipt() *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "packet-receipt [port-id] [channel-id] [sequence]",
+ Short: "Query a packet receipt",
+ Long: "Query a packet receipt",
+ Example: fmt.Sprintf(
+ "%s query %s %s packet-receipt [port-id] [channel-id] [sequence]", version.AppName, host.ModuleName, types.SubModuleName,
+ ),
+ Args: cobra.ExactArgs(3),
+ RunE: func(cmd *cobra.Command, args []string) error {
+ clientCtx, err := client.GetClientQueryContext(cmd)
+ if err != nil {
+ return err
+ }
+ portID := args[0]
+ channelID := args[1]
+ prove, _ := cmd.Flags().GetBool(flags.FlagProve)
+
+ seq, err := strconv.ParseUint(args[2], 10, 64)
+ if err != nil {
+ return err
+ }
+
+ res, err := utils.QueryPacketReceipt(clientCtx, portID, channelID, seq, prove)
+ if err != nil {
+ return err
+ }
+
+ return clientCtx.PrintProto(res)
+ },
+ }
+
+ cmd.Flags().Bool(flags.FlagProve, true, "show proofs for the query results")
+ flags.AddQueryFlagsToCmd(cmd)
+
+ return cmd
+}
+
+// GetCmdQueryPacketAcknowledgement defines the command to query a packet acknowledgement
+func GetCmdQueryPacketAcknowledgement() *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "packet-ack [port-id] [channel-id] [sequence]",
+ Short: "Query a packet acknowledgement",
+ Long: "Query a packet acknowledgement",
+ Example: fmt.Sprintf(
+ "%s query %s %s packet-ack [port-id] [channel-id] [sequence]", version.AppName, host.ModuleName, types.SubModuleName,
+ ),
+ Args: cobra.ExactArgs(3),
+ RunE: func(cmd *cobra.Command, args []string) error {
+ clientCtx, err := client.GetClientQueryContext(cmd)
+ if err != nil {
+ return err
+ }
+ portID := args[0]
+ channelID := args[1]
+ prove, _ := cmd.Flags().GetBool(flags.FlagProve)
+
+ seq, err := strconv.ParseUint(args[2], 10, 64)
+ if err != nil {
+ return err
+ }
+
+ res, err := utils.QueryPacketAcknowledgement(clientCtx, portID, channelID, seq, prove)
+ if err != nil {
+ return err
+ }
+
+ return clientCtx.PrintProto(res)
+ },
+ }
+
+ cmd.Flags().Bool(flags.FlagProve, true, "show proofs for the query results")
+ flags.AddQueryFlagsToCmd(cmd)
+
+ return cmd
+}
+
+// GetCmdQueryUnreceivedPackets defines the command to query all the unreceived
+// packets on the receiving chain
+func GetCmdQueryUnreceivedPackets() *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "unreceived-packets [port-id] [channel-id]",
+ Short: "Query all the unreceived packets associated with a channel",
+ Long: `Determine if a packet, given a list of packet commitment sequences, is unreceived.
+
+The return value represents:
+- Unreceived packet commitments: no acknowledgement exists on receiving chain for the given packet commitment sequence on sending chain.
+`,
+ Example: fmt.Sprintf("%s query %s %s unreceived-packets [port-id] [channel-id] --sequences=1,2,3", version.AppName, host.ModuleName, types.SubModuleName),
+ Args: cobra.ExactArgs(2),
+ RunE: func(cmd *cobra.Command, args []string) error {
+ clientCtx, err := client.GetClientQueryContext(cmd)
+ if err != nil {
+ return err
+ }
+ queryClient := types.NewQueryClient(clientCtx)
+
+ seqSlice, err := cmd.Flags().GetInt64Slice(flagSequences)
+ if err != nil {
+ return err
+ }
+
+ seqs := make([]uint64, len(seqSlice))
+ for i := range seqSlice {
+ seqs[i] = uint64(seqSlice[i])
+ }
+
+ req := &types.QueryUnreceivedPacketsRequest{
+ PortId: args[0],
+ ChannelId: args[1],
+ PacketCommitmentSequences: seqs,
+ }
+
+ res, err := queryClient.UnreceivedPackets(cmd.Context(), req)
+ if err != nil {
+ return err
+ }
+
+ return clientCtx.PrintProto(res)
+ },
+ }
+
+ cmd.Flags().Int64Slice(flagSequences, []int64{}, "comma separated list of packet sequence numbers")
+ flags.AddQueryFlagsToCmd(cmd)
+
+ return cmd
+}
+
+// GetCmdQueryUnreceivedAcks defines the command to query all the unreceived acks on the original sending chain
+func GetCmdQueryUnreceivedAcks() *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "unreceived-acks [port-id] [channel-id]",
+ Short: "Query all the unreceived acks associated with a channel",
+ Long: `Given a list of acknowledgement sequences from counterparty, determine if an ack on the counterparty chain has been received on the executing chain.
+
+The return value represents:
+- Unreceived packet acknowledgement: packet commitment exists on original sending (executing) chain and ack exists on receiving chain.
+`,
+ Example: fmt.Sprintf("%s query %s %s unreceived-acks [port-id] [channel-id] --sequences=1,2,3", version.AppName, host.ModuleName, types.SubModuleName),
+ Args: cobra.ExactArgs(2),
+ RunE: func(cmd *cobra.Command, args []string) error {
+ clientCtx, err := client.GetClientQueryContext(cmd)
+ if err != nil {
+ return err
+ }
+ queryClient := types.NewQueryClient(clientCtx)
+
+ seqSlice, err := cmd.Flags().GetInt64Slice(flagSequences)
+ if err != nil {
+ return err
+ }
+
+ seqs := make([]uint64, len(seqSlice))
+ for i := range seqSlice {
+ seqs[i] = uint64(seqSlice[i])
+ }
+
+ req := &types.QueryUnreceivedAcksRequest{
+ PortId: args[0],
+ ChannelId: args[1],
+ PacketAckSequences: seqs,
+ }
+
+ res, err := queryClient.UnreceivedAcks(cmd.Context(), req)
+ if err != nil {
+ return err
+ }
+
+ return clientCtx.PrintProto(res)
+ },
+ }
+
+ cmd.Flags().Int64Slice(flagSequences, []int64{}, "comma separated list of packet sequence numbers")
+ flags.AddQueryFlagsToCmd(cmd)
+
+ return cmd
+}
+
+// GetCmdQueryNextSequenceReceive defines the command to query the next receive sequence for a given channel
+func GetCmdQueryNextSequenceReceive() *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "next-sequence-receive [port-id] [channel-id]",
+ Short: "Query a next receive sequence",
+ Long: "Query the next receive sequence for a given channel",
+ Example: fmt.Sprintf(
+ "%s query %s %s next-sequence-receive [port-id] [channel-id]", version.AppName, host.ModuleName, types.SubModuleName,
+ ),
+ Args: cobra.ExactArgs(2),
+ RunE: func(cmd *cobra.Command, args []string) error {
+ clientCtx, err := client.GetClientQueryContext(cmd)
+ if err != nil {
+ return err
+ }
+ portID := args[0]
+ channelID := args[1]
+ prove, _ := cmd.Flags().GetBool(flags.FlagProve)
+
+ sequenceRes, err := utils.QueryNextSequenceReceive(clientCtx, portID, channelID, prove)
+ if err != nil {
+ return err
+ }
+
+ clientCtx = clientCtx.WithHeight(int64(sequenceRes.ProofHeight.RevisionHeight))
+ return clientCtx.PrintProto(sequenceRes)
+ },
+ }
+
+ cmd.Flags().Bool(flags.FlagProve, true, "show proofs for the query results")
+ flags.AddQueryFlagsToCmd(cmd)
+
+ return cmd
+}
diff --git a/core/04-channel/client/cli/tx.go b/core/04-channel/client/cli/tx.go
new file mode 100644
index 00000000..20afe622
--- /dev/null
+++ b/core/04-channel/client/cli/tx.go
@@ -0,0 +1,288 @@
+package cli
+
+import (
+ "strings"
+
+ "github.com/spf13/cobra"
+ "github.com/spf13/pflag"
+
+ "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/client/flags"
+ "github.com/cosmos/cosmos-sdk/client/tx"
+ "github.com/cosmos/cosmos-sdk/types/msgservice"
+ ibctransfertypes "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types"
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ connectionutils "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/client/utils"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+)
+
+// IBC Channel flags
+const (
+ FlagOrdered = "ordered"
+ FlagIBCVersion = "ibc-version"
+)
+
+// NewChannelOpenInitCmd returns the command to create a MsgChannelOpenInit transaction
+func NewChannelOpenInitCmd() *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "open-init [port-id] [counterparty-port-id] [connection-hops]",
+ Short: "Creates and sends a ChannelOpenInit message",
+ Args: cobra.ExactArgs(3),
+ RunE: func(cmd *cobra.Command, args []string) error {
+ clientCtx, err := client.GetClientTxContext(cmd)
+ if err != nil {
+ return err
+ }
+ portID := args[0]
+ counterpartyPortID := args[1]
+ hops := strings.Split(args[2], "/")
+ order := channelOrder(cmd.Flags())
+ version, _ := cmd.Flags().GetString(FlagIBCVersion)
+
+ msg := types.NewMsgChannelOpenInit(
+ portID, version, order, hops,
+ counterpartyPortID, clientCtx.GetFromAddress(),
+ )
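+			// svcMsgClientConn is an in-memory client connection: the msgClient call
+			// below records the message locally instead of sending it, so the
+			// collected msgs can be signed and broadcast by the tx CLI helper.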
+ svcMsgClientConn := &msgservice.ServiceMsgClientConn{}
+ msgClient := types.NewMsgClient(svcMsgClientConn)
+ _, err = msgClient.ChannelOpenInit(cmd.Context(), msg)
+ if err != nil {
+ return err
+ }
+
+ return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), svcMsgClientConn.GetMsgs()...)
+ },
+ }
+
+ cmd.Flags().Bool(FlagOrdered, true, "Pass flag for opening ordered channels")
+ cmd.Flags().String(FlagIBCVersion, ibctransfertypes.Version, "IBC application version")
+ flags.AddTxFlagsToCmd(cmd)
+
+ return cmd
+}
+
+// NewChannelOpenTryCmd returns the command to create a MsgChannelOpenTry transaction
+func NewChannelOpenTryCmd() *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "open-try [port-id] [channel-id] [counterparty-port-id] [counterparty-channel-id] [connection-hops] [/path/to/proof_init.json] [proof-height]",
+ Short: "Creates and sends a ChannelOpenTry message",
+ Args: cobra.ExactArgs(7),
+ RunE: func(cmd *cobra.Command, args []string) error {
+ clientCtx, err := client.GetClientTxContext(cmd)
+ if err != nil {
+ return err
+ }
+ portID := args[0]
+ channelID := args[1]
+ counterpartyPortID := args[2]
+ counterpartyChannelID := args[3]
+ hops := strings.Split(args[4], "/")
+ order := channelOrder(cmd.Flags())
+
+ // TODO: Differentiate between channel and counterparty versions.
+ version, _ := cmd.Flags().GetString(FlagIBCVersion)
+
+ proofInit, err := connectionutils.ParseProof(clientCtx.LegacyAmino, args[5])
+ if err != nil {
+ return err
+ }
+
+ proofHeight, err := clienttypes.ParseHeight(args[6])
+ if err != nil {
+ return err
+ }
+
+ msg := types.NewMsgChannelOpenTry(
+ portID, channelID, version, order, hops,
+ counterpartyPortID, counterpartyChannelID, version,
+ proofInit, proofHeight, clientCtx.GetFromAddress(),
+ )
+ svcMsgClientConn := &msgservice.ServiceMsgClientConn{}
+ msgClient := types.NewMsgClient(svcMsgClientConn)
+ _, err = msgClient.ChannelOpenTry(cmd.Context(), msg)
+ if err != nil {
+ return err
+ }
+
+ return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), svcMsgClientConn.GetMsgs()...)
+ },
+ }
+
+ cmd.Flags().Bool(FlagOrdered, true, "Pass flag for opening ordered channels")
+ cmd.Flags().String(FlagIBCVersion, ibctransfertypes.Version, "IBC application version")
+ flags.AddTxFlagsToCmd(cmd)
+
+ return cmd
+}
+
+// NewChannelOpenAckCmd returns the command to create a MsgChannelOpenAck transaction
+func NewChannelOpenAckCmd() *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "open-ack [port-id] [channel-id] [counterparty-channel-id] [/path/to/proof_try.json] [proof-height]",
+ Short: "Creates and sends a ChannelOpenAck message",
+ Args: cobra.ExactArgs(5),
+ RunE: func(cmd *cobra.Command, args []string) error {
+ clientCtx, err := client.GetClientTxContext(cmd)
+ if err != nil {
+ return err
+ }
+ portID := args[0]
+ channelID := args[1]
+ counterpartyChannelID := args[2]
+
+ // TODO: Differentiate between channel and counterparty versions.
+ version, _ := cmd.Flags().GetString(FlagIBCVersion)
+
+ proofTry, err := connectionutils.ParseProof(clientCtx.LegacyAmino, args[3])
+ if err != nil {
+ return err
+ }
+
+ proofHeight, err := clienttypes.ParseHeight(args[4])
+ if err != nil {
+ return err
+ }
+
+ msg := types.NewMsgChannelOpenAck(
+ portID, channelID, counterpartyChannelID, version, proofTry, proofHeight, clientCtx.GetFromAddress(),
+ )
+ svcMsgClientConn := &msgservice.ServiceMsgClientConn{}
+ msgClient := types.NewMsgClient(svcMsgClientConn)
+ _, err = msgClient.ChannelOpenAck(cmd.Context(), msg)
+ if err != nil {
+ return err
+ }
+
+ return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), svcMsgClientConn.GetMsgs()...)
+ },
+ }
+ cmd.Flags().String(FlagIBCVersion, ibctransfertypes.Version, "IBC application version")
+ flags.AddTxFlagsToCmd(cmd)
+
+ return cmd
+}
+
+// NewChannelOpenConfirmCmd returns the command to create a MsgChannelOpenConfirm transaction
+func NewChannelOpenConfirmCmd() *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "open-confirm [port-id] [channel-id] [/path/to/proof_ack.json] [proof-height]",
+ Short: "Creates and sends a ChannelOpenConfirm message",
+ Args: cobra.ExactArgs(4),
+ RunE: func(cmd *cobra.Command, args []string) error {
+ clientCtx, err := client.GetClientTxContext(cmd)
+ if err != nil {
+ return err
+ }
+ portID := args[0]
+ channelID := args[1]
+
+ proofAck, err := connectionutils.ParseProof(clientCtx.LegacyAmino, args[2])
+ if err != nil {
+ return err
+ }
+
+ proofHeight, err := clienttypes.ParseHeight(args[3])
+ if err != nil {
+ return err
+ }
+
+ msg := types.NewMsgChannelOpenConfirm(
+ portID, channelID, proofAck, proofHeight, clientCtx.GetFromAddress(),
+ )
+ svcMsgClientConn := &msgservice.ServiceMsgClientConn{}
+ msgClient := types.NewMsgClient(svcMsgClientConn)
+ _, err = msgClient.ChannelOpenConfirm(cmd.Context(), msg)
+ if err != nil {
+ return err
+ }
+
+ return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), svcMsgClientConn.GetMsgs()...)
+ },
+ }
+
+ flags.AddTxFlagsToCmd(cmd)
+
+ return cmd
+}
+
+// NewChannelCloseInitCmd returns the command to create a MsgChannelCloseInit transaction
+func NewChannelCloseInitCmd() *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "close-init [port-id] [channel-id]",
+ Short: "Creates and sends a ChannelCloseInit message",
+ Args: cobra.ExactArgs(2),
+ RunE: func(cmd *cobra.Command, args []string) error {
+ clientCtx, err := client.GetClientTxContext(cmd)
+ if err != nil {
+ return err
+ }
+ portID := args[0]
+ channelID := args[1]
+
+ msg := types.NewMsgChannelCloseInit(portID, channelID, clientCtx.GetFromAddress())
+ svcMsgClientConn := &msgservice.ServiceMsgClientConn{}
+ msgClient := types.NewMsgClient(svcMsgClientConn)
+ _, err = msgClient.ChannelCloseInit(cmd.Context(), msg)
+ if err != nil {
+ return err
+ }
+
+ return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), svcMsgClientConn.GetMsgs()...)
+ },
+ }
+
+ flags.AddTxFlagsToCmd(cmd)
+
+ return cmd
+}
+
+// NewChannelCloseConfirmCmd returns the command to create a MsgChannelCloseConfirm transaction
+func NewChannelCloseConfirmCmd() *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "close-confirm [port-id] [channel-id] [/path/to/proof_init.json] [proof-height]",
+ Short: "Creates and sends a ChannelCloseConfirm message",
+ Args: cobra.ExactArgs(4),
+ RunE: func(cmd *cobra.Command, args []string) error {
+ clientCtx, err := client.GetClientTxContext(cmd)
+ if err != nil {
+ return err
+ }
+ portID := args[0]
+ channelID := args[1]
+
+ proofInit, err := connectionutils.ParseProof(clientCtx.LegacyAmino, args[2])
+ if err != nil {
+ return err
+ }
+
+ proofHeight, err := clienttypes.ParseHeight(args[3])
+ if err != nil {
+ return err
+ }
+
+ msg := types.NewMsgChannelCloseConfirm(
+ portID, channelID, proofInit, proofHeight, clientCtx.GetFromAddress(),
+ )
+ svcMsgClientConn := &msgservice.ServiceMsgClientConn{}
+ msgClient := types.NewMsgClient(svcMsgClientConn)
+ _, err = msgClient.ChannelCloseConfirm(cmd.Context(), msg)
+ if err != nil {
+ return err
+ }
+
+ return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), svcMsgClientConn.GetMsgs()...)
+ },
+ }
+
+ flags.AddTxFlagsToCmd(cmd)
+
+ return cmd
+}
+
+func channelOrder(fs *pflag.FlagSet) types.Order {
+ if ordered, _ := fs.GetBool(FlagOrdered); ordered {
+ return types.ORDERED
+ }
+
+ return types.UNORDERED
+}
diff --git a/core/04-channel/client/utils/utils.go b/core/04-channel/client/utils/utils.go
new file mode 100644
index 00000000..167e05d0
--- /dev/null
+++ b/core/04-channel/client/utils/utils.go
@@ -0,0 +1,301 @@
+package utils
+
+import (
+ "context"
+ "encoding/binary"
+
+ "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/codec"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ clientutils "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/client/utils"
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+ host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+ ibcclient "github.com/cosmos/cosmos-sdk/x/ibc/core/client"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+)
+
+// QueryChannel returns a channel end.
+// If prove is true, it performs an ABCI store query in order to retrieve the merkle proof. Otherwise,
+// it uses the gRPC query client.
+func QueryChannel(
+ clientCtx client.Context, portID, channelID string, prove bool,
+) (*types.QueryChannelResponse, error) {
+ if prove {
+ return queryChannelABCI(clientCtx, portID, channelID)
+ }
+
+ queryClient := types.NewQueryClient(clientCtx)
+ req := &types.QueryChannelRequest{
+ PortId: portID,
+ ChannelId: channelID,
+ }
+
+ return queryClient.Channel(context.Background(), req)
+}
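+
+// queryChannelUsageSketch is a minimal illustrative sketch (the helper and the
+// "transfer"/"channel-0" identifiers are assumptions for exposition): the
+// prove flag selects between the plain gRPC path and the ABCI path, and only
+// the latter attaches a merkle proof to the response.
+func queryChannelUsageSketch(clientCtx client.Context) error {
+	// gRPC path: no proof is attached to the response
+	if _, err := QueryChannel(clientCtx, "transfer", "channel-0", false); err != nil {
+		return err
+	}
+
+	// ABCI path: the response carries the proof and the height it was queried at
+	res, err := QueryChannel(clientCtx, "transfer", "channel-0", true)
+	if err != nil {
+		return err
+	}
+	_ = res.Proof
+	return nil
+}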
+
+func queryChannelABCI(clientCtx client.Context, portID, channelID string) (*types.QueryChannelResponse, error) {
+ key := host.ChannelKey(portID, channelID)
+
+ value, proofBz, proofHeight, err := ibcclient.QueryTendermintProof(clientCtx, key)
+ if err != nil {
+ return nil, err
+ }
+
+ // check if channel exists
+ if len(value) == 0 {
+ return nil, sdkerrors.Wrapf(types.ErrChannelNotFound, "portID (%s), channelID (%s)", portID, channelID)
+ }
+
+ cdc := codec.NewProtoCodec(clientCtx.InterfaceRegistry)
+
+ var channel types.Channel
+ if err := cdc.UnmarshalBinaryBare(value, &channel); err != nil {
+ return nil, err
+ }
+
+ return types.NewQueryChannelResponse(channel, proofBz, proofHeight), nil
+}
+
+// QueryChannelClientState returns the ClientState of a channel end. If
+// prove is true, it performs an ABCI store query in order to retrieve the
+// merkle proof. Otherwise, it uses the gRPC query client.
+func QueryChannelClientState(
+ clientCtx client.Context, portID, channelID string, prove bool,
+) (*types.QueryChannelClientStateResponse, error) {
+
+ queryClient := types.NewQueryClient(clientCtx)
+ req := &types.QueryChannelClientStateRequest{
+ PortId: portID,
+ ChannelId: channelID,
+ }
+
+ res, err := queryClient.ChannelClientState(context.Background(), req)
+ if err != nil {
+ return nil, err
+ }
+
+ if prove {
+ clientStateRes, err := clientutils.QueryClientStateABCI(clientCtx, res.IdentifiedClientState.ClientId)
+ if err != nil {
+ return nil, err
+ }
+
+ // use client state returned from ABCI query in case query height differs
+ identifiedClientState := clienttypes.IdentifiedClientState{
+ ClientId: res.IdentifiedClientState.ClientId,
+ ClientState: clientStateRes.ClientState,
+ }
+ res = types.NewQueryChannelClientStateResponse(identifiedClientState, clientStateRes.Proof, clientStateRes.ProofHeight)
+ }
+
+ return res, nil
+}
+
+// QueryChannelConsensusState returns the ConsensusState of a channel end. If
+// prove is true, it performs an ABCI store query in order to retrieve the
+// merkle proof. Otherwise, it uses the gRPC query client.
+func QueryChannelConsensusState(
+ clientCtx client.Context, portID, channelID string, height clienttypes.Height, prove bool,
+) (*types.QueryChannelConsensusStateResponse, error) {
+
+ queryClient := types.NewQueryClient(clientCtx)
+ req := &types.QueryChannelConsensusStateRequest{
+ PortId: portID,
+ ChannelId: channelID,
+ RevisionNumber: height.RevisionNumber,
+ RevisionHeight: height.RevisionHeight,
+ }
+
+ res, err := queryClient.ChannelConsensusState(context.Background(), req)
+ if err != nil {
+ return nil, err
+ }
+
+ if prove {
+ consensusStateRes, err := clientutils.QueryConsensusStateABCI(clientCtx, res.ClientId, height)
+ if err != nil {
+ return nil, err
+ }
+
+ res = types.NewQueryChannelConsensusStateResponse(res.ClientId, consensusStateRes.ConsensusState, height, consensusStateRes.Proof, consensusStateRes.ProofHeight)
+ }
+
+ return res, nil
+}
+
+// QueryLatestConsensusState uses the channel Querier to return the
+// latest ConsensusState given the source port ID and source channel ID.
+func QueryLatestConsensusState(
+ clientCtx client.Context, portID, channelID string,
+) (exported.ConsensusState, clienttypes.Height, clienttypes.Height, error) {
+ clientRes, err := QueryChannelClientState(clientCtx, portID, channelID, false)
+ if err != nil {
+ return nil, clienttypes.Height{}, clienttypes.Height{}, err
+ }
+
+ var clientState exported.ClientState
+ if err := clientCtx.InterfaceRegistry.UnpackAny(clientRes.IdentifiedClientState.ClientState, &clientState); err != nil {
+ return nil, clienttypes.Height{}, clienttypes.Height{}, err
+ }
+
+ clientHeight, ok := clientState.GetLatestHeight().(clienttypes.Height)
+ if !ok {
+ return nil, clienttypes.Height{}, clienttypes.Height{}, sdkerrors.Wrapf(sdkerrors.ErrInvalidHeight, "invalid height type. expected type: %T, got: %T",
+ clienttypes.Height{}, clientHeight)
+ }
+ res, err := QueryChannelConsensusState(clientCtx, portID, channelID, clientHeight, false)
+ if err != nil {
+ return nil, clienttypes.Height{}, clienttypes.Height{}, err
+ }
+
+ var consensusState exported.ConsensusState
+ if err := clientCtx.InterfaceRegistry.UnpackAny(res.ConsensusState, &consensusState); err != nil {
+ return nil, clienttypes.Height{}, clienttypes.Height{}, err
+ }
+
+ return consensusState, clientHeight, res.ProofHeight, nil
+}
+
+// QueryNextSequenceReceive returns the next sequence receive.
+// If prove is true, it performs an ABCI store query in order to retrieve the merkle proof. Otherwise,
+// it uses the gRPC query client.
+func QueryNextSequenceReceive(
+ clientCtx client.Context, portID, channelID string, prove bool,
+) (*types.QueryNextSequenceReceiveResponse, error) {
+ if prove {
+ return queryNextSequenceRecvABCI(clientCtx, portID, channelID)
+ }
+
+ queryClient := types.NewQueryClient(clientCtx)
+ req := &types.QueryNextSequenceReceiveRequest{
+ PortId: portID,
+ ChannelId: channelID,
+ }
+
+ return queryClient.NextSequenceReceive(context.Background(), req)
+}
+
+func queryNextSequenceRecvABCI(clientCtx client.Context, portID, channelID string) (*types.QueryNextSequenceReceiveResponse, error) {
+ key := host.NextSequenceRecvKey(portID, channelID)
+
+ value, proofBz, proofHeight, err := ibcclient.QueryTendermintProof(clientCtx, key)
+ if err != nil {
+ return nil, err
+ }
+
+ // check if next sequence receive exists
+ if len(value) == 0 {
+ return nil, sdkerrors.Wrapf(types.ErrChannelNotFound, "portID (%s), channelID (%s)", portID, channelID)
+ }
+
+ sequence := binary.BigEndian.Uint64(value)
+
+ return types.NewQueryNextSequenceReceiveResponse(sequence, proofBz, proofHeight), nil
+}
+
+// QueryPacketCommitment returns a packet commitment.
+// If prove is true, it performs an ABCI store query in order to retrieve the merkle proof. Otherwise,
+// it uses the gRPC query client.
+func QueryPacketCommitment(
+ clientCtx client.Context, portID, channelID string,
+ sequence uint64, prove bool,
+) (*types.QueryPacketCommitmentResponse, error) {
+ if prove {
+ return queryPacketCommitmentABCI(clientCtx, portID, channelID, sequence)
+ }
+
+ queryClient := types.NewQueryClient(clientCtx)
+ req := &types.QueryPacketCommitmentRequest{
+ PortId: portID,
+ ChannelId: channelID,
+ Sequence: sequence,
+ }
+
+ return queryClient.PacketCommitment(context.Background(), req)
+}
+
+func queryPacketCommitmentABCI(
+ clientCtx client.Context, portID, channelID string, sequence uint64,
+) (*types.QueryPacketCommitmentResponse, error) {
+ key := host.PacketCommitmentKey(portID, channelID, sequence)
+
+ value, proofBz, proofHeight, err := ibcclient.QueryTendermintProof(clientCtx, key)
+ if err != nil {
+ return nil, err
+ }
+
+ // check if packet commitment exists
+ if len(value) == 0 {
+ return nil, sdkerrors.Wrapf(types.ErrPacketCommitmentNotFound, "portID (%s), channelID (%s), sequence (%d)", portID, channelID, sequence)
+ }
+
+ return types.NewQueryPacketCommitmentResponse(value, proofBz, proofHeight), nil
+}
+
+// QueryPacketReceipt returns data about a packet receipt.
+// If prove is true, it performs an ABCI store query in order to retrieve the merkle proof. Otherwise,
+// it uses the gRPC query client.
+func QueryPacketReceipt(
+ clientCtx client.Context, portID, channelID string,
+ sequence uint64, prove bool,
+) (*types.QueryPacketReceiptResponse, error) {
+ if prove {
+ return queryPacketReceiptABCI(clientCtx, portID, channelID, sequence)
+ }
+
+ queryClient := types.NewQueryClient(clientCtx)
+ req := &types.QueryPacketReceiptRequest{
+ PortId: portID,
+ ChannelId: channelID,
+ Sequence: sequence,
+ }
+
+ return queryClient.PacketReceipt(context.Background(), req)
+}
+
+func queryPacketReceiptABCI(
+ clientCtx client.Context, portID, channelID string, sequence uint64,
+) (*types.QueryPacketReceiptResponse, error) {
+ key := host.PacketReceiptKey(portID, channelID, sequence)
+
+ value, proofBz, proofHeight, err := ibcclient.QueryTendermintProof(clientCtx, key)
+ if err != nil {
+ return nil, err
+ }
+
+ return types.NewQueryPacketReceiptResponse(value != nil, proofBz, proofHeight), nil
+}
+
+// QueryPacketAcknowledgement returns the data about a packet acknowledgement.
+// If prove is true, it performs an ABCI store query in order to retrieve the merkle proof. Otherwise,
+// it uses the gRPC query client.
+func QueryPacketAcknowledgement(clientCtx client.Context, portID, channelID string, sequence uint64, prove bool) (*types.QueryPacketAcknowledgementResponse, error) {
+ if prove {
+ return queryPacketAcknowledgementABCI(clientCtx, portID, channelID, sequence)
+ }
+
+ queryClient := types.NewQueryClient(clientCtx)
+ req := &types.QueryPacketAcknowledgementRequest{
+ PortId: portID,
+ ChannelId: channelID,
+ Sequence: sequence,
+ }
+
+ return queryClient.PacketAcknowledgement(context.Background(), req)
+}
+
+func queryPacketAcknowledgementABCI(clientCtx client.Context, portID, channelID string, sequence uint64) (*types.QueryPacketAcknowledgementResponse, error) {
+ key := host.PacketAcknowledgementKey(portID, channelID, sequence)
+
+ value, proofBz, proofHeight, err := ibcclient.QueryTendermintProof(clientCtx, key)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(value) == 0 {
+ return nil, sdkerrors.Wrapf(types.ErrInvalidAcknowledgement, "portID (%s), channelID (%s), sequence (%d)", portID, channelID, sequence)
+ }
+
+ return types.NewQueryPacketAcknowledgementResponse(value, proofBz, proofHeight), nil
+}
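Editor's note (not part of the patch): the helpers above form the CLI/relayer-facing query layer, where the prove flag selects between a plain gRPC query and an ABCI store query that also returns the merkle proof. Below is a minimal sketch of how a caller might fetch a packet commitment together with its proof. The package name, alias channelutils, identifiers, and the wrapping function are illustrative assumptions; only the helper signature comes from the code above.

package relayerexample

import (
	"fmt"

	"github.com/cosmos/cosmos-sdk/client"
	// assumed import path for the channel client utils file shown above
	channelutils "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/client/utils"
)

// proveCommitment is a hypothetical helper: it fetches the commitment for one
// packet along with the merkle proof needed when relaying MsgRecvPacket.
func proveCommitment(clientCtx client.Context, portID, channelID string, seq uint64) error {
	// prove=true routes through queryPacketCommitmentABCI, so Proof and
	// ProofHeight are populated in the response.
	res, err := channelutils.QueryPacketCommitment(clientCtx, portID, channelID, seq, true)
	if err != nil {
		return err
	}
	fmt.Printf("commitment=%X proof height=%s\n", res.Commitment, res.ProofHeight)
	return nil
}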
diff --git a/core/04-channel/genesis.go b/core/04-channel/genesis.go
new file mode 100644
index 00000000..07fad47d
--- /dev/null
+++ b/core/04-channel/genesis.go
@@ -0,0 +1,49 @@
+package channel
+
+import (
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/keeper"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+)
+
+// InitGenesis initializes the ibc channel submodule's state from a provided genesis
+// state.
+func InitGenesis(ctx sdk.Context, k keeper.Keeper, gs types.GenesisState) {
+ for _, channel := range gs.Channels {
+ ch := types.NewChannel(channel.State, channel.Ordering, channel.Counterparty, channel.ConnectionHops, channel.Version)
+ k.SetChannel(ctx, channel.PortId, channel.ChannelId, ch)
+ }
+ for _, ack := range gs.Acknowledgements {
+ k.SetPacketAcknowledgement(ctx, ack.PortId, ack.ChannelId, ack.Sequence, ack.Data)
+ }
+ for _, commitment := range gs.Commitments {
+ k.SetPacketCommitment(ctx, commitment.PortId, commitment.ChannelId, commitment.Sequence, commitment.Data)
+ }
+ for _, receipt := range gs.Receipts {
+ k.SetPacketReceipt(ctx, receipt.PortId, receipt.ChannelId, receipt.Sequence)
+ }
+ for _, ss := range gs.SendSequences {
+ k.SetNextSequenceSend(ctx, ss.PortId, ss.ChannelId, ss.Sequence)
+ }
+ for _, rs := range gs.RecvSequences {
+ k.SetNextSequenceRecv(ctx, rs.PortId, rs.ChannelId, rs.Sequence)
+ }
+ for _, as := range gs.AckSequences {
+ k.SetNextSequenceAck(ctx, as.PortId, as.ChannelId, as.Sequence)
+ }
+ k.SetNextChannelSequence(ctx, gs.NextChannelSequence)
+}
+
+// ExportGenesis returns the ibc channel submodule's exported genesis.
+func ExportGenesis(ctx sdk.Context, k keeper.Keeper) types.GenesisState {
+ return types.GenesisState{
+		Channels:            k.GetAllChannels(ctx),
+		Acknowledgements:    k.GetAllPacketAcks(ctx),
+		Commitments:         k.GetAllPacketCommitments(ctx),
+		Receipts:            k.GetAllPacketReceipts(ctx),
+		SendSequences:       k.GetAllPacketSendSeqs(ctx),
+		RecvSequences:       k.GetAllPacketRecvSeqs(ctx),
+		AckSequences:        k.GetAllPacketAckSeqs(ctx),
+		NextChannelSequence: k.GetNextChannelSequence(ctx),
+ }
+}
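Editor's note (not part of the patch): a minimal sketch of a GenesisState value of the shape InitGenesis consumes and ExportGenesis produces. It assumes the 04-channel types package is imported as types, as in the file above; port/channel/connection identifiers, the version string, and the sequence numbers are illustrative only.

// exampleGenesisState builds a hand-written channel genesis with one open
// unordered channel and one outstanding packet commitment.
func exampleGenesisState() types.GenesisState {
	return types.GenesisState{
		Channels: []types.IdentifiedChannel{
			types.NewIdentifiedChannel("transfer", "channel-0", types.NewChannel(
				types.OPEN, types.UNORDERED,
				types.NewCounterparty("transfer", "channel-1"),
				[]string{"connection-0"}, "ics20-1",
			)),
		},
		// one packet sent but not yet acknowledged
		Commitments: []types.PacketState{
			types.NewPacketState("transfer", "channel-0", 1, []byte("commitment-hash")),
		},
		SendSequences:       []types.PacketSequence{types.NewPacketSequence("transfer", "channel-0", 2)},
		RecvSequences:       []types.PacketSequence{types.NewPacketSequence("transfer", "channel-0", 1)},
		AckSequences:        []types.PacketSequence{types.NewPacketSequence("transfer", "channel-0", 1)},
		NextChannelSequence: 1,
	}
}

// InitGenesis(ctx, k, exampleGenesisState()) writes this state into the keeper;
// ExportGenesis(ctx, k) should read back an equivalent GenesisState.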
diff --git a/core/04-channel/handler.go b/core/04-channel/handler.go
new file mode 100644
index 00000000..375c3526
--- /dev/null
+++ b/core/04-channel/handler.go
@@ -0,0 +1,186 @@
+package channel
+
+import (
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/keeper"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+)
+
+// HandleMsgChannelOpenInit defines the sdk.Handler for MsgChannelOpenInit
+func HandleMsgChannelOpenInit(ctx sdk.Context, k keeper.Keeper, portCap *capabilitytypes.Capability, msg *types.MsgChannelOpenInit) (*sdk.Result, string, *capabilitytypes.Capability, error) {
+ channelID, capKey, err := k.ChanOpenInit(
+ ctx, msg.Channel.Ordering, msg.Channel.ConnectionHops, msg.PortId,
+ portCap, msg.Channel.Counterparty, msg.Channel.Version,
+ )
+ if err != nil {
+ return nil, "", nil, sdkerrors.Wrap(err, "channel handshake open init failed")
+ }
+
+ ctx.EventManager().EmitEvents(sdk.Events{
+ sdk.NewEvent(
+ types.EventTypeChannelOpenInit,
+ sdk.NewAttribute(types.AttributeKeyPortID, msg.PortId),
+ sdk.NewAttribute(types.AttributeKeyChannelID, channelID),
+ sdk.NewAttribute(types.AttributeCounterpartyPortID, msg.Channel.Counterparty.PortId),
+ sdk.NewAttribute(types.AttributeCounterpartyChannelID, msg.Channel.Counterparty.ChannelId),
+ sdk.NewAttribute(types.AttributeKeyConnectionID, msg.Channel.ConnectionHops[0]),
+ ),
+ sdk.NewEvent(
+ sdk.EventTypeMessage,
+ sdk.NewAttribute(sdk.AttributeKeyModule, types.AttributeValueCategory),
+ ),
+ })
+
+ return &sdk.Result{
+ Events: ctx.EventManager().Events().ToABCIEvents(),
+ }, channelID, capKey, nil
+}
+
+// HandleMsgChannelOpenTry defines the sdk.Handler for MsgChannelOpenTry
+func HandleMsgChannelOpenTry(ctx sdk.Context, k keeper.Keeper, portCap *capabilitytypes.Capability, msg *types.MsgChannelOpenTry) (*sdk.Result, string, *capabilitytypes.Capability, error) {
+ channelID, capKey, err := k.ChanOpenTry(ctx, msg.Channel.Ordering, msg.Channel.ConnectionHops, msg.PortId, msg.PreviousChannelId,
+ portCap, msg.Channel.Counterparty, msg.Channel.Version, msg.CounterpartyVersion, msg.ProofInit, msg.ProofHeight,
+ )
+ if err != nil {
+ return nil, "", nil, sdkerrors.Wrap(err, "channel handshake open try failed")
+ }
+
+ ctx.EventManager().EmitEvents(sdk.Events{
+ sdk.NewEvent(
+ types.EventTypeChannelOpenTry,
+ sdk.NewAttribute(types.AttributeKeyPortID, msg.PortId),
+ sdk.NewAttribute(types.AttributeKeyChannelID, channelID),
+ sdk.NewAttribute(types.AttributeCounterpartyPortID, msg.Channel.Counterparty.PortId),
+ sdk.NewAttribute(types.AttributeCounterpartyChannelID, msg.Channel.Counterparty.ChannelId),
+ sdk.NewAttribute(types.AttributeKeyConnectionID, msg.Channel.ConnectionHops[0]),
+ ),
+ sdk.NewEvent(
+ sdk.EventTypeMessage,
+ sdk.NewAttribute(sdk.AttributeKeyModule, types.AttributeValueCategory),
+ ),
+ })
+
+ return &sdk.Result{
+ Events: ctx.EventManager().Events().ToABCIEvents(),
+ }, channelID, capKey, nil
+}
+
+// HandleMsgChannelOpenAck defines the sdk.Handler for MsgChannelOpenAck
+func HandleMsgChannelOpenAck(ctx sdk.Context, k keeper.Keeper, channelCap *capabilitytypes.Capability, msg *types.MsgChannelOpenAck) (*sdk.Result, error) {
+ err := k.ChanOpenAck(
+ ctx, msg.PortId, msg.ChannelId, channelCap, msg.CounterpartyVersion, msg.CounterpartyChannelId, msg.ProofTry, msg.ProofHeight,
+ )
+ if err != nil {
+ return nil, sdkerrors.Wrap(err, "channel handshake open ack failed")
+ }
+
+ channel, _ := k.GetChannel(ctx, msg.PortId, msg.ChannelId)
+
+ ctx.EventManager().EmitEvents(sdk.Events{
+ sdk.NewEvent(
+ types.EventTypeChannelOpenAck,
+ sdk.NewAttribute(types.AttributeKeyPortID, msg.PortId),
+ sdk.NewAttribute(types.AttributeKeyChannelID, msg.ChannelId),
+ sdk.NewAttribute(types.AttributeCounterpartyPortID, channel.Counterparty.PortId),
+ sdk.NewAttribute(types.AttributeCounterpartyChannelID, channel.Counterparty.ChannelId),
+ sdk.NewAttribute(types.AttributeKeyConnectionID, channel.ConnectionHops[0]),
+ ),
+ sdk.NewEvent(
+ sdk.EventTypeMessage,
+ sdk.NewAttribute(sdk.AttributeKeyModule, types.AttributeValueCategory),
+ ),
+ })
+
+ return &sdk.Result{
+ Events: ctx.EventManager().Events().ToABCIEvents(),
+ }, nil
+}
+
+// HandleMsgChannelOpenConfirm defines the sdk.Handler for MsgChannelOpenConfirm
+func HandleMsgChannelOpenConfirm(ctx sdk.Context, k keeper.Keeper, channelCap *capabilitytypes.Capability, msg *types.MsgChannelOpenConfirm) (*sdk.Result, error) {
+ err := k.ChanOpenConfirm(ctx, msg.PortId, msg.ChannelId, channelCap, msg.ProofAck, msg.ProofHeight)
+ if err != nil {
+ return nil, sdkerrors.Wrap(err, "channel handshake open confirm failed")
+ }
+
+ channel, _ := k.GetChannel(ctx, msg.PortId, msg.ChannelId)
+
+ ctx.EventManager().EmitEvents(sdk.Events{
+ sdk.NewEvent(
+ types.EventTypeChannelOpenConfirm,
+ sdk.NewAttribute(types.AttributeKeyPortID, msg.PortId),
+ sdk.NewAttribute(types.AttributeKeyChannelID, msg.ChannelId),
+ sdk.NewAttribute(types.AttributeCounterpartyPortID, channel.Counterparty.PortId),
+ sdk.NewAttribute(types.AttributeCounterpartyChannelID, channel.Counterparty.ChannelId),
+ sdk.NewAttribute(types.AttributeKeyConnectionID, channel.ConnectionHops[0]),
+ ),
+ sdk.NewEvent(
+ sdk.EventTypeMessage,
+ sdk.NewAttribute(sdk.AttributeKeyModule, types.AttributeValueCategory),
+ ),
+ })
+
+ return &sdk.Result{
+ Events: ctx.EventManager().Events().ToABCIEvents(),
+ }, nil
+}
+
+// HandleMsgChannelCloseInit defines the sdk.Handler for MsgChannelCloseInit
+func HandleMsgChannelCloseInit(ctx sdk.Context, k keeper.Keeper, channelCap *capabilitytypes.Capability, msg *types.MsgChannelCloseInit) (*sdk.Result, error) {
+ err := k.ChanCloseInit(ctx, msg.PortId, msg.ChannelId, channelCap)
+ if err != nil {
+ return nil, sdkerrors.Wrap(err, "channel handshake close init failed")
+ }
+
+ channel, _ := k.GetChannel(ctx, msg.PortId, msg.ChannelId)
+
+ ctx.EventManager().EmitEvents(sdk.Events{
+ sdk.NewEvent(
+ types.EventTypeChannelCloseInit,
+ sdk.NewAttribute(types.AttributeKeyPortID, msg.PortId),
+ sdk.NewAttribute(types.AttributeKeyChannelID, msg.ChannelId),
+ sdk.NewAttribute(types.AttributeCounterpartyPortID, channel.Counterparty.PortId),
+ sdk.NewAttribute(types.AttributeCounterpartyChannelID, channel.Counterparty.ChannelId),
+ sdk.NewAttribute(types.AttributeKeyConnectionID, channel.ConnectionHops[0]),
+ ),
+ sdk.NewEvent(
+ sdk.EventTypeMessage,
+ sdk.NewAttribute(sdk.AttributeKeyModule, types.AttributeValueCategory),
+ ),
+ })
+
+ return &sdk.Result{
+ Events: ctx.EventManager().Events().ToABCIEvents(),
+ }, nil
+}
+
+// HandleMsgChannelCloseConfirm defines the sdk.Handler for MsgChannelCloseConfirm
+func HandleMsgChannelCloseConfirm(ctx sdk.Context, k keeper.Keeper, channelCap *capabilitytypes.Capability, msg *types.MsgChannelCloseConfirm) (*sdk.Result, error) {
+ err := k.ChanCloseConfirm(ctx, msg.PortId, msg.ChannelId, channelCap, msg.ProofInit, msg.ProofHeight)
+ if err != nil {
+ return nil, sdkerrors.Wrap(err, "channel handshake close confirm failed")
+ }
+
+ channel, _ := k.GetChannel(ctx, msg.PortId, msg.ChannelId)
+
+ ctx.EventManager().EmitEvents(sdk.Events{
+ sdk.NewEvent(
+ types.EventTypeChannelCloseConfirm,
+ sdk.NewAttribute(types.AttributeKeyPortID, msg.PortId),
+ sdk.NewAttribute(types.AttributeKeyChannelID, msg.ChannelId),
+ sdk.NewAttribute(types.AttributeCounterpartyPortID, channel.Counterparty.PortId),
+ sdk.NewAttribute(types.AttributeCounterpartyChannelID, channel.Counterparty.ChannelId),
+ sdk.NewAttribute(types.AttributeKeyConnectionID, channel.ConnectionHops[0]),
+ ),
+ sdk.NewEvent(
+ sdk.EventTypeMessage,
+ sdk.NewAttribute(sdk.AttributeKeyModule, types.AttributeValueCategory),
+ ),
+ })
+
+ return &sdk.Result{
+ Events: ctx.EventManager().Events().ToABCIEvents(),
+ }, nil
+}
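Editor's note (not part of the patch): these handlers are intended to be called from the core IBC message handler after the relevant capability has been looked up. The sketch below shows the dispatch shape for the handlers that only need the channel capability if it lived in this package; capability retrieval and the open-init/open-try cases, which additionally return a channel ID and a new capability, are elided, and the function name is an assumption.

func handleChannelMsg(ctx sdk.Context, k keeper.Keeper, channelCap *capabilitytypes.Capability, msg sdk.Msg) (*sdk.Result, error) {
	switch msg := msg.(type) {
	case *types.MsgChannelOpenAck:
		return HandleMsgChannelOpenAck(ctx, k, channelCap, msg)
	case *types.MsgChannelOpenConfirm:
		return HandleMsgChannelOpenConfirm(ctx, k, channelCap, msg)
	case *types.MsgChannelCloseInit:
		return HandleMsgChannelCloseInit(ctx, k, channelCap, msg)
	case *types.MsgChannelCloseConfirm:
		return HandleMsgChannelCloseConfirm(ctx, k, channelCap, msg)
	default:
		return nil, sdkerrors.Wrapf(sdkerrors.ErrUnknownRequest, "unrecognized channel message type: %T", msg)
	}
}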
diff --git a/core/04-channel/keeper/grpc_query.go b/core/04-channel/keeper/grpc_query.go
new file mode 100644
index 00000000..30df0a33
--- /dev/null
+++ b/core/04-channel/keeper/grpc_query.go
@@ -0,0 +1,486 @@
+package keeper
+
+import (
+ "context"
+ "strconv"
+ "strings"
+
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+
+ "github.com/cosmos/cosmos-sdk/store/prefix"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ "github.com/cosmos/cosmos-sdk/types/query"
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+ host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+)
+
+var _ types.QueryServer = (*Keeper)(nil)
+
+// Channel implements the Query/Channel gRPC method
+func (q Keeper) Channel(c context.Context, req *types.QueryChannelRequest) (*types.QueryChannelResponse, error) {
+ if req == nil {
+ return nil, status.Error(codes.InvalidArgument, "empty request")
+ }
+
+ if err := validategRPCRequest(req.PortId, req.ChannelId); err != nil {
+ return nil, err
+ }
+
+ ctx := sdk.UnwrapSDKContext(c)
+ channel, found := q.GetChannel(ctx, req.PortId, req.ChannelId)
+ if !found {
+ return nil, status.Error(
+ codes.NotFound,
+ sdkerrors.Wrapf(types.ErrChannelNotFound, "port-id: %s, channel-id %s", req.PortId, req.ChannelId).Error(),
+ )
+ }
+
+ selfHeight := clienttypes.GetSelfHeight(ctx)
+ return types.NewQueryChannelResponse(channel, nil, selfHeight), nil
+}
+
+// Channels implements the Query/Channels gRPC method
+func (q Keeper) Channels(c context.Context, req *types.QueryChannelsRequest) (*types.QueryChannelsResponse, error) {
+ if req == nil {
+ return nil, status.Error(codes.InvalidArgument, "empty request")
+ }
+
+ ctx := sdk.UnwrapSDKContext(c)
+
+ channels := []*types.IdentifiedChannel{}
+ store := prefix.NewStore(ctx.KVStore(q.storeKey), []byte(host.KeyChannelEndPrefix))
+
+ pageRes, err := query.Paginate(store, req.Pagination, func(key, value []byte) error {
+ var result types.Channel
+ if err := q.cdc.UnmarshalBinaryBare(value, &result); err != nil {
+ return err
+ }
+
+ portID, channelID, err := host.ParseChannelPath(string(key))
+ if err != nil {
+ return err
+ }
+
+ identifiedChannel := types.NewIdentifiedChannel(portID, channelID, result)
+ channels = append(channels, &identifiedChannel)
+ return nil
+ })
+
+ if err != nil {
+ return nil, err
+ }
+
+ selfHeight := clienttypes.GetSelfHeight(ctx)
+ return &types.QueryChannelsResponse{
+ Channels: channels,
+ Pagination: pageRes,
+ Height: selfHeight,
+ }, nil
+}
+
+// ConnectionChannels implements the Query/ConnectionChannels gRPC method
+func (q Keeper) ConnectionChannels(c context.Context, req *types.QueryConnectionChannelsRequest) (*types.QueryConnectionChannelsResponse, error) {
+ if req == nil {
+ return nil, status.Error(codes.InvalidArgument, "empty request")
+ }
+
+ if err := host.ConnectionIdentifierValidator(req.Connection); err != nil {
+ return nil, status.Error(codes.InvalidArgument, err.Error())
+ }
+
+ ctx := sdk.UnwrapSDKContext(c)
+
+ channels := []*types.IdentifiedChannel{}
+ store := prefix.NewStore(ctx.KVStore(q.storeKey), []byte(host.KeyChannelEndPrefix))
+
+ pageRes, err := query.Paginate(store, req.Pagination, func(key, value []byte) error {
+ var result types.Channel
+ if err := q.cdc.UnmarshalBinaryBare(value, &result); err != nil {
+ return err
+ }
+
+ // ignore channel and continue to the next item if the connection is
+ // different than the requested one
+ if result.ConnectionHops[0] != req.Connection {
+ return nil
+ }
+
+ portID, channelID, err := host.ParseChannelPath(string(key))
+ if err != nil {
+ return err
+ }
+
+ identifiedChannel := types.NewIdentifiedChannel(portID, channelID, result)
+ channels = append(channels, &identifiedChannel)
+ return nil
+ })
+
+ if err != nil {
+ return nil, err
+ }
+
+ selfHeight := clienttypes.GetSelfHeight(ctx)
+ return &types.QueryConnectionChannelsResponse{
+ Channels: channels,
+ Pagination: pageRes,
+ Height: selfHeight,
+ }, nil
+}
+
+// ChannelClientState implements the Query/ChannelClientState gRPC method
+func (q Keeper) ChannelClientState(c context.Context, req *types.QueryChannelClientStateRequest) (*types.QueryChannelClientStateResponse, error) {
+ if req == nil {
+ return nil, status.Error(codes.InvalidArgument, "empty request")
+ }
+
+ if err := validategRPCRequest(req.PortId, req.ChannelId); err != nil {
+ return nil, err
+ }
+
+ ctx := sdk.UnwrapSDKContext(c)
+
+ clientID, clientState, err := q.GetChannelClientState(ctx, req.PortId, req.ChannelId)
+ if err != nil {
+ return nil, status.Error(codes.NotFound, err.Error())
+ }
+
+ identifiedClientState := clienttypes.NewIdentifiedClientState(clientID, clientState)
+
+ selfHeight := clienttypes.GetSelfHeight(ctx)
+ return types.NewQueryChannelClientStateResponse(identifiedClientState, nil, selfHeight), nil
+}
+
+// ChannelConsensusState implements the Query/ChannelConsensusState gRPC method
+func (q Keeper) ChannelConsensusState(c context.Context, req *types.QueryChannelConsensusStateRequest) (*types.QueryChannelConsensusStateResponse, error) {
+ if req == nil {
+ return nil, status.Error(codes.InvalidArgument, "empty request")
+ }
+
+ if err := validategRPCRequest(req.PortId, req.ChannelId); err != nil {
+ return nil, err
+ }
+
+ ctx := sdk.UnwrapSDKContext(c)
+
+ channel, found := q.GetChannel(ctx, req.PortId, req.ChannelId)
+ if !found {
+ return nil, status.Error(
+ codes.NotFound,
+ sdkerrors.Wrapf(types.ErrChannelNotFound, "port-id: %s, channel-id %s", req.PortId, req.ChannelId).Error(),
+ )
+ }
+
+ connection, found := q.connectionKeeper.GetConnection(ctx, channel.ConnectionHops[0])
+ if !found {
+ return nil, status.Error(
+ codes.NotFound,
+ sdkerrors.Wrapf(connectiontypes.ErrConnectionNotFound, "connection-id: %s", channel.ConnectionHops[0]).Error(),
+ )
+ }
+
+ consHeight := clienttypes.NewHeight(req.RevisionNumber, req.RevisionHeight)
+ consensusState, found := q.clientKeeper.GetClientConsensusState(ctx, connection.ClientId, consHeight)
+ if !found {
+ return nil, status.Error(
+ codes.NotFound,
+ sdkerrors.Wrapf(clienttypes.ErrConsensusStateNotFound, "client-id: %s", connection.ClientId).Error(),
+ )
+ }
+
+ anyConsensusState, err := clienttypes.PackConsensusState(consensusState)
+ if err != nil {
+ return nil, status.Error(codes.Internal, err.Error())
+ }
+
+ selfHeight := clienttypes.GetSelfHeight(ctx)
+ return types.NewQueryChannelConsensusStateResponse(connection.ClientId, anyConsensusState, consHeight, nil, selfHeight), nil
+}
+
+// PacketCommitment implements the Query/PacketCommitment gRPC method
+func (q Keeper) PacketCommitment(c context.Context, req *types.QueryPacketCommitmentRequest) (*types.QueryPacketCommitmentResponse, error) {
+ if req == nil {
+ return nil, status.Error(codes.InvalidArgument, "empty request")
+ }
+
+ if err := validategRPCRequest(req.PortId, req.ChannelId); err != nil {
+ return nil, err
+ }
+
+ if req.Sequence == 0 {
+ return nil, status.Error(codes.InvalidArgument, "packet sequence cannot be 0")
+ }
+
+ ctx := sdk.UnwrapSDKContext(c)
+
+ commitmentBz := q.GetPacketCommitment(ctx, req.PortId, req.ChannelId, req.Sequence)
+ if len(commitmentBz) == 0 {
+ return nil, status.Error(codes.NotFound, "packet commitment hash not found")
+ }
+
+ selfHeight := clienttypes.GetSelfHeight(ctx)
+ return types.NewQueryPacketCommitmentResponse(commitmentBz, nil, selfHeight), nil
+}
+
+// PacketCommitments implements the Query/PacketCommitments gRPC method
+func (q Keeper) PacketCommitments(c context.Context, req *types.QueryPacketCommitmentsRequest) (*types.QueryPacketCommitmentsResponse, error) {
+ if req == nil {
+ return nil, status.Error(codes.InvalidArgument, "empty request")
+ }
+
+ if err := validategRPCRequest(req.PortId, req.ChannelId); err != nil {
+ return nil, err
+ }
+
+ ctx := sdk.UnwrapSDKContext(c)
+
+ commitments := []*types.PacketState{}
+ store := prefix.NewStore(ctx.KVStore(q.storeKey), []byte(host.PacketCommitmentPrefixPath(req.PortId, req.ChannelId)))
+
+ pageRes, err := query.Paginate(store, req.Pagination, func(key, value []byte) error {
+ keySplit := strings.Split(string(key), "/")
+
+ sequence, err := strconv.ParseUint(keySplit[len(keySplit)-1], 10, 64)
+ if err != nil {
+ return err
+ }
+
+ commitment := types.NewPacketState(req.PortId, req.ChannelId, sequence, value)
+ commitments = append(commitments, &commitment)
+ return nil
+ })
+
+ if err != nil {
+ return nil, err
+ }
+
+ selfHeight := clienttypes.GetSelfHeight(ctx)
+ return &types.QueryPacketCommitmentsResponse{
+ Commitments: commitments,
+ Pagination: pageRes,
+ Height: selfHeight,
+ }, nil
+}
+
+// PacketReceipt implements the Query/PacketReceipt gRPC method
+func (q Keeper) PacketReceipt(c context.Context, req *types.QueryPacketReceiptRequest) (*types.QueryPacketReceiptResponse, error) {
+ if req == nil {
+ return nil, status.Error(codes.InvalidArgument, "empty request")
+ }
+
+ if err := validategRPCRequest(req.PortId, req.ChannelId); err != nil {
+ return nil, err
+ }
+
+ if req.Sequence == 0 {
+ return nil, status.Error(codes.InvalidArgument, "packet sequence cannot be 0")
+ }
+
+ ctx := sdk.UnwrapSDKContext(c)
+
+ _, recvd := q.GetPacketReceipt(ctx, req.PortId, req.ChannelId, req.Sequence)
+
+ selfHeight := clienttypes.GetSelfHeight(ctx)
+ return types.NewQueryPacketReceiptResponse(recvd, nil, selfHeight), nil
+}
+
+// PacketAcknowledgement implements the Query/PacketAcknowledgement gRPC method
+func (q Keeper) PacketAcknowledgement(c context.Context, req *types.QueryPacketAcknowledgementRequest) (*types.QueryPacketAcknowledgementResponse, error) {
+ if req == nil {
+ return nil, status.Error(codes.InvalidArgument, "empty request")
+ }
+
+ if err := validategRPCRequest(req.PortId, req.ChannelId); err != nil {
+ return nil, err
+ }
+
+ if req.Sequence == 0 {
+ return nil, status.Error(codes.InvalidArgument, "packet sequence cannot be 0")
+ }
+
+ ctx := sdk.UnwrapSDKContext(c)
+
+ acknowledgementBz, found := q.GetPacketAcknowledgement(ctx, req.PortId, req.ChannelId, req.Sequence)
+ if !found || len(acknowledgementBz) == 0 {
+ return nil, status.Error(codes.NotFound, "packet acknowledgement hash not found")
+ }
+
+ selfHeight := clienttypes.GetSelfHeight(ctx)
+ return types.NewQueryPacketAcknowledgementResponse(acknowledgementBz, nil, selfHeight), nil
+}
+
+// PacketAcknowledgements implements the Query/PacketAcknowledgements gRPC method
+func (q Keeper) PacketAcknowledgements(c context.Context, req *types.QueryPacketAcknowledgementsRequest) (*types.QueryPacketAcknowledgementsResponse, error) {
+ if req == nil {
+ return nil, status.Error(codes.InvalidArgument, "empty request")
+ }
+
+ if err := validategRPCRequest(req.PortId, req.ChannelId); err != nil {
+ return nil, err
+ }
+
+ ctx := sdk.UnwrapSDKContext(c)
+
+ acks := []*types.PacketState{}
+ store := prefix.NewStore(ctx.KVStore(q.storeKey), []byte(host.PacketAcknowledgementPrefixPath(req.PortId, req.ChannelId)))
+
+ pageRes, err := query.Paginate(store, req.Pagination, func(key, value []byte) error {
+ keySplit := strings.Split(string(key), "/")
+
+ sequence, err := strconv.ParseUint(keySplit[len(keySplit)-1], 10, 64)
+ if err != nil {
+ return err
+ }
+
+ ack := types.NewPacketState(req.PortId, req.ChannelId, sequence, value)
+ acks = append(acks, &ack)
+ return nil
+ })
+
+ if err != nil {
+ return nil, err
+ }
+
+ selfHeight := clienttypes.GetSelfHeight(ctx)
+ return &types.QueryPacketAcknowledgementsResponse{
+ Acknowledgements: acks,
+ Pagination: pageRes,
+ Height: selfHeight,
+ }, nil
+}
+
+// UnreceivedPackets implements the Query/UnreceivedPackets gRPC method. Given
+// a list of counterparty packet commitments, the querier checks if the packet
+// has already been received by checking if a receipt exists on this
+// chain for the packet sequence. All packets that haven't been received yet
+// are returned in the response.
+// Usage: To use this method correctly, first query all packet commitments on
+// the sending chain using the Query/PacketCommitments gRPC method.
+// Then input the returned sequences into the QueryUnreceivedPacketsRequest
+// and send the request to the Query/UnreceivedPackets method on the **receiving**
+// chain. This gRPC method will then return the list of packet sequences that
+// are yet to be received on the receiving chain.
+//
+// NOTE: The querier makes the assumption that the provided list of packet
+// commitments is correct and will not function properly if the list
+// is not up to date. Ideally the query height should equal the latest height
+// on the counterparty's client which represents this chain.
+func (q Keeper) UnreceivedPackets(c context.Context, req *types.QueryUnreceivedPacketsRequest) (*types.QueryUnreceivedPacketsResponse, error) {
+ if req == nil {
+ return nil, status.Error(codes.InvalidArgument, "empty request")
+ }
+
+ if err := validategRPCRequest(req.PortId, req.ChannelId); err != nil {
+ return nil, err
+ }
+
+ ctx := sdk.UnwrapSDKContext(c)
+
+ var unreceivedSequences = []uint64{}
+
+ for i, seq := range req.PacketCommitmentSequences {
+ if seq == 0 {
+ return nil, status.Errorf(codes.InvalidArgument, "packet sequence %d cannot be 0", i)
+ }
+
+ // if packet receipt exists on the receiving chain, then packet has already been received
+ if _, found := q.GetPacketReceipt(ctx, req.PortId, req.ChannelId, seq); !found {
+ unreceivedSequences = append(unreceivedSequences, seq)
+ }
+
+ }
+
+ selfHeight := clienttypes.GetSelfHeight(ctx)
+ return &types.QueryUnreceivedPacketsResponse{
+ Sequences: unreceivedSequences,
+ Height: selfHeight,
+ }, nil
+}
+
+// UnreceivedAcks implements the Query/UnreceivedAcks gRPC method. Given
+// a list of counterparty packet acknowledgements, the querier checks whether the
+// acknowledgement has already been relayed back by checking if the packet commitment
+// still exists on this chain (the original sender) for the packet sequence.
+// All acknowledgements that have not yet been relayed back are returned in the response.
+// Usage: To use this method correctly, first query all packet acknowledgements on
+// the original receiving chain (i.e. the chain that wrote the acks) using the Query/PacketAcknowledgements gRPC method.
+// Then input the returned sequences into the QueryUnreceivedAcksRequest
+// and send the request to the Query/UnreceivedAcks method on the **original sending**
+// chain. This gRPC method will then return the list of packet sequences whose
+// acknowledgements are already written on the receiving chain but have not yet
+// been relayed back to the sending chain.
+//
+// NOTE: The querier makes the assumption that the provided list of packet
+// acknowledgements is correct and will not function properly if the list
+// is not up to date. Ideally the query height should equal the latest height
+// on the counterparty's client which represents this chain.
+func (q Keeper) UnreceivedAcks(c context.Context, req *types.QueryUnreceivedAcksRequest) (*types.QueryUnreceivedAcksResponse, error) {
+ if req == nil {
+ return nil, status.Error(codes.InvalidArgument, "empty request")
+ }
+
+ if err := validategRPCRequest(req.PortId, req.ChannelId); err != nil {
+ return nil, err
+ }
+
+ ctx := sdk.UnwrapSDKContext(c)
+
+ var unreceivedSequences = []uint64{}
+
+ for i, seq := range req.PacketAckSequences {
+ if seq == 0 {
+ return nil, status.Errorf(codes.InvalidArgument, "packet sequence %d cannot be 0", i)
+ }
+
+ // if packet commitment still exists on the original sending chain, then packet ack has not been received
+ // since processing the ack will delete the packet commitment
+ if commitment := q.GetPacketCommitment(ctx, req.PortId, req.ChannelId, seq); len(commitment) != 0 {
+ unreceivedSequences = append(unreceivedSequences, seq)
+ }
+
+ }
+
+ selfHeight := clienttypes.GetSelfHeight(ctx)
+ return &types.QueryUnreceivedAcksResponse{
+ Sequences: unreceivedSequences,
+ Height: selfHeight,
+ }, nil
+}
+
+// NextSequenceReceive implements the Query/NextSequenceReceive gRPC method
+func (q Keeper) NextSequenceReceive(c context.Context, req *types.QueryNextSequenceReceiveRequest) (*types.QueryNextSequenceReceiveResponse, error) {
+ if req == nil {
+ return nil, status.Error(codes.InvalidArgument, "empty request")
+ }
+
+ if err := validategRPCRequest(req.PortId, req.ChannelId); err != nil {
+ return nil, err
+ }
+
+ ctx := sdk.UnwrapSDKContext(c)
+ sequence, found := q.GetNextSequenceRecv(ctx, req.PortId, req.ChannelId)
+ if !found {
+ return nil, status.Error(
+ codes.NotFound,
+ sdkerrors.Wrapf(types.ErrSequenceReceiveNotFound, "port-id: %s, channel-id %s", req.PortId, req.ChannelId).Error(),
+ )
+ }
+
+ selfHeight := clienttypes.GetSelfHeight(ctx)
+ return types.NewQueryNextSequenceReceiveResponse(sequence, nil, selfHeight), nil
+}
+
+func validategRPCRequest(portID, channelID string) error {
+ if err := host.PortIdentifierValidator(portID); err != nil {
+ return status.Error(codes.InvalidArgument, err.Error())
+ }
+
+ if err := host.ChannelIdentifierValidator(channelID); err != nil {
+ return status.Error(codes.InvalidArgument, err.Error())
+ }
+
+ return nil
+}
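Editor's note (not part of the patch): a minimal sketch of the two-step relayer flow described in the UnreceivedPackets comment above, using the generated gRPC query clients. The package name, the helper name, queryClientA/queryClientB, and the port and channel identifiers are illustrative assumptions; the request/response types and client methods come from the query service above.

package relayerexample

import (
	"context"

	"github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
)

// unreceivedOnB asks chain B which of chain A's outstanding packet
// commitments it has not yet received.
func unreceivedOnB(ctx context.Context, queryClientA, queryClientB types.QueryClient) ([]uint64, error) {
	// Step 1: list packet commitments on the sending chain (chain A).
	commitsRes, err := queryClientA.PacketCommitments(ctx, &types.QueryPacketCommitmentsRequest{
		PortId:    "transfer",
		ChannelId: "channel-0",
	})
	if err != nil {
		return nil, err
	}

	seqs := make([]uint64, len(commitsRes.Commitments))
	for i, c := range commitsRes.Commitments {
		seqs[i] = c.Sequence
	}

	// Step 2: feed those sequences to the receiving chain (chain B), which
	// returns the subset for which it holds no packet receipt.
	unreceivedRes, err := queryClientB.UnreceivedPackets(ctx, &types.QueryUnreceivedPacketsRequest{
		PortId:                    "transfer",  // counterparty port on chain B
		ChannelId:                 "channel-1", // counterparty channel on chain B
		PacketCommitmentSequences: seqs,
	})
	if err != nil {
		return nil, err
	}
	return unreceivedRes.Sequences, nil
}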
diff --git a/core/04-channel/keeper/grpc_query_test.go b/core/04-channel/keeper/grpc_query_test.go
new file mode 100644
index 00000000..689c241c
--- /dev/null
+++ b/core/04-channel/keeper/grpc_query_test.go
@@ -0,0 +1,1376 @@
+package keeper_test
+
+import (
+ "fmt"
+
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/types/query"
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+)
+
+func (suite *KeeperTestSuite) TestQueryChannel() {
+ var (
+ req *types.QueryChannelRequest
+ expChannel types.Channel
+ )
+
+ testCases := []struct {
+ msg string
+ malleate func()
+ expPass bool
+ }{
+ {
+ "empty request",
+ func() {
+ req = nil
+ },
+ false,
+ },
+ {
+ "invalid port ID",
+ func() {
+ req = &types.QueryChannelRequest{
+ PortId: "",
+ ChannelId: "test-channel-id",
+ }
+ },
+ false,
+ },
+ {
+ "invalid channel ID",
+ func() {
+ req = &types.QueryChannelRequest{
+ PortId: "test-port-id",
+ ChannelId: "",
+ }
+ },
+ false,
+ },
+ {"channel not found",
+ func() {
+ req = &types.QueryChannelRequest{
+ PortId: "test-port-id",
+ ChannelId: "test-channel-id",
+ }
+ },
+ false,
+ },
+ {
+ "success",
+ func() {
+ _, _, connA, connB := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
+ // init channel
+ channelA, _, err := suite.coordinator.ChanOpenInit(suite.chainA, suite.chainB, connA, connB, ibctesting.MockPort, ibctesting.MockPort, types.ORDERED)
+ suite.Require().NoError(err)
+
+ expChannel = suite.chainA.GetChannel(channelA)
+
+ req = &types.QueryChannelRequest{
+ PortId: channelA.PortID,
+ ChannelId: channelA.ID,
+ }
+ },
+ true,
+ },
+ }
+
+ for _, tc := range testCases {
+ suite.Run(fmt.Sprintf("Case %s", tc.msg), func() {
+ suite.SetupTest() // reset
+
+ tc.malleate()
+ ctx := sdk.WrapSDKContext(suite.chainA.GetContext())
+
+ res, err := suite.chainA.QueryServer.Channel(ctx, req)
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ suite.Require().NotNil(res)
+ suite.Require().Equal(&expChannel, res.Channel)
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+}
+
+func (suite *KeeperTestSuite) TestQueryChannels() {
+ var (
+ req *types.QueryChannelsRequest
+ expChannels = []*types.IdentifiedChannel{}
+ )
+
+ testCases := []struct {
+ msg string
+ malleate func()
+ expPass bool
+ }{
+ {
+ "empty request",
+ func() {
+ req = nil
+ },
+ false,
+ },
+ {
+ "empty pagination",
+ func() {
+ req = &types.QueryChannelsRequest{}
+ },
+ true,
+ },
+ {
+ "success",
+ func() {
+ _, _, connA0, connB0, testchannel0, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ // channel0 on first connection on chainA
+ counterparty0 := types.Counterparty{
+ PortId: connB0.Channels[0].PortID,
+ ChannelId: connB0.Channels[0].ID,
+ }
+
+ // channel1 is second channel on first connection on chainA
+ testchannel1, _ := suite.coordinator.CreateMockChannels(suite.chainA, suite.chainB, connA0, connB0, types.ORDERED)
+ counterparty1 := types.Counterparty{
+ PortId: connB0.Channels[1].PortID,
+ ChannelId: connB0.Channels[1].ID,
+ }
+
+ channel0 := types.NewChannel(
+ types.OPEN, types.UNORDERED,
+ counterparty0, []string{connA0.ID}, testchannel0.Version,
+ )
+ channel1 := types.NewChannel(
+ types.OPEN, types.ORDERED,
+ counterparty1, []string{connA0.ID}, testchannel1.Version,
+ )
+
+ idCh0 := types.NewIdentifiedChannel(testchannel0.PortID, testchannel0.ID, channel0)
+ idCh1 := types.NewIdentifiedChannel(testchannel1.PortID, testchannel1.ID, channel1)
+
+ expChannels = []*types.IdentifiedChannel{&idCh0, &idCh1}
+
+ req = &types.QueryChannelsRequest{
+ Pagination: &query.PageRequest{
+ Key: nil,
+ Limit: 2,
+ CountTotal: true,
+ },
+ }
+ },
+ true,
+ },
+ }
+
+ for _, tc := range testCases {
+ suite.Run(fmt.Sprintf("Case %s", tc.msg), func() {
+ suite.SetupTest() // reset
+
+ tc.malleate()
+ ctx := sdk.WrapSDKContext(suite.chainA.GetContext())
+
+ res, err := suite.chainA.QueryServer.Channels(ctx, req)
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ suite.Require().NotNil(res)
+ suite.Require().Equal(expChannels, res.Channels)
+ suite.Require().Equal(len(expChannels), int(res.Pagination.Total))
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+}
+
+func (suite *KeeperTestSuite) TestQueryConnectionChannels() {
+ var (
+ req *types.QueryConnectionChannelsRequest
+ expChannels = []*types.IdentifiedChannel{}
+ )
+
+ testCases := []struct {
+ msg string
+ malleate func()
+ expPass bool
+ }{
+ {
+ "empty request",
+ func() {
+ req = nil
+ },
+ false,
+ },
+ {
+ "invalid connection ID",
+ func() {
+ req = &types.QueryConnectionChannelsRequest{
+ Connection: "",
+ }
+ },
+ false,
+ },
+ {
+ "success",
+ func() {
+ _, _, connA0, connB0, testchannel0, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ // channel0 on first connection on chainA
+ counterparty0 := types.Counterparty{
+ PortId: connB0.Channels[0].PortID,
+ ChannelId: connB0.Channels[0].ID,
+ }
+
+ // channel1 is second channel on first connection on chainA
+ testchannel1, _ := suite.coordinator.CreateMockChannels(suite.chainA, suite.chainB, connA0, connB0, types.ORDERED)
+ counterparty1 := types.Counterparty{
+ PortId: connB0.Channels[1].PortID,
+ ChannelId: connB0.Channels[1].ID,
+ }
+
+ channel0 := types.NewChannel(
+ types.OPEN, types.UNORDERED,
+ counterparty0, []string{connA0.ID}, testchannel0.Version,
+ )
+ channel1 := types.NewChannel(
+ types.OPEN, types.ORDERED,
+ counterparty1, []string{connA0.ID}, testchannel1.Version,
+ )
+
+ idCh0 := types.NewIdentifiedChannel(testchannel0.PortID, testchannel0.ID, channel0)
+ idCh1 := types.NewIdentifiedChannel(testchannel1.PortID, testchannel1.ID, channel1)
+
+ expChannels = []*types.IdentifiedChannel{&idCh0, &idCh1}
+
+ req = &types.QueryConnectionChannelsRequest{
+ Connection: connA0.ID,
+ Pagination: &query.PageRequest{
+ Key: nil,
+ Limit: 2,
+ CountTotal: true,
+ },
+ }
+ },
+ true,
+ },
+ {
+ "success, empty response",
+ func() {
+ suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ expChannels = []*types.IdentifiedChannel{}
+ req = &types.QueryConnectionChannelsRequest{
+ Connection: "externalConnID",
+ Pagination: &query.PageRequest{
+ Key: nil,
+ Limit: 2,
+ CountTotal: false,
+ },
+ }
+ },
+ true,
+ },
+ }
+
+ for _, tc := range testCases {
+ suite.Run(fmt.Sprintf("Case %s", tc.msg), func() {
+ suite.SetupTest() // reset
+
+ tc.malleate()
+ ctx := sdk.WrapSDKContext(suite.chainA.GetContext())
+
+ res, err := suite.chainA.QueryServer.ConnectionChannels(ctx, req)
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ suite.Require().NotNil(res)
+ suite.Require().Equal(expChannels, res.Channels)
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+}
+
+func (suite *KeeperTestSuite) TestQueryChannelClientState() {
+ var (
+ req *types.QueryChannelClientStateRequest
+ expIdentifiedClientState clienttypes.IdentifiedClientState
+ )
+
+ testCases := []struct {
+ msg string
+ malleate func()
+ expPass bool
+ }{
+ {
+ "empty request",
+ func() {
+ req = nil
+ },
+ false,
+ },
+ {
+ "invalid port ID",
+ func() {
+ req = &types.QueryChannelClientStateRequest{
+ PortId: "",
+ ChannelId: "test-channel-id",
+ }
+ },
+ false,
+ },
+ {
+ "invalid channel ID",
+ func() {
+ req = &types.QueryChannelClientStateRequest{
+ PortId: "test-port-id",
+ ChannelId: "",
+ }
+ },
+ false,
+ },
+ {
+ "channel not found",
+ func() {
+ req = &types.QueryChannelClientStateRequest{
+ PortId: "test-port-id",
+ ChannelId: "test-channel-id",
+ }
+ },
+ false,
+ },
+ {
+ "connection not found",
+ func() {
+ _, _, _, _, channelA, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+
+ channel := suite.chainA.GetChannel(channelA)
+ // update channel to reference a connection that does not exist
+ channel.ConnectionHops[0] = "doesnotexist"
+
+ // set connection hops to wrong connection ID
+ suite.chainA.App.IBCKeeper.ChannelKeeper.SetChannel(suite.chainA.GetContext(), channelA.PortID, channelA.ID, channel)
+
+ req = &types.QueryChannelClientStateRequest{
+ PortId: channelA.PortID,
+ ChannelId: channelA.ID,
+ }
+ }, false,
+ },
+ {
+ "client state for channel's connection not found",
+ func() {
+ _, _, connA, _, channelA, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+
+ // set connection to empty so clientID is empty
+ suite.chainA.App.IBCKeeper.ConnectionKeeper.SetConnection(suite.chainA.GetContext(), connA.ID, connectiontypes.ConnectionEnd{})
+
+ req = &types.QueryChannelClientStateRequest{
+ PortId: channelA.PortID,
+ ChannelId: channelA.ID,
+ }
+ }, false,
+ },
+ {
+ "success",
+ func() {
+ clientA, _, connA, connB := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
+ // init channel
+ channelA, _, err := suite.coordinator.ChanOpenInit(suite.chainA, suite.chainB, connA, connB, ibctesting.MockPort, ibctesting.MockPort, types.ORDERED)
+ suite.Require().NoError(err)
+
+ expClientState := suite.chainA.GetClientState(clientA)
+ expIdentifiedClientState = clienttypes.NewIdentifiedClientState(clientA, expClientState)
+
+ req = &types.QueryChannelClientStateRequest{
+ PortId: channelA.PortID,
+ ChannelId: channelA.ID,
+ }
+ },
+ true,
+ },
+ }
+
+ for _, tc := range testCases {
+ suite.Run(fmt.Sprintf("Case %s", tc.msg), func() {
+ suite.SetupTest() // reset
+
+ tc.malleate()
+ ctx := sdk.WrapSDKContext(suite.chainA.GetContext())
+
+ res, err := suite.chainA.QueryServer.ChannelClientState(ctx, req)
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ suite.Require().NotNil(res)
+ suite.Require().Equal(&expIdentifiedClientState, res.IdentifiedClientState)
+
+ // ensure UnpackInterfaces is defined
+ cachedValue := res.IdentifiedClientState.ClientState.GetCachedValue()
+ suite.Require().NotNil(cachedValue)
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+}
+
+func (suite *KeeperTestSuite) TestQueryChannelConsensusState() {
+ var (
+ req *types.QueryChannelConsensusStateRequest
+ expConsensusState exported.ConsensusState
+ expClientID string
+ )
+
+ testCases := []struct {
+ msg string
+ malleate func()
+ expPass bool
+ }{
+ {
+ "empty request",
+ func() {
+ req = nil
+ },
+ false,
+ },
+ {
+ "invalid port ID",
+ func() {
+ req = &types.QueryChannelConsensusStateRequest{
+ PortId: "",
+ ChannelId: "test-channel-id",
+ RevisionNumber: 0,
+ RevisionHeight: 1,
+ }
+ },
+ false,
+ },
+ {
+ "invalid channel ID",
+ func() {
+ req = &types.QueryChannelConsensusStateRequest{
+ PortId: "test-port-id",
+ ChannelId: "",
+ RevisionNumber: 0,
+ RevisionHeight: 1,
+ }
+ },
+ false,
+ },
+ {
+ "channel not found",
+ func() {
+ req = &types.QueryChannelConsensusStateRequest{
+ PortId: "test-port-id",
+ ChannelId: "test-channel-id",
+ RevisionNumber: 0,
+ RevisionHeight: 1,
+ }
+ },
+ false,
+ },
+ {
+ "connection not found",
+ func() {
+ _, _, _, _, channelA, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+
+ channel := suite.chainA.GetChannel(channelA)
+ // update channel to reference a connection that does not exist
+ channel.ConnectionHops[0] = "doesnotexist"
+
+ // set connection hops to wrong connection ID
+ suite.chainA.App.IBCKeeper.ChannelKeeper.SetChannel(suite.chainA.GetContext(), channelA.PortID, channelA.ID, channel)
+
+ req = &types.QueryChannelConsensusStateRequest{
+ PortId: channelA.PortID,
+ ChannelId: channelA.ID,
+ RevisionNumber: 0,
+ RevisionHeight: 1,
+ }
+ }, false,
+ },
+ {
+ "consensus state for channel's connection not found",
+ func() {
+ _, _, _, _, channelA, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+
+ req = &types.QueryChannelConsensusStateRequest{
+ PortId: channelA.PortID,
+ ChannelId: channelA.ID,
+ RevisionNumber: 0,
+ RevisionHeight: uint64(suite.chainA.GetContext().BlockHeight()), // use current height
+ }
+ }, false,
+ },
+ {
+ "success",
+ func() {
+ clientA, _, connA, connB := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
+ // init channel
+ channelA, _, err := suite.coordinator.ChanOpenInit(suite.chainA, suite.chainB, connA, connB, ibctesting.MockPort, ibctesting.MockPort, types.ORDERED)
+ suite.Require().NoError(err)
+
+ clientState := suite.chainA.GetClientState(clientA)
+ expConsensusState, _ = suite.chainA.GetConsensusState(clientA, clientState.GetLatestHeight())
+ suite.Require().NotNil(expConsensusState)
+ expClientID = clientA
+
+ req = &types.QueryChannelConsensusStateRequest{
+ PortId: channelA.PortID,
+ ChannelId: channelA.ID,
+ RevisionNumber: clientState.GetLatestHeight().GetRevisionNumber(),
+ RevisionHeight: clientState.GetLatestHeight().GetRevisionHeight(),
+ }
+ },
+ true,
+ },
+ }
+
+ for _, tc := range testCases {
+ suite.Run(fmt.Sprintf("Case %s", tc.msg), func() {
+ suite.SetupTest() // reset
+
+ tc.malleate()
+ ctx := sdk.WrapSDKContext(suite.chainA.GetContext())
+
+ res, err := suite.chainA.QueryServer.ChannelConsensusState(ctx, req)
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ suite.Require().NotNil(res)
+ consensusState, err := clienttypes.UnpackConsensusState(res.ConsensusState)
+ suite.Require().NoError(err)
+ suite.Require().Equal(expConsensusState, consensusState)
+ suite.Require().Equal(expClientID, res.ClientId)
+
+ // ensure UnpackInterfaces is defined
+ cachedValue := res.ConsensusState.GetCachedValue()
+ suite.Require().NotNil(cachedValue)
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+}
+
+func (suite *KeeperTestSuite) TestQueryPacketCommitment() {
+ var (
+ req *types.QueryPacketCommitmentRequest
+ expCommitment []byte
+ )
+
+ testCases := []struct {
+ msg string
+ malleate func()
+ expPass bool
+ }{
+ {
+ "empty request",
+ func() {
+ req = nil
+ },
+ false,
+ },
+ {
+ "invalid port ID",
+ func() {
+ req = &types.QueryPacketCommitmentRequest{
+ PortId: "",
+ ChannelId: "test-channel-id",
+ Sequence: 0,
+ }
+ },
+ false,
+ },
+ {
+ "invalid channel ID",
+ func() {
+ req = &types.QueryPacketCommitmentRequest{
+ PortId: "test-port-id",
+ ChannelId: "",
+ Sequence: 0,
+ }
+ },
+ false,
+ },
+ {"invalid sequence",
+ func() {
+ req = &types.QueryPacketCommitmentRequest{
+ PortId: "test-port-id",
+ ChannelId: "test-channel-id",
+ Sequence: 0,
+ }
+ },
+ false,
+ },
+ {"channel not found",
+ func() {
+ req = &types.QueryPacketCommitmentRequest{
+ PortId: "test-port-id",
+ ChannelId: "test-channel-id",
+ Sequence: 1,
+ }
+ },
+ false,
+ },
+ {
+ "success",
+ func() {
+ _, _, _, _, channelA, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ expCommitment = []byte("hash")
+ suite.chainA.App.IBCKeeper.ChannelKeeper.SetPacketCommitment(suite.chainA.GetContext(), channelA.PortID, channelA.ID, 1, expCommitment)
+
+ req = &types.QueryPacketCommitmentRequest{
+ PortId: channelA.PortID,
+ ChannelId: channelA.ID,
+ Sequence: 1,
+ }
+ },
+ true,
+ },
+ }
+
+ for _, tc := range testCases {
+ suite.Run(fmt.Sprintf("Case %s", tc.msg), func() {
+ suite.SetupTest() // reset
+
+ tc.malleate()
+ ctx := sdk.WrapSDKContext(suite.chainA.GetContext())
+
+ res, err := suite.chainA.QueryServer.PacketCommitment(ctx, req)
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ suite.Require().NotNil(res)
+ suite.Require().Equal(expCommitment, res.Commitment)
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+}
+
+func (suite *KeeperTestSuite) TestQueryPacketCommitments() {
+ var (
+ req *types.QueryPacketCommitmentsRequest
+ expCommitments = []*types.PacketState{}
+ )
+
+ testCases := []struct {
+ msg string
+ malleate func()
+ expPass bool
+ }{
+ {
+ "empty request",
+ func() {
+ req = nil
+ },
+ false,
+ },
+ {
+ "invalid ID",
+ func() {
+ req = &types.QueryPacketCommitmentsRequest{
+ PortId: "",
+ ChannelId: "test-channel-id",
+ }
+ },
+ false,
+ },
+ {
+ "success, empty res",
+ func() {
+ expCommitments = []*types.PacketState{}
+
+ req = &types.QueryPacketCommitmentsRequest{
+ PortId: "test-port-id",
+ ChannelId: "test-channel-id",
+ Pagination: &query.PageRequest{
+ Key: nil,
+ Limit: 2,
+ CountTotal: true,
+ },
+ }
+ },
+ true,
+ },
+ {
+ "success",
+ func() {
+ _, _, _, _, channelA, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+
+ expCommitments = make([]*types.PacketState, 9)
+
+ for i := uint64(0); i < 9; i++ {
+ commitment := types.NewPacketState(channelA.PortID, channelA.ID, i, []byte(fmt.Sprintf("hash_%d", i)))
+ suite.chainA.App.IBCKeeper.ChannelKeeper.SetPacketCommitment(suite.chainA.GetContext(), commitment.PortId, commitment.ChannelId, commitment.Sequence, commitment.Data)
+ expCommitments[i] = &commitment
+ }
+
+ req = &types.QueryPacketCommitmentsRequest{
+ PortId: channelA.PortID,
+ ChannelId: channelA.ID,
+ Pagination: &query.PageRequest{
+ Key: nil,
+ Limit: 11,
+ CountTotal: true,
+ },
+ }
+ },
+ true,
+ },
+ }
+
+ for _, tc := range testCases {
+ suite.Run(fmt.Sprintf("Case %s", tc.msg), func() {
+ suite.SetupTest() // reset
+
+ tc.malleate()
+ ctx := sdk.WrapSDKContext(suite.chainA.GetContext())
+
+ res, err := suite.chainA.QueryServer.PacketCommitments(ctx, req)
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ suite.Require().NotNil(res)
+ suite.Require().Equal(expCommitments, res.Commitments)
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+}
+
+func (suite *KeeperTestSuite) TestQueryPacketReceipt() {
+ var (
+ req *types.QueryPacketReceiptRequest
+ expReceived bool
+ )
+
+ testCases := []struct {
+ msg string
+ malleate func()
+ expPass bool
+ }{
+ {
+ "empty request",
+ func() {
+ req = nil
+ },
+ false,
+ },
+ {
+ "invalid port ID",
+ func() {
+ req = &types.QueryPacketReceiptRequest{
+ PortId: "",
+ ChannelId: "test-channel-id",
+ Sequence: 1,
+ }
+ },
+ false,
+ },
+ {
+ "invalid channel ID",
+ func() {
+ req = &types.QueryPacketReceiptRequest{
+ PortId: "test-port-id",
+ ChannelId: "",
+ Sequence: 1,
+ }
+ },
+ false,
+ },
+ {"invalid sequence",
+ func() {
+ req = &types.QueryPacketReceiptRequest{
+ PortId: "test-port-id",
+ ChannelId: "test-channel-id",
+ Sequence: 0,
+ }
+ },
+ false,
+ },
+ {
+ "success: receipt not found",
+ func() {
+ _, _, _, _, channelA, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ suite.chainA.App.IBCKeeper.ChannelKeeper.SetPacketReceipt(suite.chainA.GetContext(), channelA.PortID, channelA.ID, 1)
+
+ req = &types.QueryPacketReceiptRequest{
+ PortId: channelA.PortID,
+ ChannelId: channelA.ID,
+ Sequence: 3,
+ }
+ expReceived = false
+ },
+ true,
+ },
+ {
+ "success: receipt found",
+ func() {
+ _, _, _, _, channelA, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ suite.chainA.App.IBCKeeper.ChannelKeeper.SetPacketReceipt(suite.chainA.GetContext(), channelA.PortID, channelA.ID, 1)
+
+ req = &types.QueryPacketReceiptRequest{
+ PortId: channelA.PortID,
+ ChannelId: channelA.ID,
+ Sequence: 1,
+ }
+ expReceived = true
+ },
+ true,
+ },
+ }
+
+ for _, tc := range testCases {
+ suite.Run(fmt.Sprintf("Case %s", tc.msg), func() {
+ suite.SetupTest() // reset
+
+ tc.malleate()
+ ctx := sdk.WrapSDKContext(suite.chainA.GetContext())
+
+ res, err := suite.chainA.QueryServer.PacketReceipt(ctx, req)
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ suite.Require().NotNil(res)
+ suite.Require().Equal(expReceived, res.Received)
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+}
+
+func (suite *KeeperTestSuite) TestQueryPacketAcknowledgement() {
+ var (
+ req *types.QueryPacketAcknowledgementRequest
+ expAck []byte
+ )
+
+ testCases := []struct {
+ msg string
+ malleate func()
+ expPass bool
+ }{
+ {
+ "empty request",
+ func() {
+ req = nil
+ },
+ false,
+ },
+ {
+ "invalid port ID",
+ func() {
+ req = &types.QueryPacketAcknowledgementRequest{
+ PortId: "",
+ ChannelId: "test-channel-id",
+ Sequence: 0,
+ }
+ },
+ false,
+ },
+ {
+ "invalid channel ID",
+ func() {
+ req = &types.QueryPacketAcknowledgementRequest{
+ PortId: "test-port-id",
+ ChannelId: "",
+ Sequence: 0,
+ }
+ },
+ false,
+ },
+ {"invalid sequence",
+ func() {
+ req = &types.QueryPacketAcknowledgementRequest{
+ PortId: "test-port-id",
+ ChannelId: "test-channel-id",
+ Sequence: 0,
+ }
+ },
+ false,
+ },
+ {"channel not found",
+ func() {
+ req = &types.QueryPacketAcknowledgementRequest{
+ PortId: "test-port-id",
+ ChannelId: "test-channel-id",
+ Sequence: 1,
+ }
+ },
+ false,
+ },
+ {
+ "success",
+ func() {
+ _, _, _, _, channelA, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ expAck = []byte("hash")
+ suite.chainA.App.IBCKeeper.ChannelKeeper.SetPacketAcknowledgement(suite.chainA.GetContext(), channelA.PortID, channelA.ID, 1, expAck)
+
+ req = &types.QueryPacketAcknowledgementRequest{
+ PortId: channelA.PortID,
+ ChannelId: channelA.ID,
+ Sequence: 1,
+ }
+ },
+ true,
+ },
+ }
+
+ for _, tc := range testCases {
+ suite.Run(fmt.Sprintf("Case %s", tc.msg), func() {
+ suite.SetupTest() // reset
+
+ tc.malleate()
+ ctx := sdk.WrapSDKContext(suite.chainA.GetContext())
+
+ res, err := suite.chainA.QueryServer.PacketAcknowledgement(ctx, req)
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ suite.Require().NotNil(res)
+ suite.Require().Equal(expAck, res.Acknowledgement)
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+}
+
+func (suite *KeeperTestSuite) TestQueryPacketAcknowledgements() {
+ var (
+ req *types.QueryPacketAcknowledgementsRequest
+ expAcknowledgements = []*types.PacketState{}
+ )
+
+ testCases := []struct {
+ msg string
+ malleate func()
+ expPass bool
+ }{
+ {
+ "empty request",
+ func() {
+ req = nil
+ },
+ false,
+ },
+ {
+ "invalid ID",
+ func() {
+ req = &types.QueryPacketAcknowledgementsRequest{
+ PortId: "",
+ ChannelId: "test-channel-id",
+ }
+ },
+ false,
+ },
+ {
+ "success, empty res",
+ func() {
+ expAcknowledgements = []*types.PacketState{}
+
+ req = &types.QueryPacketAcknowledgementsRequest{
+ PortId: "test-port-id",
+ ChannelId: "test-channel-id",
+ Pagination: &query.PageRequest{
+ Key: nil,
+ Limit: 2,
+ CountTotal: true,
+ },
+ }
+ },
+ true,
+ },
+ {
+ "success",
+ func() {
+ _, _, _, _, channelA, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+
+ expAcknowledgements = make([]*types.PacketState, 9)
+
+ for i := uint64(0); i < 9; i++ {
+ ack := types.NewPacketState(channelA.PortID, channelA.ID, i, []byte(fmt.Sprintf("hash_%d", i)))
+ suite.chainA.App.IBCKeeper.ChannelKeeper.SetPacketAcknowledgement(suite.chainA.GetContext(), ack.PortId, ack.ChannelId, ack.Sequence, ack.Data)
+ expAcknowledgements[i] = &ack
+ }
+
+ req = &types.QueryPacketAcknowledgementsRequest{
+ PortId: channelA.PortID,
+ ChannelId: channelA.ID,
+ Pagination: &query.PageRequest{
+ Key: nil,
+ Limit: 11,
+ CountTotal: true,
+ },
+ }
+ },
+ true,
+ },
+ }
+
+ for _, tc := range testCases {
+ suite.Run(fmt.Sprintf("Case %s", tc.msg), func() {
+ suite.SetupTest() // reset
+
+ tc.malleate()
+ ctx := sdk.WrapSDKContext(suite.chainA.GetContext())
+
+ res, err := suite.chainA.QueryServer.PacketAcknowledgements(ctx, req)
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ suite.Require().NotNil(res)
+ suite.Require().Equal(expAcknowledgements, res.Acknowledgements)
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+}
+
+func (suite *KeeperTestSuite) TestQueryUnreceivedPackets() {
+ var (
+ req *types.QueryUnreceivedPacketsRequest
+ expSeq = []uint64{}
+ )
+
+ testCases := []struct {
+ msg string
+ malleate func()
+ expPass bool
+ }{
+ {
+ "empty request",
+ func() {
+ req = nil
+ },
+ false,
+ },
+ {
+ "invalid port ID",
+ func() {
+ req = &types.QueryUnreceivedPacketsRequest{
+ PortId: "",
+ ChannelId: "test-channel-id",
+ }
+ },
+ false,
+ },
+ {
+ "invalid channel ID",
+ func() {
+ req = &types.QueryUnreceivedPacketsRequest{
+ PortId: "test-port-id",
+ ChannelId: "",
+ }
+ },
+ false,
+ },
+ {
+ "invalid seq",
+ func() {
+ req = &types.QueryUnreceivedPacketsRequest{
+ PortId: "test-port-id",
+ ChannelId: "test-channel-id",
+ PacketCommitmentSequences: []uint64{0},
+ }
+ },
+ false,
+ },
+ {
+ "basic success unreceived packet commitments",
+ func() {
+ _, _, _, _, channelA, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+
+ // no ack exists
+
+ expSeq = []uint64{1}
+ req = &types.QueryUnreceivedPacketsRequest{
+ PortId: channelA.PortID,
+ ChannelId: channelA.ID,
+ PacketCommitmentSequences: []uint64{1},
+ }
+ },
+ true,
+ },
+ {
+ "basic success unreceived packet commitments, nothing to relay",
+ func() {
+ _, _, _, _, channelA, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+
+ suite.chainA.App.IBCKeeper.ChannelKeeper.SetPacketReceipt(suite.chainA.GetContext(), channelA.PortID, channelA.ID, 1)
+
+ expSeq = []uint64{}
+ req = &types.QueryUnreceivedPacketsRequest{
+ PortId: channelA.PortID,
+ ChannelId: channelA.ID,
+ PacketCommitmentSequences: []uint64{1},
+ }
+ },
+ true,
+ },
+ {
+ "success multiple unreceived packet commitments",
+ func() {
+ _, _, _, _, channelA, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ expSeq = []uint64{} // reset
+ packetCommitments := []uint64{}
+
+ // set packet receipt for every other sequence
+ for seq := uint64(1); seq < 10; seq++ {
+ packetCommitments = append(packetCommitments, seq)
+
+ if seq%2 == 0 {
+ suite.chainA.App.IBCKeeper.ChannelKeeper.SetPacketReceipt(suite.chainA.GetContext(), channelA.PortID, channelA.ID, seq)
+ } else {
+ expSeq = append(expSeq, seq)
+ }
+ }
+
+ req = &types.QueryUnreceivedPacketsRequest{
+ PortId: channelA.PortID,
+ ChannelId: channelA.ID,
+ PacketCommitmentSequences: packetCommitments,
+ }
+ },
+ true,
+ },
+ }
+
+ for _, tc := range testCases {
+ suite.Run(fmt.Sprintf("Case %s", tc.msg), func() {
+ suite.SetupTest() // reset
+
+ tc.malleate()
+ ctx := sdk.WrapSDKContext(suite.chainA.GetContext())
+
+ res, err := suite.chainA.QueryServer.UnreceivedPackets(ctx, req)
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ suite.Require().NotNil(res)
+ suite.Require().Equal(expSeq, res.Sequences)
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+}
+
+func (suite *KeeperTestSuite) TestQueryUnreceivedAcks() {
+ var (
+ req *types.QueryUnreceivedAcksRequest
+ expSeq = []uint64{}
+ )
+
+ testCases := []struct {
+ msg string
+ malleate func()
+ expPass bool
+ }{
+ {
+ "empty request",
+ func() {
+ req = nil
+ },
+ false,
+ },
+ {
+ "invalid port ID",
+ func() {
+ req = &types.QueryUnreceivedAcksRequest{
+ PortId: "",
+ ChannelId: "test-channel-id",
+ }
+ },
+ false,
+ },
+ {
+ "invalid channel ID",
+ func() {
+ req = &types.QueryUnreceivedAcksRequest{
+ PortId: "test-port-id",
+ ChannelId: "",
+ }
+ },
+ false,
+ },
+ {
+ "invalid seq",
+ func() {
+ req = &types.QueryUnreceivedAcksRequest{
+ PortId: "test-port-id",
+ ChannelId: "test-channel-id",
+ PacketAckSequences: []uint64{0},
+ }
+ },
+ false,
+ },
+ {
+ "basic success unreceived packet acks",
+ func() {
+ _, _, _, _, channelA, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+
+ suite.chainA.App.IBCKeeper.ChannelKeeper.SetPacketCommitment(suite.chainA.GetContext(), channelA.PortID, channelA.ID, 1, []byte("commitment"))
+
+ expSeq = []uint64{1}
+ req = &types.QueryUnreceivedAcksRequest{
+ PortId: channelA.PortID,
+ ChannelId: channelA.ID,
+ PacketAckSequences: []uint64{1},
+ }
+ },
+ true,
+ },
+ {
+ "basic success unreceived packet acknowledgements, nothing to relay",
+ func() {
+ _, _, _, _, channelA, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+
+ expSeq = []uint64{}
+ req = &types.QueryUnreceivedAcksRequest{
+ PortId: channelA.PortID,
+ ChannelId: channelA.ID,
+ PacketAckSequences: []uint64{1},
+ }
+ },
+ true,
+ },
+ {
+ "success multiple unreceived packet acknowledgements",
+ func() {
+ _, _, _, _, channelA, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ expSeq = []uint64{} // reset
+ packetAcks := []uint64{}
+
+ // set packet commitment for every other sequence
+ for seq := uint64(1); seq < 10; seq++ {
+ packetAcks = append(packetAcks, seq)
+
+ if seq%2 == 0 {
+						suite.chainA.App.IBCKeeper.ChannelKeeper.SetPacketCommitment(suite.chainA.GetContext(), channelA.PortID, channelA.ID, seq, []byte("commitment"))
+ expSeq = append(expSeq, seq)
+ }
+ }
+
+ req = &types.QueryUnreceivedAcksRequest{
+ PortId: channelA.PortID,
+ ChannelId: channelA.ID,
+ PacketAckSequences: packetAcks,
+ }
+ },
+ true,
+ },
+ }
+
+ for _, tc := range testCases {
+ suite.Run(fmt.Sprintf("Case %s", tc.msg), func() {
+ suite.SetupTest() // reset
+
+ tc.malleate()
+ ctx := sdk.WrapSDKContext(suite.chainA.GetContext())
+
+ res, err := suite.chainA.QueryServer.UnreceivedAcks(ctx, req)
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ suite.Require().NotNil(res)
+ suite.Require().Equal(expSeq, res.Sequences)
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+}
+
+func (suite *KeeperTestSuite) TestQueryNextSequenceReceive() {
+ var (
+ req *types.QueryNextSequenceReceiveRequest
+ expSeq uint64
+ )
+
+ testCases := []struct {
+ msg string
+ malleate func()
+ expPass bool
+ }{
+ {
+ "empty request",
+ func() {
+ req = nil
+ },
+ false,
+ },
+ {
+ "invalid port ID",
+ func() {
+ req = &types.QueryNextSequenceReceiveRequest{
+ PortId: "",
+ ChannelId: "test-channel-id",
+ }
+ },
+ false,
+ },
+ {
+ "invalid channel ID",
+ func() {
+ req = &types.QueryNextSequenceReceiveRequest{
+ PortId: "test-port-id",
+ ChannelId: "",
+ }
+ },
+ false,
+ },
+		{
+			"channel not found",
+ func() {
+ req = &types.QueryNextSequenceReceiveRequest{
+ PortId: "test-port-id",
+ ChannelId: "test-channel-id",
+ }
+ },
+ false,
+ },
+ {
+ "success",
+ func() {
+ _, _, _, _, channelA, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ expSeq = 1
+ suite.chainA.App.IBCKeeper.ChannelKeeper.SetNextSequenceRecv(suite.chainA.GetContext(), channelA.PortID, channelA.ID, expSeq)
+
+ req = &types.QueryNextSequenceReceiveRequest{
+ PortId: channelA.PortID,
+ ChannelId: channelA.ID,
+ }
+ },
+ true,
+ },
+ }
+
+ for _, tc := range testCases {
+ suite.Run(fmt.Sprintf("Case %s", tc.msg), func() {
+ suite.SetupTest() // reset
+
+ tc.malleate()
+ ctx := sdk.WrapSDKContext(suite.chainA.GetContext())
+
+ res, err := suite.chainA.QueryServer.NextSequenceReceive(ctx, req)
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ suite.Require().NotNil(res)
+ suite.Require().Equal(expSeq, res.NextSequenceReceive)
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+}
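Each of the query tests above follows the same table-driven shape: a shared request and expectation are declared up front, each case's malleate closure mutates them after the suite is reset, and the gRPC query is then executed against chainA's QueryServer. A minimal sketch of that pattern, with hypothetical names (queryCase, runQueryCase and doQuery are illustrative and not part of this patch):

    // Sketch only: the shared table-driven structure used by the query tests.
    type queryCase struct {
        msg      string
        malleate func() // mutates the shared request/expected values for this case
        expPass  bool
    }

    func runQueryCase(suite *KeeperTestSuite, tc queryCase, doQuery func() error) {
        suite.Run(fmt.Sprintf("Case %s", tc.msg), func() {
            suite.SetupTest() // reset both test chains
            tc.malleate()     // build the request for this case
            if err := doQuery(); tc.expPass {
                suite.Require().NoError(err)
            } else {
                suite.Require().Error(err)
            }
        })
    }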
diff --git a/core/04-channel/keeper/handshake.go b/core/04-channel/keeper/handshake.go
new file mode 100644
index 00000000..b7cff480
--- /dev/null
+++ b/core/04-channel/keeper/handshake.go
@@ -0,0 +1,496 @@
+package keeper
+
+import (
+ "github.com/cosmos/cosmos-sdk/telemetry"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"
+ connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+ porttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/05-port/types"
+ host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+)
+
+// CounterpartyHops returns the connection hops of the counterparty channel.
+// The counterparty hops are stored in the reverse order of the channel's own connection hops.
+// NOTE: Since connectionHops only supports single connection channels for now,
+// this function requires that connection hops only contain a single connection id
+func (k Keeper) CounterpartyHops(ctx sdk.Context, ch types.Channel) ([]string, bool) {
+	// Return an empty slice unless there is exactly one connection hop;
+	// the length of ConnectionHops should have been validated earlier
+ if len(ch.ConnectionHops) != 1 {
+ return []string{}, false
+ }
+ counterpartyHops := make([]string, 1)
+ hop := ch.ConnectionHops[0]
+ conn, found := k.connectionKeeper.GetConnection(ctx, hop)
+ if !found {
+ return []string{}, false
+ }
+
+ counterpartyHops[0] = conn.GetCounterparty().GetConnectionID()
+ return counterpartyHops, true
+}
+
+// ChanOpenInit is called by a module to initiate a channel opening handshake with
+// a module on another chain. The counterparty channel identifier is validated to be
+// empty in msg validation.
+func (k Keeper) ChanOpenInit(
+ ctx sdk.Context,
+ order types.Order,
+ connectionHops []string,
+ portID string,
+ portCap *capabilitytypes.Capability,
+ counterparty types.Counterparty,
+ version string,
+) (string, *capabilitytypes.Capability, error) {
+ // connection hop length checked on msg.ValidateBasic()
+ connectionEnd, found := k.connectionKeeper.GetConnection(ctx, connectionHops[0])
+ if !found {
+ return "", nil, sdkerrors.Wrap(connectiontypes.ErrConnectionNotFound, connectionHops[0])
+ }
+
+ getVersions := connectionEnd.GetVersions()
+ if len(getVersions) != 1 {
+ return "", nil, sdkerrors.Wrapf(
+ connectiontypes.ErrInvalidVersion,
+ "single version must be negotiated on connection before opening channel, got: %v",
+ getVersions,
+ )
+ }
+
+ if !connectiontypes.VerifySupportedFeature(getVersions[0], order.String()) {
+ return "", nil, sdkerrors.Wrapf(
+ connectiontypes.ErrInvalidVersion,
+ "connection version %s does not support channel ordering: %s",
+ getVersions[0], order.String(),
+ )
+ }
+
+ if !k.portKeeper.Authenticate(ctx, portCap, portID) {
+ return "", nil, sdkerrors.Wrapf(porttypes.ErrInvalidPort, "caller does not own port capability for port ID %s", portID)
+ }
+
+ channelID := k.GenerateChannelIdentifier(ctx)
+ channel := types.NewChannel(types.INIT, order, counterparty, connectionHops, version)
+ k.SetChannel(ctx, portID, channelID, channel)
+
+ capKey, err := k.scopedKeeper.NewCapability(ctx, host.ChannelCapabilityPath(portID, channelID))
+ if err != nil {
+ return "", nil, sdkerrors.Wrapf(err, "could not create channel capability for port ID %s and channel ID %s", portID, channelID)
+ }
+
+ k.SetNextSequenceSend(ctx, portID, channelID, 1)
+ k.SetNextSequenceRecv(ctx, portID, channelID, 1)
+ k.SetNextSequenceAck(ctx, portID, channelID, 1)
+
+ k.Logger(ctx).Info("channel state updated", "port-id", portID, "channel-id", channelID, "previous-state", "NONE", "new-state", "INIT")
+
+ defer func() {
+ telemetry.IncrCounter(1, "ibc", "channel", "open-init")
+ }()
+
+ return channelID, capKey, nil
+}
+
+// ChanOpenTry is called by a module to accept the first step of a channel opening
+// handshake initiated by a module on another chain.
+func (k Keeper) ChanOpenTry(
+ ctx sdk.Context,
+ order types.Order,
+ connectionHops []string,
+ portID,
+ previousChannelID string,
+ portCap *capabilitytypes.Capability,
+ counterparty types.Counterparty,
+ version,
+ counterpartyVersion string,
+ proofInit []byte,
+ proofHeight exported.Height,
+) (string, *capabilitytypes.Capability, error) {
+ var (
+ previousChannel types.Channel
+ previousChannelFound bool
+ )
+
+ channelID := previousChannelID
+
+	// a non-empty previous channel identifier indicates a crossing hello: continue the handshake started by an earlier ChanOpenInit on this chain
+ if previousChannelID != "" {
+ // channel identifier and connection hop length checked on msg.ValidateBasic()
+ // ensure that the previous channel exists
+ previousChannel, previousChannelFound = k.GetChannel(ctx, portID, previousChannelID)
+ if !previousChannelFound {
+ return "", nil, sdkerrors.Wrapf(types.ErrInvalidChannel, "previous channel does not exist for supplied previous channelID %s", previousChannelID)
+ }
+ // previous channel must use the same fields
+ if !(previousChannel.Ordering == order &&
+ previousChannel.Counterparty.PortId == counterparty.PortId &&
+ previousChannel.Counterparty.ChannelId == "" &&
+ previousChannel.ConnectionHops[0] == connectionHops[0] &&
+ previousChannel.Version == version) {
+ return "", nil, sdkerrors.Wrap(types.ErrInvalidChannel, "channel fields mismatch previous channel fields")
+ }
+
+ if previousChannel.State != types.INIT {
+ return "", nil, sdkerrors.Wrapf(types.ErrInvalidChannelState, "previous channel state is in %s, expected INIT", previousChannel.State)
+ }
+
+ } else {
+ // generate a new channel
+ channelID = k.GenerateChannelIdentifier(ctx)
+ }
+
+ if !k.portKeeper.Authenticate(ctx, portCap, portID) {
+ return "", nil, sdkerrors.Wrapf(porttypes.ErrInvalidPort, "caller does not own port capability for port ID %s", portID)
+ }
+
+ connectionEnd, found := k.connectionKeeper.GetConnection(ctx, connectionHops[0])
+ if !found {
+ return "", nil, sdkerrors.Wrap(connectiontypes.ErrConnectionNotFound, connectionHops[0])
+ }
+
+ if connectionEnd.GetState() != int32(connectiontypes.OPEN) {
+ return "", nil, sdkerrors.Wrapf(
+ connectiontypes.ErrInvalidConnectionState,
+ "connection state is not OPEN (got %s)", connectiontypes.State(connectionEnd.GetState()).String(),
+ )
+ }
+
+ getVersions := connectionEnd.GetVersions()
+ if len(getVersions) != 1 {
+ return "", nil, sdkerrors.Wrapf(
+ connectiontypes.ErrInvalidVersion,
+ "single version must be negotiated on connection before opening channel, got: %v",
+ getVersions,
+ )
+ }
+
+ if !connectiontypes.VerifySupportedFeature(getVersions[0], order.String()) {
+ return "", nil, sdkerrors.Wrapf(
+ connectiontypes.ErrInvalidVersion,
+ "connection version %s does not support channel ordering: %s",
+ getVersions[0], order.String(),
+ )
+ }
+
+	// NOTE: the channel is constructed before computing the counterparty hops so that
+	// CounterpartyHops can reverse this channel's connection hops
+ channel := types.NewChannel(types.TRYOPEN, order, counterparty, connectionHops, version)
+
+ counterpartyHops, found := k.CounterpartyHops(ctx, channel)
+ if !found {
+ // should not reach here, connectionEnd was able to be retrieved above
+ panic("cannot find connection")
+ }
+
+	// expectedCounterparty is the counterparty of the counterparty's channel end
+	// (i.e. self)
+ expectedCounterparty := types.NewCounterparty(portID, "")
+ expectedChannel := types.NewChannel(
+ types.INIT, channel.Ordering, expectedCounterparty,
+ counterpartyHops, counterpartyVersion,
+ )
+
+ if err := k.connectionKeeper.VerifyChannelState(
+ ctx, connectionEnd, proofHeight, proofInit,
+ counterparty.PortId, counterparty.ChannelId, expectedChannel,
+ ); err != nil {
+ return "", nil, err
+ }
+
+ var (
+ capKey *capabilitytypes.Capability
+ err error
+ )
+
+ if !previousChannelFound {
+ capKey, err = k.scopedKeeper.NewCapability(ctx, host.ChannelCapabilityPath(portID, channelID))
+ if err != nil {
+ return "", nil, sdkerrors.Wrapf(err, "could not create channel capability for port ID %s and channel ID %s", portID, channelID)
+ }
+
+ k.SetNextSequenceSend(ctx, portID, channelID, 1)
+ k.SetNextSequenceRecv(ctx, portID, channelID, 1)
+ k.SetNextSequenceAck(ctx, portID, channelID, 1)
+ } else {
+ // capability initialized in ChanOpenInit
+ capKey, found = k.scopedKeeper.GetCapability(ctx, host.ChannelCapabilityPath(portID, channelID))
+ if !found {
+ return "", nil, sdkerrors.Wrapf(types.ErrChannelCapabilityNotFound,
+ "capability not found for existing channel, portID (%s) channelID (%s)", portID, channelID,
+ )
+ }
+ }
+
+ k.SetChannel(ctx, portID, channelID, channel)
+
+ k.Logger(ctx).Info("channel state updated", "port-id", portID, "channel-id", channelID, "previous-state", previousChannel.State.String(), "new-state", "TRYOPEN")
+
+ defer func() {
+ telemetry.IncrCounter(1, "ibc", "channel", "open-try")
+ }()
+
+ return channelID, capKey, nil
+}
+
+// ChanOpenAck is called by the handshake-originating module to acknowledge the
+// acceptance of the initial request by the counterparty module on the other chain.
+func (k Keeper) ChanOpenAck(
+ ctx sdk.Context,
+ portID,
+ channelID string,
+ chanCap *capabilitytypes.Capability,
+ counterpartyVersion,
+ counterpartyChannelID string,
+ proofTry []byte,
+ proofHeight exported.Height,
+) error {
+ channel, found := k.GetChannel(ctx, portID, channelID)
+ if !found {
+ return sdkerrors.Wrapf(types.ErrChannelNotFound, "port ID (%s) channel ID (%s)", portID, channelID)
+ }
+
+ if !(channel.State == types.INIT || channel.State == types.TRYOPEN) {
+ return sdkerrors.Wrapf(
+ types.ErrInvalidChannelState,
+ "channel state should be INIT or TRYOPEN (got %s)", channel.State.String(),
+ )
+ }
+
+ if !k.scopedKeeper.AuthenticateCapability(ctx, chanCap, host.ChannelCapabilityPath(portID, channelID)) {
+ return sdkerrors.Wrapf(types.ErrChannelCapabilityNotFound, "caller does not own capability for channel, port ID (%s) channel ID (%s)", portID, channelID)
+ }
+
+ connectionEnd, found := k.connectionKeeper.GetConnection(ctx, channel.ConnectionHops[0])
+ if !found {
+ return sdkerrors.Wrap(connectiontypes.ErrConnectionNotFound, channel.ConnectionHops[0])
+ }
+
+ if connectionEnd.GetState() != int32(connectiontypes.OPEN) {
+ return sdkerrors.Wrapf(
+ connectiontypes.ErrInvalidConnectionState,
+ "connection state is not OPEN (got %s)", connectiontypes.State(connectionEnd.GetState()).String(),
+ )
+ }
+
+ counterpartyHops, found := k.CounterpartyHops(ctx, channel)
+ if !found {
+ // should not reach here, connectionEnd was able to be retrieved above
+ panic("cannot find connection")
+ }
+
+	// counterparty of the counterparty channel end (i.e. self)
+ expectedCounterparty := types.NewCounterparty(portID, channelID)
+ expectedChannel := types.NewChannel(
+ types.TRYOPEN, channel.Ordering, expectedCounterparty,
+ counterpartyHops, counterpartyVersion,
+ )
+
+ if err := k.connectionKeeper.VerifyChannelState(
+ ctx, connectionEnd, proofHeight, proofTry,
+ channel.Counterparty.PortId, counterpartyChannelID,
+ expectedChannel,
+ ); err != nil {
+ return err
+ }
+
+ k.Logger(ctx).Info("channel state updated", "port-id", portID, "channel-id", channelID, "previous-state", channel.State.String(), "new-state", "OPEN")
+
+ defer func() {
+ telemetry.IncrCounter(1, "ibc", "channel", "open-ack")
+ }()
+
+ channel.State = types.OPEN
+ channel.Version = counterpartyVersion
+ channel.Counterparty.ChannelId = counterpartyChannelID
+ k.SetChannel(ctx, portID, channelID, channel)
+
+ return nil
+}
+
+// ChanOpenConfirm is called by the counterparty module to finish the channel opening
+// handshake, moving its channel end from TRYOPEN to OPEN once the other end is proven OPEN.
+func (k Keeper) ChanOpenConfirm(
+ ctx sdk.Context,
+ portID,
+ channelID string,
+ chanCap *capabilitytypes.Capability,
+ proofAck []byte,
+ proofHeight exported.Height,
+) error {
+ channel, found := k.GetChannel(ctx, portID, channelID)
+ if !found {
+ return sdkerrors.Wrapf(types.ErrChannelNotFound, "port ID (%s) channel ID (%s)", portID, channelID)
+ }
+
+ if channel.State != types.TRYOPEN {
+ return sdkerrors.Wrapf(
+ types.ErrInvalidChannelState,
+ "channel state is not TRYOPEN (got %s)", channel.State.String(),
+ )
+ }
+
+ if !k.scopedKeeper.AuthenticateCapability(ctx, chanCap, host.ChannelCapabilityPath(portID, channelID)) {
+ return sdkerrors.Wrapf(types.ErrChannelCapabilityNotFound, "caller does not own capability for channel, port ID (%s) channel ID (%s)", portID, channelID)
+ }
+
+ connectionEnd, found := k.connectionKeeper.GetConnection(ctx, channel.ConnectionHops[0])
+ if !found {
+ return sdkerrors.Wrap(connectiontypes.ErrConnectionNotFound, channel.ConnectionHops[0])
+ }
+
+ if connectionEnd.GetState() != int32(connectiontypes.OPEN) {
+ return sdkerrors.Wrapf(
+ connectiontypes.ErrInvalidConnectionState,
+ "connection state is not OPEN (got %s)", connectiontypes.State(connectionEnd.GetState()).String(),
+ )
+ }
+
+ counterpartyHops, found := k.CounterpartyHops(ctx, channel)
+ if !found {
+ // Should not reach here, connectionEnd was able to be retrieved above
+ panic("cannot find connection")
+ }
+
+ counterparty := types.NewCounterparty(portID, channelID)
+ expectedChannel := types.NewChannel(
+ types.OPEN, channel.Ordering, counterparty,
+ counterpartyHops, channel.Version,
+ )
+
+ if err := k.connectionKeeper.VerifyChannelState(
+ ctx, connectionEnd, proofHeight, proofAck,
+ channel.Counterparty.PortId, channel.Counterparty.ChannelId,
+ expectedChannel,
+ ); err != nil {
+ return err
+ }
+
+ channel.State = types.OPEN
+ k.SetChannel(ctx, portID, channelID, channel)
+ k.Logger(ctx).Info("channel state updated", "port-id", portID, "channel-id", channelID, "previous-state", "TRYOPEN", "new-state", "OPEN")
+
+ defer func() {
+ telemetry.IncrCounter(1, "ibc", "channel", "open-confirm")
+ }()
+ return nil
+}
+
+// Closing Handshake
+//
+// This section defines the set of functions required to close a channel handshake
+// as defined in https://github.com/cosmos/ics/tree/master/spec/ics-004-channel-and-packet-semantics#closing-handshake
+//
+// ChanCloseInit is called by either module to close their end of the channel. Once
+// closed, channels cannot be reopened.
+func (k Keeper) ChanCloseInit(
+ ctx sdk.Context,
+ portID,
+ channelID string,
+ chanCap *capabilitytypes.Capability,
+) error {
+ if !k.scopedKeeper.AuthenticateCapability(ctx, chanCap, host.ChannelCapabilityPath(portID, channelID)) {
+ return sdkerrors.Wrapf(types.ErrChannelCapabilityNotFound, "caller does not own capability for channel, port ID (%s) channel ID (%s)", portID, channelID)
+ }
+
+ channel, found := k.GetChannel(ctx, portID, channelID)
+ if !found {
+ return sdkerrors.Wrapf(types.ErrChannelNotFound, "port ID (%s) channel ID (%s)", portID, channelID)
+ }
+
+ if channel.State == types.CLOSED {
+ return sdkerrors.Wrap(types.ErrInvalidChannelState, "channel is already CLOSED")
+ }
+
+ connectionEnd, found := k.connectionKeeper.GetConnection(ctx, channel.ConnectionHops[0])
+ if !found {
+ return sdkerrors.Wrap(connectiontypes.ErrConnectionNotFound, channel.ConnectionHops[0])
+ }
+
+ if connectionEnd.GetState() != int32(connectiontypes.OPEN) {
+ return sdkerrors.Wrapf(
+ connectiontypes.ErrInvalidConnectionState,
+ "connection state is not OPEN (got %s)", connectiontypes.State(connectionEnd.GetState()).String(),
+ )
+ }
+
+ k.Logger(ctx).Info("channel state updated", "port-id", portID, "channel-id", channelID, "previous-state", channel.State.String(), "new-state", "CLOSED")
+
+ defer func() {
+ telemetry.IncrCounter(1, "ibc", "channel", "close-init")
+ }()
+
+ channel.State = types.CLOSED
+ k.SetChannel(ctx, portID, channelID, channel)
+
+ return nil
+}
+
+// ChanCloseConfirm is called by the counterparty module to close their end of the
+// channel, since the other end has been closed.
+func (k Keeper) ChanCloseConfirm(
+ ctx sdk.Context,
+ portID,
+ channelID string,
+ chanCap *capabilitytypes.Capability,
+ proofInit []byte,
+ proofHeight exported.Height,
+) error {
+ if !k.scopedKeeper.AuthenticateCapability(ctx, chanCap, host.ChannelCapabilityPath(portID, channelID)) {
+		return sdkerrors.Wrapf(types.ErrChannelCapabilityNotFound, "caller does not own capability for channel, port ID (%s) channel ID (%s)", portID, channelID)
+ }
+
+ channel, found := k.GetChannel(ctx, portID, channelID)
+ if !found {
+ return sdkerrors.Wrapf(types.ErrChannelNotFound, "port ID (%s) channel ID (%s)", portID, channelID)
+ }
+
+ if channel.State == types.CLOSED {
+ return sdkerrors.Wrap(types.ErrInvalidChannelState, "channel is already CLOSED")
+ }
+
+ connectionEnd, found := k.connectionKeeper.GetConnection(ctx, channel.ConnectionHops[0])
+ if !found {
+ return sdkerrors.Wrap(connectiontypes.ErrConnectionNotFound, channel.ConnectionHops[0])
+ }
+
+ if connectionEnd.GetState() != int32(connectiontypes.OPEN) {
+ return sdkerrors.Wrapf(
+ connectiontypes.ErrInvalidConnectionState,
+ "connection state is not OPEN (got %s)", connectiontypes.State(connectionEnd.GetState()).String(),
+ )
+ }
+
+ counterpartyHops, found := k.CounterpartyHops(ctx, channel)
+ if !found {
+ // Should not reach here, connectionEnd was able to be retrieved above
+ panic("cannot find connection")
+ }
+
+ counterparty := types.NewCounterparty(portID, channelID)
+ expectedChannel := types.NewChannel(
+ types.CLOSED, channel.Ordering, counterparty,
+ counterpartyHops, channel.Version,
+ )
+
+ if err := k.connectionKeeper.VerifyChannelState(
+ ctx, connectionEnd, proofHeight, proofInit,
+ channel.Counterparty.PortId, channel.Counterparty.ChannelId,
+ expectedChannel,
+ ); err != nil {
+ return err
+ }
+
+ k.Logger(ctx).Info("channel state updated", "port-id", portID, "channel-id", channelID, "previous-state", channel.State.String(), "new-state", "CLOSED")
+
+ defer func() {
+ telemetry.IncrCounter(1, "ibc", "channel", "close-confirm")
+ }()
+
+ channel.State = types.CLOSED
+ k.SetChannel(ctx, portID, channelID, channel)
+
+ return nil
+}
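Taken together, the handshake functions above drive each channel end through a fixed state machine, and every verifying step reconstructs the channel end it expects the counterparty to have already stored before calling VerifyChannelState. A hedged sketch of those expectations (the helper below is illustrative and not part of this patch; it only maps each call to the counterparty state it proves):

    // Sketch only: the counterparty channel state each verifying handshake step
    // proves via VerifyChannelState, mirroring the expectedChannel values above.
    func provenCounterpartyState(step string) types.State {
        switch step {
        case "ChanOpenTry": // counterparty ran ChanOpenInit
            return types.INIT
        case "ChanOpenAck": // counterparty ran ChanOpenTry
            return types.TRYOPEN
        case "ChanOpenConfirm": // counterparty ran ChanOpenAck
            return types.OPEN
        case "ChanCloseConfirm": // counterparty ran ChanCloseInit
            return types.CLOSED
        default: // ChanOpenInit and ChanCloseInit verify nothing remotely
            return types.UNINITIALIZED
        }
    }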
diff --git a/core/04-channel/keeper/handshake_test.go b/core/04-channel/keeper/handshake_test.go
new file mode 100644
index 00000000..120e1f8f
--- /dev/null
+++ b/core/04-channel/keeper/handshake_test.go
@@ -0,0 +1,773 @@
+package keeper_test
+
+import (
+ "fmt"
+
+ capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+ host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+)
+
+type testCase = struct {
+ msg string
+ malleate func()
+ expPass bool
+}
+
+// TestChanOpenInit tests the OpenInit handshake call for channels. It uses message passing
+// to enter into the appropriate state and then calls ChanOpenInit directly. The channel is
+// being created on chainA. The port capability must be created on chainA before ChanOpenInit
+// can succeed.
+func (suite *KeeperTestSuite) TestChanOpenInit() {
+ var (
+ connA *ibctesting.TestConnection
+ connB *ibctesting.TestConnection
+ features []string
+ portCap *capabilitytypes.Capability
+ )
+
+ testCases := []testCase{
+ {"success", func() {
+ _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
+ features = []string{"ORDER_ORDERED", "ORDER_UNORDERED"}
+ suite.chainA.CreatePortCapability(suite.chainA.NextTestChannel(connA, ibctesting.MockPort).PortID)
+ portCap = suite.chainA.GetPortCapability(suite.chainA.NextTestChannel(connA, ibctesting.MockPort).PortID)
+ }, true},
+ {"channel already exists", func() {
+ _, _, connA, connB, _, _ = suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ }, false},
+ {"connection doesn't exist", func() {
+ // any non-nil values of connA and connB are acceptable
+ suite.Require().NotNil(connA)
+ suite.Require().NotNil(connB)
+ }, false},
+ {"capability is incorrect", func() {
+ _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
+ features = []string{"ORDER_ORDERED", "ORDER_UNORDERED"}
+ portCap = capabilitytypes.NewCapability(3)
+ }, false},
+ {"connection version not negotiated", func() {
+ _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
+
+ // modify connA versions
+ conn := suite.chainA.GetConnection(connA)
+
+ version := connectiontypes.NewVersion("2", []string{"ORDER_ORDERED", "ORDER_UNORDERED"})
+ conn.Versions = append(conn.Versions, version)
+
+ suite.chainA.App.IBCKeeper.ConnectionKeeper.SetConnection(
+ suite.chainA.GetContext(),
+ connA.ID, conn,
+ )
+ features = []string{"ORDER_ORDERED", "ORDER_UNORDERED"}
+ suite.chainA.CreatePortCapability(suite.chainA.NextTestChannel(connA, ibctesting.MockPort).PortID)
+ portCap = suite.chainA.GetPortCapability(suite.chainA.NextTestChannel(connA, ibctesting.MockPort).PortID)
+ }, false},
+ {"connection does not support ORDERED channels", func() {
+ _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
+
+ // modify connA versions to only support UNORDERED channels
+ conn := suite.chainA.GetConnection(connA)
+
+ version := connectiontypes.NewVersion("1", []string{"ORDER_UNORDERED"})
+ conn.Versions = []*connectiontypes.Version{version}
+
+ suite.chainA.App.IBCKeeper.ConnectionKeeper.SetConnection(
+ suite.chainA.GetContext(),
+ connA.ID, conn,
+ )
+ // NOTE: Opening UNORDERED channels is still expected to pass but ORDERED channels should fail
+ features = []string{"ORDER_UNORDERED"}
+ suite.chainA.CreatePortCapability(suite.chainA.NextTestChannel(connA, ibctesting.MockPort).PortID)
+ portCap = suite.chainA.GetPortCapability(suite.chainA.NextTestChannel(connA, ibctesting.MockPort).PortID)
+ }, true},
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+ suite.Run(fmt.Sprintf("Case %s", tc.msg), func() {
+ // run test for all types of ordering
+ for _, order := range []types.Order{types.UNORDERED, types.ORDERED} {
+ suite.SetupTest() // reset
+ tc.malleate()
+
+ counterparty := types.NewCounterparty(connB.FirstOrNextTestChannel(ibctesting.MockPort).PortID, connB.FirstOrNextTestChannel(ibctesting.MockPort).ID)
+ channelA := connA.FirstOrNextTestChannel(ibctesting.MockPort)
+
+ channelID, cap, err := suite.chainA.App.IBCKeeper.ChannelKeeper.ChanOpenInit(
+ suite.chainA.GetContext(), order, []string{connA.ID},
+ channelA.PortID, portCap, counterparty, channelA.Version,
+ )
+
+ // check if order is supported by channel to determine expected behaviour
+ orderSupported := false
+ for _, f := range features {
+ if f == order.String() {
+ orderSupported = true
+ }
+ }
+
+ // Testcase must have expectedPass = true AND channel order supported before
+ // asserting the channel handshake initiation succeeded
+ if tc.expPass && orderSupported {
+ suite.Require().NoError(err)
+ suite.Require().NotNil(cap)
+ suite.Require().Equal(types.FormatChannelIdentifier(0), channelID)
+
+ chanCap, ok := suite.chainA.App.ScopedIBCKeeper.GetCapability(
+ suite.chainA.GetContext(),
+ host.ChannelCapabilityPath(channelA.PortID, channelA.ID),
+ )
+ suite.Require().True(ok, "could not retrieve channel capability after successful ChanOpenInit")
+ suite.Require().Equal(chanCap.String(), cap.String(), "channel capability is not correct")
+ } else {
+ suite.Require().Error(err)
+ suite.Require().Nil(cap)
+ suite.Require().Equal("", channelID)
+ }
+ }
+ })
+ }
+}
+
+// TestChanOpenTry tests the OpenTry handshake call for channels. It uses message passing
+// to enter into the appropriate state and then calls ChanOpenTry directly. The channel
+// is being created on chainB. The port capability must be created on chainB before
+// ChanOpenTry can succeed.
+func (suite *KeeperTestSuite) TestChanOpenTry() {
+ var (
+ connA *ibctesting.TestConnection
+ connB *ibctesting.TestConnection
+ previousChannelID string
+ portCap *capabilitytypes.Capability
+ heightDiff uint64
+ )
+
+ testCases := []testCase{
+ {"success", func() {
+ _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
+ suite.coordinator.ChanOpenInit(suite.chainA, suite.chainB, connA, connB, ibctesting.MockPort, ibctesting.MockPort, types.ORDERED)
+
+ suite.chainB.CreatePortCapability(suite.chainB.NextTestChannel(connB, ibctesting.MockPort).PortID)
+ portCap = suite.chainB.GetPortCapability(suite.chainB.NextTestChannel(connB, ibctesting.MockPort).PortID)
+ }, true},
+ {"success with crossing hello", func() {
+ _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
+ _, channelB, err := suite.coordinator.ChanOpenInitOnBothChains(suite.chainA, suite.chainB, connA, connB, ibctesting.MockPort, ibctesting.MockPort, types.ORDERED)
+ suite.Require().NoError(err)
+
+ previousChannelID = channelB.ID
+ portCap = suite.chainB.GetPortCapability(suite.chainB.NextTestChannel(connB, ibctesting.MockPort).PortID)
+ }, true},
+ {"previous channel with invalid state", func() {
+ _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
+
+ // make previous channel have wrong ordering
+ suite.coordinator.ChanOpenInit(suite.chainB, suite.chainA, connB, connA, ibctesting.MockPort, ibctesting.MockPort, types.UNORDERED)
+ }, false},
+ {"connection doesn't exist", func() {
+ // any non-nil values of connA and connB are acceptable
+ suite.Require().NotNil(connA)
+ suite.Require().NotNil(connB)
+
+ // pass capability check
+ suite.chainB.CreatePortCapability(connB.FirstOrNextTestChannel(ibctesting.MockPort).PortID)
+ portCap = suite.chainB.GetPortCapability(connB.FirstOrNextTestChannel(ibctesting.MockPort).PortID)
+ }, false},
+ {"connection is not OPEN", func() {
+ clientA, clientB := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+ // pass capability check
+ suite.chainB.CreatePortCapability(connB.FirstOrNextTestChannel(ibctesting.MockPort).PortID)
+ portCap = suite.chainB.GetPortCapability(connB.FirstOrNextTestChannel(ibctesting.MockPort).PortID)
+
+ var err error
+ connB, connA, err = suite.coordinator.ConnOpenInit(suite.chainB, suite.chainA, clientB, clientA)
+ suite.Require().NoError(err)
+ }, false},
+ {"consensus state not found", func() {
+ _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
+ suite.coordinator.ChanOpenInit(suite.chainA, suite.chainB, connA, connB, ibctesting.MockPort, ibctesting.MockPort, types.ORDERED)
+
+ suite.chainB.CreatePortCapability(suite.chainB.NextTestChannel(connB, ibctesting.MockPort).PortID)
+ portCap = suite.chainB.GetPortCapability(suite.chainB.NextTestChannel(connB, ibctesting.MockPort).PortID)
+
+ heightDiff = 3 // consensus state doesn't exist at this height
+ }, false},
+ {"channel verification failed", func() {
+ // not creating a channel on chainA will result in an invalid proof of existence
+ _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
+ portCap = suite.chainB.GetPortCapability(suite.chainB.NextTestChannel(connB, ibctesting.MockPort).PortID)
+ }, false},
+ {"port capability not found", func() {
+ _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
+ suite.coordinator.ChanOpenInit(suite.chainA, suite.chainB, connA, connB, ibctesting.MockPort, ibctesting.MockPort, types.ORDERED)
+
+ portCap = capabilitytypes.NewCapability(3)
+ }, false},
+ {"connection version not negotiated", func() {
+ _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
+ suite.coordinator.ChanOpenInit(suite.chainA, suite.chainB, connA, connB, ibctesting.MockPort, ibctesting.MockPort, types.ORDERED)
+
+ // modify connB versions
+ conn := suite.chainB.GetConnection(connB)
+
+ version := connectiontypes.NewVersion("2", []string{"ORDER_ORDERED", "ORDER_UNORDERED"})
+ conn.Versions = append(conn.Versions, version)
+
+ suite.chainB.App.IBCKeeper.ConnectionKeeper.SetConnection(
+ suite.chainB.GetContext(),
+ connB.ID, conn,
+ )
+ suite.chainB.CreatePortCapability(suite.chainB.NextTestChannel(connB, ibctesting.MockPort).PortID)
+ portCap = suite.chainB.GetPortCapability(suite.chainB.NextTestChannel(connB, ibctesting.MockPort).PortID)
+ }, false},
+ {"connection does not support ORDERED channels", func() {
+ _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
+ suite.coordinator.ChanOpenInit(suite.chainA, suite.chainB, connA, connB, ibctesting.MockPort, ibctesting.MockPort, types.ORDERED)
+
+ // modify connA versions to only support UNORDERED channels
+ conn := suite.chainA.GetConnection(connA)
+
+ version := connectiontypes.NewVersion("1", []string{"ORDER_UNORDERED"})
+ conn.Versions = []*connectiontypes.Version{version}
+
+ suite.chainA.App.IBCKeeper.ConnectionKeeper.SetConnection(
+ suite.chainA.GetContext(),
+ connA.ID, conn,
+ )
+ suite.chainA.CreatePortCapability(suite.chainA.NextTestChannel(connA, ibctesting.MockPort).PortID)
+ portCap = suite.chainA.GetPortCapability(suite.chainA.NextTestChannel(connA, ibctesting.MockPort).PortID)
+ }, false},
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+ suite.Run(fmt.Sprintf("Case %s", tc.msg), func() {
+ suite.SetupTest() // reset
+ heightDiff = 0 // must be explicitly changed in malleate
+ previousChannelID = ""
+
+ tc.malleate()
+ channelA := connA.FirstOrNextTestChannel(ibctesting.MockPort)
+ channelB := connB.FirstOrNextTestChannel(ibctesting.MockPort)
+ counterparty := types.NewCounterparty(channelA.PortID, channelA.ID)
+
+ channelKey := host.ChannelKey(counterparty.PortId, counterparty.ChannelId)
+ proof, proofHeight := suite.chainA.QueryProof(channelKey)
+
+ channelID, cap, err := suite.chainB.App.IBCKeeper.ChannelKeeper.ChanOpenTry(
+ suite.chainB.GetContext(), types.ORDERED, []string{connB.ID},
+ channelB.PortID, previousChannelID, portCap, counterparty, channelB.Version, connA.FirstOrNextTestChannel(ibctesting.MockPort).Version,
+ proof, malleateHeight(proofHeight, heightDiff),
+ )
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ suite.Require().NotNil(cap)
+
+ chanCap, ok := suite.chainB.App.ScopedIBCKeeper.GetCapability(
+ suite.chainB.GetContext(),
+ host.ChannelCapabilityPath(channelB.PortID, channelID),
+ )
+				suite.Require().True(ok, "could not retrieve channel capability after successful ChanOpenTry")
+ suite.Require().Equal(chanCap.String(), cap.String(), "channel capability is not correct")
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+}
+
+// TestChanOpenAck tests the OpenAck handshake call for channels. It uses message passing
+// to enter into the appropriate state and then calls ChanOpenAck directly. The handshake
+// call is occurring on chainA.
+func (suite *KeeperTestSuite) TestChanOpenAck() {
+ var (
+ connA *ibctesting.TestConnection
+ connB *ibctesting.TestConnection
+ counterpartyChannelID string
+ channelCap *capabilitytypes.Capability
+ heightDiff uint64
+ )
+
+ testCases := []testCase{
+ {"success", func() {
+ _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
+ channelA, channelB, err := suite.coordinator.ChanOpenInit(suite.chainA, suite.chainB, connA, connB, ibctesting.MockPort, ibctesting.MockPort, types.ORDERED)
+ suite.Require().NoError(err)
+
+ err = suite.coordinator.ChanOpenTry(suite.chainB, suite.chainA, channelB, channelA, connB, types.ORDERED)
+ suite.Require().NoError(err)
+
+ channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ }, true},
+ {"success with empty stored counterparty channel ID", func() {
+ _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
+ channelA, channelB, err := suite.coordinator.ChanOpenInit(suite.chainA, suite.chainB, connA, connB, ibctesting.MockPort, ibctesting.MockPort, types.ORDERED)
+ suite.Require().NoError(err)
+
+ err = suite.coordinator.ChanOpenTry(suite.chainB, suite.chainA, channelB, channelA, connB, types.ORDERED)
+ suite.Require().NoError(err)
+
+ // set the channel's counterparty channel identifier to empty string
+ channel := suite.chainA.GetChannel(channelA)
+ channel.Counterparty.ChannelId = ""
+
+ // use a different channel identifier
+ counterpartyChannelID = channelB.ID
+
+ suite.chainA.App.IBCKeeper.ChannelKeeper.SetChannel(suite.chainA.GetContext(), channelA.PortID, channelA.ID, channel)
+
+ channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ }, true},
+ {"channel doesn't exist", func() {}, false},
+ {"channel state is not INIT or TRYOPEN", func() {
+ // create fully open channels on both chains
+ _, _, connA, connB, _, _ = suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ channelA := connA.Channels[0]
+ channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ }, false},
+ {"connection not found", func() {
+ _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
+ channelA, channelB, err := suite.coordinator.ChanOpenInit(suite.chainA, suite.chainB, connA, connB, ibctesting.MockPort, ibctesting.MockPort, types.ORDERED)
+ suite.Require().NoError(err)
+
+ err = suite.coordinator.ChanOpenTry(suite.chainB, suite.chainA, channelB, channelA, connB, types.ORDERED)
+ suite.Require().NoError(err)
+
+ channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+
+ // set the channel's connection hops to wrong connection ID
+ channel := suite.chainA.GetChannel(channelA)
+ channel.ConnectionHops[0] = "doesnotexist"
+ suite.chainA.App.IBCKeeper.ChannelKeeper.SetChannel(suite.chainA.GetContext(), channelA.PortID, channelA.ID, channel)
+ }, false},
+ {"connection is not OPEN", func() {
+ clientA, clientB := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+
+ var err error
+ connA, connB, err = suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB)
+ suite.Require().NoError(err)
+
+ // create channel in init
+ channelA, _, err := suite.coordinator.ChanOpenInit(suite.chainA, suite.chainB, connA, connB, ibctesting.MockPort, ibctesting.MockPort, types.ORDERED)
+ suite.Require().NoError(err)
+
+ suite.chainA.CreateChannelCapability(channelA.PortID, channelA.ID)
+ channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ }, false},
+ {"consensus state not found", func() {
+ _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
+ channelA, channelB, err := suite.coordinator.ChanOpenInit(suite.chainA, suite.chainB, connA, connB, ibctesting.MockPort, ibctesting.MockPort, types.ORDERED)
+ suite.Require().NoError(err)
+
+ err = suite.coordinator.ChanOpenTry(suite.chainB, suite.chainA, channelB, channelA, connB, types.ORDERED)
+ suite.Require().NoError(err)
+
+ channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+
+ heightDiff = 3 // consensus state doesn't exist at this height
+ }, false},
+ {"invalid counterparty channel identifier", func() {
+ _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
+ channelA, channelB, err := suite.coordinator.ChanOpenInit(suite.chainA, suite.chainB, connA, connB, ibctesting.MockPort, ibctesting.MockPort, types.ORDERED)
+ suite.Require().NoError(err)
+
+ err = suite.coordinator.ChanOpenTry(suite.chainB, suite.chainA, channelB, channelA, connB, types.ORDERED)
+ suite.Require().NoError(err)
+
+ counterpartyChannelID = "otheridentifier"
+
+ channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ }, false},
+ {"channel verification failed", func() {
+ // chainB is INIT, chainA in TRYOPEN
+ _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
+ channelB, channelA, err := suite.coordinator.ChanOpenInit(suite.chainB, suite.chainA, connB, connA, ibctesting.MockPort, ibctesting.MockPort, types.ORDERED)
+ suite.Require().NoError(err)
+
+ err = suite.coordinator.ChanOpenTry(suite.chainA, suite.chainB, channelA, channelB, connA, types.ORDERED)
+ suite.Require().NoError(err)
+
+ channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ }, false},
+ {"channel capability not found", func() {
+ _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
+ channelA, channelB, err := suite.coordinator.ChanOpenInit(suite.chainA, suite.chainB, connA, connB, ibctesting.MockPort, ibctesting.MockPort, types.ORDERED)
+ suite.Require().NoError(err)
+
+ suite.coordinator.ChanOpenTry(suite.chainB, suite.chainA, channelB, channelA, connB, types.ORDERED)
+
+ channelCap = capabilitytypes.NewCapability(6)
+ }, false},
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+ suite.Run(fmt.Sprintf("Case %s", tc.msg), func() {
+ suite.SetupTest() // reset
+ counterpartyChannelID = "" // must be explicitly changed in malleate
+ heightDiff = 0 // must be explicitly changed
+
+ tc.malleate()
+
+ channelA := connA.FirstOrNextTestChannel(ibctesting.MockPort)
+ channelB := connB.FirstOrNextTestChannel(ibctesting.MockPort)
+
+ if counterpartyChannelID == "" {
+ counterpartyChannelID = channelB.ID
+ }
+
+ channelKey := host.ChannelKey(channelB.PortID, channelB.ID)
+ proof, proofHeight := suite.chainB.QueryProof(channelKey)
+
+ err := suite.chainA.App.IBCKeeper.ChannelKeeper.ChanOpenAck(
+ suite.chainA.GetContext(), channelA.PortID, channelA.ID, channelCap, channelB.Version, counterpartyChannelID,
+ proof, malleateHeight(proofHeight, heightDiff),
+ )
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+}
+
+// TestChanOpenConfirm tests the OpenConfirm handshake call for channels. It uses message passing
+// to enter into the appropriate state and then calls ChanOpenConfirm directly. The handshake
+// call is occurring on chainB.
+func (suite *KeeperTestSuite) TestChanOpenConfirm() {
+ var (
+ connA *ibctesting.TestConnection
+ connB *ibctesting.TestConnection
+ channelCap *capabilitytypes.Capability
+ heightDiff uint64
+ )
+ testCases := []testCase{
+ {"success", func() {
+ _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
+ channelA, channelB, err := suite.coordinator.ChanOpenInit(suite.chainA, suite.chainB, connA, connB, ibctesting.MockPort, ibctesting.MockPort, types.ORDERED)
+ suite.Require().NoError(err)
+
+ err = suite.coordinator.ChanOpenTry(suite.chainB, suite.chainA, channelB, channelA, connB, types.ORDERED)
+ suite.Require().NoError(err)
+
+ err = suite.coordinator.ChanOpenAck(suite.chainA, suite.chainB, channelA, channelB)
+ suite.Require().NoError(err)
+
+ channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
+ }, true},
+ {"channel doesn't exist", func() {}, false},
+ {"channel state is not TRYOPEN", func() {
+			// create fully open channels on both chains
+ _, _, connA, connB, _, _ = suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ channelB := connB.Channels[0]
+ channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
+ }, false},
+ {"connection not found", func() {
+ _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
+ channelA, channelB, err := suite.coordinator.ChanOpenInit(suite.chainA, suite.chainB, connA, connB, ibctesting.MockPort, ibctesting.MockPort, types.ORDERED)
+ suite.Require().NoError(err)
+
+ err = suite.coordinator.ChanOpenTry(suite.chainB, suite.chainA, channelB, channelA, connB, types.ORDERED)
+ suite.Require().NoError(err)
+
+ err = suite.coordinator.ChanOpenAck(suite.chainA, suite.chainB, channelA, channelB)
+ suite.Require().NoError(err)
+
+ channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
+
+ // set the channel's connection hops to wrong connection ID
+ channel := suite.chainB.GetChannel(channelB)
+ channel.ConnectionHops[0] = "doesnotexist"
+ suite.chainB.App.IBCKeeper.ChannelKeeper.SetChannel(suite.chainB.GetContext(), channelB.PortID, channelB.ID, channel)
+ }, false},
+ {"connection is not OPEN", func() {
+ clientA, clientB := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+
+ var err error
+ connA, connB, err = suite.coordinator.ConnOpenInit(suite.chainB, suite.chainA, clientB, clientA)
+ suite.Require().NoError(err)
+ channelB := connB.FirstOrNextTestChannel(ibctesting.MockPort)
+ suite.chainB.CreateChannelCapability(channelB.PortID, channelB.ID)
+ channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
+ }, false},
+ {"consensus state not found", func() {
+ _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
+ channelA, channelB, err := suite.coordinator.ChanOpenInit(suite.chainA, suite.chainB, connA, connB, ibctesting.MockPort, ibctesting.MockPort, types.ORDERED)
+ suite.Require().NoError(err)
+
+ err = suite.coordinator.ChanOpenTry(suite.chainB, suite.chainA, channelB, channelA, connB, types.ORDERED)
+ suite.Require().NoError(err)
+
+ err = suite.coordinator.ChanOpenAck(suite.chainA, suite.chainB, channelA, channelB)
+ suite.Require().NoError(err)
+
+ channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
+
+ heightDiff = 3
+ }, false},
+ {"channel verification failed", func() {
+ // chainA is INIT, chainB in TRYOPEN
+ _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
+ channelA, channelB, err := suite.coordinator.ChanOpenInit(suite.chainA, suite.chainB, connA, connB, ibctesting.MockPort, ibctesting.MockPort, types.ORDERED)
+ suite.Require().NoError(err)
+
+ err = suite.coordinator.ChanOpenTry(suite.chainB, suite.chainA, channelB, channelA, connB, types.ORDERED)
+ suite.Require().NoError(err)
+
+ channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
+ }, false},
+ {"channel capability not found", func() {
+ _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
+ channelA, channelB, err := suite.coordinator.ChanOpenInit(suite.chainA, suite.chainB, connA, connB, ibctesting.MockPort, ibctesting.MockPort, types.ORDERED)
+ suite.Require().NoError(err)
+
+ err = suite.coordinator.ChanOpenTry(suite.chainB, suite.chainA, channelB, channelA, connB, types.ORDERED)
+ suite.Require().NoError(err)
+
+ err = suite.coordinator.ChanOpenAck(suite.chainA, suite.chainB, channelA, channelB)
+ suite.Require().NoError(err)
+
+ channelCap = capabilitytypes.NewCapability(6)
+ }, false},
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+ suite.Run(fmt.Sprintf("Case %s", tc.msg), func() {
+ suite.SetupTest() // reset
+ heightDiff = 0 // must be explicitly changed
+
+ tc.malleate()
+
+ channelA := connA.FirstOrNextTestChannel(ibctesting.MockPort)
+ channelB := connB.FirstOrNextTestChannel(ibctesting.MockPort)
+
+ channelKey := host.ChannelKey(channelA.PortID, channelA.ID)
+ proof, proofHeight := suite.chainA.QueryProof(channelKey)
+
+ err := suite.chainB.App.IBCKeeper.ChannelKeeper.ChanOpenConfirm(
+ suite.chainB.GetContext(), channelB.PortID, channelB.ID,
+ channelCap, proof, malleateHeight(proofHeight, heightDiff),
+ )
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+}
+
+// TestChanCloseInit tests initiating the closing handshake on chainA by calling
+// ChanCloseInit. Both chains will use message passing to setup OPEN channels.
+func (suite *KeeperTestSuite) TestChanCloseInit() {
+ var (
+ connA *ibctesting.TestConnection
+ connB *ibctesting.TestConnection
+ channelCap *capabilitytypes.Capability
+ )
+
+ testCases := []testCase{
+ {"success", func() {
+ _, _, connA, connB, _, _ = suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ channelA := connA.Channels[0]
+ channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ }, true},
+ {"channel doesn't exist", func() {
+ // any non-nil values work for connections
+ suite.Require().NotNil(connA)
+ suite.Require().NotNil(connB)
+ channelA := connA.FirstOrNextTestChannel(ibctesting.MockPort)
+
+ // ensure channel capability check passes
+ suite.chainA.CreateChannelCapability(channelA.PortID, channelA.ID)
+ channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ }, false},
+ {"channel state is CLOSED", func() {
+ _, _, connA, connB, _, _ = suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ channelA := connA.Channels[0]
+ channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+
+ // close channel
+ err := suite.coordinator.SetChannelClosed(suite.chainA, suite.chainB, channelA)
+ suite.Require().NoError(err)
+ }, false},
+ {"connection not found", func() {
+ _, _, connA, connB, _, _ = suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ channelA := connA.Channels[0]
+ channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+
+ // set the channel's connection hops to wrong connection ID
+ channel := suite.chainA.GetChannel(channelA)
+ channel.ConnectionHops[0] = "doesnotexist"
+ suite.chainA.App.IBCKeeper.ChannelKeeper.SetChannel(suite.chainA.GetContext(), channelA.PortID, channelA.ID, channel)
+ }, false},
+ {"connection is not OPEN", func() {
+ clientA, clientB := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+
+ var err error
+ connA, connB, err = suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB)
+ suite.Require().NoError(err)
+
+ // create channel in init
+ channelA, _, err := suite.coordinator.ChanOpenInit(suite.chainA, suite.chainB, connA, connB, ibctesting.MockPort, ibctesting.MockPort, types.ORDERED)
+
+ // ensure channel capability check passes
+ suite.chainA.CreateChannelCapability(channelA.PortID, channelA.ID)
+ channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ }, false},
+ {"channel capability not found", func() {
+ _, _, connA, connB, _, _ = suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ channelCap = capabilitytypes.NewCapability(3)
+ }, false},
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+ suite.Run(fmt.Sprintf("Case %s", tc.msg), func() {
+ suite.SetupTest() // reset
+
+ tc.malleate()
+
+ channelA := connA.FirstOrNextTestChannel(ibctesting.MockPort)
+
+ err := suite.chainA.App.IBCKeeper.ChannelKeeper.ChanCloseInit(
+ suite.chainA.GetContext(), channelA.PortID, channelA.ID, channelCap,
+ )
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+}
+
+// TestChanCloseConfirm tests confirming the closing of a channel end by calling
+// ChanCloseConfirm on chainB. Both chains will use message passing to setup OPEN
+// channels. ChanCloseInit is bypassed on chainA by setting the channel state in the
+// ChannelKeeper.
+func (suite *KeeperTestSuite) TestChanCloseConfirm() {
+ var (
+ connA *ibctesting.TestConnection
+ connB *ibctesting.TestConnection
+ channelA ibctesting.TestChannel
+ channelB ibctesting.TestChannel
+ channelCap *capabilitytypes.Capability
+ heightDiff uint64
+ )
+
+ testCases := []testCase{
+ {"success", func() {
+ _, _, connA, connB, channelA, channelB = suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
+
+ err := suite.coordinator.SetChannelClosed(suite.chainA, suite.chainB, channelA)
+ suite.Require().NoError(err)
+ }, true},
+ {"channel doesn't exist", func() {
+ // any non-nil values work for connections
+ suite.Require().NotNil(connA)
+ suite.Require().NotNil(connB)
+ channelB = connB.FirstOrNextTestChannel(ibctesting.MockPort)
+
+ // ensure channel capability check passes
+ suite.chainB.CreateChannelCapability(channelB.PortID, channelB.ID)
+ channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
+ }, false},
+ {"channel state is CLOSED", func() {
+ _, _, connA, connB, _, channelB = suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
+
+ err := suite.coordinator.SetChannelClosed(suite.chainB, suite.chainA, channelB)
+ suite.Require().NoError(err)
+ }, false},
+ {"connection not found", func() {
+ _, _, connA, connB, _, channelB = suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
+
+ // set the channel's connection hops to wrong connection ID
+ channel := suite.chainB.GetChannel(channelB)
+ channel.ConnectionHops[0] = "doesnotexist"
+ suite.chainB.App.IBCKeeper.ChannelKeeper.SetChannel(suite.chainB.GetContext(), channelB.PortID, channelB.ID, channel)
+ }, false},
+ {"connection is not OPEN", func() {
+ clientA, clientB := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+
+ var err error
+ connB, connA, err = suite.coordinator.ConnOpenInit(suite.chainB, suite.chainA, clientB, clientA)
+ suite.Require().NoError(err)
+
+ // create channel in init
+ channelB, _, err := suite.coordinator.ChanOpenInit(suite.chainB, suite.chainA, connB, connA, ibctesting.MockPort, ibctesting.MockPort, types.ORDERED)
+ suite.Require().NoError(err)
+
+ // ensure channel capability check passes
+ suite.chainB.CreateChannelCapability(channelB.PortID, channelB.ID)
+ channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
+ }, false},
+ {"consensus state not found", func() {
+ _, _, connA, connB, channelA, channelB = suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
+
+ err := suite.coordinator.SetChannelClosed(suite.chainA, suite.chainB, channelA)
+ suite.Require().NoError(err)
+
+ heightDiff = 3
+ }, false},
+ {"channel verification failed", func() {
+ // channel not closed
+ _, _, connA, connB, _, channelB = suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
+ }, false},
+ {"channel capability not found", func() {
+ _, _, connA, connB, channelA, channelB = suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
+
+ err := suite.coordinator.SetChannelClosed(suite.chainA, suite.chainB, channelA)
+ suite.Require().NoError(err)
+
+ channelCap = capabilitytypes.NewCapability(3)
+ }, false},
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+ suite.Run(fmt.Sprintf("Case %s", tc.msg), func() {
+ suite.SetupTest() // reset
+ heightDiff = 0 // must explicitly be changed
+
+ tc.malleate()
+
+ channelA = connA.FirstOrNextTestChannel(ibctesting.MockPort)
+ channelB = connB.FirstOrNextTestChannel(ibctesting.MockPort)
+
+ channelKey := host.ChannelKey(channelA.PortID, channelA.ID)
+ proof, proofHeight := suite.chainA.QueryProof(channelKey)
+
+ err := suite.chainB.App.IBCKeeper.ChannelKeeper.ChanCloseConfirm(
+ suite.chainB.GetContext(), channelB.PortID, channelB.ID, channelCap,
+ proof, malleateHeight(proofHeight, heightDiff),
+ )
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+}
+
+func malleateHeight(height exported.Height, diff uint64) exported.Height {
+ return clienttypes.NewHeight(height.GetRevisionNumber(), height.GetRevisionHeight()+diff)
+}
diff --git a/core/04-channel/keeper/keeper.go b/core/04-channel/keeper/keeper.go
new file mode 100644
index 00000000..60452f31
--- /dev/null
+++ b/core/04-channel/keeper/keeper.go
@@ -0,0 +1,432 @@
+package keeper
+
+import (
+ "strconv"
+ "strings"
+
+ "github.com/tendermint/tendermint/libs/log"
+ db "github.com/tendermint/tm-db"
+
+ "github.com/cosmos/cosmos-sdk/codec"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ capabilitykeeper "github.com/cosmos/cosmos-sdk/x/capability/keeper"
+ capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+ porttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/05-port/types"
+ host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+)
+
+// Keeper defines the IBC channel keeper
+type Keeper struct {
+ // implements gRPC QueryServer interface
+ types.QueryServer
+
+ storeKey sdk.StoreKey
+ cdc codec.BinaryMarshaler
+ clientKeeper types.ClientKeeper
+ connectionKeeper types.ConnectionKeeper
+ portKeeper types.PortKeeper
+ scopedKeeper capabilitykeeper.ScopedKeeper
+}
+
+// NewKeeper creates a new IBC channel Keeper instance
+func NewKeeper(
+ cdc codec.BinaryMarshaler, key sdk.StoreKey,
+ clientKeeper types.ClientKeeper, connectionKeeper types.ConnectionKeeper,
+ portKeeper types.PortKeeper, scopedKeeper capabilitykeeper.ScopedKeeper,
+) Keeper {
+ return Keeper{
+ storeKey: key,
+ cdc: cdc,
+ clientKeeper: clientKeeper,
+ connectionKeeper: connectionKeeper,
+ portKeeper: portKeeper,
+ scopedKeeper: scopedKeeper,
+ }
+}
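+
+// Wiring sketch: the store key, codec and scoped keeper below are assumptions
+// about the host application's app.go, not values defined in this package.
+//
+//   scopedIBCKeeper := app.CapabilityKeeper.ScopeToModule(host.ModuleName)
+//   channelKeeper := keeper.NewKeeper(
+//       appCodec, keys[host.StoreKey],
+//       clientKeeper, connectionKeeper, portKeeper, scopedIBCKeeper,
+//   )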
+
+// Logger returns a module-specific logger.
+func (k Keeper) Logger(ctx sdk.Context) log.Logger {
+ return ctx.Logger().With("module", "x/"+host.ModuleName+"/"+types.SubModuleName)
+}
+
+// GenerateChannelIdentifier returns the next channel identifier.
+func (k Keeper) GenerateChannelIdentifier(ctx sdk.Context) string {
+ nextChannelSeq := k.GetNextChannelSequence(ctx)
+ channelID := types.FormatChannelIdentifier(nextChannelSeq)
+
+ nextChannelSeq++
+ k.SetNextChannelSequence(ctx, nextChannelSeq)
+ return channelID
+}
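+
+// Usage sketch: identifiers follow the "channel-{sequence}" format produced by
+// types.FormatChannelIdentifier, so with the sequence starting at 0 (assumed to
+// be set in genesis) successive calls yield:
+//
+//   first := k.GenerateChannelIdentifier(ctx)  // "channel-0"
+//   second := k.GenerateChannelIdentifier(ctx) // "channel-1"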
+
+// GetChannel returns a channel with a particular identifier bound to a specific port
+func (k Keeper) GetChannel(ctx sdk.Context, portID, channelID string) (types.Channel, bool) {
+ store := ctx.KVStore(k.storeKey)
+ bz := store.Get(host.ChannelKey(portID, channelID))
+ if bz == nil {
+ return types.Channel{}, false
+ }
+
+ var channel types.Channel
+ k.cdc.MustUnmarshalBinaryBare(bz, &channel)
+ return channel, true
+}
+
+// SetChannel sets a channel to the store
+func (k Keeper) SetChannel(ctx sdk.Context, portID, channelID string, channel types.Channel) {
+ store := ctx.KVStore(k.storeKey)
+ bz := k.cdc.MustMarshalBinaryBare(&channel)
+ store.Set(host.ChannelKey(portID, channelID), bz)
+}
+
+// GetNextChannelSequence gets the next channel sequence from the store.
+func (k Keeper) GetNextChannelSequence(ctx sdk.Context) uint64 {
+ store := ctx.KVStore(k.storeKey)
+ bz := store.Get([]byte(types.KeyNextChannelSequence))
+ if bz == nil {
+ panic("next channel sequence is nil")
+ }
+
+ return sdk.BigEndianToUint64(bz)
+}
+
+// SetNextChannelSequence sets the next channel sequence to the store.
+func (k Keeper) SetNextChannelSequence(ctx sdk.Context, sequence uint64) {
+ store := ctx.KVStore(k.storeKey)
+ bz := sdk.Uint64ToBigEndian(sequence)
+ store.Set([]byte(types.KeyNextChannelSequence), bz)
+}
+
+// GetNextSequenceSend gets a channel's next send sequence from the store
+func (k Keeper) GetNextSequenceSend(ctx sdk.Context, portID, channelID string) (uint64, bool) {
+ store := ctx.KVStore(k.storeKey)
+ bz := store.Get(host.NextSequenceSendKey(portID, channelID))
+ if bz == nil {
+ return 0, false
+ }
+
+ return sdk.BigEndianToUint64(bz), true
+}
+
+// SetNextSequenceSend sets a channel's next send sequence to the store
+func (k Keeper) SetNextSequenceSend(ctx sdk.Context, portID, channelID string, sequence uint64) {
+ store := ctx.KVStore(k.storeKey)
+ bz := sdk.Uint64ToBigEndian(sequence)
+ store.Set(host.NextSequenceSendKey(portID, channelID), bz)
+}
+
+// GetNextSequenceRecv gets a channel's next receive sequence from the store
+func (k Keeper) GetNextSequenceRecv(ctx sdk.Context, portID, channelID string) (uint64, bool) {
+ store := ctx.KVStore(k.storeKey)
+ bz := store.Get(host.NextSequenceRecvKey(portID, channelID))
+ if bz == nil {
+ return 0, false
+ }
+
+ return sdk.BigEndianToUint64(bz), true
+}
+
+// SetNextSequenceRecv sets a channel's next receive sequence to the store
+func (k Keeper) SetNextSequenceRecv(ctx sdk.Context, portID, channelID string, sequence uint64) {
+ store := ctx.KVStore(k.storeKey)
+ bz := sdk.Uint64ToBigEndian(sequence)
+ store.Set(host.NextSequenceRecvKey(portID, channelID), bz)
+}
+
+// GetNextSequenceAck gets a channel's next ack sequence from the store
+func (k Keeper) GetNextSequenceAck(ctx sdk.Context, portID, channelID string) (uint64, bool) {
+ store := ctx.KVStore(k.storeKey)
+ bz := store.Get(host.NextSequenceAckKey(portID, channelID))
+ if bz == nil {
+ return 0, false
+ }
+
+ return sdk.BigEndianToUint64(bz), true
+}
+
+// SetNextSequenceAck sets a channel's next ack sequence to the store
+func (k Keeper) SetNextSequenceAck(ctx sdk.Context, portID, channelID string, sequence uint64) {
+ store := ctx.KVStore(k.storeKey)
+ bz := sdk.Uint64ToBigEndian(sequence)
+ store.Set(host.NextSequenceAckKey(portID, channelID), bz)
+}
+
+// GetPacketReceipt gets a packet receipt from the store
+func (k Keeper) GetPacketReceipt(ctx sdk.Context, portID, channelID string, sequence uint64) (string, bool) {
+ store := ctx.KVStore(k.storeKey)
+ bz := store.Get(host.PacketReceiptKey(portID, channelID, sequence))
+ if bz == nil {
+ return "", false
+ }
+
+ return string(bz), true
+}
+
+// SetPacketReceipt sets an empty packet receipt to the store
+func (k Keeper) SetPacketReceipt(ctx sdk.Context, portID, channelID string, sequence uint64) {
+ store := ctx.KVStore(k.storeKey)
+ store.Set(host.PacketReceiptKey(portID, channelID, sequence), []byte{byte(1)})
+}
+
+// GetPacketCommitment gets the packet commitment hash from the store
+func (k Keeper) GetPacketCommitment(ctx sdk.Context, portID, channelID string, sequence uint64) []byte {
+ store := ctx.KVStore(k.storeKey)
+ bz := store.Get(host.PacketCommitmentKey(portID, channelID, sequence))
+ return bz
+}
+
+// HasPacketCommitment returns true if the packet commitment exists
+func (k Keeper) HasPacketCommitment(ctx sdk.Context, portID, channelID string, sequence uint64) bool {
+ store := ctx.KVStore(k.storeKey)
+ return store.Has(host.PacketCommitmentKey(portID, channelID, sequence))
+}
+
+// SetPacketCommitment sets the packet commitment hash to the store
+func (k Keeper) SetPacketCommitment(ctx sdk.Context, portID, channelID string, sequence uint64, commitmentHash []byte) {
+ store := ctx.KVStore(k.storeKey)
+ store.Set(host.PacketCommitmentKey(portID, channelID, sequence), commitmentHash)
+}
+
+func (k Keeper) deletePacketCommitment(ctx sdk.Context, portID, channelID string, sequence uint64) {
+ store := ctx.KVStore(k.storeKey)
+ store.Delete(host.PacketCommitmentKey(portID, channelID, sequence))
+}
+
+// SetPacketAcknowledgement sets the packet ack hash to the store
+func (k Keeper) SetPacketAcknowledgement(ctx sdk.Context, portID, channelID string, sequence uint64, ackHash []byte) {
+ store := ctx.KVStore(k.storeKey)
+ store.Set(host.PacketAcknowledgementKey(portID, channelID, sequence), ackHash)
+}
+
+// GetPacketAcknowledgement gets the packet ack hash from the store
+func (k Keeper) GetPacketAcknowledgement(ctx sdk.Context, portID, channelID string, sequence uint64) ([]byte, bool) {
+ store := ctx.KVStore(k.storeKey)
+ bz := store.Get(host.PacketAcknowledgementKey(portID, channelID, sequence))
+ if bz == nil {
+ return nil, false
+ }
+ return bz, true
+}
+
+// HasPacketAcknowledgement checks if the packet ack hash is already in the store
+func (k Keeper) HasPacketAcknowledgement(ctx sdk.Context, portID, channelID string, sequence uint64) bool {
+ store := ctx.KVStore(k.storeKey)
+ return store.Has(host.PacketAcknowledgementKey(portID, channelID, sequence))
+}
+
+// IteratePacketSequence provides an iterator over all send, receive or ack sequences.
+// For each sequence, cb will be called. If the cb returns true, the iterator
+// will close and stop.
+func (k Keeper) IteratePacketSequence(ctx sdk.Context, iterator db.Iterator, cb func(portID, channelID string, sequence uint64) bool) {
+ defer iterator.Close()
+ for ; iterator.Valid(); iterator.Next() {
+ portID, channelID, err := host.ParseChannelPath(string(iterator.Key()))
+ if err != nil {
+ // return if the key is not a channel key
+ return
+ }
+
+ sequence := sdk.BigEndianToUint64(iterator.Value())
+
+ if cb(portID, channelID, sequence) {
+ break
+ }
+ }
+}
+
+// GetAllPacketSendSeqs returns all stored next send sequences.
+func (k Keeper) GetAllPacketSendSeqs(ctx sdk.Context) (seqs []types.PacketSequence) {
+ store := ctx.KVStore(k.storeKey)
+ iterator := sdk.KVStorePrefixIterator(store, []byte(host.KeyNextSeqSendPrefix))
+ k.IteratePacketSequence(ctx, iterator, func(portID, channelID string, nextSendSeq uint64) bool {
+ ps := types.NewPacketSequence(portID, channelID, nextSendSeq)
+ seqs = append(seqs, ps)
+ return false
+ })
+ return seqs
+}
+
+// GetAllPacketRecvSeqs returns all stored next recv sequences.
+func (k Keeper) GetAllPacketRecvSeqs(ctx sdk.Context) (seqs []types.PacketSequence) {
+ store := ctx.KVStore(k.storeKey)
+ iterator := sdk.KVStorePrefixIterator(store, []byte(host.KeyNextSeqRecvPrefix))
+ k.IteratePacketSequence(ctx, iterator, func(portID, channelID string, nextRecvSeq uint64) bool {
+ ps := types.NewPacketSequence(portID, channelID, nextRecvSeq)
+ seqs = append(seqs, ps)
+ return false
+ })
+ return seqs
+}
+
+// GetAllPacketAckSeqs returns all stored next acknowledgement sequences.
+func (k Keeper) GetAllPacketAckSeqs(ctx sdk.Context) (seqs []types.PacketSequence) {
+ store := ctx.KVStore(k.storeKey)
+ iterator := sdk.KVStorePrefixIterator(store, []byte(host.KeyNextSeqAckPrefix))
+ k.IteratePacketSequence(ctx, iterator, func(portID, channelID string, nextAckSeq uint64) bool {
+ ps := types.NewPacketSequence(portID, channelID, nextAckSeq)
+ seqs = append(seqs, ps)
+ return false
+ })
+ return seqs
+}
+
+// IteratePacketCommitment provides an iterator over all PacketCommitment objects. For each
+// packet commitment, cb will be called. If the cb returns true, the iterator will close
+// and stop.
+func (k Keeper) IteratePacketCommitment(ctx sdk.Context, cb func(portID, channelID string, sequence uint64, hash []byte) bool) {
+ store := ctx.KVStore(k.storeKey)
+ iterator := sdk.KVStorePrefixIterator(store, []byte(host.KeyPacketCommitmentPrefix))
+ k.iterateHashes(ctx, iterator, cb)
+}
+
+// GetAllPacketCommitments returns all stored PacketCommitment objects.
+func (k Keeper) GetAllPacketCommitments(ctx sdk.Context) (commitments []types.PacketState) {
+ k.IteratePacketCommitment(ctx, func(portID, channelID string, sequence uint64, hash []byte) bool {
+ pc := types.NewPacketState(portID, channelID, sequence, hash)
+ commitments = append(commitments, pc)
+ return false
+ })
+ return commitments
+}
+
+// IteratePacketCommitmentAtChannel provides an iterator over all PacketCommitment objects
+// at a specified channel. For each packet commitment, cb will be called. If the cb returns
+// true, the iterator will close and stop.
+func (k Keeper) IteratePacketCommitmentAtChannel(ctx sdk.Context, portID, channelID string, cb func(_, _ string, sequence uint64, hash []byte) bool) {
+ store := ctx.KVStore(k.storeKey)
+ iterator := sdk.KVStorePrefixIterator(store, []byte(host.PacketCommitmentPrefixPath(portID, channelID)))
+ k.iterateHashes(ctx, iterator, cb)
+}
+
+// GetAllPacketCommitmentsAtChannel returns all stored PacketCommitment objects for a specified
+// port ID and channel ID.
+func (k Keeper) GetAllPacketCommitmentsAtChannel(ctx sdk.Context, portID, channelID string) (commitments []types.PacketState) {
+ k.IteratePacketCommitmentAtChannel(ctx, portID, channelID, func(_, _ string, sequence uint64, hash []byte) bool {
+ pc := types.NewPacketState(portID, channelID, sequence, hash)
+ commitments = append(commitments, pc)
+ return false
+ })
+ return commitments
+}
+
+// IteratePacketReceipt provides an iterator over all PacketReceipt objects. For each
+// receipt, cb will be called. If the cb returns true, the iterator will close
+// and stop.
+func (k Keeper) IteratePacketReceipt(ctx sdk.Context, cb func(portID, channelID string, sequence uint64, receipt []byte) bool) {
+ store := ctx.KVStore(k.storeKey)
+ iterator := sdk.KVStorePrefixIterator(store, []byte(host.KeyPacketReceiptPrefix))
+ k.iterateHashes(ctx, iterator, cb)
+}
+
+// GetAllPacketReceipts returns all stored PacketReceipt objects.
+func (k Keeper) GetAllPacketReceipts(ctx sdk.Context) (receipts []types.PacketState) {
+ k.IteratePacketReceipt(ctx, func(portID, channelID string, sequence uint64, receipt []byte) bool {
+ packetReceipt := types.NewPacketState(portID, channelID, sequence, receipt)
+ receipts = append(receipts, packetReceipt)
+ return false
+ })
+ return receipts
+}
+
+// IteratePacketAcknowledgement provides an iterator over all PacketAcknowledgement objects. For each
+// acknowledgement, cb will be called. If the cb returns true, the iterator will close
+// and stop.
+func (k Keeper) IteratePacketAcknowledgement(ctx sdk.Context, cb func(portID, channelID string, sequence uint64, hash []byte) bool) {
+ store := ctx.KVStore(k.storeKey)
+ iterator := sdk.KVStorePrefixIterator(store, []byte(host.KeyPacketAckPrefix))
+ k.iterateHashes(ctx, iterator, cb)
+}
+
+// GetAllPacketAcks returns all stored PacketAcknowledgement objects.
+func (k Keeper) GetAllPacketAcks(ctx sdk.Context) (acks []types.PacketState) {
+ k.IteratePacketAcknowledgement(ctx, func(portID, channelID string, sequence uint64, ack []byte) bool {
+ packetAck := types.NewPacketState(portID, channelID, sequence, ack)
+ acks = append(acks, packetAck)
+ return false
+ })
+ return acks
+}
+
+// IterateChannels provides an iterator over all Channel objects. For each
+// Channel, cb will be called. If the cb returns true, the iterator will close
+// and stop.
+func (k Keeper) IterateChannels(ctx sdk.Context, cb func(types.IdentifiedChannel) bool) {
+ store := ctx.KVStore(k.storeKey)
+ iterator := sdk.KVStorePrefixIterator(store, []byte(host.KeyChannelEndPrefix))
+
+ defer iterator.Close()
+ for ; iterator.Valid(); iterator.Next() {
+ var channel types.Channel
+ k.cdc.MustUnmarshalBinaryBare(iterator.Value(), &channel)
+
+ portID, channelID := host.MustParseChannelPath(string(iterator.Key()))
+ identifiedChannel := types.NewIdentifiedChannel(portID, channelID, channel)
+ if cb(identifiedChannel) {
+ break
+ }
+ }
+}
+
+// GetAllChannels returns all stored Channel objects.
+func (k Keeper) GetAllChannels(ctx sdk.Context) (channels []types.IdentifiedChannel) {
+ k.IterateChannels(ctx, func(channel types.IdentifiedChannel) bool {
+ channels = append(channels, channel)
+ return false
+ })
+ return channels
+}
+
+// GetChannelClientState returns the associated client state with its ID, from a port and channel identifier.
+func (k Keeper) GetChannelClientState(ctx sdk.Context, portID, channelID string) (string, exported.ClientState, error) {
+ channel, found := k.GetChannel(ctx, portID, channelID)
+ if !found {
+ return "", nil, sdkerrors.Wrapf(types.ErrChannelNotFound, "port-id: %s, channel-id: %s", portID, channelID)
+ }
+
+ connection, found := k.connectionKeeper.GetConnection(ctx, channel.ConnectionHops[0])
+ if !found {
+ return "", nil, sdkerrors.Wrapf(connectiontypes.ErrConnectionNotFound, "connection-id: %s", channel.ConnectionHops[0])
+ }
+
+ clientState, found := k.clientKeeper.GetClientState(ctx, connection.ClientId)
+ if !found {
+ return "", nil, sdkerrors.Wrapf(clienttypes.ErrClientNotFound, "client-id: %s", connection.ClientId)
+ }
+
+ return connection.ClientId, clientState, nil
+}
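+
+// Usage sketch: callers can follow the channel -> connection -> client chain to
+// inspect the client before verifying proofs (portID and channelID are assumed
+// inputs; error handling is elided):
+//
+//   clientID, clientState, err := k.GetChannelClientState(ctx, portID, channelID)
+//   if err != nil {
+//       return err
+//   }
+//   latestHeight := clientState.GetLatestHeight()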
+
+// LookupModuleByChannel will return the IBCModule along with the capability associated with a given channel defined by its portID and channelID
+func (k Keeper) LookupModuleByChannel(ctx sdk.Context, portID, channelID string) (string, *capabilitytypes.Capability, error) {
+ modules, cap, err := k.scopedKeeper.LookupModules(ctx, host.ChannelCapabilityPath(portID, channelID))
+ if err != nil {
+ return "", nil, err
+ }
+
+ return porttypes.GetModuleOwner(modules), cap, nil
+}
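+
+// Usage sketch: the returned module name is typically used to route to the
+// owning IBC application via the port router (the router variable is an
+// assumption about the surrounding app wiring):
+//
+//   module, cap, err := k.LookupModuleByChannel(ctx, portID, channelID)
+//   if err != nil {
+//       return err
+//   }
+//   cbs, ok := router.GetRoute(module) // callbacks of the module that owns cap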
+
+// common functionality for IteratePacketCommitment and IteratePacketAcknowledgement
+func (k Keeper) iterateHashes(_ sdk.Context, iterator db.Iterator, cb func(portID, channelID string, sequence uint64, hash []byte) bool) {
+ defer iterator.Close()
+
+ for ; iterator.Valid(); iterator.Next() {
+ keySplit := strings.Split(string(iterator.Key()), "/")
+ portID := keySplit[2]
+ channelID := keySplit[4]
+
+ sequence, err := strconv.ParseUint(keySplit[len(keySplit)-1], 10, 64)
+ if err != nil {
+ panic(err)
+ }
+
+ if cb(portID, channelID, sequence, iterator.Value()) {
+ break
+ }
+ }
+}
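+
+// Note: iterateHashes assumes keys shaped like the ICS-24 packet paths, e.g.
+//
+//   commitments/ports/{portID}/channels/{channelID}/sequences/{sequence}
+//
+// so keySplit[2] is the port ID, keySplit[4] is the channel ID and the last
+// element is the sequence.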
diff --git a/core/04-channel/keeper/keeper_test.go b/core/04-channel/keeper/keeper_test.go
new file mode 100644
index 00000000..a9b7dd6c
--- /dev/null
+++ b/core/04-channel/keeper/keeper_test.go
@@ -0,0 +1,329 @@
+package keeper_test
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/suite"
+
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+)
+
+// KeeperTestSuite is a testing suite to test keeper functions.
+type KeeperTestSuite struct {
+ suite.Suite
+
+ coordinator *ibctesting.Coordinator
+
+ // testing chains used for convenience and readability
+ chainA *ibctesting.TestChain
+ chainB *ibctesting.TestChain
+}
+
+// TestKeeperTestSuite runs all the tests within this package.
+func TestKeeperTestSuite(t *testing.T) {
+ suite.Run(t, new(KeeperTestSuite))
+}
+
+// SetupTest creates a coordinator with 2 test chains.
+func (suite *KeeperTestSuite) SetupTest() {
+ suite.coordinator = ibctesting.NewCoordinator(suite.T(), 2)
+ suite.chainA = suite.coordinator.GetChain(ibctesting.GetChainID(0))
+ suite.chainB = suite.coordinator.GetChain(ibctesting.GetChainID(1))
+ // commit some blocks so that QueryProof returns valid proof (cannot return valid query if height <= 1)
+ suite.coordinator.CommitNBlocks(suite.chainA, 2)
+ suite.coordinator.CommitNBlocks(suite.chainB, 2)
+}
+
+// TestSetChannel creates clients and connections on both chains. It tests for the non-existence
+// and existence of a channel in INIT on chainA.
+func (suite *KeeperTestSuite) TestSetChannel() {
+ // create client and connections on both chains
+ _, _, connA, connB := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
+
+ // check that the channel does not yet exist on chainA
+ channelA := suite.chainA.NextTestChannel(connA, ibctesting.MockPort)
+ _, found := suite.chainA.App.IBCKeeper.ChannelKeeper.GetChannel(suite.chainA.GetContext(), channelA.PortID, channelA.ID)
+ suite.False(found)
+
+ // init channel
+ channelA, channelB, err := suite.coordinator.ChanOpenInit(suite.chainA, suite.chainB, connA, connB, ibctesting.MockPort, ibctesting.MockPort, types.ORDERED)
+ suite.NoError(err)
+
+ storedChannel, found := suite.chainA.App.IBCKeeper.ChannelKeeper.GetChannel(suite.chainA.GetContext(), channelA.PortID, channelA.ID)
+ // counterparty channel id is empty after open init
+ expectedCounterparty := types.NewCounterparty(channelB.PortID, "")
+
+ suite.True(found)
+ suite.Equal(types.INIT, storedChannel.State)
+ suite.Equal(types.ORDERED, storedChannel.Ordering)
+ suite.Equal(expectedCounterparty, storedChannel.Counterparty)
+}
+
+// TestGetAllChannels creates multiple channels on chain A through various connections
+// and tests their retrieval. 2 channels are on connA0 and 1 channel is on connA1
+func (suite KeeperTestSuite) TestGetAllChannels() {
+ clientA, clientB, connA0, connB0, testchannel0, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ // channel0 on first connection on chainA
+ counterparty0 := types.Counterparty{
+ PortId: connB0.Channels[0].PortID,
+ ChannelId: connB0.Channels[0].ID,
+ }
+
+ // channel1 is second channel on first connection on chainA
+ testchannel1, _ := suite.coordinator.CreateMockChannels(suite.chainA, suite.chainB, connA0, connB0, types.ORDERED)
+ counterparty1 := types.Counterparty{
+ PortId: connB0.Channels[1].PortID,
+ ChannelId: connB0.Channels[1].ID,
+ }
+
+ connA1, connB1 := suite.coordinator.CreateConnection(suite.chainA, suite.chainB, clientA, clientB)
+
+ // channel2 is on a second connection on chainA
+ testchannel2, _, err := suite.coordinator.ChanOpenInit(suite.chainA, suite.chainB, connA1, connB1, ibctesting.MockPort, ibctesting.MockPort, types.UNORDERED)
+ suite.Require().NoError(err)
+
+ // counterparty channel id is empty after open init
+ counterparty2 := types.Counterparty{
+ PortId: connB1.Channels[0].PortID,
+ ChannelId: "",
+ }
+
+ channel0 := types.NewChannel(
+ types.OPEN, types.UNORDERED,
+ counterparty0, []string{connA0.ID}, testchannel0.Version,
+ )
+ channel1 := types.NewChannel(
+ types.OPEN, types.ORDERED,
+ counterparty1, []string{connA0.ID}, testchannel1.Version,
+ )
+ channel2 := types.NewChannel(
+ types.INIT, types.UNORDERED,
+ counterparty2, []string{connA1.ID}, testchannel2.Version,
+ )
+
+ expChannels := []types.IdentifiedChannel{
+ types.NewIdentifiedChannel(testchannel0.PortID, testchannel0.ID, channel0),
+ types.NewIdentifiedChannel(testchannel1.PortID, testchannel1.ID, channel1),
+ types.NewIdentifiedChannel(testchannel2.PortID, testchannel2.ID, channel2),
+ }
+
+ ctxA := suite.chainA.GetContext()
+
+ channels := suite.chainA.App.IBCKeeper.ChannelKeeper.GetAllChannels(ctxA)
+ suite.Require().Len(channels, len(expChannels))
+ suite.Require().Equal(expChannels, channels)
+}
+
+// TestGetAllSequences sets all packet sequences for two different channels on chain A and
+// tests their retrieval.
+func (suite KeeperTestSuite) TestGetAllSequences() {
+ _, _, connA, connB, channelA0, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ channelA1, _ := suite.coordinator.CreateMockChannels(suite.chainA, suite.chainB, connA, connB, types.UNORDERED)
+
+ seq1 := types.NewPacketSequence(channelA0.PortID, channelA0.ID, 1)
+ seq2 := types.NewPacketSequence(channelA0.PortID, channelA0.ID, 2)
+ seq3 := types.NewPacketSequence(channelA1.PortID, channelA1.ID, 3)
+
+ // seq1 should be overwritten by seq2
+ expSeqs := []types.PacketSequence{seq2, seq3}
+
+ ctxA := suite.chainA.GetContext()
+
+ for _, seq := range []types.PacketSequence{seq1, seq2, seq3} {
+ suite.chainA.App.IBCKeeper.ChannelKeeper.SetNextSequenceSend(ctxA, seq.PortId, seq.ChannelId, seq.Sequence)
+ suite.chainA.App.IBCKeeper.ChannelKeeper.SetNextSequenceRecv(ctxA, seq.PortId, seq.ChannelId, seq.Sequence)
+ suite.chainA.App.IBCKeeper.ChannelKeeper.SetNextSequenceAck(ctxA, seq.PortId, seq.ChannelId, seq.Sequence)
+ }
+
+ sendSeqs := suite.chainA.App.IBCKeeper.ChannelKeeper.GetAllPacketSendSeqs(ctxA)
+ recvSeqs := suite.chainA.App.IBCKeeper.ChannelKeeper.GetAllPacketRecvSeqs(ctxA)
+ ackSeqs := suite.chainA.App.IBCKeeper.ChannelKeeper.GetAllPacketAckSeqs(ctxA)
+ suite.Len(sendSeqs, 2)
+ suite.Len(recvSeqs, 2)
+ suite.Len(ackSeqs, 2)
+
+ suite.Equal(expSeqs, sendSeqs)
+ suite.Equal(expSeqs, recvSeqs)
+ suite.Equal(expSeqs, ackSeqs)
+}
+
+// TestGetAllPacketState creates a set of acks, packet commitments, and receipts on two different
+// channels on chain A and tests their retrieval.
+func (suite KeeperTestSuite) TestGetAllPacketState() {
+ _, _, connA, connB, channelA0, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ channelA1, _ := suite.coordinator.CreateMockChannels(suite.chainA, suite.chainB, connA, connB, types.UNORDERED)
+
+ // channel 0 acks
+ ack1 := types.NewPacketState(channelA0.PortID, channelA0.ID, 1, []byte("ack"))
+ ack2 := types.NewPacketState(channelA0.PortID, channelA0.ID, 2, []byte("ack"))
+
+ // duplicate ack
+ ack2dup := types.NewPacketState(channelA0.PortID, channelA0.ID, 2, []byte("ack"))
+
+ // channel 1 acks
+ ack3 := types.NewPacketState(channelA1.PortID, channelA1.ID, 1, []byte("ack"))
+
+ // create channel 0 receipts
+ receipt := string([]byte{byte(1)})
+ rec1 := types.NewPacketState(channelA0.PortID, channelA0.ID, 1, []byte(receipt))
+ rec2 := types.NewPacketState(channelA0.PortID, channelA0.ID, 2, []byte(receipt))
+
+ // channel 1 receipts
+ rec3 := types.NewPacketState(channelA1.PortID, channelA1.ID, 1, []byte(receipt))
+ rec4 := types.NewPacketState(channelA1.PortID, channelA1.ID, 2, []byte(receipt))
+
+ // channel 0 packet commitments
+ comm1 := types.NewPacketState(channelA0.PortID, channelA0.ID, 1, []byte("hash"))
+ comm2 := types.NewPacketState(channelA0.PortID, channelA0.ID, 2, []byte("hash"))
+
+ // channel 1 packet commitments
+ comm3 := types.NewPacketState(channelA1.PortID, channelA1.ID, 1, []byte("hash"))
+ comm4 := types.NewPacketState(channelA1.PortID, channelA1.ID, 2, []byte("hash"))
+
+ expAcks := []types.PacketState{ack1, ack2, ack3}
+ expReceipts := []types.PacketState{rec1, rec2, rec3, rec4}
+ expCommitments := []types.PacketState{comm1, comm2, comm3, comm4}
+
+ ctxA := suite.chainA.GetContext()
+
+ // set acknowledgements
+ for _, ack := range []types.PacketState{ack1, ack2, ack2dup, ack3} {
+ suite.chainA.App.IBCKeeper.ChannelKeeper.SetPacketAcknowledgement(ctxA, ack.PortId, ack.ChannelId, ack.Sequence, ack.Data)
+ }
+
+ // set packet receipts
+ for _, rec := range expReceipts {
+ suite.chainA.App.IBCKeeper.ChannelKeeper.SetPacketReceipt(ctxA, rec.PortId, rec.ChannelId, rec.Sequence)
+ }
+
+ // set packet commitments
+ for _, comm := range expCommitments {
+ suite.chainA.App.IBCKeeper.ChannelKeeper.SetPacketCommitment(ctxA, comm.PortId, comm.ChannelId, comm.Sequence, comm.Data)
+ }
+
+ acks := suite.chainA.App.IBCKeeper.ChannelKeeper.GetAllPacketAcks(ctxA)
+ receipts := suite.chainA.App.IBCKeeper.ChannelKeeper.GetAllPacketReceipts(ctxA)
+ commitments := suite.chainA.App.IBCKeeper.ChannelKeeper.GetAllPacketCommitments(ctxA)
+
+ suite.Require().Len(acks, len(expAcks))
+ suite.Require().Len(commitments, len(expCommitments))
+ suite.Require().Len(receipts, len(expReceipts))
+
+ suite.Require().Equal(expAcks, acks)
+ suite.Require().Equal(expReceipts, receipts)
+ suite.Require().Equal(expCommitments, commitments)
+}
+
+// TestSetSequence verifies that the keeper correctly sets the sequence counters.
+func (suite *KeeperTestSuite) TestSetSequence() {
+ _, _, _, _, channelA, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+
+ ctxA := suite.chainA.GetContext()
+ one := uint64(1)
+
+ // initialized channel has next send seq of 1
+ seq, found := suite.chainA.App.IBCKeeper.ChannelKeeper.GetNextSequenceSend(ctxA, channelA.PortID, channelA.ID)
+ suite.True(found)
+ suite.Equal(one, seq)
+
+ // initialized channel has next seq recv of 1
+ seq, found = suite.chainA.App.IBCKeeper.ChannelKeeper.GetNextSequenceRecv(ctxA, channelA.PortID, channelA.ID)
+ suite.True(found)
+ suite.Equal(one, seq)
+
+ // initialized channel has next seq ack of 1
+ seq, found = suite.chainA.App.IBCKeeper.ChannelKeeper.GetNextSequenceAck(ctxA, channelA.PortID, channelA.ID)
+ suite.True(found)
+ suite.Equal(one, seq)
+
+ nextSeqSend, nextSeqRecv, nextSeqAck := uint64(10), uint64(10), uint64(10)
+ suite.chainA.App.IBCKeeper.ChannelKeeper.SetNextSequenceSend(ctxA, channelA.PortID, channelA.ID, nextSeqSend)
+ suite.chainA.App.IBCKeeper.ChannelKeeper.SetNextSequenceRecv(ctxA, channelA.PortID, channelA.ID, nextSeqRecv)
+ suite.chainA.App.IBCKeeper.ChannelKeeper.SetNextSequenceAck(ctxA, channelA.PortID, channelA.ID, nextSeqAck)
+
+ storedNextSeqSend, found := suite.chainA.App.IBCKeeper.ChannelKeeper.GetNextSequenceSend(ctxA, channelA.PortID, channelA.ID)
+ suite.True(found)
+ suite.Equal(nextSeqSend, storedNextSeqSend)
+
+ storedNextSeqRecv, found := suite.chainA.App.IBCKeeper.ChannelKeeper.GetNextSequenceRecv(ctxA, channelA.PortID, channelA.ID)
+ suite.True(found)
+ suite.Equal(nextSeqRecv, storedNextSeqRecv)
+
+ storedNextSeqAck, found := suite.chainA.App.IBCKeeper.ChannelKeeper.GetNextSequenceAck(ctxA, channelA.PortID, channelA.ID)
+ suite.True(found)
+ suite.Equal(nextSeqAck, storedNextSeqAck)
+}
+
+// TestGetAllPacketCommitmentsAtChannel verifies that the keeper returns all stored packet
+// commitments for a specific channel. The test will store consecutive commitments up to the
+// value of "seq" and then add non-consecutive up to the value of "maxSeq". A final commitment
+// with the value maxSeq + 1 is set on a different channel.
+func (suite *KeeperTestSuite) TestGetAllPacketCommitmentsAtChannel() {
+ _, _, connA, connB, channelA, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+
+ // create second channel
+ channelA1, _ := suite.coordinator.CreateMockChannels(suite.chainA, suite.chainB, connA, connB, types.UNORDERED)
+
+ ctxA := suite.chainA.GetContext()
+ expectedSeqs := make(map[uint64]bool)
+ hash := []byte("commitment")
+
+ seq := uint64(15)
+ maxSeq := uint64(25)
+ suite.Require().Greater(maxSeq, seq)
+
+ // create consecutive commitments
+ for i := uint64(1); i < seq; i++ {
+ suite.chainA.App.IBCKeeper.ChannelKeeper.SetPacketCommitment(ctxA, channelA.PortID, channelA.ID, i, hash)
+ expectedSeqs[i] = true
+ }
+
+ // add non-consecutive commitments
+ for i := seq; i < maxSeq; i += 2 {
+ suite.chainA.App.IBCKeeper.ChannelKeeper.SetPacketCommitment(ctxA, channelA.PortID, channelA.ID, i, hash)
+ expectedSeqs[i] = true
+ }
+
+ // add sequence on different channel/port
+ suite.chainA.App.IBCKeeper.ChannelKeeper.SetPacketCommitment(ctxA, channelA1.PortID, channelA1.ID, maxSeq+1, hash)
+
+ commitments := suite.chainA.App.IBCKeeper.ChannelKeeper.GetAllPacketCommitmentsAtChannel(ctxA, channelA.PortID, channelA.ID)
+
+ suite.Equal(len(expectedSeqs), len(commitments))
+ // ensure the for loops above actually stored commitments
+ suite.NotEqual(0, len(commitments))
+
+ // verify that all the packet commitments were stored
+ for _, packet := range commitments {
+ suite.True(expectedSeqs[packet.Sequence])
+ suite.Equal(channelA.PortID, packet.PortId)
+ suite.Equal(channelA.ID, packet.ChannelId)
+ suite.Equal(hash, packet.Data)
+
+ // prevent duplicates from passing checks
+ expectedSeqs[packet.Sequence] = false
+ }
+}
+
+// TestSetPacketAcknowledgement verifies that packet acknowledgements are correctly
+// set in the keeper.
+func (suite *KeeperTestSuite) TestSetPacketAcknowledgement() {
+ _, _, _, _, channelA, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+
+ ctxA := suite.chainA.GetContext()
+ seq := uint64(10)
+
+ storedAckHash, found := suite.chainA.App.IBCKeeper.ChannelKeeper.GetPacketAcknowledgement(ctxA, channelA.PortID, channelA.ID, seq)
+ suite.Require().False(found)
+ suite.Require().Nil(storedAckHash)
+
+ ackHash := []byte("ackhash")
+ suite.chainA.App.IBCKeeper.ChannelKeeper.SetPacketAcknowledgement(ctxA, channelA.PortID, channelA.ID, seq, ackHash)
+
+ storedAckHash, found = suite.chainA.App.IBCKeeper.ChannelKeeper.GetPacketAcknowledgement(ctxA, channelA.PortID, channelA.ID, seq)
+ suite.Require().True(found)
+ suite.Require().Equal(ackHash, storedAckHash)
+ suite.Require().True(suite.chainA.App.IBCKeeper.ChannelKeeper.HasPacketAcknowledgement(ctxA, channelA.PortID, channelA.ID, seq))
+}
diff --git a/core/04-channel/keeper/packet.go b/core/04-channel/keeper/packet.go
new file mode 100644
index 00000000..49b59733
--- /dev/null
+++ b/core/04-channel/keeper/packet.go
@@ -0,0 +1,528 @@
+package keeper
+
+import (
+ "bytes"
+ "fmt"
+ "time"
+
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+ host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+)
+
+// SendPacket is called by a module in order to send an IBC packet on a channel
+// end owned by the calling module to the corresponding module on the counterparty
+// chain.
+func (k Keeper) SendPacket(
+ ctx sdk.Context,
+ channelCap *capabilitytypes.Capability,
+ packet exported.PacketI,
+) error {
+ if err := packet.ValidateBasic(); err != nil {
+ return sdkerrors.Wrap(err, "packet failed basic validation")
+ }
+
+ channel, found := k.GetChannel(ctx, packet.GetSourcePort(), packet.GetSourceChannel())
+ if !found {
+ return sdkerrors.Wrap(types.ErrChannelNotFound, packet.GetSourceChannel())
+ }
+
+ if channel.State == types.CLOSED {
+ return sdkerrors.Wrapf(
+ types.ErrInvalidChannelState,
+ "channel is CLOSED (got %s)", channel.State.String(),
+ )
+ }
+
+ if !k.scopedKeeper.AuthenticateCapability(ctx, channelCap, host.ChannelCapabilityPath(packet.GetSourcePort(), packet.GetSourceChannel())) {
+ return sdkerrors.Wrapf(types.ErrChannelCapabilityNotFound, "caller does not own capability for channel, port ID (%s) channel ID (%s)", packet.GetSourcePort(), packet.GetSourceChannel())
+ }
+
+ if packet.GetDestPort() != channel.Counterparty.PortId {
+ return sdkerrors.Wrapf(
+ types.ErrInvalidPacket,
+ "packet destination port doesn't match the counterparty's port (%s ≠ %s)", packet.GetDestPort(), channel.Counterparty.PortId,
+ )
+ }
+
+ if packet.GetDestChannel() != channel.Counterparty.ChannelId {
+ return sdkerrors.Wrapf(
+ types.ErrInvalidPacket,
+ "packet destination channel doesn't match the counterparty's channel (%s ≠ %s)", packet.GetDestChannel(), channel.Counterparty.ChannelId,
+ )
+ }
+
+ connectionEnd, found := k.connectionKeeper.GetConnection(ctx, channel.ConnectionHops[0])
+ if !found {
+ return sdkerrors.Wrap(connectiontypes.ErrConnectionNotFound, channel.ConnectionHops[0])
+ }
+
+ clientState, found := k.clientKeeper.GetClientState(ctx, connectionEnd.GetClientID())
+ if !found {
+ return clienttypes.ErrConsensusStateNotFound
+ }
+
+ // prevent accidental sends with clients that cannot be updated
+ if clientState.IsFrozen() {
+ return sdkerrors.Wrapf(clienttypes.ErrClientFrozen, "cannot send packet on a frozen client with ID %s", connectionEnd.GetClientID())
+ }
+
+ // check if the packet has timed out on the receiving chain
+ latestHeight := clientState.GetLatestHeight()
+ timeoutHeight := packet.GetTimeoutHeight()
+ if !timeoutHeight.IsZero() && latestHeight.GTE(timeoutHeight) {
+ return sdkerrors.Wrapf(
+ types.ErrPacketTimeout,
+ "receiving chain block height >= packet timeout height (%s >= %s)", latestHeight, timeoutHeight,
+ )
+ }
+
+ latestTimestamp, err := k.connectionKeeper.GetTimestampAtHeight(ctx, connectionEnd, latestHeight)
+ if err != nil {
+ return err
+ }
+
+ if packet.GetTimeoutTimestamp() != 0 && latestTimestamp >= packet.GetTimeoutTimestamp() {
+ return sdkerrors.Wrapf(
+ types.ErrPacketTimeout,
+ "receiving chain block timestamp >= packet timeout timestamp (%s >= %s)", time.Unix(0, int64(latestTimestamp)), time.Unix(0, int64(packet.GetTimeoutTimestamp())),
+ )
+ }
+
+ nextSequenceSend, found := k.GetNextSequenceSend(ctx, packet.GetSourcePort(), packet.GetSourceChannel())
+ if !found {
+ return sdkerrors.Wrapf(
+ types.ErrSequenceSendNotFound,
+ "source port: %s, source channel: %s", packet.GetSourcePort(), packet.GetSourceChannel(),
+ )
+ }
+
+ if packet.GetSequence() != nextSequenceSend {
+ return sdkerrors.Wrapf(
+ types.ErrInvalidPacket,
+ "packet sequence ≠ next send sequence (%d ≠ %d)", packet.GetSequence(), nextSequenceSend,
+ )
+ }
+
+ commitment := types.CommitPacket(k.cdc, packet)
+
+ nextSequenceSend++
+ k.SetNextSequenceSend(ctx, packet.GetSourcePort(), packet.GetSourceChannel(), nextSequenceSend)
+ k.SetPacketCommitment(ctx, packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence(), commitment)
+
+ // Emit Event with Packet data along with other packet information for relayer to pick up
+ // and relay to other chain
+ ctx.EventManager().EmitEvents(sdk.Events{
+ sdk.NewEvent(
+ types.EventTypeSendPacket,
+ sdk.NewAttribute(types.AttributeKeyData, string(packet.GetData())),
+ sdk.NewAttribute(types.AttributeKeyTimeoutHeight, timeoutHeight.String()),
+ sdk.NewAttribute(types.AttributeKeyTimeoutTimestamp, fmt.Sprintf("%d", packet.GetTimeoutTimestamp())),
+ sdk.NewAttribute(types.AttributeKeySequence, fmt.Sprintf("%d", packet.GetSequence())),
+ sdk.NewAttribute(types.AttributeKeySrcPort, packet.GetSourcePort()),
+ sdk.NewAttribute(types.AttributeKeySrcChannel, packet.GetSourceChannel()),
+ sdk.NewAttribute(types.AttributeKeyDstPort, packet.GetDestPort()),
+ sdk.NewAttribute(types.AttributeKeyDstChannel, packet.GetDestChannel()),
+ sdk.NewAttribute(types.AttributeKeyChannelOrdering, channel.Ordering.String()),
+ // we only support 1-hop packets now, and that is the most important hop for a relayer
+ // (is it going to a chain I am connected to)
+ sdk.NewAttribute(types.AttributeKeyConnection, channel.ConnectionHops[0]),
+ ),
+ sdk.NewEvent(
+ sdk.EventTypeMessage,
+ sdk.NewAttribute(sdk.AttributeKeyModule, types.AttributeValueCategory),
+ ),
+ })
+
+ k.Logger(ctx).Info("packet sent", "packet", fmt.Sprintf("%v", packet))
+ return nil
+}
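+
+// Usage sketch: a module that owns its channel capability sends a packet by
+// constructing it with the next send sequence and calling SendPacket (the
+// surrounding variables are assumptions about the calling application):
+//
+//   seq, _ := k.GetNextSequenceSend(ctx, sourcePort, sourceChannel)
+//   packet := types.NewPacket(data, seq, sourcePort, sourceChannel,
+//       destPort, destChannel, timeoutHeight, timeoutTimestamp)
+//   if err := k.SendPacket(ctx, chanCap, packet); err != nil {
+//       return err
+//   }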
+
+// RecvPacket is called by a module in order to receive & process an IBC packet
+// sent on the corresponding channel end on the counterparty chain.
+func (k Keeper) RecvPacket(
+ ctx sdk.Context,
+ chanCap *capabilitytypes.Capability,
+ packet exported.PacketI,
+ proof []byte,
+ proofHeight exported.Height,
+) error {
+ channel, found := k.GetChannel(ctx, packet.GetDestPort(), packet.GetDestChannel())
+ if !found {
+ return sdkerrors.Wrap(types.ErrChannelNotFound, packet.GetDestChannel())
+ }
+
+ if channel.State != types.OPEN {
+ return sdkerrors.Wrapf(
+ types.ErrInvalidChannelState,
+ "channel state is not OPEN (got %s)", channel.State.String(),
+ )
+ }
+
+ // Authenticate capability to ensure caller has authority to receive packet on this channel
+ capName := host.ChannelCapabilityPath(packet.GetDestPort(), packet.GetDestChannel())
+ if !k.scopedKeeper.AuthenticateCapability(ctx, chanCap, capName) {
+ return sdkerrors.Wrapf(
+ types.ErrInvalidChannelCapability,
+ "channel capability failed authentication for capability name %s", capName,
+ )
+ }
+
+ // packet must come from the channel's counterparty
+ if packet.GetSourcePort() != channel.Counterparty.PortId {
+ return sdkerrors.Wrapf(
+ types.ErrInvalidPacket,
+ "packet source port doesn't match the counterparty's port (%s ≠ %s)", packet.GetSourcePort(), channel.Counterparty.PortId,
+ )
+ }
+
+ if packet.GetSourceChannel() != channel.Counterparty.ChannelId {
+ return sdkerrors.Wrapf(
+ types.ErrInvalidPacket,
+ "packet source channel doesn't match the counterparty's channel (%s ≠ %s)", packet.GetSourceChannel(), channel.Counterparty.ChannelId,
+ )
+ }
+
+ // The connection must be OPEN to receive a packet. It is possible for the connection to not yet be open if the packet was
+ // sent optimistically, before the connection and channel handshakes completed. However, to receive a packet,
+ // the connection and channel must both be open
+ connectionEnd, found := k.connectionKeeper.GetConnection(ctx, channel.ConnectionHops[0])
+ if !found {
+ return sdkerrors.Wrap(connectiontypes.ErrConnectionNotFound, channel.ConnectionHops[0])
+ }
+
+ if connectionEnd.GetState() != int32(connectiontypes.OPEN) {
+ return sdkerrors.Wrapf(
+ connectiontypes.ErrInvalidConnectionState,
+ "connection state is not OPEN (got %s)", connectiontypes.State(connectionEnd.GetState()).String(),
+ )
+ }
+
+ // check if the packet has timed out by comparing it with the latest height of the chain
+ selfHeight := clienttypes.GetSelfHeight(ctx)
+ timeoutHeight := packet.GetTimeoutHeight()
+ if !timeoutHeight.IsZero() && selfHeight.GTE(timeoutHeight) {
+ return sdkerrors.Wrapf(
+ types.ErrPacketTimeout,
+ "block height >= packet timeout height (%s >= %s)", selfHeight, timeoutHeight,
+ )
+ }
+
+ // check if the packet has timed out by comparing it with the latest timestamp of the chain
+ if packet.GetTimeoutTimestamp() != 0 && uint64(ctx.BlockTime().UnixNano()) >= packet.GetTimeoutTimestamp() {
+ return sdkerrors.Wrapf(
+ types.ErrPacketTimeout,
+ "block timestamp >= packet timeout timestamp (%s >= %s)", ctx.BlockTime(), time.Unix(0, int64(packet.GetTimeoutTimestamp())),
+ )
+ }
+
+ commitment := types.CommitPacket(k.cdc, packet)
+
+ // verify that the counterparty did commit to sending this packet
+ if err := k.connectionKeeper.VerifyPacketCommitment(
+ ctx, connectionEnd, proofHeight, proof,
+ packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence(),
+ commitment,
+ ); err != nil {
+ return sdkerrors.Wrap(err, "couldn't verify counterparty packet commitment")
+ }
+
+ switch channel.Ordering {
+ case types.UNORDERED:
+ // check if the packet receipt has been received already for unordered channels
+ _, found := k.GetPacketReceipt(ctx, packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence())
+ if found {
+ return sdkerrors.Wrapf(
+ types.ErrInvalidPacket,
+ "packet sequence (%d) already has been received", packet.GetSequence(),
+ )
+ }
+
+ // All verification complete, update state
+ // For unordered channels we must set the receipt so it can be verified on the other side.
+ // This receipt does not contain any data, since the packet has not yet been processed,
+ // it's just a single store key set to a sentinel byte to indicate that the packet has been received
+ k.SetPacketReceipt(ctx, packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence())
+
+ case types.ORDERED:
+ // check if the packet is being received in order
+ nextSequenceRecv, found := k.GetNextSequenceRecv(ctx, packet.GetDestPort(), packet.GetDestChannel())
+ if !found {
+ return sdkerrors.Wrapf(
+ types.ErrSequenceReceiveNotFound,
+ "destination port: %s, destination channel: %s", packet.GetDestPort(), packet.GetDestChannel(),
+ )
+ }
+
+ if packet.GetSequence() != nextSequenceRecv {
+ return sdkerrors.Wrapf(
+ types.ErrInvalidPacket,
+ "packet sequence ≠ next receive sequence (%d ≠ %d)", packet.GetSequence(), nextSequenceRecv,
+ )
+ }
+
+ // All verification complete, update state
+ // In ordered case, we must increment nextSequenceRecv
+ nextSequenceRecv++
+
+ // incrementing nextSequenceRecv and storing under this chain's channelEnd identifiers
+ // Since this is the receiving chain, our channelEnd is packet's destination port and channel
+ k.SetNextSequenceRecv(ctx, packet.GetDestPort(), packet.GetDestChannel(), nextSequenceRecv)
+
+ }
+
+ // log that a packet has been received & executed
+ k.Logger(ctx).Info("packet received", "packet", fmt.Sprintf("%v", packet))
+
+ // emit an event that the relayer can query for
+ ctx.EventManager().EmitEvents(sdk.Events{
+ sdk.NewEvent(
+ types.EventTypeRecvPacket,
+ sdk.NewAttribute(types.AttributeKeyData, string(packet.GetData())),
+ sdk.NewAttribute(types.AttributeKeyTimeoutHeight, packet.GetTimeoutHeight().String()),
+ sdk.NewAttribute(types.AttributeKeyTimeoutTimestamp, fmt.Sprintf("%d", packet.GetTimeoutTimestamp())),
+ sdk.NewAttribute(types.AttributeKeySequence, fmt.Sprintf("%d", packet.GetSequence())),
+ sdk.NewAttribute(types.AttributeKeySrcPort, packet.GetSourcePort()),
+ sdk.NewAttribute(types.AttributeKeySrcChannel, packet.GetSourceChannel()),
+ sdk.NewAttribute(types.AttributeKeyDstPort, packet.GetDestPort()),
+ sdk.NewAttribute(types.AttributeKeyDstChannel, packet.GetDestChannel()),
+ sdk.NewAttribute(types.AttributeKeyChannelOrdering, channel.Ordering.String()),
+ // we only support 1-hop packets now, and that is the most important hop for a relayer
+ // (is it going to a chain I am connected to)
+ sdk.NewAttribute(types.AttributeKeyConnection, channel.ConnectionHops[0]),
+ ),
+ sdk.NewEvent(
+ sdk.EventTypeMessage,
+ sdk.NewAttribute(sdk.AttributeKeyModule, types.AttributeValueCategory),
+ ),
+ })
+
+ return nil
+}
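+
+// Usage sketch: RecvPacket is driven by a relayer-submitted message carrying
+// the counterparty's commitment proof; the application callback runs after
+// core verification (the message fields shown are assumptions about the
+// surrounding handler):
+//
+//   if err := k.RecvPacket(ctx, chanCap, msg.Packet, msg.ProofCommitment, msg.ProofHeight); err != nil {
+//       return err
+//   }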
+
+// WriteAcknowledgement writes the packet execution acknowledgement to the state,
+// which will be verified by the counterparty chain using AcknowledgePacket.
+//
+// CONTRACT:
+//
+// 1) For synchronous execution, this function is called in the IBC handler.
+// For async handling, it needs to be called directly by the module which originally
+// processed the packet.
+//
+// 2) Assumes that packet receipt has been written (unordered), or nextSeqRecv was incremented (ordered)
+// previously by RecvPacket.
+func (k Keeper) WriteAcknowledgement(
+ ctx sdk.Context,
+ chanCap *capabilitytypes.Capability,
+ packet exported.PacketI,
+ acknowledgement []byte,
+) error {
+ channel, found := k.GetChannel(ctx, packet.GetDestPort(), packet.GetDestChannel())
+ if !found {
+ return sdkerrors.Wrap(types.ErrChannelNotFound, packet.GetDestChannel())
+ }
+
+ if channel.State != types.OPEN {
+ return sdkerrors.Wrapf(
+ types.ErrInvalidChannelState,
+ "channel state is not OPEN (got %s)", channel.State.String(),
+ )
+ }
+
+ // Authenticate capability to ensure caller has authority to receive packet on this channel
+ capName := host.ChannelCapabilityPath(packet.GetDestPort(), packet.GetDestChannel())
+ if !k.scopedKeeper.AuthenticateCapability(ctx, chanCap, capName) {
+ return sdkerrors.Wrapf(
+ types.ErrInvalidChannelCapability,
+ "channel capability failed authentication for capability name %s", capName,
+ )
+ }
+
+ // NOTE: IBC app modules might have written the acknowledgement synchronously on
+ // the OnRecvPacket callback so we need to check if the acknowledgement is already
+ // set on the store and return an error if so.
+ if k.HasPacketAcknowledgement(ctx, packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence()) {
+ return types.ErrAcknowledgementExists
+ }
+
+ if len(acknowledgement) == 0 {
+ return sdkerrors.Wrap(types.ErrInvalidAcknowledgement, "acknowledgement cannot be empty")
+ }
+
+ // set the acknowledgement so that it can be verified on the other side
+ k.SetPacketAcknowledgement(
+ ctx, packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence(),
+ types.CommitAcknowledgement(acknowledgement),
+ )
+
+ // log that a packet acknowledgement has been written
+ k.Logger(ctx).Info("acknowledged written", "packet", fmt.Sprintf("%v", packet))
+
+ // emit an event that the relayer can query for
+ ctx.EventManager().EmitEvents(sdk.Events{
+ sdk.NewEvent(
+ types.EventTypeWriteAck,
+ sdk.NewAttribute(types.AttributeKeyData, string(packet.GetData())),
+ sdk.NewAttribute(types.AttributeKeyTimeoutHeight, packet.GetTimeoutHeight().String()),
+ sdk.NewAttribute(types.AttributeKeyTimeoutTimestamp, fmt.Sprintf("%d", packet.GetTimeoutTimestamp())),
+ sdk.NewAttribute(types.AttributeKeySequence, fmt.Sprintf("%d", packet.GetSequence())),
+ sdk.NewAttribute(types.AttributeKeySrcPort, packet.GetSourcePort()),
+ sdk.NewAttribute(types.AttributeKeySrcChannel, packet.GetSourceChannel()),
+ sdk.NewAttribute(types.AttributeKeyDstPort, packet.GetDestPort()),
+ sdk.NewAttribute(types.AttributeKeyDstChannel, packet.GetDestChannel()),
+ sdk.NewAttribute(types.AttributeKeyAck, string(acknowledgement)),
+ // we only support 1-hop packets now, and that is the most important hop for a relayer
+ // (is it going to a chain I am connected to)
+ sdk.NewAttribute(types.AttributeKeyConnection, channel.ConnectionHops[0]),
+ ),
+ sdk.NewEvent(
+ sdk.EventTypeMessage,
+ sdk.NewAttribute(sdk.AttributeKeyModule, types.AttributeValueCategory),
+ ),
+ })
+
+ return nil
+}
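+
+// Usage sketch for asynchronous acknowledgement: a module that deferred its
+// acknowledgement in OnRecvPacket can write it later (the acknowledgement
+// bytes shown are an arbitrary, app-defined example):
+//
+//   ack := []byte(`{"result":"AQ=="}`) // any non-empty app-defined bytes
+//   if err := k.WriteAcknowledgement(ctx, chanCap, packet, ack); err != nil {
+//       return err
+//   }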
+
+// AcknowledgePacket is called by a module to process the acknowledgement of a
+// packet previously sent by the calling module on a channel to a counterparty
+// module on the counterparty chain. Its intended usage is within the ante
+// handler. AcknowledgePacket will clean up the packet commitment,
+// which is no longer necessary since the packet has been received and acted upon.
+// It will also increment NextSequenceAck in case of ORDERED channels.
+func (k Keeper) AcknowledgePacket(
+ ctx sdk.Context,
+ chanCap *capabilitytypes.Capability,
+ packet exported.PacketI,
+ acknowledgement []byte,
+ proof []byte,
+ proofHeight exported.Height,
+) error {
+ channel, found := k.GetChannel(ctx, packet.GetSourcePort(), packet.GetSourceChannel())
+ if !found {
+ return sdkerrors.Wrapf(
+ types.ErrChannelNotFound,
+ "port ID (%s) channel ID (%s)", packet.GetSourcePort(), packet.GetSourceChannel(),
+ )
+ }
+
+ if channel.State != types.OPEN {
+ return sdkerrors.Wrapf(
+ types.ErrInvalidChannelState,
+ "channel state is not OPEN (got %s)", channel.State.String(),
+ )
+ }
+
+ // Authenticate capability to ensure caller has authority to acknowledge the packet on this channel
+ capName := host.ChannelCapabilityPath(packet.GetSourcePort(), packet.GetSourceChannel())
+ if !k.scopedKeeper.AuthenticateCapability(ctx, chanCap, capName) {
+ return sdkerrors.Wrapf(
+ types.ErrInvalidChannelCapability,
+ "channel capability failed authentication for capability name %s", capName,
+ )
+ }
+
+ // packet must have been sent to the channel's counterparty
+ if packet.GetDestPort() != channel.Counterparty.PortId {
+ return sdkerrors.Wrapf(
+ types.ErrInvalidPacket,
+ "packet destination port doesn't match the counterparty's port (%s ≠ %s)", packet.GetDestPort(), channel.Counterparty.PortId,
+ )
+ }
+
+ if packet.GetDestChannel() != channel.Counterparty.ChannelId {
+ return sdkerrors.Wrapf(
+ types.ErrInvalidPacket,
+ "packet destination channel doesn't match the counterparty's channel (%s ≠ %s)", packet.GetDestChannel(), channel.Counterparty.ChannelId,
+ )
+ }
+
+ connectionEnd, found := k.connectionKeeper.GetConnection(ctx, channel.ConnectionHops[0])
+ if !found {
+ return sdkerrors.Wrap(connectiontypes.ErrConnectionNotFound, channel.ConnectionHops[0])
+ }
+
+ if connectionEnd.GetState() != int32(connectiontypes.OPEN) {
+ return sdkerrors.Wrapf(
+ connectiontypes.ErrInvalidConnectionState,
+ "connection state is not OPEN (got %s)", connectiontypes.State(connectionEnd.GetState()).String(),
+ )
+ }
+
+ commitment := k.GetPacketCommitment(ctx, packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence())
+
+ packetCommitment := types.CommitPacket(k.cdc, packet)
+
+ // verify we sent the packet and haven't cleared it out yet
+ if !bytes.Equal(commitment, packetCommitment) {
+ return sdkerrors.Wrapf(types.ErrInvalidPacket, "commitment bytes are not equal: got (%v), expected (%v)", packetCommitment, commitment)
+ }
+
+ if err := k.connectionKeeper.VerifyPacketAcknowledgement(
+ ctx, connectionEnd, proofHeight, proof, packet.GetDestPort(), packet.GetDestChannel(),
+ packet.GetSequence(), acknowledgement,
+ ); err != nil {
+ return sdkerrors.Wrap(err, "packet acknowledgement verification failed")
+ }
+
+ // assert packets acknowledged in order
+ if channel.Ordering == types.ORDERED {
+ nextSequenceAck, found := k.GetNextSequenceAck(ctx, packet.GetSourcePort(), packet.GetSourceChannel())
+ if !found {
+ return sdkerrors.Wrapf(
+ types.ErrSequenceAckNotFound,
+ "source port: %s, source channel: %s", packet.GetSourcePort(), packet.GetSourceChannel(),
+ )
+ }
+
+ if packet.GetSequence() != nextSequenceAck {
+ return sdkerrors.Wrapf(
+ sdkerrors.ErrInvalidSequence,
+ "packet sequence ≠ next ack sequence (%d ≠ %d)", packet.GetSequence(), nextSequenceAck,
+ )
+ }
+
+ // All verification complete, in the case of ORDERED channels we must increment nextSequenceAck
+ nextSequenceAck++
+
+ // incrementing NextSequenceAck and storing under this chain's channelEnd identifiers
+ // Since this is the original sending chain, our channelEnd is packet's source port and channel
+ k.SetNextSequenceAck(ctx, packet.GetSourcePort(), packet.GetSourceChannel(), nextSequenceAck)
+
+ }
+
+ // Delete the packet commitment; since the packet has been acknowledged, the commitment is no longer necessary
+ k.deletePacketCommitment(ctx, packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence())
+
+ // log that a packet has been acknowledged
+ k.Logger(ctx).Info("packet acknowledged", "packet", fmt.Sprintf("%v", packet))
+
+ // emit an event marking that we have processed the acknowledgement
+ ctx.EventManager().EmitEvents(sdk.Events{
+ sdk.NewEvent(
+ types.EventTypeAcknowledgePacket,
+ sdk.NewAttribute(types.AttributeKeyTimeoutHeight, packet.GetTimeoutHeight().String()),
+ sdk.NewAttribute(types.AttributeKeyTimeoutTimestamp, fmt.Sprintf("%d", packet.GetTimeoutTimestamp())),
+ sdk.NewAttribute(types.AttributeKeySequence, fmt.Sprintf("%d", packet.GetSequence())),
+ sdk.NewAttribute(types.AttributeKeySrcPort, packet.GetSourcePort()),
+ sdk.NewAttribute(types.AttributeKeySrcChannel, packet.GetSourceChannel()),
+ sdk.NewAttribute(types.AttributeKeyDstPort, packet.GetDestPort()),
+ sdk.NewAttribute(types.AttributeKeyDstChannel, packet.GetDestChannel()),
+ sdk.NewAttribute(types.AttributeKeyChannelOrdering, channel.Ordering.String()),
+ // we only support 1-hop packets now, and that is the most important hop for a relayer
+ // (is it going to a chain I am connected to)
+ sdk.NewAttribute(types.AttributeKeyConnection, channel.ConnectionHops[0]),
+ ),
+ sdk.NewEvent(
+ sdk.EventTypeMessage,
+ sdk.NewAttribute(sdk.AttributeKeyModule, types.AttributeValueCategory),
+ ),
+ })
+
+ return nil
+}
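+
+// Usage sketch: AcknowledgePacket is driven by a relayer-submitted message
+// carrying the counterparty's acknowledgement proof (the message fields shown
+// are assumptions about the surrounding handler):
+//
+//   if err := k.AcknowledgePacket(ctx, chanCap, msg.Packet, msg.Acknowledgement, msg.ProofAcked, msg.ProofHeight); err != nil {
+//       return err
+//   }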
diff --git a/core/04-channel/keeper/packet_test.go b/core/04-channel/keeper/packet_test.go
new file mode 100644
index 00000000..232e6875
--- /dev/null
+++ b/core/04-channel/keeper/packet_test.go
@@ -0,0 +1,665 @@
+package keeper_test
+
+import (
+ "fmt"
+
+ capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+ host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
+ ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+ ibcmock "github.com/cosmos/cosmos-sdk/x/ibc/testing/mock"
+)
+
+var (
+ validPacketData = []byte("VALID PACKET DATA")
+ disabledTimeoutTimestamp = uint64(0)
+ disabledTimeoutHeight = clienttypes.ZeroHeight()
+ timeoutHeight = clienttypes.NewHeight(0, 100)
+
+ // for when the testing package cannot be used
+ clientIDA = "clientA"
+ clientIDB = "clientB"
+ connIDA = "connA"
+ connIDB = "connB"
+ portID = "portid"
+ channelIDA = "channelidA"
+ channelIDB = "channelidB"
+)
+
+// TestSendPacket tests SendPacket from chainA to chainB
+func (suite *KeeperTestSuite) TestSendPacket() {
+ var (
+ packet exported.PacketI
+ channelCap *capabilitytypes.Capability
+ )
+
+ testCases := []testCase{
+ {"success: UNORDERED channel", func() {
+ _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ }, true},
+ {"success: ORDERED channel", func() {
+ _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED)
+ packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ }, true},
+ {"sending packet out of order on UNORDERED channel", func() {
+ // setup creates an unordered channel
+ _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ packet = types.NewPacket(validPacketData, 5, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ }, false},
+ {"sending packet out of order on ORDERED channel", func() {
+ _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED)
+ packet = types.NewPacket(validPacketData, 5, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ }, false},
+ {"packet basic validation failed, empty packet data", func() {
+ _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ packet = types.NewPacket([]byte{}, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ }, false},
+ {"channel not found", func() {
+ // use wrong channel naming
+ _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ packet = types.NewPacket(validPacketData, 1, ibctesting.InvalidID, ibctesting.InvalidID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ }, false},
+ {"channel closed", func() {
+ _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+
+ err := suite.coordinator.SetChannelClosed(suite.chainA, suite.chainB, channelA)
+ suite.Require().NoError(err)
+ }, false},
+ {"packet dest port ≠ channel counterparty port", func() {
+ _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ // use wrong port for dest
+ packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, ibctesting.InvalidID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ }, false},
+ {"packet dest channel ID ≠ channel counterparty channel ID", func() {
+ _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ // use wrong channel for dest
+ packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, ibctesting.InvalidID, timeoutHeight, disabledTimeoutTimestamp)
+ channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ }, false},
+ {"connection not found", func() {
+ channelA := ibctesting.TestChannel{PortID: portID, ID: channelIDA}
+ channelB := ibctesting.TestChannel{PortID: portID, ID: channelIDB}
+ // pass channel check
+ suite.chainA.App.IBCKeeper.ChannelKeeper.SetChannel(
+ suite.chainA.GetContext(),
+ channelA.PortID, channelA.ID,
+ types.NewChannel(types.OPEN, types.ORDERED, types.NewCounterparty(channelB.PortID, channelB.ID), []string{connIDA}, channelA.Version),
+ )
+ packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ suite.chainA.CreateChannelCapability(channelA.PortID, channelA.ID)
+ channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ }, false},
+ {"client state not found", func() {
+ _, _, connA, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+
+ // change connection client ID
+ connection := suite.chainA.GetConnection(connA)
+ connection.ClientId = ibctesting.InvalidID
+ suite.chainA.App.IBCKeeper.ConnectionKeeper.SetConnection(suite.chainA.GetContext(), connA.ID, connection)
+
+ packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ }, false},
+ {"client state is frozen", func() {
+ _, _, connA, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+
+ connection := suite.chainA.GetConnection(connA)
+ clientState := suite.chainA.GetClientState(connection.ClientId)
+ cs, ok := clientState.(*ibctmtypes.ClientState)
+ suite.Require().True(ok)
+
+ // freeze client
+ cs.FrozenHeight = clienttypes.NewHeight(0, 1)
+ suite.chainA.App.IBCKeeper.ClientKeeper.SetClientState(suite.chainA.GetContext(), connection.ClientId, cs)
+
+ packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ }, false},
+
+ {"timeout height passed", func() {
+ clientA, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ // use client state latest height for timeout
+ clientState := suite.chainA.GetClientState(clientA)
+ packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clientState.GetLatestHeight().(clienttypes.Height), disabledTimeoutTimestamp)
+ channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ }, false},
+ {"timeout timestamp passed", func() {
+ clientA, _, connA, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ // use latest time on client state
+ clientState := suite.chainA.GetClientState(clientA)
+ connection := suite.chainA.GetConnection(connA)
+ timestamp, err := suite.chainA.App.IBCKeeper.ConnectionKeeper.GetTimestampAtHeight(suite.chainA.GetContext(), connection, clientState.GetLatestHeight())
+ suite.Require().NoError(err)
+
+ packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, disabledTimeoutHeight, timestamp)
+ channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ }, false},
+ {"next sequence send not found", func() {
+ _, _, connA, connB := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
+ channelA := suite.chainA.NextTestChannel(connA, ibctesting.TransferPort)
+ channelB := suite.chainB.NextTestChannel(connB, ibctesting.TransferPort)
+ packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ // manually creating channel prevents next sequence from being set
+ suite.chainA.App.IBCKeeper.ChannelKeeper.SetChannel(
+ suite.chainA.GetContext(),
+ channelA.PortID, channelA.ID,
+ types.NewChannel(types.OPEN, types.ORDERED, types.NewCounterparty(channelB.PortID, channelB.ID), []string{connA.ID}, channelA.Version),
+ )
+ suite.chainA.CreateChannelCapability(channelA.PortID, channelA.ID)
+ channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ }, false},
+ {"next sequence wrong", func() {
+ _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ suite.chainA.App.IBCKeeper.ChannelKeeper.SetNextSequenceSend(suite.chainA.GetContext(), channelA.PortID, channelA.ID, 5)
+ channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ }, false},
+ {"channel capability not found", func() {
+ _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ channelCap = capabilitytypes.NewCapability(5)
+ }, false},
+ }
+
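+	// each case's malleate closure configures chain state before SendPacket is
+	// called; expPass indicates whether the send is expected to succeed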
+ for i, tc := range testCases {
+ tc := tc
+ suite.Run(fmt.Sprintf("Case %s, %d/%d tests", tc.msg, i, len(testCases)), func() {
+ suite.SetupTest() // reset
+
+ tc.malleate()
+
+ err := suite.chainA.App.IBCKeeper.ChannelKeeper.SendPacket(suite.chainA.GetContext(), channelCap, packet)
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+
+}
+
+// TestRecvPacket tests RecvPacket on chainB. Since packet commitment verification will always
+// occur last (it is resource intensive), only test cases expected to succeed and test cases
+// exercising packet commitment verification need to simulate sending a packet from chainA to chainB.
+func (suite *KeeperTestSuite) TestRecvPacket() {
+ var (
+ packet exported.PacketI
+ channelCap *capabilitytypes.Capability
+ )
+
+ testCases := []testCase{
+ {"success: ORDERED channel", func() {
+ _, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED)
+ packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ suite.Require().NoError(err)
+ channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
+ }, true},
+ {"success UNORDERED channel", func() {
+ // setup uses an UNORDERED channel
+ _, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ suite.Require().NoError(err)
+ channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
+ }, true},
+ {"success with out of order packet: UNORDERED channel", func() {
+ // setup uses an UNORDERED channel
+ _, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+
+ // send 2 packets
+ err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ suite.Require().NoError(err)
+ // set sequence to 2
+ packet = types.NewPacket(validPacketData, 2, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ err = suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ suite.Require().NoError(err)
+ // attempts to receive packet 2 without receiving packet 1
+ channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
+ }, true},
+ {"out of order packet failure with ORDERED channel", func() {
+ _, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED)
+ packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+
+ // send 2 packets
+ err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ suite.Require().NoError(err)
+ // set sequence to 2
+ packet = types.NewPacket(validPacketData, 2, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ err = suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ suite.Require().NoError(err)
+ // attempts to receive packet 2 without receiving packet 1
+ channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
+ }, false},
+ {"channel not found", func() {
+ // use wrong channel naming
+ _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, ibctesting.InvalidID, ibctesting.InvalidID, timeoutHeight, disabledTimeoutTimestamp)
+ channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
+ }, false},
+ {"channel not open", func() {
+ _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+
+ err := suite.coordinator.SetChannelClosed(suite.chainB, suite.chainA, channelB)
+ suite.Require().NoError(err)
+ channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
+ }, false},
+ {"capability cannot authenticate", func() {
+ _, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED)
+ packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ suite.Require().NoError(err)
+ channelCap = capabilitytypes.NewCapability(3)
+ }, false},
+ {"packet source port ≠ channel counterparty port", func() {
+ _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+			// use wrong port for source
+ packet = types.NewPacket(validPacketData, 1, ibctesting.InvalidID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
+ }, false},
+ {"packet source channel ID ≠ channel counterparty channel ID", func() {
+ _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+			// use wrong channel ID for source
+ packet = types.NewPacket(validPacketData, 1, channelA.PortID, ibctesting.InvalidID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
+ }, false},
+ {"connection not found", func() {
+ channelA := ibctesting.TestChannel{PortID: portID, ID: channelIDA}
+ channelB := ibctesting.TestChannel{PortID: portID, ID: channelIDB}
+ // pass channel check
+ suite.chainB.App.IBCKeeper.ChannelKeeper.SetChannel(
+ suite.chainB.GetContext(),
+ channelB.PortID, channelB.ID,
+ types.NewChannel(types.OPEN, types.ORDERED, types.NewCounterparty(channelA.PortID, channelA.ID), []string{connIDB}, channelB.Version),
+ )
+ packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ suite.chainB.CreateChannelCapability(channelB.PortID, channelB.ID)
+ channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
+ }, false},
+ {"connection not OPEN", func() {
+ clientA, clientB := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+ // connection on chainB is in INIT
+ connB, connA, err := suite.coordinator.ConnOpenInit(suite.chainB, suite.chainA, clientB, clientA)
+ suite.Require().NoError(err)
+
+ channelA := suite.chainA.NextTestChannel(connA, ibctesting.TransferPort)
+ channelB := suite.chainB.NextTestChannel(connB, ibctesting.TransferPort)
+ // pass channel check
+ suite.chainB.App.IBCKeeper.ChannelKeeper.SetChannel(
+ suite.chainB.GetContext(),
+ channelB.PortID, channelB.ID,
+ types.NewChannel(types.OPEN, types.ORDERED, types.NewCounterparty(channelA.PortID, channelA.ID), []string{connB.ID}, channelB.Version),
+ )
+ packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ suite.chainB.CreateChannelCapability(channelB.PortID, channelB.ID)
+ channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
+ }, false},
+ {"timeout height passed", func() {
+ _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), disabledTimeoutTimestamp)
+ channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
+ }, false},
+ {"timeout timestamp passed", func() {
+ _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, disabledTimeoutHeight, uint64(suite.chainB.GetContext().BlockTime().UnixNano()))
+ channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
+ }, false},
+ {"next receive sequence is not found", func() {
+ _, _, connA, connB := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
+ channelA := suite.chainA.NextTestChannel(connA, ibctesting.TransferPort)
+ channelB := suite.chainB.NextTestChannel(connB, ibctesting.TransferPort)
+
+ // manually creating channel prevents next recv sequence from being set
+ suite.chainB.App.IBCKeeper.ChannelKeeper.SetChannel(
+ suite.chainB.GetContext(),
+ channelB.PortID, channelB.ID,
+ types.NewChannel(types.OPEN, types.ORDERED, types.NewCounterparty(channelA.PortID, channelA.ID), []string{connB.ID}, channelB.Version),
+ )
+
+ packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+
+ // manually set packet commitment
+ suite.chainA.App.IBCKeeper.ChannelKeeper.SetPacketCommitment(suite.chainA.GetContext(), channelA.PortID, channelA.ID, packet.GetSequence(), ibctesting.TestHash)
+ suite.chainB.CreateChannelCapability(channelB.PortID, channelB.ID)
+
+ channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
+ }, false},
+ {"receipt already stored", func() {
+ _, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ suite.chainB.App.IBCKeeper.ChannelKeeper.SetPacketReceipt(suite.chainB.GetContext(), channelB.PortID, channelB.ID, 1)
+ channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
+ }, false},
+ {"validation failed", func() {
+ // packet commitment not set resulting in invalid proof
+ _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
+ }, false},
+ }
+
+ for i, tc := range testCases {
+ tc := tc
+ suite.Run(fmt.Sprintf("Case %s, %d/%d tests", tc.msg, i, len(testCases)), func() {
+ suite.SetupTest() // reset
+ tc.malleate()
+
+ // get proof of packet commitment from chainA
+ packetKey := host.PacketCommitmentKey(packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence())
+ proof, proofHeight := suite.chainA.QueryProof(packetKey)
+
+ err := suite.chainB.App.IBCKeeper.ChannelKeeper.RecvPacket(suite.chainB.GetContext(), channelCap, packet, proof, proofHeight)
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+
+ channelB, _ := suite.chainB.App.IBCKeeper.ChannelKeeper.GetChannel(suite.chainB.GetContext(), packet.GetDestPort(), packet.GetDestChannel())
+ nextSeqRecv, found := suite.chainB.App.IBCKeeper.ChannelKeeper.GetNextSequenceRecv(suite.chainB.GetContext(), packet.GetDestPort(), packet.GetDestChannel())
+ suite.Require().True(found)
+ receipt, receiptStored := suite.chainB.App.IBCKeeper.ChannelKeeper.GetPacketReceipt(suite.chainB.GetContext(), packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence())
+
+ if channelB.Ordering == types.ORDERED {
+ suite.Require().Equal(packet.GetSequence()+1, nextSeqRecv, "sequence not incremented in ordered channel")
+ suite.Require().False(receiptStored, "packet receipt stored on ORDERED channel")
+ } else {
+ suite.Require().Equal(uint64(1), nextSeqRecv, "sequence incremented for UNORDERED channel")
+ suite.Require().True(receiptStored, "packet receipt not stored after RecvPacket in UNORDERED channel")
+ suite.Require().Equal(string([]byte{byte(1)}), receipt, "packet receipt is not empty string")
+ }
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+
+}
+
+func (suite *KeeperTestSuite) TestWriteAcknowledgement() {
+ var (
+ ack []byte
+ packet exported.PacketI
+ channelCap *capabilitytypes.Capability
+ )
+
+ testCases := []testCase{
+ {
+ "success",
+ func() {
+ _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ ack = ibctesting.TestHash
+ channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
+ },
+ true,
+ },
+ {"channel not found", func() {
+ // use wrong channel naming
+ _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, ibctesting.InvalidID, ibctesting.InvalidID, timeoutHeight, disabledTimeoutTimestamp)
+ ack = ibctesting.TestHash
+ channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
+ }, false},
+ {"channel not open", func() {
+ _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ ack = ibctesting.TestHash
+
+ err := suite.coordinator.SetChannelClosed(suite.chainB, suite.chainA, channelB)
+ suite.Require().NoError(err)
+ channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
+ }, false},
+ {
+ "capability authentication failed",
+ func() {
+ _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ ack = ibctesting.TestHash
+ channelCap = capabilitytypes.NewCapability(3)
+ },
+ false,
+ },
+ {
+ "no-op, already acked",
+ func() {
+ _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ ack = ibctesting.TestHash
+ suite.chainB.App.IBCKeeper.ChannelKeeper.SetPacketAcknowledgement(suite.chainB.GetContext(), packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence(), ack)
+ channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
+ },
+ false,
+ },
+ {
+ "empty acknowledgement",
+ func() {
+ _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ ack = nil
+ channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
+ },
+ false,
+ },
+ }
+ for i, tc := range testCases {
+ tc := tc
+ suite.Run(fmt.Sprintf("Case %s, %d/%d tests", tc.msg, i, len(testCases)), func() {
+ suite.SetupTest() // reset
+
+ tc.malleate()
+
+ err := suite.chainB.App.IBCKeeper.ChannelKeeper.WriteAcknowledgement(suite.chainB.GetContext(), channelCap, packet, ack)
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+}
+
+// TestAcknowledgePacket tests the AcknowledgePacket call on chainA.
+func (suite *KeeperTestSuite) TestAcknowledgePacket() {
+ var (
+ packet types.Packet
+ ack = ibcmock.MockAcknowledgement
+
+ channelCap *capabilitytypes.Capability
+ )
+
+ testCases := []testCase{
+ {"success on ordered channel", func() {
+ clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED)
+ packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ // create packet commitment
+ err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ suite.Require().NoError(err)
+
+ // create packet receipt and acknowledgement
+ err = suite.coordinator.RecvPacket(suite.chainA, suite.chainB, clientA, packet)
+ suite.Require().NoError(err)
+
+ channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ }, true},
+ {"success on unordered channel", func() {
+ // setup uses an UNORDERED channel
+ clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+
+ // create packet commitment
+ err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ suite.Require().NoError(err)
+
+ // create packet receipt and acknowledgement
+ err = suite.coordinator.RecvPacket(suite.chainA, suite.chainB, clientA, packet)
+ suite.Require().NoError(err)
+
+ channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ }, true},
+ {"channel not found", func() {
+ // use wrong channel naming
+ _, _, _, _, _, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ packet = types.NewPacket(validPacketData, 1, ibctesting.InvalidID, ibctesting.InvalidID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ }, false},
+ {"channel not open", func() {
+ _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+
+ err := suite.coordinator.SetChannelClosed(suite.chainA, suite.chainB, channelA)
+ suite.Require().NoError(err)
+ channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ }, false},
+ {"capability authentication failed", func() {
+ clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED)
+ packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ // create packet commitment
+ err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ suite.Require().NoError(err)
+
+ // create packet receipt and acknowledgement
+ err = suite.coordinator.RecvPacket(suite.chainA, suite.chainB, clientA, packet)
+ suite.Require().NoError(err)
+
+ channelCap = capabilitytypes.NewCapability(3)
+ }, false},
+ {"packet destination port ≠ channel counterparty port", func() {
+ _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ // use wrong port for dest
+ packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, ibctesting.InvalidID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ }, false},
+ {"packet destination channel ID ≠ channel counterparty channel ID", func() {
+ _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ // use wrong channel for dest
+ packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, ibctesting.InvalidID, timeoutHeight, disabledTimeoutTimestamp)
+ channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ }, false},
+ {"connection not found", func() {
+ channelA := ibctesting.TestChannel{PortID: portID, ID: channelIDA}
+ channelB := ibctesting.TestChannel{PortID: portID, ID: channelIDB}
+ // pass channel check
+ suite.chainB.App.IBCKeeper.ChannelKeeper.SetChannel(
+ suite.chainB.GetContext(),
+ channelB.PortID, channelB.ID,
+ types.NewChannel(types.OPEN, types.ORDERED, types.NewCounterparty(channelA.PortID, channelA.ID), []string{connIDB}, channelB.Version),
+ )
+ packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ suite.chainA.CreateChannelCapability(channelA.PortID, channelA.ID)
+ channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ }, false},
+ {"connection not OPEN", func() {
+ clientA, clientB := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+ // connection on chainA is in INIT
+ connA, connB, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB)
+ suite.Require().NoError(err)
+
+ channelA := suite.chainA.NextTestChannel(connA, ibctesting.TransferPort)
+ channelB := suite.chainB.NextTestChannel(connB, ibctesting.TransferPort)
+ // pass channel check
+ suite.chainA.App.IBCKeeper.ChannelKeeper.SetChannel(
+ suite.chainA.GetContext(),
+ channelA.PortID, channelA.ID,
+ types.NewChannel(types.OPEN, types.ORDERED, types.NewCounterparty(channelB.PortID, channelB.ID), []string{connA.ID}, channelA.Version),
+ )
+ packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ suite.chainA.CreateChannelCapability(channelA.PortID, channelA.ID)
+ channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ }, false},
+ {"packet hasn't been sent", func() {
+ // packet commitment never written
+ _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ }, false},
+ {"packet ack verification failed", func() {
+ // ack never written
+ _, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+
+ // create packet commitment
+ suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ }, false},
+ {"next ack sequence not found", func() {
+ _, _, connA, connB := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
+ channelA := suite.chainA.NextTestChannel(connA, ibctesting.TransferPort)
+ channelB := suite.chainB.NextTestChannel(connB, ibctesting.TransferPort)
+ packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ // manually creating channel prevents next sequence acknowledgement from being set
+ suite.chainA.App.IBCKeeper.ChannelKeeper.SetChannel(
+ suite.chainA.GetContext(),
+ channelA.PortID, channelA.ID,
+ types.NewChannel(types.OPEN, types.ORDERED, types.NewCounterparty(channelB.PortID, channelB.ID), []string{connA.ID}, channelA.Version),
+ )
+ // manually set packet commitment
+ suite.chainA.App.IBCKeeper.ChannelKeeper.SetPacketCommitment(suite.chainA.GetContext(), channelA.PortID, channelA.ID, packet.GetSequence(), ibctesting.TestHash)
+
+ // manually set packet acknowledgement and capability
+ suite.chainB.App.IBCKeeper.ChannelKeeper.SetPacketAcknowledgement(suite.chainB.GetContext(), channelB.PortID, channelB.ID, packet.GetSequence(), ibctesting.TestHash)
+ suite.chainA.CreateChannelCapability(channelA.PortID, channelA.ID)
+ channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ }, false},
+ {"next ack sequence mismatch", func() {
+ clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED)
+ packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ // create packet commitment
+ err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ suite.Require().NoError(err)
+
+ // create packet acknowledgement
+ err = suite.coordinator.RecvPacket(suite.chainA, suite.chainB, clientA, packet)
+ suite.Require().NoError(err)
+
+ // set next sequence ack wrong
+ suite.chainA.App.IBCKeeper.ChannelKeeper.SetNextSequenceAck(suite.chainA.GetContext(), channelA.PortID, channelA.ID, 10)
+ channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ }, false},
+ }
+
+ for i, tc := range testCases {
+ tc := tc
+ suite.Run(fmt.Sprintf("Case %s, %d/%d tests", tc.msg, i, len(testCases)), func() {
+ suite.SetupTest() // reset
+ tc.malleate()
+
+ packetKey := host.PacketAcknowledgementKey(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence())
+ proof, proofHeight := suite.chainB.QueryProof(packetKey)
+
+ err := suite.chainA.App.IBCKeeper.ChannelKeeper.AcknowledgePacket(suite.chainA.GetContext(), channelCap, packet, ack, proof, proofHeight)
+ pc := suite.chainA.App.IBCKeeper.ChannelKeeper.GetPacketCommitment(suite.chainA.GetContext(), packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence())
+
+ channelA, _ := suite.chainA.App.IBCKeeper.ChannelKeeper.GetChannel(suite.chainA.GetContext(), packet.GetSourcePort(), packet.GetSourceChannel())
+ sequenceAck, _ := suite.chainA.App.IBCKeeper.ChannelKeeper.GetNextSequenceAck(suite.chainA.GetContext(), packet.GetSourcePort(), packet.GetSourceChannel())
+
+ if tc.expPass {
+ suite.NoError(err)
+ suite.Nil(pc)
+
+ if channelA.Ordering == types.ORDERED {
+ suite.Require().Equal(packet.GetSequence()+1, sequenceAck, "sequence not incremented in ordered channel")
+ } else {
+ suite.Require().Equal(uint64(1), sequenceAck, "sequence incremented for UNORDERED channel")
+ }
+ } else {
+ suite.Error(err)
+ }
+ })
+ }
+}
diff --git a/core/04-channel/keeper/timeout.go b/core/04-channel/keeper/timeout.go
new file mode 100644
index 00000000..1f3dac91
--- /dev/null
+++ b/core/04-channel/keeper/timeout.go
@@ -0,0 +1,276 @@
+package keeper
+
+import (
+ "bytes"
+ "fmt"
+
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"
+ connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+ host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+)
+
+// TimeoutPacket is called by a module which originally attempted to send a
+// packet to a counterparty module, where the timeout height has passed on the
+// counterparty chain without the packet being committed, to prove that the
+// packet can no longer be executed and to allow the calling module to safely
+// perform appropriate state transitions. Its intended usage is within the
+// ante handler.
+func (k Keeper) TimeoutPacket(
+ ctx sdk.Context,
+ packet exported.PacketI,
+ proof []byte,
+ proofHeight exported.Height,
+ nextSequenceRecv uint64,
+) error {
+ channel, found := k.GetChannel(ctx, packet.GetSourcePort(), packet.GetSourceChannel())
+ if !found {
+ return sdkerrors.Wrapf(
+ types.ErrChannelNotFound,
+ "port ID (%s) channel ID (%s)", packet.GetSourcePort(), packet.GetSourceChannel(),
+ )
+ }
+
+ if channel.State != types.OPEN {
+ return sdkerrors.Wrapf(
+ types.ErrInvalidChannelState,
+ "channel state is not OPEN (got %s)", channel.State.String(),
+ )
+ }
+
+ // NOTE: TimeoutPacket is called by the AnteHandler which acts upon the packet.Route(),
+ // so the capability authentication can be omitted here
+
+ if packet.GetDestPort() != channel.Counterparty.PortId {
+ return sdkerrors.Wrapf(
+ types.ErrInvalidPacket,
+ "packet destination port doesn't match the counterparty's port (%s ≠ %s)", packet.GetDestPort(), channel.Counterparty.PortId,
+ )
+ }
+
+ if packet.GetDestChannel() != channel.Counterparty.ChannelId {
+ return sdkerrors.Wrapf(
+ types.ErrInvalidPacket,
+ "packet destination channel doesn't match the counterparty's channel (%s ≠ %s)", packet.GetDestChannel(), channel.Counterparty.ChannelId,
+ )
+ }
+
+ connectionEnd, found := k.connectionKeeper.GetConnection(ctx, channel.ConnectionHops[0])
+ if !found {
+ return sdkerrors.Wrap(
+ connectiontypes.ErrConnectionNotFound,
+ channel.ConnectionHops[0],
+ )
+ }
+
+ // check that timeout height or timeout timestamp has passed on the other end
+ proofTimestamp, err := k.connectionKeeper.GetTimestampAtHeight(ctx, connectionEnd, proofHeight)
+ if err != nil {
+ return err
+ }
+
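+	// A packet may be timed out only once at least one of its timeout bounds has been
+	// crossed on the counterparty: the proof height is at or beyond a non-zero timeout
+	// height, or the proven timestamp is at or beyond a non-zero timeout timestamp.
+	// If neither bound has been crossed, the timeout claim is rejected below.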
+ timeoutHeight := packet.GetTimeoutHeight()
+ if (timeoutHeight.IsZero() || proofHeight.LT(timeoutHeight)) &&
+ (packet.GetTimeoutTimestamp() == 0 || proofTimestamp < packet.GetTimeoutTimestamp()) {
+ return sdkerrors.Wrap(types.ErrPacketTimeout, "packet timeout has not been reached for height or timestamp")
+ }
+
+ commitment := k.GetPacketCommitment(ctx, packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence())
+
+ packetCommitment := types.CommitPacket(k.cdc, packet)
+
+ // verify we sent the packet and haven't cleared it out yet
+ if !bytes.Equal(commitment, packetCommitment) {
+ return sdkerrors.Wrapf(types.ErrInvalidPacket, "packet commitment bytes are not equal: got (%v), expected (%v)", commitment, packetCommitment)
+ }
+
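+	// The proof required depends on the channel ordering: for ORDERED channels the
+	// relayer proves the counterparty's next receive sequence (showing the packet was
+	// never delivered), while for UNORDERED channels the absence of a packet receipt
+	// is proven instead.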
+ switch channel.Ordering {
+ case types.ORDERED:
+ // check that packet has not been received
+ if nextSequenceRecv > packet.GetSequence() {
+ return sdkerrors.Wrapf(
+ types.ErrInvalidPacket,
+ "packet already received, next sequence receive > packet sequence (%d > %d)", nextSequenceRecv, packet.GetSequence(),
+ )
+ }
+
+ // check that the recv sequence is as claimed
+ err = k.connectionKeeper.VerifyNextSequenceRecv(
+ ctx, connectionEnd, proofHeight, proof,
+ packet.GetDestPort(), packet.GetDestChannel(), nextSequenceRecv,
+ )
+ case types.UNORDERED:
+ err = k.connectionKeeper.VerifyPacketReceiptAbsence(
+ ctx, connectionEnd, proofHeight, proof,
+ packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence(),
+ )
+ default:
+ panic(sdkerrors.Wrapf(types.ErrInvalidChannelOrdering, channel.Ordering.String()))
+ }
+
+ if err != nil {
+ return err
+ }
+
+ // NOTE: the remaining code is located in the TimeoutExecuted function
+ return nil
+}
+
+// TimeoutExecuted deletes the packet commitment sent from this chain after the timeout has been verified.
+// If the timed-out packet came from an ORDERED channel then this channel will be closed.
+//
+// CONTRACT: this function must be called in the IBC handler
+func (k Keeper) TimeoutExecuted(
+ ctx sdk.Context,
+ chanCap *capabilitytypes.Capability,
+ packet exported.PacketI,
+) error {
+ channel, found := k.GetChannel(ctx, packet.GetSourcePort(), packet.GetSourceChannel())
+ if !found {
+ return sdkerrors.Wrapf(types.ErrChannelNotFound, "port ID (%s) channel ID (%s)", packet.GetSourcePort(), packet.GetSourceChannel())
+ }
+
+ capName := host.ChannelCapabilityPath(packet.GetSourcePort(), packet.GetSourceChannel())
+ if !k.scopedKeeper.AuthenticateCapability(ctx, chanCap, capName) {
+ return sdkerrors.Wrapf(
+ types.ErrChannelCapabilityNotFound,
+ "caller does not own capability for channel with capability name %s", capName,
+ )
+ }
+
+ k.deletePacketCommitment(ctx, packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence())
+
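+	// An ORDERED channel cannot skip a timed-out packet without breaking its ordering
+	// guarantee, so the channel end is closed once the commitment is deleted.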
+ if channel.Ordering == types.ORDERED {
+ channel.State = types.CLOSED
+ k.SetChannel(ctx, packet.GetSourcePort(), packet.GetSourceChannel(), channel)
+ }
+
+ k.Logger(ctx).Info("packet timed-out", "packet", fmt.Sprintf("%v", packet))
+
+ // emit an event marking that we have processed the timeout
+ ctx.EventManager().EmitEvents(sdk.Events{
+ sdk.NewEvent(
+ types.EventTypeTimeoutPacket,
+ sdk.NewAttribute(types.AttributeKeyTimeoutHeight, packet.GetTimeoutHeight().String()),
+ sdk.NewAttribute(types.AttributeKeyTimeoutTimestamp, fmt.Sprintf("%d", packet.GetTimeoutTimestamp())),
+ sdk.NewAttribute(types.AttributeKeySequence, fmt.Sprintf("%d", packet.GetSequence())),
+ sdk.NewAttribute(types.AttributeKeySrcPort, packet.GetSourcePort()),
+ sdk.NewAttribute(types.AttributeKeySrcChannel, packet.GetSourceChannel()),
+ sdk.NewAttribute(types.AttributeKeyDstPort, packet.GetDestPort()),
+ sdk.NewAttribute(types.AttributeKeyDstChannel, packet.GetDestChannel()),
+ sdk.NewAttribute(types.AttributeKeyChannelOrdering, channel.Ordering.String()),
+ ),
+ sdk.NewEvent(
+ sdk.EventTypeMessage,
+ sdk.NewAttribute(sdk.AttributeKeyModule, types.AttributeValueCategory),
+ ),
+ })
+
+ return nil
+}
+
+// TimeoutOnClose is called by a module in order to prove that the channel to
+// which an unreceived packet was addressed has been closed, so the packet will
+// never be received (even if the timeoutHeight has not yet been reached).
+func (k Keeper) TimeoutOnClose(
+ ctx sdk.Context,
+ chanCap *capabilitytypes.Capability,
+ packet exported.PacketI,
+ proof,
+ proofClosed []byte,
+ proofHeight exported.Height,
+ nextSequenceRecv uint64,
+) error {
+ channel, found := k.GetChannel(ctx, packet.GetSourcePort(), packet.GetSourceChannel())
+ if !found {
+ return sdkerrors.Wrapf(types.ErrChannelNotFound, "port ID (%s) channel ID (%s)", packet.GetSourcePort(), packet.GetSourceChannel())
+ }
+
+ capName := host.ChannelCapabilityPath(packet.GetSourcePort(), packet.GetSourceChannel())
+ if !k.scopedKeeper.AuthenticateCapability(ctx, chanCap, capName) {
+ return sdkerrors.Wrapf(
+ types.ErrInvalidChannelCapability,
+ "channel capability failed authentication with capability name %s", capName,
+ )
+ }
+
+ if packet.GetDestPort() != channel.Counterparty.PortId {
+ return sdkerrors.Wrapf(
+ types.ErrInvalidPacket,
+ "packet destination port doesn't match the counterparty's port (%s ≠ %s)", packet.GetDestPort(), channel.Counterparty.PortId,
+ )
+ }
+
+ if packet.GetDestChannel() != channel.Counterparty.ChannelId {
+ return sdkerrors.Wrapf(
+ types.ErrInvalidPacket,
+ "packet destination channel doesn't match the counterparty's channel (%s ≠ %s)", packet.GetDestChannel(), channel.Counterparty.ChannelId,
+ )
+ }
+
+ connectionEnd, found := k.connectionKeeper.GetConnection(ctx, channel.ConnectionHops[0])
+ if !found {
+ return sdkerrors.Wrap(connectiontypes.ErrConnectionNotFound, channel.ConnectionHops[0])
+ }
+
+ commitment := k.GetPacketCommitment(ctx, packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence())
+
+ packetCommitment := types.CommitPacket(k.cdc, packet)
+
+ // verify we sent the packet and haven't cleared it out yet
+ if !bytes.Equal(commitment, packetCommitment) {
+ return sdkerrors.Wrapf(types.ErrInvalidPacket, "packet commitment bytes are not equal: got (%v), expected (%v)", commitment, packetCommitment)
+ }
+
+ counterpartyHops, found := k.CounterpartyHops(ctx, channel)
+ if !found {
+ // Should not reach here, connectionEnd was able to be retrieved above
+ panic("cannot find connection")
+ }
+
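+	// Reconstruct the channel end the counterparty is expected to have stored (same
+	// ordering, hops and version, but in the CLOSED state) so that its closure can be
+	// verified against the counterparty's state below.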
+ counterparty := types.NewCounterparty(packet.GetSourcePort(), packet.GetSourceChannel())
+ expectedChannel := types.NewChannel(
+ types.CLOSED, channel.Ordering, counterparty, counterpartyHops, channel.Version,
+ )
+
+ // check that the opposing channel end has closed
+ if err := k.connectionKeeper.VerifyChannelState(
+ ctx, connectionEnd, proofHeight, proofClosed,
+ channel.Counterparty.PortId, channel.Counterparty.ChannelId,
+ expectedChannel,
+ ); err != nil {
+ return err
+ }
+
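+	// as in TimeoutPacket, the required proof depends on the channel ordering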
+ var err error
+ switch channel.Ordering {
+ case types.ORDERED:
+ // check that packet has not been received
+ if nextSequenceRecv > packet.GetSequence() {
+ return sdkerrors.Wrapf(types.ErrInvalidPacket, "packet already received, next sequence receive > packet sequence (%d > %d", nextSequenceRecv, packet.GetSequence())
+ }
+
+ // check that the recv sequence is as claimed
+ err = k.connectionKeeper.VerifyNextSequenceRecv(
+ ctx, connectionEnd, proofHeight, proof,
+ packet.GetDestPort(), packet.GetDestChannel(), nextSequenceRecv,
+ )
+ case types.UNORDERED:
+ err = k.connectionKeeper.VerifyPacketReceiptAbsence(
+ ctx, connectionEnd, proofHeight, proof,
+ packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence(),
+ )
+ default:
+ panic(sdkerrors.Wrapf(types.ErrInvalidChannelOrdering, channel.Ordering.String()))
+ }
+
+ if err != nil {
+ return err
+ }
+
+ // NOTE: the remaining code is located in the TimeoutExecuted function
+ return nil
+}
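+
+// Illustrative sketch (not part of this patch): a MsgTimeout handler would chain the
+// calls in this file roughly as follows, where onTimeoutPacket is a hypothetical
+// application callback that reverses application state (e.g. refunding escrowed tokens):
+//
+//	if err := k.TimeoutPacket(ctx, packet, proof, proofHeight, nextSeqRecv); err != nil {
+//		return err
+//	}
+//	if err := onTimeoutPacket(ctx, packet); err != nil {
+//		return err
+//	}
+//	if err := k.TimeoutExecuted(ctx, chanCap, packet); err != nil {
+//		return err
+//	}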
diff --git a/core/04-channel/keeper/timeout_test.go b/core/04-channel/keeper/timeout_test.go
new file mode 100644
index 00000000..640452e8
--- /dev/null
+++ b/core/04-channel/keeper/timeout_test.go
@@ -0,0 +1,351 @@
+package keeper_test
+
+import (
+ "fmt"
+
+ capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+ host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+)
+
+// TestTimeoutPacket tests the TimeoutPacket call on chainA by ensuring the timeout has passed
+// on chainB, but that no ack has been written yet. Test cases expected to reach proof
+// verification must specify which proof to use via the ordered bool.
+func (suite *KeeperTestSuite) TestTimeoutPacket() {
+ var (
+ packet types.Packet
+ nextSeqRecv uint64
+ ordered bool
+ )
+
+ testCases := []testCase{
+ {"success: ORDERED", func() {
+ ordered = true
+
+ clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED)
+ packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), uint64(suite.chainB.GetContext().BlockTime().UnixNano()))
+ suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ // need to update chainA's client representing chainB to prove missing ack
+ suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ }, true},
+ {"success: UNORDERED", func() {
+ ordered = false
+
+ clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), disabledTimeoutTimestamp)
+ suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ // need to update chainA's client representing chainB to prove missing ack
+ suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ }, true},
+ {"channel not found", func() {
+ // use wrong channel naming
+ _, _, _, _, _, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ packet = types.NewPacket(validPacketData, 1, ibctesting.InvalidID, ibctesting.InvalidID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ }, false},
+ {"channel not open", func() {
+ _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+
+ err := suite.coordinator.SetChannelClosed(suite.chainA, suite.chainB, channelA)
+ suite.Require().NoError(err)
+ }, false},
+ {"packet destination port ≠ channel counterparty port", func() {
+ _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ // use wrong port for dest
+ packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, ibctesting.InvalidID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ }, false},
+ {"packet destination channel ID ≠ channel counterparty channel ID", func() {
+ _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ // use wrong channel for dest
+ packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, ibctesting.InvalidID, timeoutHeight, disabledTimeoutTimestamp)
+ }, false},
+ {"connection not found", func() {
+ channelA := ibctesting.TestChannel{PortID: portID, ID: channelIDA}
+ channelB := ibctesting.TestChannel{PortID: portID, ID: channelIDB}
+ // pass channel check
+ suite.chainA.App.IBCKeeper.ChannelKeeper.SetChannel(
+ suite.chainA.GetContext(),
+ channelA.PortID, channelA.ID,
+ types.NewChannel(types.OPEN, types.ORDERED, types.NewCounterparty(channelB.PortID, channelB.ID), []string{connIDA}, channelA.Version),
+ )
+ packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ }, false},
+ {"timeout", func() {
+ clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED)
+ packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ }, false},
+ {"packet already received ", func() {
+ ordered = true
+ nextSeqRecv = 2
+
+ clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED)
+ packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, uint64(suite.chainB.GetContext().BlockTime().UnixNano()))
+ suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ }, false},
+ {"packet hasn't been sent", func() {
+ clientA, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED)
+ packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, uint64(suite.chainB.GetContext().BlockTime().UnixNano()))
+ suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ }, false},
+ {"next seq receive verification failed", func() {
+ // set ordered to false resulting in wrong proof provided
+ ordered = false
+
+ clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED)
+ packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), disabledTimeoutTimestamp)
+ suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ }, false},
+ {"packet ack verification failed", func() {
+ // set ordered to true resulting in wrong proof provided
+ ordered = true
+
+ clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), disabledTimeoutTimestamp)
+ suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ }, false},
+ }
+
+ for i, tc := range testCases {
+ tc := tc
+ suite.Run(fmt.Sprintf("Case %s, %d/%d tests", tc.msg, i, len(testCases)), func() {
+ var (
+ proof []byte
+ proofHeight exported.Height
+ )
+
+ suite.SetupTest() // reset
+ nextSeqRecv = 1 // must be explicitly changed
+ tc.malleate()
+
+ orderedPacketKey := host.NextSequenceRecvKey(packet.GetDestPort(), packet.GetDestChannel())
+ unorderedPacketKey := host.PacketReceiptKey(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence())
+
+ if ordered {
+ proof, proofHeight = suite.chainB.QueryProof(orderedPacketKey)
+ } else {
+ proof, proofHeight = suite.chainB.QueryProof(unorderedPacketKey)
+ }
+
+ err := suite.chainA.App.IBCKeeper.ChannelKeeper.TimeoutPacket(suite.chainA.GetContext(), packet, proof, proofHeight, nextSeqRecv)
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+}
+
+// TestTimeoutExecuted verifies that packet commitments are deleted on chainA after the
+// channel capabilities are verified.
+func (suite *KeeperTestSuite) TestTimeoutExecuted() {
+ var (
+ packet types.Packet
+ chanCap *capabilitytypes.Capability
+ )
+
+ testCases := []testCase{
+ {"success ORDERED", func() {
+ _, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED)
+ packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), uint64(suite.chainB.GetContext().BlockTime().UnixNano()))
+ suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+
+ chanCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ }, true},
+ {"channel not found", func() {
+ // use wrong channel naming
+ _, _, _, _, _, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ packet = types.NewPacket(validPacketData, 1, ibctesting.InvalidID, ibctesting.InvalidID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ }, false},
+ {"incorrect capability", func() {
+ _, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED)
+ packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), uint64(suite.chainB.GetContext().BlockTime().UnixNano()))
+ suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+
+ chanCap = capabilitytypes.NewCapability(100)
+ }, false},
+ }
+
+ for i, tc := range testCases {
+ tc := tc
+ suite.Run(fmt.Sprintf("Case %s, %d/%d tests", tc.msg, i, len(testCases)), func() {
+ suite.SetupTest() // reset
+
+ tc.malleate()
+
+ err := suite.chainA.App.IBCKeeper.ChannelKeeper.TimeoutExecuted(suite.chainA.GetContext(), chanCap, packet)
+ pc := suite.chainA.App.IBCKeeper.ChannelKeeper.GetPacketCommitment(suite.chainA.GetContext(), packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence())
+
+ if tc.expPass {
+ suite.NoError(err)
+ suite.Nil(pc)
+ } else {
+ suite.Error(err)
+ }
+ })
+ }
+}
+
+// TestTimeoutOnClose tests the TimeoutOnClose call on chainA by closing the corresponding
+// channel on chainB after the packet commitment has been created.
+func (suite *KeeperTestSuite) TestTimeoutOnClose() {
+ var (
+ packet types.Packet
+ chanCap *capabilitytypes.Capability
+ nextSeqRecv uint64
+ ordered bool
+ )
+
+ testCases := []testCase{
+ {"success: ORDERED", func() {
+ ordered = true
+ clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED)
+ packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), uint64(suite.chainB.GetContext().BlockTime().UnixNano()))
+ suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ suite.coordinator.SetChannelClosed(suite.chainB, suite.chainA, channelB)
+ // need to update chainA's client representing chainB to prove missing ack
+ suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+
+ chanCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ }, true},
+ {"success: UNORDERED", func() {
+ ordered = false
+ clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), disabledTimeoutTimestamp)
+ suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ suite.coordinator.SetChannelClosed(suite.chainB, suite.chainA, channelB)
+ // need to update chainA's client representing chainB to prove missing ack
+ suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+
+ chanCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ }, true},
+ {"channel not found", func() {
+ // use wrong channel naming
+ _, _, _, _, _, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ packet = types.NewPacket(validPacketData, 1, ibctesting.InvalidID, ibctesting.InvalidID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ }, false},
+ {"packet dest port ≠ channel counterparty port", func() {
+ _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ // use wrong port for dest
+ packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, ibctesting.InvalidID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ chanCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ }, false},
+ {"packet dest channel ID ≠ channel counterparty channel ID", func() {
+ _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ // use wrong channel for dest
+ packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, ibctesting.InvalidID, timeoutHeight, disabledTimeoutTimestamp)
+ chanCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ }, false},
+ {"connection not found", func() {
+ channelA := ibctesting.TestChannel{PortID: portID, ID: channelIDA}
+ channelB := ibctesting.TestChannel{PortID: portID, ID: channelIDB}
+ // pass channel check
+ suite.chainA.App.IBCKeeper.ChannelKeeper.SetChannel(
+ suite.chainA.GetContext(),
+ channelA.PortID, channelA.ID,
+ types.NewChannel(types.OPEN, types.ORDERED, types.NewCounterparty(channelB.PortID, channelB.ID), []string{connIDA}, channelA.Version),
+ )
+ packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+
+			// create the channel capability
+ suite.chainA.CreateChannelCapability(channelA.PortID, channelA.ID)
+ chanCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ }, false},
+ {"packet hasn't been sent", func() {
+ _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED)
+ packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), uint64(suite.chainB.GetContext().BlockTime().UnixNano()))
+ chanCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ }, false},
+ {"packet already received", func() {
+ nextSeqRecv = 2
+ ordered = true
+ clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED)
+ packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), uint64(suite.chainB.GetContext().BlockTime().UnixNano()))
+ suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ suite.coordinator.SetChannelClosed(suite.chainB, suite.chainA, channelB)
+ // need to update chainA's client representing chainB to prove missing ack
+ suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+
+ chanCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ }, false},
+ {"channel verification failed", func() {
+ ordered = true
+ _, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED)
+ packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), uint64(suite.chainB.GetContext().BlockTime().UnixNano()))
+ suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ chanCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ }, false},
+ {"next seq receive verification failed", func() {
+			// set ordered to false so the wrong proof type is provided for the ORDERED case
+ ordered = false
+ clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED)
+ packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), uint64(suite.chainB.GetContext().BlockTime().UnixNano()))
+ suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ suite.coordinator.SetChannelClosed(suite.chainB, suite.chainA, channelB)
+ suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ chanCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ }, false},
+ {"packet ack verification failed", func() {
+			// set ordered to true so the wrong proof type is provided for the UNORDERED case
+ ordered = true
+ clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), disabledTimeoutTimestamp)
+ suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ suite.coordinator.SetChannelClosed(suite.chainB, suite.chainA, channelB)
+ suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ chanCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ }, false},
+ {"channel capability not found", func() {
+ ordered = true
+ clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED)
+ packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), uint64(suite.chainB.GetContext().BlockTime().UnixNano()))
+ suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ suite.coordinator.SetChannelClosed(suite.chainB, suite.chainA, channelB)
+ // need to update chainA's client representing chainB to prove missing ack
+ suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+
+ chanCap = capabilitytypes.NewCapability(100)
+ }, false},
+ }
+
+ for i, tc := range testCases {
+ tc := tc
+ suite.Run(fmt.Sprintf("Case %s, %d/%d tests", tc.msg, i, len(testCases)), func() {
+ var proof []byte
+
+ suite.SetupTest() // reset
+ nextSeqRecv = 1 // must be explicitly changed
+ tc.malleate()
+
+ channelKey := host.ChannelKey(packet.GetDestPort(), packet.GetDestChannel())
+ unorderedPacketKey := host.PacketReceiptKey(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence())
+ orderedPacketKey := host.NextSequenceRecvKey(packet.GetDestPort(), packet.GetDestChannel())
+
+ proofClosed, proofHeight := suite.chainB.QueryProof(channelKey)
+
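+			// the proof queried depends on the channel ordering: ORDERED channels are
+			// proven with the counterparty's nextSequenceRecv, while UNORDERED channels
+			// are proven with the (absent) packet receipt for the timed-out sequence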
+ if ordered {
+ proof, _ = suite.chainB.QueryProof(orderedPacketKey)
+ } else {
+ proof, _ = suite.chainB.QueryProof(unorderedPacketKey)
+ }
+
+ err := suite.chainA.App.IBCKeeper.ChannelKeeper.TimeoutOnClose(suite.chainA.GetContext(), chanCap, packet, proof, proofClosed, proofHeight, nextSeqRecv)
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+}
diff --git a/core/04-channel/module.go b/core/04-channel/module.go
new file mode 100644
index 00000000..569120ad
--- /dev/null
+++ b/core/04-channel/module.go
@@ -0,0 +1,29 @@
+package channel
+
+import (
+ "github.com/gogo/protobuf/grpc"
+ "github.com/spf13/cobra"
+
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/client/cli"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+)
+
+// Name returns the IBC channel ICS name.
+func Name() string {
+ return types.SubModuleName
+}
+
+// GetTxCmd returns the root tx command for IBC channels.
+func GetTxCmd() *cobra.Command {
+ return cli.NewTxCmd()
+}
+
+// GetQueryCmd returns the root query command for IBC channels.
+func GetQueryCmd() *cobra.Command {
+ return cli.GetQueryCmd()
+}
+
+// RegisterQueryService registers the gRPC query service for IBC channels.
+func RegisterQueryService(server grpc.Server, queryServer types.QueryServer) {
+ types.RegisterQueryServer(server, queryServer)
+}
diff --git a/core/04-channel/simulation/decoder.go b/core/04-channel/simulation/decoder.go
new file mode 100644
index 00000000..809976cc
--- /dev/null
+++ b/core/04-channel/simulation/decoder.go
@@ -0,0 +1,48 @@
+package simulation
+
+import (
+ "bytes"
+ "fmt"
+
+ "github.com/cosmos/cosmos-sdk/codec"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/types/kv"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+ host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+)
+
+// NewDecodeStore unmarshals the Value of each provided KVPair to the
+// corresponding channel submodule type and returns a formatted comparison
+// string along with a bool indicating whether the key prefix was recognised.
+func NewDecodeStore(cdc codec.BinaryMarshaler, kvA, kvB kv.Pair) (string, bool) {
+ switch {
+ case bytes.HasPrefix(kvA.Key, []byte(host.KeyChannelEndPrefix)):
+ var channelA, channelB types.Channel
+ cdc.MustUnmarshalBinaryBare(kvA.Value, &channelA)
+ cdc.MustUnmarshalBinaryBare(kvB.Value, &channelB)
+ return fmt.Sprintf("Channel A: %v\nChannel B: %v", channelA, channelB), true
+
+ case bytes.HasPrefix(kvA.Key, []byte(host.KeyNextSeqSendPrefix)):
+ seqA := sdk.BigEndianToUint64(kvA.Value)
+ seqB := sdk.BigEndianToUint64(kvB.Value)
+ return fmt.Sprintf("NextSeqSend A: %d\nNextSeqSend B: %d", seqA, seqB), true
+
+ case bytes.HasPrefix(kvA.Key, []byte(host.KeyNextSeqRecvPrefix)):
+ seqA := sdk.BigEndianToUint64(kvA.Value)
+ seqB := sdk.BigEndianToUint64(kvB.Value)
+ return fmt.Sprintf("NextSeqRecv A: %d\nNextSeqRecv B: %d", seqA, seqB), true
+
+ case bytes.HasPrefix(kvA.Key, []byte(host.KeyNextSeqAckPrefix)):
+ seqA := sdk.BigEndianToUint64(kvA.Value)
+ seqB := sdk.BigEndianToUint64(kvB.Value)
+ return fmt.Sprintf("NextSeqAck A: %d\nNextSeqAck B: %d", seqA, seqB), true
+
+ case bytes.HasPrefix(kvA.Key, []byte(host.KeyPacketCommitmentPrefix)):
+ return fmt.Sprintf("CommitmentHash A: %X\nCommitmentHash B: %X", kvA.Value, kvB.Value), true
+
+ case bytes.HasPrefix(kvA.Key, []byte(host.KeyPacketAckPrefix)):
+ return fmt.Sprintf("AckHash A: %X\nAckHash B: %X", kvA.Value, kvB.Value), true
+
+ default:
+ return "", false
+ }
+}
diff --git a/core/04-channel/simulation/decoder_test.go b/core/04-channel/simulation/decoder_test.go
new file mode 100644
index 00000000..5f2ba2f5
--- /dev/null
+++ b/core/04-channel/simulation/decoder_test.go
@@ -0,0 +1,89 @@
+package simulation_test
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/cosmos/cosmos-sdk/simapp"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/types/kv"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/simulation"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+ host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+)
+
+func TestDecodeStore(t *testing.T) {
+ app := simapp.Setup(false)
+ cdc := app.AppCodec()
+
+ channelID := "channelidone"
+ portID := "portidone"
+
+ channel := types.Channel{
+ State: types.OPEN,
+ Version: "1.0",
+ }
+
+ bz := []byte{0x1, 0x2, 0x3}
+
+ kvPairs := kv.Pairs{
+ Pairs: []kv.Pair{
+ {
+ Key: host.ChannelKey(portID, channelID),
+ Value: cdc.MustMarshalBinaryBare(&channel),
+ },
+ {
+ Key: host.NextSequenceSendKey(portID, channelID),
+ Value: sdk.Uint64ToBigEndian(1),
+ },
+ {
+ Key: host.NextSequenceRecvKey(portID, channelID),
+ Value: sdk.Uint64ToBigEndian(1),
+ },
+ {
+ Key: host.NextSequenceAckKey(portID, channelID),
+ Value: sdk.Uint64ToBigEndian(1),
+ },
+ {
+ Key: host.PacketCommitmentKey(portID, channelID, 1),
+ Value: bz,
+ },
+ {
+ Key: host.PacketAcknowledgementKey(portID, channelID, 1),
+ Value: bz,
+ },
+ {
+ Key: []byte{0x99},
+ Value: []byte{0x99},
+ },
+ },
+ }
+ tests := []struct {
+ name string
+ expectedLog string
+ }{
+ {"Channel", fmt.Sprintf("Channel A: %v\nChannel B: %v", channel, channel)},
+ {"NextSeqSend", "NextSeqSend A: 1\nNextSeqSend B: 1"},
+ {"NextSeqRecv", "NextSeqRecv A: 1\nNextSeqRecv B: 1"},
+ {"NextSeqAck", "NextSeqAck A: 1\nNextSeqAck B: 1"},
+ {"CommitmentHash", fmt.Sprintf("CommitmentHash A: %X\nCommitmentHash B: %X", bz, bz)},
+ {"AckHash", fmt.Sprintf("AckHash A: %X\nAckHash B: %X", bz, bz)},
+ {"other", ""},
+ }
+
+ for i, tt := range tests {
+ i, tt := i, tt
+ t.Run(tt.name, func(t *testing.T) {
+ res, found := simulation.NewDecodeStore(cdc, kvPairs.Pairs[i], kvPairs.Pairs[i])
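+			// the last kv pair uses an unrecognised key prefix, so the decoder should report no match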
+ if i == len(tests)-1 {
+ require.False(t, found, string(kvPairs.Pairs[i].Key))
+ require.Empty(t, res, string(kvPairs.Pairs[i].Key))
+ } else {
+ require.True(t, found, string(kvPairs.Pairs[i].Key))
+ require.Equal(t, tt.expectedLog, res, string(kvPairs.Pairs[i].Key))
+ }
+ })
+ }
+}
diff --git a/core/04-channel/simulation/genesis.go b/core/04-channel/simulation/genesis.go
new file mode 100644
index 00000000..ed339021
--- /dev/null
+++ b/core/04-channel/simulation/genesis.go
@@ -0,0 +1,13 @@
+package simulation
+
+import (
+ "math/rand"
+
+ simtypes "github.com/cosmos/cosmos-sdk/types/simulation"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+)
+
+// GenChannelGenesis returns the default channel genesis state.
+func GenChannelGenesis(_ *rand.Rand, _ []simtypes.Account) types.GenesisState {
+ return types.DefaultGenesisState()
+}
diff --git a/core/04-channel/types/channel.go b/core/04-channel/types/channel.go
new file mode 100644
index 00000000..8513a812
--- /dev/null
+++ b/core/04-channel/types/channel.go
@@ -0,0 +1,172 @@
+package types
+
+import (
+ "strings"
+
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+)
+
+var (
+ _ exported.ChannelI = (*Channel)(nil)
+ _ exported.CounterpartyChannelI = (*Counterparty)(nil)
+)
+
+// NewChannel creates a new Channel instance
+func NewChannel(
+ state State, ordering Order, counterparty Counterparty,
+ hops []string, version string,
+) Channel {
+ return Channel{
+ State: state,
+ Ordering: ordering,
+ Counterparty: counterparty,
+ ConnectionHops: hops,
+ Version: version,
+ }
+}
+
+// GetState implements Channel interface.
+func (ch Channel) GetState() int32 {
+ return int32(ch.State)
+}
+
+// GetOrdering implements Channel interface.
+func (ch Channel) GetOrdering() int32 {
+ return int32(ch.Ordering)
+}
+
+// GetCounterparty implements Channel interface.
+func (ch Channel) GetCounterparty() exported.CounterpartyChannelI {
+ return ch.Counterparty
+}
+
+// GetConnectionHops implements Channel interface.
+func (ch Channel) GetConnectionHops() []string {
+ return ch.ConnectionHops
+}
+
+// GetVersion implements Channel interface.
+func (ch Channel) GetVersion() string {
+ return ch.Version
+}
+
+// ValidateBasic performs a basic validation of the channel fields
+func (ch Channel) ValidateBasic() error {
+ if ch.State == UNINITIALIZED {
+ return ErrInvalidChannelState
+ }
+ if !(ch.Ordering == ORDERED || ch.Ordering == UNORDERED) {
+ return sdkerrors.Wrap(ErrInvalidChannelOrdering, ch.Ordering.String())
+ }
+ if len(ch.ConnectionHops) != 1 {
+ return sdkerrors.Wrap(
+ ErrTooManyConnectionHops,
+ "current IBC version only supports one connection hop",
+ )
+ }
+ if err := host.ConnectionIdentifierValidator(ch.ConnectionHops[0]); err != nil {
+ return sdkerrors.Wrap(err, "invalid connection hop ID")
+ }
+ return ch.Counterparty.ValidateBasic()
+}
+
+// NewCounterparty returns a new Counterparty instance
+func NewCounterparty(portID, channelID string) Counterparty {
+ return Counterparty{
+ PortId: portID,
+ ChannelId: channelID,
+ }
+}
+
+// GetPortID implements CounterpartyChannelI interface
+func (c Counterparty) GetPortID() string {
+ return c.PortId
+}
+
+// GetChannelID implements CounterpartyChannelI interface
+func (c Counterparty) GetChannelID() string {
+ return c.ChannelId
+}
+
+// ValidateBasic performs a basic validation check of the identifiers
+func (c Counterparty) ValidateBasic() error {
+ if err := host.PortIdentifierValidator(c.PortId); err != nil {
+ return sdkerrors.Wrap(err, "invalid counterparty port ID")
+ }
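+	// the counterparty channel ID may legitimately be empty (e.g. before the
+	// handshake assigns one), so it is only validated when set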
+ if c.ChannelId != "" {
+ if err := host.ChannelIdentifierValidator(c.ChannelId); err != nil {
+ return sdkerrors.Wrap(err, "invalid counterparty channel ID")
+ }
+ }
+ return nil
+}
+
+// NewIdentifiedChannel creates a new IdentifiedChannel instance
+func NewIdentifiedChannel(portID, channelID string, ch Channel) IdentifiedChannel {
+ return IdentifiedChannel{
+ State: ch.State,
+ Ordering: ch.Ordering,
+ Counterparty: ch.Counterparty,
+ ConnectionHops: ch.ConnectionHops,
+ Version: ch.Version,
+ PortId: portID,
+ ChannelId: channelID,
+ }
+}
+
+// ValidateBasic performs a basic validation of the identifiers and channel fields.
+func (ic IdentifiedChannel) ValidateBasic() error {
+ if err := host.ChannelIdentifierValidator(ic.ChannelId); err != nil {
+ return sdkerrors.Wrap(err, "invalid channel ID")
+ }
+ if err := host.PortIdentifierValidator(ic.PortId); err != nil {
+ return sdkerrors.Wrap(err, "invalid port ID")
+ }
+ channel := NewChannel(ic.State, ic.Ordering, ic.Counterparty, ic.ConnectionHops, ic.Version)
+ return channel.ValidateBasic()
+}
+
+// NewResultAcknowledgement returns a new instance of Acknowledgement using an Acknowledgement_Result
+// type in the Response field.
+func NewResultAcknowledgement(result []byte) Acknowledgement {
+ return Acknowledgement{
+ Response: &Acknowledgement_Result{
+ Result: result,
+ },
+ }
+}
+
+// NewErrorAcknowledgement returns a new instance of Acknowledgement using an Acknowledgement_Error
+// type in the Response field.
+func NewErrorAcknowledgement(err string) Acknowledgement {
+ return Acknowledgement{
+ Response: &Acknowledgement_Error{
+ Error: err,
+ },
+ }
+}
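+
+// Note: an application module's OnRecvPacket handler typically returns
+// NewResultAcknowledgement on success and NewErrorAcknowledgement on failure;
+// both forms are serialised with GetBytes below before being written to state.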
+
+// GetBytes returns the deterministic (sorted) JSON encoding of the acknowledgement
+func (ack Acknowledgement) GetBytes() []byte {
+ return sdk.MustSortJSON(SubModuleCdc.MustMarshalJSON(&ack))
+}
+
+// ValidateBasic performs a basic validation of the acknowledgement
+func (ack Acknowledgement) ValidateBasic() error {
+ switch resp := ack.Response.(type) {
+ case *Acknowledgement_Result:
+ if len(resp.Result) == 0 {
+ return sdkerrors.Wrap(ErrInvalidAcknowledgement, "acknowledgement result cannot be empty")
+ }
+ case *Acknowledgement_Error:
+ if strings.TrimSpace(resp.Error) == "" {
+ return sdkerrors.Wrap(ErrInvalidAcknowledgement, "acknowledgement error cannot be empty")
+ }
+ default:
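+		// a nil Response also falls through to the default case and is rejected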
+ return sdkerrors.Wrapf(ErrInvalidAcknowledgement, "unsupported acknowledgement response field type %T", resp)
+ }
+ return nil
+}
diff --git a/core/04-channel/types/channel.pb.go b/core/04-channel/types/channel.pb.go
new file mode 100644
index 00000000..dada2008
--- /dev/null
+++ b/core/04-channel/types/channel.pb.go
@@ -0,0 +1,2270 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: ibcgo/core/channel/v1/channel.proto
+
+package types
+
+import (
+ fmt "fmt"
+ types "github.com/cosmos/ibc-go/core/02-client/types"
+ _ "github.com/gogo/protobuf/gogoproto"
+ proto "github.com/gogo/protobuf/proto"
+ io "io"
+ math "math"
+ math_bits "math/bits"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+// State defines if a channel is in one of the following states:
+// CLOSED, INIT, TRYOPEN, OPEN or UNINITIALIZED.
+type State int32
+
+const (
+ // Default State
+ UNINITIALIZED State = 0
+ // A channel has just started the opening handshake.
+ INIT State = 1
+ // A channel has acknowledged the handshake step on the counterparty chain.
+ TRYOPEN State = 2
+ // A channel has completed the handshake. Open channels are
+ // ready to send and receive packets.
+ OPEN State = 3
+ // A channel has been closed and can no longer be used to send or receive
+ // packets.
+ CLOSED State = 4
+)
+
+var State_name = map[int32]string{
+ 0: "STATE_UNINITIALIZED_UNSPECIFIED",
+ 1: "STATE_INIT",
+ 2: "STATE_TRYOPEN",
+ 3: "STATE_OPEN",
+ 4: "STATE_CLOSED",
+}
+
+var State_value = map[string]int32{
+ "STATE_UNINITIALIZED_UNSPECIFIED": 0,
+ "STATE_INIT": 1,
+ "STATE_TRYOPEN": 2,
+ "STATE_OPEN": 3,
+ "STATE_CLOSED": 4,
+}
+
+func (x State) String() string {
+ return proto.EnumName(State_name, int32(x))
+}
+
+func (State) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_3a7a8797f9808eee, []int{0}
+}
+
+// Order defines if a channel is ORDERED or UNORDERED
+type Order int32
+
+const (
+ // zero-value for channel ordering
+ NONE Order = 0
+ // packets can be delivered in any order, which may differ from the order in
+ // which they were sent.
+ UNORDERED Order = 1
+ // packets are delivered exactly in the order which they were sent
+ ORDERED Order = 2
+)
+
+var Order_name = map[int32]string{
+ 0: "ORDER_NONE_UNSPECIFIED",
+ 1: "ORDER_UNORDERED",
+ 2: "ORDER_ORDERED",
+}
+
+var Order_value = map[string]int32{
+ "ORDER_NONE_UNSPECIFIED": 0,
+ "ORDER_UNORDERED": 1,
+ "ORDER_ORDERED": 2,
+}
+
+func (x Order) String() string {
+ return proto.EnumName(Order_name, int32(x))
+}
+
+func (Order) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_3a7a8797f9808eee, []int{1}
+}
+
+// Channel defines pipeline for exactly-once packet delivery between specific
+// modules on separate blockchains, which has at least one end capable of
+// sending packets and one end capable of receiving packets.
+type Channel struct {
+ // current state of the channel end
+ State State `protobuf:"varint,1,opt,name=state,proto3,enum=ibcgo.core.channel.v1.State" json:"state,omitempty"`
+ // whether the channel is ordered or unordered
+ Ordering Order `protobuf:"varint,2,opt,name=ordering,proto3,enum=ibcgo.core.channel.v1.Order" json:"ordering,omitempty"`
+ // counterparty channel end
+ Counterparty Counterparty `protobuf:"bytes,3,opt,name=counterparty,proto3" json:"counterparty"`
+ // list of connection identifiers, in order, along which packets sent on
+ // this channel will travel
+ ConnectionHops []string `protobuf:"bytes,4,rep,name=connection_hops,json=connectionHops,proto3" json:"connection_hops,omitempty" yaml:"connection_hops"`
+ // opaque channel version, which is agreed upon during the handshake
+ Version string `protobuf:"bytes,5,opt,name=version,proto3" json:"version,omitempty"`
+}
+
+func (m *Channel) Reset() { *m = Channel{} }
+func (m *Channel) String() string { return proto.CompactTextString(m) }
+func (*Channel) ProtoMessage() {}
+func (*Channel) Descriptor() ([]byte, []int) {
+ return fileDescriptor_3a7a8797f9808eee, []int{0}
+}
+func (m *Channel) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Channel) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_Channel.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *Channel) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Channel.Merge(m, src)
+}
+func (m *Channel) XXX_Size() int {
+ return m.Size()
+}
+func (m *Channel) XXX_DiscardUnknown() {
+ xxx_messageInfo_Channel.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Channel proto.InternalMessageInfo
+
+// IdentifiedChannel defines a channel with additional port and channel
+// identifier fields.
+type IdentifiedChannel struct {
+ // current state of the channel end
+ State State `protobuf:"varint,1,opt,name=state,proto3,enum=ibcgo.core.channel.v1.State" json:"state,omitempty"`
+ // whether the channel is ordered or unordered
+ Ordering Order `protobuf:"varint,2,opt,name=ordering,proto3,enum=ibcgo.core.channel.v1.Order" json:"ordering,omitempty"`
+ // counterparty channel end
+ Counterparty Counterparty `protobuf:"bytes,3,opt,name=counterparty,proto3" json:"counterparty"`
+ // list of connection identifiers, in order, along which packets sent on
+ // this channel will travel
+ ConnectionHops []string `protobuf:"bytes,4,rep,name=connection_hops,json=connectionHops,proto3" json:"connection_hops,omitempty" yaml:"connection_hops"`
+ // opaque channel version, which is agreed upon during the handshake
+ Version string `protobuf:"bytes,5,opt,name=version,proto3" json:"version,omitempty"`
+ // port identifier
+ PortId string `protobuf:"bytes,6,opt,name=port_id,json=portId,proto3" json:"port_id,omitempty"`
+ // channel identifier
+ ChannelId string `protobuf:"bytes,7,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty"`
+}
+
+func (m *IdentifiedChannel) Reset() { *m = IdentifiedChannel{} }
+func (m *IdentifiedChannel) String() string { return proto.CompactTextString(m) }
+func (*IdentifiedChannel) ProtoMessage() {}
+func (*IdentifiedChannel) Descriptor() ([]byte, []int) {
+ return fileDescriptor_3a7a8797f9808eee, []int{1}
+}
+func (m *IdentifiedChannel) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *IdentifiedChannel) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_IdentifiedChannel.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *IdentifiedChannel) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_IdentifiedChannel.Merge(m, src)
+}
+func (m *IdentifiedChannel) XXX_Size() int {
+ return m.Size()
+}
+func (m *IdentifiedChannel) XXX_DiscardUnknown() {
+ xxx_messageInfo_IdentifiedChannel.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_IdentifiedChannel proto.InternalMessageInfo
+
+// Counterparty defines a channel end counterparty
+type Counterparty struct {
+ // port on the counterparty chain which owns the other end of the channel.
+ PortId string `protobuf:"bytes,1,opt,name=port_id,json=portId,proto3" json:"port_id,omitempty" yaml:"port_id"`
+ // channel end on the counterparty chain
+ ChannelId string `protobuf:"bytes,2,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty" yaml:"channel_id"`
+}
+
+func (m *Counterparty) Reset() { *m = Counterparty{} }
+func (m *Counterparty) String() string { return proto.CompactTextString(m) }
+func (*Counterparty) ProtoMessage() {}
+func (*Counterparty) Descriptor() ([]byte, []int) {
+ return fileDescriptor_3a7a8797f9808eee, []int{2}
+}
+func (m *Counterparty) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Counterparty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_Counterparty.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *Counterparty) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Counterparty.Merge(m, src)
+}
+func (m *Counterparty) XXX_Size() int {
+ return m.Size()
+}
+func (m *Counterparty) XXX_DiscardUnknown() {
+ xxx_messageInfo_Counterparty.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Counterparty proto.InternalMessageInfo
+
+// Packet defines a type that carries data across different chains through IBC
+type Packet struct {
+ // number corresponds to the order of sends and receives, where a Packet
+ // with an earlier sequence number must be sent and received before a Packet
+ // with a later sequence number.
+ Sequence uint64 `protobuf:"varint,1,opt,name=sequence,proto3" json:"sequence,omitempty"`
+ // identifies the port on the sending chain.
+ SourcePort string `protobuf:"bytes,2,opt,name=source_port,json=sourcePort,proto3" json:"source_port,omitempty" yaml:"source_port"`
+ // identifies the channel end on the sending chain.
+ SourceChannel string `protobuf:"bytes,3,opt,name=source_channel,json=sourceChannel,proto3" json:"source_channel,omitempty" yaml:"source_channel"`
+ // identifies the port on the receiving chain.
+ DestinationPort string `protobuf:"bytes,4,opt,name=destination_port,json=destinationPort,proto3" json:"destination_port,omitempty" yaml:"destination_port"`
+ // identifies the channel end on the receiving chain.
+ DestinationChannel string `protobuf:"bytes,5,opt,name=destination_channel,json=destinationChannel,proto3" json:"destination_channel,omitempty" yaml:"destination_channel"`
+ // actual opaque bytes transferred directly to the application module
+ Data []byte `protobuf:"bytes,6,opt,name=data,proto3" json:"data,omitempty"`
+ // block height after which the packet times out
+ TimeoutHeight types.Height `protobuf:"bytes,7,opt,name=timeout_height,json=timeoutHeight,proto3" json:"timeout_height" yaml:"timeout_height"`
+ // block timestamp (in nanoseconds) after which the packet times out
+ TimeoutTimestamp uint64 `protobuf:"varint,8,opt,name=timeout_timestamp,json=timeoutTimestamp,proto3" json:"timeout_timestamp,omitempty" yaml:"timeout_timestamp"`
+}
+
+func (m *Packet) Reset() { *m = Packet{} }
+func (m *Packet) String() string { return proto.CompactTextString(m) }
+func (*Packet) ProtoMessage() {}
+func (*Packet) Descriptor() ([]byte, []int) {
+ return fileDescriptor_3a7a8797f9808eee, []int{3}
+}
+func (m *Packet) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Packet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_Packet.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *Packet) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Packet.Merge(m, src)
+}
+func (m *Packet) XXX_Size() int {
+ return m.Size()
+}
+func (m *Packet) XXX_DiscardUnknown() {
+ xxx_messageInfo_Packet.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Packet proto.InternalMessageInfo
+
+// PacketState defines the generic type necessary to retrieve and store
+// packet commitments, acknowledgements, and receipts.
+// Caller is responsible for knowing the context necessary to interpret this
+// state as a commitment, acknowledgement, or a receipt.
+type PacketState struct {
+ // channel port identifier.
+ PortId string `protobuf:"bytes,1,opt,name=port_id,json=portId,proto3" json:"port_id,omitempty" yaml:"port_id"`
+ // channel unique identifier.
+ ChannelId string `protobuf:"bytes,2,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty" yaml:"channel_id"`
+ // packet sequence.
+ Sequence uint64 `protobuf:"varint,3,opt,name=sequence,proto3" json:"sequence,omitempty"`
+ // embedded data that represents packet state.
+ Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"`
+}
+
+func (m *PacketState) Reset() { *m = PacketState{} }
+func (m *PacketState) String() string { return proto.CompactTextString(m) }
+func (*PacketState) ProtoMessage() {}
+func (*PacketState) Descriptor() ([]byte, []int) {
+ return fileDescriptor_3a7a8797f9808eee, []int{4}
+}
+func (m *PacketState) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *PacketState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_PacketState.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *PacketState) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_PacketState.Merge(m, src)
+}
+func (m *PacketState) XXX_Size() int {
+ return m.Size()
+}
+func (m *PacketState) XXX_DiscardUnknown() {
+ xxx_messageInfo_PacketState.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PacketState proto.InternalMessageInfo
+
+// Acknowledgement is the recommended acknowledgement format to be used by
+// app-specific protocols.
+// NOTE: The field numbers 21 and 22 were explicitly chosen to avoid accidental
+// conflicts with other protobuf message formats used for acknowledgements.
+// The first byte of any message with this format will be the non-ASCII values
+// `0xaa` (result) or `0xb2` (error). Implemented as defined by ICS:
+// https://github.com/cosmos/ics/tree/master/spec/ics-004-channel-and-packet-semantics#acknowledgement-envelope
+type Acknowledgement struct {
+ // response contains either a result or an error and must be non-empty
+ //
+ // Types that are valid to be assigned to Response:
+ // *Acknowledgement_Result
+ // *Acknowledgement_Error
+ Response isAcknowledgement_Response `protobuf_oneof:"response"`
+}
+
+func (m *Acknowledgement) Reset() { *m = Acknowledgement{} }
+func (m *Acknowledgement) String() string { return proto.CompactTextString(m) }
+func (*Acknowledgement) ProtoMessage() {}
+func (*Acknowledgement) Descriptor() ([]byte, []int) {
+ return fileDescriptor_3a7a8797f9808eee, []int{5}
+}
+func (m *Acknowledgement) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Acknowledgement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_Acknowledgement.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *Acknowledgement) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Acknowledgement.Merge(m, src)
+}
+func (m *Acknowledgement) XXX_Size() int {
+ return m.Size()
+}
+func (m *Acknowledgement) XXX_DiscardUnknown() {
+ xxx_messageInfo_Acknowledgement.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Acknowledgement proto.InternalMessageInfo
+
+type isAcknowledgement_Response interface {
+ isAcknowledgement_Response()
+ MarshalTo([]byte) (int, error)
+ Size() int
+}
+
+type Acknowledgement_Result struct {
+ Result []byte `protobuf:"bytes,21,opt,name=result,proto3,oneof" json:"result,omitempty"`
+}
+type Acknowledgement_Error struct {
+ Error string `protobuf:"bytes,22,opt,name=error,proto3,oneof" json:"error,omitempty"`
+}
+
+func (*Acknowledgement_Result) isAcknowledgement_Response() {}
+func (*Acknowledgement_Error) isAcknowledgement_Response() {}
+
+func (m *Acknowledgement) GetResponse() isAcknowledgement_Response {
+ if m != nil {
+ return m.Response
+ }
+ return nil
+}
+
+func (m *Acknowledgement) GetResult() []byte {
+ if x, ok := m.GetResponse().(*Acknowledgement_Result); ok {
+ return x.Result
+ }
+ return nil
+}
+
+func (m *Acknowledgement) GetError() string {
+ if x, ok := m.GetResponse().(*Acknowledgement_Error); ok {
+ return x.Error
+ }
+ return ""
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*Acknowledgement) XXX_OneofWrappers() []interface{} {
+ return []interface{}{
+ (*Acknowledgement_Result)(nil),
+ (*Acknowledgement_Error)(nil),
+ }
+}
+
+func init() {
+ proto.RegisterEnum("ibcgo.core.channel.v1.State", State_name, State_value)
+ proto.RegisterEnum("ibcgo.core.channel.v1.Order", Order_name, Order_value)
+ proto.RegisterType((*Channel)(nil), "ibcgo.core.channel.v1.Channel")
+ proto.RegisterType((*IdentifiedChannel)(nil), "ibcgo.core.channel.v1.IdentifiedChannel")
+ proto.RegisterType((*Counterparty)(nil), "ibcgo.core.channel.v1.Counterparty")
+ proto.RegisterType((*Packet)(nil), "ibcgo.core.channel.v1.Packet")
+ proto.RegisterType((*PacketState)(nil), "ibcgo.core.channel.v1.PacketState")
+ proto.RegisterType((*Acknowledgement)(nil), "ibcgo.core.channel.v1.Acknowledgement")
+}
+
+func init() {
+ proto.RegisterFile("ibcgo/core/channel/v1/channel.proto", fileDescriptor_3a7a8797f9808eee)
+}
+
+var fileDescriptor_3a7a8797f9808eee = []byte{
+ // 904 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x55, 0xbd, 0x6e, 0xe3, 0x46,
+ 0x10, 0x16, 0x65, 0xea, 0x6f, 0x64, 0xc9, 0xf2, 0x26, 0xd6, 0x31, 0x8c, 0x4f, 0x54, 0x78, 0x29,
+ 0x8c, 0x0b, 0x4e, 0x8a, 0x9d, 0x43, 0x12, 0x5c, 0x15, 0x4b, 0xe2, 0x41, 0x04, 0x2e, 0x92, 0x41,
+ 0xcb, 0x45, 0xae, 0x11, 0x28, 0x72, 0x23, 0x11, 0x27, 0x71, 0x15, 0x72, 0x65, 0xc3, 0x65, 0xba,
+ 0x83, 0xaa, 0xbc, 0x80, 0x80, 0x00, 0x01, 0xd2, 0xe4, 0x01, 0xf2, 0x0a, 0x57, 0x5e, 0x99, 0x8a,
+ 0x08, 0xec, 0x3a, 0x8d, 0x9e, 0x20, 0xe0, 0x2e, 0xa9, 0xbf, 0xcb, 0x4f, 0x97, 0x2a, 0x95, 0x76,
+ 0xe6, 0xfb, 0x66, 0xe6, 0xdb, 0x99, 0x11, 0x17, 0x1e, 0x39, 0x03, 0x6b, 0x48, 0xea, 0x16, 0xf1,
+ 0x70, 0xdd, 0x1a, 0x99, 0xae, 0x8b, 0xc7, 0xf5, 0xeb, 0xd3, 0xf8, 0x58, 0x9b, 0x7a, 0x84, 0x12,
+ 0x74, 0xc4, 0x48, 0xb5, 0x90, 0x54, 0x8b, 0x91, 0xeb, 0x53, 0xf9, 0xfd, 0x21, 0x19, 0x12, 0xc6,
+ 0xa8, 0x87, 0x27, 0x4e, 0x96, 0x3f, 0xda, 0xcc, 0x38, 0x76, 0xb0, 0x4b, 0x59, 0x42, 0x76, 0xe2,
+ 0x14, 0xf5, 0x97, 0x24, 0x64, 0x9a, 0x3c, 0x0f, 0x3a, 0x83, 0x94, 0x4f, 0x4d, 0x8a, 0x25, 0xa1,
+ 0x2a, 0x9c, 0x14, 0xcf, 0x8e, 0x6b, 0x7f, 0x59, 0xab, 0x76, 0x19, 0x72, 0x0c, 0x4e, 0x45, 0x5f,
+ 0x42, 0x96, 0x78, 0x36, 0xf6, 0x1c, 0x77, 0x28, 0x25, 0xff, 0x31, 0xac, 0x1b, 0xd2, 0x8c, 0x15,
+ 0x1b, 0x7d, 0x0d, 0xfb, 0x16, 0x99, 0xb9, 0x14, 0x7b, 0x53, 0xd3, 0xa3, 0xb7, 0xd2, 0x5e, 0x55,
+ 0x38, 0xc9, 0x9f, 0x3d, 0xfa, 0x9b, 0xe8, 0xe6, 0x06, 0xb5, 0x21, 0xbe, 0x09, 0x94, 0x84, 0xb1,
+ 0x15, 0x8e, 0x9a, 0x70, 0x60, 0x11, 0xd7, 0xc5, 0x16, 0x75, 0x88, 0xdb, 0x1f, 0x91, 0xa9, 0x2f,
+ 0x89, 0xd5, 0xbd, 0x93, 0x5c, 0x43, 0x5e, 0x06, 0x4a, 0xf9, 0xd6, 0x9c, 0x8c, 0x9f, 0xa9, 0x3b,
+ 0x04, 0xd5, 0x28, 0xae, 0x3d, 0x6d, 0x32, 0xf5, 0x91, 0x04, 0x99, 0x6b, 0xec, 0xf9, 0x0e, 0x71,
+ 0xa5, 0x54, 0x55, 0x38, 0xc9, 0x19, 0xb1, 0xf9, 0x4c, 0x7c, 0xfd, 0xa3, 0x92, 0x50, 0xff, 0x48,
+ 0xc2, 0xa1, 0x6e, 0x63, 0x97, 0x3a, 0xdf, 0x3a, 0xd8, 0xfe, 0xbf, 0x6f, 0xff, 0xda, 0x37, 0xf4,
+ 0x00, 0x32, 0x53, 0xe2, 0xd1, 0xbe, 0x63, 0x4b, 0x69, 0x86, 0xa4, 0x43, 0x53, 0xb7, 0xd1, 0x43,
+ 0x80, 0x48, 0x66, 0x88, 0x65, 0x18, 0x96, 0x8b, 0x3c, 0xba, 0x1d, 0xf5, 0xfb, 0x06, 0xf6, 0x37,
+ 0x2f, 0x80, 0x3e, 0x59, 0x67, 0x0b, 0x7b, 0x9d, 0x6b, 0xa0, 0x65, 0xa0, 0x14, 0xb9, 0xc8, 0x08,
+ 0x50, 0x57, 0x15, 0x9e, 0x6e, 0x55, 0x48, 0x32, 0xfe, 0xd1, 0x32, 0x50, 0x0e, 0xa3, 0x4b, 0xad,
+ 0x30, 0xf5, 0xdd, 0xc2, 0xdf, 0x8b, 0x90, 0xbe, 0x30, 0xad, 0x57, 0x98, 0x22, 0x19, 0xb2, 0x3e,
+ 0xfe, 0x6e, 0x86, 0x5d, 0x8b, 0x0f, 0x58, 0x34, 0x56, 0x36, 0xfa, 0x02, 0xf2, 0x3e, 0x99, 0x79,
+ 0x16, 0xee, 0x87, 0x35, 0xa3, 0x1a, 0xe5, 0x65, 0xa0, 0x20, 0x5e, 0x63, 0x03, 0x54, 0x0d, 0xe0,
+ 0xd6, 0x05, 0xf1, 0x28, 0xfa, 0x0a, 0x8a, 0x11, 0x16, 0x55, 0x66, 0x63, 0xcc, 0x35, 0x3e, 0x58,
+ 0x06, 0xca, 0xd1, 0x56, 0x6c, 0x84, 0xab, 0x46, 0x81, 0x3b, 0xe2, 0xa5, 0x7b, 0x0e, 0x25, 0x1b,
+ 0xfb, 0xd4, 0x71, 0x4d, 0x36, 0x17, 0x56, 0x5f, 0x64, 0x39, 0x3e, 0x5c, 0x06, 0xca, 0x03, 0x9e,
+ 0x63, 0x97, 0xa1, 0x1a, 0x07, 0x1b, 0x2e, 0xa6, 0xa4, 0x0b, 0xef, 0x6d, 0xb2, 0x62, 0x39, 0x6c,
+ 0x8c, 0x8d, 0xca, 0x32, 0x50, 0xe4, 0x77, 0x53, 0xad, 0x34, 0xa1, 0x0d, 0x6f, 0x2c, 0x0c, 0x81,
+ 0x68, 0x9b, 0xd4, 0x64, 0xe3, 0xde, 0x37, 0xd8, 0x19, 0x0d, 0xa0, 0x48, 0x9d, 0x09, 0x26, 0x33,
+ 0xda, 0x1f, 0x61, 0x67, 0x38, 0xa2, 0x6c, 0xe0, 0xf9, 0x9d, 0x9d, 0xe7, 0xdf, 0xa5, 0xeb, 0xd3,
+ 0x5a, 0x9b, 0x71, 0x1a, 0x0f, 0xc3, 0x75, 0x5d, 0x37, 0x64, 0x3b, 0x83, 0x6a, 0x14, 0x22, 0x07,
+ 0x67, 0x23, 0x1d, 0x0e, 0x63, 0x46, 0xf8, 0xeb, 0x53, 0x73, 0x32, 0x95, 0xb2, 0xe1, 0xc0, 0x1a,
+ 0xc7, 0xcb, 0x40, 0x91, 0xb6, 0x93, 0xac, 0x28, 0xaa, 0x51, 0x8a, 0x7c, 0xbd, 0xd8, 0x15, 0xed,
+ 0xc0, 0xcf, 0x02, 0xe4, 0xf9, 0x0e, 0xb0, 0x7f, 0xee, 0x7f, 0xb0, 0x7c, 0x5b, 0xbb, 0xb6, 0xb7,
+ 0xb3, 0x6b, 0x71, 0x5f, 0xc5, 0x75, 0x5f, 0x23, 0xa1, 0x5d, 0x38, 0x38, 0xb7, 0x5e, 0xb9, 0xe4,
+ 0x66, 0x8c, 0xed, 0x21, 0x9e, 0x60, 0x97, 0x22, 0x09, 0xd2, 0x1e, 0xf6, 0x67, 0x63, 0x2a, 0x1d,
+ 0x85, 0xf4, 0x76, 0xc2, 0x88, 0x6c, 0x54, 0x86, 0x14, 0xf6, 0x3c, 0xe2, 0x49, 0xe5, 0x50, 0x53,
+ 0x3b, 0x61, 0x70, 0xb3, 0x01, 0x90, 0xf5, 0xb0, 0x3f, 0x25, 0xae, 0x8f, 0x1f, 0xff, 0x2a, 0x40,
+ 0x8a, 0xdf, 0xf9, 0x73, 0x50, 0x2e, 0x7b, 0xe7, 0x3d, 0xad, 0x7f, 0xd5, 0xd1, 0x3b, 0x7a, 0x4f,
+ 0x3f, 0x7f, 0xa1, 0xbf, 0xd4, 0x5a, 0xfd, 0xab, 0xce, 0xe5, 0x85, 0xd6, 0xd4, 0x9f, 0xeb, 0x5a,
+ 0xab, 0x94, 0x90, 0x0f, 0xe7, 0x8b, 0x6a, 0x61, 0x8b, 0x80, 0x24, 0x00, 0x1e, 0x17, 0x3a, 0x4b,
+ 0x82, 0x9c, 0x9d, 0x2f, 0xaa, 0x62, 0x78, 0x46, 0x15, 0x28, 0x70, 0xa4, 0x67, 0x7c, 0xd3, 0xbd,
+ 0xd0, 0x3a, 0xa5, 0xa4, 0x9c, 0x9f, 0x2f, 0xaa, 0x99, 0xc8, 0x5c, 0x47, 0x32, 0x70, 0x8f, 0x47,
+ 0x32, 0xe4, 0x18, 0xf6, 0x39, 0xd2, 0x7c, 0xd1, 0xbd, 0xd4, 0x5a, 0x25, 0x51, 0x86, 0xf9, 0xa2,
+ 0x9a, 0xe6, 0x96, 0x2c, 0xbe, 0xfe, 0xa9, 0x92, 0x78, 0x7c, 0x03, 0x29, 0xf6, 0xbd, 0x44, 0x1f,
+ 0x43, 0xb9, 0x6b, 0xb4, 0x34, 0xa3, 0xdf, 0xe9, 0x76, 0xb4, 0x1d, 0xbd, 0x2c, 0x65, 0xe8, 0x47,
+ 0x2a, 0x1c, 0x70, 0xd6, 0x55, 0x87, 0xfd, 0x6a, 0xad, 0x92, 0x20, 0x17, 0xe6, 0x8b, 0x6a, 0x6e,
+ 0xe5, 0x08, 0x05, 0x73, 0x4e, 0xcc, 0x88, 0x04, 0x47, 0x26, 0x2f, 0xdc, 0x68, 0xbf, 0xb9, 0xab,
+ 0x08, 0x6f, 0xef, 0x2a, 0xc2, 0xef, 0x77, 0x15, 0xe1, 0x87, 0xfb, 0x4a, 0xe2, 0xed, 0x7d, 0x25,
+ 0xf1, 0xdb, 0x7d, 0x25, 0xf1, 0xb2, 0x36, 0x74, 0xe8, 0x68, 0x36, 0xa8, 0x59, 0x64, 0x52, 0xb7,
+ 0x88, 0x3f, 0x21, 0x7e, 0xdd, 0x19, 0x58, 0x4f, 0xe2, 0x77, 0xf9, 0xd3, 0xa7, 0x4f, 0xe2, 0xc7,
+ 0x9e, 0xde, 0x4e, 0xb1, 0x3f, 0x48, 0xb3, 0x87, 0xf9, 0xb3, 0x3f, 0x03, 0x00, 0x00, 0xff, 0xff,
+ 0xbc, 0xc9, 0x53, 0x80, 0x0f, 0x08, 0x00, 0x00,
+}
+
+func (m *Channel) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Channel) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Channel) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Version) > 0 {
+ i -= len(m.Version)
+ copy(dAtA[i:], m.Version)
+ i = encodeVarintChannel(dAtA, i, uint64(len(m.Version)))
+ i--
+ dAtA[i] = 0x2a
+ }
+ if len(m.ConnectionHops) > 0 {
+ for iNdEx := len(m.ConnectionHops) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.ConnectionHops[iNdEx])
+ copy(dAtA[i:], m.ConnectionHops[iNdEx])
+ i = encodeVarintChannel(dAtA, i, uint64(len(m.ConnectionHops[iNdEx])))
+ i--
+ dAtA[i] = 0x22
+ }
+ }
+ {
+ size, err := m.Counterparty.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintChannel(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ if m.Ordering != 0 {
+ i = encodeVarintChannel(dAtA, i, uint64(m.Ordering))
+ i--
+ dAtA[i] = 0x10
+ }
+ if m.State != 0 {
+ i = encodeVarintChannel(dAtA, i, uint64(m.State))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *IdentifiedChannel) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *IdentifiedChannel) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *IdentifiedChannel) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.ChannelId) > 0 {
+ i -= len(m.ChannelId)
+ copy(dAtA[i:], m.ChannelId)
+ i = encodeVarintChannel(dAtA, i, uint64(len(m.ChannelId)))
+ i--
+ dAtA[i] = 0x3a
+ }
+ if len(m.PortId) > 0 {
+ i -= len(m.PortId)
+ copy(dAtA[i:], m.PortId)
+ i = encodeVarintChannel(dAtA, i, uint64(len(m.PortId)))
+ i--
+ dAtA[i] = 0x32
+ }
+ if len(m.Version) > 0 {
+ i -= len(m.Version)
+ copy(dAtA[i:], m.Version)
+ i = encodeVarintChannel(dAtA, i, uint64(len(m.Version)))
+ i--
+ dAtA[i] = 0x2a
+ }
+ if len(m.ConnectionHops) > 0 {
+ for iNdEx := len(m.ConnectionHops) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.ConnectionHops[iNdEx])
+ copy(dAtA[i:], m.ConnectionHops[iNdEx])
+ i = encodeVarintChannel(dAtA, i, uint64(len(m.ConnectionHops[iNdEx])))
+ i--
+ dAtA[i] = 0x22
+ }
+ }
+ {
+ size, err := m.Counterparty.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintChannel(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ if m.Ordering != 0 {
+ i = encodeVarintChannel(dAtA, i, uint64(m.Ordering))
+ i--
+ dAtA[i] = 0x10
+ }
+ if m.State != 0 {
+ i = encodeVarintChannel(dAtA, i, uint64(m.State))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Counterparty) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Counterparty) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Counterparty) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.ChannelId) > 0 {
+ i -= len(m.ChannelId)
+ copy(dAtA[i:], m.ChannelId)
+ i = encodeVarintChannel(dAtA, i, uint64(len(m.ChannelId)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.PortId) > 0 {
+ i -= len(m.PortId)
+ copy(dAtA[i:], m.PortId)
+ i = encodeVarintChannel(dAtA, i, uint64(len(m.PortId)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Packet) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Packet) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Packet) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.TimeoutTimestamp != 0 {
+ i = encodeVarintChannel(dAtA, i, uint64(m.TimeoutTimestamp))
+ i--
+ dAtA[i] = 0x40
+ }
+ {
+ size, err := m.TimeoutHeight.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintChannel(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x3a
+ if len(m.Data) > 0 {
+ i -= len(m.Data)
+ copy(dAtA[i:], m.Data)
+ i = encodeVarintChannel(dAtA, i, uint64(len(m.Data)))
+ i--
+ dAtA[i] = 0x32
+ }
+ if len(m.DestinationChannel) > 0 {
+ i -= len(m.DestinationChannel)
+ copy(dAtA[i:], m.DestinationChannel)
+ i = encodeVarintChannel(dAtA, i, uint64(len(m.DestinationChannel)))
+ i--
+ dAtA[i] = 0x2a
+ }
+ if len(m.DestinationPort) > 0 {
+ i -= len(m.DestinationPort)
+ copy(dAtA[i:], m.DestinationPort)
+ i = encodeVarintChannel(dAtA, i, uint64(len(m.DestinationPort)))
+ i--
+ dAtA[i] = 0x22
+ }
+ if len(m.SourceChannel) > 0 {
+ i -= len(m.SourceChannel)
+ copy(dAtA[i:], m.SourceChannel)
+ i = encodeVarintChannel(dAtA, i, uint64(len(m.SourceChannel)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if len(m.SourcePort) > 0 {
+ i -= len(m.SourcePort)
+ copy(dAtA[i:], m.SourcePort)
+ i = encodeVarintChannel(dAtA, i, uint64(len(m.SourcePort)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.Sequence != 0 {
+ i = encodeVarintChannel(dAtA, i, uint64(m.Sequence))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *PacketState) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *PacketState) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PacketState) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Data) > 0 {
+ i -= len(m.Data)
+ copy(dAtA[i:], m.Data)
+ i = encodeVarintChannel(dAtA, i, uint64(len(m.Data)))
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.Sequence != 0 {
+ i = encodeVarintChannel(dAtA, i, uint64(m.Sequence))
+ i--
+ dAtA[i] = 0x18
+ }
+ if len(m.ChannelId) > 0 {
+ i -= len(m.ChannelId)
+ copy(dAtA[i:], m.ChannelId)
+ i = encodeVarintChannel(dAtA, i, uint64(len(m.ChannelId)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.PortId) > 0 {
+ i -= len(m.PortId)
+ copy(dAtA[i:], m.PortId)
+ i = encodeVarintChannel(dAtA, i, uint64(len(m.PortId)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Acknowledgement) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Acknowledgement) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Acknowledgement) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Response != nil {
+ {
+ size := m.Response.Size()
+ i -= size
+ if _, err := m.Response.MarshalTo(dAtA[i:]); err != nil {
+ return 0, err
+ }
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Acknowledgement_Result) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Acknowledgement_Result) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ if m.Result != nil {
+ i -= len(m.Result)
+ copy(dAtA[i:], m.Result)
+ i = encodeVarintChannel(dAtA, i, uint64(len(m.Result)))
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0xaa
+ }
+ return len(dAtA) - i, nil
+}
+func (m *Acknowledgement_Error) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Acknowledgement_Error) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ i -= len(m.Error)
+ copy(dAtA[i:], m.Error)
+ i = encodeVarintChannel(dAtA, i, uint64(len(m.Error)))
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0xb2
+ return len(dAtA) - i, nil
+}
+func encodeVarintChannel(dAtA []byte, offset int, v uint64) int {
+ offset -= sovChannel(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *Channel) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.State != 0 {
+ n += 1 + sovChannel(uint64(m.State))
+ }
+ if m.Ordering != 0 {
+ n += 1 + sovChannel(uint64(m.Ordering))
+ }
+ l = m.Counterparty.Size()
+ n += 1 + l + sovChannel(uint64(l))
+ if len(m.ConnectionHops) > 0 {
+ for _, s := range m.ConnectionHops {
+ l = len(s)
+ n += 1 + l + sovChannel(uint64(l))
+ }
+ }
+ l = len(m.Version)
+ if l > 0 {
+ n += 1 + l + sovChannel(uint64(l))
+ }
+ return n
+}
+
+func (m *IdentifiedChannel) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.State != 0 {
+ n += 1 + sovChannel(uint64(m.State))
+ }
+ if m.Ordering != 0 {
+ n += 1 + sovChannel(uint64(m.Ordering))
+ }
+ l = m.Counterparty.Size()
+ n += 1 + l + sovChannel(uint64(l))
+ if len(m.ConnectionHops) > 0 {
+ for _, s := range m.ConnectionHops {
+ l = len(s)
+ n += 1 + l + sovChannel(uint64(l))
+ }
+ }
+ l = len(m.Version)
+ if l > 0 {
+ n += 1 + l + sovChannel(uint64(l))
+ }
+ l = len(m.PortId)
+ if l > 0 {
+ n += 1 + l + sovChannel(uint64(l))
+ }
+ l = len(m.ChannelId)
+ if l > 0 {
+ n += 1 + l + sovChannel(uint64(l))
+ }
+ return n
+}
+
+func (m *Counterparty) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.PortId)
+ if l > 0 {
+ n += 1 + l + sovChannel(uint64(l))
+ }
+ l = len(m.ChannelId)
+ if l > 0 {
+ n += 1 + l + sovChannel(uint64(l))
+ }
+ return n
+}
+
+func (m *Packet) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Sequence != 0 {
+ n += 1 + sovChannel(uint64(m.Sequence))
+ }
+ l = len(m.SourcePort)
+ if l > 0 {
+ n += 1 + l + sovChannel(uint64(l))
+ }
+ l = len(m.SourceChannel)
+ if l > 0 {
+ n += 1 + l + sovChannel(uint64(l))
+ }
+ l = len(m.DestinationPort)
+ if l > 0 {
+ n += 1 + l + sovChannel(uint64(l))
+ }
+ l = len(m.DestinationChannel)
+ if l > 0 {
+ n += 1 + l + sovChannel(uint64(l))
+ }
+ l = len(m.Data)
+ if l > 0 {
+ n += 1 + l + sovChannel(uint64(l))
+ }
+ l = m.TimeoutHeight.Size()
+ n += 1 + l + sovChannel(uint64(l))
+ if m.TimeoutTimestamp != 0 {
+ n += 1 + sovChannel(uint64(m.TimeoutTimestamp))
+ }
+ return n
+}
+
+func (m *PacketState) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.PortId)
+ if l > 0 {
+ n += 1 + l + sovChannel(uint64(l))
+ }
+ l = len(m.ChannelId)
+ if l > 0 {
+ n += 1 + l + sovChannel(uint64(l))
+ }
+ if m.Sequence != 0 {
+ n += 1 + sovChannel(uint64(m.Sequence))
+ }
+ l = len(m.Data)
+ if l > 0 {
+ n += 1 + l + sovChannel(uint64(l))
+ }
+ return n
+}
+
+func (m *Acknowledgement) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Response != nil {
+ n += m.Response.Size()
+ }
+ return n
+}
+
+func (m *Acknowledgement_Result) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Result != nil {
+ l = len(m.Result)
+ n += 2 + l + sovChannel(uint64(l))
+ }
+ return n
+}
+func (m *Acknowledgement_Error) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Error)
+ n += 2 + l + sovChannel(uint64(l))
+ return n
+}
+
+func sovChannel(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozChannel(x uint64) (n int) {
+ return sovChannel(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *Channel) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowChannel
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Channel: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Channel: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field State", wireType)
+ }
+ m.State = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowChannel
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.State |= State(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Ordering", wireType)
+ }
+ m.Ordering = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowChannel
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Ordering |= Order(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Counterparty", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowChannel
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthChannel
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthChannel
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Counterparty.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ConnectionHops", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowChannel
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthChannel
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthChannel
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ConnectionHops = append(m.ConnectionHops, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowChannel
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthChannel
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthChannel
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Version = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipChannel(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthChannel
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *IdentifiedChannel) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowChannel
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: IdentifiedChannel: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: IdentifiedChannel: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field State", wireType)
+ }
+ m.State = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowChannel
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.State |= State(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Ordering", wireType)
+ }
+ m.Ordering = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowChannel
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Ordering |= Order(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Counterparty", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowChannel
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthChannel
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthChannel
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Counterparty.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ConnectionHops", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowChannel
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthChannel
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthChannel
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ConnectionHops = append(m.ConnectionHops, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowChannel
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthChannel
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthChannel
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Version = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PortId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowChannel
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthChannel
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthChannel
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.PortId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ChannelId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowChannel
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthChannel
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthChannel
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ChannelId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipChannel(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthChannel
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Counterparty) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowChannel
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Counterparty: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Counterparty: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PortId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowChannel
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthChannel
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthChannel
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.PortId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ChannelId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowChannel
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthChannel
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthChannel
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ChannelId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipChannel(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthChannel
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Packet) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowChannel
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Packet: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Packet: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Sequence", wireType)
+ }
+ m.Sequence = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowChannel
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Sequence |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SourcePort", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowChannel
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthChannel
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthChannel
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.SourcePort = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SourceChannel", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowChannel
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthChannel
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthChannel
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.SourceChannel = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DestinationPort", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowChannel
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthChannel
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthChannel
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.DestinationPort = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DestinationChannel", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowChannel
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthChannel
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthChannel
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.DestinationChannel = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowChannel
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthChannel
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthChannel
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...)
+ if m.Data == nil {
+ m.Data = []byte{}
+ }
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TimeoutHeight", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowChannel
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthChannel
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthChannel
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.TimeoutHeight.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 8:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TimeoutTimestamp", wireType)
+ }
+ m.TimeoutTimestamp = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowChannel
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.TimeoutTimestamp |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipChannel(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthChannel
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PacketState) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowChannel
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PacketState: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PacketState: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PortId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowChannel
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthChannel
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthChannel
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.PortId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ChannelId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowChannel
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthChannel
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthChannel
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ChannelId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Sequence", wireType)
+ }
+ m.Sequence = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowChannel
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Sequence |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowChannel
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthChannel
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthChannel
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...)
+ if m.Data == nil {
+ m.Data = []byte{}
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipChannel(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthChannel
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Acknowledgement) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowChannel
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Acknowledgement: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Acknowledgement: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 21:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowChannel
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthChannel
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthChannel
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ v := make([]byte, postIndex-iNdEx)
+ copy(v, dAtA[iNdEx:postIndex])
+ m.Response = &Acknowledgement_Result{v}
+ iNdEx = postIndex
+ case 22:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowChannel
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthChannel
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthChannel
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Response = &Acknowledgement_Error{string(dAtA[iNdEx:postIndex])}
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipChannel(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthChannel
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipChannel(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowChannel
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowChannel
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowChannel
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthChannel
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupChannel
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthChannel
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthChannel = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowChannel = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupChannel = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/core/04-channel/types/channel_test.go b/core/04-channel/types/channel_test.go
new file mode 100644
index 00000000..30fee444
--- /dev/null
+++ b/core/04-channel/types/channel_test.go
@@ -0,0 +1,119 @@
+package types_test
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+)
+
+func TestChannelValidateBasic(t *testing.T) {
+ counterparty := types.Counterparty{"portidone", "channelidone"}
+ testCases := []struct {
+ name string
+ channel types.Channel
+ expPass bool
+ }{
+ {"valid channel", types.NewChannel(types.TRYOPEN, types.ORDERED, counterparty, connHops, version), true},
+ {"invalid state", types.NewChannel(types.UNINITIALIZED, types.ORDERED, counterparty, connHops, version), false},
+ {"invalid order", types.NewChannel(types.TRYOPEN, types.NONE, counterparty, connHops, version), false},
+ {"more than 1 connection hop", types.NewChannel(types.TRYOPEN, types.ORDERED, counterparty, []string{"connection1", "connection2"}, version), false},
+ {"invalid connection hop identifier", types.NewChannel(types.TRYOPEN, types.ORDERED, counterparty, []string{"(invalid)"}, version), false},
+ {"invalid counterparty", types.NewChannel(types.TRYOPEN, types.ORDERED, types.NewCounterparty("(invalidport)", "channelidone"), connHops, version), false},
+ }
+
+ for i, tc := range testCases {
+ tc := tc
+
+ err := tc.channel.ValidateBasic()
+ if tc.expPass {
+ require.NoError(t, err, "valid test case %d failed: %s", i, tc.name)
+ } else {
+ require.Error(t, err, "invalid test case %d passed: %s", i, tc.name)
+ }
+ }
+}
+
+func TestCounterpartyValidateBasic(t *testing.T) {
+ testCases := []struct {
+ name string
+ counterparty types.Counterparty
+ expPass bool
+ }{
+ {"valid counterparty", types.Counterparty{"portidone", "channelidone"}, true},
+ {"invalid port id", types.Counterparty{"(InvalidPort)", "channelidone"}, false},
+ {"invalid channel id", types.Counterparty{"portidone", "(InvalidChannel)"}, false},
+ }
+
+ for i, tc := range testCases {
+ tc := tc
+
+ err := tc.counterparty.ValidateBasic()
+ if tc.expPass {
+ require.NoError(t, err, "valid test case %d failed: %s", i, tc.name)
+ } else {
+ require.Error(t, err, "invalid test case %d passed: %s", i, tc.name)
+ }
+ }
+}
+
+// tests acknowledgement.ValidateBasic and acknowledgement.GetBytes
+func (suite TypesTestSuite) TestAcknowledgement() {
+ testCases := []struct {
+ name string
+ ack types.Acknowledgement
+ expPass bool
+ }{
+ {
+ "valid successful ack",
+ types.NewResultAcknowledgement([]byte("success")),
+ true,
+ },
+ {
+ "valid failed ack",
+ types.NewErrorAcknowledgement("error"),
+ true,
+ },
+ {
+ "empty successful ack",
+ types.NewResultAcknowledgement([]byte{}),
+ false,
+ },
+ {
+			"empty failed ack",
+ types.NewErrorAcknowledgement(" "),
+ false,
+ },
+ {
+ "nil response",
+ types.Acknowledgement{
+ Response: nil,
+ },
+ false,
+ },
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+
+ suite.Run(tc.name, func() {
+ suite.SetupTest()
+
+ err := tc.ack.ValidateBasic()
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ } else {
+ suite.Require().Error(err)
+ }
+
+ // expect all acks to be able to be marshaled
+ suite.NotPanics(func() {
+ bz := tc.ack.GetBytes()
+ suite.Require().NotNil(bz)
+ })
+ })
+ }
+}
diff --git a/core/04-channel/types/codec.go b/core/04-channel/types/codec.go
new file mode 100644
index 00000000..a74f0a7f
--- /dev/null
+++ b/core/04-channel/types/codec.go
@@ -0,0 +1,60 @@
+package types
+
+import (
+ "github.com/cosmos/cosmos-sdk/codec"
+ codectypes "github.com/cosmos/cosmos-sdk/codec/types"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/types/msgservice"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+)
+
+// RegisterInterfaces registers the ibc channel submodule interfaces to protobuf
+// Any.
+func RegisterInterfaces(registry codectypes.InterfaceRegistry) {
+ registry.RegisterInterface(
+ "ibc.core.channel.v1.ChannelI",
+ (*exported.ChannelI)(nil),
+ )
+ registry.RegisterInterface(
+ "ibc.core.channel.v1.CounterpartyChannelI",
+ (*exported.CounterpartyChannelI)(nil),
+ )
+ registry.RegisterInterface(
+ "ibc.core.channel.v1.PacketI",
+ (*exported.PacketI)(nil),
+ )
+ registry.RegisterImplementations(
+ (*exported.ChannelI)(nil),
+ &Channel{},
+ )
+ registry.RegisterImplementations(
+ (*exported.CounterpartyChannelI)(nil),
+ &Counterparty{},
+ )
+ registry.RegisterImplementations(
+ (*exported.PacketI)(nil),
+ &Packet{},
+ )
+ registry.RegisterImplementations(
+ (*sdk.Msg)(nil),
+ &MsgChannelOpenInit{},
+ &MsgChannelOpenTry{},
+ &MsgChannelOpenAck{},
+ &MsgChannelOpenConfirm{},
+ &MsgChannelCloseInit{},
+ &MsgChannelCloseConfirm{},
+ &MsgRecvPacket{},
+ &MsgAcknowledgement{},
+ &MsgTimeout{},
+ &MsgTimeoutOnClose{},
+ )
+
+ msgservice.RegisterMsgServiceDesc(registry, &_Msg_serviceDesc)
+}
+
+// SubModuleCdc references the global x/ibc/core/04-channel module codec. Note, the codec should
+// ONLY be used in certain instances of tests and for JSON encoding.
+//
+// The actual codec used for serialization should be provided to x/ibc/core/04-channel and
+// defined at the application level.
+var SubModuleCdc = codec.NewProtoCodec(codectypes.NewInterfaceRegistry())
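+
+// NewSubModuleCodec is an illustrative sketch, not part of the upstream API:
+// it shows the wiring an application would normally perform at the app level,
+// creating a fresh interface registry, registering the channel submodule
+// interfaces into it, and building a proto codec from the result.
+func NewSubModuleCodec() *codec.ProtoCodec {
+	registry := codectypes.NewInterfaceRegistry()
+	RegisterInterfaces(registry)
+	return codec.NewProtoCodec(registry)
+}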
diff --git a/core/04-channel/types/errors.go b/core/04-channel/types/errors.go
new file mode 100644
index 00000000..82cf7730
--- /dev/null
+++ b/core/04-channel/types/errors.go
@@ -0,0 +1,28 @@
+package types
+
+import (
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+)
+
+// IBC channel sentinel errors
+var (
+ ErrChannelExists = sdkerrors.Register(SubModuleName, 2, "channel already exists")
+ ErrChannelNotFound = sdkerrors.Register(SubModuleName, 3, "channel not found")
+ ErrInvalidChannel = sdkerrors.Register(SubModuleName, 4, "invalid channel")
+ ErrInvalidChannelState = sdkerrors.Register(SubModuleName, 5, "invalid channel state")
+ ErrInvalidChannelOrdering = sdkerrors.Register(SubModuleName, 6, "invalid channel ordering")
+ ErrInvalidCounterparty = sdkerrors.Register(SubModuleName, 7, "invalid counterparty channel")
+ ErrInvalidChannelCapability = sdkerrors.Register(SubModuleName, 8, "invalid channel capability")
+ ErrChannelCapabilityNotFound = sdkerrors.Register(SubModuleName, 9, "channel capability not found")
+ ErrSequenceSendNotFound = sdkerrors.Register(SubModuleName, 10, "sequence send not found")
+ ErrSequenceReceiveNotFound = sdkerrors.Register(SubModuleName, 11, "sequence receive not found")
+ ErrSequenceAckNotFound = sdkerrors.Register(SubModuleName, 12, "sequence acknowledgement not found")
+ ErrInvalidPacket = sdkerrors.Register(SubModuleName, 13, "invalid packet")
+ ErrPacketTimeout = sdkerrors.Register(SubModuleName, 14, "packet timeout")
+ ErrTooManyConnectionHops = sdkerrors.Register(SubModuleName, 15, "too many connection hops")
+ ErrInvalidAcknowledgement = sdkerrors.Register(SubModuleName, 16, "invalid acknowledgement")
+ ErrPacketCommitmentNotFound = sdkerrors.Register(SubModuleName, 17, "packet commitment not found")
+ ErrPacketReceived = sdkerrors.Register(SubModuleName, 18, "packet already received")
+ ErrAcknowledgementExists = sdkerrors.Register(SubModuleName, 19, "acknowledgement for packet already exists")
+ ErrInvalidChannelIdentifier = sdkerrors.Register(SubModuleName, 20, "invalid channel identifier")
+)
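+
+// wrapChannelNotFound is an illustrative sketch, not part of the upstream
+// code: the sentinel errors above are normally returned wrapped with the
+// identifiers that triggered them, using the cosmos-sdk errors helpers.
+func wrapChannelNotFound(portID, channelID string) error {
+	return sdkerrors.Wrapf(ErrChannelNotFound, "port ID (%s) channel ID (%s)", portID, channelID)
+}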
diff --git a/core/04-channel/types/events.go b/core/04-channel/types/events.go
new file mode 100644
index 00000000..b9ddb305
--- /dev/null
+++ b/core/04-channel/types/events.go
@@ -0,0 +1,46 @@
+package types
+
+import (
+ "fmt"
+
+ host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+)
+
+// IBC channel events
+const (
+ AttributeKeyConnectionID = "connection_id"
+ AttributeKeyPortID = "port_id"
+ AttributeKeyChannelID = "channel_id"
+ AttributeCounterpartyPortID = "counterparty_port_id"
+ AttributeCounterpartyChannelID = "counterparty_channel_id"
+
+ EventTypeSendPacket = "send_packet"
+ EventTypeRecvPacket = "recv_packet"
+ EventTypeWriteAck = "write_acknowledgement"
+ EventTypeAcknowledgePacket = "acknowledge_packet"
+ EventTypeTimeoutPacket = "timeout_packet"
+
+ AttributeKeyData = "packet_data"
+ AttributeKeyAck = "packet_ack"
+ AttributeKeyTimeoutHeight = "packet_timeout_height"
+ AttributeKeyTimeoutTimestamp = "packet_timeout_timestamp"
+ AttributeKeySequence = "packet_sequence"
+ AttributeKeySrcPort = "packet_src_port"
+ AttributeKeySrcChannel = "packet_src_channel"
+ AttributeKeyDstPort = "packet_dst_port"
+ AttributeKeyDstChannel = "packet_dst_channel"
+ AttributeKeyChannelOrdering = "packet_channel_ordering"
+ AttributeKeyConnection = "packet_connection"
+)
+
+// IBC channel events vars
+var (
+ EventTypeChannelOpenInit = MsgChannelOpenInit{}.Type()
+ EventTypeChannelOpenTry = MsgChannelOpenTry{}.Type()
+ EventTypeChannelOpenAck = MsgChannelOpenAck{}.Type()
+ EventTypeChannelOpenConfirm = MsgChannelOpenConfirm{}.Type()
+ EventTypeChannelCloseInit = MsgChannelCloseInit{}.Type()
+ EventTypeChannelCloseConfirm = MsgChannelCloseConfirm{}.Type()
+
+ AttributeValueCategory = fmt.Sprintf("%s_%s", host.ModuleName, SubModuleName)
+)
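+
+// The attribute keys and event types above are consumed when the channel
+// keeper emits events. An emission looks roughly like the following sketch
+// (illustrative only; it assumes the standard sdk types import and a packet
+// value in scope):
+//
+//	ctx.EventManager().EmitEvent(
+//		sdk.NewEvent(
+//			EventTypeSendPacket,
+//			sdk.NewAttribute(AttributeKeySequence, fmt.Sprintf("%d", packet.GetSequence())),
+//			sdk.NewAttribute(AttributeKeySrcPort, packet.GetSourcePort()),
+//			sdk.NewAttribute(AttributeKeySrcChannel, packet.GetSourceChannel()),
+//		),
+//	)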
diff --git a/core/04-channel/types/expected_keepers.go b/core/04-channel/types/expected_keepers.go
new file mode 100644
index 00000000..d3b74b7e
--- /dev/null
+++ b/core/04-channel/types/expected_keepers.go
@@ -0,0 +1,76 @@
+package types
+
+import (
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"
+ connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+)
+
+// ClientKeeper defines the expected IBC client keeper
+type ClientKeeper interface {
+ GetClientState(ctx sdk.Context, clientID string) (exported.ClientState, bool)
+ GetClientConsensusState(ctx sdk.Context, clientID string, height exported.Height) (exported.ConsensusState, bool)
+}
+
+// ConnectionKeeper defines the expected IBC connection keeper
+type ConnectionKeeper interface {
+ GetConnection(ctx sdk.Context, connectionID string) (connectiontypes.ConnectionEnd, bool)
+ GetTimestampAtHeight(
+ ctx sdk.Context,
+ connection connectiontypes.ConnectionEnd,
+ height exported.Height,
+ ) (uint64, error)
+ VerifyChannelState(
+ ctx sdk.Context,
+ connection exported.ConnectionI,
+ height exported.Height,
+ proof []byte,
+ portID,
+ channelID string,
+ channel exported.ChannelI,
+ ) error
+ VerifyPacketCommitment(
+ ctx sdk.Context,
+ connection exported.ConnectionI,
+ height exported.Height,
+ proof []byte,
+ portID,
+ channelID string,
+ sequence uint64,
+ commitmentBytes []byte,
+ ) error
+ VerifyPacketAcknowledgement(
+ ctx sdk.Context,
+ connection exported.ConnectionI,
+ height exported.Height,
+ proof []byte,
+ portID,
+ channelID string,
+ sequence uint64,
+ acknowledgement []byte,
+ ) error
+ VerifyPacketReceiptAbsence(
+ ctx sdk.Context,
+ connection exported.ConnectionI,
+ height exported.Height,
+ proof []byte,
+ portID,
+ channelID string,
+ sequence uint64,
+ ) error
+ VerifyNextSequenceRecv(
+ ctx sdk.Context,
+ connection exported.ConnectionI,
+ height exported.Height,
+ proof []byte,
+ portID,
+ channelID string,
+ nextSequenceRecv uint64,
+ ) error
+}
+
+// PortKeeper defines the expected IBC port keeper
+type PortKeeper interface {
+ Authenticate(ctx sdk.Context, key *capabilitytypes.Capability, portID string) bool
+}
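+
+// mockPortKeeper is an illustrative sketch, not part of the upstream code: a
+// trivial PortKeeper stand-in that authenticates every capability, the kind
+// of test double a unit test might hand to the channel keeper.
+type mockPortKeeper struct{}
+
+func (mockPortKeeper) Authenticate(ctx sdk.Context, key *capabilitytypes.Capability, portID string) bool {
+	return true
+}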
diff --git a/core/04-channel/types/genesis.go b/core/04-channel/types/genesis.go
new file mode 100644
index 00000000..2c431e97
--- /dev/null
+++ b/core/04-channel/types/genesis.go
@@ -0,0 +1,156 @@
+package types
+
+import (
+ "errors"
+ "fmt"
+
+ host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+)
+
+// NewPacketState creates a new PacketState instance.
+func NewPacketState(portID, channelID string, seq uint64, data []byte) PacketState {
+ return PacketState{
+ PortId: portID,
+ ChannelId: channelID,
+ Sequence: seq,
+ Data: data,
+ }
+}
+
+// Validate performs basic validation of fields returning an error upon any
+// failure.
+func (pa PacketState) Validate() error {
+ if pa.Data == nil {
+ return errors.New("data bytes cannot be nil")
+ }
+ return validateGenFields(pa.PortId, pa.ChannelId, pa.Sequence)
+}
+
+// NewPacketSequence creates a new PacketSequence instance.
+func NewPacketSequence(portID, channelID string, seq uint64) PacketSequence {
+ return PacketSequence{
+ PortId: portID,
+ ChannelId: channelID,
+ Sequence: seq,
+ }
+}
+
+// Validate performs basic validation of fields returning an error upon any
+// failure.
+func (ps PacketSequence) Validate() error {
+ return validateGenFields(ps.PortId, ps.ChannelId, ps.Sequence)
+}
+
+// NewGenesisState creates a GenesisState instance.
+func NewGenesisState(
+ channels []IdentifiedChannel, acks, receipts, commitments []PacketState,
+ sendSeqs, recvSeqs, ackSeqs []PacketSequence, nextChannelSequence uint64,
+) GenesisState {
+ return GenesisState{
+ Channels: channels,
+		Acknowledgements:    acks,
+		Receipts:            receipts,
+ Commitments: commitments,
+ SendSequences: sendSeqs,
+ RecvSequences: recvSeqs,
+ AckSequences: ackSeqs,
+ NextChannelSequence: nextChannelSequence,
+ }
+}
+
+// DefaultGenesisState returns the ibc channel submodule's default genesis state.
+func DefaultGenesisState() GenesisState {
+ return GenesisState{
+ Channels: []IdentifiedChannel{},
+ Acknowledgements: []PacketState{},
+ Receipts: []PacketState{},
+ Commitments: []PacketState{},
+ SendSequences: []PacketSequence{},
+ RecvSequences: []PacketSequence{},
+ AckSequences: []PacketSequence{},
+ NextChannelSequence: 0,
+ }
+}
+
+// Validate performs basic genesis state validation returning an error upon any
+// failure.
+func (gs GenesisState) Validate() error {
+ // keep track of the max sequence to ensure it is less than
+	// the next sequence used in creating channel identifiers.
+ var maxSequence uint64 = 0
+
+ for i, channel := range gs.Channels {
+ sequence, err := ParseChannelSequence(channel.ChannelId)
+ if err != nil {
+ return err
+ }
+
+ if sequence > maxSequence {
+ maxSequence = sequence
+ }
+
+ if err := channel.ValidateBasic(); err != nil {
+ return fmt.Errorf("invalid channel %v channel index %d: %w", channel, i, err)
+ }
+ }
+
+ if maxSequence != 0 && maxSequence >= gs.NextChannelSequence {
+ return fmt.Errorf("next channel sequence %d must be greater than maximum sequence used in channel identifier %d", gs.NextChannelSequence, maxSequence)
+ }
+
+ for i, ack := range gs.Acknowledgements {
+ if err := ack.Validate(); err != nil {
+ return fmt.Errorf("invalid acknowledgement %v ack index %d: %w", ack, i, err)
+ }
+ if len(ack.Data) == 0 {
+ return fmt.Errorf("invalid acknowledgement %v ack index %d: data bytes cannot be empty", ack, i)
+ }
+ }
+
+ for i, receipt := range gs.Receipts {
+ if err := receipt.Validate(); err != nil {
+			return fmt.Errorf("invalid receipt %v receipt index %d: %w", receipt, i, err)
+ }
+ }
+
+ for i, commitment := range gs.Commitments {
+ if err := commitment.Validate(); err != nil {
+ return fmt.Errorf("invalid commitment %v index %d: %w", commitment, i, err)
+ }
+ if len(commitment.Data) == 0 {
+			return fmt.Errorf("invalid commitment %v index %d: data bytes cannot be empty", commitment, i)
+ }
+ }
+
+ for i, ss := range gs.SendSequences {
+ if err := ss.Validate(); err != nil {
+ return fmt.Errorf("invalid send sequence %v index %d: %w", ss, i, err)
+ }
+ }
+
+ for i, rs := range gs.RecvSequences {
+ if err := rs.Validate(); err != nil {
+ return fmt.Errorf("invalid receive sequence %v index %d: %w", rs, i, err)
+ }
+ }
+
+ for i, as := range gs.AckSequences {
+ if err := as.Validate(); err != nil {
+ return fmt.Errorf("invalid acknowledgement sequence %v index %d: %w", as, i, err)
+ }
+ }
+
+ return nil
+}
+
+func validateGenFields(portID, channelID string, sequence uint64) error {
+ if err := host.PortIdentifierValidator(portID); err != nil {
+ return fmt.Errorf("invalid port Id: %w", err)
+ }
+ if err := host.ChannelIdentifierValidator(channelID); err != nil {
+ return fmt.Errorf("invalid channel Id: %w", err)
+ }
+ if sequence == 0 {
+ return errors.New("sequence cannot be 0")
+ }
+ return nil
+}
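+
+// exampleGenesisState is an illustrative sketch, not part of the upstream
+// code: it assembles a minimal genesis with a single TRYOPEN channel and the
+// three sequence counters starting at 1. Validate is expected to accept it,
+// since "channel-0" parses to sequence 0, which is below NextChannelSequence.
+func exampleGenesisState() GenesisState {
+	channel := IdentifiedChannel{
+		State:          TRYOPEN,
+		Ordering:       ORDERED,
+		Counterparty:   Counterparty{PortId: "transfer", ChannelId: "channel-0"},
+		ConnectionHops: []string{"connection-0"},
+		Version:        "ics20-1",
+		PortId:         "transfer",
+		ChannelId:      "channel-0",
+	}
+	return NewGenesisState(
+		[]IdentifiedChannel{channel},
+		[]PacketState{}, []PacketState{}, []PacketState{},
+		[]PacketSequence{NewPacketSequence("transfer", "channel-0", 1)},
+		[]PacketSequence{NewPacketSequence("transfer", "channel-0", 1)},
+		[]PacketSequence{NewPacketSequence("transfer", "channel-0", 1)},
+		1,
+	)
+}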
diff --git a/core/04-channel/types/genesis.pb.go b/core/04-channel/types/genesis.pb.go
new file mode 100644
index 00000000..9c55fc20
--- /dev/null
+++ b/core/04-channel/types/genesis.pb.go
@@ -0,0 +1,1017 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: ibcgo/core/channel/v1/genesis.proto
+
+package types
+
+import (
+ fmt "fmt"
+ _ "github.com/gogo/protobuf/gogoproto"
+ proto "github.com/gogo/protobuf/proto"
+ io "io"
+ math "math"
+ math_bits "math/bits"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+// GenesisState defines the ibc channel submodule's genesis state.
+type GenesisState struct {
+ Channels []IdentifiedChannel `protobuf:"bytes,1,rep,name=channels,proto3,casttype=IdentifiedChannel" json:"channels"`
+ Acknowledgements []PacketState `protobuf:"bytes,2,rep,name=acknowledgements,proto3" json:"acknowledgements"`
+ Commitments []PacketState `protobuf:"bytes,3,rep,name=commitments,proto3" json:"commitments"`
+ Receipts []PacketState `protobuf:"bytes,4,rep,name=receipts,proto3" json:"receipts"`
+ SendSequences []PacketSequence `protobuf:"bytes,5,rep,name=send_sequences,json=sendSequences,proto3" json:"send_sequences" yaml:"send_sequences"`
+ RecvSequences []PacketSequence `protobuf:"bytes,6,rep,name=recv_sequences,json=recvSequences,proto3" json:"recv_sequences" yaml:"recv_sequences"`
+ AckSequences []PacketSequence `protobuf:"bytes,7,rep,name=ack_sequences,json=ackSequences,proto3" json:"ack_sequences" yaml:"ack_sequences"`
+ // the sequence for the next generated channel identifier
+ NextChannelSequence uint64 `protobuf:"varint,8,opt,name=next_channel_sequence,json=nextChannelSequence,proto3" json:"next_channel_sequence,omitempty" yaml:"next_channel_sequence"`
+}
+
+func (m *GenesisState) Reset() { *m = GenesisState{} }
+func (m *GenesisState) String() string { return proto.CompactTextString(m) }
+func (*GenesisState) ProtoMessage() {}
+func (*GenesisState) Descriptor() ([]byte, []int) {
+ return fileDescriptor_c4d4e081eaaab7c3, []int{0}
+}
+func (m *GenesisState) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *GenesisState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_GenesisState.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *GenesisState) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GenesisState.Merge(m, src)
+}
+func (m *GenesisState) XXX_Size() int {
+ return m.Size()
+}
+func (m *GenesisState) XXX_DiscardUnknown() {
+ xxx_messageInfo_GenesisState.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GenesisState proto.InternalMessageInfo
+
+func (m *GenesisState) GetChannels() []IdentifiedChannel {
+ if m != nil {
+ return m.Channels
+ }
+ return nil
+}
+
+func (m *GenesisState) GetAcknowledgements() []PacketState {
+ if m != nil {
+ return m.Acknowledgements
+ }
+ return nil
+}
+
+func (m *GenesisState) GetCommitments() []PacketState {
+ if m != nil {
+ return m.Commitments
+ }
+ return nil
+}
+
+func (m *GenesisState) GetReceipts() []PacketState {
+ if m != nil {
+ return m.Receipts
+ }
+ return nil
+}
+
+func (m *GenesisState) GetSendSequences() []PacketSequence {
+ if m != nil {
+ return m.SendSequences
+ }
+ return nil
+}
+
+func (m *GenesisState) GetRecvSequences() []PacketSequence {
+ if m != nil {
+ return m.RecvSequences
+ }
+ return nil
+}
+
+func (m *GenesisState) GetAckSequences() []PacketSequence {
+ if m != nil {
+ return m.AckSequences
+ }
+ return nil
+}
+
+func (m *GenesisState) GetNextChannelSequence() uint64 {
+ if m != nil {
+ return m.NextChannelSequence
+ }
+ return 0
+}
+
+// PacketSequence defines the genesis type necessary to retrieve and store
+// next send and receive sequences.
+type PacketSequence struct {
+ PortId string `protobuf:"bytes,1,opt,name=port_id,json=portId,proto3" json:"port_id,omitempty" yaml:"port_id"`
+ ChannelId string `protobuf:"bytes,2,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty" yaml:"channel_id"`
+ Sequence uint64 `protobuf:"varint,3,opt,name=sequence,proto3" json:"sequence,omitempty"`
+}
+
+func (m *PacketSequence) Reset() { *m = PacketSequence{} }
+func (m *PacketSequence) String() string { return proto.CompactTextString(m) }
+func (*PacketSequence) ProtoMessage() {}
+func (*PacketSequence) Descriptor() ([]byte, []int) {
+ return fileDescriptor_c4d4e081eaaab7c3, []int{1}
+}
+func (m *PacketSequence) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *PacketSequence) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_PacketSequence.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *PacketSequence) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_PacketSequence.Merge(m, src)
+}
+func (m *PacketSequence) XXX_Size() int {
+ return m.Size()
+}
+func (m *PacketSequence) XXX_DiscardUnknown() {
+ xxx_messageInfo_PacketSequence.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PacketSequence proto.InternalMessageInfo
+
+func (m *PacketSequence) GetPortId() string {
+ if m != nil {
+ return m.PortId
+ }
+ return ""
+}
+
+func (m *PacketSequence) GetChannelId() string {
+ if m != nil {
+ return m.ChannelId
+ }
+ return ""
+}
+
+func (m *PacketSequence) GetSequence() uint64 {
+ if m != nil {
+ return m.Sequence
+ }
+ return 0
+}
+
+func init() {
+ proto.RegisterType((*GenesisState)(nil), "ibcgo.core.channel.v1.GenesisState")
+ proto.RegisterType((*PacketSequence)(nil), "ibcgo.core.channel.v1.PacketSequence")
+}
+
+func init() {
+ proto.RegisterFile("ibcgo/core/channel/v1/genesis.proto", fileDescriptor_c4d4e081eaaab7c3)
+}
+
+var fileDescriptor_c4d4e081eaaab7c3 = []byte{
+ // 497 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x93, 0xcf, 0x6e, 0xd3, 0x30,
+ 0x1c, 0x80, 0x9b, 0xb5, 0xeb, 0x3a, 0x6f, 0xad, 0x98, 0x59, 0xa5, 0x50, 0x8d, 0xa4, 0x32, 0x42,
+ 0xaa, 0x84, 0x96, 0x30, 0xd8, 0x89, 0x63, 0x40, 0x82, 0x72, 0x42, 0x61, 0x27, 0x2e, 0x55, 0xea,
+ 0xfc, 0x48, 0xad, 0x36, 0x71, 0x89, 0xbd, 0xc2, 0x9e, 0x02, 0x1e, 0x6b, 0xc7, 0x1d, 0x39, 0x45,
+ 0xa8, 0x7d, 0x02, 0x7a, 0xe4, 0x84, 0xe2, 0xfc, 0x69, 0xab, 0x6d, 0x48, 0x15, 0xb7, 0xd8, 0xfe,
+ 0xfc, 0x7d, 0xbf, 0x43, 0x8c, 0x9e, 0xb0, 0x21, 0x0d, 0xb8, 0x4d, 0x79, 0x0c, 0x36, 0x1d, 0x79,
+ 0x51, 0x04, 0x13, 0x7b, 0x76, 0x66, 0x07, 0x10, 0x81, 0x60, 0xc2, 0x9a, 0xc6, 0x5c, 0x72, 0xdc,
+ 0x56, 0x90, 0x95, 0x42, 0x56, 0x0e, 0x59, 0xb3, 0xb3, 0xce, 0x71, 0xc0, 0x03, 0xae, 0x08, 0x3b,
+ 0xfd, 0xca, 0xe0, 0xce, 0x3d, 0xc6, 0xe2, 0x9e, 0x82, 0xc8, 0xef, 0x5d, 0x74, 0xf8, 0x36, 0x6b,
+ 0x7c, 0x94, 0x9e, 0x04, 0x3c, 0x40, 0x8d, 0x9c, 0x10, 0xba, 0xd6, 0xad, 0xf6, 0x0e, 0x5e, 0xf4,
+ 0xac, 0x3b, 0xab, 0x56, 0xdf, 0x87, 0x48, 0xb2, 0xcf, 0x0c, 0xfc, 0xd7, 0xd9, 0xa6, 0xf3, 0xe8,
+ 0x3a, 0x31, 0x2b, 0x7f, 0x12, 0xf3, 0xe8, 0xd6, 0x91, 0x5b, 0x4a, 0xf1, 0x05, 0x7a, 0xe0, 0xd1,
+ 0x71, 0xc4, 0xbf, 0x4e, 0xc0, 0x0f, 0x20, 0x84, 0x48, 0x0a, 0x7d, 0x47, 0x85, 0xc8, 0x3d, 0xa1,
+ 0x0f, 0x1e, 0x1d, 0x83, 0x54, 0xe3, 0x39, 0xb5, 0x34, 0xe1, 0xde, 0x32, 0xe0, 0xf7, 0xe8, 0x80,
+ 0xf2, 0x30, 0x64, 0x32, 0x13, 0x56, 0xb7, 0x14, 0xae, 0x5f, 0xc6, 0x6f, 0x50, 0x23, 0x06, 0x0a,
+ 0x6c, 0x2a, 0x85, 0x5e, 0xdb, 0x52, 0x54, 0xde, 0xc4, 0x63, 0xd4, 0x12, 0x10, 0xf9, 0x03, 0x01,
+ 0x5f, 0x2e, 0x21, 0xa2, 0x20, 0xf4, 0x5d, 0xe5, 0x7a, 0xfa, 0x6f, 0x57, 0x4e, 0x3b, 0x8f, 0x53,
+ 0xdd, 0x32, 0x31, 0xdb, 0x57, 0x5e, 0x38, 0x79, 0x45, 0x36, 0x55, 0xc4, 0x6d, 0xa6, 0x1b, 0x05,
+ 0xac, 0x62, 0x31, 0xd0, 0xd9, 0x5a, 0xac, 0xfe, 0x1f, 0xb1, 0x4d, 0x15, 0x71, 0x9b, 0xe9, 0xc6,
+ 0x2a, 0x36, 0x42, 0x4d, 0x8f, 0x8e, 0xd7, 0x5a, 0x7b, 0xdb, 0xb4, 0x4e, 0xf2, 0xd6, 0x71, 0xd6,
+ 0xda, 0x30, 0x11, 0xf7, 0xd0, 0xa3, 0xe3, 0x55, 0xe9, 0x02, 0xb5, 0x23, 0xf8, 0x26, 0x07, 0xb9,
+ 0xad, 0x04, 0xf5, 0x46, 0x57, 0xeb, 0xd5, 0x9c, 0xee, 0x32, 0x31, 0x4f, 0x32, 0xcd, 0x9d, 0x18,
+ 0x71, 0x1f, 0xa6, 0xfb, 0xf9, 0x3f, 0x58, 0x68, 0xc9, 0x77, 0x0d, 0xb5, 0x36, 0x87, 0xc2, 0xcf,
+ 0xd0, 0xde, 0x94, 0xc7, 0x72, 0xc0, 0x7c, 0x5d, 0xeb, 0x6a, 0xbd, 0x7d, 0x07, 0x2f, 0x13, 0xb3,
+ 0x95, 0xa9, 0xf3, 0x03, 0xe2, 0xd6, 0xd3, 0xaf, 0xbe, 0x8f, 0xcf, 0x11, 0x2a, 0x4a, 0xcc, 0xd7,
+ 0x77, 0x14, 0xdf, 0x5e, 0x26, 0xe6, 0x51, 0xc6, 0xaf, 0xce, 0x88, 0xbb, 0x9f, 0x2f, 0xfa, 0x3e,
+ 0xee, 0xa0, 0x46, 0x39, 0x7e, 0x35, 0x1d, 0xdf, 0x2d, 0xd7, 0xce, 0xbb, 0xeb, 0xb9, 0xa1, 0xdd,
+ 0xcc, 0x0d, 0xed, 0xd7, 0xdc, 0xd0, 0x7e, 0x2c, 0x8c, 0xca, 0xcd, 0xc2, 0xa8, 0xfc, 0x5c, 0x18,
+ 0x95, 0x4f, 0x56, 0xc0, 0xe4, 0xe8, 0x72, 0x68, 0x51, 0x1e, 0xda, 0x94, 0x8b, 0x90, 0x0b, 0x9b,
+ 0x0d, 0xe9, 0x69, 0xf1, 0xae, 0x9f, 0x9f, 0x9f, 0x16, 0x4f, 0x5b, 0x5e, 0x4d, 0x41, 0x0c, 0xeb,
+ 0xea, 0x59, 0xbf, 0xfc, 0x1b, 0x00, 0x00, 0xff, 0xff, 0x02, 0xd2, 0xd3, 0x2f, 0x4f, 0x04, 0x00,
+ 0x00,
+}
+
+func (m *GenesisState) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *GenesisState) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.NextChannelSequence != 0 {
+ i = encodeVarintGenesis(dAtA, i, uint64(m.NextChannelSequence))
+ i--
+ dAtA[i] = 0x40
+ }
+ if len(m.AckSequences) > 0 {
+ for iNdEx := len(m.AckSequences) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.AckSequences[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenesis(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x3a
+ }
+ }
+ if len(m.RecvSequences) > 0 {
+ for iNdEx := len(m.RecvSequences) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.RecvSequences[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenesis(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x32
+ }
+ }
+ if len(m.SendSequences) > 0 {
+ for iNdEx := len(m.SendSequences) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.SendSequences[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenesis(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2a
+ }
+ }
+ if len(m.Receipts) > 0 {
+ for iNdEx := len(m.Receipts) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Receipts[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenesis(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ }
+ if len(m.Commitments) > 0 {
+ for iNdEx := len(m.Commitments) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Commitments[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenesis(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ if len(m.Acknowledgements) > 0 {
+ for iNdEx := len(m.Acknowledgements) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Acknowledgements[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenesis(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if len(m.Channels) > 0 {
+ for iNdEx := len(m.Channels) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Channels[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenesis(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *PacketSequence) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *PacketSequence) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PacketSequence) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Sequence != 0 {
+ i = encodeVarintGenesis(dAtA, i, uint64(m.Sequence))
+ i--
+ dAtA[i] = 0x18
+ }
+ if len(m.ChannelId) > 0 {
+ i -= len(m.ChannelId)
+ copy(dAtA[i:], m.ChannelId)
+ i = encodeVarintGenesis(dAtA, i, uint64(len(m.ChannelId)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.PortId) > 0 {
+ i -= len(m.PortId)
+ copy(dAtA[i:], m.PortId)
+ i = encodeVarintGenesis(dAtA, i, uint64(len(m.PortId)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintGenesis(dAtA []byte, offset int, v uint64) int {
+ offset -= sovGenesis(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *GenesisState) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Channels) > 0 {
+ for _, e := range m.Channels {
+ l = e.Size()
+ n += 1 + l + sovGenesis(uint64(l))
+ }
+ }
+ if len(m.Acknowledgements) > 0 {
+ for _, e := range m.Acknowledgements {
+ l = e.Size()
+ n += 1 + l + sovGenesis(uint64(l))
+ }
+ }
+ if len(m.Commitments) > 0 {
+ for _, e := range m.Commitments {
+ l = e.Size()
+ n += 1 + l + sovGenesis(uint64(l))
+ }
+ }
+ if len(m.Receipts) > 0 {
+ for _, e := range m.Receipts {
+ l = e.Size()
+ n += 1 + l + sovGenesis(uint64(l))
+ }
+ }
+ if len(m.SendSequences) > 0 {
+ for _, e := range m.SendSequences {
+ l = e.Size()
+ n += 1 + l + sovGenesis(uint64(l))
+ }
+ }
+ if len(m.RecvSequences) > 0 {
+ for _, e := range m.RecvSequences {
+ l = e.Size()
+ n += 1 + l + sovGenesis(uint64(l))
+ }
+ }
+ if len(m.AckSequences) > 0 {
+ for _, e := range m.AckSequences {
+ l = e.Size()
+ n += 1 + l + sovGenesis(uint64(l))
+ }
+ }
+ if m.NextChannelSequence != 0 {
+ n += 1 + sovGenesis(uint64(m.NextChannelSequence))
+ }
+ return n
+}
+
+func (m *PacketSequence) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.PortId)
+ if l > 0 {
+ n += 1 + l + sovGenesis(uint64(l))
+ }
+ l = len(m.ChannelId)
+ if l > 0 {
+ n += 1 + l + sovGenesis(uint64(l))
+ }
+ if m.Sequence != 0 {
+ n += 1 + sovGenesis(uint64(m.Sequence))
+ }
+ return n
+}
+
+func sovGenesis(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozGenesis(x uint64) (n int) {
+ return sovGenesis(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *GenesisState) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenesis
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: GenesisState: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: GenesisState: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Channels", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenesis
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenesis
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenesis
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Channels = append(m.Channels, IdentifiedChannel{})
+ if err := m.Channels[len(m.Channels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Acknowledgements", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenesis
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenesis
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenesis
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Acknowledgements = append(m.Acknowledgements, PacketState{})
+ if err := m.Acknowledgements[len(m.Acknowledgements)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Commitments", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenesis
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenesis
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenesis
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Commitments = append(m.Commitments, PacketState{})
+ if err := m.Commitments[len(m.Commitments)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Receipts", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenesis
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenesis
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenesis
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Receipts = append(m.Receipts, PacketState{})
+ if err := m.Receipts[len(m.Receipts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SendSequences", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenesis
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenesis
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenesis
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.SendSequences = append(m.SendSequences, PacketSequence{})
+ if err := m.SendSequences[len(m.SendSequences)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RecvSequences", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenesis
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenesis
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenesis
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.RecvSequences = append(m.RecvSequences, PacketSequence{})
+ if err := m.RecvSequences[len(m.RecvSequences)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AckSequences", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenesis
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenesis
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenesis
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.AckSequences = append(m.AckSequences, PacketSequence{})
+ if err := m.AckSequences[len(m.AckSequences)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 8:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NextChannelSequence", wireType)
+ }
+ m.NextChannelSequence = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenesis
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.NextChannelSequence |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenesis(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenesis
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PacketSequence) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenesis
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PacketSequence: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PacketSequence: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PortId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenesis
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenesis
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenesis
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.PortId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ChannelId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenesis
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenesis
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenesis
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ChannelId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Sequence", wireType)
+ }
+ m.Sequence = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenesis
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Sequence |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenesis(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenesis
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipGenesis(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenesis
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenesis
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenesis
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthGenesis
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupGenesis
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthGenesis
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthGenesis = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowGenesis = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupGenesis = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/core/04-channel/types/genesis_test.go b/core/04-channel/types/genesis_test.go
new file mode 100644
index 00000000..a0d21007
--- /dev/null
+++ b/core/04-channel/types/genesis_test.go
@@ -0,0 +1,225 @@
+package types_test
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+)
+
+const (
+ testPort1 = "firstport"
+ testPort2 = "secondport"
+ testConnectionIDA = "connectionidatob"
+
+ testChannel1 = "channel-0"
+ testChannel2 = "channel-1"
+
+ testChannelOrder = types.ORDERED
+ testChannelVersion = "1.0"
+)
+
+func TestValidateGenesis(t *testing.T) {
+ counterparty1 := types.NewCounterparty(testPort1, testChannel1)
+ counterparty2 := types.NewCounterparty(testPort2, testChannel2)
+ testCases := []struct {
+ name string
+ genState types.GenesisState
+ expPass bool
+ }{
+ {
+ name: "default",
+ genState: types.DefaultGenesisState(),
+ expPass: true,
+ },
+ {
+ name: "valid genesis",
+ genState: types.NewGenesisState(
+ []types.IdentifiedChannel{
+ types.NewIdentifiedChannel(
+ testPort1, testChannel1, types.NewChannel(
+ types.INIT, testChannelOrder, counterparty2, []string{testConnectionIDA}, testChannelVersion,
+ ),
+ ),
+ types.NewIdentifiedChannel(
+ testPort2, testChannel2, types.NewChannel(
+ types.INIT, testChannelOrder, counterparty1, []string{testConnectionIDA}, testChannelVersion,
+ ),
+ ),
+ },
+ []types.PacketState{
+ types.NewPacketState(testPort2, testChannel2, 1, []byte("ack")),
+ },
+ []types.PacketState{
+ types.NewPacketState(testPort2, testChannel2, 1, []byte("")),
+ },
+ []types.PacketState{
+ types.NewPacketState(testPort1, testChannel1, 1, []byte("commit_hash")),
+ },
+ []types.PacketSequence{
+ types.NewPacketSequence(testPort1, testChannel1, 1),
+ },
+ []types.PacketSequence{
+ types.NewPacketSequence(testPort2, testChannel2, 1),
+ },
+ []types.PacketSequence{
+ types.NewPacketSequence(testPort2, testChannel2, 1),
+ },
+ 2,
+ ),
+ expPass: true,
+ },
+ {
+ name: "invalid channel",
+ genState: types.GenesisState{
+ Channels: []types.IdentifiedChannel{
+ types.NewIdentifiedChannel(
+ testPort1, "(testChannel1)", types.NewChannel(
+ types.INIT, testChannelOrder, counterparty2, []string{testConnectionIDA}, testChannelVersion,
+ ),
+ ),
+ },
+ },
+ expPass: false,
+ },
+ {
+ name: "invalid ack",
+ genState: types.GenesisState{
+ Acknowledgements: []types.PacketState{
+ types.NewPacketState(testPort2, testChannel2, 1, nil),
+ },
+ },
+ expPass: false,
+ },
+ {
+ name: "invalid commitment",
+ genState: types.GenesisState{
+ Commitments: []types.PacketState{
+ types.NewPacketState(testPort1, testChannel1, 1, nil),
+ },
+ },
+ expPass: false,
+ },
+ {
+ name: "invalid send seq",
+ genState: types.GenesisState{
+ SendSequences: []types.PacketSequence{
+ types.NewPacketSequence(testPort1, testChannel1, 0),
+ },
+ },
+ expPass: false,
+ },
+ {
+ name: "invalid recv seq",
+ genState: types.GenesisState{
+ RecvSequences: []types.PacketSequence{
+ types.NewPacketSequence(testPort1, "(testChannel1)", 1),
+ },
+ },
+ expPass: false,
+ },
+ {
+ name: "invalid recv seq 2",
+ genState: types.GenesisState{
+ RecvSequences: []types.PacketSequence{
+ types.NewPacketSequence("(testPort1)", testChannel1, 1),
+ },
+ },
+ expPass: false,
+ },
+ {
+ name: "invalid ack seq",
+ genState: types.GenesisState{
+ AckSequences: []types.PacketSequence{
+ types.NewPacketSequence(testPort1, "(testChannel1)", 1),
+ },
+ },
+ expPass: false,
+ },
+ {
+ name: "invalid channel identifier",
+ genState: types.NewGenesisState(
+ []types.IdentifiedChannel{
+ types.NewIdentifiedChannel(
+ testPort1, "chan-0", types.NewChannel(
+ types.INIT, testChannelOrder, counterparty2, []string{testConnectionIDA}, testChannelVersion,
+ ),
+ ),
+ types.NewIdentifiedChannel(
+ testPort2, testChannel2, types.NewChannel(
+ types.INIT, testChannelOrder, counterparty1, []string{testConnectionIDA}, testChannelVersion,
+ ),
+ ),
+ },
+ []types.PacketState{
+ types.NewPacketState(testPort2, testChannel2, 1, []byte("ack")),
+ },
+ []types.PacketState{
+ types.NewPacketState(testPort2, testChannel2, 1, []byte("")),
+ },
+ []types.PacketState{
+ types.NewPacketState(testPort1, testChannel1, 1, []byte("commit_hash")),
+ },
+ []types.PacketSequence{
+ types.NewPacketSequence(testPort1, testChannel1, 1),
+ },
+ []types.PacketSequence{
+ types.NewPacketSequence(testPort2, testChannel2, 1),
+ },
+ []types.PacketSequence{
+ types.NewPacketSequence(testPort2, testChannel2, 1),
+ },
+ 0,
+ ),
+ expPass: false,
+ },
+ {
+ name: "next channel sequence is less than maximum channel identifier sequence used",
+ genState: types.NewGenesisState(
+ []types.IdentifiedChannel{
+ types.NewIdentifiedChannel(
+ testPort1, "channel-10", types.NewChannel(
+ types.INIT, testChannelOrder, counterparty2, []string{testConnectionIDA}, testChannelVersion,
+ ),
+ ),
+ types.NewIdentifiedChannel(
+ testPort2, testChannel2, types.NewChannel(
+ types.INIT, testChannelOrder, counterparty1, []string{testConnectionIDA}, testChannelVersion,
+ ),
+ ),
+ },
+ []types.PacketState{
+ types.NewPacketState(testPort2, testChannel2, 1, []byte("ack")),
+ },
+ []types.PacketState{
+ types.NewPacketState(testPort2, testChannel2, 1, []byte("")),
+ },
+ []types.PacketState{
+ types.NewPacketState(testPort1, testChannel1, 1, []byte("commit_hash")),
+ },
+ []types.PacketSequence{
+ types.NewPacketSequence(testPort1, testChannel1, 1),
+ },
+ []types.PacketSequence{
+ types.NewPacketSequence(testPort2, testChannel2, 1),
+ },
+ []types.PacketSequence{
+ types.NewPacketSequence(testPort2, testChannel2, 1),
+ },
+ 0,
+ ),
+ expPass: false,
+ },
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+ err := tc.genState.Validate()
+ if tc.expPass {
+ require.NoError(t, err, tc.name)
+ } else {
+ require.Error(t, err, tc.name)
+ }
+ }
+}
diff --git a/core/04-channel/types/keys.go b/core/04-channel/types/keys.go
new file mode 100644
index 00000000..d3a6cde2
--- /dev/null
+++ b/core/04-channel/types/keys.go
@@ -0,0 +1,61 @@
+package types
+
+import (
+ "fmt"
+ "regexp"
+
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+)
+
+const (
+ // SubModuleName defines the IBC channel submodule name
+ SubModuleName = "channel"
+
+ // StoreKey is the store key string for IBC channels
+ StoreKey = SubModuleName
+
+ // RouterKey is the message route for IBC channels
+ RouterKey = SubModuleName
+
+ // QuerierRoute is the querier route for IBC channels
+ QuerierRoute = SubModuleName
+
+ // KeyNextChannelSequence is the key used to store the next channel sequence in
+ // the keeper.
+ KeyNextChannelSequence = "nextChannelSequence"
+
+ // ChannelPrefix is the prefix used when creating a channel identifier
+ ChannelPrefix = "channel-"
+)
+
+// FormatChannelIdentifier returns the channel identifier with the sequence appended.
+// This is an SDK-specific format not enforced by the IBC protocol.
+func FormatChannelIdentifier(sequence uint64) string {
+ return fmt.Sprintf("%s%d", ChannelPrefix, sequence)
+}
+
+// IsChannelIDFormat checks if a channelID is in the format required by the SDK for
+// parsing channel identifiers. The channel identifier must be in the form: `channel-{N}`.
+var IsChannelIDFormat = regexp.MustCompile(`^channel-[0-9]{1,20}$`).MatchString
+
+// IsValidChannelID checks if a channelID is valid and can be parsed to the channel
+// identifier format.
+func IsValidChannelID(channelID string) bool {
+ _, err := ParseChannelSequence(channelID)
+ return err == nil
+}
+
+// ParseChannelSequence parses the channel sequence from the channel identifier.
+func ParseChannelSequence(channelID string) (uint64, error) {
+ if !IsChannelIDFormat(channelID) {
+ return 0, sdkerrors.Wrap(host.ErrInvalidID, "channel identifier is not in the format: `channel-{N}`")
+ }
+
+ sequence, err := host.ParseIdentifier(channelID, ChannelPrefix)
+ if err != nil {
+ return 0, sdkerrors.Wrap(err, "invalid channel identifier")
+ }
+
+ return sequence, nil
+}
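+
+// For example (illustration), under the `channel-{N}` format above:
+//
+//   FormatChannelIdentifier(7)        // "channel-7"
+//   ParseChannelSequence("channel-7") // 7, nil
+//   IsValidChannelID("channel-7")     // true
+//   IsValidChannelID("chan-7")        // false: the "channel-" prefix is required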
diff --git a/core/04-channel/types/keys_test.go b/core/04-channel/types/keys_test.go
new file mode 100644
index 00000000..9bc6500b
--- /dev/null
+++ b/core/04-channel/types/keys_test.go
@@ -0,0 +1,47 @@
+package types_test
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+)
+
+// tests ParseChannelSequence and IsValidChannelID
+func TestParseChannelSequence(t *testing.T) {
+ testCases := []struct {
+ name string
+ channelID string
+ expSeq uint64
+ expPass bool
+ }{
+ {"valid 0", "channel-0", 0, true},
+ {"valid 1", "channel-1", 1, true},
+ {"valid large sequence", "channel-234568219356718293", 234568219356718293, true},
+ // one above uint64 max
+ {"invalid uint64", "channel-18446744073709551616", 0, false},
+ // more than 20 digits (max uint64 is 20 digits long), rejected by the format check
+ {"invalid large sequence", "channel-2345682193567182931243", 0, false},
+ {"capital prefix", "Channel-0", 0, false},
+ {"missing dash", "channel0", 0, false},
+ {"blank id", " ", 0, false},
+ {"empty id", "", 0, false},
+ {"negative sequence", "channel--1", 0, false},
+ }
+
+ for _, tc := range testCases {
+
+ seq, err := types.ParseChannelSequence(tc.channelID)
+ valid := types.IsValidChannelID(tc.channelID)
+ require.Equal(t, tc.expSeq, seq)
+
+ if tc.expPass {
+ require.NoError(t, err, tc.name)
+ require.True(t, valid)
+ } else {
+ require.Error(t, err, tc.name)
+ require.False(t, valid)
+ }
+ }
+}
diff --git a/core/04-channel/types/msgs.go b/core/04-channel/types/msgs.go
new file mode 100644
index 00000000..da14a310
--- /dev/null
+++ b/core/04-channel/types/msgs.go
@@ -0,0 +1,652 @@
+package types
+
+import (
+ "encoding/base64"
+
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
+ host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+)
+
+var _ sdk.Msg = &MsgChannelOpenInit{}
+
+// NewMsgChannelOpenInit creates a new MsgChannelOpenInit. It sets the counterparty channel
+// identifier to be empty.
+// nolint:interfacer
+func NewMsgChannelOpenInit(
+ portID, version string, channelOrder Order, connectionHops []string,
+ counterpartyPortID string, signer sdk.AccAddress,
+) *MsgChannelOpenInit {
+ counterparty := NewCounterparty(counterpartyPortID, "")
+ channel := NewChannel(INIT, channelOrder, counterparty, connectionHops, version)
+ return &MsgChannelOpenInit{
+ PortId: portID,
+ Channel: channel,
+ Signer: signer.String(),
+ }
+}
+
+// Route implements sdk.Msg
+func (msg MsgChannelOpenInit) Route() string {
+ return host.RouterKey
+}
+
+// Type implements sdk.Msg
+func (msg MsgChannelOpenInit) Type() string {
+ return "channel_open_init"
+}
+
+// ValidateBasic implements sdk.Msg
+func (msg MsgChannelOpenInit) ValidateBasic() error {
+ if err := host.PortIdentifierValidator(msg.PortId); err != nil {
+ return sdkerrors.Wrap(err, "invalid port ID")
+ }
+ if msg.Channel.State != INIT {
+ return sdkerrors.Wrapf(ErrInvalidChannelState,
+ "channel state must be INIT in MsgChannelOpenInit. expected: %s, got: %s",
+ INIT, msg.Channel.State,
+ )
+ }
+ if msg.Channel.Counterparty.ChannelId != "" {
+ return sdkerrors.Wrap(ErrInvalidCounterparty, "counterparty channel identifier must be empty")
+ }
+ _, err := sdk.AccAddressFromBech32(msg.Signer)
+ if err != nil {
+ return sdkerrors.Wrapf(sdkerrors.ErrInvalidAddress, "string could not be parsed as address: %v", err)
+ }
+ return msg.Channel.ValidateBasic()
+}
+
+// GetSignBytes implements sdk.Msg. The function will panic since it is used
+// for amino transaction verification which IBC does not support.
+func (msg MsgChannelOpenInit) GetSignBytes() []byte {
+ panic("IBC messages do not support amino")
+}
+
+// GetSigners implements sdk.Msg
+func (msg MsgChannelOpenInit) GetSigners() []sdk.AccAddress {
+ signer, err := sdk.AccAddressFromBech32(msg.Signer)
+ if err != nil {
+ panic(err)
+ }
+ return []sdk.AccAddress{signer}
+}
+
+var _ sdk.Msg = &MsgChannelOpenTry{}
+
+// NewMsgChannelOpenTry creates a new MsgChannelOpenTry instance
+// nolint:interfacer
+func NewMsgChannelOpenTry(
+ portID, previousChannelID, version string, channelOrder Order, connectionHops []string,
+ counterpartyPortID, counterpartyChannelID, counterpartyVersion string,
+ proofInit []byte, proofHeight clienttypes.Height, signer sdk.AccAddress,
+) *MsgChannelOpenTry {
+ counterparty := NewCounterparty(counterpartyPortID, counterpartyChannelID)
+ channel := NewChannel(TRYOPEN, channelOrder, counterparty, connectionHops, version)
+ return &MsgChannelOpenTry{
+ PortId: portID,
+ PreviousChannelId: previousChannelID,
+ Channel: channel,
+ CounterpartyVersion: counterpartyVersion,
+ ProofInit: proofInit,
+ ProofHeight: proofHeight,
+ Signer: signer.String(),
+ }
+}
+
+// Route implements sdk.Msg
+func (msg MsgChannelOpenTry) Route() string {
+ return host.RouterKey
+}
+
+// Type implements sdk.Msg
+func (msg MsgChannelOpenTry) Type() string {
+ return "channel_open_try"
+}
+
+// ValidateBasic implements sdk.Msg
+func (msg MsgChannelOpenTry) ValidateBasic() error {
+ if err := host.PortIdentifierValidator(msg.PortId); err != nil {
+ return sdkerrors.Wrap(err, "invalid port ID")
+ }
+ if msg.PreviousChannelId != "" {
+ if !IsValidChannelID(msg.PreviousChannelId) {
+ return sdkerrors.Wrap(ErrInvalidChannelIdentifier, "invalid previous channel ID")
+ }
+ }
+ if len(msg.ProofInit) == 0 {
+ return sdkerrors.Wrap(commitmenttypes.ErrInvalidProof, "cannot submit an empty proof init")
+ }
+ if msg.ProofHeight.IsZero() {
+ return sdkerrors.Wrap(sdkerrors.ErrInvalidHeight, "proof height must be non-zero")
+ }
+ if msg.Channel.State != TRYOPEN {
+ return sdkerrors.Wrapf(ErrInvalidChannelState,
+ "channel state must be TRYOPEN in MsgChannelOpenTry. expected: %s, got: %s",
+ TRYOPEN, msg.Channel.State,
+ )
+ }
+ // counterparty validate basic allows empty counterparty channel identifiers
+ if err := host.ChannelIdentifierValidator(msg.Channel.Counterparty.ChannelId); err != nil {
+ return sdkerrors.Wrap(err, "invalid counterparty channel ID")
+ }
+
+ _, err := sdk.AccAddressFromBech32(msg.Signer)
+ if err != nil {
+ return sdkerrors.Wrapf(sdkerrors.ErrInvalidAddress, "string could not be parsed as address: %v", err)
+ }
+ return msg.Channel.ValidateBasic()
+}
+
+// GetSignBytes implements sdk.Msg. The function will panic since it is used
+// for amino transaction verification which IBC does not support.
+func (msg MsgChannelOpenTry) GetSignBytes() []byte {
+ panic("IBC messages do not support amino")
+}
+
+// GetSigners implements sdk.Msg
+func (msg MsgChannelOpenTry) GetSigners() []sdk.AccAddress {
+ signer, err := sdk.AccAddressFromBech32(msg.Signer)
+ if err != nil {
+ panic(err)
+ }
+ return []sdk.AccAddress{signer}
+}
+
+var _ sdk.Msg = &MsgChannelOpenAck{}
+
+// NewMsgChannelOpenAck creates a new MsgChannelOpenAck instance
+// nolint:interfacer
+func NewMsgChannelOpenAck(
+ portID, channelID, counterpartyChannelID, counterpartyVersion string, proofTry []byte, proofHeight clienttypes.Height,
+ signer sdk.AccAddress,
+) *MsgChannelOpenAck {
+ return &MsgChannelOpenAck{
+ PortId: portID,
+ ChannelId: channelID,
+ CounterpartyChannelId: counterpartyChannelID,
+ CounterpartyVersion: counterpartyVersion,
+ ProofTry: proofTry,
+ ProofHeight: proofHeight,
+ Signer: signer.String(),
+ }
+}
+
+// Route implements sdk.Msg
+func (msg MsgChannelOpenAck) Route() string {
+ return host.RouterKey
+}
+
+// Type implements sdk.Msg
+func (msg MsgChannelOpenAck) Type() string {
+ return "channel_open_ack"
+}
+
+// ValidateBasic implements sdk.Msg
+func (msg MsgChannelOpenAck) ValidateBasic() error {
+ if err := host.PortIdentifierValidator(msg.PortId); err != nil {
+ return sdkerrors.Wrap(err, "invalid port ID")
+ }
+ if !IsValidChannelID(msg.ChannelId) {
+ return ErrInvalidChannelIdentifier
+ }
+ if err := host.ChannelIdentifierValidator(msg.CounterpartyChannelId); err != nil {
+ return sdkerrors.Wrap(err, "invalid counterparty channel ID")
+ }
+ if len(msg.ProofTry) == 0 {
+ return sdkerrors.Wrap(commitmenttypes.ErrInvalidProof, "cannot submit an empty proof try")
+ }
+ if msg.ProofHeight.IsZero() {
+ return sdkerrors.Wrap(sdkerrors.ErrInvalidHeight, "proof height must be non-zero")
+ }
+ _, err := sdk.AccAddressFromBech32(msg.Signer)
+ if err != nil {
+ return sdkerrors.Wrapf(sdkerrors.ErrInvalidAddress, "string could not be parsed as address: %v", err)
+ }
+ return nil
+}
+
+// GetSignBytes implements sdk.Msg. The function will panic since it is used
+// for amino transaction verification which IBC does not support.
+func (msg MsgChannelOpenAck) GetSignBytes() []byte {
+ panic("IBC messages do not support amino")
+}
+
+// GetSigners implements sdk.Msg
+func (msg MsgChannelOpenAck) GetSigners() []sdk.AccAddress {
+ signer, err := sdk.AccAddressFromBech32(msg.Signer)
+ if err != nil {
+ panic(err)
+ }
+ return []sdk.AccAddress{signer}
+}
+
+var _ sdk.Msg = &MsgChannelOpenConfirm{}
+
+// NewMsgChannelOpenConfirm creates a new MsgChannelOpenConfirm instance
+// nolint:interfacer
+func NewMsgChannelOpenConfirm(
+ portID, channelID string, proofAck []byte, proofHeight clienttypes.Height,
+ signer sdk.AccAddress,
+) *MsgChannelOpenConfirm {
+ return &MsgChannelOpenConfirm{
+ PortId: portID,
+ ChannelId: channelID,
+ ProofAck: proofAck,
+ ProofHeight: proofHeight,
+ Signer: signer.String(),
+ }
+}
+
+// Route implements sdk.Msg
+func (msg MsgChannelOpenConfirm) Route() string {
+ return host.RouterKey
+}
+
+// Type implements sdk.Msg
+func (msg MsgChannelOpenConfirm) Type() string {
+ return "channel_open_confirm"
+}
+
+// ValidateBasic implements sdk.Msg
+func (msg MsgChannelOpenConfirm) ValidateBasic() error {
+ if err := host.PortIdentifierValidator(msg.PortId); err != nil {
+ return sdkerrors.Wrap(err, "invalid port ID")
+ }
+ if !IsValidChannelID(msg.ChannelId) {
+ return ErrInvalidChannelIdentifier
+ }
+ if len(msg.ProofAck) == 0 {
+ return sdkerrors.Wrap(commitmenttypes.ErrInvalidProof, "cannot submit an empty proof ack")
+ }
+ if msg.ProofHeight.IsZero() {
+ return sdkerrors.Wrap(sdkerrors.ErrInvalidHeight, "proof height must be non-zero")
+ }
+ _, err := sdk.AccAddressFromBech32(msg.Signer)
+ if err != nil {
+ return sdkerrors.Wrapf(sdkerrors.ErrInvalidAddress, "string could not be parsed as address: %v", err)
+ }
+ return nil
+}
+
+// GetSignBytes implements sdk.Msg. The function will panic since it is used
+// for amino transaction verification which IBC does not support.
+func (msg MsgChannelOpenConfirm) GetSignBytes() []byte {
+ panic("IBC messages do not support amino")
+}
+
+// GetSigners implements sdk.Msg
+func (msg MsgChannelOpenConfirm) GetSigners() []sdk.AccAddress {
+ signer, err := sdk.AccAddressFromBech32(msg.Signer)
+ if err != nil {
+ panic(err)
+ }
+ return []sdk.AccAddress{signer}
+}
+
+var _ sdk.Msg = &MsgChannelCloseInit{}
+
+// NewMsgChannelCloseInit creates a new MsgChannelCloseInit instance
+// nolint:interfacer
+func NewMsgChannelCloseInit(
+ portID string, channelID string, signer sdk.AccAddress,
+) *MsgChannelCloseInit {
+ return &MsgChannelCloseInit{
+ PortId: portID,
+ ChannelId: channelID,
+ Signer: signer.String(),
+ }
+}
+
+// Route implements sdk.Msg
+func (msg MsgChannelCloseInit) Route() string {
+ return host.RouterKey
+}
+
+// Type implements sdk.Msg
+func (msg MsgChannelCloseInit) Type() string {
+ return "channel_close_init"
+}
+
+// ValidateBasic implements sdk.Msg
+func (msg MsgChannelCloseInit) ValidateBasic() error {
+ if err := host.PortIdentifierValidator(msg.PortId); err != nil {
+ return sdkerrors.Wrap(err, "invalid port ID")
+ }
+ if !IsValidChannelID(msg.ChannelId) {
+ return ErrInvalidChannelIdentifier
+ }
+ _, err := sdk.AccAddressFromBech32(msg.Signer)
+ if err != nil {
+ return sdkerrors.Wrapf(sdkerrors.ErrInvalidAddress, "string could not be parsed as address: %v", err)
+ }
+ return nil
+}
+
+// GetSignBytes implements sdk.Msg. The function will panic since it is used
+// for amino transaction verification which IBC does not support.
+func (msg MsgChannelCloseInit) GetSignBytes() []byte {
+ panic("IBC messages do not support amino")
+}
+
+// GetSigners implements sdk.Msg
+func (msg MsgChannelCloseInit) GetSigners() []sdk.AccAddress {
+ signer, err := sdk.AccAddressFromBech32(msg.Signer)
+ if err != nil {
+ panic(err)
+ }
+ return []sdk.AccAddress{signer}
+}
+
+var _ sdk.Msg = &MsgChannelCloseConfirm{}
+
+// NewMsgChannelCloseConfirm creates a new MsgChannelCloseConfirm instance
+// nolint:interfacer
+func NewMsgChannelCloseConfirm(
+ portID, channelID string, proofInit []byte, proofHeight clienttypes.Height,
+ signer sdk.AccAddress,
+) *MsgChannelCloseConfirm {
+ return &MsgChannelCloseConfirm{
+ PortId: portID,
+ ChannelId: channelID,
+ ProofInit: proofInit,
+ ProofHeight: proofHeight,
+ Signer: signer.String(),
+ }
+}
+
+// Route implements sdk.Msg
+func (msg MsgChannelCloseConfirm) Route() string {
+ return host.RouterKey
+}
+
+// Type implements sdk.Msg
+func (msg MsgChannelCloseConfirm) Type() string {
+ return "channel_close_confirm"
+}
+
+// ValidateBasic implements sdk.Msg
+func (msg MsgChannelCloseConfirm) ValidateBasic() error {
+ if err := host.PortIdentifierValidator(msg.PortId); err != nil {
+ return sdkerrors.Wrap(err, "invalid port ID")
+ }
+ if !IsValidChannelID(msg.ChannelId) {
+ return ErrInvalidChannelIdentifier
+ }
+ if len(msg.ProofInit) == 0 {
+ return sdkerrors.Wrap(commitmenttypes.ErrInvalidProof, "cannot submit an empty proof init")
+ }
+ if msg.ProofHeight.IsZero() {
+ return sdkerrors.Wrap(sdkerrors.ErrInvalidHeight, "proof height must be non-zero")
+ }
+ _, err := sdk.AccAddressFromBech32(msg.Signer)
+ if err != nil {
+ return sdkerrors.Wrapf(sdkerrors.ErrInvalidAddress, "string could not be parsed as address: %v", err)
+ }
+ return nil
+}
+
+// GetSignBytes implements sdk.Msg. The function will panic since it is used
+// for amino transaction verification which IBC does not support.
+func (msg MsgChannelCloseConfirm) GetSignBytes() []byte {
+ panic("IBC messages do not support amino")
+}
+
+// GetSigners implements sdk.Msg
+func (msg MsgChannelCloseConfirm) GetSigners() []sdk.AccAddress {
+ signer, err := sdk.AccAddressFromBech32(msg.Signer)
+ if err != nil {
+ panic(err)
+ }
+ return []sdk.AccAddress{signer}
+}
+
+var _ sdk.Msg = &MsgRecvPacket{}
+
+// NewMsgRecvPacket constructs a new MsgRecvPacket
+// nolint:interfacer
+func NewMsgRecvPacket(
+ packet Packet, proofCommitment []byte, proofHeight clienttypes.Height,
+ signer sdk.AccAddress,
+) *MsgRecvPacket {
+ return &MsgRecvPacket{
+ Packet: packet,
+ ProofCommitment: proofCommitment,
+ ProofHeight: proofHeight,
+ Signer: signer.String(),
+ }
+}
+
+// Route implements sdk.Msg
+func (msg MsgRecvPacket) Route() string {
+ return host.RouterKey
+}
+
+// ValidateBasic implements sdk.Msg
+func (msg MsgRecvPacket) ValidateBasic() error {
+ if len(msg.ProofCommitment) == 0 {
+ return sdkerrors.Wrap(commitmenttypes.ErrInvalidProof, "cannot submit an empty proof")
+ }
+ if msg.ProofHeight.IsZero() {
+ return sdkerrors.Wrap(sdkerrors.ErrInvalidHeight, "proof height must be non-zero")
+ }
+ _, err := sdk.AccAddressFromBech32(msg.Signer)
+ if err != nil {
+ return sdkerrors.Wrapf(sdkerrors.ErrInvalidAddress, "string could not be parsed as address: %v", err)
+ }
+ return msg.Packet.ValidateBasic()
+}
+
+// GetSignBytes implements sdk.Msg. The function will panic since it is used
+// for amino transaction verification which IBC does not support.
+func (msg MsgRecvPacket) GetSignBytes() []byte {
+ panic("IBC messages do not support amino")
+}
+
+// GetDataSignBytes returns the base64-encoded bytes used for the
+// data field when signing the packet.
+func (msg MsgRecvPacket) GetDataSignBytes() []byte {
+ s := "\"" + base64.StdEncoding.EncodeToString(msg.Packet.Data) + "\""
+ return []byte(s)
+}
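+
+// For illustration: a packet whose Data is []byte("testdata") yields the bytes
+// of the JSON string `"dGVzdGRhdGE="`, i.e. the base64 encoding wrapped in
+// double quotes.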
+
+// GetSigners implements sdk.Msg
+func (msg MsgRecvPacket) GetSigners() []sdk.AccAddress {
+ signer, err := sdk.AccAddressFromBech32(msg.Signer)
+ if err != nil {
+ panic(err)
+ }
+ return []sdk.AccAddress{signer}
+}
+
+// Type implements sdk.Msg
+func (msg MsgRecvPacket) Type() string {
+ return "recv_packet"
+}
+
+var _ sdk.Msg = &MsgTimeout{}
+
+// NewMsgTimeout constructs a new MsgTimeout
+// nolint:interfacer
+func NewMsgTimeout(
+ packet Packet, nextSequenceRecv uint64, proofUnreceived []byte,
+ proofHeight clienttypes.Height, signer sdk.AccAddress,
+) *MsgTimeout {
+ return &MsgTimeout{
+ Packet: packet,
+ NextSequenceRecv: nextSequenceRecv,
+ ProofUnreceived: proofUnreceived,
+ ProofHeight: proofHeight,
+ Signer: signer.String(),
+ }
+}
+
+// Route implements sdk.Msg
+func (msg MsgTimeout) Route() string {
+ return host.RouterKey
+}
+
+// ValidateBasic implements sdk.Msg
+func (msg MsgTimeout) ValidateBasic() error {
+ if len(msg.ProofUnreceived) == 0 {
+ return sdkerrors.Wrap(commitmenttypes.ErrInvalidProof, "cannot submit an empty unreceived proof")
+ }
+ if msg.ProofHeight.IsZero() {
+ return sdkerrors.Wrap(sdkerrors.ErrInvalidHeight, "proof height must be non-zero")
+ }
+ if msg.NextSequenceRecv == 0 {
+ return sdkerrors.Wrap(sdkerrors.ErrInvalidSequence, "next sequence receive cannot be 0")
+ }
+ _, err := sdk.AccAddressFromBech32(msg.Signer)
+ if err != nil {
+ return sdkerrors.Wrapf(sdkerrors.ErrInvalidAddress, "string could not be parsed as address: %v", err)
+ }
+ return msg.Packet.ValidateBasic()
+}
+
+// GetSignBytes implements sdk.Msg. The function will panic since it is used
+// for amino transaction verification which IBC does not support.
+func (msg MsgTimeout) GetSignBytes() []byte {
+ panic("IBC messages do not support amino")
+}
+
+// GetSigners implements sdk.Msg
+func (msg MsgTimeout) GetSigners() []sdk.AccAddress {
+ signer, err := sdk.AccAddressFromBech32(msg.Signer)
+ if err != nil {
+ panic(err)
+ }
+ return []sdk.AccAddress{signer}
+}
+
+// Type implements sdk.Msg
+func (msg MsgTimeout) Type() string {
+ return "timeout_packet"
+}
+
+// NewMsgTimeoutOnClose constructs a new MsgTimeoutOnClose
+// nolint:interfacer
+func NewMsgTimeoutOnClose(
+ packet Packet, nextSequenceRecv uint64,
+ proofUnreceived, proofClose []byte,
+ proofHeight clienttypes.Height, signer sdk.AccAddress,
+) *MsgTimeoutOnClose {
+ return &MsgTimeoutOnClose{
+ Packet: packet,
+ NextSequenceRecv: nextSequenceRecv,
+ ProofUnreceived: proofUnreceived,
+ ProofClose: proofClose,
+ ProofHeight: proofHeight,
+ Signer: signer.String(),
+ }
+}
+
+// Route implements sdk.Msg
+func (msg MsgTimeoutOnClose) Route() string {
+ return host.RouterKey
+}
+
+// ValidateBasic implements sdk.Msg
+func (msg MsgTimeoutOnClose) ValidateBasic() error {
+ if msg.NextSequenceRecv == 0 {
+ return sdkerrors.Wrap(sdkerrors.ErrInvalidSequence, "next sequence receive cannot be 0")
+ }
+ if len(msg.ProofUnreceived) == 0 {
+ return sdkerrors.Wrap(commitmenttypes.ErrInvalidProof, "cannot submit an empty proof")
+ }
+ if len(msg.ProofClose) == 0 {
+ return sdkerrors.Wrap(commitmenttypes.ErrInvalidProof, "cannot submit an empty proof of closed counterparty channel end")
+ }
+ if msg.ProofHeight.IsZero() {
+ return sdkerrors.Wrap(sdkerrors.ErrInvalidHeight, "proof height must be non-zero")
+ }
+ _, err := sdk.AccAddressFromBech32(msg.Signer)
+ if err != nil {
+ return sdkerrors.Wrapf(sdkerrors.ErrInvalidAddress, "string could not be parsed as address: %v", err)
+ }
+ return msg.Packet.ValidateBasic()
+}
+
+// GetSignBytes implements sdk.Msg. The function will panic since it is used
+// for amino transaction verification which IBC does not support.
+func (msg MsgTimeoutOnClose) GetSignBytes() []byte {
+ panic("IBC messages do not support amino")
+}
+
+// GetSigners implements sdk.Msg
+func (msg MsgTimeoutOnClose) GetSigners() []sdk.AccAddress {
+ signer, err := sdk.AccAddressFromBech32(msg.Signer)
+ if err != nil {
+ panic(err)
+ }
+ return []sdk.AccAddress{signer}
+}
+
+// Type implements sdk.Msg
+func (msg MsgTimeoutOnClose) Type() string {
+ return "timeout_on_close_packet"
+}
+
+var _ sdk.Msg = &MsgAcknowledgement{}
+
+// NewMsgAcknowledgement constructs a new MsgAcknowledgement
+// nolint:interfacer
+func NewMsgAcknowledgement(
+ packet Packet,
+ ack, proofAcked []byte,
+ proofHeight clienttypes.Height,
+ signer sdk.AccAddress,
+) *MsgAcknowledgement {
+ return &MsgAcknowledgement{
+ Packet: packet,
+ Acknowledgement: ack,
+ ProofAcked: proofAcked,
+ ProofHeight: proofHeight,
+ Signer: signer.String(),
+ }
+}
+
+// Route implements sdk.Msg
+func (msg MsgAcknowledgement) Route() string {
+ return host.RouterKey
+}
+
+// ValidateBasic implements sdk.Msg
+func (msg MsgAcknowledgement) ValidateBasic() error {
+ if len(msg.ProofAcked) == 0 {
+ return sdkerrors.Wrap(commitmenttypes.ErrInvalidProof, "cannot submit an empty proof")
+ }
+ if msg.ProofHeight.IsZero() {
+ return sdkerrors.Wrap(sdkerrors.ErrInvalidHeight, "proof height must be non-zero")
+ }
+ if len(msg.Acknowledgement) == 0 {
+ return sdkerrors.Wrap(ErrInvalidAcknowledgement, "ack bytes cannot be empty")
+ }
+ _, err := sdk.AccAddressFromBech32(msg.Signer)
+ if err != nil {
+ return sdkerrors.Wrapf(sdkerrors.ErrInvalidAddress, "string could not be parsed as address: %v", err)
+ }
+ return msg.Packet.ValidateBasic()
+}
+
+// GetSignBytes implements sdk.Msg. The function will panic since it is used
+// for amino transaction verification which IBC does not support.
+func (msg MsgAcknowledgement) GetSignBytes() []byte {
+ panic("IBC messages do not support amino")
+}
+
+// GetSigners implements sdk.Msg
+func (msg MsgAcknowledgement) GetSigners() []sdk.AccAddress {
+ signer, err := sdk.AccAddressFromBech32(msg.Signer)
+ if err != nil {
+ panic(err)
+ }
+ return []sdk.AccAddress{signer}
+}
+
+// Type implements sdk.Msg
+func (msg MsgAcknowledgement) Type() string {
+ return "acknowledge_packet"
+}
diff --git a/core/04-channel/types/msgs_test.go b/core/04-channel/types/msgs_test.go
new file mode 100644
index 00000000..9c27fd69
--- /dev/null
+++ b/core/04-channel/types/msgs_test.go
@@ -0,0 +1,446 @@
+package types_test
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/suite"
+
+ abci "github.com/tendermint/tendermint/abci/types"
+ dbm "github.com/tendermint/tm-db"
+
+ "github.com/cosmos/cosmos-sdk/simapp"
+ "github.com/cosmos/cosmos-sdk/store/iavl"
+ "github.com/cosmos/cosmos-sdk/store/rootmulti"
+ storetypes "github.com/cosmos/cosmos-sdk/store/types"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+ commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+)
+
+const (
+ // valid constants used for testing
+ portid = "testportid"
+ chanid = "channel-0"
+ cpportid = "testcpport"
+ cpchanid = "testcpchannel"
+
+ version = "1.0"
+
+ // invalid constants used for testing
+ invalidPort = "(invalidport1)"
+ invalidShortPort = "p"
+ invalidLongPort = "invalidlongportinvalidlongportinvalidlongportidinvalidlongportidinvalid"
+
+ invalidChannel = "(invalidchannel1)"
+ invalidShortChannel = "invalid"
+ invalidLongChannel = "invalidlongchannelinvalidlongchannelinvalidlongchannelinvalidlongchannel"
+
+ invalidConnection = "(invalidconnection1)"
+ invalidShortConnection = "invalidcn"
+ invalidLongConnection = "invalidlongconnectioninvalidlongconnectioninvalidlongconnectioninvalid"
+)
+
+// define variables used for testing
+var (
+ height = clienttypes.NewHeight(0, 1)
+ timeoutHeight = clienttypes.NewHeight(0, 100)
+ timeoutTimestamp = uint64(100)
+ disabledTimeout = clienttypes.ZeroHeight()
+ validPacketData = []byte("testdata")
+ unknownPacketData = []byte("unknown")
+
+ packet = types.NewPacket(validPacketData, 1, portid, chanid, cpportid, cpchanid, timeoutHeight, timeoutTimestamp)
+ invalidPacket = types.NewPacket(unknownPacketData, 0, portid, chanid, cpportid, cpchanid, timeoutHeight, timeoutTimestamp)
+
+ emptyProof = []byte{}
+ invalidProofs1 = exported.Proof(nil)
+ invalidProofs2 = emptyProof
+
+ addr = sdk.AccAddress("testaddr111111111111")
+ emptyAddr sdk.AccAddress
+
+ connHops = []string{"testconnection"}
+ invalidConnHops = []string{"testconnection", "testconnection"}
+ invalidShortConnHops = []string{invalidShortConnection}
+ invalidLongConnHops = []string{invalidLongConnection}
+)
+
+type TypesTestSuite struct {
+ suite.Suite
+
+ proof []byte
+}
+
+func (suite *TypesTestSuite) SetupTest() {
+ app := simapp.Setup(false)
+ db := dbm.NewMemDB()
+ store := rootmulti.NewStore(db)
+ storeKey := storetypes.NewKVStoreKey("iavlStoreKey")
+
+ store.MountStoreWithDB(storeKey, storetypes.StoreTypeIAVL, nil)
+ store.LoadVersion(0)
+ iavlStore := store.GetCommitStore(storeKey).(*iavl.Store)
+
+ iavlStore.Set([]byte("KEY"), []byte("VALUE"))
+ _ = store.Commit()
+
+ res := store.Query(abci.RequestQuery{
+ Path: fmt.Sprintf("/%s/key", storeKey.Name()), // required path to get key/value+proof
+ Data: []byte("KEY"),
+ Prove: true,
+ })
+
+ merkleProof, err := commitmenttypes.ConvertProofs(res.ProofOps)
+ suite.Require().NoError(err)
+ proof, err := app.AppCodec().MarshalBinaryBare(&merkleProof)
+ suite.Require().NoError(err)
+
+ suite.proof = proof
+}
+
+func TestTypesTestSuite(t *testing.T) {
+ suite.Run(t, new(TypesTestSuite))
+}
+
+func (suite *TypesTestSuite) TestMsgChannelOpenInitValidateBasic() {
+ counterparty := types.NewCounterparty(cpportid, cpchanid)
+ tryOpenChannel := types.NewChannel(types.TRYOPEN, types.ORDERED, counterparty, connHops, version)
+
+ testCases := []struct {
+ name string
+ msg *types.MsgChannelOpenInit
+ expPass bool
+ }{
+ {"", types.NewMsgChannelOpenInit(portid, version, types.ORDERED, connHops, cpportid, addr), true},
+ {"too short port id", types.NewMsgChannelOpenInit(invalidShortPort, version, types.ORDERED, connHops, cpportid, addr), false},
+ {"too long port id", types.NewMsgChannelOpenInit(invalidLongPort, version, types.ORDERED, connHops, cpportid, addr), false},
+ {"port id contains non-alpha", types.NewMsgChannelOpenInit(invalidPort, version, types.ORDERED, connHops, cpportid, addr), false},
+ {"invalid channel order", types.NewMsgChannelOpenInit(portid, version, types.Order(3), connHops, cpportid, addr), false},
+ {"connection hops more than 1 ", types.NewMsgChannelOpenInit(portid, version, types.ORDERED, invalidConnHops, cpportid, addr), false},
+ {"too short connection id", types.NewMsgChannelOpenInit(portid, version, types.UNORDERED, invalidShortConnHops, cpportid, addr), false},
+ {"too long connection id", types.NewMsgChannelOpenInit(portid, version, types.UNORDERED, invalidLongConnHops, cpportid, addr), false},
+ {"connection id contains non-alpha", types.NewMsgChannelOpenInit(portid, version, types.UNORDERED, []string{invalidConnection}, cpportid, addr), false},
+ {"", types.NewMsgChannelOpenInit(portid, "", types.UNORDERED, connHops, cpportid, addr), true},
+ {"invalid counterparty port id", types.NewMsgChannelOpenInit(portid, version, types.UNORDERED, connHops, invalidPort, addr), false},
+ {"channel not in INIT state", &types.MsgChannelOpenInit{portid, tryOpenChannel, addr.String()}, false},
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+
+ suite.Run(tc.name, func() {
+ err := tc.msg.ValidateBasic()
+ if tc.expPass {
+ suite.Require().NoError(err)
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+}
+
+func (suite *TypesTestSuite) TestMsgChannelOpenTryValidateBasic() {
+ counterparty := types.NewCounterparty(cpportid, cpchanid)
+ initChannel := types.NewChannel(types.INIT, types.ORDERED, counterparty, connHops, version)
+
+ testCases := []struct {
+ name string
+ msg *types.MsgChannelOpenTry
+ expPass bool
+ }{
+ {"", types.NewMsgChannelOpenTry(portid, chanid, version, types.ORDERED, connHops, cpportid, cpchanid, version, suite.proof, height, addr), true},
+ {"too short port id", types.NewMsgChannelOpenTry(invalidShortPort, chanid, version, types.ORDERED, connHops, cpportid, cpchanid, version, suite.proof, height, addr), false},
+ {"too long port id", types.NewMsgChannelOpenTry(invalidLongPort, chanid, version, types.ORDERED, connHops, cpportid, cpchanid, version, suite.proof, height, addr), false},
+ {"port id contains non-alpha", types.NewMsgChannelOpenTry(invalidPort, chanid, version, types.ORDERED, connHops, cpportid, cpchanid, version, suite.proof, height, addr), false},
+ {"too short channel id", types.NewMsgChannelOpenTry(portid, invalidShortChannel, version, types.ORDERED, connHops, cpportid, cpchanid, version, suite.proof, height, addr), false},
+ {"too long channel id", types.NewMsgChannelOpenTry(portid, invalidLongChannel, version, types.ORDERED, connHops, cpportid, cpchanid, version, suite.proof, height, addr), false},
+ {"channel id contains non-alpha", types.NewMsgChannelOpenTry(portid, invalidChannel, version, types.ORDERED, connHops, cpportid, cpchanid, version, suite.proof, height, addr), false},
+ {"", types.NewMsgChannelOpenTry(portid, chanid, version, types.ORDERED, connHops, cpportid, cpchanid, "", suite.proof, height, addr), true},
+ {"proof height is zero", types.NewMsgChannelOpenTry(portid, chanid, version, types.ORDERED, connHops, cpportid, cpchanid, version, suite.proof, clienttypes.ZeroHeight(), addr), false},
+ {"invalid channel order", types.NewMsgChannelOpenTry(portid, chanid, version, types.Order(4), connHops, cpportid, cpchanid, version, suite.proof, height, addr), false},
+ {"connection hops more than 1 ", types.NewMsgChannelOpenTry(portid, chanid, version, types.UNORDERED, invalidConnHops, cpportid, cpchanid, version, suite.proof, height, addr), false},
+ {"too short connection id", types.NewMsgChannelOpenTry(portid, chanid, version, types.UNORDERED, invalidShortConnHops, cpportid, cpchanid, version, suite.proof, height, addr), false},
+ {"too long connection id", types.NewMsgChannelOpenTry(portid, chanid, version, types.UNORDERED, invalidLongConnHops, cpportid, cpchanid, version, suite.proof, height, addr), false},
+ {"connection id contains non-alpha", types.NewMsgChannelOpenTry(portid, chanid, version, types.UNORDERED, []string{invalidConnection}, cpportid, cpchanid, version, suite.proof, height, addr), false},
+ {"", types.NewMsgChannelOpenTry(portid, chanid, "", types.UNORDERED, connHops, cpportid, cpchanid, version, suite.proof, height, addr), true},
+ {"invalid counterparty port id", types.NewMsgChannelOpenTry(portid, chanid, version, types.UNORDERED, connHops, invalidPort, cpchanid, version, suite.proof, height, addr), false},
+ {"invalid counterparty channel id", types.NewMsgChannelOpenTry(portid, chanid, version, types.UNORDERED, connHops, cpportid, invalidChannel, version, suite.proof, height, addr), false},
+ {"empty proof", types.NewMsgChannelOpenTry(portid, chanid, version, types.UNORDERED, connHops, cpportid, cpchanid, version, emptyProof, height, addr), false},
+ {"channel not in TRYOPEN state", &types.MsgChannelOpenTry{portid, chanid, initChannel, version, suite.proof, height, addr.String()}, false},
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+
+ suite.Run(tc.name, func() {
+ err := tc.msg.ValidateBasic()
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+}
+
+func (suite *TypesTestSuite) TestMsgChannelOpenAckValidateBasic() {
+ testCases := []struct {
+ name string
+ msg *types.MsgChannelOpenAck
+ expPass bool
+ }{
+ {"", types.NewMsgChannelOpenAck(portid, chanid, chanid, version, suite.proof, height, addr), true},
+ {"too short port id", types.NewMsgChannelOpenAck(invalidShortPort, chanid, chanid, version, suite.proof, height, addr), false},
+ {"too long port id", types.NewMsgChannelOpenAck(invalidLongPort, chanid, chanid, version, suite.proof, height, addr), false},
+ {"port id contains non-alpha", types.NewMsgChannelOpenAck(invalidPort, chanid, chanid, version, suite.proof, height, addr), false},
+ {"too short channel id", types.NewMsgChannelOpenAck(portid, invalidShortChannel, chanid, version, suite.proof, height, addr), false},
+ {"too long channel id", types.NewMsgChannelOpenAck(portid, invalidLongChannel, chanid, version, suite.proof, height, addr), false},
+ {"channel id contains non-alpha", types.NewMsgChannelOpenAck(portid, invalidChannel, chanid, version, suite.proof, height, addr), false},
+ {"", types.NewMsgChannelOpenAck(portid, chanid, chanid, "", suite.proof, height, addr), true},
+ {"empty proof", types.NewMsgChannelOpenAck(portid, chanid, chanid, version, emptyProof, height, addr), false},
+ {"proof height is zero", types.NewMsgChannelOpenAck(portid, chanid, chanid, version, suite.proof, clienttypes.ZeroHeight(), addr), false},
+ {"invalid counterparty channel id", types.NewMsgChannelOpenAck(portid, chanid, invalidShortChannel, version, suite.proof, height, addr), false},
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+
+ suite.Run(tc.name, func() {
+ err := tc.msg.ValidateBasic()
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+}
+
+func (suite *TypesTestSuite) TestMsgChannelOpenConfirmValidateBasic() {
+ testCases := []struct {
+ name string
+ msg *types.MsgChannelOpenConfirm
+ expPass bool
+ }{
+ {"", types.NewMsgChannelOpenConfirm(portid, chanid, suite.proof, height, addr), true},
+ {"too short port id", types.NewMsgChannelOpenConfirm(invalidShortPort, chanid, suite.proof, height, addr), false},
+ {"too long port id", types.NewMsgChannelOpenConfirm(invalidLongPort, chanid, suite.proof, height, addr), false},
+ {"port id contains non-alpha", types.NewMsgChannelOpenConfirm(invalidPort, chanid, suite.proof, height, addr), false},
+ {"too short channel id", types.NewMsgChannelOpenConfirm(portid, invalidShortChannel, suite.proof, height, addr), false},
+ {"too long channel id", types.NewMsgChannelOpenConfirm(portid, invalidLongChannel, suite.proof, height, addr), false},
+ {"channel id contains non-alpha", types.NewMsgChannelOpenConfirm(portid, invalidChannel, suite.proof, height, addr), false},
+ {"empty proof", types.NewMsgChannelOpenConfirm(portid, chanid, emptyProof, height, addr), false},
+ {"proof height is zero", types.NewMsgChannelOpenConfirm(portid, chanid, suite.proof, clienttypes.ZeroHeight(), addr), false},
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+
+ suite.Run(tc.name, func() {
+ err := tc.msg.ValidateBasic()
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+}
+
+func (suite *TypesTestSuite) TestMsgChannelCloseInitValidateBasic() {
+ testCases := []struct {
+ name string
+ msg *types.MsgChannelCloseInit
+ expPass bool
+ }{
+ {"", types.NewMsgChannelCloseInit(portid, chanid, addr), true},
+ {"too short port id", types.NewMsgChannelCloseInit(invalidShortPort, chanid, addr), false},
+ {"too long port id", types.NewMsgChannelCloseInit(invalidLongPort, chanid, addr), false},
+ {"port id contains non-alpha", types.NewMsgChannelCloseInit(invalidPort, chanid, addr), false},
+ {"too short channel id", types.NewMsgChannelCloseInit(portid, invalidShortChannel, addr), false},
+ {"too long channel id", types.NewMsgChannelCloseInit(portid, invalidLongChannel, addr), false},
+ {"channel id contains non-alpha", types.NewMsgChannelCloseInit(portid, invalidChannel, addr), false},
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+
+ suite.Run(tc.name, func() {
+ err := tc.msg.ValidateBasic()
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+}
+
+func (suite *TypesTestSuite) TestMsgChannelCloseConfirmValidateBasic() {
+ testCases := []struct {
+ name string
+ msg *types.MsgChannelCloseConfirm
+ expPass bool
+ }{
+ {"", types.NewMsgChannelCloseConfirm(portid, chanid, suite.proof, height, addr), true},
+ {"too short port id", types.NewMsgChannelCloseConfirm(invalidShortPort, chanid, suite.proof, height, addr), false},
+ {"too long port id", types.NewMsgChannelCloseConfirm(invalidLongPort, chanid, suite.proof, height, addr), false},
+ {"port id contains non-alpha", types.NewMsgChannelCloseConfirm(invalidPort, chanid, suite.proof, height, addr), false},
+ {"too short channel id", types.NewMsgChannelCloseConfirm(portid, invalidShortChannel, suite.proof, height, addr), false},
+ {"too long channel id", types.NewMsgChannelCloseConfirm(portid, invalidLongChannel, suite.proof, height, addr), false},
+ {"channel id contains non-alpha", types.NewMsgChannelCloseConfirm(portid, invalidChannel, suite.proof, height, addr), false},
+ {"empty proof", types.NewMsgChannelCloseConfirm(portid, chanid, emptyProof, height, addr), false},
+ {"proof height is zero", types.NewMsgChannelCloseConfirm(portid, chanid, suite.proof, clienttypes.ZeroHeight(), addr), false},
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+
+ suite.Run(tc.name, func() {
+ err := tc.msg.ValidateBasic()
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+}
+
+func (suite *TypesTestSuite) TestMsgRecvPacketType() {
+ msg := types.NewMsgRecvPacket(packet, suite.proof, height, addr)
+
+ suite.Equal("recv_packet", msg.Type())
+}
+
+func (suite *TypesTestSuite) TestMsgRecvPacketValidateBasic() {
+ testCases := []struct {
+ name string
+ msg *types.MsgRecvPacket
+ expPass bool
+ }{
+ {"success", types.NewMsgRecvPacket(packet, suite.proof, height, addr), true},
+ {"proof height is zero", types.NewMsgRecvPacket(packet, suite.proof, clienttypes.ZeroHeight(), addr), false},
+ {"proof contain empty proof", types.NewMsgRecvPacket(packet, emptyProof, height, addr), false},
+ {"missing signer address", types.NewMsgRecvPacket(packet, suite.proof, height, emptyAddr), false},
+ {"invalid packet", types.NewMsgRecvPacket(invalidPacket, suite.proof, height, addr), false},
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+
+ suite.Run(tc.name, func() {
+ err := tc.msg.ValidateBasic()
+
+ if tc.expPass {
+ suite.NoError(err)
+ } else {
+ suite.Error(err)
+ }
+ })
+ }
+}
+
+func (suite *TypesTestSuite) TestMsgRecvPacketGetSigners() {
+ msg := types.NewMsgRecvPacket(packet, suite.proof, height, addr)
+ res := msg.GetSigners()
+
+ expected := "[7465737461646472313131313131313131313131]"
+ suite.Equal(expected, fmt.Sprintf("%v", res))
+}
+
+func (suite *TypesTestSuite) TestMsgTimeoutValidateBasic() {
+ testCases := []struct {
+ name string
+ msg *types.MsgTimeout
+ expPass bool
+ }{
+ {"success", types.NewMsgTimeout(packet, 1, suite.proof, height, addr), true},
+ {"proof height must be > 0", types.NewMsgTimeout(packet, 1, suite.proof, clienttypes.ZeroHeight(), addr), false},
+ {"seq 0", types.NewMsgTimeout(packet, 0, suite.proof, height, addr), false},
+ {"missing signer address", types.NewMsgTimeout(packet, 1, suite.proof, height, emptyAddr), false},
+ {"cannot submit an empty proof", types.NewMsgTimeout(packet, 1, emptyProof, height, addr), false},
+ {"invalid packet", types.NewMsgTimeout(invalidPacket, 1, suite.proof, height, addr), false},
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+
+ suite.Run(tc.name, func() {
+ err := tc.msg.ValidateBasic()
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+}
+
+func (suite *TypesTestSuite) TestMsgTimeoutOnCloseValidateBasic() {
+ testCases := []struct {
+ name string
+ msg sdk.Msg
+ expPass bool
+ }{
+ {"success", types.NewMsgTimeoutOnClose(packet, 1, suite.proof, suite.proof, height, addr), true},
+ {"seq 0", types.NewMsgTimeoutOnClose(packet, 0, suite.proof, suite.proof, height, addr), false},
+ {"empty proof", types.NewMsgTimeoutOnClose(packet, 1, emptyProof, suite.proof, height, addr), false},
+ {"empty proof close", types.NewMsgTimeoutOnClose(packet, 1, suite.proof, emptyProof, height, addr), false},
+ {"proof height is zero", types.NewMsgTimeoutOnClose(packet, 1, suite.proof, suite.proof, clienttypes.ZeroHeight(), addr), false},
+ {"signer address is empty", types.NewMsgTimeoutOnClose(packet, 1, suite.proof, suite.proof, height, emptyAddr), false},
+ {"invalid packet", types.NewMsgTimeoutOnClose(invalidPacket, 1, suite.proof, suite.proof, height, addr), false},
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+
+ suite.Run(tc.name, func() {
+ err := tc.msg.ValidateBasic()
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+}
+
+func (suite *TypesTestSuite) TestMsgAcknowledgementValidateBasic() {
+ testCases := []struct {
+ name string
+ msg *types.MsgAcknowledgement
+ expPass bool
+ }{
+ {"success", types.NewMsgAcknowledgement(packet, packet.GetData(), suite.proof, height, addr), true},
+ {"proof height must be > 0", types.NewMsgAcknowledgement(packet, packet.GetData(), suite.proof, clienttypes.ZeroHeight(), addr), false},
+ {"empty ack", types.NewMsgAcknowledgement(packet, nil, suite.proof, height, addr), false},
+ {"missing signer address", types.NewMsgAcknowledgement(packet, packet.GetData(), suite.proof, height, emptyAddr), false},
+ {"cannot submit an empty proof", types.NewMsgAcknowledgement(packet, packet.GetData(), emptyProof, height, addr), false},
+ {"invalid packet", types.NewMsgAcknowledgement(invalidPacket, packet.GetData(), suite.proof, height, addr), false},
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+
+ suite.Run(tc.name, func() {
+ err := tc.msg.ValidateBasic()
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+}
diff --git a/core/04-channel/types/packet.go b/core/04-channel/types/packet.go
new file mode 100644
index 00000000..b5c8d180
--- /dev/null
+++ b/core/04-channel/types/packet.go
@@ -0,0 +1,112 @@
+package types
+
+import (
+ "crypto/sha256"
+
+ "github.com/cosmos/cosmos-sdk/codec"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+)
+
+// CommitPacket returns the packet commitment bytes. The commitment consists of:
+// sha256_hash(timeout_timestamp + timeout_height.RevisionNumber + timeout_height.RevisionHeight + sha256_hash(data))
+// from a given packet. This results in a fixed length preimage.
+// NOTE: sdk.Uint64ToBigEndian sets the uint64 to a slice of length 8.
+func CommitPacket(cdc codec.BinaryMarshaler, packet exported.PacketI) []byte {
+ timeoutHeight := packet.GetTimeoutHeight()
+
+ buf := sdk.Uint64ToBigEndian(packet.GetTimeoutTimestamp())
+
+ revisionNumber := sdk.Uint64ToBigEndian(timeoutHeight.GetRevisionNumber())
+ buf = append(buf, revisionNumber...)
+
+ revisionHeight := sdk.Uint64ToBigEndian(timeoutHeight.GetRevisionHeight())
+ buf = append(buf, revisionHeight...)
+
+ dataHash := sha256.Sum256(packet.GetData())
+ buf = append(buf, dataHash[:]...)
+
+ hash := sha256.Sum256(buf)
+ return hash[:]
+}
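+
+// Illustrative note (not part of the on-chain logic above): for a packet with
+// timeout timestamp T, timeout height (revision number R, revision height H)
+// and data D, CommitPacket hashes the fixed-length preimage
+//
+//   big_endian_uint64(T) || big_endian_uint64(R) || big_endian_uint64(H) || sha256(D)
+//
+// i.e. 8+8+8+32 = 56 bytes, so the stored commitment is always the 32-byte
+// sha256 digest of that buffer.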
+
+// CommitAcknowledgement returns the hash of commitment bytes
+func CommitAcknowledgement(data []byte) []byte {
+ hash := sha256.Sum256(data)
+ return hash[:]
+}
+
+var _ exported.PacketI = (*Packet)(nil)
+
+// NewPacket creates a new Packet instance from the provided raw data bytes,
+// sequence, port/channel identifiers and timeout fields.
+func NewPacket(
+ data []byte,
+ sequence uint64, sourcePort, sourceChannel,
+ destinationPort, destinationChannel string,
+ timeoutHeight clienttypes.Height, timeoutTimestamp uint64,
+) Packet {
+ return Packet{
+ Data: data,
+ Sequence: sequence,
+ SourcePort: sourcePort,
+ SourceChannel: sourceChannel,
+ DestinationPort: destinationPort,
+ DestinationChannel: destinationChannel,
+ TimeoutHeight: timeoutHeight,
+ TimeoutTimestamp: timeoutTimestamp,
+ }
+}
+
+// GetSequence implements PacketI interface
+func (p Packet) GetSequence() uint64 { return p.Sequence }
+
+// GetSourcePort implements PacketI interface
+func (p Packet) GetSourcePort() string { return p.SourcePort }
+
+// GetSourceChannel implements PacketI interface
+func (p Packet) GetSourceChannel() string { return p.SourceChannel }
+
+// GetDestPort implements PacketI interface
+func (p Packet) GetDestPort() string { return p.DestinationPort }
+
+// GetDestChannel implements PacketI interface
+func (p Packet) GetDestChannel() string { return p.DestinationChannel }
+
+// GetData implements PacketI interface
+func (p Packet) GetData() []byte { return p.Data }
+
+// GetTimeoutHeight implements PacketI interface
+func (p Packet) GetTimeoutHeight() exported.Height { return p.TimeoutHeight }
+
+// GetTimeoutTimestamp implements PacketI interface
+func (p Packet) GetTimeoutTimestamp() uint64 { return p.TimeoutTimestamp }
+
+// ValidateBasic implements PacketI interface
+func (p Packet) ValidateBasic() error {
+ if err := host.PortIdentifierValidator(p.SourcePort); err != nil {
+ return sdkerrors.Wrap(err, "invalid source port ID")
+ }
+ if err := host.PortIdentifierValidator(p.DestinationPort); err != nil {
+ return sdkerrors.Wrap(err, "invalid destination port ID")
+ }
+ if err := host.ChannelIdentifierValidator(p.SourceChannel); err != nil {
+ return sdkerrors.Wrap(err, "invalid source channel ID")
+ }
+ if err := host.ChannelIdentifierValidator(p.DestinationChannel); err != nil {
+ return sdkerrors.Wrap(err, "invalid destination channel ID")
+ }
+ if p.Sequence == 0 {
+ return sdkerrors.Wrap(ErrInvalidPacket, "packet sequence cannot be 0")
+ }
+ if p.TimeoutHeight.IsZero() && p.TimeoutTimestamp == 0 {
+ return sdkerrors.Wrap(ErrInvalidPacket, "packet timeout height and packet timeout timestamp cannot both be 0")
+ }
+ if len(p.Data) == 0 {
+ return sdkerrors.Wrap(ErrInvalidPacket, "packet data bytes cannot be empty")
+ }
+ return nil
+}
diff --git a/core/04-channel/types/packet_test.go b/core/04-channel/types/packet_test.go
new file mode 100644
index 00000000..12ed828e
--- /dev/null
+++ b/core/04-channel/types/packet_test.go
@@ -0,0 +1,53 @@
+package types_test
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/cosmos/cosmos-sdk/codec"
+ codectypes "github.com/cosmos/cosmos-sdk/codec/types"
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+)
+
+func TestCommitPacket(t *testing.T) {
+ packet := types.NewPacket(validPacketData, 1, portid, chanid, cpportid, cpchanid, timeoutHeight, timeoutTimestamp)
+
+ registry := codectypes.NewInterfaceRegistry()
+ clienttypes.RegisterInterfaces(registry)
+ types.RegisterInterfaces(registry)
+
+ cdc := codec.NewProtoCodec(registry)
+
+ commitment := types.CommitPacket(cdc, &packet)
+ require.NotNil(t, commitment)
+}
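+
+// Note (illustrative): CommitPacket returns a sha256 digest of a fixed-length
+// preimage, so the commitment above is always 32 bytes; a stricter check could
+// assert require.Len(t, commitment, 32).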
+
+func TestPacketValidateBasic(t *testing.T) {
+ testCases := []struct {
+ packet types.Packet
+ expPass bool
+ errMsg string
+ }{
+ {types.NewPacket(validPacketData, 1, portid, chanid, cpportid, cpchanid, timeoutHeight, timeoutTimestamp), true, ""},
+ {types.NewPacket(validPacketData, 0, portid, chanid, cpportid, cpchanid, timeoutHeight, timeoutTimestamp), false, "invalid sequence"},
+ {types.NewPacket(validPacketData, 1, invalidPort, chanid, cpportid, cpchanid, timeoutHeight, timeoutTimestamp), false, "invalid source port"},
+ {types.NewPacket(validPacketData, 1, portid, invalidChannel, cpportid, cpchanid, timeoutHeight, timeoutTimestamp), false, "invalid source channel"},
+ {types.NewPacket(validPacketData, 1, portid, chanid, invalidPort, cpchanid, timeoutHeight, timeoutTimestamp), false, "invalid destination port"},
+ {types.NewPacket(validPacketData, 1, portid, chanid, cpportid, invalidChannel, timeoutHeight, timeoutTimestamp), false, "invalid destination channel"},
+ {types.NewPacket(validPacketData, 1, portid, chanid, cpportid, cpchanid, disabledTimeout, 0), false, "disabled both timeout height and timestamp"},
+ {types.NewPacket(validPacketData, 1, portid, chanid, cpportid, cpchanid, disabledTimeout, timeoutTimestamp), true, "disabled timeout height, valid timeout timestamp"},
+ {types.NewPacket(validPacketData, 1, portid, chanid, cpportid, cpchanid, timeoutHeight, 0), true, "disabled timeout timestamp, valid timeout height"},
+ {types.NewPacket(unknownPacketData, 1, portid, chanid, cpportid, cpchanid, timeoutHeight, timeoutTimestamp), true, ""},
+ }
+
+ for i, tc := range testCases {
+ err := tc.packet.ValidateBasic()
+ if tc.expPass {
+ require.NoError(t, err, "Msg %d failed: %s", i, tc.errMsg)
+ } else {
+ require.Error(t, err, "Invalid Msg %d passed: %s", i, tc.errMsg)
+ }
+ }
+}
diff --git a/core/04-channel/types/query.go b/core/04-channel/types/query.go
new file mode 100644
index 00000000..d1536dfc
--- /dev/null
+++ b/core/04-channel/types/query.go
@@ -0,0 +1,94 @@
+package types
+
+import (
+ codectypes "github.com/cosmos/cosmos-sdk/codec/types"
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+)
+
+var (
+ _ codectypes.UnpackInterfacesMessage = QueryChannelClientStateResponse{}
+ _ codectypes.UnpackInterfacesMessage = QueryChannelConsensusStateResponse{}
+)
+
+// NewQueryChannelResponse creates a new QueryChannelResponse instance
+func NewQueryChannelResponse(channel Channel, proof []byte, height clienttypes.Height) *QueryChannelResponse {
+ return &QueryChannelResponse{
+ Channel: &channel,
+ Proof: proof,
+ ProofHeight: height,
+ }
+}
+
+// NewQueryChannelClientStateResponse creates a new QueryChannelClientStateResponse instance
+func NewQueryChannelClientStateResponse(identifiedClientState clienttypes.IdentifiedClientState, proof []byte, height clienttypes.Height) *QueryChannelClientStateResponse {
+ return &QueryChannelClientStateResponse{
+ IdentifiedClientState: &identifiedClientState,
+ Proof: proof,
+ ProofHeight: height,
+ }
+}
+
+// UnpackInterfaces implements UnpackInterfacesMessage.UnpackInterfaces
+func (qccsr QueryChannelClientStateResponse) UnpackInterfaces(unpacker codectypes.AnyUnpacker) error {
+ return qccsr.IdentifiedClientState.UnpackInterfaces(unpacker)
+}
+
+// NewQueryChannelConsensusStateResponse creates a new QueryChannelConsensusStateResponse instance
+func NewQueryChannelConsensusStateResponse(clientID string, anyConsensusState *codectypes.Any, consensusStateHeight exported.Height, proof []byte, height clienttypes.Height) *QueryChannelConsensusStateResponse {
+ return &QueryChannelConsensusStateResponse{
+ ConsensusState: anyConsensusState,
+ ClientId: clientID,
+ Proof: proof,
+ ProofHeight: height,
+ }
+}
+
+// UnpackInterfaces implements UnpackInterfacesMessage.UnpackInterfaces
+func (qccsr QueryChannelConsensusStateResponse) UnpackInterfaces(unpacker codectypes.AnyUnpacker) error {
+ return unpacker.UnpackAny(qccsr.ConsensusState, new(exported.ConsensusState))
+}
+
+// NewQueryPacketCommitmentResponse creates a new QueryPacketCommitmentResponse instance
+func NewQueryPacketCommitmentResponse(
+ commitment []byte, proof []byte, height clienttypes.Height,
+) *QueryPacketCommitmentResponse {
+ return &QueryPacketCommitmentResponse{
+ Commitment: commitment,
+ Proof: proof,
+ ProofHeight: height,
+ }
+}
+
+// NewQueryPacketReceiptResponse creates a new QueryPacketReceiptResponse instance
+func NewQueryPacketReceiptResponse(
+ recvd bool, proof []byte, height clienttypes.Height,
+) *QueryPacketReceiptResponse {
+ return &QueryPacketReceiptResponse{
+ Received: recvd,
+ Proof: proof,
+ ProofHeight: height,
+ }
+}
+
+// NewQueryPacketAcknowledgementResponse creates a new QueryPacketAcknowledgementResponse instance
+func NewQueryPacketAcknowledgementResponse(
+ acknowledgement []byte, proof []byte, height clienttypes.Height,
+) *QueryPacketAcknowledgementResponse {
+ return &QueryPacketAcknowledgementResponse{
+ Acknowledgement: acknowledgement,
+ Proof: proof,
+ ProofHeight: height,
+ }
+}
+
+// NewQueryNextSequenceReceiveResponse creates a new QueryNextSequenceReceiveResponse instance
+func NewQueryNextSequenceReceiveResponse(
+ sequence uint64, proof []byte, height clienttypes.Height,
+) *QueryNextSequenceReceiveResponse {
+ return &QueryNextSequenceReceiveResponse{
+ NextSequenceReceive: sequence,
+ Proof: proof,
+ ProofHeight: height,
+ }
+}
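+
+// Usage sketch (illustrative only): a gRPC query handler that has already
+// looked up the stored commitment bytes and generated a merkle proof at
+// proofHeight might assemble its reply as
+//
+//   res := NewQueryPacketCommitmentResponse(commitment, proof, proofHeight)
+//
+// and return res; clients then verify res.Proof against the counterparty
+// consensus state at res.ProofHeight. The commitment, proof and proofHeight
+// variables here are hypothetical, shown only to demonstrate the constructor.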
diff --git a/core/04-channel/types/query.pb.go b/core/04-channel/types/query.pb.go
new file mode 100644
index 00000000..7330eaf2
--- /dev/null
+++ b/core/04-channel/types/query.pb.go
@@ -0,0 +1,7993 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: ibcgo/core/channel/v1/query.proto
+
+package types
+
+import (
+ context "context"
+ fmt "fmt"
+ types1 "github.com/cosmos/cosmos-sdk/codec/types"
+ query "github.com/cosmos/cosmos-sdk/types/query"
+ types "github.com/cosmos/ibc-go/core/02-client/types"
+ _ "github.com/gogo/protobuf/gogoproto"
+ grpc1 "github.com/gogo/protobuf/grpc"
+ proto "github.com/gogo/protobuf/proto"
+ _ "google.golang.org/genproto/googleapis/api/annotations"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+ io "io"
+ math "math"
+ math_bits "math/bits"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+// QueryChannelRequest is the request type for the Query/Channel RPC method
+type QueryChannelRequest struct {
+ // port unique identifier
+ PortId string `protobuf:"bytes,1,opt,name=port_id,json=portId,proto3" json:"port_id,omitempty"`
+ // channel unique identifier
+ ChannelId string `protobuf:"bytes,2,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty"`
+}
+
+func (m *QueryChannelRequest) Reset() { *m = QueryChannelRequest{} }
+func (m *QueryChannelRequest) String() string { return proto.CompactTextString(m) }
+func (*QueryChannelRequest) ProtoMessage() {}
+func (*QueryChannelRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_3acdacc9aeb4fa50, []int{0}
+}
+func (m *QueryChannelRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *QueryChannelRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_QueryChannelRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *QueryChannelRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QueryChannelRequest.Merge(m, src)
+}
+func (m *QueryChannelRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *QueryChannelRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_QueryChannelRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QueryChannelRequest proto.InternalMessageInfo
+
+func (m *QueryChannelRequest) GetPortId() string {
+ if m != nil {
+ return m.PortId
+ }
+ return ""
+}
+
+func (m *QueryChannelRequest) GetChannelId() string {
+ if m != nil {
+ return m.ChannelId
+ }
+ return ""
+}
+
+// QueryChannelResponse is the response type for the Query/Channel RPC method.
+// Besides the Channel end, it includes a proof and the height from which the
+// proof was retrieved.
+type QueryChannelResponse struct {
+ // channel associated with the request identifiers
+ Channel *Channel `protobuf:"bytes,1,opt,name=channel,proto3" json:"channel,omitempty"`
+ // merkle proof of existence
+ Proof []byte `protobuf:"bytes,2,opt,name=proof,proto3" json:"proof,omitempty"`
+ // height at which the proof was retrieved
+ ProofHeight types.Height `protobuf:"bytes,3,opt,name=proof_height,json=proofHeight,proto3" json:"proof_height"`
+}
+
+func (m *QueryChannelResponse) Reset() { *m = QueryChannelResponse{} }
+func (m *QueryChannelResponse) String() string { return proto.CompactTextString(m) }
+func (*QueryChannelResponse) ProtoMessage() {}
+func (*QueryChannelResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_3acdacc9aeb4fa50, []int{1}
+}
+func (m *QueryChannelResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *QueryChannelResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_QueryChannelResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *QueryChannelResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QueryChannelResponse.Merge(m, src)
+}
+func (m *QueryChannelResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *QueryChannelResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_QueryChannelResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QueryChannelResponse proto.InternalMessageInfo
+
+func (m *QueryChannelResponse) GetChannel() *Channel {
+ if m != nil {
+ return m.Channel
+ }
+ return nil
+}
+
+func (m *QueryChannelResponse) GetProof() []byte {
+ if m != nil {
+ return m.Proof
+ }
+ return nil
+}
+
+func (m *QueryChannelResponse) GetProofHeight() types.Height {
+ if m != nil {
+ return m.ProofHeight
+ }
+ return types.Height{}
+}
+
+// QueryChannelsRequest is the request type for the Query/Channels RPC method
+type QueryChannelsRequest struct {
+ // pagination request
+ Pagination *query.PageRequest `protobuf:"bytes,1,opt,name=pagination,proto3" json:"pagination,omitempty"`
+}
+
+func (m *QueryChannelsRequest) Reset() { *m = QueryChannelsRequest{} }
+func (m *QueryChannelsRequest) String() string { return proto.CompactTextString(m) }
+func (*QueryChannelsRequest) ProtoMessage() {}
+func (*QueryChannelsRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_3acdacc9aeb4fa50, []int{2}
+}
+func (m *QueryChannelsRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *QueryChannelsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_QueryChannelsRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *QueryChannelsRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QueryChannelsRequest.Merge(m, src)
+}
+func (m *QueryChannelsRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *QueryChannelsRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_QueryChannelsRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QueryChannelsRequest proto.InternalMessageInfo
+
+func (m *QueryChannelsRequest) GetPagination() *query.PageRequest {
+ if m != nil {
+ return m.Pagination
+ }
+ return nil
+}
+
+// QueryChannelsResponse is the response type for the Query/Channels RPC method.
+type QueryChannelsResponse struct {
+ // list of stored channels of the chain.
+ Channels []*IdentifiedChannel `protobuf:"bytes,1,rep,name=channels,proto3" json:"channels,omitempty"`
+ // pagination response
+ Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"`
+ // query block height
+ Height types.Height `protobuf:"bytes,3,opt,name=height,proto3" json:"height"`
+}
+
+func (m *QueryChannelsResponse) Reset() { *m = QueryChannelsResponse{} }
+func (m *QueryChannelsResponse) String() string { return proto.CompactTextString(m) }
+func (*QueryChannelsResponse) ProtoMessage() {}
+func (*QueryChannelsResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_3acdacc9aeb4fa50, []int{3}
+}
+func (m *QueryChannelsResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *QueryChannelsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_QueryChannelsResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *QueryChannelsResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QueryChannelsResponse.Merge(m, src)
+}
+func (m *QueryChannelsResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *QueryChannelsResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_QueryChannelsResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QueryChannelsResponse proto.InternalMessageInfo
+
+func (m *QueryChannelsResponse) GetChannels() []*IdentifiedChannel {
+ if m != nil {
+ return m.Channels
+ }
+ return nil
+}
+
+func (m *QueryChannelsResponse) GetPagination() *query.PageResponse {
+ if m != nil {
+ return m.Pagination
+ }
+ return nil
+}
+
+func (m *QueryChannelsResponse) GetHeight() types.Height {
+ if m != nil {
+ return m.Height
+ }
+ return types.Height{}
+}
+
+// QueryConnectionChannelsRequest is the request type for the
+// Query/QueryConnectionChannels RPC method
+type QueryConnectionChannelsRequest struct {
+ // connection unique identifier
+ Connection string `protobuf:"bytes,1,opt,name=connection,proto3" json:"connection,omitempty"`
+ // pagination request
+ Pagination *query.PageRequest `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"`
+}
+
+func (m *QueryConnectionChannelsRequest) Reset() { *m = QueryConnectionChannelsRequest{} }
+func (m *QueryConnectionChannelsRequest) String() string { return proto.CompactTextString(m) }
+func (*QueryConnectionChannelsRequest) ProtoMessage() {}
+func (*QueryConnectionChannelsRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_3acdacc9aeb4fa50, []int{4}
+}
+func (m *QueryConnectionChannelsRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *QueryConnectionChannelsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_QueryConnectionChannelsRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *QueryConnectionChannelsRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QueryConnectionChannelsRequest.Merge(m, src)
+}
+func (m *QueryConnectionChannelsRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *QueryConnectionChannelsRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_QueryConnectionChannelsRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QueryConnectionChannelsRequest proto.InternalMessageInfo
+
+func (m *QueryConnectionChannelsRequest) GetConnection() string {
+ if m != nil {
+ return m.Connection
+ }
+ return ""
+}
+
+func (m *QueryConnectionChannelsRequest) GetPagination() *query.PageRequest {
+ if m != nil {
+ return m.Pagination
+ }
+ return nil
+}
+
+// QueryConnectionChannelsResponse is the Response type for the
+// Query/QueryConnectionChannels RPC method
+type QueryConnectionChannelsResponse struct {
+ // list of channels associated with a connection.
+ Channels []*IdentifiedChannel `protobuf:"bytes,1,rep,name=channels,proto3" json:"channels,omitempty"`
+ // pagination response
+ Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"`
+ // query block height
+ Height types.Height `protobuf:"bytes,3,opt,name=height,proto3" json:"height"`
+}
+
+func (m *QueryConnectionChannelsResponse) Reset() { *m = QueryConnectionChannelsResponse{} }
+func (m *QueryConnectionChannelsResponse) String() string { return proto.CompactTextString(m) }
+func (*QueryConnectionChannelsResponse) ProtoMessage() {}
+func (*QueryConnectionChannelsResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_3acdacc9aeb4fa50, []int{5}
+}
+func (m *QueryConnectionChannelsResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *QueryConnectionChannelsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_QueryConnectionChannelsResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *QueryConnectionChannelsResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QueryConnectionChannelsResponse.Merge(m, src)
+}
+func (m *QueryConnectionChannelsResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *QueryConnectionChannelsResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_QueryConnectionChannelsResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QueryConnectionChannelsResponse proto.InternalMessageInfo
+
+func (m *QueryConnectionChannelsResponse) GetChannels() []*IdentifiedChannel {
+ if m != nil {
+ return m.Channels
+ }
+ return nil
+}
+
+func (m *QueryConnectionChannelsResponse) GetPagination() *query.PageResponse {
+ if m != nil {
+ return m.Pagination
+ }
+ return nil
+}
+
+func (m *QueryConnectionChannelsResponse) GetHeight() types.Height {
+ if m != nil {
+ return m.Height
+ }
+ return types.Height{}
+}
+
+// QueryChannelClientStateRequest is the request type for the
+// Query/ChannelClientState RPC method
+type QueryChannelClientStateRequest struct {
+ // port unique identifier
+ PortId string `protobuf:"bytes,1,opt,name=port_id,json=portId,proto3" json:"port_id,omitempty"`
+ // channel unique identifier
+ ChannelId string `protobuf:"bytes,2,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty"`
+}
+
+func (m *QueryChannelClientStateRequest) Reset() { *m = QueryChannelClientStateRequest{} }
+func (m *QueryChannelClientStateRequest) String() string { return proto.CompactTextString(m) }
+func (*QueryChannelClientStateRequest) ProtoMessage() {}
+func (*QueryChannelClientStateRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_3acdacc9aeb4fa50, []int{6}
+}
+func (m *QueryChannelClientStateRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *QueryChannelClientStateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_QueryChannelClientStateRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *QueryChannelClientStateRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QueryChannelClientStateRequest.Merge(m, src)
+}
+func (m *QueryChannelClientStateRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *QueryChannelClientStateRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_QueryChannelClientStateRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QueryChannelClientStateRequest proto.InternalMessageInfo
+
+func (m *QueryChannelClientStateRequest) GetPortId() string {
+ if m != nil {
+ return m.PortId
+ }
+ return ""
+}
+
+func (m *QueryChannelClientStateRequest) GetChannelId() string {
+ if m != nil {
+ return m.ChannelId
+ }
+ return ""
+}
+
+// QueryChannelClientStateResponse is the Response type for the
+// Query/QueryChannelClientState RPC method
+type QueryChannelClientStateResponse struct {
+ // client state associated with the channel
+ IdentifiedClientState *types.IdentifiedClientState `protobuf:"bytes,1,opt,name=identified_client_state,json=identifiedClientState,proto3" json:"identified_client_state,omitempty"`
+ // merkle proof of existence
+ Proof []byte `protobuf:"bytes,2,opt,name=proof,proto3" json:"proof,omitempty"`
+ // height at which the proof was retrieved
+ ProofHeight types.Height `protobuf:"bytes,3,opt,name=proof_height,json=proofHeight,proto3" json:"proof_height"`
+}
+
+func (m *QueryChannelClientStateResponse) Reset() { *m = QueryChannelClientStateResponse{} }
+func (m *QueryChannelClientStateResponse) String() string { return proto.CompactTextString(m) }
+func (*QueryChannelClientStateResponse) ProtoMessage() {}
+func (*QueryChannelClientStateResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_3acdacc9aeb4fa50, []int{7}
+}
+func (m *QueryChannelClientStateResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *QueryChannelClientStateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_QueryChannelClientStateResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *QueryChannelClientStateResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QueryChannelClientStateResponse.Merge(m, src)
+}
+func (m *QueryChannelClientStateResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *QueryChannelClientStateResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_QueryChannelClientStateResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QueryChannelClientStateResponse proto.InternalMessageInfo
+
+func (m *QueryChannelClientStateResponse) GetIdentifiedClientState() *types.IdentifiedClientState {
+ if m != nil {
+ return m.IdentifiedClientState
+ }
+ return nil
+}
+
+func (m *QueryChannelClientStateResponse) GetProof() []byte {
+ if m != nil {
+ return m.Proof
+ }
+ return nil
+}
+
+func (m *QueryChannelClientStateResponse) GetProofHeight() types.Height {
+ if m != nil {
+ return m.ProofHeight
+ }
+ return types.Height{}
+}
+
+// QueryChannelConsensusStateRequest is the request type for the
+// Query/ChannelConsensusState RPC method
+type QueryChannelConsensusStateRequest struct {
+ // port unique identifier
+ PortId string `protobuf:"bytes,1,opt,name=port_id,json=portId,proto3" json:"port_id,omitempty"`
+ // channel unique identifier
+ ChannelId string `protobuf:"bytes,2,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty"`
+ // revision number of the consensus state
+ RevisionNumber uint64 `protobuf:"varint,3,opt,name=revision_number,json=revisionNumber,proto3" json:"revision_number,omitempty"`
+ // revision height of the consensus state
+ RevisionHeight uint64 `protobuf:"varint,4,opt,name=revision_height,json=revisionHeight,proto3" json:"revision_height,omitempty"`
+}
+
+func (m *QueryChannelConsensusStateRequest) Reset() { *m = QueryChannelConsensusStateRequest{} }
+func (m *QueryChannelConsensusStateRequest) String() string { return proto.CompactTextString(m) }
+func (*QueryChannelConsensusStateRequest) ProtoMessage() {}
+func (*QueryChannelConsensusStateRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_3acdacc9aeb4fa50, []int{8}
+}
+func (m *QueryChannelConsensusStateRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *QueryChannelConsensusStateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_QueryChannelConsensusStateRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *QueryChannelConsensusStateRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QueryChannelConsensusStateRequest.Merge(m, src)
+}
+func (m *QueryChannelConsensusStateRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *QueryChannelConsensusStateRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_QueryChannelConsensusStateRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QueryChannelConsensusStateRequest proto.InternalMessageInfo
+
+func (m *QueryChannelConsensusStateRequest) GetPortId() string {
+ if m != nil {
+ return m.PortId
+ }
+ return ""
+}
+
+func (m *QueryChannelConsensusStateRequest) GetChannelId() string {
+ if m != nil {
+ return m.ChannelId
+ }
+ return ""
+}
+
+func (m *QueryChannelConsensusStateRequest) GetRevisionNumber() uint64 {
+ if m != nil {
+ return m.RevisionNumber
+ }
+ return 0
+}
+
+func (m *QueryChannelConsensusStateRequest) GetRevisionHeight() uint64 {
+ if m != nil {
+ return m.RevisionHeight
+ }
+ return 0
+}
+
+// QueryChannelConsensusStateResponse is the response type for the
+// Query/QueryChannelConsensusState RPC method
+type QueryChannelConsensusStateResponse struct {
+ // consensus state associated with the channel
+ ConsensusState *types1.Any `protobuf:"bytes,1,opt,name=consensus_state,json=consensusState,proto3" json:"consensus_state,omitempty"`
+ // client ID associated with the consensus state
+ ClientId string `protobuf:"bytes,2,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty"`
+ // merkle proof of existence
+ Proof []byte `protobuf:"bytes,3,opt,name=proof,proto3" json:"proof,omitempty"`
+ // height at which the proof was retrieved
+ ProofHeight types.Height `protobuf:"bytes,4,opt,name=proof_height,json=proofHeight,proto3" json:"proof_height"`
+}
+
+func (m *QueryChannelConsensusStateResponse) Reset() { *m = QueryChannelConsensusStateResponse{} }
+func (m *QueryChannelConsensusStateResponse) String() string { return proto.CompactTextString(m) }
+func (*QueryChannelConsensusStateResponse) ProtoMessage() {}
+func (*QueryChannelConsensusStateResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_3acdacc9aeb4fa50, []int{9}
+}
+func (m *QueryChannelConsensusStateResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *QueryChannelConsensusStateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_QueryChannelConsensusStateResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *QueryChannelConsensusStateResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QueryChannelConsensusStateResponse.Merge(m, src)
+}
+func (m *QueryChannelConsensusStateResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *QueryChannelConsensusStateResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_QueryChannelConsensusStateResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QueryChannelConsensusStateResponse proto.InternalMessageInfo
+
+func (m *QueryChannelConsensusStateResponse) GetConsensusState() *types1.Any {
+ if m != nil {
+ return m.ConsensusState
+ }
+ return nil
+}
+
+func (m *QueryChannelConsensusStateResponse) GetClientId() string {
+ if m != nil {
+ return m.ClientId
+ }
+ return ""
+}
+
+func (m *QueryChannelConsensusStateResponse) GetProof() []byte {
+ if m != nil {
+ return m.Proof
+ }
+ return nil
+}
+
+func (m *QueryChannelConsensusStateResponse) GetProofHeight() types.Height {
+ if m != nil {
+ return m.ProofHeight
+ }
+ return types.Height{}
+}
+
+// QueryPacketCommitmentRequest is the request type for the
+// Query/PacketCommitment RPC method
+type QueryPacketCommitmentRequest struct {
+ // port unique identifier
+ PortId string `protobuf:"bytes,1,opt,name=port_id,json=portId,proto3" json:"port_id,omitempty"`
+ // channel unique identifier
+ ChannelId string `protobuf:"bytes,2,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty"`
+ // packet sequence
+ Sequence uint64 `protobuf:"varint,3,opt,name=sequence,proto3" json:"sequence,omitempty"`
+}
+
+func (m *QueryPacketCommitmentRequest) Reset() { *m = QueryPacketCommitmentRequest{} }
+func (m *QueryPacketCommitmentRequest) String() string { return proto.CompactTextString(m) }
+func (*QueryPacketCommitmentRequest) ProtoMessage() {}
+func (*QueryPacketCommitmentRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_3acdacc9aeb4fa50, []int{10}
+}
+func (m *QueryPacketCommitmentRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *QueryPacketCommitmentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_QueryPacketCommitmentRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *QueryPacketCommitmentRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QueryPacketCommitmentRequest.Merge(m, src)
+}
+func (m *QueryPacketCommitmentRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *QueryPacketCommitmentRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_QueryPacketCommitmentRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QueryPacketCommitmentRequest proto.InternalMessageInfo
+
+func (m *QueryPacketCommitmentRequest) GetPortId() string {
+ if m != nil {
+ return m.PortId
+ }
+ return ""
+}
+
+func (m *QueryPacketCommitmentRequest) GetChannelId() string {
+ if m != nil {
+ return m.ChannelId
+ }
+ return ""
+}
+
+func (m *QueryPacketCommitmentRequest) GetSequence() uint64 {
+ if m != nil {
+ return m.Sequence
+ }
+ return 0
+}
+
+// QueryPacketCommitmentResponse defines the client query response for a packet
+// which also includes a proof and the height from which the proof was
+// retrieved
+type QueryPacketCommitmentResponse struct {
+ // packet associated with the request fields
+ Commitment []byte `protobuf:"bytes,1,opt,name=commitment,proto3" json:"commitment,omitempty"`
+ // merkle proof of existence
+ Proof []byte `protobuf:"bytes,2,opt,name=proof,proto3" json:"proof,omitempty"`
+ // height at which the proof was retrieved
+ ProofHeight types.Height `protobuf:"bytes,3,opt,name=proof_height,json=proofHeight,proto3" json:"proof_height"`
+}
+
+func (m *QueryPacketCommitmentResponse) Reset() { *m = QueryPacketCommitmentResponse{} }
+func (m *QueryPacketCommitmentResponse) String() string { return proto.CompactTextString(m) }
+func (*QueryPacketCommitmentResponse) ProtoMessage() {}
+func (*QueryPacketCommitmentResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_3acdacc9aeb4fa50, []int{11}
+}
+func (m *QueryPacketCommitmentResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *QueryPacketCommitmentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_QueryPacketCommitmentResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *QueryPacketCommitmentResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QueryPacketCommitmentResponse.Merge(m, src)
+}
+func (m *QueryPacketCommitmentResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *QueryPacketCommitmentResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_QueryPacketCommitmentResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QueryPacketCommitmentResponse proto.InternalMessageInfo
+
+func (m *QueryPacketCommitmentResponse) GetCommitment() []byte {
+ if m != nil {
+ return m.Commitment
+ }
+ return nil
+}
+
+func (m *QueryPacketCommitmentResponse) GetProof() []byte {
+ if m != nil {
+ return m.Proof
+ }
+ return nil
+}
+
+func (m *QueryPacketCommitmentResponse) GetProofHeight() types.Height {
+ if m != nil {
+ return m.ProofHeight
+ }
+ return types.Height{}
+}
+
+// QueryPacketCommitmentsRequest is the request type for the
+// Query/QueryPacketCommitments RPC method
+type QueryPacketCommitmentsRequest struct {
+ // port unique identifier
+ PortId string `protobuf:"bytes,1,opt,name=port_id,json=portId,proto3" json:"port_id,omitempty"`
+ // channel unique identifier
+ ChannelId string `protobuf:"bytes,2,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty"`
+ // pagination request
+ Pagination *query.PageRequest `protobuf:"bytes,3,opt,name=pagination,proto3" json:"pagination,omitempty"`
+}
+
+func (m *QueryPacketCommitmentsRequest) Reset() { *m = QueryPacketCommitmentsRequest{} }
+func (m *QueryPacketCommitmentsRequest) String() string { return proto.CompactTextString(m) }
+func (*QueryPacketCommitmentsRequest) ProtoMessage() {}
+func (*QueryPacketCommitmentsRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_3acdacc9aeb4fa50, []int{12}
+}
+func (m *QueryPacketCommitmentsRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *QueryPacketCommitmentsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_QueryPacketCommitmentsRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *QueryPacketCommitmentsRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QueryPacketCommitmentsRequest.Merge(m, src)
+}
+func (m *QueryPacketCommitmentsRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *QueryPacketCommitmentsRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_QueryPacketCommitmentsRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QueryPacketCommitmentsRequest proto.InternalMessageInfo
+
+func (m *QueryPacketCommitmentsRequest) GetPortId() string {
+ if m != nil {
+ return m.PortId
+ }
+ return ""
+}
+
+func (m *QueryPacketCommitmentsRequest) GetChannelId() string {
+ if m != nil {
+ return m.ChannelId
+ }
+ return ""
+}
+
+func (m *QueryPacketCommitmentsRequest) GetPagination() *query.PageRequest {
+ if m != nil {
+ return m.Pagination
+ }
+ return nil
+}
+
+// QueryPacketCommitmentsResponse is the response type for the
+// Query/QueryPacketCommitments RPC method
+type QueryPacketCommitmentsResponse struct {
+ Commitments []*PacketState `protobuf:"bytes,1,rep,name=commitments,proto3" json:"commitments,omitempty"`
+ // pagination response
+ Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"`
+ // query block height
+ Height types.Height `protobuf:"bytes,3,opt,name=height,proto3" json:"height"`
+}
+
+func (m *QueryPacketCommitmentsResponse) Reset() { *m = QueryPacketCommitmentsResponse{} }
+func (m *QueryPacketCommitmentsResponse) String() string { return proto.CompactTextString(m) }
+func (*QueryPacketCommitmentsResponse) ProtoMessage() {}
+func (*QueryPacketCommitmentsResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_3acdacc9aeb4fa50, []int{13}
+}
+func (m *QueryPacketCommitmentsResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *QueryPacketCommitmentsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_QueryPacketCommitmentsResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *QueryPacketCommitmentsResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QueryPacketCommitmentsResponse.Merge(m, src)
+}
+func (m *QueryPacketCommitmentsResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *QueryPacketCommitmentsResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_QueryPacketCommitmentsResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QueryPacketCommitmentsResponse proto.InternalMessageInfo
+
+func (m *QueryPacketCommitmentsResponse) GetCommitments() []*PacketState {
+ if m != nil {
+ return m.Commitments
+ }
+ return nil
+}
+
+func (m *QueryPacketCommitmentsResponse) GetPagination() *query.PageResponse {
+ if m != nil {
+ return m.Pagination
+ }
+ return nil
+}
+
+func (m *QueryPacketCommitmentsResponse) GetHeight() types.Height {
+ if m != nil {
+ return m.Height
+ }
+ return types.Height{}
+}
+
+// QueryPacketReceiptRequest is the request type for the
+// Query/PacketReceipt RPC method
+type QueryPacketReceiptRequest struct {
+ // port unique identifier
+ PortId string `protobuf:"bytes,1,opt,name=port_id,json=portId,proto3" json:"port_id,omitempty"`
+ // channel unique identifier
+ ChannelId string `protobuf:"bytes,2,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty"`
+ // packet sequence
+ Sequence uint64 `protobuf:"varint,3,opt,name=sequence,proto3" json:"sequence,omitempty"`
+}
+
+func (m *QueryPacketReceiptRequest) Reset() { *m = QueryPacketReceiptRequest{} }
+func (m *QueryPacketReceiptRequest) String() string { return proto.CompactTextString(m) }
+func (*QueryPacketReceiptRequest) ProtoMessage() {}
+func (*QueryPacketReceiptRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_3acdacc9aeb4fa50, []int{14}
+}
+func (m *QueryPacketReceiptRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *QueryPacketReceiptRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_QueryPacketReceiptRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *QueryPacketReceiptRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QueryPacketReceiptRequest.Merge(m, src)
+}
+func (m *QueryPacketReceiptRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *QueryPacketReceiptRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_QueryPacketReceiptRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QueryPacketReceiptRequest proto.InternalMessageInfo
+
+func (m *QueryPacketReceiptRequest) GetPortId() string {
+ if m != nil {
+ return m.PortId
+ }
+ return ""
+}
+
+func (m *QueryPacketReceiptRequest) GetChannelId() string {
+ if m != nil {
+ return m.ChannelId
+ }
+ return ""
+}
+
+func (m *QueryPacketReceiptRequest) GetSequence() uint64 {
+ if m != nil {
+ return m.Sequence
+ }
+ return 0
+}
+
+// QueryPacketReceiptResponse defines the client query response for a packet
+// receipt which also includes a proof, and the height from which the proof was
+// retrieved
+type QueryPacketReceiptResponse struct {
+ // success flag for if receipt exists
+ Received bool `protobuf:"varint,2,opt,name=received,proto3" json:"received,omitempty"`
+ // merkle proof of existence
+ Proof []byte `protobuf:"bytes,3,opt,name=proof,proto3" json:"proof,omitempty"`
+ // height at which the proof was retrieved
+ ProofHeight types.Height `protobuf:"bytes,4,opt,name=proof_height,json=proofHeight,proto3" json:"proof_height"`
+}
+
+func (m *QueryPacketReceiptResponse) Reset() { *m = QueryPacketReceiptResponse{} }
+func (m *QueryPacketReceiptResponse) String() string { return proto.CompactTextString(m) }
+func (*QueryPacketReceiptResponse) ProtoMessage() {}
+func (*QueryPacketReceiptResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_3acdacc9aeb4fa50, []int{15}
+}
+func (m *QueryPacketReceiptResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *QueryPacketReceiptResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_QueryPacketReceiptResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *QueryPacketReceiptResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QueryPacketReceiptResponse.Merge(m, src)
+}
+func (m *QueryPacketReceiptResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *QueryPacketReceiptResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_QueryPacketReceiptResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QueryPacketReceiptResponse proto.InternalMessageInfo
+
+func (m *QueryPacketReceiptResponse) GetReceived() bool {
+ if m != nil {
+ return m.Received
+ }
+ return false
+}
+
+func (m *QueryPacketReceiptResponse) GetProof() []byte {
+ if m != nil {
+ return m.Proof
+ }
+ return nil
+}
+
+func (m *QueryPacketReceiptResponse) GetProofHeight() types.Height {
+ if m != nil {
+ return m.ProofHeight
+ }
+ return types.Height{}
+}
+
+// QueryPacketAcknowledgementRequest is the request type for the
+// Query/PacketAcknowledgement RPC method
+type QueryPacketAcknowledgementRequest struct {
+ // port unique identifier
+ PortId string `protobuf:"bytes,1,opt,name=port_id,json=portId,proto3" json:"port_id,omitempty"`
+ // channel unique identifier
+ ChannelId string `protobuf:"bytes,2,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty"`
+ // packet sequence
+ Sequence uint64 `protobuf:"varint,3,opt,name=sequence,proto3" json:"sequence,omitempty"`
+}
+
+func (m *QueryPacketAcknowledgementRequest) Reset() { *m = QueryPacketAcknowledgementRequest{} }
+func (m *QueryPacketAcknowledgementRequest) String() string { return proto.CompactTextString(m) }
+func (*QueryPacketAcknowledgementRequest) ProtoMessage() {}
+func (*QueryPacketAcknowledgementRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_3acdacc9aeb4fa50, []int{16}
+}
+func (m *QueryPacketAcknowledgementRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *QueryPacketAcknowledgementRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_QueryPacketAcknowledgementRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *QueryPacketAcknowledgementRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QueryPacketAcknowledgementRequest.Merge(m, src)
+}
+func (m *QueryPacketAcknowledgementRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *QueryPacketAcknowledgementRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_QueryPacketAcknowledgementRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QueryPacketAcknowledgementRequest proto.InternalMessageInfo
+
+func (m *QueryPacketAcknowledgementRequest) GetPortId() string {
+ if m != nil {
+ return m.PortId
+ }
+ return ""
+}
+
+func (m *QueryPacketAcknowledgementRequest) GetChannelId() string {
+ if m != nil {
+ return m.ChannelId
+ }
+ return ""
+}
+
+func (m *QueryPacketAcknowledgementRequest) GetSequence() uint64 {
+ if m != nil {
+ return m.Sequence
+ }
+ return 0
+}
+
+// QueryPacketAcknowledgementResponse defines the client query response for a
+// packet which also includes a proof and the height from which the
+// proof was retrieved
+type QueryPacketAcknowledgementResponse struct {
+ // packet associated with the request fields
+ Acknowledgement []byte `protobuf:"bytes,1,opt,name=acknowledgement,proto3" json:"acknowledgement,omitempty"`
+ // merkle proof of existence
+ Proof []byte `protobuf:"bytes,2,opt,name=proof,proto3" json:"proof,omitempty"`
+ // height at which the proof was retrieved
+ ProofHeight types.Height `protobuf:"bytes,3,opt,name=proof_height,json=proofHeight,proto3" json:"proof_height"`
+}
+
+func (m *QueryPacketAcknowledgementResponse) Reset() { *m = QueryPacketAcknowledgementResponse{} }
+func (m *QueryPacketAcknowledgementResponse) String() string { return proto.CompactTextString(m) }
+func (*QueryPacketAcknowledgementResponse) ProtoMessage() {}
+func (*QueryPacketAcknowledgementResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_3acdacc9aeb4fa50, []int{17}
+}
+func (m *QueryPacketAcknowledgementResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *QueryPacketAcknowledgementResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_QueryPacketAcknowledgementResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *QueryPacketAcknowledgementResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QueryPacketAcknowledgementResponse.Merge(m, src)
+}
+func (m *QueryPacketAcknowledgementResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *QueryPacketAcknowledgementResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_QueryPacketAcknowledgementResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QueryPacketAcknowledgementResponse proto.InternalMessageInfo
+
+func (m *QueryPacketAcknowledgementResponse) GetAcknowledgement() []byte {
+ if m != nil {
+ return m.Acknowledgement
+ }
+ return nil
+}
+
+func (m *QueryPacketAcknowledgementResponse) GetProof() []byte {
+ if m != nil {
+ return m.Proof
+ }
+ return nil
+}
+
+func (m *QueryPacketAcknowledgementResponse) GetProofHeight() types.Height {
+ if m != nil {
+ return m.ProofHeight
+ }
+ return types.Height{}
+}
+
+// QueryPacketAcknowledgementsRequest is the request type for the
+// Query/QueryPacketAcknowledgements RPC method
+type QueryPacketAcknowledgementsRequest struct {
+ // port unique identifier
+ PortId string `protobuf:"bytes,1,opt,name=port_id,json=portId,proto3" json:"port_id,omitempty"`
+ // channel unique identifier
+ ChannelId string `protobuf:"bytes,2,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty"`
+ // pagination request
+ Pagination *query.PageRequest `protobuf:"bytes,3,opt,name=pagination,proto3" json:"pagination,omitempty"`
+}
+
+func (m *QueryPacketAcknowledgementsRequest) Reset() { *m = QueryPacketAcknowledgementsRequest{} }
+func (m *QueryPacketAcknowledgementsRequest) String() string { return proto.CompactTextString(m) }
+func (*QueryPacketAcknowledgementsRequest) ProtoMessage() {}
+func (*QueryPacketAcknowledgementsRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_3acdacc9aeb4fa50, []int{18}
+}
+func (m *QueryPacketAcknowledgementsRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *QueryPacketAcknowledgementsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_QueryPacketAcknowledgementsRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *QueryPacketAcknowledgementsRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QueryPacketAcknowledgementsRequest.Merge(m, src)
+}
+func (m *QueryPacketAcknowledgementsRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *QueryPacketAcknowledgementsRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_QueryPacketAcknowledgementsRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QueryPacketAcknowledgementsRequest proto.InternalMessageInfo
+
+func (m *QueryPacketAcknowledgementsRequest) GetPortId() string {
+ if m != nil {
+ return m.PortId
+ }
+ return ""
+}
+
+func (m *QueryPacketAcknowledgementsRequest) GetChannelId() string {
+ if m != nil {
+ return m.ChannelId
+ }
+ return ""
+}
+
+func (m *QueryPacketAcknowledgementsRequest) GetPagination() *query.PageRequest {
+ if m != nil {
+ return m.Pagination
+ }
+ return nil
+}
+
+// QueryPacketAcknowledgementsResponse is the response type for the
+// Query/QueryPacketAcknowledgements RPC method
+type QueryPacketAcknowledgementsResponse struct {
+ Acknowledgements []*PacketState `protobuf:"bytes,1,rep,name=acknowledgements,proto3" json:"acknowledgements,omitempty"`
+ // pagination response
+ Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"`
+ // query block height
+ Height types.Height `protobuf:"bytes,3,opt,name=height,proto3" json:"height"`
+}
+
+func (m *QueryPacketAcknowledgementsResponse) Reset() { *m = QueryPacketAcknowledgementsResponse{} }
+func (m *QueryPacketAcknowledgementsResponse) String() string { return proto.CompactTextString(m) }
+func (*QueryPacketAcknowledgementsResponse) ProtoMessage() {}
+func (*QueryPacketAcknowledgementsResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_3acdacc9aeb4fa50, []int{19}
+}
+func (m *QueryPacketAcknowledgementsResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *QueryPacketAcknowledgementsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_QueryPacketAcknowledgementsResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *QueryPacketAcknowledgementsResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QueryPacketAcknowledgementsResponse.Merge(m, src)
+}
+func (m *QueryPacketAcknowledgementsResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *QueryPacketAcknowledgementsResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_QueryPacketAcknowledgementsResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QueryPacketAcknowledgementsResponse proto.InternalMessageInfo
+
+func (m *QueryPacketAcknowledgementsResponse) GetAcknowledgements() []*PacketState {
+ if m != nil {
+ return m.Acknowledgements
+ }
+ return nil
+}
+
+func (m *QueryPacketAcknowledgementsResponse) GetPagination() *query.PageResponse {
+ if m != nil {
+ return m.Pagination
+ }
+ return nil
+}
+
+func (m *QueryPacketAcknowledgementsResponse) GetHeight() types.Height {
+ if m != nil {
+ return m.Height
+ }
+ return types.Height{}
+}
+
+// QueryUnreceivedPacketsRequest is the request type for the
+// Query/UnreceivedPackets RPC method
+type QueryUnreceivedPacketsRequest struct {
+ // port unique identifier
+ PortId string `protobuf:"bytes,1,opt,name=port_id,json=portId,proto3" json:"port_id,omitempty"`
+ // channel unique identifier
+ ChannelId string `protobuf:"bytes,2,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty"`
+ // list of packet sequences
+ PacketCommitmentSequences []uint64 `protobuf:"varint,3,rep,packed,name=packet_commitment_sequences,json=packetCommitmentSequences,proto3" json:"packet_commitment_sequences,omitempty"`
+}
+
+func (m *QueryUnreceivedPacketsRequest) Reset() { *m = QueryUnreceivedPacketsRequest{} }
+func (m *QueryUnreceivedPacketsRequest) String() string { return proto.CompactTextString(m) }
+func (*QueryUnreceivedPacketsRequest) ProtoMessage() {}
+func (*QueryUnreceivedPacketsRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_3acdacc9aeb4fa50, []int{20}
+}
+func (m *QueryUnreceivedPacketsRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *QueryUnreceivedPacketsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_QueryUnreceivedPacketsRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *QueryUnreceivedPacketsRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QueryUnreceivedPacketsRequest.Merge(m, src)
+}
+func (m *QueryUnreceivedPacketsRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *QueryUnreceivedPacketsRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_QueryUnreceivedPacketsRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QueryUnreceivedPacketsRequest proto.InternalMessageInfo
+
+func (m *QueryUnreceivedPacketsRequest) GetPortId() string {
+ if m != nil {
+ return m.PortId
+ }
+ return ""
+}
+
+func (m *QueryUnreceivedPacketsRequest) GetChannelId() string {
+ if m != nil {
+ return m.ChannelId
+ }
+ return ""
+}
+
+func (m *QueryUnreceivedPacketsRequest) GetPacketCommitmentSequences() []uint64 {
+ if m != nil {
+ return m.PacketCommitmentSequences
+ }
+ return nil
+}
+
+// QueryUnreceivedPacketsResponse is the response type for the
+// Query/UnreceivedPackets RPC method
+type QueryUnreceivedPacketsResponse struct {
+ // list of unreceived packet sequences
+ Sequences []uint64 `protobuf:"varint,1,rep,packed,name=sequences,proto3" json:"sequences,omitempty"`
+ // query block height
+ Height types.Height `protobuf:"bytes,2,opt,name=height,proto3" json:"height"`
+}
+
+func (m *QueryUnreceivedPacketsResponse) Reset() { *m = QueryUnreceivedPacketsResponse{} }
+func (m *QueryUnreceivedPacketsResponse) String() string { return proto.CompactTextString(m) }
+func (*QueryUnreceivedPacketsResponse) ProtoMessage() {}
+func (*QueryUnreceivedPacketsResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_3acdacc9aeb4fa50, []int{21}
+}
+func (m *QueryUnreceivedPacketsResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *QueryUnreceivedPacketsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_QueryUnreceivedPacketsResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *QueryUnreceivedPacketsResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QueryUnreceivedPacketsResponse.Merge(m, src)
+}
+func (m *QueryUnreceivedPacketsResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *QueryUnreceivedPacketsResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_QueryUnreceivedPacketsResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QueryUnreceivedPacketsResponse proto.InternalMessageInfo
+
+func (m *QueryUnreceivedPacketsResponse) GetSequences() []uint64 {
+ if m != nil {
+ return m.Sequences
+ }
+ return nil
+}
+
+func (m *QueryUnreceivedPacketsResponse) GetHeight() types.Height {
+ if m != nil {
+ return m.Height
+ }
+ return types.Height{}
+}
+
+// QueryUnreceivedAcksRequest is the request type for the
+// Query/UnreceivedAcks RPC method
+type QueryUnreceivedAcksRequest struct {
+ // port unique identifier
+ PortId string `protobuf:"bytes,1,opt,name=port_id,json=portId,proto3" json:"port_id,omitempty"`
+ // channel unique identifier
+ ChannelId string `protobuf:"bytes,2,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty"`
+ // list of acknowledgement sequences
+ PacketAckSequences []uint64 `protobuf:"varint,3,rep,packed,name=packet_ack_sequences,json=packetAckSequences,proto3" json:"packet_ack_sequences,omitempty"`
+}
+
+func (m *QueryUnreceivedAcksRequest) Reset() { *m = QueryUnreceivedAcksRequest{} }
+func (m *QueryUnreceivedAcksRequest) String() string { return proto.CompactTextString(m) }
+func (*QueryUnreceivedAcksRequest) ProtoMessage() {}
+func (*QueryUnreceivedAcksRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_3acdacc9aeb4fa50, []int{22}
+}
+func (m *QueryUnreceivedAcksRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *QueryUnreceivedAcksRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_QueryUnreceivedAcksRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *QueryUnreceivedAcksRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QueryUnreceivedAcksRequest.Merge(m, src)
+}
+func (m *QueryUnreceivedAcksRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *QueryUnreceivedAcksRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_QueryUnreceivedAcksRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QueryUnreceivedAcksRequest proto.InternalMessageInfo
+
+func (m *QueryUnreceivedAcksRequest) GetPortId() string {
+ if m != nil {
+ return m.PortId
+ }
+ return ""
+}
+
+func (m *QueryUnreceivedAcksRequest) GetChannelId() string {
+ if m != nil {
+ return m.ChannelId
+ }
+ return ""
+}
+
+func (m *QueryUnreceivedAcksRequest) GetPacketAckSequences() []uint64 {
+ if m != nil {
+ return m.PacketAckSequences
+ }
+ return nil
+}
+
+// QueryUnreceivedAcksResponse is the response type for the
+// Query/UnreceivedAcks RPC method
+type QueryUnreceivedAcksResponse struct {
+ // list of unreceived acknowledgement sequences
+ Sequences []uint64 `protobuf:"varint,1,rep,packed,name=sequences,proto3" json:"sequences,omitempty"`
+ // query block height
+ Height types.Height `protobuf:"bytes,2,opt,name=height,proto3" json:"height"`
+}
+
+func (m *QueryUnreceivedAcksResponse) Reset() { *m = QueryUnreceivedAcksResponse{} }
+func (m *QueryUnreceivedAcksResponse) String() string { return proto.CompactTextString(m) }
+func (*QueryUnreceivedAcksResponse) ProtoMessage() {}
+func (*QueryUnreceivedAcksResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_3acdacc9aeb4fa50, []int{23}
+}
+func (m *QueryUnreceivedAcksResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *QueryUnreceivedAcksResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_QueryUnreceivedAcksResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *QueryUnreceivedAcksResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QueryUnreceivedAcksResponse.Merge(m, src)
+}
+func (m *QueryUnreceivedAcksResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *QueryUnreceivedAcksResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_QueryUnreceivedAcksResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QueryUnreceivedAcksResponse proto.InternalMessageInfo
+
+func (m *QueryUnreceivedAcksResponse) GetSequences() []uint64 {
+ if m != nil {
+ return m.Sequences
+ }
+ return nil
+}
+
+func (m *QueryUnreceivedAcksResponse) GetHeight() types.Height {
+ if m != nil {
+ return m.Height
+ }
+ return types.Height{}
+}
+
+// QueryNextSequenceReceiveRequest is the request type for the
+// Query/NextSequenceReceive RPC method
+type QueryNextSequenceReceiveRequest struct {
+ // port unique identifier
+ PortId string `protobuf:"bytes,1,opt,name=port_id,json=portId,proto3" json:"port_id,omitempty"`
+ // channel unique identifier
+ ChannelId string `protobuf:"bytes,2,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty"`
+}
+
+func (m *QueryNextSequenceReceiveRequest) Reset() { *m = QueryNextSequenceReceiveRequest{} }
+func (m *QueryNextSequenceReceiveRequest) String() string { return proto.CompactTextString(m) }
+func (*QueryNextSequenceReceiveRequest) ProtoMessage() {}
+func (*QueryNextSequenceReceiveRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_3acdacc9aeb4fa50, []int{24}
+}
+func (m *QueryNextSequenceReceiveRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *QueryNextSequenceReceiveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_QueryNextSequenceReceiveRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *QueryNextSequenceReceiveRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QueryNextSequenceReceiveRequest.Merge(m, src)
+}
+func (m *QueryNextSequenceReceiveRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *QueryNextSequenceReceiveRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_QueryNextSequenceReceiveRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QueryNextSequenceReceiveRequest proto.InternalMessageInfo
+
+func (m *QueryNextSequenceReceiveRequest) GetPortId() string {
+ if m != nil {
+ return m.PortId
+ }
+ return ""
+}
+
+func (m *QueryNextSequenceReceiveRequest) GetChannelId() string {
+ if m != nil {
+ return m.ChannelId
+ }
+ return ""
+}
+
+// QueryNextSequenceReceiveResponse is the response type for the
+// Query/NextSequenceReceive RPC method
+type QueryNextSequenceReceiveResponse struct {
+ // next sequence receive number
+ NextSequenceReceive uint64 `protobuf:"varint,1,opt,name=next_sequence_receive,json=nextSequenceReceive,proto3" json:"next_sequence_receive,omitempty"`
+ // merkle proof of existence
+ Proof []byte `protobuf:"bytes,2,opt,name=proof,proto3" json:"proof,omitempty"`
+ // height at which the proof was retrieved
+ ProofHeight types.Height `protobuf:"bytes,3,opt,name=proof_height,json=proofHeight,proto3" json:"proof_height"`
+}
+
+func (m *QueryNextSequenceReceiveResponse) Reset() { *m = QueryNextSequenceReceiveResponse{} }
+func (m *QueryNextSequenceReceiveResponse) String() string { return proto.CompactTextString(m) }
+func (*QueryNextSequenceReceiveResponse) ProtoMessage() {}
+func (*QueryNextSequenceReceiveResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_3acdacc9aeb4fa50, []int{25}
+}
+func (m *QueryNextSequenceReceiveResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *QueryNextSequenceReceiveResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_QueryNextSequenceReceiveResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *QueryNextSequenceReceiveResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QueryNextSequenceReceiveResponse.Merge(m, src)
+}
+func (m *QueryNextSequenceReceiveResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *QueryNextSequenceReceiveResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_QueryNextSequenceReceiveResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QueryNextSequenceReceiveResponse proto.InternalMessageInfo
+
+func (m *QueryNextSequenceReceiveResponse) GetNextSequenceReceive() uint64 {
+ if m != nil {
+ return m.NextSequenceReceive
+ }
+ return 0
+}
+
+func (m *QueryNextSequenceReceiveResponse) GetProof() []byte {
+ if m != nil {
+ return m.Proof
+ }
+ return nil
+}
+
+func (m *QueryNextSequenceReceiveResponse) GetProofHeight() types.Height {
+ if m != nil {
+ return m.ProofHeight
+ }
+ return types.Height{}
+}
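
The nil-receiver guard in each getter above is a protobuf convention: it keeps field access safe even when the message pointer itself is nil, for example on an error path where a response was never populated. A standalone sketch of the pattern (not part of this patch, type names are illustrative only):

package main

import "fmt"

// demo mimics a generated message with a byte-slice field and a nil-safe getter.
type demo struct{ Proof []byte }

func (m *demo) GetProof() []byte {
	if m != nil {
		return m.Proof
	}
	return nil
}

func main() {
	var m *demo                    // nil pointer, as happens when a call errored
	fmt.Println(len(m.GetProof())) // prints 0 instead of panicking
}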
+
+func init() {
+ proto.RegisterType((*QueryChannelRequest)(nil), "ibcgo.core.channel.v1.QueryChannelRequest")
+ proto.RegisterType((*QueryChannelResponse)(nil), "ibcgo.core.channel.v1.QueryChannelResponse")
+ proto.RegisterType((*QueryChannelsRequest)(nil), "ibcgo.core.channel.v1.QueryChannelsRequest")
+ proto.RegisterType((*QueryChannelsResponse)(nil), "ibcgo.core.channel.v1.QueryChannelsResponse")
+ proto.RegisterType((*QueryConnectionChannelsRequest)(nil), "ibcgo.core.channel.v1.QueryConnectionChannelsRequest")
+ proto.RegisterType((*QueryConnectionChannelsResponse)(nil), "ibcgo.core.channel.v1.QueryConnectionChannelsResponse")
+ proto.RegisterType((*QueryChannelClientStateRequest)(nil), "ibcgo.core.channel.v1.QueryChannelClientStateRequest")
+ proto.RegisterType((*QueryChannelClientStateResponse)(nil), "ibcgo.core.channel.v1.QueryChannelClientStateResponse")
+ proto.RegisterType((*QueryChannelConsensusStateRequest)(nil), "ibcgo.core.channel.v1.QueryChannelConsensusStateRequest")
+ proto.RegisterType((*QueryChannelConsensusStateResponse)(nil), "ibcgo.core.channel.v1.QueryChannelConsensusStateResponse")
+ proto.RegisterType((*QueryPacketCommitmentRequest)(nil), "ibcgo.core.channel.v1.QueryPacketCommitmentRequest")
+ proto.RegisterType((*QueryPacketCommitmentResponse)(nil), "ibcgo.core.channel.v1.QueryPacketCommitmentResponse")
+ proto.RegisterType((*QueryPacketCommitmentsRequest)(nil), "ibcgo.core.channel.v1.QueryPacketCommitmentsRequest")
+ proto.RegisterType((*QueryPacketCommitmentsResponse)(nil), "ibcgo.core.channel.v1.QueryPacketCommitmentsResponse")
+ proto.RegisterType((*QueryPacketReceiptRequest)(nil), "ibcgo.core.channel.v1.QueryPacketReceiptRequest")
+ proto.RegisterType((*QueryPacketReceiptResponse)(nil), "ibcgo.core.channel.v1.QueryPacketReceiptResponse")
+ proto.RegisterType((*QueryPacketAcknowledgementRequest)(nil), "ibcgo.core.channel.v1.QueryPacketAcknowledgementRequest")
+ proto.RegisterType((*QueryPacketAcknowledgementResponse)(nil), "ibcgo.core.channel.v1.QueryPacketAcknowledgementResponse")
+ proto.RegisterType((*QueryPacketAcknowledgementsRequest)(nil), "ibcgo.core.channel.v1.QueryPacketAcknowledgementsRequest")
+ proto.RegisterType((*QueryPacketAcknowledgementsResponse)(nil), "ibcgo.core.channel.v1.QueryPacketAcknowledgementsResponse")
+ proto.RegisterType((*QueryUnreceivedPacketsRequest)(nil), "ibcgo.core.channel.v1.QueryUnreceivedPacketsRequest")
+ proto.RegisterType((*QueryUnreceivedPacketsResponse)(nil), "ibcgo.core.channel.v1.QueryUnreceivedPacketsResponse")
+ proto.RegisterType((*QueryUnreceivedAcksRequest)(nil), "ibcgo.core.channel.v1.QueryUnreceivedAcksRequest")
+ proto.RegisterType((*QueryUnreceivedAcksResponse)(nil), "ibcgo.core.channel.v1.QueryUnreceivedAcksResponse")
+ proto.RegisterType((*QueryNextSequenceReceiveRequest)(nil), "ibcgo.core.channel.v1.QueryNextSequenceReceiveRequest")
+ proto.RegisterType((*QueryNextSequenceReceiveResponse)(nil), "ibcgo.core.channel.v1.QueryNextSequenceReceiveResponse")
+}
+
+func init() { proto.RegisterFile("ibcgo/core/channel/v1/query.proto", fileDescriptor_3acdacc9aeb4fa50) }
+
+var fileDescriptor_3acdacc9aeb4fa50 = []byte{
+ // 1487 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x59, 0xcb, 0x8f, 0xdb, 0xd4,
+ 0x17, 0x9e, 0x9b, 0x99, 0xb6, 0x99, 0x33, 0xfd, 0xf5, 0x71, 0x3b, 0x69, 0xa7, 0xee, 0x34, 0x9d,
+ 0xba, 0xfa, 0xd1, 0x51, 0x4b, 0xed, 0x66, 0xfa, 0xa0, 0x54, 0x50, 0xa9, 0x2d, 0xd0, 0x0e, 0x52,
+ 0x5f, 0x2e, 0x15, 0x6d, 0x25, 0x08, 0x8e, 0x73, 0x9b, 0xb1, 0x66, 0x62, 0xbb, 0xb1, 0x93, 0xb6,
+ 0x0c, 0x41, 0x88, 0x05, 0x20, 0xc4, 0x02, 0x09, 0x01, 0x1b, 0x24, 0x36, 0x88, 0x0d, 0xea, 0x8a,
+ 0x3f, 0x80, 0x05, 0x9b, 0x2e, 0x2b, 0x15, 0x89, 0x6e, 0x78, 0x68, 0x06, 0x89, 0x45, 0xd9, 0xb2,
+ 0x61, 0x85, 0x7c, 0x1f, 0x8e, 0x9d, 0xd8, 0x9e, 0x49, 0x32, 0x91, 0x2a, 0x76, 0xf1, 0xf5, 0x3d,
+ 0xe7, 0x7e, 0xdf, 0x77, 0xee, 0x39, 0x39, 0x27, 0x81, 0xbd, 0x66, 0xc9, 0xa8, 0xd8, 0xaa, 0x61,
+ 0xd7, 0x88, 0x6a, 0xcc, 0xe9, 0x96, 0x45, 0x16, 0xd4, 0x46, 0x41, 0xbd, 0x5d, 0x27, 0xb5, 0x7b,
+ 0x8a, 0x53, 0xb3, 0x3d, 0x1b, 0xe7, 0xe8, 0x16, 0xc5, 0xdf, 0xa2, 0xf0, 0x2d, 0x4a, 0xa3, 0x20,
+ 0x45, 0x2c, 0x17, 0x4c, 0x62, 0x79, 0xbe, 0x21, 0xfb, 0xc4, 0x2c, 0xa5, 0x03, 0x86, 0xed, 0x56,
+ 0x6d, 0x57, 0x2d, 0xe9, 0x2e, 0x61, 0x2e, 0xd5, 0x46, 0xa1, 0x44, 0x3c, 0xbd, 0xa0, 0x3a, 0x7a,
+ 0xc5, 0xb4, 0x74, 0xcf, 0xb4, 0x2d, 0xbe, 0x77, 0x5f, 0x3c, 0x10, 0x71, 0x20, 0xdb, 0x34, 0x59,
+ 0xb1, 0xed, 0xca, 0x02, 0x51, 0x75, 0xc7, 0x54, 0x75, 0xcb, 0xb2, 0x3d, 0xea, 0xc1, 0xe5, 0x6f,
+ 0x77, 0xf2, 0xb7, 0xf4, 0xa9, 0x54, 0xbf, 0xa5, 0xea, 0x16, 0xe7, 0x20, 0x8d, 0x57, 0xec, 0x8a,
+ 0x4d, 0x3f, 0xaa, 0xfe, 0x27, 0xb6, 0x2a, 0x5f, 0x80, 0x6d, 0x57, 0x7c, 0x54, 0x67, 0xd9, 0x21,
+ 0x1a, 0xb9, 0x5d, 0x27, 0xae, 0x87, 0x77, 0xc0, 0x06, 0xc7, 0xae, 0x79, 0x45, 0xb3, 0x3c, 0x81,
+ 0xa6, 0xd0, 0xf4, 0xa8, 0xb6, 0xde, 0x7f, 0x9c, 0x2d, 0xe3, 0xdd, 0x00, 0x1c, 0x8f, 0xff, 0x2e,
+ 0x43, 0xdf, 0x8d, 0xf2, 0x95, 0xd9, 0xb2, 0x7c, 0x1f, 0xc1, 0x78, 0xd4, 0x9f, 0xeb, 0xd8, 0x96,
+ 0x4b, 0xf0, 0x09, 0xd8, 0xc0, 0x77, 0x51, 0x87, 0x63, 0x33, 0x79, 0x25, 0x56, 0x53, 0x45, 0x18,
+ 0x8a, 0xed, 0x78, 0x1c, 0xd6, 0x39, 0x35, 0xdb, 0xbe, 0x45, 0x0f, 0xdb, 0xa8, 0xb1, 0x07, 0xfc,
+ 0x32, 0x6c, 0xa4, 0x1f, 0x8a, 0x73, 0xc4, 0xac, 0xcc, 0x79, 0x13, 0xc3, 0xd4, 0xe9, 0x64, 0xc4,
+ 0x29, 0x8b, 0x43, 0xa3, 0xa0, 0x9c, 0xa7, 0x7b, 0xce, 0x8c, 0x3c, 0xf8, 0x75, 0xcf, 0x90, 0x36,
+ 0x46, 0xed, 0xd8, 0x92, 0xfc, 0x66, 0x14, 0xae, 0x2b, 0xf8, 0xbf, 0x02, 0xd0, 0x0a, 0x0f, 0x47,
+ 0xfc, 0x8c, 0xc2, 0x62, 0xa9, 0xf8, 0xb1, 0x54, 0xd8, 0xf5, 0xe0, 0xb1, 0x54, 0x2e, 0xeb, 0x15,
+ 0xc2, 0x6d, 0xb5, 0x90, 0xa5, 0xbc, 0x84, 0x20, 0xd7, 0x76, 0x00, 0x17, 0xe4, 0x25, 0xc8, 0x72,
+ 0x86, 0xee, 0x04, 0x9a, 0x1a, 0x9e, 0x1e, 0x9b, 0x99, 0x4e, 0x50, 0x64, 0xb6, 0x4c, 0x2c, 0xcf,
+ 0xbc, 0x65, 0x92, 0xb2, 0xd0, 0x26, 0xb0, 0xc4, 0xe7, 0x22, 0x38, 0x33, 0x14, 0xe7, 0xfe, 0x15,
+ 0x71, 0x32, 0x08, 0x61, 0xa0, 0xf8, 0x24, 0xac, 0xef, 0x5a, 0x49, 0x6e, 0x21, 0x7f, 0x84, 0x20,
+ 0xcf, 0x48, 0xda, 0x96, 0x45, 0x0c, 0xdf, 0x5f, 0xbb, 0x9e, 0x79, 0x00, 0x23, 0x78, 0xc9, 0xaf,
+ 0x54, 0x68, 0xa5, 0x4d, 0xef, 0x4c, 0xcf, 0x7a, 0xff, 0x85, 0x60, 0x4f, 0x22, 0x94, 0xff, 0x9e,
+ 0xf2, 0xd7, 0x85, 0xf0, 0x0c, 0xd5, 0x59, 0xba, 0xfb, 0xaa, 0xa7, 0x7b, 0xa4, 0xdf, 0x44, 0x5e,
+ 0x0e, 0x84, 0x8c, 0x71, 0xcd, 0x85, 0x34, 0x60, 0x87, 0x19, 0x28, 0x54, 0x64, 0x50, 0x8b, 0xae,
+ 0xbf, 0x85, 0x67, 0xcc, 0xc1, 0x78, 0x2a, 0x21, 0x59, 0x43, 0x5e, 0x73, 0x66, 0xdc, 0xf2, 0x60,
+ 0xd3, 0xff, 0x3e, 0x82, 0xbd, 0x11, 0x96, 0x3e, 0x2f, 0xcb, 0xad, 0xbb, 0x6b, 0xa1, 0x21, 0xde,
+ 0x0f, 0x9b, 0x6b, 0xa4, 0x61, 0xba, 0xa6, 0x6d, 0x15, 0xad, 0x7a, 0xb5, 0x44, 0x6a, 0x14, 0xe7,
+ 0x88, 0xb6, 0x49, 0x2c, 0x5f, 0xa4, 0xab, 0x91, 0x8d, 0x9c, 0xd0, 0x48, 0x74, 0x23, 0xc7, 0xfb,
+ 0x0b, 0x02, 0x39, 0x0d, 0x2f, 0x0f, 0xcc, 0x8b, 0xb0, 0xd9, 0x10, 0x6f, 0x22, 0x01, 0x19, 0x57,
+ 0xd8, 0xf7, 0x83, 0x22, 0xbe, 0x1f, 0x94, 0xd3, 0xd6, 0x3d, 0x6d, 0x93, 0x11, 0x71, 0x83, 0x77,
+ 0xc1, 0x28, 0x0f, 0x66, 0xc0, 0x2a, 0xcb, 0x16, 0x66, 0xcb, 0xad, 0x78, 0x0c, 0xa7, 0xc5, 0x63,
+ 0xa4, 0xb7, 0x78, 0xd4, 0x60, 0x92, 0xd2, 0xbb, 0xac, 0x1b, 0xf3, 0xc4, 0x3b, 0x6b, 0x57, 0xab,
+ 0xa6, 0x57, 0x25, 0x96, 0xd7, 0x6f, 0x24, 0x24, 0xc8, 0xba, 0xbe, 0x0b, 0xcb, 0x20, 0x3c, 0x04,
+ 0xc1, 0xb3, 0xfc, 0x15, 0x82, 0xdd, 0x09, 0x87, 0x72, 0x39, 0x69, 0xf1, 0x12, 0xab, 0xf4, 0xe0,
+ 0x8d, 0x5a, 0x68, 0x65, 0xb0, 0x57, 0xf4, 0xeb, 0x24, 0x78, 0x6e, 0xbf, 0xa2, 0x44, 0x6b, 0xee,
+ 0x70, 0xcf, 0x35, 0xf7, 0x89, 0x28, 0xff, 0x31, 0x08, 0x83, 0x92, 0x3b, 0xd6, 0xd2, 0x4b, 0x54,
+ 0x5d, 0x39, 0xa1, 0xea, 0x32, 0x37, 0xec, 0x46, 0x87, 0xcd, 0x9e, 0x8e, 0x92, 0x6b, 0xc3, 0xce,
+ 0x10, 0x59, 0x8d, 0x18, 0xc4, 0x74, 0x06, 0x7a, 0x3f, 0x3f, 0x47, 0x20, 0xc5, 0x9d, 0xc8, 0xa5,
+ 0x95, 0x20, 0x5b, 0xf3, 0x97, 0x1a, 0x84, 0xf9, 0xcd, 0x6a, 0xc1, 0xf3, 0x60, 0x73, 0xf5, 0x0e,
+ 0x2f, 0x9d, 0x0c, 0xd6, 0x69, 0x63, 0xde, 0xb2, 0xef, 0x2c, 0x90, 0x72, 0x85, 0x0c, 0x3a, 0x61,
+ 0xbf, 0x13, 0x45, 0x30, 0xe1, 0x64, 0x2e, 0xcc, 0x34, 0x6c, 0xd6, 0xa3, 0xaf, 0x78, 0xea, 0xb6,
+ 0x2f, 0x0f, 0x36, 0x7f, 0xbf, 0x49, 0x45, 0xfb, 0xd4, 0x24, 0xf1, 0x3f, 0x08, 0xf6, 0xa5, 0xc2,
+ 0xe4, 0xaa, 0x5e, 0x84, 0x2d, 0x6d, 0xf2, 0x75, 0x93, 0xce, 0x1d, 0xb6, 0x4f, 0x47, 0x4e, 0x7f,
+ 0x29, 0x6a, 0xec, 0x35, 0x4b, 0xe4, 0x0e, 0x43, 0xdd, 0x77, 0x78, 0x4e, 0xc1, 0x2e, 0x87, 0x7a,
+ 0x2a, 0xb6, 0x0a, 0x59, 0x51, 0xdc, 0x64, 0x77, 0x62, 0x78, 0x6a, 0x78, 0x7a, 0x44, 0xdb, 0xe9,
+ 0xb4, 0x15, 0xce, 0xab, 0x62, 0x83, 0xfc, 0x36, 0x2f, 0xad, 0x31, 0xc0, 0x78, 0x40, 0x26, 0x61,
+ 0xb4, 0xe5, 0x0f, 0x51, 0x7f, 0xad, 0x85, 0x90, 0x2a, 0x99, 0xae, 0x55, 0xf9, 0x40, 0x14, 0x9e,
+ 0xd6, 0xe1, 0xa7, 0x8d, 0xf9, 0xbe, 0x25, 0x39, 0x0c, 0xe3, 0x5c, 0x12, 0xdd, 0x98, 0xef, 0xd0,
+ 0x02, 0x3b, 0xe2, 0xfe, 0xb5, 0x44, 0xb8, 0x03, 0xbb, 0x62, 0x71, 0x0c, 0x5c, 0x81, 0x1b, 0xbc,
+ 0x07, 0xbe, 0x48, 0xee, 0x06, 0x31, 0xd1, 0x18, 0x84, 0x7e, 0xfb, 0xeb, 0xef, 0x11, 0x4c, 0x25,
+ 0xfb, 0xe6, 0xcc, 0x66, 0x20, 0x67, 0x91, 0xbb, 0xad, 0x0b, 0x53, 0xe4, 0xfc, 0xe9, 0x51, 0x23,
+ 0xda, 0x36, 0xab, 0xd3, 0x76, 0xa0, 0xc5, 0x6c, 0xe6, 0x87, 0xed, 0xb0, 0x8e, 0xa2, 0xc6, 0xdf,
+ 0x22, 0xd8, 0xc0, 0x9b, 0x50, 0x7c, 0x20, 0x21, 0xf3, 0x63, 0x7e, 0x58, 0x90, 0x0e, 0xae, 0x6a,
+ 0x2f, 0xe3, 0x2f, 0x9f, 0x79, 0xff, 0xd1, 0x1f, 0x9f, 0x65, 0x5e, 0xc0, 0x27, 0x55, 0xb3, 0x64,
+ 0x24, 0xfd, 0x2e, 0xe2, 0xaa, 0x8b, 0x2d, 0xa1, 0x9b, 0xaa, 0x2f, 0xbf, 0xab, 0x2e, 0xf2, 0xa0,
+ 0x34, 0xf1, 0x27, 0x08, 0xb2, 0x62, 0x04, 0xc4, 0xab, 0x39, 0x5d, 0x5c, 0x70, 0xe9, 0xd9, 0xd5,
+ 0x6d, 0xe6, 0x58, 0xff, 0x4f, 0xb1, 0xee, 0xc1, 0xbb, 0x53, 0xb1, 0xe2, 0x1f, 0x11, 0xe0, 0xce,
+ 0xd9, 0x14, 0x1f, 0x4b, 0x3d, 0x2b, 0x69, 0xac, 0x96, 0x8e, 0x77, 0x6b, 0xc6, 0xc1, 0x9e, 0xa2,
+ 0x60, 0x4f, 0xe0, 0xe3, 0xf1, 0x60, 0x03, 0x43, 0x5f, 0xdb, 0xe0, 0xa1, 0xd9, 0x62, 0xf1, 0x93,
+ 0xcf, 0xa2, 0x63, 0x30, 0x5c, 0x81, 0x45, 0xd2, 0x8c, 0xba, 0x02, 0x8b, 0xc4, 0xf9, 0x53, 0xbe,
+ 0x44, 0x59, 0xcc, 0xe2, 0x73, 0xbd, 0x5f, 0x0f, 0x35, 0x3c, 0xb5, 0xe2, 0x2f, 0x32, 0x90, 0x8b,
+ 0x9d, 0xac, 0xf0, 0x89, 0xd5, 0x40, 0x8c, 0x1b, 0x1e, 0xa5, 0xe7, 0x7b, 0xb0, 0xe4, 0xfc, 0x3e,
+ 0x44, 0x94, 0xe0, 0x7b, 0x08, 0xbf, 0xdb, 0x0f, 0xc3, 0xe8, 0x24, 0xa8, 0x8a, 0x91, 0x52, 0x5d,
+ 0x6c, 0x1b, 0x4e, 0x9b, 0x2a, 0x2b, 0x0e, 0xa1, 0x17, 0x6c, 0xa1, 0x89, 0x7f, 0x43, 0xb0, 0xa5,
+ 0xbd, 0xbb, 0xc7, 0x47, 0xd2, 0x98, 0x25, 0x4c, 0x70, 0xd2, 0xd1, 0xee, 0x8c, 0xb8, 0x12, 0x6f,
+ 0x51, 0x21, 0x6e, 0xe2, 0xeb, 0x7d, 0xe8, 0xd0, 0xf1, 0x3d, 0xec, 0xaa, 0x8b, 0xa2, 0xb0, 0x36,
+ 0xf1, 0xcf, 0x08, 0xb6, 0x76, 0xcc, 0x2f, 0xb8, 0x2b, 0xb4, 0x41, 0x56, 0x1e, 0xeb, 0xd2, 0x8a,
+ 0x93, 0xbc, 0x46, 0x49, 0x5e, 0xc2, 0x17, 0xd6, 0x94, 0x24, 0x7e, 0x84, 0xe0, 0x7f, 0x91, 0xd1,
+ 0x01, 0x1f, 0x5e, 0x19, 0x5f, 0x74, 0xae, 0x91, 0x0a, 0x5d, 0x58, 0x70, 0x36, 0x6f, 0x50, 0x36,
+ 0xaf, 0xe3, 0x6b, 0xfd, 0xb3, 0xa9, 0x31, 0xd7, 0x91, 0x78, 0xfd, 0x89, 0x20, 0x17, 0xdb, 0xaa,
+ 0xa6, 0xa7, 0x6a, 0xda, 0xb0, 0x92, 0x9e, 0xaa, 0xa9, 0xc3, 0x86, 0x7c, 0x83, 0xb2, 0xbd, 0x8a,
+ 0xaf, 0xf4, 0xcf, 0x56, 0x37, 0xe6, 0x23, 0x4c, 0x9f, 0x20, 0xd8, 0x1e, 0xdf, 0x94, 0xe3, 0xee,
+ 0x01, 0x07, 0x77, 0xf4, 0x64, 0x2f, 0xa6, 0x9c, 0xec, 0x4d, 0x4a, 0xf6, 0x35, 0xac, 0xad, 0x09,
+ 0xd9, 0x28, 0xa5, 0x8f, 0x33, 0xb0, 0xb5, 0xa3, 0xd9, 0x4d, 0xcf, 0xc3, 0xa4, 0xa6, 0x3d, 0x3d,
+ 0x0f, 0x13, 0x3b, 0xea, 0x35, 0x2a, 0xbb, 0x71, 0xe5, 0x26, 0x65, 0x14, 0x68, 0xaa, 0xf5, 0x00,
+ 0x50, 0xd1, 0xe1, 0xb4, 0xff, 0x46, 0xb0, 0x29, 0xda, 0xf4, 0xe2, 0xc2, 0xea, 0x38, 0x85, 0x1a,
+ 0x75, 0x69, 0xa6, 0x1b, 0x13, 0xae, 0xc1, 0x3b, 0x54, 0x82, 0x06, 0xf6, 0x06, 0xa3, 0x40, 0xa4,
+ 0xf3, 0x8f, 0x50, 0xf7, 0x6f, 0x3f, 0x7e, 0x8c, 0x60, 0x5b, 0x4c, 0x5f, 0x8c, 0x53, 0x1b, 0x85,
+ 0xe4, 0x26, 0x5d, 0x7a, 0xae, 0x6b, 0x3b, 0x2e, 0xc3, 0x65, 0x2a, 0xc3, 0xab, 0xf8, 0x7c, 0x1f,
+ 0x32, 0x44, 0x3a, 0xf8, 0x33, 0xe7, 0x1f, 0x2c, 0xe5, 0xd1, 0xc3, 0xa5, 0x3c, 0xfa, 0x7d, 0x29,
+ 0x8f, 0x3e, 0x5d, 0xce, 0x0f, 0x3d, 0x5c, 0xce, 0x0f, 0x3d, 0x5e, 0xce, 0x0f, 0xdd, 0x54, 0x2a,
+ 0xa6, 0x37, 0x57, 0x2f, 0x29, 0x86, 0x5d, 0x55, 0xf9, 0x9f, 0x86, 0x66, 0xc9, 0x38, 0x24, 0xfe,
+ 0x10, 0x3c, 0x7c, 0xf4, 0x90, 0x38, 0xda, 0xbb, 0xe7, 0x10, 0xb7, 0xb4, 0x9e, 0xfe, 0x86, 0x7b,
+ 0xe4, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xb4, 0x58, 0xb6, 0x16, 0xbf, 0x1c, 0x00, 0x00,
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// QueryClient is the client API for Query service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type QueryClient interface {
+ // Channel queries an IBC Channel.
+ Channel(ctx context.Context, in *QueryChannelRequest, opts ...grpc.CallOption) (*QueryChannelResponse, error)
+ // Channels queries all the IBC channels of a chain.
+ Channels(ctx context.Context, in *QueryChannelsRequest, opts ...grpc.CallOption) (*QueryChannelsResponse, error)
+ // ConnectionChannels queries all the channels associated with a connection
+ // end.
+ ConnectionChannels(ctx context.Context, in *QueryConnectionChannelsRequest, opts ...grpc.CallOption) (*QueryConnectionChannelsResponse, error)
+ // ChannelClientState queries for the client state for the channel associated
+ // with the provided channel identifiers.
+ ChannelClientState(ctx context.Context, in *QueryChannelClientStateRequest, opts ...grpc.CallOption) (*QueryChannelClientStateResponse, error)
+ // ChannelConsensusState queries for the consensus state for the channel
+ // associated with the provided channel identifiers.
+ ChannelConsensusState(ctx context.Context, in *QueryChannelConsensusStateRequest, opts ...grpc.CallOption) (*QueryChannelConsensusStateResponse, error)
+ // PacketCommitment queries a stored packet commitment hash.
+ PacketCommitment(ctx context.Context, in *QueryPacketCommitmentRequest, opts ...grpc.CallOption) (*QueryPacketCommitmentResponse, error)
+	// PacketCommitments returns all the packet commitment hashes associated
+ // with a channel.
+ PacketCommitments(ctx context.Context, in *QueryPacketCommitmentsRequest, opts ...grpc.CallOption) (*QueryPacketCommitmentsResponse, error)
+ // PacketReceipt queries if a given packet sequence has been received on the
+	// queried chain.
+ PacketReceipt(ctx context.Context, in *QueryPacketReceiptRequest, opts ...grpc.CallOption) (*QueryPacketReceiptResponse, error)
+ // PacketAcknowledgement queries a stored packet acknowledgement hash.
+ PacketAcknowledgement(ctx context.Context, in *QueryPacketAcknowledgementRequest, opts ...grpc.CallOption) (*QueryPacketAcknowledgementResponse, error)
+ // PacketAcknowledgements returns all the packet acknowledgements associated
+ // with a channel.
+ PacketAcknowledgements(ctx context.Context, in *QueryPacketAcknowledgementsRequest, opts ...grpc.CallOption) (*QueryPacketAcknowledgementsResponse, error)
+ // UnreceivedPackets returns all the unreceived IBC packets associated with a
+ // channel and sequences.
+ UnreceivedPackets(ctx context.Context, in *QueryUnreceivedPacketsRequest, opts ...grpc.CallOption) (*QueryUnreceivedPacketsResponse, error)
+ // UnreceivedAcks returns all the unreceived IBC acknowledgements associated
+ // with a channel and sequences.
+ UnreceivedAcks(ctx context.Context, in *QueryUnreceivedAcksRequest, opts ...grpc.CallOption) (*QueryUnreceivedAcksResponse, error)
+ // NextSequenceReceive returns the next receive sequence for a given channel.
+ NextSequenceReceive(ctx context.Context, in *QueryNextSequenceReceiveRequest, opts ...grpc.CallOption) (*QueryNextSequenceReceiveResponse, error)
+}
+
+type queryClient struct {
+ cc grpc1.ClientConn
+}
+
+func NewQueryClient(cc grpc1.ClientConn) QueryClient {
+ return &queryClient{cc}
+}
+
+func (c *queryClient) Channel(ctx context.Context, in *QueryChannelRequest, opts ...grpc.CallOption) (*QueryChannelResponse, error) {
+ out := new(QueryChannelResponse)
+ err := c.cc.Invoke(ctx, "/ibcgo.core.channel.v1.Query/Channel", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *queryClient) Channels(ctx context.Context, in *QueryChannelsRequest, opts ...grpc.CallOption) (*QueryChannelsResponse, error) {
+ out := new(QueryChannelsResponse)
+ err := c.cc.Invoke(ctx, "/ibcgo.core.channel.v1.Query/Channels", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *queryClient) ConnectionChannels(ctx context.Context, in *QueryConnectionChannelsRequest, opts ...grpc.CallOption) (*QueryConnectionChannelsResponse, error) {
+ out := new(QueryConnectionChannelsResponse)
+ err := c.cc.Invoke(ctx, "/ibcgo.core.channel.v1.Query/ConnectionChannels", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *queryClient) ChannelClientState(ctx context.Context, in *QueryChannelClientStateRequest, opts ...grpc.CallOption) (*QueryChannelClientStateResponse, error) {
+ out := new(QueryChannelClientStateResponse)
+ err := c.cc.Invoke(ctx, "/ibcgo.core.channel.v1.Query/ChannelClientState", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *queryClient) ChannelConsensusState(ctx context.Context, in *QueryChannelConsensusStateRequest, opts ...grpc.CallOption) (*QueryChannelConsensusStateResponse, error) {
+ out := new(QueryChannelConsensusStateResponse)
+ err := c.cc.Invoke(ctx, "/ibcgo.core.channel.v1.Query/ChannelConsensusState", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *queryClient) PacketCommitment(ctx context.Context, in *QueryPacketCommitmentRequest, opts ...grpc.CallOption) (*QueryPacketCommitmentResponse, error) {
+ out := new(QueryPacketCommitmentResponse)
+ err := c.cc.Invoke(ctx, "/ibcgo.core.channel.v1.Query/PacketCommitment", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *queryClient) PacketCommitments(ctx context.Context, in *QueryPacketCommitmentsRequest, opts ...grpc.CallOption) (*QueryPacketCommitmentsResponse, error) {
+ out := new(QueryPacketCommitmentsResponse)
+ err := c.cc.Invoke(ctx, "/ibcgo.core.channel.v1.Query/PacketCommitments", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *queryClient) PacketReceipt(ctx context.Context, in *QueryPacketReceiptRequest, opts ...grpc.CallOption) (*QueryPacketReceiptResponse, error) {
+ out := new(QueryPacketReceiptResponse)
+ err := c.cc.Invoke(ctx, "/ibcgo.core.channel.v1.Query/PacketReceipt", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *queryClient) PacketAcknowledgement(ctx context.Context, in *QueryPacketAcknowledgementRequest, opts ...grpc.CallOption) (*QueryPacketAcknowledgementResponse, error) {
+ out := new(QueryPacketAcknowledgementResponse)
+ err := c.cc.Invoke(ctx, "/ibcgo.core.channel.v1.Query/PacketAcknowledgement", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *queryClient) PacketAcknowledgements(ctx context.Context, in *QueryPacketAcknowledgementsRequest, opts ...grpc.CallOption) (*QueryPacketAcknowledgementsResponse, error) {
+ out := new(QueryPacketAcknowledgementsResponse)
+ err := c.cc.Invoke(ctx, "/ibcgo.core.channel.v1.Query/PacketAcknowledgements", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *queryClient) UnreceivedPackets(ctx context.Context, in *QueryUnreceivedPacketsRequest, opts ...grpc.CallOption) (*QueryUnreceivedPacketsResponse, error) {
+ out := new(QueryUnreceivedPacketsResponse)
+ err := c.cc.Invoke(ctx, "/ibcgo.core.channel.v1.Query/UnreceivedPackets", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *queryClient) UnreceivedAcks(ctx context.Context, in *QueryUnreceivedAcksRequest, opts ...grpc.CallOption) (*QueryUnreceivedAcksResponse, error) {
+ out := new(QueryUnreceivedAcksResponse)
+ err := c.cc.Invoke(ctx, "/ibcgo.core.channel.v1.Query/UnreceivedAcks", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *queryClient) NextSequenceReceive(ctx context.Context, in *QueryNextSequenceReceiveRequest, opts ...grpc.CallOption) (*QueryNextSequenceReceiveResponse, error) {
+ out := new(QueryNextSequenceReceiveResponse)
+ err := c.cc.Invoke(ctx, "/ibcgo.core.channel.v1.Query/NextSequenceReceive", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
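
The generated QueryClient above can be used against any node that exposes this gRPC service. Below is a minimal, standalone sketch (not part of this patch) of wiring it up; the import path and the localhost:9090 endpoint are assumptions for illustration only.

package main

import (
	"context"
	"fmt"
	"log"

	"google.golang.org/grpc"

	channeltypes "example.com/ibc-go/core/04-channel/types" // hypothetical import path for this generated package
)

func main() {
	// Dial a node's gRPC endpoint (the address here is an assumption).
	conn, err := grpc.Dial("localhost:9090", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// *grpc.ClientConn provides Invoke/NewStream and therefore satisfies the
	// grpc1.ClientConn interface expected by NewQueryClient.
	queryClient := channeltypes.NewQueryClient(conn)

	// Query a single channel end by port and channel identifier.
	res, err := queryClient.Channel(context.Background(), &channeltypes.QueryChannelRequest{
		PortId:    "transfer",
		ChannelId: "channel-0",
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(res.Channel)
}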
+
+// QueryServer is the server API for Query service.
+type QueryServer interface {
+ // Channel queries an IBC Channel.
+ Channel(context.Context, *QueryChannelRequest) (*QueryChannelResponse, error)
+ // Channels queries all the IBC channels of a chain.
+ Channels(context.Context, *QueryChannelsRequest) (*QueryChannelsResponse, error)
+ // ConnectionChannels queries all the channels associated with a connection
+ // end.
+ ConnectionChannels(context.Context, *QueryConnectionChannelsRequest) (*QueryConnectionChannelsResponse, error)
+ // ChannelClientState queries for the client state for the channel associated
+ // with the provided channel identifiers.
+ ChannelClientState(context.Context, *QueryChannelClientStateRequest) (*QueryChannelClientStateResponse, error)
+ // ChannelConsensusState queries for the consensus state for the channel
+ // associated with the provided channel identifiers.
+ ChannelConsensusState(context.Context, *QueryChannelConsensusStateRequest) (*QueryChannelConsensusStateResponse, error)
+ // PacketCommitment queries a stored packet commitment hash.
+ PacketCommitment(context.Context, *QueryPacketCommitmentRequest) (*QueryPacketCommitmentResponse, error)
+	// PacketCommitments returns all the packet commitment hashes associated
+ // with a channel.
+ PacketCommitments(context.Context, *QueryPacketCommitmentsRequest) (*QueryPacketCommitmentsResponse, error)
+ // PacketReceipt queries if a given packet sequence has been received on the
+	// queried chain.
+ PacketReceipt(context.Context, *QueryPacketReceiptRequest) (*QueryPacketReceiptResponse, error)
+ // PacketAcknowledgement queries a stored packet acknowledgement hash.
+ PacketAcknowledgement(context.Context, *QueryPacketAcknowledgementRequest) (*QueryPacketAcknowledgementResponse, error)
+ // PacketAcknowledgements returns all the packet acknowledgements associated
+ // with a channel.
+ PacketAcknowledgements(context.Context, *QueryPacketAcknowledgementsRequest) (*QueryPacketAcknowledgementsResponse, error)
+ // UnreceivedPackets returns all the unreceived IBC packets associated with a
+ // channel and sequences.
+ UnreceivedPackets(context.Context, *QueryUnreceivedPacketsRequest) (*QueryUnreceivedPacketsResponse, error)
+ // UnreceivedAcks returns all the unreceived IBC acknowledgements associated
+ // with a channel and sequences.
+ UnreceivedAcks(context.Context, *QueryUnreceivedAcksRequest) (*QueryUnreceivedAcksResponse, error)
+ // NextSequenceReceive returns the next receive sequence for a given channel.
+ NextSequenceReceive(context.Context, *QueryNextSequenceReceiveRequest) (*QueryNextSequenceReceiveResponse, error)
+}
+
+// UnimplementedQueryServer can be embedded to have forward compatible implementations.
+type UnimplementedQueryServer struct {
+}
+
+func (*UnimplementedQueryServer) Channel(ctx context.Context, req *QueryChannelRequest) (*QueryChannelResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Channel not implemented")
+}
+func (*UnimplementedQueryServer) Channels(ctx context.Context, req *QueryChannelsRequest) (*QueryChannelsResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Channels not implemented")
+}
+func (*UnimplementedQueryServer) ConnectionChannels(ctx context.Context, req *QueryConnectionChannelsRequest) (*QueryConnectionChannelsResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ConnectionChannels not implemented")
+}
+func (*UnimplementedQueryServer) ChannelClientState(ctx context.Context, req *QueryChannelClientStateRequest) (*QueryChannelClientStateResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ChannelClientState not implemented")
+}
+func (*UnimplementedQueryServer) ChannelConsensusState(ctx context.Context, req *QueryChannelConsensusStateRequest) (*QueryChannelConsensusStateResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ChannelConsensusState not implemented")
+}
+func (*UnimplementedQueryServer) PacketCommitment(ctx context.Context, req *QueryPacketCommitmentRequest) (*QueryPacketCommitmentResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method PacketCommitment not implemented")
+}
+func (*UnimplementedQueryServer) PacketCommitments(ctx context.Context, req *QueryPacketCommitmentsRequest) (*QueryPacketCommitmentsResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method PacketCommitments not implemented")
+}
+func (*UnimplementedQueryServer) PacketReceipt(ctx context.Context, req *QueryPacketReceiptRequest) (*QueryPacketReceiptResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method PacketReceipt not implemented")
+}
+func (*UnimplementedQueryServer) PacketAcknowledgement(ctx context.Context, req *QueryPacketAcknowledgementRequest) (*QueryPacketAcknowledgementResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method PacketAcknowledgement not implemented")
+}
+func (*UnimplementedQueryServer) PacketAcknowledgements(ctx context.Context, req *QueryPacketAcknowledgementsRequest) (*QueryPacketAcknowledgementsResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method PacketAcknowledgements not implemented")
+}
+func (*UnimplementedQueryServer) UnreceivedPackets(ctx context.Context, req *QueryUnreceivedPacketsRequest) (*QueryUnreceivedPacketsResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method UnreceivedPackets not implemented")
+}
+func (*UnimplementedQueryServer) UnreceivedAcks(ctx context.Context, req *QueryUnreceivedAcksRequest) (*QueryUnreceivedAcksResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method UnreceivedAcks not implemented")
+}
+func (*UnimplementedQueryServer) NextSequenceReceive(ctx context.Context, req *QueryNextSequenceReceiveRequest) (*QueryNextSequenceReceiveResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method NextSequenceReceive not implemented")
+}
+
+func RegisterQueryServer(s grpc1.Server, srv QueryServer) {
+ s.RegisterService(&_Query_serviceDesc, srv)
+}
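
On the server side, an implementation embeds UnimplementedQueryServer for forward compatibility and is passed to RegisterQueryServer. The standalone sketch below (not part of this patch; the import path is hypothetical) shows the shape of that wiring; it returns an empty response where a real handler would read channel state from the store.

package main

import (
	"context"
	"net"

	"google.golang.org/grpc"

	channeltypes "example.com/ibc-go/core/04-channel/types" // hypothetical import path for this generated package
)

// channelQuerier implements only the RPCs it needs; the embedded
// UnimplementedQueryServer supplies "not implemented" stubs for the rest.
type channelQuerier struct {
	channeltypes.UnimplementedQueryServer
}

func (q *channelQuerier) Channel(ctx context.Context, req *channeltypes.QueryChannelRequest) (*channeltypes.QueryChannelResponse, error) {
	// A real implementation would look up the channel end keyed by req.PortId/req.ChannelId.
	return &channeltypes.QueryChannelResponse{}, nil
}

func main() {
	lis, err := net.Listen("tcp", ":9090")
	if err != nil {
		panic(err)
	}
	s := grpc.NewServer()
	// *grpc.Server provides RegisterService and therefore satisfies the
	// grpc1.Server interface expected by RegisterQueryServer.
	channeltypes.RegisterQueryServer(s, &channelQuerier{})
	_ = s.Serve(lis)
}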
+
+func _Query_Channel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(QueryChannelRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(QueryServer).Channel(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/ibcgo.core.channel.v1.Query/Channel",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(QueryServer).Channel(ctx, req.(*QueryChannelRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Query_Channels_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(QueryChannelsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(QueryServer).Channels(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/ibcgo.core.channel.v1.Query/Channels",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(QueryServer).Channels(ctx, req.(*QueryChannelsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Query_ConnectionChannels_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(QueryConnectionChannelsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(QueryServer).ConnectionChannels(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/ibcgo.core.channel.v1.Query/ConnectionChannels",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(QueryServer).ConnectionChannels(ctx, req.(*QueryConnectionChannelsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Query_ChannelClientState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(QueryChannelClientStateRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(QueryServer).ChannelClientState(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/ibcgo.core.channel.v1.Query/ChannelClientState",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(QueryServer).ChannelClientState(ctx, req.(*QueryChannelClientStateRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Query_ChannelConsensusState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(QueryChannelConsensusStateRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(QueryServer).ChannelConsensusState(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/ibcgo.core.channel.v1.Query/ChannelConsensusState",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(QueryServer).ChannelConsensusState(ctx, req.(*QueryChannelConsensusStateRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Query_PacketCommitment_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(QueryPacketCommitmentRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(QueryServer).PacketCommitment(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/ibcgo.core.channel.v1.Query/PacketCommitment",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(QueryServer).PacketCommitment(ctx, req.(*QueryPacketCommitmentRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Query_PacketCommitments_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(QueryPacketCommitmentsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(QueryServer).PacketCommitments(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/ibcgo.core.channel.v1.Query/PacketCommitments",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(QueryServer).PacketCommitments(ctx, req.(*QueryPacketCommitmentsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Query_PacketReceipt_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(QueryPacketReceiptRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(QueryServer).PacketReceipt(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/ibcgo.core.channel.v1.Query/PacketReceipt",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(QueryServer).PacketReceipt(ctx, req.(*QueryPacketReceiptRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Query_PacketAcknowledgement_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(QueryPacketAcknowledgementRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(QueryServer).PacketAcknowledgement(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/ibcgo.core.channel.v1.Query/PacketAcknowledgement",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(QueryServer).PacketAcknowledgement(ctx, req.(*QueryPacketAcknowledgementRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Query_PacketAcknowledgements_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(QueryPacketAcknowledgementsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(QueryServer).PacketAcknowledgements(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/ibcgo.core.channel.v1.Query/PacketAcknowledgements",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(QueryServer).PacketAcknowledgements(ctx, req.(*QueryPacketAcknowledgementsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Query_UnreceivedPackets_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(QueryUnreceivedPacketsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(QueryServer).UnreceivedPackets(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/ibcgo.core.channel.v1.Query/UnreceivedPackets",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(QueryServer).UnreceivedPackets(ctx, req.(*QueryUnreceivedPacketsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Query_UnreceivedAcks_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(QueryUnreceivedAcksRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(QueryServer).UnreceivedAcks(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/ibcgo.core.channel.v1.Query/UnreceivedAcks",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(QueryServer).UnreceivedAcks(ctx, req.(*QueryUnreceivedAcksRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Query_NextSequenceReceive_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(QueryNextSequenceReceiveRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(QueryServer).NextSequenceReceive(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/ibcgo.core.channel.v1.Query/NextSequenceReceive",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(QueryServer).NextSequenceReceive(ctx, req.(*QueryNextSequenceReceiveRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _Query_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "ibcgo.core.channel.v1.Query",
+ HandlerType: (*QueryServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "Channel",
+ Handler: _Query_Channel_Handler,
+ },
+ {
+ MethodName: "Channels",
+ Handler: _Query_Channels_Handler,
+ },
+ {
+ MethodName: "ConnectionChannels",
+ Handler: _Query_ConnectionChannels_Handler,
+ },
+ {
+ MethodName: "ChannelClientState",
+ Handler: _Query_ChannelClientState_Handler,
+ },
+ {
+ MethodName: "ChannelConsensusState",
+ Handler: _Query_ChannelConsensusState_Handler,
+ },
+ {
+ MethodName: "PacketCommitment",
+ Handler: _Query_PacketCommitment_Handler,
+ },
+ {
+ MethodName: "PacketCommitments",
+ Handler: _Query_PacketCommitments_Handler,
+ },
+ {
+ MethodName: "PacketReceipt",
+ Handler: _Query_PacketReceipt_Handler,
+ },
+ {
+ MethodName: "PacketAcknowledgement",
+ Handler: _Query_PacketAcknowledgement_Handler,
+ },
+ {
+ MethodName: "PacketAcknowledgements",
+ Handler: _Query_PacketAcknowledgements_Handler,
+ },
+ {
+ MethodName: "UnreceivedPackets",
+ Handler: _Query_UnreceivedPackets_Handler,
+ },
+ {
+ MethodName: "UnreceivedAcks",
+ Handler: _Query_UnreceivedAcks_Handler,
+ },
+ {
+ MethodName: "NextSequenceReceive",
+ Handler: _Query_NextSequenceReceive_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "ibcgo/core/channel/v1/query.proto",
+}
+
+func (m *QueryChannelRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *QueryChannelRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *QueryChannelRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.ChannelId) > 0 {
+ i -= len(m.ChannelId)
+ copy(dAtA[i:], m.ChannelId)
+ i = encodeVarintQuery(dAtA, i, uint64(len(m.ChannelId)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.PortId) > 0 {
+ i -= len(m.PortId)
+ copy(dAtA[i:], m.PortId)
+ i = encodeVarintQuery(dAtA, i, uint64(len(m.PortId)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
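
The generated marshalers above all follow the same gogoproto pattern: MarshalToSizedBuffer fills the buffer from the end toward the start, writing fields in descending field-number order, so Marshal can allocate exactly Size() bytes once and slice off the unused prefix. The standalone sketch below (not part of this patch) mimics that pattern for a hypothetical two-string message, assuming both strings are non-empty, to make the index arithmetic explicit.

package main

import "fmt"

// encodeVarint writes v as a protobuf varint ending just before offset and
// returns the new offset, mirroring the generated encodeVarintQuery helper.
func encodeVarint(dAtA []byte, offset int, v uint64) int {
	offset -= sovLen(v)
	base := offset
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dAtA[offset] = uint8(v)
	return base
}

// sovLen returns the number of bytes the varint encoding of v occupies.
func sovLen(v uint64) (n int) {
	for {
		n++
		v >>= 7
		if v == 0 {
			return n
		}
	}
}

// marshalPortChannel writes port_id (field 1) and channel_id (field 2) as
// length-delimited fields, back to front, in the same way as the generated code.
func marshalPortChannel(portID, channelID string) []byte {
	size := 1 + sovLen(uint64(len(portID))) + len(portID) +
		1 + sovLen(uint64(len(channelID))) + len(channelID)
	dAtA := make([]byte, size)
	i := size
	// channel_id: tag 0x12 = field 2, wire type 2 (length-delimited)
	i -= len(channelID)
	copy(dAtA[i:], channelID)
	i = encodeVarint(dAtA, i, uint64(len(channelID)))
	i--
	dAtA[i] = 0x12
	// port_id: tag 0xa = field 1, wire type 2
	i -= len(portID)
	copy(dAtA[i:], portID)
	i = encodeVarint(dAtA, i, uint64(len(portID)))
	i--
	dAtA[i] = 0xa
	return dAtA[i:]
}

func main() {
	fmt.Printf("% x\n", marshalPortChannel("transfer", "channel-0"))
}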
+
+func (m *QueryChannelResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *QueryChannelResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *QueryChannelResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.ProofHeight.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintQuery(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ if len(m.Proof) > 0 {
+ i -= len(m.Proof)
+ copy(dAtA[i:], m.Proof)
+ i = encodeVarintQuery(dAtA, i, uint64(len(m.Proof)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.Channel != nil {
+ {
+ size, err := m.Channel.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintQuery(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *QueryChannelsRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *QueryChannelsRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *QueryChannelsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Pagination != nil {
+ {
+ size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintQuery(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *QueryChannelsResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *QueryChannelsResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *QueryChannelsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Height.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintQuery(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ if m.Pagination != nil {
+ {
+ size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintQuery(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Channels) > 0 {
+ for iNdEx := len(m.Channels) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Channels[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintQuery(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *QueryConnectionChannelsRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *QueryConnectionChannelsRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *QueryConnectionChannelsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Pagination != nil {
+ {
+ size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintQuery(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Connection) > 0 {
+ i -= len(m.Connection)
+ copy(dAtA[i:], m.Connection)
+ i = encodeVarintQuery(dAtA, i, uint64(len(m.Connection)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *QueryConnectionChannelsResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *QueryConnectionChannelsResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *QueryConnectionChannelsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Height.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintQuery(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ if m.Pagination != nil {
+ {
+ size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintQuery(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Channels) > 0 {
+ for iNdEx := len(m.Channels) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Channels[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintQuery(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *QueryChannelClientStateRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *QueryChannelClientStateRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *QueryChannelClientStateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.ChannelId) > 0 {
+ i -= len(m.ChannelId)
+ copy(dAtA[i:], m.ChannelId)
+ i = encodeVarintQuery(dAtA, i, uint64(len(m.ChannelId)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.PortId) > 0 {
+ i -= len(m.PortId)
+ copy(dAtA[i:], m.PortId)
+ i = encodeVarintQuery(dAtA, i, uint64(len(m.PortId)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *QueryChannelClientStateResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *QueryChannelClientStateResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *QueryChannelClientStateResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.ProofHeight.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintQuery(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ if len(m.Proof) > 0 {
+ i -= len(m.Proof)
+ copy(dAtA[i:], m.Proof)
+ i = encodeVarintQuery(dAtA, i, uint64(len(m.Proof)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.IdentifiedClientState != nil {
+ {
+ size, err := m.IdentifiedClientState.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintQuery(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *QueryChannelConsensusStateRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *QueryChannelConsensusStateRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *QueryChannelConsensusStateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.RevisionHeight != 0 {
+ i = encodeVarintQuery(dAtA, i, uint64(m.RevisionHeight))
+ i--
+ dAtA[i] = 0x20
+ }
+ if m.RevisionNumber != 0 {
+ i = encodeVarintQuery(dAtA, i, uint64(m.RevisionNumber))
+ i--
+ dAtA[i] = 0x18
+ }
+ if len(m.ChannelId) > 0 {
+ i -= len(m.ChannelId)
+ copy(dAtA[i:], m.ChannelId)
+ i = encodeVarintQuery(dAtA, i, uint64(len(m.ChannelId)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.PortId) > 0 {
+ i -= len(m.PortId)
+ copy(dAtA[i:], m.PortId)
+ i = encodeVarintQuery(dAtA, i, uint64(len(m.PortId)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *QueryChannelConsensusStateResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *QueryChannelConsensusStateResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *QueryChannelConsensusStateResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.ProofHeight.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintQuery(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ if len(m.Proof) > 0 {
+ i -= len(m.Proof)
+ copy(dAtA[i:], m.Proof)
+ i = encodeVarintQuery(dAtA, i, uint64(len(m.Proof)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if len(m.ClientId) > 0 {
+ i -= len(m.ClientId)
+ copy(dAtA[i:], m.ClientId)
+ i = encodeVarintQuery(dAtA, i, uint64(len(m.ClientId)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.ConsensusState != nil {
+ {
+ size, err := m.ConsensusState.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintQuery(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *QueryPacketCommitmentRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *QueryPacketCommitmentRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *QueryPacketCommitmentRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Sequence != 0 {
+ i = encodeVarintQuery(dAtA, i, uint64(m.Sequence))
+ i--
+ dAtA[i] = 0x18
+ }
+ if len(m.ChannelId) > 0 {
+ i -= len(m.ChannelId)
+ copy(dAtA[i:], m.ChannelId)
+ i = encodeVarintQuery(dAtA, i, uint64(len(m.ChannelId)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.PortId) > 0 {
+ i -= len(m.PortId)
+ copy(dAtA[i:], m.PortId)
+ i = encodeVarintQuery(dAtA, i, uint64(len(m.PortId)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *QueryPacketCommitmentResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *QueryPacketCommitmentResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *QueryPacketCommitmentResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.ProofHeight.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintQuery(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ if len(m.Proof) > 0 {
+ i -= len(m.Proof)
+ copy(dAtA[i:], m.Proof)
+ i = encodeVarintQuery(dAtA, i, uint64(len(m.Proof)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Commitment) > 0 {
+ i -= len(m.Commitment)
+ copy(dAtA[i:], m.Commitment)
+ i = encodeVarintQuery(dAtA, i, uint64(len(m.Commitment)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *QueryPacketCommitmentsRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *QueryPacketCommitmentsRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *QueryPacketCommitmentsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Pagination != nil {
+ {
+ size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintQuery(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ if len(m.ChannelId) > 0 {
+ i -= len(m.ChannelId)
+ copy(dAtA[i:], m.ChannelId)
+ i = encodeVarintQuery(dAtA, i, uint64(len(m.ChannelId)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.PortId) > 0 {
+ i -= len(m.PortId)
+ copy(dAtA[i:], m.PortId)
+ i = encodeVarintQuery(dAtA, i, uint64(len(m.PortId)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *QueryPacketCommitmentsResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *QueryPacketCommitmentsResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *QueryPacketCommitmentsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Height.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintQuery(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ if m.Pagination != nil {
+ {
+ size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintQuery(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Commitments) > 0 {
+ for iNdEx := len(m.Commitments) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Commitments[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintQuery(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *QueryPacketReceiptRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *QueryPacketReceiptRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *QueryPacketReceiptRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Sequence != 0 {
+ i = encodeVarintQuery(dAtA, i, uint64(m.Sequence))
+ i--
+ dAtA[i] = 0x18
+ }
+ if len(m.ChannelId) > 0 {
+ i -= len(m.ChannelId)
+ copy(dAtA[i:], m.ChannelId)
+ i = encodeVarintQuery(dAtA, i, uint64(len(m.ChannelId)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.PortId) > 0 {
+ i -= len(m.PortId)
+ copy(dAtA[i:], m.PortId)
+ i = encodeVarintQuery(dAtA, i, uint64(len(m.PortId)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *QueryPacketReceiptResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *QueryPacketReceiptResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *QueryPacketReceiptResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.ProofHeight.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintQuery(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ if len(m.Proof) > 0 {
+ i -= len(m.Proof)
+ copy(dAtA[i:], m.Proof)
+ i = encodeVarintQuery(dAtA, i, uint64(len(m.Proof)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.Received {
+ i--
+ if m.Received {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x10
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *QueryPacketAcknowledgementRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *QueryPacketAcknowledgementRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *QueryPacketAcknowledgementRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Sequence != 0 {
+ i = encodeVarintQuery(dAtA, i, uint64(m.Sequence))
+ i--
+ dAtA[i] = 0x18
+ }
+ if len(m.ChannelId) > 0 {
+ i -= len(m.ChannelId)
+ copy(dAtA[i:], m.ChannelId)
+ i = encodeVarintQuery(dAtA, i, uint64(len(m.ChannelId)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.PortId) > 0 {
+ i -= len(m.PortId)
+ copy(dAtA[i:], m.PortId)
+ i = encodeVarintQuery(dAtA, i, uint64(len(m.PortId)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *QueryPacketAcknowledgementResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *QueryPacketAcknowledgementResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *QueryPacketAcknowledgementResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.ProofHeight.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintQuery(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ if len(m.Proof) > 0 {
+ i -= len(m.Proof)
+ copy(dAtA[i:], m.Proof)
+ i = encodeVarintQuery(dAtA, i, uint64(len(m.Proof)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Acknowledgement) > 0 {
+ i -= len(m.Acknowledgement)
+ copy(dAtA[i:], m.Acknowledgement)
+ i = encodeVarintQuery(dAtA, i, uint64(len(m.Acknowledgement)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *QueryPacketAcknowledgementsRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *QueryPacketAcknowledgementsRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *QueryPacketAcknowledgementsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Pagination != nil {
+ {
+ size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintQuery(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ if len(m.ChannelId) > 0 {
+ i -= len(m.ChannelId)
+ copy(dAtA[i:], m.ChannelId)
+ i = encodeVarintQuery(dAtA, i, uint64(len(m.ChannelId)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.PortId) > 0 {
+ i -= len(m.PortId)
+ copy(dAtA[i:], m.PortId)
+ i = encodeVarintQuery(dAtA, i, uint64(len(m.PortId)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *QueryPacketAcknowledgementsResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *QueryPacketAcknowledgementsResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *QueryPacketAcknowledgementsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Height.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintQuery(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ if m.Pagination != nil {
+ {
+ size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintQuery(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Acknowledgements) > 0 {
+ for iNdEx := len(m.Acknowledgements) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Acknowledgements[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintQuery(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *QueryUnreceivedPacketsRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *QueryUnreceivedPacketsRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *QueryUnreceivedPacketsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.PacketCommitmentSequences) > 0 {
+ dAtA23 := make([]byte, len(m.PacketCommitmentSequences)*10)
+ var j22 int
+ for _, num := range m.PacketCommitmentSequences {
+ for num >= 1<<7 {
+ dAtA23[j22] = uint8(uint64(num)&0x7f | 0x80)
+ num >>= 7
+ j22++
+ }
+ dAtA23[j22] = uint8(num)
+ j22++
+ }
+ i -= j22
+ copy(dAtA[i:], dAtA23[:j22])
+ i = encodeVarintQuery(dAtA, i, uint64(j22))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if len(m.ChannelId) > 0 {
+ i -= len(m.ChannelId)
+ copy(dAtA[i:], m.ChannelId)
+ i = encodeVarintQuery(dAtA, i, uint64(len(m.ChannelId)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.PortId) > 0 {
+ i -= len(m.PortId)
+ copy(dAtA[i:], m.PortId)
+ i = encodeVarintQuery(dAtA, i, uint64(len(m.PortId)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
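+
+// Packed repeated uint64 fields such as PacketCommitmentSequences are encoded
+// above by varint-writing every element into a scratch buffer (at most 10
+// bytes per value), then emitting that run once as a single length-delimited
+// field, rather than tagging each element separately.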
+
+func (m *QueryUnreceivedPacketsResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *QueryUnreceivedPacketsResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *QueryUnreceivedPacketsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Height.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintQuery(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ if len(m.Sequences) > 0 {
+ dAtA26 := make([]byte, len(m.Sequences)*10)
+ var j25 int
+ for _, num := range m.Sequences {
+ for num >= 1<<7 {
+ dAtA26[j25] = uint8(uint64(num)&0x7f | 0x80)
+ num >>= 7
+ j25++
+ }
+ dAtA26[j25] = uint8(num)
+ j25++
+ }
+ i -= j25
+ copy(dAtA[i:], dAtA26[:j25])
+ i = encodeVarintQuery(dAtA, i, uint64(j25))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *QueryUnreceivedAcksRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *QueryUnreceivedAcksRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *QueryUnreceivedAcksRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.PacketAckSequences) > 0 {
+ dAtA28 := make([]byte, len(m.PacketAckSequences)*10)
+ var j27 int
+ for _, num := range m.PacketAckSequences {
+ for num >= 1<<7 {
+ dAtA28[j27] = uint8(uint64(num)&0x7f | 0x80)
+ num >>= 7
+ j27++
+ }
+ dAtA28[j27] = uint8(num)
+ j27++
+ }
+ i -= j27
+ copy(dAtA[i:], dAtA28[:j27])
+ i = encodeVarintQuery(dAtA, i, uint64(j27))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if len(m.ChannelId) > 0 {
+ i -= len(m.ChannelId)
+ copy(dAtA[i:], m.ChannelId)
+ i = encodeVarintQuery(dAtA, i, uint64(len(m.ChannelId)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.PortId) > 0 {
+ i -= len(m.PortId)
+ copy(dAtA[i:], m.PortId)
+ i = encodeVarintQuery(dAtA, i, uint64(len(m.PortId)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *QueryUnreceivedAcksResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *QueryUnreceivedAcksResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *QueryUnreceivedAcksResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Height.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintQuery(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ if len(m.Sequences) > 0 {
+ dAtA31 := make([]byte, len(m.Sequences)*10)
+ var j30 int
+ for _, num := range m.Sequences {
+ for num >= 1<<7 {
+ dAtA31[j30] = uint8(uint64(num)&0x7f | 0x80)
+ num >>= 7
+ j30++
+ }
+ dAtA31[j30] = uint8(num)
+ j30++
+ }
+ i -= j30
+ copy(dAtA[i:], dAtA31[:j30])
+ i = encodeVarintQuery(dAtA, i, uint64(j30))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *QueryNextSequenceReceiveRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *QueryNextSequenceReceiveRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *QueryNextSequenceReceiveRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.ChannelId) > 0 {
+ i -= len(m.ChannelId)
+ copy(dAtA[i:], m.ChannelId)
+ i = encodeVarintQuery(dAtA, i, uint64(len(m.ChannelId)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.PortId) > 0 {
+ i -= len(m.PortId)
+ copy(dAtA[i:], m.PortId)
+ i = encodeVarintQuery(dAtA, i, uint64(len(m.PortId)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *QueryNextSequenceReceiveResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *QueryNextSequenceReceiveResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *QueryNextSequenceReceiveResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.ProofHeight.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintQuery(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ if len(m.Proof) > 0 {
+ i -= len(m.Proof)
+ copy(dAtA[i:], m.Proof)
+ i = encodeVarintQuery(dAtA, i, uint64(len(m.Proof)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.NextSequenceReceive != 0 {
+ i = encodeVarintQuery(dAtA, i, uint64(m.NextSequenceReceive))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
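+// encodeVarintQuery writes v as a base-128 varint ending at offset: the caller
+// passes the index just past where the encoded value should end, the helper
+// steps back by sovQuery(v) bytes, emits 7 bits of v per byte with the high
+// bit set on every byte but the last, and returns the index of the first byte
+// written. For example, v = 300 occupies two bytes, 0xac 0x02.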
+func encodeVarintQuery(dAtA []byte, offset int, v uint64) int {
+ offset -= sovQuery(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *QueryChannelRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.PortId)
+ if l > 0 {
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ l = len(m.ChannelId)
+ if l > 0 {
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ return n
+}
+
+func (m *QueryChannelResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Channel != nil {
+ l = m.Channel.Size()
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ l = len(m.Proof)
+ if l > 0 {
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ l = m.ProofHeight.Size()
+ n += 1 + l + sovQuery(uint64(l))
+ return n
+}
+
+func (m *QueryChannelsRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Pagination != nil {
+ l = m.Pagination.Size()
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ return n
+}
+
+func (m *QueryChannelsResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Channels) > 0 {
+ for _, e := range m.Channels {
+ l = e.Size()
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ }
+ if m.Pagination != nil {
+ l = m.Pagination.Size()
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ l = m.Height.Size()
+ n += 1 + l + sovQuery(uint64(l))
+ return n
+}
+
+func (m *QueryConnectionChannelsRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Connection)
+ if l > 0 {
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ if m.Pagination != nil {
+ l = m.Pagination.Size()
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ return n
+}
+
+func (m *QueryConnectionChannelsResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Channels) > 0 {
+ for _, e := range m.Channels {
+ l = e.Size()
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ }
+ if m.Pagination != nil {
+ l = m.Pagination.Size()
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ l = m.Height.Size()
+ n += 1 + l + sovQuery(uint64(l))
+ return n
+}
+
+func (m *QueryChannelClientStateRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.PortId)
+ if l > 0 {
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ l = len(m.ChannelId)
+ if l > 0 {
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ return n
+}
+
+func (m *QueryChannelClientStateResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.IdentifiedClientState != nil {
+ l = m.IdentifiedClientState.Size()
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ l = len(m.Proof)
+ if l > 0 {
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ l = m.ProofHeight.Size()
+ n += 1 + l + sovQuery(uint64(l))
+ return n
+}
+
+func (m *QueryChannelConsensusStateRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.PortId)
+ if l > 0 {
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ l = len(m.ChannelId)
+ if l > 0 {
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ if m.RevisionNumber != 0 {
+ n += 1 + sovQuery(uint64(m.RevisionNumber))
+ }
+ if m.RevisionHeight != 0 {
+ n += 1 + sovQuery(uint64(m.RevisionHeight))
+ }
+ return n
+}
+
+func (m *QueryChannelConsensusStateResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.ConsensusState != nil {
+ l = m.ConsensusState.Size()
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ l = len(m.ClientId)
+ if l > 0 {
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ l = len(m.Proof)
+ if l > 0 {
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ l = m.ProofHeight.Size()
+ n += 1 + l + sovQuery(uint64(l))
+ return n
+}
+
+func (m *QueryPacketCommitmentRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.PortId)
+ if l > 0 {
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ l = len(m.ChannelId)
+ if l > 0 {
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ if m.Sequence != 0 {
+ n += 1 + sovQuery(uint64(m.Sequence))
+ }
+ return n
+}
+
+func (m *QueryPacketCommitmentResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Commitment)
+ if l > 0 {
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ l = len(m.Proof)
+ if l > 0 {
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ l = m.ProofHeight.Size()
+ n += 1 + l + sovQuery(uint64(l))
+ return n
+}
+
+func (m *QueryPacketCommitmentsRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.PortId)
+ if l > 0 {
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ l = len(m.ChannelId)
+ if l > 0 {
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ if m.Pagination != nil {
+ l = m.Pagination.Size()
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ return n
+}
+
+func (m *QueryPacketCommitmentsResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Commitments) > 0 {
+ for _, e := range m.Commitments {
+ l = e.Size()
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ }
+ if m.Pagination != nil {
+ l = m.Pagination.Size()
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ l = m.Height.Size()
+ n += 1 + l + sovQuery(uint64(l))
+ return n
+}
+
+func (m *QueryPacketReceiptRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.PortId)
+ if l > 0 {
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ l = len(m.ChannelId)
+ if l > 0 {
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ if m.Sequence != 0 {
+ n += 1 + sovQuery(uint64(m.Sequence))
+ }
+ return n
+}
+
+func (m *QueryPacketReceiptResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Received {
+ n += 2
+ }
+ l = len(m.Proof)
+ if l > 0 {
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ l = m.ProofHeight.Size()
+ n += 1 + l + sovQuery(uint64(l))
+ return n
+}
+
+func (m *QueryPacketAcknowledgementRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.PortId)
+ if l > 0 {
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ l = len(m.ChannelId)
+ if l > 0 {
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ if m.Sequence != 0 {
+ n += 1 + sovQuery(uint64(m.Sequence))
+ }
+ return n
+}
+
+func (m *QueryPacketAcknowledgementResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Acknowledgement)
+ if l > 0 {
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ l = len(m.Proof)
+ if l > 0 {
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ l = m.ProofHeight.Size()
+ n += 1 + l + sovQuery(uint64(l))
+ return n
+}
+
+func (m *QueryPacketAcknowledgementsRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.PortId)
+ if l > 0 {
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ l = len(m.ChannelId)
+ if l > 0 {
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ if m.Pagination != nil {
+ l = m.Pagination.Size()
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ return n
+}
+
+func (m *QueryPacketAcknowledgementsResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Acknowledgements) > 0 {
+ for _, e := range m.Acknowledgements {
+ l = e.Size()
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ }
+ if m.Pagination != nil {
+ l = m.Pagination.Size()
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ l = m.Height.Size()
+ n += 1 + l + sovQuery(uint64(l))
+ return n
+}
+
+func (m *QueryUnreceivedPacketsRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.PortId)
+ if l > 0 {
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ l = len(m.ChannelId)
+ if l > 0 {
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ if len(m.PacketCommitmentSequences) > 0 {
+ l = 0
+ for _, e := range m.PacketCommitmentSequences {
+ l += sovQuery(uint64(e))
+ }
+ n += 1 + sovQuery(uint64(l)) + l
+ }
+ return n
+}
+
+func (m *QueryUnreceivedPacketsResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Sequences) > 0 {
+ l = 0
+ for _, e := range m.Sequences {
+ l += sovQuery(uint64(e))
+ }
+ n += 1 + sovQuery(uint64(l)) + l
+ }
+ l = m.Height.Size()
+ n += 1 + l + sovQuery(uint64(l))
+ return n
+}
+
+func (m *QueryUnreceivedAcksRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.PortId)
+ if l > 0 {
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ l = len(m.ChannelId)
+ if l > 0 {
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ if len(m.PacketAckSequences) > 0 {
+ l = 0
+ for _, e := range m.PacketAckSequences {
+ l += sovQuery(uint64(e))
+ }
+ n += 1 + sovQuery(uint64(l)) + l
+ }
+ return n
+}
+
+func (m *QueryUnreceivedAcksResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Sequences) > 0 {
+ l = 0
+ for _, e := range m.Sequences {
+ l += sovQuery(uint64(e))
+ }
+ n += 1 + sovQuery(uint64(l)) + l
+ }
+ l = m.Height.Size()
+ n += 1 + l + sovQuery(uint64(l))
+ return n
+}
+
+func (m *QueryNextSequenceReceiveRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.PortId)
+ if l > 0 {
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ l = len(m.ChannelId)
+ if l > 0 {
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ return n
+}
+
+func (m *QueryNextSequenceReceiveResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.NextSequenceReceive != 0 {
+ n += 1 + sovQuery(uint64(m.NextSequenceReceive))
+ }
+ l = len(m.Proof)
+ if l > 0 {
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ l = m.ProofHeight.Size()
+ n += 1 + l + sovQuery(uint64(l))
+ return n
+}
+
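+// sovQuery reports how many bytes the varint encoding of x occupies: one byte
+// per started group of 7 significant bits, with a minimum of one byte (so
+// sovQuery(0) == 1 and sovQuery(300) == 2). sozQuery does the same for a
+// signed value after zigzag-encoding it, so small negative numbers stay short.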
+func sovQuery(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozQuery(x uint64) (n int) {
+ return sovQuery(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
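+
+// Unmarshal and the decoders that follow share one loop: read a varint key,
+// split it into the field number (key>>3) and wire type (key&0x7), bounds-check
+// the declared length against the buffer, then either decode the field into m
+// or skip it via skipQuery so unknown fields are skipped rather than rejected.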
+func (m *QueryChannelRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: QueryChannelRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: QueryChannelRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PortId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.PortId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ChannelId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ChannelId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipQuery(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *QueryChannelResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: QueryChannelResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: QueryChannelResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Channel", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Channel == nil {
+ m.Channel = &Channel{}
+ }
+ if err := m.Channel.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Proof", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Proof = append(m.Proof[:0], dAtA[iNdEx:postIndex]...)
+ if m.Proof == nil {
+ m.Proof = []byte{}
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProofHeight", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ProofHeight.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipQuery(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *QueryChannelsRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: QueryChannelsRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: QueryChannelsRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Pagination == nil {
+ m.Pagination = &query.PageRequest{}
+ }
+ if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipQuery(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *QueryChannelsResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: QueryChannelsResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: QueryChannelsResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Channels", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Channels = append(m.Channels, &IdentifiedChannel{})
+ if err := m.Channels[len(m.Channels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Pagination == nil {
+ m.Pagination = &query.PageResponse{}
+ }
+ if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Height.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipQuery(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *QueryConnectionChannelsRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: QueryConnectionChannelsRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: QueryConnectionChannelsRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Connection", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Connection = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Pagination == nil {
+ m.Pagination = &query.PageRequest{}
+ }
+ if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipQuery(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *QueryConnectionChannelsResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: QueryConnectionChannelsResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: QueryConnectionChannelsResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Channels", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Channels = append(m.Channels, &IdentifiedChannel{})
+ if err := m.Channels[len(m.Channels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Pagination == nil {
+ m.Pagination = &query.PageResponse{}
+ }
+ if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Height.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipQuery(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *QueryChannelClientStateRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: QueryChannelClientStateRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: QueryChannelClientStateRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PortId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.PortId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ChannelId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ChannelId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipQuery(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *QueryChannelClientStateResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: QueryChannelClientStateResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: QueryChannelClientStateResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field IdentifiedClientState", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.IdentifiedClientState == nil {
+ m.IdentifiedClientState = &types.IdentifiedClientState{}
+ }
+ if err := m.IdentifiedClientState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Proof", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Proof = append(m.Proof[:0], dAtA[iNdEx:postIndex]...)
+ if m.Proof == nil {
+ m.Proof = []byte{}
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProofHeight", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ProofHeight.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipQuery(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *QueryChannelConsensusStateRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: QueryChannelConsensusStateRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: QueryChannelConsensusStateRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PortId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.PortId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ChannelId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ChannelId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RevisionNumber", wireType)
+ }
+ m.RevisionNumber = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.RevisionNumber |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RevisionHeight", wireType)
+ }
+ m.RevisionHeight = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.RevisionHeight |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipQuery(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *QueryChannelConsensusStateResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: QueryChannelConsensusStateResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: QueryChannelConsensusStateResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ConsensusState", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ConsensusState == nil {
+ m.ConsensusState = &types1.Any{}
+ }
+ if err := m.ConsensusState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClientId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ClientId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Proof", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Proof = append(m.Proof[:0], dAtA[iNdEx:postIndex]...)
+ if m.Proof == nil {
+ m.Proof = []byte{}
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProofHeight", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ProofHeight.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipQuery(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *QueryPacketCommitmentRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: QueryPacketCommitmentRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: QueryPacketCommitmentRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PortId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.PortId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ChannelId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ChannelId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Sequence", wireType)
+ }
+ m.Sequence = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Sequence |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipQuery(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *QueryPacketCommitmentResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: QueryPacketCommitmentResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: QueryPacketCommitmentResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Commitment", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Commitment = append(m.Commitment[:0], dAtA[iNdEx:postIndex]...)
+ if m.Commitment == nil {
+ m.Commitment = []byte{}
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Proof", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Proof = append(m.Proof[:0], dAtA[iNdEx:postIndex]...)
+ if m.Proof == nil {
+ m.Proof = []byte{}
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProofHeight", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ProofHeight.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipQuery(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *QueryPacketCommitmentsRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: QueryPacketCommitmentsRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: QueryPacketCommitmentsRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PortId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.PortId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ChannelId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ChannelId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Pagination == nil {
+ m.Pagination = &query.PageRequest{}
+ }
+ if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipQuery(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *QueryPacketCommitmentsResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: QueryPacketCommitmentsResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: QueryPacketCommitmentsResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Commitments", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Commitments = append(m.Commitments, &PacketState{})
+ if err := m.Commitments[len(m.Commitments)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Pagination == nil {
+ m.Pagination = &query.PageResponse{}
+ }
+ if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Height.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipQuery(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *QueryPacketReceiptRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: QueryPacketReceiptRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: QueryPacketReceiptRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PortId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.PortId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ChannelId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ChannelId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Sequence", wireType)
+ }
+ m.Sequence = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Sequence |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipQuery(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *QueryPacketReceiptResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: QueryPacketReceiptResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: QueryPacketReceiptResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Received", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Received = bool(v != 0)
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Proof", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Proof = append(m.Proof[:0], dAtA[iNdEx:postIndex]...)
+ if m.Proof == nil {
+ m.Proof = []byte{}
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProofHeight", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ProofHeight.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipQuery(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *QueryPacketAcknowledgementRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: QueryPacketAcknowledgementRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: QueryPacketAcknowledgementRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PortId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.PortId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ChannelId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ChannelId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Sequence", wireType)
+ }
+ m.Sequence = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Sequence |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipQuery(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *QueryPacketAcknowledgementResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: QueryPacketAcknowledgementResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: QueryPacketAcknowledgementResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Acknowledgement", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Acknowledgement = append(m.Acknowledgement[:0], dAtA[iNdEx:postIndex]...)
+ if m.Acknowledgement == nil {
+ m.Acknowledgement = []byte{}
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Proof", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Proof = append(m.Proof[:0], dAtA[iNdEx:postIndex]...)
+ if m.Proof == nil {
+ m.Proof = []byte{}
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProofHeight", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ProofHeight.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipQuery(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *QueryPacketAcknowledgementsRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: QueryPacketAcknowledgementsRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: QueryPacketAcknowledgementsRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PortId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.PortId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ChannelId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ChannelId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Pagination == nil {
+ m.Pagination = &query.PageRequest{}
+ }
+ if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipQuery(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *QueryPacketAcknowledgementsResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: QueryPacketAcknowledgementsResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: QueryPacketAcknowledgementsResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Acknowledgements", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Acknowledgements = append(m.Acknowledgements, &PacketState{})
+ if err := m.Acknowledgements[len(m.Acknowledgements)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Pagination == nil {
+ m.Pagination = &query.PageResponse{}
+ }
+ if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Height.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipQuery(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *QueryUnreceivedPacketsRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: QueryUnreceivedPacketsRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: QueryUnreceivedPacketsRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PortId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.PortId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ChannelId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ChannelId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType == 0 {
+ var v uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.PacketCommitmentSequences = append(m.PacketCommitmentSequences, v)
+ } else if wireType == 2 {
+ var packedLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ packedLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if packedLen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + packedLen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ var elementCount int
+ var count int
+ for _, integer := range dAtA[iNdEx:postIndex] {
+ if integer < 128 {
+ count++
+ }
+ }
+ elementCount = count
+ if elementCount != 0 && len(m.PacketCommitmentSequences) == 0 {
+ m.PacketCommitmentSequences = make([]uint64, 0, elementCount)
+ }
+ for iNdEx < postIndex {
+ var v uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.PacketCommitmentSequences = append(m.PacketCommitmentSequences, v)
+ }
+ } else {
+ return fmt.Errorf("proto: wrong wireType = %d for field PacketCommitmentSequences", wireType)
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipQuery(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *QueryUnreceivedPacketsResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: QueryUnreceivedPacketsResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: QueryUnreceivedPacketsResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType == 0 {
+ var v uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Sequences = append(m.Sequences, v)
+ } else if wireType == 2 {
+ var packedLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ packedLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if packedLen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + packedLen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ var elementCount int
+ var count int
+ for _, integer := range dAtA[iNdEx:postIndex] {
+ if integer < 128 {
+ count++
+ }
+ }
+ elementCount = count
+ if elementCount != 0 && len(m.Sequences) == 0 {
+ m.Sequences = make([]uint64, 0, elementCount)
+ }
+ for iNdEx < postIndex {
+ var v uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Sequences = append(m.Sequences, v)
+ }
+ } else {
+ return fmt.Errorf("proto: wrong wireType = %d for field Sequences", wireType)
+ }
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Height.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipQuery(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *QueryUnreceivedAcksRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: QueryUnreceivedAcksRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: QueryUnreceivedAcksRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PortId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.PortId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ChannelId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ChannelId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType == 0 {
+ var v uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.PacketAckSequences = append(m.PacketAckSequences, v)
+ } else if wireType == 2 {
+ var packedLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ packedLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if packedLen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + packedLen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ var elementCount int
+ var count int
+ for _, integer := range dAtA[iNdEx:postIndex] {
+ if integer < 128 {
+ count++
+ }
+ }
+ elementCount = count
+ if elementCount != 0 && len(m.PacketAckSequences) == 0 {
+ m.PacketAckSequences = make([]uint64, 0, elementCount)
+ }
+ for iNdEx < postIndex {
+ var v uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.PacketAckSequences = append(m.PacketAckSequences, v)
+ }
+ } else {
+ return fmt.Errorf("proto: wrong wireType = %d for field PacketAckSequences", wireType)
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipQuery(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *QueryUnreceivedAcksResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: QueryUnreceivedAcksResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: QueryUnreceivedAcksResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType == 0 {
+ var v uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Sequences = append(m.Sequences, v)
+ } else if wireType == 2 {
+ var packedLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ packedLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if packedLen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + packedLen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ var elementCount int
+ var count int
+ for _, integer := range dAtA[iNdEx:postIndex] {
+ if integer < 128 {
+ count++
+ }
+ }
+ elementCount = count
+ if elementCount != 0 && len(m.Sequences) == 0 {
+ m.Sequences = make([]uint64, 0, elementCount)
+ }
+ for iNdEx < postIndex {
+ var v uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Sequences = append(m.Sequences, v)
+ }
+ } else {
+ return fmt.Errorf("proto: wrong wireType = %d for field Sequences", wireType)
+ }
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Height.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipQuery(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *QueryNextSequenceReceiveRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: QueryNextSequenceReceiveRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: QueryNextSequenceReceiveRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PortId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.PortId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ChannelId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ChannelId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipQuery(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *QueryNextSequenceReceiveResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: QueryNextSequenceReceiveResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: QueryNextSequenceReceiveResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NextSequenceReceive", wireType)
+ }
+ m.NextSequenceReceive = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.NextSequenceReceive |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Proof", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Proof = append(m.Proof[:0], dAtA[iNdEx:postIndex]...)
+ if m.Proof == nil {
+ m.Proof = []byte{}
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProofHeight", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ProofHeight.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipQuery(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipQuery(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthQuery
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupQuery
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthQuery
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthQuery = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowQuery = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupQuery = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/core/04-channel/types/query.pb.gw.go b/core/04-channel/types/query.pb.gw.go
new file mode 100644
index 00000000..58be2aca
--- /dev/null
+++ b/core/04-channel/types/query.pb.gw.go
@@ -0,0 +1,1792 @@
+// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
+// source: ibcgo/core/channel/v1/query.proto
+
+/*
+Package types is a reverse proxy.
+
+It translates gRPC into RESTful JSON APIs.
+*/
+package types
+
+import (
+ "context"
+ "io"
+ "net/http"
+
+ "github.com/golang/protobuf/descriptor"
+ "github.com/golang/protobuf/proto"
+ "github.com/grpc-ecosystem/grpc-gateway/runtime"
+ "github.com/grpc-ecosystem/grpc-gateway/utilities"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/status"
+)
+
+// Suppress "imported and not used" errors
+var _ codes.Code
+var _ io.Reader
+var _ status.Status
+var _ = runtime.String
+var _ = utilities.NewDoubleArray
+var _ = descriptor.ForMessage
+
+func request_Query_Channel_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq QueryChannelRequest
+ var metadata runtime.ServerMetadata
+
+ var (
+ val string
+ ok bool
+ err error
+ _ = err
+ )
+
+ val, ok = pathParams["channel_id"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "channel_id")
+ }
+
+ protoReq.ChannelId, err = runtime.String(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "channel_id", err)
+ }
+
+ val, ok = pathParams["port_id"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "port_id")
+ }
+
+ protoReq.PortId, err = runtime.String(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "port_id", err)
+ }
+
+ msg, err := client.Channel(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func local_request_Query_Channel_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq QueryChannelRequest
+ var metadata runtime.ServerMetadata
+
+ var (
+ val string
+ ok bool
+ err error
+ _ = err
+ )
+
+ val, ok = pathParams["channel_id"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "channel_id")
+ }
+
+ protoReq.ChannelId, err = runtime.String(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "channel_id", err)
+ }
+
+ val, ok = pathParams["port_id"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "port_id")
+ }
+
+ protoReq.PortId, err = runtime.String(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "port_id", err)
+ }
+
+ msg, err := server.Channel(ctx, &protoReq)
+ return msg, metadata, err
+
+}
+
+var (
+ filter_Query_Channels_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)}
+)
+
+func request_Query_Channels_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq QueryChannelsRequest
+ var metadata runtime.ServerMetadata
+
+ if err := req.ParseForm(); err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Channels_0); err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := client.Channels(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func local_request_Query_Channels_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq QueryChannelsRequest
+ var metadata runtime.ServerMetadata
+
+ if err := req.ParseForm(); err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Channels_0); err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := server.Channels(ctx, &protoReq)
+ return msg, metadata, err
+
+}
+
+var (
+ filter_Query_ConnectionChannels_0 = &utilities.DoubleArray{Encoding: map[string]int{"connection": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}}
+)
+
+func request_Query_ConnectionChannels_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq QueryConnectionChannelsRequest
+ var metadata runtime.ServerMetadata
+
+ var (
+ val string
+ ok bool
+ err error
+ _ = err
+ )
+
+ val, ok = pathParams["connection"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "connection")
+ }
+
+ protoReq.Connection, err = runtime.String(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "connection", err)
+ }
+
+ if err := req.ParseForm(); err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_ConnectionChannels_0); err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := client.ConnectionChannels(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func local_request_Query_ConnectionChannels_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq QueryConnectionChannelsRequest
+ var metadata runtime.ServerMetadata
+
+ var (
+ val string
+ ok bool
+ err error
+ _ = err
+ )
+
+ val, ok = pathParams["connection"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "connection")
+ }
+
+ protoReq.Connection, err = runtime.String(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "connection", err)
+ }
+
+ if err := req.ParseForm(); err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_ConnectionChannels_0); err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := server.ConnectionChannels(ctx, &protoReq)
+ return msg, metadata, err
+
+}
+
+func request_Query_ChannelClientState_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq QueryChannelClientStateRequest
+ var metadata runtime.ServerMetadata
+
+ var (
+ val string
+ ok bool
+ err error
+ _ = err
+ )
+
+ val, ok = pathParams["channel_id"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "channel_id")
+ }
+
+ protoReq.ChannelId, err = runtime.String(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "channel_id", err)
+ }
+
+ val, ok = pathParams["port_id"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "port_id")
+ }
+
+ protoReq.PortId, err = runtime.String(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "port_id", err)
+ }
+
+ msg, err := client.ChannelClientState(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func local_request_Query_ChannelClientState_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq QueryChannelClientStateRequest
+ var metadata runtime.ServerMetadata
+
+ var (
+ val string
+ ok bool
+ err error
+ _ = err
+ )
+
+ val, ok = pathParams["channel_id"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "channel_id")
+ }
+
+ protoReq.ChannelId, err = runtime.String(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "channel_id", err)
+ }
+
+ val, ok = pathParams["port_id"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "port_id")
+ }
+
+ protoReq.PortId, err = runtime.String(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "port_id", err)
+ }
+
+ msg, err := server.ChannelClientState(ctx, &protoReq)
+ return msg, metadata, err
+
+}
+
+func request_Query_ChannelConsensusState_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq QueryChannelConsensusStateRequest
+ var metadata runtime.ServerMetadata
+
+ var (
+ val string
+ ok bool
+ err error
+ _ = err
+ )
+
+ val, ok = pathParams["channel_id"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "channel_id")
+ }
+
+ protoReq.ChannelId, err = runtime.String(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "channel_id", err)
+ }
+
+ val, ok = pathParams["port_id"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "port_id")
+ }
+
+ protoReq.PortId, err = runtime.String(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "port_id", err)
+ }
+
+ val, ok = pathParams["revision_number"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "revision_number")
+ }
+
+ protoReq.RevisionNumber, err = runtime.Uint64(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "revision_number", err)
+ }
+
+ val, ok = pathParams["revision_height"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "revision_height")
+ }
+
+ protoReq.RevisionHeight, err = runtime.Uint64(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "revision_height", err)
+ }
+
+ msg, err := client.ChannelConsensusState(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func local_request_Query_ChannelConsensusState_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq QueryChannelConsensusStateRequest
+ var metadata runtime.ServerMetadata
+
+ var (
+ val string
+ ok bool
+ err error
+ _ = err
+ )
+
+ val, ok = pathParams["channel_id"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "channel_id")
+ }
+
+ protoReq.ChannelId, err = runtime.String(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "channel_id", err)
+ }
+
+ val, ok = pathParams["port_id"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "port_id")
+ }
+
+ protoReq.PortId, err = runtime.String(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "port_id", err)
+ }
+
+ val, ok = pathParams["revision_number"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "revision_number")
+ }
+
+ protoReq.RevisionNumber, err = runtime.Uint64(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "revision_number", err)
+ }
+
+ val, ok = pathParams["revision_height"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "revision_height")
+ }
+
+ protoReq.RevisionHeight, err = runtime.Uint64(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "revision_height", err)
+ }
+
+ msg, err := server.ChannelConsensusState(ctx, &protoReq)
+ return msg, metadata, err
+
+}
+
+func request_Query_PacketCommitment_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq QueryPacketCommitmentRequest
+ var metadata runtime.ServerMetadata
+
+ var (
+ val string
+ ok bool
+ err error
+ _ = err
+ )
+
+ val, ok = pathParams["channel_id"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "channel_id")
+ }
+
+ protoReq.ChannelId, err = runtime.String(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "channel_id", err)
+ }
+
+ val, ok = pathParams["port_id"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "port_id")
+ }
+
+ protoReq.PortId, err = runtime.String(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "port_id", err)
+ }
+
+ val, ok = pathParams["sequence"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "sequence")
+ }
+
+ protoReq.Sequence, err = runtime.Uint64(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "sequence", err)
+ }
+
+ msg, err := client.PacketCommitment(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func local_request_Query_PacketCommitment_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq QueryPacketCommitmentRequest
+ var metadata runtime.ServerMetadata
+
+ var (
+ val string
+ ok bool
+ err error
+ _ = err
+ )
+
+ val, ok = pathParams["channel_id"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "channel_id")
+ }
+
+ protoReq.ChannelId, err = runtime.String(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "channel_id", err)
+ }
+
+ val, ok = pathParams["port_id"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "port_id")
+ }
+
+ protoReq.PortId, err = runtime.String(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "port_id", err)
+ }
+
+ val, ok = pathParams["sequence"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "sequence")
+ }
+
+ protoReq.Sequence, err = runtime.Uint64(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "sequence", err)
+ }
+
+ msg, err := server.PacketCommitment(ctx, &protoReq)
+ return msg, metadata, err
+
+}
+
+var (
+ filter_Query_PacketCommitments_0 = &utilities.DoubleArray{Encoding: map[string]int{"channel_id": 0, "port_id": 1}, Base: []int{1, 1, 2, 0, 0}, Check: []int{0, 1, 1, 2, 3}}
+)
+
+func request_Query_PacketCommitments_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq QueryPacketCommitmentsRequest
+ var metadata runtime.ServerMetadata
+
+ var (
+ val string
+ ok bool
+ err error
+ _ = err
+ )
+
+ val, ok = pathParams["channel_id"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "channel_id")
+ }
+
+ protoReq.ChannelId, err = runtime.String(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "channel_id", err)
+ }
+
+ val, ok = pathParams["port_id"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "port_id")
+ }
+
+ protoReq.PortId, err = runtime.String(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "port_id", err)
+ }
+
+ if err := req.ParseForm(); err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_PacketCommitments_0); err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := client.PacketCommitments(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func local_request_Query_PacketCommitments_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq QueryPacketCommitmentsRequest
+ var metadata runtime.ServerMetadata
+
+ var (
+ val string
+ ok bool
+ err error
+ _ = err
+ )
+
+ val, ok = pathParams["channel_id"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "channel_id")
+ }
+
+ protoReq.ChannelId, err = runtime.String(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "channel_id", err)
+ }
+
+ val, ok = pathParams["port_id"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "port_id")
+ }
+
+ protoReq.PortId, err = runtime.String(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "port_id", err)
+ }
+
+ if err := req.ParseForm(); err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_PacketCommitments_0); err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := server.PacketCommitments(ctx, &protoReq)
+ return msg, metadata, err
+
+}
+
+func request_Query_PacketReceipt_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq QueryPacketReceiptRequest
+ var metadata runtime.ServerMetadata
+
+ var (
+ val string
+ ok bool
+ err error
+ _ = err
+ )
+
+ val, ok = pathParams["channel_id"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "channel_id")
+ }
+
+ protoReq.ChannelId, err = runtime.String(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "channel_id", err)
+ }
+
+ val, ok = pathParams["port_id"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "port_id")
+ }
+
+ protoReq.PortId, err = runtime.String(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "port_id", err)
+ }
+
+ val, ok = pathParams["sequence"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "sequence")
+ }
+
+ protoReq.Sequence, err = runtime.Uint64(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "sequence", err)
+ }
+
+ msg, err := client.PacketReceipt(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func local_request_Query_PacketReceipt_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq QueryPacketReceiptRequest
+ var metadata runtime.ServerMetadata
+
+ var (
+ val string
+ ok bool
+ err error
+ _ = err
+ )
+
+ val, ok = pathParams["channel_id"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "channel_id")
+ }
+
+ protoReq.ChannelId, err = runtime.String(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "channel_id", err)
+ }
+
+ val, ok = pathParams["port_id"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "port_id")
+ }
+
+ protoReq.PortId, err = runtime.String(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "port_id", err)
+ }
+
+ val, ok = pathParams["sequence"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "sequence")
+ }
+
+ protoReq.Sequence, err = runtime.Uint64(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "sequence", err)
+ }
+
+ msg, err := server.PacketReceipt(ctx, &protoReq)
+ return msg, metadata, err
+
+}
+
+func request_Query_PacketAcknowledgement_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq QueryPacketAcknowledgementRequest
+ var metadata runtime.ServerMetadata
+
+ var (
+ val string
+ ok bool
+ err error
+ _ = err
+ )
+
+ val, ok = pathParams["channel_id"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "channel_id")
+ }
+
+ protoReq.ChannelId, err = runtime.String(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "channel_id", err)
+ }
+
+ val, ok = pathParams["port_id"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "port_id")
+ }
+
+ protoReq.PortId, err = runtime.String(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "port_id", err)
+ }
+
+ val, ok = pathParams["sequence"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "sequence")
+ }
+
+ protoReq.Sequence, err = runtime.Uint64(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "sequence", err)
+ }
+
+ msg, err := client.PacketAcknowledgement(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func local_request_Query_PacketAcknowledgement_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq QueryPacketAcknowledgementRequest
+ var metadata runtime.ServerMetadata
+
+ var (
+ val string
+ ok bool
+ err error
+ _ = err
+ )
+
+ val, ok = pathParams["channel_id"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "channel_id")
+ }
+
+ protoReq.ChannelId, err = runtime.String(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "channel_id", err)
+ }
+
+ val, ok = pathParams["port_id"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "port_id")
+ }
+
+ protoReq.PortId, err = runtime.String(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "port_id", err)
+ }
+
+ val, ok = pathParams["sequence"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "sequence")
+ }
+
+ protoReq.Sequence, err = runtime.Uint64(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "sequence", err)
+ }
+
+ msg, err := server.PacketAcknowledgement(ctx, &protoReq)
+ return msg, metadata, err
+
+}
+
+var (
+ filter_Query_PacketAcknowledgements_0 = &utilities.DoubleArray{Encoding: map[string]int{"channel_id": 0, "port_id": 1}, Base: []int{1, 1, 2, 0, 0}, Check: []int{0, 1, 1, 2, 3}}
+)
+
+func request_Query_PacketAcknowledgements_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq QueryPacketAcknowledgementsRequest
+ var metadata runtime.ServerMetadata
+
+ var (
+ val string
+ ok bool
+ err error
+ _ = err
+ )
+
+ val, ok = pathParams["channel_id"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "channel_id")
+ }
+
+ protoReq.ChannelId, err = runtime.String(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "channel_id", err)
+ }
+
+ val, ok = pathParams["port_id"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "port_id")
+ }
+
+ protoReq.PortId, err = runtime.String(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "port_id", err)
+ }
+
+ if err := req.ParseForm(); err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_PacketAcknowledgements_0); err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := client.PacketAcknowledgements(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func local_request_Query_PacketAcknowledgements_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq QueryPacketAcknowledgementsRequest
+ var metadata runtime.ServerMetadata
+
+ var (
+ val string
+ ok bool
+ err error
+ _ = err
+ )
+
+ val, ok = pathParams["channel_id"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "channel_id")
+ }
+
+ protoReq.ChannelId, err = runtime.String(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "channel_id", err)
+ }
+
+ val, ok = pathParams["port_id"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "port_id")
+ }
+
+ protoReq.PortId, err = runtime.String(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "port_id", err)
+ }
+
+ if err := req.ParseForm(); err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_PacketAcknowledgements_0); err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := server.PacketAcknowledgements(ctx, &protoReq)
+ return msg, metadata, err
+
+}
+
+func request_Query_UnreceivedPackets_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq QueryUnreceivedPacketsRequest
+ var metadata runtime.ServerMetadata
+
+ var (
+ val string
+ ok bool
+ err error
+ _ = err
+ )
+
+ val, ok = pathParams["channel_id"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "channel_id")
+ }
+
+ protoReq.ChannelId, err = runtime.String(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "channel_id", err)
+ }
+
+ val, ok = pathParams["port_id"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "port_id")
+ }
+
+ protoReq.PortId, err = runtime.String(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "port_id", err)
+ }
+
+ val, ok = pathParams["packet_commitment_sequences"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "packet_commitment_sequences")
+ }
+
+ protoReq.PacketCommitmentSequences, err = runtime.Uint64Slice(val, ",")
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "packet_commitment_sequences", err)
+ }
+
+ msg, err := client.UnreceivedPackets(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func local_request_Query_UnreceivedPackets_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq QueryUnreceivedPacketsRequest
+ var metadata runtime.ServerMetadata
+
+ var (
+ val string
+ ok bool
+ err error
+ _ = err
+ )
+
+ val, ok = pathParams["channel_id"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "channel_id")
+ }
+
+ protoReq.ChannelId, err = runtime.String(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "channel_id", err)
+ }
+
+ val, ok = pathParams["port_id"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "port_id")
+ }
+
+ protoReq.PortId, err = runtime.String(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "port_id", err)
+ }
+
+ val, ok = pathParams["packet_commitment_sequences"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "packet_commitment_sequences")
+ }
+
+ protoReq.PacketCommitmentSequences, err = runtime.Uint64Slice(val, ",")
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "packet_commitment_sequences", err)
+ }
+
+ msg, err := server.UnreceivedPackets(ctx, &protoReq)
+ return msg, metadata, err
+
+}
+
+func request_Query_UnreceivedAcks_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq QueryUnreceivedAcksRequest
+ var metadata runtime.ServerMetadata
+
+ var (
+ val string
+ ok bool
+ err error
+ _ = err
+ )
+
+ val, ok = pathParams["channel_id"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "channel_id")
+ }
+
+ protoReq.ChannelId, err = runtime.String(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "channel_id", err)
+ }
+
+ val, ok = pathParams["port_id"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "port_id")
+ }
+
+ protoReq.PortId, err = runtime.String(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "port_id", err)
+ }
+
+ val, ok = pathParams["packet_ack_sequences"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "packet_ack_sequences")
+ }
+
+ protoReq.PacketAckSequences, err = runtime.Uint64Slice(val, ",")
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "packet_ack_sequences", err)
+ }
+
+ msg, err := client.UnreceivedAcks(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func local_request_Query_UnreceivedAcks_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq QueryUnreceivedAcksRequest
+ var metadata runtime.ServerMetadata
+
+ var (
+ val string
+ ok bool
+ err error
+ _ = err
+ )
+
+ val, ok = pathParams["channel_id"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "channel_id")
+ }
+
+ protoReq.ChannelId, err = runtime.String(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "channel_id", err)
+ }
+
+ val, ok = pathParams["port_id"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "port_id")
+ }
+
+ protoReq.PortId, err = runtime.String(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "port_id", err)
+ }
+
+ val, ok = pathParams["packet_ack_sequences"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "packet_ack_sequences")
+ }
+
+ protoReq.PacketAckSequences, err = runtime.Uint64Slice(val, ",")
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "packet_ack_sequences", err)
+ }
+
+ msg, err := server.UnreceivedAcks(ctx, &protoReq)
+ return msg, metadata, err
+
+}
+
+func request_Query_NextSequenceReceive_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq QueryNextSequenceReceiveRequest
+ var metadata runtime.ServerMetadata
+
+ var (
+ val string
+ ok bool
+ err error
+ _ = err
+ )
+
+ val, ok = pathParams["channel_id"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "channel_id")
+ }
+
+ protoReq.ChannelId, err = runtime.String(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "channel_id", err)
+ }
+
+ val, ok = pathParams["port_id"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "port_id")
+ }
+
+ protoReq.PortId, err = runtime.String(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "port_id", err)
+ }
+
+ msg, err := client.NextSequenceReceive(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func local_request_Query_NextSequenceReceive_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq QueryNextSequenceReceiveRequest
+ var metadata runtime.ServerMetadata
+
+ var (
+ val string
+ ok bool
+ err error
+ _ = err
+ )
+
+ val, ok = pathParams["channel_id"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "channel_id")
+ }
+
+ protoReq.ChannelId, err = runtime.String(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "channel_id", err)
+ }
+
+ val, ok = pathParams["port_id"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "port_id")
+ }
+
+ protoReq.PortId, err = runtime.String(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "port_id", err)
+ }
+
+ msg, err := server.NextSequenceReceive(ctx, &protoReq)
+ return msg, metadata, err
+
+}
+
+// RegisterQueryHandlerServer registers the http handlers for service Query to "mux".
+// UnaryRPC :call QueryServer directly.
+// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
+// Note that using this registration option will cause many gRPC library features (such as grpc.SendHeader, etc) to stop working. Consider using RegisterQueryHandlerFromEndpoint instead.
+func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryServer) error {
+
+ mux.Handle("GET", pattern_Query_Channel_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Query_Channel_0(rctx, inboundMarshaler, server, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Query_Channel_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("GET", pattern_Query_Channels_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Query_Channels_0(rctx, inboundMarshaler, server, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Query_Channels_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("GET", pattern_Query_ConnectionChannels_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Query_ConnectionChannels_0(rctx, inboundMarshaler, server, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Query_ConnectionChannels_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("GET", pattern_Query_ChannelClientState_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Query_ChannelClientState_0(rctx, inboundMarshaler, server, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Query_ChannelClientState_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("GET", pattern_Query_ChannelConsensusState_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Query_ChannelConsensusState_0(rctx, inboundMarshaler, server, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Query_ChannelConsensusState_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("GET", pattern_Query_PacketCommitment_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Query_PacketCommitment_0(rctx, inboundMarshaler, server, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Query_PacketCommitment_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("GET", pattern_Query_PacketCommitments_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Query_PacketCommitments_0(rctx, inboundMarshaler, server, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Query_PacketCommitments_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("GET", pattern_Query_PacketReceipt_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Query_PacketReceipt_0(rctx, inboundMarshaler, server, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Query_PacketReceipt_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("GET", pattern_Query_PacketAcknowledgement_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Query_PacketAcknowledgement_0(rctx, inboundMarshaler, server, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Query_PacketAcknowledgement_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("GET", pattern_Query_PacketAcknowledgements_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Query_PacketAcknowledgements_0(rctx, inboundMarshaler, server, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Query_PacketAcknowledgements_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("GET", pattern_Query_UnreceivedPackets_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Query_UnreceivedPackets_0(rctx, inboundMarshaler, server, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Query_UnreceivedPackets_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("GET", pattern_Query_UnreceivedAcks_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Query_UnreceivedAcks_0(rctx, inboundMarshaler, server, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Query_UnreceivedAcks_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("GET", pattern_Query_NextSequenceReceive_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Query_NextSequenceReceive_0(rctx, inboundMarshaler, server, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Query_NextSequenceReceive_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ return nil
+}
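+
+// A rough sketch (illustrative only, not part of the generated API surface):
+// direct in-process registration might look like the following, where qs is
+// any implementation of QueryServer and ":1317" is just an example listen
+// address; no gRPC connection is involved, with the caveats noted above.
+//
+//	gwMux := runtime.NewServeMux()
+//	if err := RegisterQueryHandlerServer(ctx, gwMux, qs); err != nil {
+//		return err
+//	}
+//	return http.ListenAndServe(":1317", gwMux)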
+
+// RegisterQueryHandlerFromEndpoint is the same as RegisterQueryHandler but
+// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
+func RegisterQueryHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
+ conn, err := grpc.Dial(endpoint, opts...)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err != nil {
+ if cerr := conn.Close(); cerr != nil {
+ grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
+ }
+ return
+ }
+ go func() {
+ <-ctx.Done()
+ if cerr := conn.Close(); cerr != nil {
+ grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
+ }
+ }()
+ }()
+
+ return RegisterQueryHandler(ctx, mux, conn)
+}
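+
+// A minimal sketch of the remote-endpoint variant, assuming a gRPC server for
+// the Query service is already listening on "localhost:9090"; the address,
+// dial options and listen port are illustrative assumptions only.
+//
+//	gwMux := runtime.NewServeMux()
+//	opts := []grpc.DialOption{grpc.WithInsecure()}
+//	if err := RegisterQueryHandlerFromEndpoint(ctx, gwMux, "localhost:9090", opts); err != nil {
+//		return err
+//	}
+//	return http.ListenAndServe(":1317", gwMux)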
+
+// RegisterQueryHandler registers the http handlers for service Query to "mux".
+// The handlers forward requests to the grpc endpoint over "conn".
+func RegisterQueryHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
+ return RegisterQueryHandlerClient(ctx, mux, NewQueryClient(conn))
+}
+
+// RegisterQueryHandlerClient registers the http handlers for service Query
+// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "QueryClient".
+// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "QueryClient"
+// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
+// "QueryClient" to call the correct interceptors.
+func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, client QueryClient) error {
+
+ mux.Handle("GET", pattern_Query_Channel_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Query_Channel_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Query_Channel_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("GET", pattern_Query_Channels_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Query_Channels_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Query_Channels_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("GET", pattern_Query_ConnectionChannels_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Query_ConnectionChannels_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Query_ConnectionChannels_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("GET", pattern_Query_ChannelClientState_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Query_ChannelClientState_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Query_ChannelClientState_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("GET", pattern_Query_ChannelConsensusState_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Query_ChannelConsensusState_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Query_ChannelConsensusState_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("GET", pattern_Query_PacketCommitment_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Query_PacketCommitment_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Query_PacketCommitment_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("GET", pattern_Query_PacketCommitments_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Query_PacketCommitments_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Query_PacketCommitments_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("GET", pattern_Query_PacketReceipt_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Query_PacketReceipt_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Query_PacketReceipt_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("GET", pattern_Query_PacketAcknowledgement_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Query_PacketAcknowledgement_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Query_PacketAcknowledgement_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("GET", pattern_Query_PacketAcknowledgements_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Query_PacketAcknowledgements_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Query_PacketAcknowledgements_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("GET", pattern_Query_UnreceivedPackets_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Query_UnreceivedPackets_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Query_UnreceivedPackets_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("GET", pattern_Query_UnreceivedAcks_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Query_UnreceivedAcks_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Query_UnreceivedAcks_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("GET", pattern_Query_NextSequenceReceive_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Query_NextSequenceReceive_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Query_NextSequenceReceive_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ return nil
+}
+
+var (
+ pattern_Query_Channel_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 1, 0, 4, 1, 5, 5, 2, 6, 1, 0, 4, 1, 5, 7}, []string{"ibc", "core", "channel", "v1", "channels", "channel_id", "ports", "port_id"}, "", runtime.AssumeColonVerbOpt(true)))
+
+ pattern_Query_Channels_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"ibc", "core", "channel", "v1", "channels"}, "", runtime.AssumeColonVerbOpt(true)))
+
+ pattern_Query_ConnectionChannels_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 1, 0, 4, 1, 5, 5, 2, 6}, []string{"ibc", "core", "channel", "v1", "connections", "connection", "channels"}, "", runtime.AssumeColonVerbOpt(true)))
+
+ pattern_Query_ChannelClientState_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 1, 0, 4, 1, 5, 5, 2, 6, 1, 0, 4, 1, 5, 7, 2, 8}, []string{"ibc", "core", "channel", "v1", "channels", "channel_id", "ports", "port_id", "client_state"}, "", runtime.AssumeColonVerbOpt(true)))
+
+ pattern_Query_ChannelConsensusState_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 1, 0, 4, 1, 5, 5, 2, 6, 1, 0, 4, 1, 5, 7, 2, 8, 2, 9, 1, 0, 4, 1, 5, 10, 2, 11, 1, 0, 4, 1, 5, 12}, []string{"ibc", "core", "channel", "v1", "channels", "channel_id", "ports", "port_id", "consensus_state", "revision", "revision_number", "height", "revision_height"}, "", runtime.AssumeColonVerbOpt(true)))
+
+ pattern_Query_PacketCommitment_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 1, 0, 4, 1, 5, 5, 2, 6, 1, 0, 4, 1, 5, 7, 2, 8, 1, 0, 4, 1, 5, 9}, []string{"ibc", "core", "channel", "v1", "channels", "channel_id", "ports", "port_id", "packet_commitments", "sequence"}, "", runtime.AssumeColonVerbOpt(true)))
+
+ pattern_Query_PacketCommitments_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 1, 0, 4, 1, 5, 5, 2, 6, 1, 0, 4, 1, 5, 7, 2, 8}, []string{"ibc", "core", "channel", "v1", "channels", "channel_id", "ports", "port_id", "packet_commitments"}, "", runtime.AssumeColonVerbOpt(true)))
+
+ pattern_Query_PacketReceipt_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 1, 0, 4, 1, 5, 5, 2, 6, 1, 0, 4, 1, 5, 7, 2, 8, 1, 0, 4, 1, 5, 9}, []string{"ibc", "core", "channel", "v1", "channels", "channel_id", "ports", "port_id", "packet_receipts", "sequence"}, "", runtime.AssumeColonVerbOpt(true)))
+
+ pattern_Query_PacketAcknowledgement_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 1, 0, 4, 1, 5, 5, 2, 6, 1, 0, 4, 1, 5, 7, 2, 8, 1, 0, 4, 1, 5, 9}, []string{"ibc", "core", "channel", "v1", "channels", "channel_id", "ports", "port_id", "packet_acks", "sequence"}, "", runtime.AssumeColonVerbOpt(true)))
+
+ pattern_Query_PacketAcknowledgements_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 1, 0, 4, 1, 5, 5, 2, 6, 1, 0, 4, 1, 5, 7, 2, 8}, []string{"ibc", "core", "channel", "v1", "channels", "channel_id", "ports", "port_id", "packet_acknowledgements"}, "", runtime.AssumeColonVerbOpt(true)))
+
+ pattern_Query_UnreceivedPackets_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 1, 0, 4, 1, 5, 5, 2, 6, 1, 0, 4, 1, 5, 7, 2, 8, 1, 0, 4, 1, 5, 9, 2, 10}, []string{"ibc", "core", "channel", "v1", "channels", "channel_id", "ports", "port_id", "packet_commitments", "packet_commitment_sequences", "unreceived_packets"}, "", runtime.AssumeColonVerbOpt(true)))
+
+ pattern_Query_UnreceivedAcks_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 1, 0, 4, 1, 5, 5, 2, 6, 1, 0, 4, 1, 5, 7, 2, 8, 1, 0, 4, 1, 5, 9, 2, 10}, []string{"ibc", "core", "channel", "v1", "channels", "channel_id", "ports", "port_id", "packet_commitments", "packet_ack_sequences", "unreceived_acks"}, "", runtime.AssumeColonVerbOpt(true)))
+
+ pattern_Query_NextSequenceReceive_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 1, 0, 4, 1, 5, 5, 2, 6, 1, 0, 4, 1, 5, 7, 2, 8}, []string{"ibc", "core", "channel", "v1", "channels", "channel_id", "ports", "port_id", "next_sequence"}, "", runtime.AssumeColonVerbOpt(true)))
+)
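+
+// Editor's note (illustrative, not generated code): each pattern above encodes the
+// REST route that grpc-gateway serves for the corresponding query. Reading the
+// literal segments and variable names off the pattern definitions, the first and
+// last entries resolve to paths of the form:
+//
+//	GET /ibc/core/channel/v1/channels/{channel_id}/ports/{port_id}
+//	GET /ibc/core/channel/v1/channels/{channel_id}/ports/{port_id}/next_sequence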
+
+var (
+ forward_Query_Channel_0 = runtime.ForwardResponseMessage
+
+ forward_Query_Channels_0 = runtime.ForwardResponseMessage
+
+ forward_Query_ConnectionChannels_0 = runtime.ForwardResponseMessage
+
+ forward_Query_ChannelClientState_0 = runtime.ForwardResponseMessage
+
+ forward_Query_ChannelConsensusState_0 = runtime.ForwardResponseMessage
+
+ forward_Query_PacketCommitment_0 = runtime.ForwardResponseMessage
+
+ forward_Query_PacketCommitments_0 = runtime.ForwardResponseMessage
+
+ forward_Query_PacketReceipt_0 = runtime.ForwardResponseMessage
+
+ forward_Query_PacketAcknowledgement_0 = runtime.ForwardResponseMessage
+
+ forward_Query_PacketAcknowledgements_0 = runtime.ForwardResponseMessage
+
+ forward_Query_UnreceivedPackets_0 = runtime.ForwardResponseMessage
+
+ forward_Query_UnreceivedAcks_0 = runtime.ForwardResponseMessage
+
+ forward_Query_NextSequenceReceive_0 = runtime.ForwardResponseMessage
+)
diff --git a/core/04-channel/types/tx.pb.go b/core/04-channel/types/tx.pb.go
new file mode 100644
index 00000000..9b8976ec
--- /dev/null
+++ b/core/04-channel/types/tx.pb.go
@@ -0,0 +1,5264 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: ibcgo/core/channel/v1/tx.proto
+
+package types
+
+import (
+ context "context"
+ fmt "fmt"
+ types "github.com/cosmos/ibc-go/core/02-client/types"
+ _ "github.com/gogo/protobuf/gogoproto"
+ grpc1 "github.com/gogo/protobuf/grpc"
+ proto "github.com/gogo/protobuf/proto"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+ io "io"
+ math "math"
+ math_bits "math/bits"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+// MsgChannelOpenInit defines an sdk.Msg to initialize a channel handshake. It
+// is called by a relayer on Chain A.
+type MsgChannelOpenInit struct {
+ PortId string `protobuf:"bytes,1,opt,name=port_id,json=portId,proto3" json:"port_id,omitempty" yaml:"port_id"`
+ Channel Channel `protobuf:"bytes,2,opt,name=channel,proto3" json:"channel"`
+ Signer string `protobuf:"bytes,3,opt,name=signer,proto3" json:"signer,omitempty"`
+}
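+
+// Editor's note: a minimal construction sketch (not part of the generated file),
+// assuming the Channel, Counterparty, INIT and UNORDERED identifiers generated
+// elsewhere in this package from channel.proto, plus a relayerAddress string.
+//
+//	msg := &MsgChannelOpenInit{
+//	    PortId: "transfer",
+//	    Channel: Channel{
+//	        State:          INIT,
+//	        Ordering:       UNORDERED,
+//	        Counterparty:   Counterparty{PortId: "transfer"},
+//	        ConnectionHops: []string{"connection-0"},
+//	        Version:        "ics20-1",
+//	    },
+//	    Signer: relayerAddress,
+//	}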
+
+func (m *MsgChannelOpenInit) Reset() { *m = MsgChannelOpenInit{} }
+func (m *MsgChannelOpenInit) String() string { return proto.CompactTextString(m) }
+func (*MsgChannelOpenInit) ProtoMessage() {}
+func (*MsgChannelOpenInit) Descriptor() ([]byte, []int) {
+ return fileDescriptor_4f707a6c6f551009, []int{0}
+}
+func (m *MsgChannelOpenInit) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MsgChannelOpenInit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_MsgChannelOpenInit.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *MsgChannelOpenInit) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MsgChannelOpenInit.Merge(m, src)
+}
+func (m *MsgChannelOpenInit) XXX_Size() int {
+ return m.Size()
+}
+func (m *MsgChannelOpenInit) XXX_DiscardUnknown() {
+ xxx_messageInfo_MsgChannelOpenInit.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MsgChannelOpenInit proto.InternalMessageInfo
+
+// MsgChannelOpenInitResponse defines the Msg/ChannelOpenInit response type.
+type MsgChannelOpenInitResponse struct {
+}
+
+func (m *MsgChannelOpenInitResponse) Reset() { *m = MsgChannelOpenInitResponse{} }
+func (m *MsgChannelOpenInitResponse) String() string { return proto.CompactTextString(m) }
+func (*MsgChannelOpenInitResponse) ProtoMessage() {}
+func (*MsgChannelOpenInitResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_4f707a6c6f551009, []int{1}
+}
+func (m *MsgChannelOpenInitResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MsgChannelOpenInitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_MsgChannelOpenInitResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *MsgChannelOpenInitResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MsgChannelOpenInitResponse.Merge(m, src)
+}
+func (m *MsgChannelOpenInitResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *MsgChannelOpenInitResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_MsgChannelOpenInitResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MsgChannelOpenInitResponse proto.InternalMessageInfo
+
+// MsgChannelOpenTry defines a msg sent by a Relayer to try to open a channel
+// on Chain B.
+type MsgChannelOpenTry struct {
+ PortId string `protobuf:"bytes,1,opt,name=port_id,json=portId,proto3" json:"port_id,omitempty" yaml:"port_id"`
+ // in the case of crossing hellos, when both chains call OpenInit, we need
+ // the channel identifier of the previous channel in state INIT
+ PreviousChannelId string `protobuf:"bytes,2,opt,name=previous_channel_id,json=previousChannelId,proto3" json:"previous_channel_id,omitempty" yaml:"previous_channel_id"`
+ Channel Channel `protobuf:"bytes,3,opt,name=channel,proto3" json:"channel"`
+ CounterpartyVersion string `protobuf:"bytes,4,opt,name=counterparty_version,json=counterpartyVersion,proto3" json:"counterparty_version,omitempty" yaml:"counterparty_version"`
+ ProofInit []byte `protobuf:"bytes,5,opt,name=proof_init,json=proofInit,proto3" json:"proof_init,omitempty" yaml:"proof_init"`
+ ProofHeight types.Height `protobuf:"bytes,6,opt,name=proof_height,json=proofHeight,proto3" json:"proof_height" yaml:"proof_height"`
+ Signer string `protobuf:"bytes,7,opt,name=signer,proto3" json:"signer,omitempty"`
+}
+
+func (m *MsgChannelOpenTry) Reset() { *m = MsgChannelOpenTry{} }
+func (m *MsgChannelOpenTry) String() string { return proto.CompactTextString(m) }
+func (*MsgChannelOpenTry) ProtoMessage() {}
+func (*MsgChannelOpenTry) Descriptor() ([]byte, []int) {
+ return fileDescriptor_4f707a6c6f551009, []int{2}
+}
+func (m *MsgChannelOpenTry) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MsgChannelOpenTry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_MsgChannelOpenTry.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *MsgChannelOpenTry) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MsgChannelOpenTry.Merge(m, src)
+}
+func (m *MsgChannelOpenTry) XXX_Size() int {
+ return m.Size()
+}
+func (m *MsgChannelOpenTry) XXX_DiscardUnknown() {
+ xxx_messageInfo_MsgChannelOpenTry.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MsgChannelOpenTry proto.InternalMessageInfo
+
+// MsgChannelOpenTryResponse defines the Msg/ChannelOpenTry response type.
+type MsgChannelOpenTryResponse struct {
+}
+
+func (m *MsgChannelOpenTryResponse) Reset() { *m = MsgChannelOpenTryResponse{} }
+func (m *MsgChannelOpenTryResponse) String() string { return proto.CompactTextString(m) }
+func (*MsgChannelOpenTryResponse) ProtoMessage() {}
+func (*MsgChannelOpenTryResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_4f707a6c6f551009, []int{3}
+}
+func (m *MsgChannelOpenTryResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MsgChannelOpenTryResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_MsgChannelOpenTryResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *MsgChannelOpenTryResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MsgChannelOpenTryResponse.Merge(m, src)
+}
+func (m *MsgChannelOpenTryResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *MsgChannelOpenTryResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_MsgChannelOpenTryResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MsgChannelOpenTryResponse proto.InternalMessageInfo
+
+// MsgChannelOpenAck defines a msg sent by a Relayer to Chain A to acknowledge
+// the change of channel state to TRYOPEN on Chain B.
+type MsgChannelOpenAck struct {
+ PortId string `protobuf:"bytes,1,opt,name=port_id,json=portId,proto3" json:"port_id,omitempty" yaml:"port_id"`
+ ChannelId string `protobuf:"bytes,2,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty" yaml:"channel_id"`
+ CounterpartyChannelId string `protobuf:"bytes,3,opt,name=counterparty_channel_id,json=counterpartyChannelId,proto3" json:"counterparty_channel_id,omitempty" yaml:"counterparty_channel_id"`
+ CounterpartyVersion string `protobuf:"bytes,4,opt,name=counterparty_version,json=counterpartyVersion,proto3" json:"counterparty_version,omitempty" yaml:"counterparty_version"`
+ ProofTry []byte `protobuf:"bytes,5,opt,name=proof_try,json=proofTry,proto3" json:"proof_try,omitempty" yaml:"proof_try"`
+ ProofHeight types.Height `protobuf:"bytes,6,opt,name=proof_height,json=proofHeight,proto3" json:"proof_height" yaml:"proof_height"`
+ Signer string `protobuf:"bytes,7,opt,name=signer,proto3" json:"signer,omitempty"`
+}
+
+func (m *MsgChannelOpenAck) Reset() { *m = MsgChannelOpenAck{} }
+func (m *MsgChannelOpenAck) String() string { return proto.CompactTextString(m) }
+func (*MsgChannelOpenAck) ProtoMessage() {}
+func (*MsgChannelOpenAck) Descriptor() ([]byte, []int) {
+ return fileDescriptor_4f707a6c6f551009, []int{4}
+}
+func (m *MsgChannelOpenAck) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MsgChannelOpenAck) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_MsgChannelOpenAck.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *MsgChannelOpenAck) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MsgChannelOpenAck.Merge(m, src)
+}
+func (m *MsgChannelOpenAck) XXX_Size() int {
+ return m.Size()
+}
+func (m *MsgChannelOpenAck) XXX_DiscardUnknown() {
+ xxx_messageInfo_MsgChannelOpenAck.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MsgChannelOpenAck proto.InternalMessageInfo
+
+// MsgChannelOpenAckResponse defines the Msg/ChannelOpenAck response type.
+type MsgChannelOpenAckResponse struct {
+}
+
+func (m *MsgChannelOpenAckResponse) Reset() { *m = MsgChannelOpenAckResponse{} }
+func (m *MsgChannelOpenAckResponse) String() string { return proto.CompactTextString(m) }
+func (*MsgChannelOpenAckResponse) ProtoMessage() {}
+func (*MsgChannelOpenAckResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_4f707a6c6f551009, []int{5}
+}
+func (m *MsgChannelOpenAckResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MsgChannelOpenAckResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_MsgChannelOpenAckResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *MsgChannelOpenAckResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MsgChannelOpenAckResponse.Merge(m, src)
+}
+func (m *MsgChannelOpenAckResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *MsgChannelOpenAckResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_MsgChannelOpenAckResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MsgChannelOpenAckResponse proto.InternalMessageInfo
+
+// MsgChannelOpenConfirm defines a msg sent by a Relayer to Chain B to
+// acknowledge the change of channel state to OPEN on Chain A.
+type MsgChannelOpenConfirm struct {
+ PortId string `protobuf:"bytes,1,opt,name=port_id,json=portId,proto3" json:"port_id,omitempty" yaml:"port_id"`
+ ChannelId string `protobuf:"bytes,2,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty" yaml:"channel_id"`
+ ProofAck []byte `protobuf:"bytes,3,opt,name=proof_ack,json=proofAck,proto3" json:"proof_ack,omitempty" yaml:"proof_ack"`
+ ProofHeight types.Height `protobuf:"bytes,4,opt,name=proof_height,json=proofHeight,proto3" json:"proof_height" yaml:"proof_height"`
+ Signer string `protobuf:"bytes,5,opt,name=signer,proto3" json:"signer,omitempty"`
+}
+
+func (m *MsgChannelOpenConfirm) Reset() { *m = MsgChannelOpenConfirm{} }
+func (m *MsgChannelOpenConfirm) String() string { return proto.CompactTextString(m) }
+func (*MsgChannelOpenConfirm) ProtoMessage() {}
+func (*MsgChannelOpenConfirm) Descriptor() ([]byte, []int) {
+ return fileDescriptor_4f707a6c6f551009, []int{6}
+}
+func (m *MsgChannelOpenConfirm) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MsgChannelOpenConfirm) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_MsgChannelOpenConfirm.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *MsgChannelOpenConfirm) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MsgChannelOpenConfirm.Merge(m, src)
+}
+func (m *MsgChannelOpenConfirm) XXX_Size() int {
+ return m.Size()
+}
+func (m *MsgChannelOpenConfirm) XXX_DiscardUnknown() {
+ xxx_messageInfo_MsgChannelOpenConfirm.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MsgChannelOpenConfirm proto.InternalMessageInfo
+
+// MsgChannelOpenConfirmResponse defines the Msg/ChannelOpenConfirm response
+// type.
+type MsgChannelOpenConfirmResponse struct {
+}
+
+func (m *MsgChannelOpenConfirmResponse) Reset() { *m = MsgChannelOpenConfirmResponse{} }
+func (m *MsgChannelOpenConfirmResponse) String() string { return proto.CompactTextString(m) }
+func (*MsgChannelOpenConfirmResponse) ProtoMessage() {}
+func (*MsgChannelOpenConfirmResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_4f707a6c6f551009, []int{7}
+}
+func (m *MsgChannelOpenConfirmResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MsgChannelOpenConfirmResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_MsgChannelOpenConfirmResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *MsgChannelOpenConfirmResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MsgChannelOpenConfirmResponse.Merge(m, src)
+}
+func (m *MsgChannelOpenConfirmResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *MsgChannelOpenConfirmResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_MsgChannelOpenConfirmResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MsgChannelOpenConfirmResponse proto.InternalMessageInfo
+
+// MsgChannelCloseInit defines a msg sent by a Relayer to Chain A
+// to close a channel with Chain B.
+type MsgChannelCloseInit struct {
+ PortId string `protobuf:"bytes,1,opt,name=port_id,json=portId,proto3" json:"port_id,omitempty" yaml:"port_id"`
+ ChannelId string `protobuf:"bytes,2,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty" yaml:"channel_id"`
+ Signer string `protobuf:"bytes,3,opt,name=signer,proto3" json:"signer,omitempty"`
+}
+
+func (m *MsgChannelCloseInit) Reset() { *m = MsgChannelCloseInit{} }
+func (m *MsgChannelCloseInit) String() string { return proto.CompactTextString(m) }
+func (*MsgChannelCloseInit) ProtoMessage() {}
+func (*MsgChannelCloseInit) Descriptor() ([]byte, []int) {
+ return fileDescriptor_4f707a6c6f551009, []int{8}
+}
+func (m *MsgChannelCloseInit) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MsgChannelCloseInit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_MsgChannelCloseInit.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *MsgChannelCloseInit) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MsgChannelCloseInit.Merge(m, src)
+}
+func (m *MsgChannelCloseInit) XXX_Size() int {
+ return m.Size()
+}
+func (m *MsgChannelCloseInit) XXX_DiscardUnknown() {
+ xxx_messageInfo_MsgChannelCloseInit.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MsgChannelCloseInit proto.InternalMessageInfo
+
+// MsgChannelCloseInitResponse defines the Msg/ChannelCloseInit response type.
+type MsgChannelCloseInitResponse struct {
+}
+
+func (m *MsgChannelCloseInitResponse) Reset() { *m = MsgChannelCloseInitResponse{} }
+func (m *MsgChannelCloseInitResponse) String() string { return proto.CompactTextString(m) }
+func (*MsgChannelCloseInitResponse) ProtoMessage() {}
+func (*MsgChannelCloseInitResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_4f707a6c6f551009, []int{9}
+}
+func (m *MsgChannelCloseInitResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MsgChannelCloseInitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_MsgChannelCloseInitResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *MsgChannelCloseInitResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MsgChannelCloseInitResponse.Merge(m, src)
+}
+func (m *MsgChannelCloseInitResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *MsgChannelCloseInitResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_MsgChannelCloseInitResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MsgChannelCloseInitResponse proto.InternalMessageInfo
+
+// MsgChannelCloseConfirm defines a msg sent by a Relayer to Chain B
+// to acknowledge the change of channel state to CLOSED on Chain A.
+type MsgChannelCloseConfirm struct {
+ PortId string `protobuf:"bytes,1,opt,name=port_id,json=portId,proto3" json:"port_id,omitempty" yaml:"port_id"`
+ ChannelId string `protobuf:"bytes,2,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty" yaml:"channel_id"`
+ ProofInit []byte `protobuf:"bytes,3,opt,name=proof_init,json=proofInit,proto3" json:"proof_init,omitempty" yaml:"proof_init"`
+ ProofHeight types.Height `protobuf:"bytes,4,opt,name=proof_height,json=proofHeight,proto3" json:"proof_height" yaml:"proof_height"`
+ Signer string `protobuf:"bytes,5,opt,name=signer,proto3" json:"signer,omitempty"`
+}
+
+func (m *MsgChannelCloseConfirm) Reset() { *m = MsgChannelCloseConfirm{} }
+func (m *MsgChannelCloseConfirm) String() string { return proto.CompactTextString(m) }
+func (*MsgChannelCloseConfirm) ProtoMessage() {}
+func (*MsgChannelCloseConfirm) Descriptor() ([]byte, []int) {
+ return fileDescriptor_4f707a6c6f551009, []int{10}
+}
+func (m *MsgChannelCloseConfirm) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MsgChannelCloseConfirm) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_MsgChannelCloseConfirm.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *MsgChannelCloseConfirm) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MsgChannelCloseConfirm.Merge(m, src)
+}
+func (m *MsgChannelCloseConfirm) XXX_Size() int {
+ return m.Size()
+}
+func (m *MsgChannelCloseConfirm) XXX_DiscardUnknown() {
+ xxx_messageInfo_MsgChannelCloseConfirm.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MsgChannelCloseConfirm proto.InternalMessageInfo
+
+// MsgChannelCloseConfirmResponse defines the Msg/ChannelCloseConfirm response
+// type.
+type MsgChannelCloseConfirmResponse struct {
+}
+
+func (m *MsgChannelCloseConfirmResponse) Reset() { *m = MsgChannelCloseConfirmResponse{} }
+func (m *MsgChannelCloseConfirmResponse) String() string { return proto.CompactTextString(m) }
+func (*MsgChannelCloseConfirmResponse) ProtoMessage() {}
+func (*MsgChannelCloseConfirmResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_4f707a6c6f551009, []int{11}
+}
+func (m *MsgChannelCloseConfirmResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MsgChannelCloseConfirmResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_MsgChannelCloseConfirmResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *MsgChannelCloseConfirmResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MsgChannelCloseConfirmResponse.Merge(m, src)
+}
+func (m *MsgChannelCloseConfirmResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *MsgChannelCloseConfirmResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_MsgChannelCloseConfirmResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MsgChannelCloseConfirmResponse proto.InternalMessageInfo
+
+// MsgRecvPacket receives an incoming IBC packet
+type MsgRecvPacket struct {
+ Packet Packet `protobuf:"bytes,1,opt,name=packet,proto3" json:"packet"`
+ ProofCommitment []byte `protobuf:"bytes,2,opt,name=proof_commitment,json=proofCommitment,proto3" json:"proof_commitment,omitempty" yaml:"proof_commitment"`
+ ProofHeight types.Height `protobuf:"bytes,3,opt,name=proof_height,json=proofHeight,proto3" json:"proof_height" yaml:"proof_height"`
+ Signer string `protobuf:"bytes,4,opt,name=signer,proto3" json:"signer,omitempty"`
+}
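+
+// Editor's note: illustrative only. A relayer that has queried a sent packet and its
+// commitment proof from the counterparty chain could assemble the message roughly as
+// follows (packet, proof and proofHeight are assumed to come from that query):
+//
+//	msg := &MsgRecvPacket{
+//	    Packet:          packet,
+//	    ProofCommitment: proof,
+//	    ProofHeight:     proofHeight,
+//	    Signer:          relayerAddress,
+//	}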
+
+func (m *MsgRecvPacket) Reset() { *m = MsgRecvPacket{} }
+func (m *MsgRecvPacket) String() string { return proto.CompactTextString(m) }
+func (*MsgRecvPacket) ProtoMessage() {}
+func (*MsgRecvPacket) Descriptor() ([]byte, []int) {
+ return fileDescriptor_4f707a6c6f551009, []int{12}
+}
+func (m *MsgRecvPacket) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MsgRecvPacket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_MsgRecvPacket.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *MsgRecvPacket) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MsgRecvPacket.Merge(m, src)
+}
+func (m *MsgRecvPacket) XXX_Size() int {
+ return m.Size()
+}
+func (m *MsgRecvPacket) XXX_DiscardUnknown() {
+ xxx_messageInfo_MsgRecvPacket.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MsgRecvPacket proto.InternalMessageInfo
+
+// MsgRecvPacketResponse defines the Msg/RecvPacket response type.
+type MsgRecvPacketResponse struct {
+}
+
+func (m *MsgRecvPacketResponse) Reset() { *m = MsgRecvPacketResponse{} }
+func (m *MsgRecvPacketResponse) String() string { return proto.CompactTextString(m) }
+func (*MsgRecvPacketResponse) ProtoMessage() {}
+func (*MsgRecvPacketResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_4f707a6c6f551009, []int{13}
+}
+func (m *MsgRecvPacketResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MsgRecvPacketResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_MsgRecvPacketResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *MsgRecvPacketResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MsgRecvPacketResponse.Merge(m, src)
+}
+func (m *MsgRecvPacketResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *MsgRecvPacketResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_MsgRecvPacketResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MsgRecvPacketResponse proto.InternalMessageInfo
+
+// MsgTimeout receives a timed-out packet
+type MsgTimeout struct {
+ Packet Packet `protobuf:"bytes,1,opt,name=packet,proto3" json:"packet"`
+ ProofUnreceived []byte `protobuf:"bytes,2,opt,name=proof_unreceived,json=proofUnreceived,proto3" json:"proof_unreceived,omitempty" yaml:"proof_unreceived"`
+ ProofHeight types.Height `protobuf:"bytes,3,opt,name=proof_height,json=proofHeight,proto3" json:"proof_height" yaml:"proof_height"`
+ NextSequenceRecv uint64 `protobuf:"varint,4,opt,name=next_sequence_recv,json=nextSequenceRecv,proto3" json:"next_sequence_recv,omitempty" yaml:"next_sequence_recv"`
+ Signer string `protobuf:"bytes,5,opt,name=signer,proto3" json:"signer,omitempty"`
+}
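+
+// Editor's note: for ORDERED channels the NextSequenceRecv field above carries the
+// receiving chain's next expected receive sequence; proving that this value is still
+// less than or equal to the packet's sequence is what demonstrates the packet was
+// never delivered before the timeout. For UNORDERED channels, ProofUnreceived instead
+// proves the absence of a packet receipt.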
+
+func (m *MsgTimeout) Reset() { *m = MsgTimeout{} }
+func (m *MsgTimeout) String() string { return proto.CompactTextString(m) }
+func (*MsgTimeout) ProtoMessage() {}
+func (*MsgTimeout) Descriptor() ([]byte, []int) {
+ return fileDescriptor_4f707a6c6f551009, []int{14}
+}
+func (m *MsgTimeout) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MsgTimeout) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_MsgTimeout.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *MsgTimeout) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MsgTimeout.Merge(m, src)
+}
+func (m *MsgTimeout) XXX_Size() int {
+ return m.Size()
+}
+func (m *MsgTimeout) XXX_DiscardUnknown() {
+ xxx_messageInfo_MsgTimeout.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MsgTimeout proto.InternalMessageInfo
+
+// MsgTimeoutResponse defines the Msg/Timeout response type.
+type MsgTimeoutResponse struct {
+}
+
+func (m *MsgTimeoutResponse) Reset() { *m = MsgTimeoutResponse{} }
+func (m *MsgTimeoutResponse) String() string { return proto.CompactTextString(m) }
+func (*MsgTimeoutResponse) ProtoMessage() {}
+func (*MsgTimeoutResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_4f707a6c6f551009, []int{15}
+}
+func (m *MsgTimeoutResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MsgTimeoutResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_MsgTimeoutResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *MsgTimeoutResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MsgTimeoutResponse.Merge(m, src)
+}
+func (m *MsgTimeoutResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *MsgTimeoutResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_MsgTimeoutResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MsgTimeoutResponse proto.InternalMessageInfo
+
+// MsgTimeoutOnClose handles a packet timed out upon counterparty channel closure.
+type MsgTimeoutOnClose struct {
+ Packet Packet `protobuf:"bytes,1,opt,name=packet,proto3" json:"packet"`
+ ProofUnreceived []byte `protobuf:"bytes,2,opt,name=proof_unreceived,json=proofUnreceived,proto3" json:"proof_unreceived,omitempty" yaml:"proof_unreceived"`
+ ProofClose []byte `protobuf:"bytes,3,opt,name=proof_close,json=proofClose,proto3" json:"proof_close,omitempty" yaml:"proof_close"`
+ ProofHeight types.Height `protobuf:"bytes,4,opt,name=proof_height,json=proofHeight,proto3" json:"proof_height" yaml:"proof_height"`
+ NextSequenceRecv uint64 `protobuf:"varint,5,opt,name=next_sequence_recv,json=nextSequenceRecv,proto3" json:"next_sequence_recv,omitempty" yaml:"next_sequence_recv"`
+ Signer string `protobuf:"bytes,6,opt,name=signer,proto3" json:"signer,omitempty"`
+}
+
+func (m *MsgTimeoutOnClose) Reset() { *m = MsgTimeoutOnClose{} }
+func (m *MsgTimeoutOnClose) String() string { return proto.CompactTextString(m) }
+func (*MsgTimeoutOnClose) ProtoMessage() {}
+func (*MsgTimeoutOnClose) Descriptor() ([]byte, []int) {
+ return fileDescriptor_4f707a6c6f551009, []int{16}
+}
+func (m *MsgTimeoutOnClose) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MsgTimeoutOnClose) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_MsgTimeoutOnClose.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *MsgTimeoutOnClose) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MsgTimeoutOnClose.Merge(m, src)
+}
+func (m *MsgTimeoutOnClose) XXX_Size() int {
+ return m.Size()
+}
+func (m *MsgTimeoutOnClose) XXX_DiscardUnknown() {
+ xxx_messageInfo_MsgTimeoutOnClose.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MsgTimeoutOnClose proto.InternalMessageInfo
+
+// MsgTimeoutOnCloseResponse defines the Msg/TimeoutOnClose response type.
+type MsgTimeoutOnCloseResponse struct {
+}
+
+func (m *MsgTimeoutOnCloseResponse) Reset() { *m = MsgTimeoutOnCloseResponse{} }
+func (m *MsgTimeoutOnCloseResponse) String() string { return proto.CompactTextString(m) }
+func (*MsgTimeoutOnCloseResponse) ProtoMessage() {}
+func (*MsgTimeoutOnCloseResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_4f707a6c6f551009, []int{17}
+}
+func (m *MsgTimeoutOnCloseResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MsgTimeoutOnCloseResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_MsgTimeoutOnCloseResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *MsgTimeoutOnCloseResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MsgTimeoutOnCloseResponse.Merge(m, src)
+}
+func (m *MsgTimeoutOnCloseResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *MsgTimeoutOnCloseResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_MsgTimeoutOnCloseResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MsgTimeoutOnCloseResponse proto.InternalMessageInfo
+
+// MsgAcknowledgement receives an incoming IBC acknowledgement
+type MsgAcknowledgement struct {
+ Packet Packet `protobuf:"bytes,1,opt,name=packet,proto3" json:"packet"`
+ Acknowledgement []byte `protobuf:"bytes,2,opt,name=acknowledgement,proto3" json:"acknowledgement,omitempty"`
+ ProofAcked []byte `protobuf:"bytes,3,opt,name=proof_acked,json=proofAcked,proto3" json:"proof_acked,omitempty" yaml:"proof_acked"`
+ ProofHeight types.Height `protobuf:"bytes,4,opt,name=proof_height,json=proofHeight,proto3" json:"proof_height" yaml:"proof_height"`
+ Signer string `protobuf:"bytes,5,opt,name=signer,proto3" json:"signer,omitempty"`
+}
+
+func (m *MsgAcknowledgement) Reset() { *m = MsgAcknowledgement{} }
+func (m *MsgAcknowledgement) String() string { return proto.CompactTextString(m) }
+func (*MsgAcknowledgement) ProtoMessage() {}
+func (*MsgAcknowledgement) Descriptor() ([]byte, []int) {
+ return fileDescriptor_4f707a6c6f551009, []int{18}
+}
+func (m *MsgAcknowledgement) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MsgAcknowledgement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_MsgAcknowledgement.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *MsgAcknowledgement) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MsgAcknowledgement.Merge(m, src)
+}
+func (m *MsgAcknowledgement) XXX_Size() int {
+ return m.Size()
+}
+func (m *MsgAcknowledgement) XXX_DiscardUnknown() {
+ xxx_messageInfo_MsgAcknowledgement.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MsgAcknowledgement proto.InternalMessageInfo
+
+// MsgAcknowledgementResponse defines the Msg/Acknowledgement response type.
+type MsgAcknowledgementResponse struct {
+}
+
+func (m *MsgAcknowledgementResponse) Reset() { *m = MsgAcknowledgementResponse{} }
+func (m *MsgAcknowledgementResponse) String() string { return proto.CompactTextString(m) }
+func (*MsgAcknowledgementResponse) ProtoMessage() {}
+func (*MsgAcknowledgementResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_4f707a6c6f551009, []int{19}
+}
+func (m *MsgAcknowledgementResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MsgAcknowledgementResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_MsgAcknowledgementResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *MsgAcknowledgementResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MsgAcknowledgementResponse.Merge(m, src)
+}
+func (m *MsgAcknowledgementResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *MsgAcknowledgementResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_MsgAcknowledgementResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MsgAcknowledgementResponse proto.InternalMessageInfo
+
+func init() {
+ proto.RegisterType((*MsgChannelOpenInit)(nil), "ibcgo.core.channel.v1.MsgChannelOpenInit")
+ proto.RegisterType((*MsgChannelOpenInitResponse)(nil), "ibcgo.core.channel.v1.MsgChannelOpenInitResponse")
+ proto.RegisterType((*MsgChannelOpenTry)(nil), "ibcgo.core.channel.v1.MsgChannelOpenTry")
+ proto.RegisterType((*MsgChannelOpenTryResponse)(nil), "ibcgo.core.channel.v1.MsgChannelOpenTryResponse")
+ proto.RegisterType((*MsgChannelOpenAck)(nil), "ibcgo.core.channel.v1.MsgChannelOpenAck")
+ proto.RegisterType((*MsgChannelOpenAckResponse)(nil), "ibcgo.core.channel.v1.MsgChannelOpenAckResponse")
+ proto.RegisterType((*MsgChannelOpenConfirm)(nil), "ibcgo.core.channel.v1.MsgChannelOpenConfirm")
+ proto.RegisterType((*MsgChannelOpenConfirmResponse)(nil), "ibcgo.core.channel.v1.MsgChannelOpenConfirmResponse")
+ proto.RegisterType((*MsgChannelCloseInit)(nil), "ibcgo.core.channel.v1.MsgChannelCloseInit")
+ proto.RegisterType((*MsgChannelCloseInitResponse)(nil), "ibcgo.core.channel.v1.MsgChannelCloseInitResponse")
+ proto.RegisterType((*MsgChannelCloseConfirm)(nil), "ibcgo.core.channel.v1.MsgChannelCloseConfirm")
+ proto.RegisterType((*MsgChannelCloseConfirmResponse)(nil), "ibcgo.core.channel.v1.MsgChannelCloseConfirmResponse")
+ proto.RegisterType((*MsgRecvPacket)(nil), "ibcgo.core.channel.v1.MsgRecvPacket")
+ proto.RegisterType((*MsgRecvPacketResponse)(nil), "ibcgo.core.channel.v1.MsgRecvPacketResponse")
+ proto.RegisterType((*MsgTimeout)(nil), "ibcgo.core.channel.v1.MsgTimeout")
+ proto.RegisterType((*MsgTimeoutResponse)(nil), "ibcgo.core.channel.v1.MsgTimeoutResponse")
+ proto.RegisterType((*MsgTimeoutOnClose)(nil), "ibcgo.core.channel.v1.MsgTimeoutOnClose")
+ proto.RegisterType((*MsgTimeoutOnCloseResponse)(nil), "ibcgo.core.channel.v1.MsgTimeoutOnCloseResponse")
+ proto.RegisterType((*MsgAcknowledgement)(nil), "ibcgo.core.channel.v1.MsgAcknowledgement")
+ proto.RegisterType((*MsgAcknowledgementResponse)(nil), "ibcgo.core.channel.v1.MsgAcknowledgementResponse")
+}
+
+func init() { proto.RegisterFile("ibcgo/core/channel/v1/tx.proto", fileDescriptor_4f707a6c6f551009) }
+
+var fileDescriptor_4f707a6c6f551009 = []byte{
+ // 1126 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x58, 0xcd, 0x6e, 0xe3, 0x54,
+ 0x14, 0xce, 0x5f, 0xd3, 0xf6, 0xb4, 0x4c, 0x5b, 0xa7, 0x3f, 0x19, 0x67, 0x6a, 0x77, 0x0c, 0x8b,
+ 0x0c, 0x4c, 0x93, 0x49, 0x29, 0x42, 0x1a, 0x24, 0xa4, 0xa4, 0x12, 0x9a, 0x11, 0x2a, 0x83, 0x4c,
+ 0x01, 0x69, 0x84, 0x14, 0xd2, 0x9b, 0x3b, 0xae, 0x95, 0xc4, 0x37, 0xd8, 0x4e, 0x68, 0xc4, 0x0b,
+ 0xb0, 0x64, 0xc1, 0x8a, 0x05, 0x1a, 0x89, 0x35, 0x0b, 0x24, 0x1e, 0x62, 0x96, 0xb3, 0xe3, 0x67,
+ 0x61, 0xa1, 0x76, 0xc3, 0xda, 0x4f, 0x80, 0x7c, 0x7d, 0xed, 0x38, 0x89, 0xdd, 0x3a, 0x1d, 0xd2,
+ 0xe9, 0xce, 0x3e, 0xe7, 0xbb, 0xe7, 0x9c, 0xfb, 0x7d, 0xc7, 0xc7, 0xd7, 0x06, 0x41, 0x3d, 0x46,
+ 0x0a, 0x29, 0x23, 0xa2, 0xe3, 0x32, 0x3a, 0x69, 0x68, 0x1a, 0x6e, 0x97, 0xfb, 0x95, 0xb2, 0x79,
+ 0x5a, 0xea, 0xea, 0xc4, 0x24, 0xdc, 0x06, 0xf5, 0x97, 0x1c, 0x7f, 0x89, 0xf9, 0x4b, 0xfd, 0x0a,
+ 0xbf, 0xae, 0x10, 0x85, 0x50, 0x44, 0xd9, 0xb9, 0x72, 0xc1, 0xfc, 0xdd, 0x60, 0xb0, 0xb6, 0x8a,
+ 0x35, 0xd3, 0x89, 0xe5, 0x5e, 0x31, 0xc8, 0x9b, 0xe1, 0xf9, 0xbc, 0xd0, 0x14, 0x24, 0xfd, 0x92,
+ 0x04, 0xee, 0xd0, 0x50, 0x0e, 0x5c, 0xe3, 0x93, 0x2e, 0xd6, 0x1e, 0x6b, 0xaa, 0xc9, 0xbd, 0x03,
+ 0xf3, 0x5d, 0xa2, 0x9b, 0x75, 0xb5, 0x99, 0x4f, 0xee, 0x24, 0x8b, 0x8b, 0x35, 0xce, 0xb6, 0xc4,
+ 0x5b, 0x83, 0x46, 0xa7, 0xfd, 0x50, 0x62, 0x0e, 0x49, 0xce, 0x3a, 0x57, 0x8f, 0x9b, 0xdc, 0x87,
+ 0x30, 0xcf, 0x82, 0xe6, 0x53, 0x3b, 0xc9, 0xe2, 0xd2, 0x9e, 0x50, 0x0a, 0xdd, 0x4a, 0x89, 0x65,
+ 0xa9, 0x65, 0x5e, 0x58, 0x62, 0x42, 0xf6, 0x16, 0x71, 0x9b, 0x90, 0x35, 0x54, 0x45, 0xc3, 0x7a,
+ 0x3e, 0xed, 0xe4, 0x92, 0xd9, 0xdd, 0xc3, 0x85, 0xef, 0x9f, 0x8b, 0x89, 0x7f, 0x9f, 0x8b, 0x09,
+ 0xe9, 0x0e, 0xf0, 0x93, 0x45, 0xca, 0xd8, 0xe8, 0x12, 0xcd, 0xc0, 0xd2, 0xdf, 0x69, 0x58, 0x1b,
+ 0x75, 0x1f, 0xe9, 0x83, 0xe9, 0xb6, 0xf0, 0x09, 0xe4, 0xba, 0x3a, 0xee, 0xab, 0xa4, 0x67, 0xd4,
+ 0x59, 0x59, 0xce, 0xc2, 0x14, 0x5d, 0x28, 0xd8, 0x96, 0xc8, 0xb3, 0x85, 0x93, 0x20, 0x49, 0x5e,
+ 0xf3, 0xac, 0xac, 0x82, 0x51, 0x4a, 0xd2, 0x57, 0xa1, 0x44, 0x86, 0x75, 0x44, 0x7a, 0x9a, 0x89,
+ 0xf5, 0x6e, 0x43, 0x37, 0x07, 0xf5, 0x3e, 0xd6, 0x0d, 0x95, 0x68, 0xf9, 0x0c, 0x2d, 0x48, 0xb4,
+ 0x2d, 0xb1, 0xe0, 0x16, 0x14, 0x86, 0x92, 0xe4, 0x5c, 0xd0, 0xfc, 0x85, 0x6b, 0xe5, 0xf6, 0x01,
+ 0xba, 0x3a, 0x21, 0xcf, 0xea, 0xaa, 0xa6, 0x9a, 0xf9, 0xb9, 0x9d, 0x64, 0x71, 0xb9, 0xb6, 0x61,
+ 0x5b, 0xe2, 0x9a, 0xb7, 0x35, 0xcf, 0x27, 0xc9, 0x8b, 0xf4, 0x86, 0x76, 0xc2, 0x57, 0xb0, 0xec,
+ 0x7a, 0x4e, 0xb0, 0xaa, 0x9c, 0x98, 0xf9, 0x2c, 0xdd, 0xce, 0x9d, 0x91, 0xed, 0xb8, 0x5d, 0xd7,
+ 0xaf, 0x94, 0x1e, 0x51, 0x4c, 0xad, 0xe0, 0x6c, 0xc6, 0xb6, 0xc4, 0x5c, 0x30, 0xb2, 0xbb, 0x5e,
+ 0x92, 0x97, 0xe8, 0xad, 0x8b, 0x0c, 0x48, 0x3f, 0x1f, 0x21, 0x7d, 0x01, 0x6e, 0x4f, 0x68, 0xeb,
+ 0x2b, 0xff, 0xd7, 0x84, 0xf2, 0x55, 0xd4, 0x9a, 0x4e, 0xf9, 0x7d, 0x80, 0x09, 0xc1, 0x03, 0xac,
+ 0x04, 0x75, 0x5e, 0x44, 0xbe, 0xbe, 0x4f, 0x61, 0x6b, 0x84, 0xf9, 0x40, 0x08, 0xda, 0xc3, 0x35,
+ 0xc9, 0xb6, 0x44, 0x21, 0x44, 0xa2, 0x60, 0xbc, 0x8d, 0xa0, 0x67, 0xd8, 0x3b, 0xb3, 0xd0, 0xbe,
+ 0x02, 0xae, 0xa4, 0x75, 0x53, 0x1f, 0x30, 0xe9, 0xd7, 0x6d, 0x4b, 0x5c, 0x0d, 0x0a, 0x64, 0xea,
+ 0x03, 0x49, 0x5e, 0xa0, 0xd7, 0xce, 0xf3, 0x73, 0xe3, 0x84, 0xaf, 0xa2, 0x96, 0x2f, 0xfc, 0xaf,
+ 0x29, 0xd8, 0x18, 0xf5, 0x1e, 0x10, 0xed, 0x99, 0xaa, 0x77, 0xae, 0x43, 0x7c, 0x9f, 0xcc, 0x06,
+ 0x6a, 0x51, 0xb9, 0x43, 0xc8, 0x6c, 0xa0, 0x96, 0x47, 0xa6, 0xd3, 0x92, 0xe3, 0x64, 0x66, 0x66,
+ 0x44, 0xe6, 0x5c, 0x04, 0x99, 0x22, 0x6c, 0x87, 0xd2, 0xe5, 0x13, 0xfa, 0x53, 0x12, 0x72, 0x43,
+ 0xc4, 0x41, 0x9b, 0x18, 0x78, 0xfa, 0x17, 0xc1, 0xd5, 0xe8, 0xbc, 0x7c, 0xfc, 0x6f, 0x43, 0x21,
+ 0xa4, 0x36, 0xbf, 0xf6, 0xdf, 0x52, 0xb0, 0x39, 0xe6, 0xbf, 0xc6, 0x6e, 0x18, 0x1d, 0xab, 0xe9,
+ 0x2b, 0x8e, 0xd5, 0xeb, 0x6e, 0x88, 0x1d, 0x10, 0xc2, 0x29, 0xf3, 0x59, 0xfd, 0x31, 0x05, 0x6f,
+ 0x1c, 0x1a, 0x8a, 0x8c, 0x51, 0xff, 0xd3, 0x06, 0x6a, 0x61, 0x93, 0xfb, 0x00, 0xb2, 0x5d, 0x7a,
+ 0x45, 0xb9, 0x5c, 0xda, 0xdb, 0x8e, 0x78, 0xa7, 0xb9, 0x70, 0xf6, 0x4a, 0x63, 0x4b, 0xb8, 0x8f,
+ 0x60, 0xd5, 0x2d, 0x18, 0x91, 0x4e, 0x47, 0x35, 0x3b, 0x58, 0x33, 0x29, 0xc5, 0xcb, 0xb5, 0x82,
+ 0x6d, 0x89, 0x5b, 0xc1, 0x2d, 0x0d, 0x11, 0x92, 0xbc, 0x42, 0x4d, 0x07, 0xbe, 0x65, 0x82, 0xb8,
+ 0xf4, 0x8c, 0x88, 0xcb, 0x44, 0x10, 0xb7, 0x45, 0x07, 0xcf, 0x90, 0x15, 0x9f, 0x2f, 0x2b, 0x05,
+ 0x70, 0x68, 0x28, 0x47, 0x6a, 0x07, 0x93, 0xde, 0xff, 0x45, 0x56, 0x4f, 0xd3, 0x31, 0xc2, 0x6a,
+ 0x1f, 0x37, 0xa3, 0xc8, 0x1a, 0x22, 0x3c, 0xb2, 0x3e, 0xf7, 0x2d, 0x33, 0x26, 0xeb, 0x63, 0xe0,
+ 0x34, 0x7c, 0x6a, 0xd6, 0x0d, 0xfc, 0x4d, 0x0f, 0x6b, 0x08, 0xd7, 0x75, 0x8c, 0xfa, 0x94, 0xb8,
+ 0x4c, 0x6d, 0xdb, 0xb6, 0xc4, 0xdb, 0x6e, 0x84, 0x49, 0x8c, 0x24, 0xaf, 0x3a, 0xc6, 0xcf, 0x98,
+ 0xcd, 0x21, 0x33, 0x46, 0xcb, 0xae, 0xd3, 0x93, 0x2a, 0xe3, 0xd7, 0xa7, 0xfd, 0x67, 0xf7, 0x08,
+ 0xc0, 0xcc, 0x4f, 0x34, 0xda, 0xcb, 0x37, 0x83, 0xfd, 0xf7, 0x61, 0x89, 0x35, 0xb4, 0x53, 0x13,
+ 0x1b, 0x0d, 0x9b, 0xb6, 0x25, 0x72, 0x23, 0xdd, 0xee, 0x38, 0x25, 0xd9, 0x1d, 0x22, 0x6e, 0xf5,
+ 0xb3, 0x1d, 0x0e, 0xe1, 0xb2, 0xcd, 0xbd, 0xaa, 0x6c, 0xd9, 0x0b, 0xdf, 0xe3, 0xa3, 0xfa, 0xf8,
+ 0xea, 0xfd, 0x9e, 0xa2, 0xa2, 0x56, 0x51, 0x4b, 0x23, 0xdf, 0xb6, 0x71, 0x53, 0xc1, 0xf4, 0x21,
+ 0x7f, 0x25, 0xf9, 0x8a, 0xb0, 0xd2, 0x18, 0x8d, 0xe7, 0xaa, 0x27, 0x8f, 0x9b, 0x87, 0x02, 0x39,
+ 0x0b, 0x9b, 0x51, 0x02, 0x51, 0xa7, 0x27, 0x50, 0xd5, 0xb9, 0x79, 0xed, 0xd3, 0xdb, 0xfd, 0x1e,
+ 0x1a, 0x63, 0xcd, 0x23, 0x75, 0xef, 0x8f, 0x05, 0x48, 0x1f, 0x1a, 0x0a, 0x47, 0x60, 0x65, 0xfc,
+ 0xbb, 0xee, 0x5e, 0x04, 0x91, 0x93, 0x5f, 0x57, 0x7c, 0x25, 0x36, 0xd4, 0x4b, 0xcc, 0xb5, 0xe1,
+ 0xd6, 0xd8, 0x47, 0x58, 0x31, 0x56, 0x90, 0x23, 0x7d, 0xc0, 0x3f, 0x88, 0x8b, 0x8c, 0xc8, 0xe6,
+ 0x9c, 0xb2, 0xe2, 0x65, 0xab, 0xa2, 0x56, 0xcc, 0x6c, 0x81, 0x13, 0x27, 0x77, 0x0a, 0x5c, 0xc8,
+ 0x69, 0xf3, 0x7e, 0xac, 0x38, 0x0c, 0xcd, 0xef, 0x4f, 0x83, 0xf6, 0x33, 0xeb, 0xb0, 0x3a, 0x71,
+ 0x2c, 0x7b, 0xfb, 0xd2, 0x48, 0x3e, 0x96, 0xdf, 0x8b, 0x8f, 0xf5, 0x73, 0x7e, 0x07, 0xb9, 0xb0,
+ 0xe3, 0xd4, 0x6e, 0xbc, 0x50, 0xde, 0x7e, 0xdf, 0x9b, 0x0a, 0xee, 0x27, 0xff, 0x1a, 0x20, 0x70,
+ 0xea, 0x78, 0x2b, 0x3a, 0xc8, 0x10, 0xc5, 0xdf, 0x8f, 0x83, 0xf2, 0x33, 0x7c, 0x09, 0xf3, 0xde,
+ 0x7b, 0xfa, 0x6e, 0xf4, 0x42, 0x06, 0xe1, 0xef, 0x5d, 0x0a, 0x09, 0xf6, 0xe4, 0xd8, 0x9b, 0xa8,
+ 0x78, 0xe9, 0x62, 0x86, 0xbc, 0xa8, 0x27, 0xc3, 0xa7, 0xa7, 0xf3, 0x80, 0x8f, 0x4f, 0xce, 0x0b,
+ 0x6a, 0x1d, 0x83, 0x5e, 0xf4, 0x80, 0x47, 0x4c, 0x96, 0xda, 0xa3, 0x17, 0x67, 0x42, 0xf2, 0xe5,
+ 0x99, 0x90, 0xfc, 0xe7, 0x4c, 0x48, 0xfe, 0x70, 0x2e, 0x24, 0x5e, 0x9e, 0x0b, 0x89, 0x3f, 0xcf,
+ 0x85, 0xc4, 0xd3, 0x92, 0xa2, 0x9a, 0x27, 0xbd, 0xe3, 0x12, 0x22, 0x9d, 0x32, 0x22, 0x46, 0x87,
+ 0x18, 0x65, 0xf5, 0x18, 0xed, 0x7a, 0xff, 0x9f, 0x1e, 0xec, 0xef, 0x7a, 0xbf, 0xa0, 0xcc, 0x41,
+ 0x17, 0x1b, 0xc7, 0x59, 0xfa, 0xfb, 0xe9, 0xdd, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0x5f, 0xfe,
+ 0x8d, 0x31, 0x15, 0x13, 0x00, 0x00,
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// MsgClient is the client API for Msg service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type MsgClient interface {
+ // ChannelOpenInit defines a rpc handler method for MsgChannelOpenInit.
+ ChannelOpenInit(ctx context.Context, in *MsgChannelOpenInit, opts ...grpc.CallOption) (*MsgChannelOpenInitResponse, error)
+ // ChannelOpenTry defines a rpc handler method for MsgChannelOpenTry.
+ ChannelOpenTry(ctx context.Context, in *MsgChannelOpenTry, opts ...grpc.CallOption) (*MsgChannelOpenTryResponse, error)
+ // ChannelOpenAck defines a rpc handler method for MsgChannelOpenAck.
+ ChannelOpenAck(ctx context.Context, in *MsgChannelOpenAck, opts ...grpc.CallOption) (*MsgChannelOpenAckResponse, error)
+ // ChannelOpenConfirm defines a rpc handler method for MsgChannelOpenConfirm.
+ ChannelOpenConfirm(ctx context.Context, in *MsgChannelOpenConfirm, opts ...grpc.CallOption) (*MsgChannelOpenConfirmResponse, error)
+ // ChannelCloseInit defines a rpc handler method for MsgChannelCloseInit.
+ ChannelCloseInit(ctx context.Context, in *MsgChannelCloseInit, opts ...grpc.CallOption) (*MsgChannelCloseInitResponse, error)
+ // ChannelCloseConfirm defines a rpc handler method for
+ // MsgChannelCloseConfirm.
+ ChannelCloseConfirm(ctx context.Context, in *MsgChannelCloseConfirm, opts ...grpc.CallOption) (*MsgChannelCloseConfirmResponse, error)
+ // RecvPacket defines a rpc handler method for MsgRecvPacket.
+ RecvPacket(ctx context.Context, in *MsgRecvPacket, opts ...grpc.CallOption) (*MsgRecvPacketResponse, error)
+ // Timeout defines a rpc handler method for MsgTimeout.
+ Timeout(ctx context.Context, in *MsgTimeout, opts ...grpc.CallOption) (*MsgTimeoutResponse, error)
+ // TimeoutOnClose defines a rpc handler method for MsgTimeoutOnClose.
+ TimeoutOnClose(ctx context.Context, in *MsgTimeoutOnClose, opts ...grpc.CallOption) (*MsgTimeoutOnCloseResponse, error)
+ // Acknowledgement defines a rpc handler method for MsgAcknowledgement.
+ Acknowledgement(ctx context.Context, in *MsgAcknowledgement, opts ...grpc.CallOption) (*MsgAcknowledgementResponse, error)
+}
+
+type msgClient struct {
+ cc grpc1.ClientConn
+}
+
+func NewMsgClient(cc grpc1.ClientConn) MsgClient {
+ return &msgClient{cc}
+}
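+
+// Editor's note: a hypothetical usage sketch. In practice these messages are normally
+// broadcast inside a transaction rather than invoked directly, but the generated client
+// can talk to any gRPC server that registers this Msg service (the endpoint address and
+// relayerAddress below are assumptions):
+//
+//	conn, err := grpc.Dial("localhost:9090", grpc.WithInsecure())
+//	if err != nil {
+//	    panic(err)
+//	}
+//	defer conn.Close()
+//
+//	client := NewMsgClient(conn)
+//	res, err := client.ChannelOpenInit(context.Background(), &MsgChannelOpenInit{
+//	    PortId: "transfer", // Channel field omitted for brevity
+//	    Signer: relayerAddress,
+//	})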
+
+func (c *msgClient) ChannelOpenInit(ctx context.Context, in *MsgChannelOpenInit, opts ...grpc.CallOption) (*MsgChannelOpenInitResponse, error) {
+ out := new(MsgChannelOpenInitResponse)
+ err := c.cc.Invoke(ctx, "/ibcgo.core.channel.v1.Msg/ChannelOpenInit", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *msgClient) ChannelOpenTry(ctx context.Context, in *MsgChannelOpenTry, opts ...grpc.CallOption) (*MsgChannelOpenTryResponse, error) {
+ out := new(MsgChannelOpenTryResponse)
+ err := c.cc.Invoke(ctx, "/ibcgo.core.channel.v1.Msg/ChannelOpenTry", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *msgClient) ChannelOpenAck(ctx context.Context, in *MsgChannelOpenAck, opts ...grpc.CallOption) (*MsgChannelOpenAckResponse, error) {
+ out := new(MsgChannelOpenAckResponse)
+ err := c.cc.Invoke(ctx, "/ibcgo.core.channel.v1.Msg/ChannelOpenAck", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *msgClient) ChannelOpenConfirm(ctx context.Context, in *MsgChannelOpenConfirm, opts ...grpc.CallOption) (*MsgChannelOpenConfirmResponse, error) {
+ out := new(MsgChannelOpenConfirmResponse)
+ err := c.cc.Invoke(ctx, "/ibcgo.core.channel.v1.Msg/ChannelOpenConfirm", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *msgClient) ChannelCloseInit(ctx context.Context, in *MsgChannelCloseInit, opts ...grpc.CallOption) (*MsgChannelCloseInitResponse, error) {
+ out := new(MsgChannelCloseInitResponse)
+ err := c.cc.Invoke(ctx, "/ibcgo.core.channel.v1.Msg/ChannelCloseInit", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *msgClient) ChannelCloseConfirm(ctx context.Context, in *MsgChannelCloseConfirm, opts ...grpc.CallOption) (*MsgChannelCloseConfirmResponse, error) {
+ out := new(MsgChannelCloseConfirmResponse)
+ err := c.cc.Invoke(ctx, "/ibcgo.core.channel.v1.Msg/ChannelCloseConfirm", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *msgClient) RecvPacket(ctx context.Context, in *MsgRecvPacket, opts ...grpc.CallOption) (*MsgRecvPacketResponse, error) {
+ out := new(MsgRecvPacketResponse)
+ err := c.cc.Invoke(ctx, "/ibcgo.core.channel.v1.Msg/RecvPacket", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *msgClient) Timeout(ctx context.Context, in *MsgTimeout, opts ...grpc.CallOption) (*MsgTimeoutResponse, error) {
+ out := new(MsgTimeoutResponse)
+ err := c.cc.Invoke(ctx, "/ibcgo.core.channel.v1.Msg/Timeout", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *msgClient) TimeoutOnClose(ctx context.Context, in *MsgTimeoutOnClose, opts ...grpc.CallOption) (*MsgTimeoutOnCloseResponse, error) {
+ out := new(MsgTimeoutOnCloseResponse)
+ err := c.cc.Invoke(ctx, "/ibcgo.core.channel.v1.Msg/TimeoutOnClose", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *msgClient) Acknowledgement(ctx context.Context, in *MsgAcknowledgement, opts ...grpc.CallOption) (*MsgAcknowledgementResponse, error) {
+ out := new(MsgAcknowledgementResponse)
+ err := c.cc.Invoke(ctx, "/ibcgo.core.channel.v1.Msg/Acknowledgement", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// MsgServer is the server API for Msg service.
+type MsgServer interface {
+ // ChannelOpenInit defines a rpc handler method for MsgChannelOpenInit.
+ ChannelOpenInit(context.Context, *MsgChannelOpenInit) (*MsgChannelOpenInitResponse, error)
+ // ChannelOpenTry defines a rpc handler method for MsgChannelOpenTry.
+ ChannelOpenTry(context.Context, *MsgChannelOpenTry) (*MsgChannelOpenTryResponse, error)
+ // ChannelOpenAck defines a rpc handler method for MsgChannelOpenAck.
+ ChannelOpenAck(context.Context, *MsgChannelOpenAck) (*MsgChannelOpenAckResponse, error)
+ // ChannelOpenConfirm defines a rpc handler method for MsgChannelOpenConfirm.
+ ChannelOpenConfirm(context.Context, *MsgChannelOpenConfirm) (*MsgChannelOpenConfirmResponse, error)
+ // ChannelCloseInit defines a rpc handler method for MsgChannelCloseInit.
+ ChannelCloseInit(context.Context, *MsgChannelCloseInit) (*MsgChannelCloseInitResponse, error)
+ // ChannelCloseConfirm defines a rpc handler method for
+ // MsgChannelCloseConfirm.
+ ChannelCloseConfirm(context.Context, *MsgChannelCloseConfirm) (*MsgChannelCloseConfirmResponse, error)
+ // RecvPacket defines a rpc handler method for MsgRecvPacket.
+ RecvPacket(context.Context, *MsgRecvPacket) (*MsgRecvPacketResponse, error)
+ // Timeout defines a rpc handler method for MsgTimeout.
+ Timeout(context.Context, *MsgTimeout) (*MsgTimeoutResponse, error)
+ // TimeoutOnClose defines a rpc handler method for MsgTimeoutOnClose.
+ TimeoutOnClose(context.Context, *MsgTimeoutOnClose) (*MsgTimeoutOnCloseResponse, error)
+ // Acknowledgement defines a rpc handler method for MsgAcknowledgement.
+ Acknowledgement(context.Context, *MsgAcknowledgement) (*MsgAcknowledgementResponse, error)
+}
+
+// UnimplementedMsgServer can be embedded to have forward compatible implementations.
+type UnimplementedMsgServer struct {
+}
+
+func (*UnimplementedMsgServer) ChannelOpenInit(ctx context.Context, req *MsgChannelOpenInit) (*MsgChannelOpenInitResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ChannelOpenInit not implemented")
+}
+func (*UnimplementedMsgServer) ChannelOpenTry(ctx context.Context, req *MsgChannelOpenTry) (*MsgChannelOpenTryResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ChannelOpenTry not implemented")
+}
+func (*UnimplementedMsgServer) ChannelOpenAck(ctx context.Context, req *MsgChannelOpenAck) (*MsgChannelOpenAckResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ChannelOpenAck not implemented")
+}
+func (*UnimplementedMsgServer) ChannelOpenConfirm(ctx context.Context, req *MsgChannelOpenConfirm) (*MsgChannelOpenConfirmResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ChannelOpenConfirm not implemented")
+}
+func (*UnimplementedMsgServer) ChannelCloseInit(ctx context.Context, req *MsgChannelCloseInit) (*MsgChannelCloseInitResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ChannelCloseInit not implemented")
+}
+func (*UnimplementedMsgServer) ChannelCloseConfirm(ctx context.Context, req *MsgChannelCloseConfirm) (*MsgChannelCloseConfirmResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ChannelCloseConfirm not implemented")
+}
+func (*UnimplementedMsgServer) RecvPacket(ctx context.Context, req *MsgRecvPacket) (*MsgRecvPacketResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method RecvPacket not implemented")
+}
+func (*UnimplementedMsgServer) Timeout(ctx context.Context, req *MsgTimeout) (*MsgTimeoutResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Timeout not implemented")
+}
+func (*UnimplementedMsgServer) TimeoutOnClose(ctx context.Context, req *MsgTimeoutOnClose) (*MsgTimeoutOnCloseResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method TimeoutOnClose not implemented")
+}
+func (*UnimplementedMsgServer) Acknowledgement(ctx context.Context, req *MsgAcknowledgement) (*MsgAcknowledgementResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Acknowledgement not implemented")
+}
+
+func RegisterMsgServer(s grpc1.Server, srv MsgServer) {
+ s.RegisterService(&_Msg_serviceDesc, srv)
+}
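+
+// Editor's note: an illustrative wiring sketch. Embedding UnimplementedMsgServer keeps a
+// handler forward compatible, so only the methods that are actually overridden need to be
+// written; the Keeper type, grpcServer and keeper values below are hypothetical.
+//
+//	type msgServer struct {
+//	    UnimplementedMsgServer
+//	    k Keeper
+//	}
+//
+//	func (s msgServer) ChannelOpenInit(ctx context.Context, msg *MsgChannelOpenInit) (*MsgChannelOpenInitResponse, error) {
+//	    // validate msg and write the new channel end to state via s.k ...
+//	    return &MsgChannelOpenInitResponse{}, nil
+//	}
+//
+//	RegisterMsgServer(grpcServer, msgServer{k: keeper})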
+
+func _Msg_ChannelOpenInit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(MsgChannelOpenInit)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(MsgServer).ChannelOpenInit(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/ibcgo.core.channel.v1.Msg/ChannelOpenInit",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(MsgServer).ChannelOpenInit(ctx, req.(*MsgChannelOpenInit))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Msg_ChannelOpenTry_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(MsgChannelOpenTry)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(MsgServer).ChannelOpenTry(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/ibcgo.core.channel.v1.Msg/ChannelOpenTry",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(MsgServer).ChannelOpenTry(ctx, req.(*MsgChannelOpenTry))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Msg_ChannelOpenAck_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(MsgChannelOpenAck)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(MsgServer).ChannelOpenAck(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/ibcgo.core.channel.v1.Msg/ChannelOpenAck",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(MsgServer).ChannelOpenAck(ctx, req.(*MsgChannelOpenAck))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Msg_ChannelOpenConfirm_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(MsgChannelOpenConfirm)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(MsgServer).ChannelOpenConfirm(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/ibcgo.core.channel.v1.Msg/ChannelOpenConfirm",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(MsgServer).ChannelOpenConfirm(ctx, req.(*MsgChannelOpenConfirm))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Msg_ChannelCloseInit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(MsgChannelCloseInit)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(MsgServer).ChannelCloseInit(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/ibcgo.core.channel.v1.Msg/ChannelCloseInit",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(MsgServer).ChannelCloseInit(ctx, req.(*MsgChannelCloseInit))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Msg_ChannelCloseConfirm_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(MsgChannelCloseConfirm)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(MsgServer).ChannelCloseConfirm(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/ibcgo.core.channel.v1.Msg/ChannelCloseConfirm",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(MsgServer).ChannelCloseConfirm(ctx, req.(*MsgChannelCloseConfirm))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Msg_RecvPacket_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(MsgRecvPacket)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(MsgServer).RecvPacket(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/ibcgo.core.channel.v1.Msg/RecvPacket",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(MsgServer).RecvPacket(ctx, req.(*MsgRecvPacket))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Msg_Timeout_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(MsgTimeout)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(MsgServer).Timeout(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/ibcgo.core.channel.v1.Msg/Timeout",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(MsgServer).Timeout(ctx, req.(*MsgTimeout))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Msg_TimeoutOnClose_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(MsgTimeoutOnClose)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(MsgServer).TimeoutOnClose(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/ibcgo.core.channel.v1.Msg/TimeoutOnClose",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(MsgServer).TimeoutOnClose(ctx, req.(*MsgTimeoutOnClose))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Msg_Acknowledgement_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(MsgAcknowledgement)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(MsgServer).Acknowledgement(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/ibcgo.core.channel.v1.Msg/Acknowledgement",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(MsgServer).Acknowledgement(ctx, req.(*MsgAcknowledgement))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _Msg_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "ibcgo.core.channel.v1.Msg",
+ HandlerType: (*MsgServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "ChannelOpenInit",
+ Handler: _Msg_ChannelOpenInit_Handler,
+ },
+ {
+ MethodName: "ChannelOpenTry",
+ Handler: _Msg_ChannelOpenTry_Handler,
+ },
+ {
+ MethodName: "ChannelOpenAck",
+ Handler: _Msg_ChannelOpenAck_Handler,
+ },
+ {
+ MethodName: "ChannelOpenConfirm",
+ Handler: _Msg_ChannelOpenConfirm_Handler,
+ },
+ {
+ MethodName: "ChannelCloseInit",
+ Handler: _Msg_ChannelCloseInit_Handler,
+ },
+ {
+ MethodName: "ChannelCloseConfirm",
+ Handler: _Msg_ChannelCloseConfirm_Handler,
+ },
+ {
+ MethodName: "RecvPacket",
+ Handler: _Msg_RecvPacket_Handler,
+ },
+ {
+ MethodName: "Timeout",
+ Handler: _Msg_Timeout_Handler,
+ },
+ {
+ MethodName: "TimeoutOnClose",
+ Handler: _Msg_TimeoutOnClose_Handler,
+ },
+ {
+ MethodName: "Acknowledgement",
+ Handler: _Msg_Acknowledgement_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "ibcgo/core/channel/v1/tx.proto",
+}
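
The generated _Msg_serviceDesc above maps each RPC method name to its handler function; RegisterMsgServer hands that table to the gRPC router, and every _Msg_*_Handler follows the same decode-then-dispatch shape: call the server directly when no interceptor is configured, otherwise give the interceptor a closure that performs the typed call. Below is a minimal, dependency-free sketch of that pattern (echoServer, dispatch, and interceptor are illustrative names, not part of ibc-go or the generated code, and the *grpc.UnaryServerInfo argument is omitted to keep the sketch self-contained):

    package main

    import (
        "context"
        "fmt"
    )

    // interceptor mirrors the shape of a gRPC unary server interceptor,
    // minus the server-info argument.
    type interceptor func(ctx context.Context, req interface{},
        handler func(ctx context.Context, req interface{}) (interface{}, error)) (interface{}, error)

    type echoServer struct{}

    func (echoServer) Echo(_ context.Context, req string) (string, error) {
        return "echo: " + req, nil
    }

    // dispatch follows the structure of the generated _Msg_*_Handler functions.
    func dispatch(ctx context.Context, srv echoServer, req string, ic interceptor) (interface{}, error) {
        if ic == nil {
            return srv.Echo(ctx, req)
        }
        handler := func(ctx context.Context, r interface{}) (interface{}, error) {
            return srv.Echo(ctx, r.(string))
        }
        return ic(ctx, req, handler)
    }

    func main() {
        var logging interceptor = func(ctx context.Context, req interface{},
            h func(ctx context.Context, req interface{}) (interface{}, error)) (interface{}, error) {
            fmt.Println("calling with:", req)
            return h(ctx, req)
        }
        resp, _ := dispatch(context.Background(), echoServer{}, "MsgChannelOpenInit", logging)
        fmt.Println(resp) // echo: MsgChannelOpenInit
    }
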
+
+func (m *MsgChannelOpenInit) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MsgChannelOpenInit) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MsgChannelOpenInit) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Signer) > 0 {
+ i -= len(m.Signer)
+ copy(dAtA[i:], m.Signer)
+ i = encodeVarintTx(dAtA, i, uint64(len(m.Signer)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ {
+ size, err := m.Channel.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTx(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ if len(m.PortId) > 0 {
+ i -= len(m.PortId)
+ copy(dAtA[i:], m.PortId)
+ i = encodeVarintTx(dAtA, i, uint64(len(m.PortId)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
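
Note that MarshalToSizedBuffer fills the buffer from the end toward the start: the highest-numbered field (Signer, tag 0x1a) is written first and PortId (tag 0xa) last, so the finished bytes come out in ascending field order. A small standalone sketch of that backwards-writing pattern follows (putBytesField is an illustrative helper, not generated code, and it assumes payloads shorter than 128 bytes so the length fits in one varint byte):

    package main

    import "fmt"

    // putBytesField writes tag | len | payload ending at position i,
    // moving right-to-left, and returns the new write position.
    func putBytesField(buf []byte, i int, tag byte, payload string) int {
        i -= len(payload)
        copy(buf[i:], payload)
        i--
        buf[i] = byte(len(payload)) // one-byte varint length (sketch assumption)
        i--
        buf[i] = tag
        return i
    }

    func main() {
        buf := make([]byte, 32)
        i := len(buf)
        i = putBytesField(buf, i, 0x1a, "signer")   // field 3 written first...
        i = putBytesField(buf, i, 0x0a, "transfer") // ...field 1 written last
        fmt.Printf("%% x -> % x\n", buf[i:])        // field 1 appears first in the output
    }
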
+
+func (m *MsgChannelOpenInitResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MsgChannelOpenInitResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MsgChannelOpenInitResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ return len(dAtA) - i, nil
+}
+
+func (m *MsgChannelOpenTry) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MsgChannelOpenTry) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MsgChannelOpenTry) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Signer) > 0 {
+ i -= len(m.Signer)
+ copy(dAtA[i:], m.Signer)
+ i = encodeVarintTx(dAtA, i, uint64(len(m.Signer)))
+ i--
+ dAtA[i] = 0x3a
+ }
+ {
+ size, err := m.ProofHeight.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTx(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x32
+ if len(m.ProofInit) > 0 {
+ i -= len(m.ProofInit)
+ copy(dAtA[i:], m.ProofInit)
+ i = encodeVarintTx(dAtA, i, uint64(len(m.ProofInit)))
+ i--
+ dAtA[i] = 0x2a
+ }
+ if len(m.CounterpartyVersion) > 0 {
+ i -= len(m.CounterpartyVersion)
+ copy(dAtA[i:], m.CounterpartyVersion)
+ i = encodeVarintTx(dAtA, i, uint64(len(m.CounterpartyVersion)))
+ i--
+ dAtA[i] = 0x22
+ }
+ {
+ size, err := m.Channel.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTx(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ if len(m.PreviousChannelId) > 0 {
+ i -= len(m.PreviousChannelId)
+ copy(dAtA[i:], m.PreviousChannelId)
+ i = encodeVarintTx(dAtA, i, uint64(len(m.PreviousChannelId)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.PortId) > 0 {
+ i -= len(m.PortId)
+ copy(dAtA[i:], m.PortId)
+ i = encodeVarintTx(dAtA, i, uint64(len(m.PortId)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *MsgChannelOpenTryResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MsgChannelOpenTryResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MsgChannelOpenTryResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ return len(dAtA) - i, nil
+}
+
+func (m *MsgChannelOpenAck) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MsgChannelOpenAck) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MsgChannelOpenAck) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Signer) > 0 {
+ i -= len(m.Signer)
+ copy(dAtA[i:], m.Signer)
+ i = encodeVarintTx(dAtA, i, uint64(len(m.Signer)))
+ i--
+ dAtA[i] = 0x3a
+ }
+ {
+ size, err := m.ProofHeight.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTx(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x32
+ if len(m.ProofTry) > 0 {
+ i -= len(m.ProofTry)
+ copy(dAtA[i:], m.ProofTry)
+ i = encodeVarintTx(dAtA, i, uint64(len(m.ProofTry)))
+ i--
+ dAtA[i] = 0x2a
+ }
+ if len(m.CounterpartyVersion) > 0 {
+ i -= len(m.CounterpartyVersion)
+ copy(dAtA[i:], m.CounterpartyVersion)
+ i = encodeVarintTx(dAtA, i, uint64(len(m.CounterpartyVersion)))
+ i--
+ dAtA[i] = 0x22
+ }
+ if len(m.CounterpartyChannelId) > 0 {
+ i -= len(m.CounterpartyChannelId)
+ copy(dAtA[i:], m.CounterpartyChannelId)
+ i = encodeVarintTx(dAtA, i, uint64(len(m.CounterpartyChannelId)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if len(m.ChannelId) > 0 {
+ i -= len(m.ChannelId)
+ copy(dAtA[i:], m.ChannelId)
+ i = encodeVarintTx(dAtA, i, uint64(len(m.ChannelId)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.PortId) > 0 {
+ i -= len(m.PortId)
+ copy(dAtA[i:], m.PortId)
+ i = encodeVarintTx(dAtA, i, uint64(len(m.PortId)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *MsgChannelOpenAckResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MsgChannelOpenAckResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MsgChannelOpenAckResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ return len(dAtA) - i, nil
+}
+
+func (m *MsgChannelOpenConfirm) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MsgChannelOpenConfirm) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MsgChannelOpenConfirm) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Signer) > 0 {
+ i -= len(m.Signer)
+ copy(dAtA[i:], m.Signer)
+ i = encodeVarintTx(dAtA, i, uint64(len(m.Signer)))
+ i--
+ dAtA[i] = 0x2a
+ }
+ {
+ size, err := m.ProofHeight.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTx(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ if len(m.ProofAck) > 0 {
+ i -= len(m.ProofAck)
+ copy(dAtA[i:], m.ProofAck)
+ i = encodeVarintTx(dAtA, i, uint64(len(m.ProofAck)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if len(m.ChannelId) > 0 {
+ i -= len(m.ChannelId)
+ copy(dAtA[i:], m.ChannelId)
+ i = encodeVarintTx(dAtA, i, uint64(len(m.ChannelId)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.PortId) > 0 {
+ i -= len(m.PortId)
+ copy(dAtA[i:], m.PortId)
+ i = encodeVarintTx(dAtA, i, uint64(len(m.PortId)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *MsgChannelOpenConfirmResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MsgChannelOpenConfirmResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MsgChannelOpenConfirmResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ return len(dAtA) - i, nil
+}
+
+func (m *MsgChannelCloseInit) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MsgChannelCloseInit) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MsgChannelCloseInit) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Signer) > 0 {
+ i -= len(m.Signer)
+ copy(dAtA[i:], m.Signer)
+ i = encodeVarintTx(dAtA, i, uint64(len(m.Signer)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if len(m.ChannelId) > 0 {
+ i -= len(m.ChannelId)
+ copy(dAtA[i:], m.ChannelId)
+ i = encodeVarintTx(dAtA, i, uint64(len(m.ChannelId)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.PortId) > 0 {
+ i -= len(m.PortId)
+ copy(dAtA[i:], m.PortId)
+ i = encodeVarintTx(dAtA, i, uint64(len(m.PortId)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *MsgChannelCloseInitResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MsgChannelCloseInitResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MsgChannelCloseInitResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ return len(dAtA) - i, nil
+}
+
+func (m *MsgChannelCloseConfirm) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MsgChannelCloseConfirm) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MsgChannelCloseConfirm) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Signer) > 0 {
+ i -= len(m.Signer)
+ copy(dAtA[i:], m.Signer)
+ i = encodeVarintTx(dAtA, i, uint64(len(m.Signer)))
+ i--
+ dAtA[i] = 0x2a
+ }
+ {
+ size, err := m.ProofHeight.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTx(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ if len(m.ProofInit) > 0 {
+ i -= len(m.ProofInit)
+ copy(dAtA[i:], m.ProofInit)
+ i = encodeVarintTx(dAtA, i, uint64(len(m.ProofInit)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if len(m.ChannelId) > 0 {
+ i -= len(m.ChannelId)
+ copy(dAtA[i:], m.ChannelId)
+ i = encodeVarintTx(dAtA, i, uint64(len(m.ChannelId)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.PortId) > 0 {
+ i -= len(m.PortId)
+ copy(dAtA[i:], m.PortId)
+ i = encodeVarintTx(dAtA, i, uint64(len(m.PortId)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *MsgChannelCloseConfirmResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MsgChannelCloseConfirmResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MsgChannelCloseConfirmResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ return len(dAtA) - i, nil
+}
+
+func (m *MsgRecvPacket) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MsgRecvPacket) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MsgRecvPacket) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Signer) > 0 {
+ i -= len(m.Signer)
+ copy(dAtA[i:], m.Signer)
+ i = encodeVarintTx(dAtA, i, uint64(len(m.Signer)))
+ i--
+ dAtA[i] = 0x22
+ }
+ {
+ size, err := m.ProofHeight.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTx(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ if len(m.ProofCommitment) > 0 {
+ i -= len(m.ProofCommitment)
+ copy(dAtA[i:], m.ProofCommitment)
+ i = encodeVarintTx(dAtA, i, uint64(len(m.ProofCommitment)))
+ i--
+ dAtA[i] = 0x12
+ }
+ {
+ size, err := m.Packet.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTx(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *MsgRecvPacketResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MsgRecvPacketResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MsgRecvPacketResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ return len(dAtA) - i, nil
+}
+
+func (m *MsgTimeout) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MsgTimeout) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MsgTimeout) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Signer) > 0 {
+ i -= len(m.Signer)
+ copy(dAtA[i:], m.Signer)
+ i = encodeVarintTx(dAtA, i, uint64(len(m.Signer)))
+ i--
+ dAtA[i] = 0x2a
+ }
+ if m.NextSequenceRecv != 0 {
+ i = encodeVarintTx(dAtA, i, uint64(m.NextSequenceRecv))
+ i--
+ dAtA[i] = 0x20
+ }
+ {
+ size, err := m.ProofHeight.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTx(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ if len(m.ProofUnreceived) > 0 {
+ i -= len(m.ProofUnreceived)
+ copy(dAtA[i:], m.ProofUnreceived)
+ i = encodeVarintTx(dAtA, i, uint64(len(m.ProofUnreceived)))
+ i--
+ dAtA[i] = 0x12
+ }
+ {
+ size, err := m.Packet.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTx(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *MsgTimeoutResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MsgTimeoutResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MsgTimeoutResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ return len(dAtA) - i, nil
+}
+
+func (m *MsgTimeoutOnClose) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MsgTimeoutOnClose) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MsgTimeoutOnClose) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Signer) > 0 {
+ i -= len(m.Signer)
+ copy(dAtA[i:], m.Signer)
+ i = encodeVarintTx(dAtA, i, uint64(len(m.Signer)))
+ i--
+ dAtA[i] = 0x32
+ }
+ if m.NextSequenceRecv != 0 {
+ i = encodeVarintTx(dAtA, i, uint64(m.NextSequenceRecv))
+ i--
+ dAtA[i] = 0x28
+ }
+ {
+ size, err := m.ProofHeight.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTx(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ if len(m.ProofClose) > 0 {
+ i -= len(m.ProofClose)
+ copy(dAtA[i:], m.ProofClose)
+ i = encodeVarintTx(dAtA, i, uint64(len(m.ProofClose)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if len(m.ProofUnreceived) > 0 {
+ i -= len(m.ProofUnreceived)
+ copy(dAtA[i:], m.ProofUnreceived)
+ i = encodeVarintTx(dAtA, i, uint64(len(m.ProofUnreceived)))
+ i--
+ dAtA[i] = 0x12
+ }
+ {
+ size, err := m.Packet.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTx(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *MsgTimeoutOnCloseResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MsgTimeoutOnCloseResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MsgTimeoutOnCloseResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ return len(dAtA) - i, nil
+}
+
+func (m *MsgAcknowledgement) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MsgAcknowledgement) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MsgAcknowledgement) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Signer) > 0 {
+ i -= len(m.Signer)
+ copy(dAtA[i:], m.Signer)
+ i = encodeVarintTx(dAtA, i, uint64(len(m.Signer)))
+ i--
+ dAtA[i] = 0x2a
+ }
+ {
+ size, err := m.ProofHeight.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTx(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ if len(m.ProofAcked) > 0 {
+ i -= len(m.ProofAcked)
+ copy(dAtA[i:], m.ProofAcked)
+ i = encodeVarintTx(dAtA, i, uint64(len(m.ProofAcked)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if len(m.Acknowledgement) > 0 {
+ i -= len(m.Acknowledgement)
+ copy(dAtA[i:], m.Acknowledgement)
+ i = encodeVarintTx(dAtA, i, uint64(len(m.Acknowledgement)))
+ i--
+ dAtA[i] = 0x12
+ }
+ {
+ size, err := m.Packet.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTx(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *MsgAcknowledgementResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MsgAcknowledgementResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MsgAcknowledgementResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintTx(dAtA []byte, offset int, v uint64) int {
+ offset -= sovTx(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
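
encodeVarintTx and sovTx implement standard protobuf base-128 varints: seven payload bits per byte, least-significant group first, with the high bit set on every byte except the last. An illustrative, self-contained re-derivation is below (encode and size are sketch names, not the generated functions):

    package main

    import (
        "fmt"
        "math/bits"
    )

    // size mirrors sovTx: ceil(bitlen(x|1) / 7) bytes.
    func size(x uint64) int { return (bits.Len64(x|1) + 6) / 7 }

    // encode emits the low 7 bits of each group with a continuation bit,
    // last byte without it, matching the loop in encodeVarintTx.
    func encode(x uint64) []byte {
        var out []byte
        for x >= 1<<7 {
            out = append(out, byte(x&0x7f|0x80))
            x >>= 7
        }
        return append(out, byte(x))
    }

    func main() {
        for _, v := range []uint64{1, 127, 128, 300} {
            fmt.Printf("%d -> % x (size %d)\n", v, encode(v), size(v))
        }
        // e.g. 300 -> ac 02 (size 2)
    }
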
+func (m *MsgChannelOpenInit) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.PortId)
+ if l > 0 {
+ n += 1 + l + sovTx(uint64(l))
+ }
+ l = m.Channel.Size()
+ n += 1 + l + sovTx(uint64(l))
+ l = len(m.Signer)
+ if l > 0 {
+ n += 1 + l + sovTx(uint64(l))
+ }
+ return n
+}
+
+func (m *MsgChannelOpenInitResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ return n
+}
+
+func (m *MsgChannelOpenTry) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.PortId)
+ if l > 0 {
+ n += 1 + l + sovTx(uint64(l))
+ }
+ l = len(m.PreviousChannelId)
+ if l > 0 {
+ n += 1 + l + sovTx(uint64(l))
+ }
+ l = m.Channel.Size()
+ n += 1 + l + sovTx(uint64(l))
+ l = len(m.CounterpartyVersion)
+ if l > 0 {
+ n += 1 + l + sovTx(uint64(l))
+ }
+ l = len(m.ProofInit)
+ if l > 0 {
+ n += 1 + l + sovTx(uint64(l))
+ }
+ l = m.ProofHeight.Size()
+ n += 1 + l + sovTx(uint64(l))
+ l = len(m.Signer)
+ if l > 0 {
+ n += 1 + l + sovTx(uint64(l))
+ }
+ return n
+}
+
+func (m *MsgChannelOpenTryResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ return n
+}
+
+func (m *MsgChannelOpenAck) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.PortId)
+ if l > 0 {
+ n += 1 + l + sovTx(uint64(l))
+ }
+ l = len(m.ChannelId)
+ if l > 0 {
+ n += 1 + l + sovTx(uint64(l))
+ }
+ l = len(m.CounterpartyChannelId)
+ if l > 0 {
+ n += 1 + l + sovTx(uint64(l))
+ }
+ l = len(m.CounterpartyVersion)
+ if l > 0 {
+ n += 1 + l + sovTx(uint64(l))
+ }
+ l = len(m.ProofTry)
+ if l > 0 {
+ n += 1 + l + sovTx(uint64(l))
+ }
+ l = m.ProofHeight.Size()
+ n += 1 + l + sovTx(uint64(l))
+ l = len(m.Signer)
+ if l > 0 {
+ n += 1 + l + sovTx(uint64(l))
+ }
+ return n
+}
+
+func (m *MsgChannelOpenAckResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ return n
+}
+
+func (m *MsgChannelOpenConfirm) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.PortId)
+ if l > 0 {
+ n += 1 + l + sovTx(uint64(l))
+ }
+ l = len(m.ChannelId)
+ if l > 0 {
+ n += 1 + l + sovTx(uint64(l))
+ }
+ l = len(m.ProofAck)
+ if l > 0 {
+ n += 1 + l + sovTx(uint64(l))
+ }
+ l = m.ProofHeight.Size()
+ n += 1 + l + sovTx(uint64(l))
+ l = len(m.Signer)
+ if l > 0 {
+ n += 1 + l + sovTx(uint64(l))
+ }
+ return n
+}
+
+func (m *MsgChannelOpenConfirmResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ return n
+}
+
+func (m *MsgChannelCloseInit) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.PortId)
+ if l > 0 {
+ n += 1 + l + sovTx(uint64(l))
+ }
+ l = len(m.ChannelId)
+ if l > 0 {
+ n += 1 + l + sovTx(uint64(l))
+ }
+ l = len(m.Signer)
+ if l > 0 {
+ n += 1 + l + sovTx(uint64(l))
+ }
+ return n
+}
+
+func (m *MsgChannelCloseInitResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ return n
+}
+
+func (m *MsgChannelCloseConfirm) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.PortId)
+ if l > 0 {
+ n += 1 + l + sovTx(uint64(l))
+ }
+ l = len(m.ChannelId)
+ if l > 0 {
+ n += 1 + l + sovTx(uint64(l))
+ }
+ l = len(m.ProofInit)
+ if l > 0 {
+ n += 1 + l + sovTx(uint64(l))
+ }
+ l = m.ProofHeight.Size()
+ n += 1 + l + sovTx(uint64(l))
+ l = len(m.Signer)
+ if l > 0 {
+ n += 1 + l + sovTx(uint64(l))
+ }
+ return n
+}
+
+func (m *MsgChannelCloseConfirmResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ return n
+}
+
+func (m *MsgRecvPacket) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.Packet.Size()
+ n += 1 + l + sovTx(uint64(l))
+ l = len(m.ProofCommitment)
+ if l > 0 {
+ n += 1 + l + sovTx(uint64(l))
+ }
+ l = m.ProofHeight.Size()
+ n += 1 + l + sovTx(uint64(l))
+ l = len(m.Signer)
+ if l > 0 {
+ n += 1 + l + sovTx(uint64(l))
+ }
+ return n
+}
+
+func (m *MsgRecvPacketResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ return n
+}
+
+func (m *MsgTimeout) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.Packet.Size()
+ n += 1 + l + sovTx(uint64(l))
+ l = len(m.ProofUnreceived)
+ if l > 0 {
+ n += 1 + l + sovTx(uint64(l))
+ }
+ l = m.ProofHeight.Size()
+ n += 1 + l + sovTx(uint64(l))
+ if m.NextSequenceRecv != 0 {
+ n += 1 + sovTx(uint64(m.NextSequenceRecv))
+ }
+ l = len(m.Signer)
+ if l > 0 {
+ n += 1 + l + sovTx(uint64(l))
+ }
+ return n
+}
+
+func (m *MsgTimeoutResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ return n
+}
+
+func (m *MsgTimeoutOnClose) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.Packet.Size()
+ n += 1 + l + sovTx(uint64(l))
+ l = len(m.ProofUnreceived)
+ if l > 0 {
+ n += 1 + l + sovTx(uint64(l))
+ }
+ l = len(m.ProofClose)
+ if l > 0 {
+ n += 1 + l + sovTx(uint64(l))
+ }
+ l = m.ProofHeight.Size()
+ n += 1 + l + sovTx(uint64(l))
+ if m.NextSequenceRecv != 0 {
+ n += 1 + sovTx(uint64(m.NextSequenceRecv))
+ }
+ l = len(m.Signer)
+ if l > 0 {
+ n += 1 + l + sovTx(uint64(l))
+ }
+ return n
+}
+
+func (m *MsgTimeoutOnCloseResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ return n
+}
+
+func (m *MsgAcknowledgement) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.Packet.Size()
+ n += 1 + l + sovTx(uint64(l))
+ l = len(m.Acknowledgement)
+ if l > 0 {
+ n += 1 + l + sovTx(uint64(l))
+ }
+ l = len(m.ProofAcked)
+ if l > 0 {
+ n += 1 + l + sovTx(uint64(l))
+ }
+ l = m.ProofHeight.Size()
+ n += 1 + l + sovTx(uint64(l))
+ l = len(m.Signer)
+ if l > 0 {
+ n += 1 + l + sovTx(uint64(l))
+ }
+ return n
+}
+
+func (m *MsgAcknowledgementResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ return n
+}
+
+func sovTx(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozTx(x uint64) (n int) {
+ return sovTx(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
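
sozTx additionally applies the usual protobuf zigzag transform, (x<<1) ^ (x>>63), so values representing small negative integers still encode to short varints. A tiny sketch of the mapping (zigzag here is an illustrative helper):

    package main

    import "fmt"

    func zigzag(x int64) uint64 { return uint64(x<<1) ^ uint64(x>>63) }

    func main() {
        for _, v := range []int64{0, -1, 1, -2, 2} {
            fmt.Printf("%d -> %d\n", v, zigzag(v)) // prints 0, 1, 2, 3, 4
        }
    }
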
+func (m *MsgChannelOpenInit) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MsgChannelOpenInit: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MsgChannelOpenInit: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PortId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.PortId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Channel", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Channel.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Signer", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Signer = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTx(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthTx
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
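
Each Unmarshal loop above reads a varint key and splits it as fieldNum = key >> 3 and wireType = key & 0x7, reflecting the protobuf wire format (key = field_number<<3 | wire_type). A short illustrative decode of the keys that MsgChannelOpenInit uses (standalone sketch, not generated code):

    package main

    import "fmt"

    func main() {
        // 0x0a, 0x12, 0x1a are the keys written for PortId, Channel and Signer above.
        for _, key := range []uint64{0x0a, 0x12, 0x1a} {
            fieldNum := key >> 3
            wireType := key & 0x7
            fmt.Printf("key 0x%02x -> field %d, wire type %d\n", key, fieldNum, wireType)
        }
    }
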
+func (m *MsgChannelOpenInitResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MsgChannelOpenInitResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MsgChannelOpenInitResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTx(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthTx
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MsgChannelOpenTry) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MsgChannelOpenTry: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MsgChannelOpenTry: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PortId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.PortId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PreviousChannelId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.PreviousChannelId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Channel", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Channel.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CounterpartyVersion", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.CounterpartyVersion = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProofInit", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ProofInit = append(m.ProofInit[:0], dAtA[iNdEx:postIndex]...)
+ if m.ProofInit == nil {
+ m.ProofInit = []byte{}
+ }
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProofHeight", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ProofHeight.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Signer", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Signer = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTx(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthTx
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MsgChannelOpenTryResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MsgChannelOpenTryResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MsgChannelOpenTryResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTx(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthTx
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MsgChannelOpenAck) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MsgChannelOpenAck: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MsgChannelOpenAck: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PortId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.PortId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ChannelId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ChannelId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CounterpartyChannelId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.CounterpartyChannelId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CounterpartyVersion", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.CounterpartyVersion = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProofTry", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ProofTry = append(m.ProofTry[:0], dAtA[iNdEx:postIndex]...)
+ if m.ProofTry == nil {
+ m.ProofTry = []byte{}
+ }
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProofHeight", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ProofHeight.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Signer", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Signer = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTx(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthTx
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MsgChannelOpenAckResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MsgChannelOpenAckResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MsgChannelOpenAckResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTx(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthTx
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MsgChannelOpenConfirm) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MsgChannelOpenConfirm: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MsgChannelOpenConfirm: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PortId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.PortId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ChannelId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ChannelId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProofAck", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ProofAck = append(m.ProofAck[:0], dAtA[iNdEx:postIndex]...)
+ if m.ProofAck == nil {
+ m.ProofAck = []byte{}
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProofHeight", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ProofHeight.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Signer", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Signer = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTx(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthTx
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MsgChannelOpenConfirmResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MsgChannelOpenConfirmResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MsgChannelOpenConfirmResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTx(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthTx
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MsgChannelCloseInit) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MsgChannelCloseInit: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MsgChannelCloseInit: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PortId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.PortId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ChannelId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ChannelId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Signer", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Signer = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTx(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthTx
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MsgChannelCloseInitResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MsgChannelCloseInitResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MsgChannelCloseInitResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTx(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthTx
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MsgChannelCloseConfirm) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MsgChannelCloseConfirm: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MsgChannelCloseConfirm: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PortId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.PortId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ChannelId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ChannelId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProofInit", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ProofInit = append(m.ProofInit[:0], dAtA[iNdEx:postIndex]...)
+ if m.ProofInit == nil {
+ m.ProofInit = []byte{}
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProofHeight", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ProofHeight.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Signer", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Signer = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTx(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthTx
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MsgChannelCloseConfirmResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MsgChannelCloseConfirmResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MsgChannelCloseConfirmResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTx(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthTx
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MsgRecvPacket) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MsgRecvPacket: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MsgRecvPacket: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Packet", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Packet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProofCommitment", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ProofCommitment = append(m.ProofCommitment[:0], dAtA[iNdEx:postIndex]...)
+ if m.ProofCommitment == nil {
+ m.ProofCommitment = []byte{}
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProofHeight", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ProofHeight.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Signer", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Signer = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTx(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthTx
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MsgRecvPacketResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MsgRecvPacketResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MsgRecvPacketResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTx(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthTx
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MsgTimeout) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MsgTimeout: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MsgTimeout: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Packet", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Packet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProofUnreceived", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ProofUnreceived = append(m.ProofUnreceived[:0], dAtA[iNdEx:postIndex]...)
+ if m.ProofUnreceived == nil {
+ m.ProofUnreceived = []byte{}
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProofHeight", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ProofHeight.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NextSequenceRecv", wireType)
+ }
+ m.NextSequenceRecv = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.NextSequenceRecv |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Signer", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Signer = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTx(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthTx
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MsgTimeoutResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MsgTimeoutResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MsgTimeoutResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTx(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthTx
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MsgTimeoutOnClose) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MsgTimeoutOnClose: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MsgTimeoutOnClose: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Packet", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Packet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProofUnreceived", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ProofUnreceived = append(m.ProofUnreceived[:0], dAtA[iNdEx:postIndex]...)
+ if m.ProofUnreceived == nil {
+ m.ProofUnreceived = []byte{}
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProofClose", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ProofClose = append(m.ProofClose[:0], dAtA[iNdEx:postIndex]...)
+ if m.ProofClose == nil {
+ m.ProofClose = []byte{}
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProofHeight", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ProofHeight.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NextSequenceRecv", wireType)
+ }
+ m.NextSequenceRecv = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.NextSequenceRecv |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Signer", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Signer = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTx(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthTx
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MsgTimeoutOnCloseResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MsgTimeoutOnCloseResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MsgTimeoutOnCloseResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTx(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthTx
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MsgAcknowledgement) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MsgAcknowledgement: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MsgAcknowledgement: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Packet", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Packet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Acknowledgement", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Acknowledgement = append(m.Acknowledgement[:0], dAtA[iNdEx:postIndex]...)
+ if m.Acknowledgement == nil {
+ m.Acknowledgement = []byte{}
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProofAcked", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ProofAcked = append(m.ProofAcked[:0], dAtA[iNdEx:postIndex]...)
+ if m.ProofAcked == nil {
+ m.ProofAcked = []byte{}
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProofHeight", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ProofHeight.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Signer", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTx
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTx
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Signer = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTx(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthTx
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MsgAcknowledgementResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MsgAcknowledgementResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MsgAcknowledgementResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTx(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthTx
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipTx(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowTx
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthTx
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupTx
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthTx
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthTx = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowTx = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupTx = fmt.Errorf("proto: unexpected end of group")
+)
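Every generated Unmarshal function and the skipTx helper above decode protobuf varints with the same shift/0x7F/0x80 loop: seven payload bits per byte, least-significant group first, with the high bit marking continuation. A small standalone sketch of that loop follows; the helper name decodeVarint is illustrative and not part of this patch.

package main

import (
	"errors"
	"fmt"
)

// decodeVarint reads one base-128 varint starting at index, mirroring the
// loop used by the generated Unmarshal functions: each byte contributes its
// low seven bits, and a byte below 0x80 terminates the value.
func decodeVarint(data []byte, index int) (value uint64, next int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("integer overflow") // analogue of ErrIntOverflowTx
		}
		if index >= len(data) {
			return 0, 0, errors.New("unexpected EOF") // analogue of io.ErrUnexpectedEOF
		}
		b := data[index]
		index++
		value |= uint64(b&0x7F) << shift
		if b < 0x80 { // continuation bit clear: last byte of this varint
			return value, index, nil
		}
	}
}

func main() {
	// 0xAC 0x02 encodes 300: 0x2C (44) plus 0x02 shifted left by 7 (256).
	v, next, err := decodeVarint([]byte{0xAC, 0x02}, 0)
	fmt.Println(v, next, err) // 300 2 <nil>
}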
diff --git a/core/05-port/keeper/keeper.go b/core/05-port/keeper/keeper.go
new file mode 100644
index 00000000..8a4b2300
--- /dev/null
+++ b/core/05-port/keeper/keeper.go
@@ -0,0 +1,80 @@
+package keeper
+
+import (
+ "fmt"
+
+ "github.com/tendermint/tendermint/libs/log"
+
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ capabilitykeeper "github.com/cosmos/cosmos-sdk/x/capability/keeper"
+ capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/05-port/types"
+ host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+)
+
+// Keeper defines the IBC connection keeper
+type Keeper struct {
+ scopedKeeper capabilitykeeper.ScopedKeeper
+}
+
+// NewKeeper creates a new IBC connection Keeper instance
+func NewKeeper(sck capabilitykeeper.ScopedKeeper) Keeper {
+ return Keeper{
+ scopedKeeper: sck,
+ }
+}
+
+// Logger returns a module-specific logger.
+func (k Keeper) Logger(ctx sdk.Context) log.Logger {
+ return ctx.Logger().With("module", "x/"+host.ModuleName+"/"+types.SubModuleName)
+}
+
+// isBound checks whether a given port ID is already bound.
+func (k Keeper) isBound(ctx sdk.Context, portID string) bool {
+ _, ok := k.scopedKeeper.GetCapability(ctx, host.PortPath(portID))
+ return ok
+}
+
+// BindPort binds to a port and returns the associated capability.
+// Ports must be bound statically when the chain starts in `app.go`.
+// The capability must then be passed to a module which will need to pass
+// it as an extra parameter when calling functions on the IBC module.
+func (k *Keeper) BindPort(ctx sdk.Context, portID string) *capabilitytypes.Capability {
+ if err := host.PortIdentifierValidator(portID); err != nil {
+ panic(err.Error())
+ }
+
+ if k.isBound(ctx, portID) {
+ panic(fmt.Sprintf("port %s is already bound", portID))
+ }
+
+ key, err := k.scopedKeeper.NewCapability(ctx, host.PortPath(portID))
+ if err != nil {
+ panic(err.Error())
+ }
+
+	k.Logger(ctx).Info("port bound", "port", portID)
+ return key
+}
+
+// Authenticate authenticates a capability key against a port ID by checking
+// whether the memory address of the capability was previously generated and
+// bound to the given port, i.e. the port the capability is being
+// authenticated against.
+func (k Keeper) Authenticate(ctx sdk.Context, key *capabilitytypes.Capability, portID string) bool {
+ if err := host.PortIdentifierValidator(portID); err != nil {
+ panic(err.Error())
+ }
+
+ return k.scopedKeeper.AuthenticateCapability(ctx, key, host.PortPath(portID))
+}
+
+// LookupModuleByPort returns the name of the module bound to the given portID
+// along with the capability associated with it.
+func (k Keeper) LookupModuleByPort(ctx sdk.Context, portID string) (string, *capabilitytypes.Capability, error) {
+ modules, cap, err := k.scopedKeeper.LookupModules(ctx, host.PortPath(portID))
+ if err != nil {
+ return "", nil, err
+ }
+
+ return types.GetModuleOwner(modules), cap, nil
+}
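The keeper above is intended to be wired up once at chain start-up: construct it from a scoped capability keeper, bind each port exactly once, and hand the returned capability to the owning module. A hedged sketch of that wiring is below; the function name bindTransferPort and the "transfer" port ID are illustrative, and the scoped keeper and context come from the application, not from this patch.

package example

import (
	sdk "github.com/cosmos/cosmos-sdk/types"
	capabilitykeeper "github.com/cosmos/cosmos-sdk/x/capability/keeper"
	capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"
	portkeeper "github.com/cosmos/cosmos-sdk/x/ibc/core/05-port/keeper"
)

// bindTransferPort builds the port keeper and binds a single port. BindPort
// panics if the port ID is invalid or already bound, so this must only run
// while the application is being initialised.
func bindTransferPort(ctx sdk.Context, scoped capabilitykeeper.ScopedKeeper) (portkeeper.Keeper, *capabilitytypes.Capability) {
	k := portkeeper.NewKeeper(scoped)
	portCap := k.BindPort(ctx, "transfer")

	// Later callers prove ownership of the port with the same capability.
	_ = k.Authenticate(ctx, portCap, "transfer") // true for the original capability

	return k, portCap
}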
diff --git a/core/05-port/keeper/keeper_test.go b/core/05-port/keeper/keeper_test.go
new file mode 100644
index 00000000..29c0e158
--- /dev/null
+++ b/core/05-port/keeper/keeper_test.go
@@ -0,0 +1,70 @@
+package keeper_test
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+ "github.com/stretchr/testify/suite"
+ tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
+
+ "github.com/cosmos/cosmos-sdk/simapp"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/05-port/keeper"
+)
+
+var (
+ validPort = "validportid"
+ invalidPort = "(invalidPortID)"
+)
+
+type KeeperTestSuite struct {
+ suite.Suite
+
+ ctx sdk.Context
+ keeper *keeper.Keeper
+}
+
+func (suite *KeeperTestSuite) SetupTest() {
+ isCheckTx := false
+ app := simapp.Setup(isCheckTx)
+
+ suite.ctx = app.BaseApp.NewContext(isCheckTx, tmproto.Header{})
+ suite.keeper = &app.IBCKeeper.PortKeeper
+}
+
+func TestKeeperTestSuite(t *testing.T) {
+ suite.Run(t, new(KeeperTestSuite))
+}
+
+func (suite *KeeperTestSuite) TestBind() {
+ // Test that invalid portID causes panic
+ require.Panics(suite.T(), func() { suite.keeper.BindPort(suite.ctx, invalidPort) }, "Did not panic on invalid portID")
+
+ // Test that valid BindPort returns capability key
+ capKey := suite.keeper.BindPort(suite.ctx, validPort)
+ require.NotNil(suite.T(), capKey, "capabilityKey is nil on valid BindPort")
+
+ // Test that rebinding the same portid causes panic
+ require.Panics(suite.T(), func() { suite.keeper.BindPort(suite.ctx, validPort) }, "did not panic on re-binding the same port")
+}
+
+func (suite *KeeperTestSuite) TestAuthenticate() {
+ capKey := suite.keeper.BindPort(suite.ctx, validPort)
+
+ // Require that passing in invalid portID causes panic
+ require.Panics(suite.T(), func() { suite.keeper.Authenticate(suite.ctx, capKey, invalidPort) }, "did not panic on invalid portID")
+
+ // Valid authentication should return true
+ auth := suite.keeper.Authenticate(suite.ctx, capKey, validPort)
+ require.True(suite.T(), auth, "valid authentication failed")
+
+ // Test that authenticating against incorrect portid fails
+ auth = suite.keeper.Authenticate(suite.ctx, capKey, "wrongportid")
+ require.False(suite.T(), auth, "invalid authentication failed")
+
+ // Test that authenticating port against different valid
+ // capability key fails
+ capKey2 := suite.keeper.BindPort(suite.ctx, "otherportid")
+ auth = suite.keeper.Authenticate(suite.ctx, capKey2, validPort)
+ require.False(suite.T(), auth, "invalid authentication for different capKey failed")
+}
diff --git a/core/05-port/types/errors.go b/core/05-port/types/errors.go
new file mode 100644
index 00000000..23a2776f
--- /dev/null
+++ b/core/05-port/types/errors.go
@@ -0,0 +1,13 @@
+package types
+
+import (
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+)
+
+// IBC port sentinel errors
+var (
+	ErrPortExists   = sdkerrors.Register(SubModuleName, 2, "port is already bound")
+ ErrPortNotFound = sdkerrors.Register(SubModuleName, 3, "port not found")
+ ErrInvalidPort = sdkerrors.Register(SubModuleName, 4, "invalid port")
+ ErrInvalidRoute = sdkerrors.Register(SubModuleName, 5, "route not found")
+)
diff --git a/core/05-port/types/keys.go b/core/05-port/types/keys.go
new file mode 100644
index 00000000..6e79bb53
--- /dev/null
+++ b/core/05-port/types/keys.go
@@ -0,0 +1,15 @@
+package types
+
+const (
+ // SubModuleName defines the IBC port name
+ SubModuleName = "port"
+
+ // StoreKey is the store key string for IBC ports
+ StoreKey = SubModuleName
+
+ // RouterKey is the message route for IBC ports
+ RouterKey = SubModuleName
+
+ // QuerierRoute is the querier route for IBC ports
+ QuerierRoute = SubModuleName
+)
diff --git a/core/05-port/types/module.go b/core/05-port/types/module.go
new file mode 100644
index 00000000..4c686732
--- /dev/null
+++ b/core/05-port/types/module.go
@@ -0,0 +1,78 @@
+package types
+
+import (
+ sdk "github.com/cosmos/cosmos-sdk/types"
+
+ capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"
+ channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+)
+
+// IBCModule defines an interface that implements all the callbacks
+// that modules must define as specified in ICS-26
+type IBCModule interface {
+ OnChanOpenInit(
+ ctx sdk.Context,
+ order channeltypes.Order,
+ connectionHops []string,
+ portID string,
+ channelID string,
+ channelCap *capabilitytypes.Capability,
+ counterparty channeltypes.Counterparty,
+ version string,
+ ) error
+
+ OnChanOpenTry(
+ ctx sdk.Context,
+ order channeltypes.Order,
+ connectionHops []string,
+ portID,
+ channelID string,
+ channelCap *capabilitytypes.Capability,
+ counterparty channeltypes.Counterparty,
+ version,
+ counterpartyVersion string,
+ ) error
+
+ OnChanOpenAck(
+ ctx sdk.Context,
+ portID,
+ channelID string,
+ counterpartyVersion string,
+ ) error
+
+ OnChanOpenConfirm(
+ ctx sdk.Context,
+ portID,
+ channelID string,
+ ) error
+
+ OnChanCloseInit(
+ ctx sdk.Context,
+ portID,
+ channelID string,
+ ) error
+
+ OnChanCloseConfirm(
+ ctx sdk.Context,
+ portID,
+ channelID string,
+ ) error
+
+	// OnRecvPacket must return the acknowledgement bytes.
+ // In the case of an asynchronous acknowledgement, nil should be returned.
+ OnRecvPacket(
+ ctx sdk.Context,
+ packet channeltypes.Packet,
+ ) (*sdk.Result, []byte, error)
+
+ OnAcknowledgementPacket(
+ ctx sdk.Context,
+ packet channeltypes.Packet,
+ acknowledgement []byte,
+ ) (*sdk.Result, error)
+
+ OnTimeoutPacket(
+ ctx sdk.Context,
+ packet channeltypes.Packet,
+ ) (*sdk.Result, error)
+}
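Since every callback in the interface above must be implemented by an application module, a minimal do-nothing implementation can make the callback surface concrete. The NoopModule type below is purely illustrative and not part of this patch; a real module would carry application state and logic in these callbacks.

package example

import (
	sdk "github.com/cosmos/cosmos-sdk/types"
	capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"
	channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
	porttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/05-port/types"
)

// NoopModule accepts every handshake step, acknowledges every packet with a
// fixed payload, and ignores acknowledgements and timeouts.
type NoopModule struct{}

var _ porttypes.IBCModule = NoopModule{}

func (NoopModule) OnChanOpenInit(ctx sdk.Context, order channeltypes.Order, connectionHops []string, portID, channelID string, channelCap *capabilitytypes.Capability, counterparty channeltypes.Counterparty, version string) error {
	return nil
}

func (NoopModule) OnChanOpenTry(ctx sdk.Context, order channeltypes.Order, connectionHops []string, portID, channelID string, channelCap *capabilitytypes.Capability, counterparty channeltypes.Counterparty, version, counterpartyVersion string) error {
	return nil
}

func (NoopModule) OnChanOpenAck(ctx sdk.Context, portID, channelID, counterpartyVersion string) error {
	return nil
}

func (NoopModule) OnChanOpenConfirm(ctx sdk.Context, portID, channelID string) error { return nil }

func (NoopModule) OnChanCloseInit(ctx sdk.Context, portID, channelID string) error { return nil }

func (NoopModule) OnChanCloseConfirm(ctx sdk.Context, portID, channelID string) error { return nil }

// OnRecvPacket returns the acknowledgement bytes synchronously; returning nil
// bytes would signal an asynchronous acknowledgement instead.
func (NoopModule) OnRecvPacket(ctx sdk.Context, packet channeltypes.Packet) (*sdk.Result, []byte, error) {
	return &sdk.Result{}, []byte("noop-ack"), nil
}

func (NoopModule) OnAcknowledgementPacket(ctx sdk.Context, packet channeltypes.Packet, acknowledgement []byte) (*sdk.Result, error) {
	return &sdk.Result{}, nil
}

func (NoopModule) OnTimeoutPacket(ctx sdk.Context, packet channeltypes.Packet) (*sdk.Result, error) {
	return &sdk.Result{}, nil
}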
diff --git a/core/05-port/types/router.go b/core/05-port/types/router.go
new file mode 100644
index 00000000..6bfba907
--- /dev/null
+++ b/core/05-port/types/router.go
@@ -0,0 +1,65 @@
+package types
+
+import (
+ "fmt"
+
+ sdk "github.com/cosmos/cosmos-sdk/types"
+)
+
+// The router is a map from module name to the IBCModule
+// which contains all the module-defined callbacks required by ICS-26
+type Router struct {
+ routes map[string]IBCModule
+ sealed bool
+}
+
+func NewRouter() *Router {
+ return &Router{
+ routes: make(map[string]IBCModule),
+ }
+}
+
+// Seal prevents any subsequent route handlers from being registered on the Router.
+// Seal will panic if called more than once.
+func (rtr *Router) Seal() {
+ if rtr.sealed {
+ panic("router already sealed")
+ }
+ rtr.sealed = true
+}
+
+// Sealed returns whether the Router is sealed.
+func (rtr Router) Sealed() bool {
+ return rtr.sealed
+}
+
+// AddRoute adds an IBCModule for a given module name. It returns the Router
+// so that AddRoute calls can be chained. It panics if the Router is sealed.
+func (rtr *Router) AddRoute(module string, cbs IBCModule) *Router {
+ if rtr.sealed {
+ panic(fmt.Sprintf("router sealed; cannot register %s route callbacks", module))
+ }
+ if !sdk.IsAlphaNumeric(module) {
+ panic("route expressions can only contain alphanumeric characters")
+ }
+ if rtr.HasRoute(module) {
+ panic(fmt.Sprintf("route %s has already been registered", module))
+ }
+
+ rtr.routes[module] = cbs
+ return rtr
+}
+
+// HasRoute returns true if the Router has a route registered for the given module and false otherwise.
+func (rtr *Router) HasRoute(module string) bool {
+ _, ok := rtr.routes[module]
+ return ok
+}
+
+// GetRoute returns an IBCModule for a given module.
+func (rtr *Router) GetRoute(module string) (IBCModule, bool) {
+ if !rtr.HasRoute(module) {
+ return nil, false
+ }
+ return rtr.routes[module], true
+}
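The Router above follows an add-then-seal lifecycle: register every module's callbacks while the application is being constructed, then call Seal so the route table can no longer change. A brief sketch, assuming some IBCModule implementation is already available; the names wireRouter and transferModule are illustrative.

package example

import (
	porttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/05-port/types"
)

// wireRouter registers a single module's callbacks and seals the router.
// AddRoute returns the Router, so registrations can be chained; any AddRoute
// call after Seal panics.
func wireRouter(transferModule porttypes.IBCModule) *porttypes.Router {
	router := porttypes.NewRouter().
		AddRoute("transfer", transferModule) // module names must be alphanumeric
	router.Seal()

	if cbs, ok := router.GetRoute("transfer"); ok {
		_ = cbs // ICS-26 callbacks are dispatched through cbs at runtime
	}
	return router
}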
diff --git a/core/05-port/types/utils.go b/core/05-port/types/utils.go
new file mode 100644
index 00000000..a12f2ef7
--- /dev/null
+++ b/core/05-port/types/utils.go
@@ -0,0 +1,17 @@
+package types
+
+import "fmt"
+
+// GetModuleOwner enforces that only IBC and the module bound to the port can own the capability.
+// While future implementations may allow multiple modules to bind to a port, currently only one
+// module may be bound to a port at any given time.
+func GetModuleOwner(modules []string) string {
+ if len(modules) != 2 {
+ panic(fmt.Sprintf("capability should only be owned by port or channel owner and ibc module, multiple owners currently not supported, owners: %v", modules))
+ }
+
+ if modules[0] == "ibc" {
+ return modules[1]
+ }
+ return modules[0]
+}
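Concretely, GetModuleOwner expects exactly two owners and strips the "ibc" entry; any other owner count panics. A tiny illustration follows; the import path matches this patch, while the surrounding program is hypothetical.

package main

import (
	"fmt"

	porttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/05-port/types"
)

func main() {
	// The capability is co-owned by the ibc module and the bound module;
	// GetModuleOwner returns the non-ibc owner regardless of order.
	fmt.Println(porttypes.GetModuleOwner([]string{"ibc", "transfer"})) // transfer
	fmt.Println(porttypes.GetModuleOwner([]string{"transfer", "ibc"})) // transfer
}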
diff --git a/core/23-commitment/types/bench_test.go b/core/23-commitment/types/bench_test.go
new file mode 100644
index 00000000..83794fc6
--- /dev/null
+++ b/core/23-commitment/types/bench_test.go
@@ -0,0 +1,15 @@
+package types
+
+import (
+ "testing"
+)
+
+func BenchmarkMerkleProofEmpty(b *testing.B) {
+ b.ReportAllocs()
+ var mk MerkleProof
+ for i := 0; i < b.N; i++ {
+ if !mk.Empty() {
+ b.Fatal("supposed to be empty")
+ }
+ }
+}
diff --git a/core/23-commitment/types/codec.go b/core/23-commitment/types/codec.go
new file mode 100644
index 00000000..1195c7c2
--- /dev/null
+++ b/core/23-commitment/types/codec.go
@@ -0,0 +1,43 @@
+package types
+
+import (
+ codectypes "github.com/cosmos/cosmos-sdk/codec/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+)
+
+// RegisterInterfaces registers the commitment interfaces to protobuf Any.
+func RegisterInterfaces(registry codectypes.InterfaceRegistry) {
+ registry.RegisterInterface(
+ "ibc.core.commitment.v1.Root",
+ (*exported.Root)(nil),
+ )
+ registry.RegisterInterface(
+ "ibc.core.commitment.v1.Prefix",
+ (*exported.Prefix)(nil),
+ )
+ registry.RegisterInterface(
+ "ibc.core.commitment.v1.Path",
+ (*exported.Path)(nil),
+ )
+ registry.RegisterInterface(
+ "ibc.core.commitment.v1.Proof",
+ (*exported.Proof)(nil),
+ )
+
+ registry.RegisterImplementations(
+ (*exported.Root)(nil),
+ &MerkleRoot{},
+ )
+ registry.RegisterImplementations(
+ (*exported.Prefix)(nil),
+ &MerklePrefix{},
+ )
+ registry.RegisterImplementations(
+ (*exported.Path)(nil),
+ &MerklePath{},
+ )
+ registry.RegisterImplementations(
+ (*exported.Proof)(nil),
+ &MerkleProof{},
+ )
+}
diff --git a/core/23-commitment/types/commitment.pb.go b/core/23-commitment/types/commitment.pb.go
new file mode 100644
index 00000000..ac4201c4
--- /dev/null
+++ b/core/23-commitment/types/commitment.pb.go
@@ -0,0 +1,863 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: ibcgo/core/commitment/v1/commitment.proto
+
+package types
+
+import (
+ fmt "fmt"
+ _go "github.com/confio/ics23/go"
+ _ "github.com/gogo/protobuf/gogoproto"
+ proto "github.com/gogo/protobuf/proto"
+ io "io"
+ math "math"
+ math_bits "math/bits"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+// MerkleRoot defines a merkle root hash.
+// In the Cosmos SDK, the AppHash of a block header becomes the root.
+type MerkleRoot struct {
+ Hash []byte `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"`
+}
+
+func (m *MerkleRoot) Reset() { *m = MerkleRoot{} }
+func (m *MerkleRoot) String() string { return proto.CompactTextString(m) }
+func (*MerkleRoot) ProtoMessage() {}
+func (*MerkleRoot) Descriptor() ([]byte, []int) {
+ return fileDescriptor_eb23d5444771a147, []int{0}
+}
+func (m *MerkleRoot) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MerkleRoot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_MerkleRoot.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *MerkleRoot) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MerkleRoot.Merge(m, src)
+}
+func (m *MerkleRoot) XXX_Size() int {
+ return m.Size()
+}
+func (m *MerkleRoot) XXX_DiscardUnknown() {
+ xxx_messageInfo_MerkleRoot.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MerkleRoot proto.InternalMessageInfo
+
+// MerklePrefix is merkle path prefixed to the key.
+// The constructed key from the Path and the key will be append(Path.KeyPath,
+// append(Path.KeyPrefix, key...))
+type MerklePrefix struct {
+ KeyPrefix []byte `protobuf:"bytes,1,opt,name=key_prefix,json=keyPrefix,proto3" json:"key_prefix,omitempty" yaml:"key_prefix"`
+}
+
+func (m *MerklePrefix) Reset() { *m = MerklePrefix{} }
+func (m *MerklePrefix) String() string { return proto.CompactTextString(m) }
+func (*MerklePrefix) ProtoMessage() {}
+func (*MerklePrefix) Descriptor() ([]byte, []int) {
+ return fileDescriptor_eb23d5444771a147, []int{1}
+}
+func (m *MerklePrefix) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MerklePrefix) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_MerklePrefix.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *MerklePrefix) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MerklePrefix.Merge(m, src)
+}
+func (m *MerklePrefix) XXX_Size() int {
+ return m.Size()
+}
+func (m *MerklePrefix) XXX_DiscardUnknown() {
+ xxx_messageInfo_MerklePrefix.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MerklePrefix proto.InternalMessageInfo
+
+func (m *MerklePrefix) GetKeyPrefix() []byte {
+ if m != nil {
+ return m.KeyPrefix
+ }
+ return nil
+}
+
+// MerklePath is the path used to verify commitment proofs, which can be an
+// arbitrary structured object (defined by a commitment type).
+// MerklePath is represented from root-to-leaf
+type MerklePath struct {
+ KeyPath []string `protobuf:"bytes,1,rep,name=key_path,json=keyPath,proto3" json:"key_path,omitempty" yaml:"key_path"`
+}
+
+func (m *MerklePath) Reset() { *m = MerklePath{} }
+func (*MerklePath) ProtoMessage() {}
+func (*MerklePath) Descriptor() ([]byte, []int) {
+ return fileDescriptor_eb23d5444771a147, []int{2}
+}
+func (m *MerklePath) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MerklePath) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_MerklePath.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *MerklePath) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MerklePath.Merge(m, src)
+}
+func (m *MerklePath) XXX_Size() int {
+ return m.Size()
+}
+func (m *MerklePath) XXX_DiscardUnknown() {
+ xxx_messageInfo_MerklePath.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MerklePath proto.InternalMessageInfo
+
+func (m *MerklePath) GetKeyPath() []string {
+ if m != nil {
+ return m.KeyPath
+ }
+ return nil
+}
+
+// MerkleProof is a wrapper type over a chain of CommitmentProofs.
+// It demonstrates membership or non-membership for an element or set of
+// elements, verifiable in conjunction with a known commitment root. Proofs
+// should be succinct.
+// MerkleProofs are ordered from leaf-to-root
+type MerkleProof struct {
+ Proofs []*_go.CommitmentProof `protobuf:"bytes,1,rep,name=proofs,proto3" json:"proofs,omitempty"`
+}
+
+func (m *MerkleProof) Reset() { *m = MerkleProof{} }
+func (m *MerkleProof) String() string { return proto.CompactTextString(m) }
+func (*MerkleProof) ProtoMessage() {}
+func (*MerkleProof) Descriptor() ([]byte, []int) {
+ return fileDescriptor_eb23d5444771a147, []int{3}
+}
+func (m *MerkleProof) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MerkleProof) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_MerkleProof.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *MerkleProof) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MerkleProof.Merge(m, src)
+}
+func (m *MerkleProof) XXX_Size() int {
+ return m.Size()
+}
+func (m *MerkleProof) XXX_DiscardUnknown() {
+ xxx_messageInfo_MerkleProof.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MerkleProof proto.InternalMessageInfo
+
+func (m *MerkleProof) GetProofs() []*_go.CommitmentProof {
+ if m != nil {
+ return m.Proofs
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterType((*MerkleRoot)(nil), "ibcgo.core.commitment.v1.MerkleRoot")
+ proto.RegisterType((*MerklePrefix)(nil), "ibcgo.core.commitment.v1.MerklePrefix")
+ proto.RegisterType((*MerklePath)(nil), "ibcgo.core.commitment.v1.MerklePath")
+ proto.RegisterType((*MerkleProof)(nil), "ibcgo.core.commitment.v1.MerkleProof")
+}
+
+func init() {
+ proto.RegisterFile("ibcgo/core/commitment/v1/commitment.proto", fileDescriptor_eb23d5444771a147)
+}
+
+var fileDescriptor_eb23d5444771a147 = []byte{
+ // 329 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0x91, 0xbf, 0x4e, 0xc3, 0x30,
+ 0x10, 0xc6, 0x13, 0x51, 0x15, 0xea, 0x56, 0x42, 0xa4, 0x80, 0xaa, 0x0e, 0x29, 0xca, 0x80, 0xca,
+ 0x50, 0x5b, 0x6d, 0x99, 0x2a, 0xb1, 0x04, 0x36, 0x84, 0x54, 0x65, 0x64, 0x41, 0x89, 0xe5, 0x24,
+ 0x56, 0x9b, 0x5e, 0x14, 0x9b, 0x8a, 0xbc, 0x01, 0x23, 0x23, 0x23, 0x8f, 0xc3, 0xd8, 0x91, 0xa9,
+ 0x42, 0xed, 0x1b, 0xf4, 0x09, 0x90, 0x6d, 0x0a, 0xd9, 0xee, 0x7c, 0xbf, 0xfb, 0xe3, 0xef, 0x43,
+ 0x57, 0x3c, 0xa2, 0x09, 0x10, 0x0a, 0x05, 0x23, 0x14, 0xb2, 0x8c, 0xcb, 0x8c, 0x2d, 0x24, 0x59,
+ 0x0e, 0x2b, 0x19, 0xce, 0x0b, 0x90, 0xe0, 0x74, 0x34, 0x8a, 0x15, 0x8a, 0x2b, 0xc5, 0xe5, 0xb0,
+ 0x7b, 0x9a, 0x40, 0x02, 0x1a, 0x22, 0x2a, 0x32, 0x7c, 0xb7, 0x4d, 0x61, 0x11, 0x73, 0x20, 0x79,
+ 0x01, 0x10, 0x0b, 0xf3, 0xe8, 0x5d, 0x22, 0xf4, 0xc0, 0x8a, 0xd9, 0x9c, 0x05, 0x00, 0xd2, 0x71,
+ 0x50, 0x2d, 0x0d, 0x45, 0xda, 0xb1, 0x2f, 0xec, 0x7e, 0x2b, 0xd0, 0xf1, 0xa4, 0xf6, 0xfa, 0xd1,
+ 0xb3, 0xbc, 0x3b, 0xd4, 0x32, 0xdc, 0xb4, 0x60, 0x31, 0x7f, 0x71, 0xae, 0x11, 0x9a, 0xb1, 0xf2,
+ 0x29, 0xd7, 0x99, 0xe1, 0xfd, 0xb3, 0xdd, 0xba, 0x77, 0x52, 0x86, 0xd9, 0x7c, 0xe2, 0xfd, 0xd7,
+ 0xbc, 0xa0, 0x31, 0x63, 0xa5, 0xe9, 0xf2, 0xfc, 0xfd, 0xb6, 0x69, 0x28, 0x53, 0x07, 0xa3, 0x23,
+ 0xcd, 0x85, 0x52, 0x6d, 0x3c, 0xe8, 0x37, 0xfc, 0xf6, 0x6e, 0xdd, 0x3b, 0xae, 0x4c, 0x08, 0x65,
+ 0xea, 0x05, 0x87, 0xaa, 0x3f, 0x94, 0xe9, 0xa4, 0xf6, 0xae, 0x2e, 0xb9, 0x41, 0xcd, 0xfd, 0x25,
+ 0x00, 0xb1, 0x83, 0x51, 0xdd, 0x7c, 0x48, 0x8f, 0x68, 0x8e, 0xce, 0x31, 0xa7, 0x62, 0x34, 0xc6,
+ 0xb7, 0x7f, 0x8a, 0x68, 0x2e, 0xf8, 0xa5, 0xfc, 0xfb, 0xcf, 0x8d, 0x6b, 0xaf, 0x36, 0xae, 0xfd,
+ 0xbd, 0x71, 0xed, 0xb7, 0xad, 0x6b, 0xad, 0xb6, 0xae, 0xf5, 0xb5, 0x75, 0xad, 0xc7, 0x61, 0xc2,
+ 0x65, 0xfa, 0x1c, 0x29, 0x2d, 0x09, 0x05, 0x91, 0x81, 0x20, 0x3c, 0xa2, 0x83, 0xbd, 0x1b, 0xa3,
+ 0xf1, 0xa0, 0x62, 0x88, 0x2c, 0x73, 0x26, 0xa2, 0xba, 0x16, 0x71, 0xfc, 0x13, 0x00, 0x00, 0xff,
+ 0xff, 0xe6, 0x8b, 0xf4, 0x8a, 0xb6, 0x01, 0x00, 0x00,
+}
+
+func (m *MerkleRoot) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MerkleRoot) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MerkleRoot) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Hash) > 0 {
+ i -= len(m.Hash)
+ copy(dAtA[i:], m.Hash)
+ i = encodeVarintCommitment(dAtA, i, uint64(len(m.Hash)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *MerklePrefix) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MerklePrefix) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MerklePrefix) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.KeyPrefix) > 0 {
+ i -= len(m.KeyPrefix)
+ copy(dAtA[i:], m.KeyPrefix)
+ i = encodeVarintCommitment(dAtA, i, uint64(len(m.KeyPrefix)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *MerklePath) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MerklePath) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MerklePath) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.KeyPath) > 0 {
+ for iNdEx := len(m.KeyPath) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.KeyPath[iNdEx])
+ copy(dAtA[i:], m.KeyPath[iNdEx])
+ i = encodeVarintCommitment(dAtA, i, uint64(len(m.KeyPath[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *MerkleProof) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MerkleProof) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MerkleProof) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Proofs) > 0 {
+ for iNdEx := len(m.Proofs) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Proofs[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintCommitment(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintCommitment(dAtA []byte, offset int, v uint64) int {
+ offset -= sovCommitment(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *MerkleRoot) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Hash)
+ if l > 0 {
+ n += 1 + l + sovCommitment(uint64(l))
+ }
+ return n
+}
+
+func (m *MerklePrefix) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.KeyPrefix)
+ if l > 0 {
+ n += 1 + l + sovCommitment(uint64(l))
+ }
+ return n
+}
+
+func (m *MerklePath) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.KeyPath) > 0 {
+ for _, s := range m.KeyPath {
+ l = len(s)
+ n += 1 + l + sovCommitment(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *MerkleProof) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Proofs) > 0 {
+ for _, e := range m.Proofs {
+ l = e.Size()
+ n += 1 + l + sovCommitment(uint64(l))
+ }
+ }
+ return n
+}
+
+func sovCommitment(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozCommitment(x uint64) (n int) {
+ return sovCommitment(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *MerkleRoot) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCommitment
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MerkleRoot: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MerkleRoot: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCommitment
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthCommitment
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthCommitment
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...)
+ if m.Hash == nil {
+ m.Hash = []byte{}
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipCommitment(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthCommitment
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MerklePrefix) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCommitment
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MerklePrefix: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MerklePrefix: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field KeyPrefix", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCommitment
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthCommitment
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthCommitment
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.KeyPrefix = append(m.KeyPrefix[:0], dAtA[iNdEx:postIndex]...)
+ if m.KeyPrefix == nil {
+ m.KeyPrefix = []byte{}
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipCommitment(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthCommitment
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MerklePath) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCommitment
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MerklePath: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MerklePath: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field KeyPath", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCommitment
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthCommitment
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthCommitment
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.KeyPath = append(m.KeyPath, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipCommitment(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthCommitment
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MerkleProof) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCommitment
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MerkleProof: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MerkleProof: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Proofs", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCommitment
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthCommitment
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthCommitment
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Proofs = append(m.Proofs, &_go.CommitmentProof{})
+ if err := m.Proofs[len(m.Proofs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipCommitment(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthCommitment
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipCommitment(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowCommitment
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowCommitment
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowCommitment
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthCommitment
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupCommitment
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthCommitment
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthCommitment = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowCommitment = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupCommitment = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/core/23-commitment/types/commitment_test.go b/core/23-commitment/types/commitment_test.go
new file mode 100644
index 00000000..932599e5
--- /dev/null
+++ b/core/23-commitment/types/commitment_test.go
@@ -0,0 +1,37 @@
+package types_test
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/suite"
+
+ "github.com/cosmos/cosmos-sdk/store/iavl"
+ "github.com/cosmos/cosmos-sdk/store/rootmulti"
+ storetypes "github.com/cosmos/cosmos-sdk/store/types"
+
+ dbm "github.com/tendermint/tm-db"
+)
+
+type MerkleTestSuite struct {
+ suite.Suite
+
+ store *rootmulti.Store
+ storeKey *storetypes.KVStoreKey
+ iavlStore *iavl.Store
+}
+
+func (suite *MerkleTestSuite) SetupTest() {
+ db := dbm.NewMemDB()
+ suite.store = rootmulti.NewStore(db)
+
+ suite.storeKey = storetypes.NewKVStoreKey("iavlStoreKey")
+
+ suite.store.MountStoreWithDB(suite.storeKey, storetypes.StoreTypeIAVL, nil)
+ suite.store.LoadVersion(0)
+
+ suite.iavlStore = suite.store.GetCommitStore(suite.storeKey).(*iavl.Store)
+}
+
+func TestMerkleTestSuite(t *testing.T) {
+ suite.Run(t, new(MerkleTestSuite))
+}
diff --git a/core/23-commitment/types/errors.go b/core/23-commitment/types/errors.go
new file mode 100644
index 00000000..7191baef
--- /dev/null
+++ b/core/23-commitment/types/errors.go
@@ -0,0 +1,15 @@
+package types
+
+import (
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+)
+
+// SubModuleName is the error codespace
+const SubModuleName string = "commitment"
+
+// IBC connection sentinel errors
+var (
+ ErrInvalidProof = sdkerrors.Register(SubModuleName, 2, "invalid proof")
+ ErrInvalidPrefix = sdkerrors.Register(SubModuleName, 3, "invalid prefix")
+ ErrInvalidMerkleProof = sdkerrors.Register(SubModuleName, 4, "invalid merkle proof")
+)
diff --git a/core/23-commitment/types/merkle.go b/core/23-commitment/types/merkle.go
new file mode 100644
index 00000000..e90fccc3
--- /dev/null
+++ b/core/23-commitment/types/merkle.go
@@ -0,0 +1,312 @@
+package types
+
+import (
+ "bytes"
+ "fmt"
+ "net/url"
+
+ ics23 "github.com/confio/ics23/go"
+ "github.com/gogo/protobuf/proto"
+ tmcrypto "github.com/tendermint/tendermint/proto/tendermint/crypto"
+
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+)
+
+// sdkSpecs are the proof specs for an SDK chain
+var sdkSpecs = []*ics23.ProofSpec{ics23.IavlSpec, ics23.TendermintSpec}
+
+// ICS 023 Merkle Types Implementation
+//
+// This file defines the Merkle commitment types that implement ICS 023.
+
+// Merkle proof implementation of the Proof interface
+// Applied on SDK-based IBC implementation
+var _ exported.Root = (*MerkleRoot)(nil)
+
+// GetSDKSpecs is a getter function for the proof specs of an SDK chain
+func GetSDKSpecs() []*ics23.ProofSpec {
+ return sdkSpecs
+}
+
+// NewMerkleRoot constructs a new MerkleRoot
+func NewMerkleRoot(hash []byte) MerkleRoot {
+ return MerkleRoot{
+ Hash: hash,
+ }
+}
+
+// GetHash implements RootI interface
+func (mr MerkleRoot) GetHash() []byte {
+ return mr.Hash
+}
+
+// Empty returns true if the root is empty
+func (mr MerkleRoot) Empty() bool {
+ return len(mr.GetHash()) == 0
+}
+
+var _ exported.Prefix = (*MerklePrefix)(nil)
+
+// NewMerklePrefix constructs a new MerklePrefix instance
+func NewMerklePrefix(keyPrefix []byte) MerklePrefix {
+ return MerklePrefix{
+ KeyPrefix: keyPrefix,
+ }
+}
+
+// Bytes returns the key prefix bytes
+func (mp MerklePrefix) Bytes() []byte {
+ return mp.KeyPrefix
+}
+
+// Empty returns true if the prefix is empty
+func (mp MerklePrefix) Empty() bool {
+ return len(mp.Bytes()) == 0
+}
+
+var _ exported.Path = (*MerklePath)(nil)
+
+// NewMerklePath creates a new MerklePath instance
+// The keys must be passed in from root-to-leaf order
+func NewMerklePath(keyPath ...string) MerklePath {
+ return MerklePath{
+ KeyPath: keyPath,
+ }
+}
+
+// String implements fmt.Stringer.
+// This represents the path in the same way the tendermint KeyPath will
+// represent a key path. The forward slashes partition the key path into
+// the respective stores they belong to.
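+//
+// For example, NewMerklePath("rootKey", "storeKey", "path/to/leaf").String()
+// returns "/rootKey/storeKey/path%2Fto%2Fleaf".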
+func (mp MerklePath) String() string {
+ pathStr := ""
+ for _, k := range mp.KeyPath {
+ pathStr += "/" + url.PathEscape(k)
+ }
+ return pathStr
+}
+
+// Pretty returns the unescaped path of the URL string.
+// This function will unescape any URL-escaped characters within a particular store key.
+// This makes the keypath more human-readable while removing information
+// about the exact partitions in the key path.
+func (mp MerklePath) Pretty() string {
+ path, err := url.PathUnescape(mp.String())
+ if err != nil {
+ panic(err)
+ }
+ return path
+}
+
+// GetKey will return a byte representation of the key
+// after URL-unescaping the key element.
+func (mp MerklePath) GetKey(i uint64) ([]byte, error) {
+ if i >= uint64(len(mp.KeyPath)) {
+ return nil, fmt.Errorf("index out of range. %d (index) >= %d (len)", i, len(mp.KeyPath))
+ }
+ key, err := url.PathUnescape(mp.KeyPath[i])
+ if err != nil {
+ return nil, err
+ }
+ return []byte(key), nil
+}
+
+// Empty returns true if the path is empty
+func (mp MerklePath) Empty() bool {
+ return len(mp.KeyPath) == 0
+}
+
+// ApplyPrefix constructs a new commitment path from the arguments. It prepends the prefix
+// key to the given path.
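+//
+// For example, applying the prefix "storePrefixKey" to the single-element path "pathone/pathtwo/paththree/key"
+// yields a path whose String() form is "/storePrefixKey/pathone%2Fpathtwo%2Fpaththree%2Fkey".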
+func ApplyPrefix(prefix exported.Prefix, path MerklePath) (MerklePath, error) {
+ if prefix == nil || prefix.Empty() {
+ return MerklePath{}, sdkerrors.Wrap(ErrInvalidPrefix, "prefix can't be empty")
+ }
+ return NewMerklePath(append([]string{string(prefix.Bytes())}, path.KeyPath...)...), nil
+}
+
+var _ exported.Proof = (*MerkleProof)(nil)
+
+// VerifyMembership verifies the membership of a merkle proof against the given root, path, and value.
+func (proof MerkleProof) VerifyMembership(specs []*ics23.ProofSpec, root exported.Root, path exported.Path, value []byte) error {
+ if err := proof.validateVerificationArgs(specs, root); err != nil {
+ return err
+ }
+
+ // VerifyMembership specific argument validation
+ mpath, ok := path.(MerklePath)
+ if !ok {
+ return sdkerrors.Wrapf(ErrInvalidProof, "path %v is not of type MerklePath", path)
+ }
+ if len(mpath.KeyPath) != len(specs) {
+		return sdkerrors.Wrapf(ErrInvalidProof, "path length %d not same as specs length %d",
+ len(mpath.KeyPath), len(specs))
+ }
+ if len(value) == 0 {
+ return sdkerrors.Wrap(ErrInvalidProof, "empty value in membership proof")
+ }
+
+	// Since every proof in the chain is a membership proof, we can use verifyChainedMembershipProof from index 0
+	// to validate the entire proof.
+ if err := verifyChainedMembershipProof(root.GetHash(), specs, proof.Proofs, mpath, value, 0); err != nil {
+ return err
+ }
+ return nil
+}
+
+// VerifyNonMembership verifies a non-membership merkle proof against the given root and path.
+// It verifies a chained proof where the absence of a given path is proven
+// at the lowest subtree and then each subtree's inclusion is proved up to the final root.
+func (proof MerkleProof) VerifyNonMembership(specs []*ics23.ProofSpec, root exported.Root, path exported.Path) error {
+ if err := proof.validateVerificationArgs(specs, root); err != nil {
+ return err
+ }
+
+ // VerifyNonMembership specific argument validation
+ mpath, ok := path.(MerklePath)
+ if !ok {
+		return sdkerrors.Wrapf(ErrInvalidProof, "path %v is not of type MerklePath", path)
+ }
+ if len(mpath.KeyPath) != len(specs) {
+		return sdkerrors.Wrapf(ErrInvalidProof, "path length %d not same as specs length %d",
+ len(mpath.KeyPath), len(specs))
+ }
+
+ switch proof.Proofs[0].Proof.(type) {
+ case *ics23.CommitmentProof_Nonexist:
+ // VerifyNonMembership will verify the absence of key in lowest subtree, and then chain inclusion proofs
+ // of all subroots up to final root
+ subroot, err := proof.Proofs[0].Calculate()
+ if err != nil {
+ return sdkerrors.Wrapf(ErrInvalidProof, "could not calculate root for proof index 0, merkle tree is likely empty. %v", err)
+ }
+ key, err := mpath.GetKey(uint64(len(mpath.KeyPath) - 1))
+ if err != nil {
+ return sdkerrors.Wrapf(ErrInvalidProof, "could not retrieve key bytes for key: %s", mpath.KeyPath[len(mpath.KeyPath)-1])
+ }
+ if ok := ics23.VerifyNonMembership(specs[0], subroot, proof.Proofs[0], key); !ok {
+ return sdkerrors.Wrapf(ErrInvalidProof, "could not verify absence of key %s. Please ensure that the path is correct.", string(key))
+ }
+
+ // Verify chained membership proof starting from index 1 with value = subroot
+ if err := verifyChainedMembershipProof(root.GetHash(), specs, proof.Proofs, mpath, subroot, 1); err != nil {
+ return err
+ }
+ case *ics23.CommitmentProof_Exist:
+ return sdkerrors.Wrapf(ErrInvalidProof,
+ "got ExistenceProof in VerifyNonMembership. If this is unexpected, please ensure that proof was queried with the correct key.")
+ default:
+ return sdkerrors.Wrapf(ErrInvalidProof,
+ "expected proof type: %T, got: %T", &ics23.CommitmentProof_Exist{}, proof.Proofs[0].Proof)
+ }
+ return nil
+}
+
+// BatchVerifyMembership verifies a group of key value pairs against the given root
+// NOTE: Currently left unimplemented as it is unused
+func (proof MerkleProof) BatchVerifyMembership(specs []*ics23.ProofSpec, root exported.Root, path exported.Path, items map[string][]byte) error {
+ return sdkerrors.Wrap(ErrInvalidProof, "batch proofs are currently unsupported")
+}
+
+// BatchVerifyNonMembership verifies absence of a group of keys against the given root
+// NOTE: Currently left unimplemented as it is unused
+func (proof MerkleProof) BatchVerifyNonMembership(specs []*ics23.ProofSpec, root exported.Root, path exported.Path, items [][]byte) error {
+ return sdkerrors.Wrap(ErrInvalidProof, "batch proofs are currently unsupported")
+}
+
+// verifyChainedMembershipProof takes a list of proofs and specs and verifies each proof sequentially, ensuring that the value is committed to
+// by the first proof, that each subsequent subroot is committed to by the next subroot, and that the final calculated root is equal to the given roothash.
+// The proofs and specs are passed in from the lowest subtree to the highest subtree, but the keys are passed in from the highest subtree to the lowest.
+// The index specifies at which index to start chaining the membership proofs. This is useful since the lowest proof may not be a membership proof, in which
+// case we want to start the membership proof chaining from index 1 with value being the lowest subroot.
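+//
+// For an SDK chain, for example, proofs[0] typically proves the value under its key in a store's IAVL tree,
+// and proofs[1] proves that store's root under the store name in the multistore commitment (illustrative; chains may differ).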
+func verifyChainedMembershipProof(root []byte, specs []*ics23.ProofSpec, proofs []*ics23.CommitmentProof, keys MerklePath, value []byte, index int) error {
+ var (
+ subroot []byte
+ err error
+ )
+ // Initialize subroot to value since the proofs list may be empty.
+ // This may happen if this call is verifying intermediate proofs after the lowest proof has been executed.
+ // In this case, there may be no intermediate proofs to verify and we just check that lowest proof root equals final root
+ subroot = value
+ for i := index; i < len(proofs); i++ {
+ switch proofs[i].Proof.(type) {
+ case *ics23.CommitmentProof_Exist:
+ subroot, err = proofs[i].Calculate()
+ if err != nil {
+ return sdkerrors.Wrapf(ErrInvalidProof, "could not calculate proof root at index %d, merkle tree may be empty. %v", i, err)
+ }
+ // Since keys are passed in from highest to lowest, we must grab their indices in reverse order
+ // from the proofs and specs which are lowest to highest
+ key, err := keys.GetKey(uint64(len(keys.KeyPath) - 1 - i))
+ if err != nil {
+ return sdkerrors.Wrapf(ErrInvalidProof, "could not retrieve key bytes for key %s: %v", keys.KeyPath[len(keys.KeyPath)-1-i], err)
+ }
+
+ // verify membership of the proof at this index with appropriate key and value
+ if ok := ics23.VerifyMembership(specs[i], subroot, proofs[i], key, value); !ok {
+ return sdkerrors.Wrapf(ErrInvalidProof,
+ "chained membership proof failed to verify membership of value: %X in subroot %X at index %d. Please ensure the path and value are both correct.",
+ value, subroot, i)
+ }
+ // Set value to subroot so that we verify next proof in chain commits to this subroot
+ value = subroot
+ case *ics23.CommitmentProof_Nonexist:
+ return sdkerrors.Wrapf(ErrInvalidProof,
+ "chained membership proof contains nonexistence proof at index %d. If this is unexpected, please ensure that proof was queried from the height that contained the value in store and was queried with the correct key.",
+ i)
+ default:
+ return sdkerrors.Wrapf(ErrInvalidProof,
+ "expected proof type: %T, got: %T", &ics23.CommitmentProof_Exist{}, proofs[i].Proof)
+ }
+ }
+ // Check that chained proof root equals passed-in root
+ if !bytes.Equal(root, subroot) {
+ return sdkerrors.Wrapf(ErrInvalidProof,
+ "proof did not commit to expected root: %X, got: %X. Please ensure proof was submitted with correct proofHeight and to the correct chain.",
+ root, subroot)
+ }
+ return nil
+}
+
+// blankMerkleProof and blankProofOps will be used to compare against their zero values,
+// and are declared as globals to avoid having to unnecessarily re-allocate on every comparison.
+var blankMerkleProof = &MerkleProof{}
+var blankProofOps = &tmcrypto.ProofOps{}
+
+// Empty returns true if the proof is empty
+func (proof *MerkleProof) Empty() bool {
+ return proof == nil || proto.Equal(proof, blankMerkleProof) || proto.Equal(proof, blankProofOps)
+}
+
+// ValidateBasic checks if the proof is empty.
+func (proof MerkleProof) ValidateBasic() error {
+ if proof.Empty() {
+ return ErrInvalidProof
+ }
+ return nil
+}
+
+// validateVerificationArgs verifies the proof arguments are valid
+func (proof MerkleProof) validateVerificationArgs(specs []*ics23.ProofSpec, root exported.Root) error {
+ if proof.Empty() {
+ return sdkerrors.Wrap(ErrInvalidMerkleProof, "proof cannot be empty")
+ }
+
+ if root == nil || root.Empty() {
+ return sdkerrors.Wrap(ErrInvalidMerkleProof, "root cannot be empty")
+ }
+
+ if len(specs) != len(proof.Proofs) {
+ return sdkerrors.Wrapf(ErrInvalidMerkleProof,
+ "length of specs: %d not equal to length of proof: %d",
+ len(specs), len(proof.Proofs))
+ }
+
+ for i, spec := range specs {
+ if spec == nil {
+ return sdkerrors.Wrapf(ErrInvalidProof, "spec at position %d is nil", i)
+ }
+ }
+ return nil
+}
diff --git a/core/23-commitment/types/merkle_test.go b/core/23-commitment/types/merkle_test.go
new file mode 100644
index 00000000..3c53847f
--- /dev/null
+++ b/core/23-commitment/types/merkle_test.go
@@ -0,0 +1,172 @@
+package types_test
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+ abci "github.com/tendermint/tendermint/abci/types"
+
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
+)
+
+func (suite *MerkleTestSuite) TestVerifyMembership() {
+ suite.iavlStore.Set([]byte("MYKEY"), []byte("MYVALUE"))
+ cid := suite.store.Commit()
+
+ res := suite.store.Query(abci.RequestQuery{
+ Path: fmt.Sprintf("/%s/key", suite.storeKey.Name()), // required path to get key/value+proof
+ Data: []byte("MYKEY"),
+ Prove: true,
+ })
+ require.NotNil(suite.T(), res.ProofOps)
+
+ proof, err := types.ConvertProofs(res.ProofOps)
+ require.NoError(suite.T(), err)
+
+ suite.Require().NoError(proof.ValidateBasic())
+ suite.Require().Error(types.MerkleProof{}.ValidateBasic())
+
+ cases := []struct {
+ name string
+ root []byte
+ pathArr []string
+ value []byte
+ malleate func()
+ shouldPass bool
+ }{
+ {"valid proof", cid.Hash, []string{suite.storeKey.Name(), "MYKEY"}, []byte("MYVALUE"), func() {}, true}, // valid proof
+ {"wrong value", cid.Hash, []string{suite.storeKey.Name(), "MYKEY"}, []byte("WRONGVALUE"), func() {}, false}, // invalid proof with wrong value
+ {"nil value", cid.Hash, []string{suite.storeKey.Name(), "MYKEY"}, []byte(nil), func() {}, false}, // invalid proof with nil value
+ {"wrong key", cid.Hash, []string{suite.storeKey.Name(), "NOTMYKEY"}, []byte("MYVALUE"), func() {}, false}, // invalid proof with wrong key
+ {"wrong path 1", cid.Hash, []string{suite.storeKey.Name(), "MYKEY", "MYKEY"}, []byte("MYVALUE"), func() {}, false}, // invalid proof with wrong path
+ {"wrong path 2", cid.Hash, []string{suite.storeKey.Name()}, []byte("MYVALUE"), func() {}, false}, // invalid proof with wrong path
+ {"wrong path 3", cid.Hash, []string{"MYKEY"}, []byte("MYVALUE"), func() {}, false}, // invalid proof with wrong path
+ {"wrong storekey", cid.Hash, []string{"otherStoreKey", "MYKEY"}, []byte("MYVALUE"), func() {}, false}, // invalid proof with wrong store prefix
+ {"wrong root", []byte("WRONGROOT"), []string{suite.storeKey.Name(), "MYKEY"}, []byte("MYVALUE"), func() {}, false}, // invalid proof with wrong root
+ {"nil root", []byte(nil), []string{suite.storeKey.Name(), "MYKEY"}, []byte("MYVALUE"), func() {}, false}, // invalid proof with nil root
+ {"proof is wrong length", cid.Hash, []string{suite.storeKey.Name(), "MYKEY"}, []byte("MYVALUE"), func() {
+ proof = types.MerkleProof{
+ Proofs: proof.Proofs[1:],
+ }
+ }, false}, // invalid proof with wrong length
+
+ }
+
+ for i, tc := range cases {
+ tc := tc
+ suite.Run(tc.name, func() {
+ tc.malleate()
+
+ root := types.NewMerkleRoot(tc.root)
+ path := types.NewMerklePath(tc.pathArr...)
+
+ err := proof.VerifyMembership(types.GetSDKSpecs(), &root, path, tc.value)
+
+ if tc.shouldPass {
+ // nolint: scopelint
+ suite.Require().NoError(err, "test case %d should have passed", i)
+ } else {
+ // nolint: scopelint
+ suite.Require().Error(err, "test case %d should have failed", i)
+ }
+ })
+ }
+
+}
+
+func (suite *MerkleTestSuite) TestVerifyNonMembership() {
+ suite.iavlStore.Set([]byte("MYKEY"), []byte("MYVALUE"))
+ cid := suite.store.Commit()
+
+ // Get Proof
+ res := suite.store.Query(abci.RequestQuery{
+ Path: fmt.Sprintf("/%s/key", suite.storeKey.Name()), // required path to get key/value+proof
+ Data: []byte("MYABSENTKEY"),
+ Prove: true,
+ })
+ require.NotNil(suite.T(), res.ProofOps)
+
+ proof, err := types.ConvertProofs(res.ProofOps)
+ require.NoError(suite.T(), err)
+
+ suite.Require().NoError(proof.ValidateBasic())
+
+ cases := []struct {
+ name string
+ root []byte
+ pathArr []string
+ malleate func()
+ shouldPass bool
+ }{
+ {"valid proof", cid.Hash, []string{suite.storeKey.Name(), "MYABSENTKEY"}, func() {}, true}, // valid proof
+ {"wrong key", cid.Hash, []string{suite.storeKey.Name(), "MYKEY"}, func() {}, false}, // invalid proof with existent key
+ {"wrong path 1", cid.Hash, []string{suite.storeKey.Name(), "MYKEY", "MYABSENTKEY"}, func() {}, false}, // invalid proof with wrong path
+ {"wrong path 2", cid.Hash, []string{suite.storeKey.Name(), "MYABSENTKEY", "MYKEY"}, func() {}, false}, // invalid proof with wrong path
+ {"wrong path 3", cid.Hash, []string{suite.storeKey.Name()}, func() {}, false}, // invalid proof with wrong path
+ {"wrong path 4", cid.Hash, []string{"MYABSENTKEY"}, func() {}, false}, // invalid proof with wrong path
+ {"wrong storeKey", cid.Hash, []string{"otherStoreKey", "MYABSENTKEY"}, func() {}, false}, // invalid proof with wrong store prefix
+ {"wrong root", []byte("WRONGROOT"), []string{suite.storeKey.Name(), "MYABSENTKEY"}, func() {}, false}, // invalid proof with wrong root
+ {"nil root", []byte(nil), []string{suite.storeKey.Name(), "MYABSENTKEY"}, func() {}, false}, // invalid proof with nil root
+ {"proof is wrong length", cid.Hash, []string{suite.storeKey.Name(), "MYKEY"}, func() {
+ proof = types.MerkleProof{
+ Proofs: proof.Proofs[1:],
+ }
+ }, false}, // invalid proof with wrong length
+
+ }
+
+ for i, tc := range cases {
+ tc := tc
+
+ suite.Run(tc.name, func() {
+ tc.malleate()
+
+ root := types.NewMerkleRoot(tc.root)
+ path := types.NewMerklePath(tc.pathArr...)
+
+ err := proof.VerifyNonMembership(types.GetSDKSpecs(), &root, path)
+
+ if tc.shouldPass {
+ // nolint: scopelint
+ suite.Require().NoError(err, "test case %d should have passed", i)
+ } else {
+ // nolint: scopelint
+ suite.Require().Error(err, "test case %d should have failed", i)
+ }
+ })
+ }
+
+}
+
+func TestApplyPrefix(t *testing.T) {
+ prefix := types.NewMerklePrefix([]byte("storePrefixKey"))
+
+ pathStr := "pathone/pathtwo/paththree/key"
+ path := types.MerklePath{
+ KeyPath: []string{pathStr},
+ }
+
+ prefixedPath, err := types.ApplyPrefix(prefix, path)
+ require.NoError(t, err, "valid prefix returns error")
+
+ require.Equal(t, "/storePrefixKey/"+pathStr, prefixedPath.Pretty(), "Prefixed path incorrect")
+ require.Equal(t, "/storePrefixKey/pathone%2Fpathtwo%2Fpaththree%2Fkey", prefixedPath.String(), "Prefixed escaped path incorrect")
+}
+
+func TestString(t *testing.T) {
+ path := types.NewMerklePath("rootKey", "storeKey", "path/to/leaf")
+
+	require.Equal(t, "/rootKey/storeKey/path%2Fto%2Fleaf", path.String(), "path String returns unexpected value")
+ require.Equal(t, "/rootKey/storeKey/path/to/leaf", path.Pretty(), "path's pretty string representation is incorrect")
+
+ onePath := types.NewMerklePath("path/to/leaf")
+
+ require.Equal(t, "/path%2Fto%2Fleaf", onePath.String(), "one element path does not have correct string representation")
+ require.Equal(t, "/path/to/leaf", onePath.Pretty(), "one element path has incorrect pretty string representation")
+
+ zeroPath := types.NewMerklePath()
+
+ require.Equal(t, "", zeroPath.String(), "zero element path does not have correct string representation")
+ require.Equal(t, "", zeroPath.Pretty(), "zero element path does not have correct pretty string representation")
+}
diff --git a/core/23-commitment/types/utils.go b/core/23-commitment/types/utils.go
new file mode 100644
index 00000000..e662f772
--- /dev/null
+++ b/core/23-commitment/types/utils.go
@@ -0,0 +1,28 @@
+package types
+
+import (
+ ics23 "github.com/confio/ics23/go"
+ crypto "github.com/tendermint/tendermint/proto/tendermint/crypto"
+
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+)
+
+// ConvertProofs converts crypto.ProofOps into MerkleProof
+func ConvertProofs(tmProof *crypto.ProofOps) (MerkleProof, error) {
+ if tmProof == nil {
+ return MerkleProof{}, sdkerrors.Wrapf(ErrInvalidMerkleProof, "tendermint proof is nil")
+ }
+ // Unmarshal all proof ops to CommitmentProof
+ proofs := make([]*ics23.CommitmentProof, len(tmProof.Ops))
+ for i, op := range tmProof.Ops {
+ var p ics23.CommitmentProof
+ err := p.Unmarshal(op.Data)
+ if err != nil || p.Proof == nil {
+ return MerkleProof{}, sdkerrors.Wrapf(ErrInvalidMerkleProof, "could not unmarshal proof op into CommitmentProof at index %d: %v", i, err)
+ }
+ proofs[i] = &p
+ }
+ return MerkleProof{
+ Proofs: proofs,
+ }, nil
+}
diff --git a/core/23-commitment/types/utils_test.go b/core/23-commitment/types/utils_test.go
new file mode 100644
index 00000000..f852fb6c
--- /dev/null
+++ b/core/23-commitment/types/utils_test.go
@@ -0,0 +1,98 @@
+package types_test
+
+import (
+ "fmt"
+
+ "github.com/stretchr/testify/require"
+ abci "github.com/tendermint/tendermint/abci/types"
+ crypto "github.com/tendermint/tendermint/proto/tendermint/crypto"
+
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
+)
+
+func (suite *MerkleTestSuite) TestConvertProofs() {
+ suite.iavlStore.Set([]byte("MYKEY"), []byte("MYVALUE"))
+ cid := suite.store.Commit()
+
+ root := types.NewMerkleRoot(cid.Hash)
+ existsPath := types.NewMerklePath(suite.storeKey.Name(), "MYKEY")
+ nonexistPath := types.NewMerklePath(suite.storeKey.Name(), "NOTMYKEY")
+ value := []byte("MYVALUE")
+
+ var proofOps *crypto.ProofOps
+ testcases := []struct {
+ name string
+ malleate func()
+ keyExists bool
+ expPass bool
+ }{
+ {
+ "success for ExistenceProof",
+ func() {
+ res := suite.store.Query(abci.RequestQuery{
+ Path: fmt.Sprintf("/%s/key", suite.storeKey.Name()), // required path to get key/value+proof
+ Data: []byte("MYKEY"),
+ Prove: true,
+ })
+ require.NotNil(suite.T(), res.ProofOps)
+
+ proofOps = res.ProofOps
+ },
+ true, true,
+ },
+ {
+ "success for NonexistenceProof",
+ func() {
+ res := suite.store.Query(abci.RequestQuery{
+ Path: fmt.Sprintf("/%s/key", suite.storeKey.Name()), // required path to get key/value+proof
+ Data: []byte("NOTMYKEY"),
+ Prove: true,
+ })
+ require.NotNil(suite.T(), res.ProofOps)
+
+ proofOps = res.ProofOps
+ },
+ false, true,
+ },
+ {
+ "nil proofOps",
+ func() {
+ proofOps = nil
+ },
+ true, false,
+ },
+ {
+ "proof op data is nil",
+ func() {
+ res := suite.store.Query(abci.RequestQuery{
+ Path: fmt.Sprintf("/%s/key", suite.storeKey.Name()), // required path to get key/value+proof
+ Data: []byte("MYKEY"),
+ Prove: true,
+ })
+ require.NotNil(suite.T(), res.ProofOps)
+
+ proofOps = res.ProofOps
+ proofOps.Ops[0].Data = nil
+ },
+ true, false,
+ },
+ }
+
+ for _, tc := range testcases {
+ tc.malleate()
+
+ proof, err := types.ConvertProofs(proofOps)
+ if tc.expPass {
+ suite.Require().NoError(err, "ConvertProofs unexpectedly returned error for case: %s", tc.name)
+ if tc.keyExists {
+ err := proof.VerifyMembership(types.GetSDKSpecs(), &root, existsPath, value)
+ suite.Require().NoError(err, "converted proof failed to verify membership for case: %s", tc.name)
+ } else {
+ err := proof.VerifyNonMembership(types.GetSDKSpecs(), &root, nonexistPath)
+				suite.Require().NoError(err, "converted proof failed to verify non-membership for case: %s", tc.name)
+ }
+ } else {
+ suite.Require().Error(err, "ConvertProofs passed on invalid case for case: %s", tc.name)
+ }
+ }
+}
diff --git a/core/24-host/errors.go b/core/24-host/errors.go
new file mode 100644
index 00000000..fe8129bd
--- /dev/null
+++ b/core/24-host/errors.go
@@ -0,0 +1,15 @@
+package host
+
+import (
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+)
+
+// SubModuleName defines the ICS 24 host
+const SubModuleName = "host"
+
+// IBC client sentinel errors
+var (
+ ErrInvalidID = sdkerrors.Register(SubModuleName, 2, "invalid identifier")
+ ErrInvalidPath = sdkerrors.Register(SubModuleName, 3, "invalid path")
+ ErrInvalidPacket = sdkerrors.Register(SubModuleName, 4, "invalid packet")
+)
diff --git a/core/24-host/keys.go b/core/24-host/keys.go
new file mode 100644
index 00000000..21f4bc43
--- /dev/null
+++ b/core/24-host/keys.go
@@ -0,0 +1,235 @@
+package host
+
+import (
+ "fmt"
+
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+)
+
+const (
+ // ModuleName is the name of the IBC module
+ ModuleName = "ibc"
+
+ // StoreKey is the string store representation
+ StoreKey string = ModuleName
+
+ // QuerierRoute is the querier route for the IBC module
+ QuerierRoute string = ModuleName
+
+ // RouterKey is the msg router key for the IBC module
+ RouterKey string = ModuleName
+)
+
+// KVStore key prefixes for IBC
+var (
+ KeyClientStorePrefix = []byte("clients")
+)
+
+// KVStore key prefixes for IBC
+const (
+ KeyClientState = "clientState"
+ KeyConsensusStatePrefix = "consensusStates"
+ KeyConnectionPrefix = "connections"
+ KeyChannelEndPrefix = "channelEnds"
+ KeyChannelPrefix = "channels"
+ KeyPortPrefix = "ports"
+ KeySequencePrefix = "sequences"
+ KeyChannelCapabilityPrefix = "capabilities"
+ KeyNextSeqSendPrefix = "nextSequenceSend"
+ KeyNextSeqRecvPrefix = "nextSequenceRecv"
+ KeyNextSeqAckPrefix = "nextSequenceAck"
+ KeyPacketCommitmentPrefix = "commitments"
+ KeyPacketAckPrefix = "acks"
+ KeyPacketReceiptPrefix = "receipts"
+)
+
+// FullClientPath returns the full path of a specific client path in the format:
+// "clients/{clientID}/{path}" as a string.
+func FullClientPath(clientID string, path string) string {
+ return fmt.Sprintf("%s/%s/%s", KeyClientStorePrefix, clientID, path)
+}
+
+// FullClientKey returns the full path of a specific client path in the format:
+// "clients/{clientID}/{path}" as a byte array.
+func FullClientKey(clientID string, path []byte) []byte {
+ return []byte(FullClientPath(clientID, string(path)))
+}
+
+// ICS02
+// The following paths are the keys to the store as defined in https://github.com/cosmos/ics/tree/master/spec/ics-002-client-semantics#path-space
+
+// FullClientStatePath takes a client identifier and returns a Path under which to store a
+// particular client state
+func FullClientStatePath(clientID string) string {
+ return FullClientPath(clientID, KeyClientState)
+}
+
+// FullClientStateKey takes a client identifier and returns a Key under which to store a
+// particular client state.
+func FullClientStateKey(clientID string) []byte {
+ return FullClientKey(clientID, []byte(KeyClientState))
+}
+
+// ClientStateKey returns a store key under which a particular client state is stored
+// in a client prefixed store
+func ClientStateKey() []byte {
+ return []byte(KeyClientState)
+}
+
+// FullConsensusStatePath takes a client identifier and returns a Path under which to
+// store the consensus state of a client.
+func FullConsensusStatePath(clientID string, height exported.Height) string {
+ return FullClientPath(clientID, ConsensusStatePath(height))
+}
+
+// FullConsensusStateKey returns the store key for the consensus state of a particular
+// client.
+func FullConsensusStateKey(clientID string, height exported.Height) []byte {
+ return []byte(FullConsensusStatePath(clientID, height))
+}
+
+// ConsensusStatePath returns the suffix store key for the consensus state at a
+// particular height stored in a client prefixed store.
+func ConsensusStatePath(height exported.Height) string {
+ return fmt.Sprintf("%s/%s", KeyConsensusStatePrefix, height)
+}
+
+// ConsensusStateKey returns the store key for the consensus state of a particular
+// client stored in a client prefixed store.
+func ConsensusStateKey(height exported.Height) []byte {
+ return []byte(ConsensusStatePath(height))
+}
+
+// ICS03
+// The following paths are the keys to the store as defined in https://github.com/cosmos/ics/tree/master/spec/ics-003-connection-semantics#store-paths
+
+// ClientConnectionsPath defines a reverse mapping from clients to a set of connections
+func ClientConnectionsPath(clientID string) string {
+ return FullClientPath(clientID, KeyConnectionPrefix)
+}
+
+// ClientConnectionsKey returns the store key for the connections of a given client
+func ClientConnectionsKey(clientID string) []byte {
+ return []byte(ClientConnectionsPath(clientID))
+}
+
+// ConnectionPath defines the path under which connection paths are stored
+func ConnectionPath(connectionID string) string {
+ return fmt.Sprintf("%s/%s", KeyConnectionPrefix, connectionID)
+}
+
+// ConnectionKey returns the store key for a particular connection
+func ConnectionKey(connectionID string) []byte {
+ return []byte(ConnectionPath(connectionID))
+}
+
+// ICS04
+// The following paths are the keys to the store as defined in https://github.com/cosmos/ics/tree/master/spec/ics-004-channel-and-packet-semantics#store-paths
+
+// ChannelPath defines the path under which channels are stored
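+//
+// For example (with hypothetical identifiers), ChannelPath("transfer", "channel-0")
+// returns "channelEnds/ports/transfer/channels/channel-0".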
+func ChannelPath(portID, channelID string) string {
+ return fmt.Sprintf("%s/%s", KeyChannelEndPrefix, channelPath(portID, channelID))
+}
+
+// ChannelKey returns the store key for a particular channel
+func ChannelKey(portID, channelID string) []byte {
+ return []byte(ChannelPath(portID, channelID))
+}
+
+// ChannelCapabilityPath defines the path under which capability keys associated
+// with a channel are stored
+func ChannelCapabilityPath(portID, channelID string) string {
+ return fmt.Sprintf("%s/%s", KeyChannelCapabilityPrefix, channelPath(portID, channelID))
+}
+
+// NextSequenceSendPath defines the next send sequence counter store path
+func NextSequenceSendPath(portID, channelID string) string {
+ return fmt.Sprintf("%s/%s", KeyNextSeqSendPrefix, channelPath(portID, channelID))
+}
+
+// NextSequenceSendKey returns the store key for the send sequence of a particular
+// channel bound to a specific port.
+func NextSequenceSendKey(portID, channelID string) []byte {
+ return []byte(NextSequenceSendPath(portID, channelID))
+}
+
+// NextSequenceRecvPath defines the next receive sequence counter store path.
+func NextSequenceRecvPath(portID, channelID string) string {
+ return fmt.Sprintf("%s/%s", KeyNextSeqRecvPrefix, channelPath(portID, channelID))
+}
+
+// NextSequenceRecvKey returns the store key for the receive sequence of a particular
+// channel bound to a specific port.
+func NextSequenceRecvKey(portID, channelID string) []byte {
+ return []byte(NextSequenceRecvPath(portID, channelID))
+}
+
+// NextSequenceAckPath defines the next acknowledgement sequence counter store path
+func NextSequenceAckPath(portID, channelID string) string {
+ return fmt.Sprintf("%s/%s", KeyNextSeqAckPrefix, channelPath(portID, channelID))
+}
+
+// NextSequenceAckKey returns the store key for the acknowledgement sequence of
+// a particular channel bound to a specific port.
+func NextSequenceAckKey(portID, channelID string) []byte {
+ return []byte(NextSequenceAckPath(portID, channelID))
+}
+
+// PacketCommitmentPath defines the commitments to packet data fields store path
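+//
+// For example (with hypothetical identifiers), PacketCommitmentPath("transfer", "channel-0", 1)
+// returns "commitments/ports/transfer/channels/channel-0/sequences/1".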
+func PacketCommitmentPath(portID, channelID string, sequence uint64) string {
+ return fmt.Sprintf("%s/%d", PacketCommitmentPrefixPath(portID, channelID), sequence)
+}
+
+// PacketCommitmentKey returns the store key under which a packet commitment
+// is stored
+func PacketCommitmentKey(portID, channelID string, sequence uint64) []byte {
+ return []byte(PacketCommitmentPath(portID, channelID, sequence))
+}
+
+// PacketCommitmentPrefixPath defines the prefix for commitments to packet data fields store path.
+func PacketCommitmentPrefixPath(portID, channelID string) string {
+ return fmt.Sprintf("%s/%s/%s", KeyPacketCommitmentPrefix, channelPath(portID, channelID), KeySequencePrefix)
+}
+
+// PacketAcknowledgementPath defines the packet acknowledgement store path
+func PacketAcknowledgementPath(portID, channelID string, sequence uint64) string {
+ return fmt.Sprintf("%s/%d", PacketAcknowledgementPrefixPath(portID, channelID), sequence)
+}
+
+// PacketAcknowledgementKey returns the store key under which a packet
+// acknowledgement is stored
+func PacketAcknowledgementKey(portID, channelID string, sequence uint64) []byte {
+ return []byte(PacketAcknowledgementPath(portID, channelID, sequence))
+}
+
+// PacketAcknowledgementPrefixPath defines the prefix for commitments to packet data fields store path.
+func PacketAcknowledgementPrefixPath(portID, channelID string) string {
+ return fmt.Sprintf("%s/%s/%s", KeyPacketAckPrefix, channelPath(portID, channelID), KeySequencePrefix)
+}
+
+// PacketReceiptPath defines the packet receipt store path
+func PacketReceiptPath(portID, channelID string, sequence uint64) string {
+ return fmt.Sprintf("%s/%s/%s", KeyPacketReceiptPrefix, channelPath(portID, channelID), sequencePath(sequence))
+}
+
+// PacketReceiptKey returns the store key under which a packet
+// receipt is stored
+func PacketReceiptKey(portID, channelID string, sequence uint64) []byte {
+ return []byte(PacketReceiptPath(portID, channelID, sequence))
+}
+
+func channelPath(portID, channelID string) string {
+ return fmt.Sprintf("%s/%s/%s/%s", KeyPortPrefix, portID, KeyChannelPrefix, channelID)
+}
+
+func sequencePath(sequence uint64) string {
+ return fmt.Sprintf("%s/%d", KeySequencePrefix, sequence)
+}
+
+// ICS05
+// The following paths are the keys to the store as defined in https://github.com/cosmos/ics/tree/master/spec/ics-005-port-allocation#store-paths
+
+// PortPath defines the path under which port paths are stored on the capability module
+func PortPath(portID string) string {
+ return fmt.Sprintf("%s/%s", KeyPortPrefix, portID)
+}
diff --git a/core/24-host/parse.go b/core/24-host/parse.go
new file mode 100644
index 00000000..8c345950
--- /dev/null
+++ b/core/24-host/parse.go
@@ -0,0 +1,79 @@
+package host
+
+import (
+ "strconv"
+ "strings"
+
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+)
+
+// ParseIdentifier parses the sequence from the identifier using the provided prefix. This function
+// does not need to be used by counterparty chains. SDK generated connection and channel identifiers
+// are required to use this format.
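+//
+// For example, ParseIdentifier("connection-10", "connection-") returns 10.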
+func ParseIdentifier(identifier, prefix string) (uint64, error) {
+ if !strings.HasPrefix(identifier, prefix) {
+ return 0, sdkerrors.Wrapf(ErrInvalidID, "identifier doesn't contain prefix `%s`", prefix)
+ }
+
+ splitStr := strings.Split(identifier, prefix)
+ if len(splitStr) != 2 {
+ return 0, sdkerrors.Wrapf(ErrInvalidID, "identifier must be in format: `%s{N}`", prefix)
+ }
+
+ // sanity check
+ if splitStr[0] != "" {
+ return 0, sdkerrors.Wrapf(ErrInvalidID, "identifier must begin with prefix %s", prefix)
+ }
+
+ sequence, err := strconv.ParseUint(splitStr[1], 10, 64)
+ if err != nil {
+ return 0, sdkerrors.Wrap(err, "failed to parse identifier sequence")
+ }
+ return sequence, nil
+}
+
+// ParseConnectionPath returns the connection ID from a full path. It returns
+// an error if the provided path is invalid.
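+//
+// For example, ParseConnectionPath("connections/connection-0") returns "connection-0".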
+func ParseConnectionPath(path string) (string, error) {
+ split := strings.Split(path, "/")
+ if len(split) != 2 {
+ return "", sdkerrors.Wrapf(ErrInvalidPath, "cannot parse connection path %s", path)
+ }
+
+ return split[1], nil
+}
+
+// ParseChannelPath returns the port and channel ID from a full path. It returns
+// an error if the provided path is invalid.
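+//
+// For example (with hypothetical identifiers), ParseChannelPath("channelEnds/ports/transfer/channels/channel-0")
+// returns ("transfer", "channel-0").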
+func ParseChannelPath(path string) (string, string, error) {
+ split := strings.Split(path, "/")
+ if len(split) < 5 {
+ return "", "", sdkerrors.Wrapf(ErrInvalidPath, "cannot parse channel path %s", path)
+ }
+
+ if split[1] != KeyPortPrefix || split[3] != KeyChannelPrefix {
+ return "", "", sdkerrors.Wrapf(ErrInvalidPath, "cannot parse channel path %s", path)
+ }
+
+ return split[2], split[4], nil
+}
+
+// MustParseConnectionPath returns the connection ID from a full path. Panics
+// if the provided path is invalid.
+func MustParseConnectionPath(path string) string {
+ connectionID, err := ParseConnectionPath(path)
+ if err != nil {
+ panic(err)
+ }
+ return connectionID
+}
+
+// MustParseChannelPath returns the port and channel ID from a full path. Panics
+// if the provided path is invalid.
+func MustParseChannelPath(path string) (string, string) {
+ portID, channelID, err := ParseChannelPath(path)
+ if err != nil {
+ panic(err)
+ }
+ return portID, channelID
+}
diff --git a/core/24-host/parse_test.go b/core/24-host/parse_test.go
new file mode 100644
index 00000000..9f74bf5f
--- /dev/null
+++ b/core/24-host/parse_test.go
@@ -0,0 +1,48 @@
+package host_test
+
+import (
+ "math"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
+ host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+)
+
+func TestParseIdentifier(t *testing.T) {
+ testCases := []struct {
+ name string
+ identifier string
+ prefix string
+ expSeq uint64
+ expPass bool
+ }{
+ {"valid 0", "connection-0", "connection-", 0, true},
+ {"valid 1", "connection-1", "connection-", 1, true},
+ {"valid large sequence", connectiontypes.FormatConnectionIdentifier(math.MaxUint64), "connection-", math.MaxUint64, true},
+ // one above uint64 max
+ {"invalid uint64", "connection-18446744073709551616", "connection-", 0, false},
+ // uint64 == 20 characters
+ {"invalid large sequence", "connection-2345682193567182931243", "connection-", 0, false},
+ {"capital prefix", "Connection-0", "connection-", 0, false},
+ {"double prefix", "connection-connection-0", "connection-", 0, false},
+ {"doesn't have prefix", "connection-0", "prefix", 0, false},
+ {"missing dash", "connection0", "connection-", 0, false},
+ {"blank id", " ", "connection-", 0, false},
+ {"empty id", "", "connection-", 0, false},
+ {"negative sequence", "connection--1", "connection-", 0, false},
+ }
+
+ for _, tc := range testCases {
+
+ seq, err := host.ParseIdentifier(tc.identifier, tc.prefix)
+ require.Equal(t, tc.expSeq, seq)
+
+ if tc.expPass {
+ require.NoError(t, err, tc.name)
+ } else {
+ require.Error(t, err, tc.name)
+ }
+ }
+}
diff --git a/core/24-host/validate.go b/core/24-host/validate.go
new file mode 100644
index 00000000..10458e8d
--- /dev/null
+++ b/core/24-host/validate.go
@@ -0,0 +1,114 @@
+package host
+
+import (
+ "regexp"
+ "strings"
+
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+)
+
+// DefaultMaxCharacterLength defines the default maximum character length used
+// in validation of identifiers including the client, connection, port and
+// channel identifiers.
+//
+// NOTE: this restriction is specific to this golang implementation of IBC. If
+// your use case demands a higher limit, please open an issue and we will consider
+// adjusting this restriction.
+const DefaultMaxCharacterLength = 64
+
+// IsValidID defines a regular expression to check if the string consists of
+// characters in one of the following categories only:
+// - Alphanumeric
+// - `.`, `_`, `+`, `-`, `#`
+// - `[`, `]`, `<`, `>`
+var IsValidID = regexp.MustCompile(`^[a-zA-Z0-9\.\_\+\-\#\[\]\<\>]+$`).MatchString
+
+// ICS 024 Identifier and Path Validation Implementation
+//
+// This file defines ValidateFn to validate identifier and path strings
+// The spec for ICS 024 can be located here:
+// https://github.com/cosmos/ics/tree/master/spec/ics-024-host-requirements
+
+// ValidateFn function type to validate path and identifier bytestrings
+type ValidateFn func(string) error
+
+func defaultIdentifierValidator(id string, min, max int) error { //nolint:unparam
+ if strings.TrimSpace(id) == "" {
+ return sdkerrors.Wrap(ErrInvalidID, "identifier cannot be blank")
+ }
+ // valid id MUST NOT contain "/" separator
+ if strings.Contains(id, "/") {
+ return sdkerrors.Wrapf(ErrInvalidID, "identifier %s cannot contain separator '/'", id)
+ }
+ // valid id must fit the length requirements
+ if len(id) < min || len(id) > max {
+ return sdkerrors.Wrapf(ErrInvalidID, "identifier %s has invalid length: %d, must be between %d-%d characters", id, len(id), min, max)
+ }
+	// valid id must contain only allowed characters (see IsValidID)
+ if !IsValidID(id) {
+ return sdkerrors.Wrapf(
+ ErrInvalidID,
+ "identifier %s must contain only alphanumeric or the following characters: '.', '_', '+', '-', '#', '[', ']', '<', '>'",
+ id,
+ )
+ }
+ return nil
+}
+
+// ClientIdentifierValidator is the default validator function for Client identifiers.
+// A valid Identifier must be between 9-64 characters and only contain alphanumeric and some allowed
+// special characters (see IsValidID).
+func ClientIdentifierValidator(id string) error {
+ return defaultIdentifierValidator(id, 9, DefaultMaxCharacterLength)
+}
+
+// ConnectionIdentifierValidator is the default validator function for Connection identifiers.
+// A valid Identifier must be between 10-64 characters and only contain alphanumeric and some allowed
+// special characters (see IsValidID).
+func ConnectionIdentifierValidator(id string) error {
+ return defaultIdentifierValidator(id, 10, DefaultMaxCharacterLength)
+}
+
+// ChannelIdentifierValidator is the default validator function for Channel identifiers.
+// A valid Identifier must be between 8-64 characters and only contain alphanumeric and some allowed
+// special characters (see IsValidID).
+func ChannelIdentifierValidator(id string) error {
+ return defaultIdentifierValidator(id, 8, DefaultMaxCharacterLength)
+}
+
+// PortIdentifierValidator is the default validator function for Port identifiers.
+// A valid Identifier must be between 2-64 characters and only contain alphanumeric and some allowed
+// special characters (see IsValidID).
+func PortIdentifierValidator(id string) error {
+ return defaultIdentifierValidator(id, 2, DefaultMaxCharacterLength)
+}
+
+// NewPathValidator takes in a Identifier Validator function and returns
+// a Path Validator function which requires path to consist of `/`-separated valid identifiers,
+// where a valid identifier is between 1-64 characters, contains only alphanumeric and some allowed
+// special characters (see IsValidID), and satisfies the custom `idValidator` function.
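+//
+// For example (illustrative), NewPathValidator(PortIdentifierValidator)("ports/transfer") returns nil,
+// while paths with a leading or trailing '/' such as "ports/transfer/" are rejected.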
+func NewPathValidator(idValidator ValidateFn) ValidateFn {
+ return func(path string) error {
+ pathArr := strings.Split(path, "/")
+ if len(pathArr) > 0 && pathArr[0] == path {
+ return sdkerrors.Wrapf(ErrInvalidPath, "path %s doesn't contain any separator '/'", path)
+ }
+
+ for _, p := range pathArr {
+ // a path beginning or ending in a separator returns empty string elements.
+ if p == "" {
+ return sdkerrors.Wrapf(ErrInvalidPath, "path %s cannot begin or end with '/'", path)
+ }
+
+ if err := idValidator(p); err != nil {
+ return err
+ }
+			// Each path element must either be a valid identifier or a constant number
+ if err := defaultIdentifierValidator(p, 1, DefaultMaxCharacterLength); err != nil {
+ return sdkerrors.Wrapf(err, "path %s contains an invalid identifier: '%s'", path, p)
+ }
+ }
+
+ return nil
+ }
+}
diff --git a/core/24-host/validate_test.go b/core/24-host/validate_test.go
new file mode 100644
index 00000000..40987bd1
--- /dev/null
+++ b/core/24-host/validate_test.go
@@ -0,0 +1,119 @@
+package host
+
+import (
+ "fmt"
+ "strings"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+type testCase struct {
+ msg string
+ id string
+ expPass bool
+}
+
+func TestDefaultIdentifierValidator(t *testing.T) {
+ testCases := []testCase{
+ {"valid lowercase", "lowercaseid", true},
+ {"valid id special chars", "._+-#[]<>._+-#[]<>", true},
+ {"valid id lower and special chars", "lower._+-#[]<>", true},
+ {"numeric id", "1234567890", true},
+ {"uppercase id", "NOTLOWERCASE", true},
+ {"numeric id", "1234567890", true},
+ {"blank id", " ", false},
+ {"id length out of range", "1", false},
+ {"id is too long", "this identifier is too long to be used as a valid identifier", false},
+ {"path-like id", "lower/case/id", false},
+ {"invalid id", "(clientid)", false},
+ {"empty string", "", false},
+ }
+
+ for _, tc := range testCases {
+
+ err := ClientIdentifierValidator(tc.id)
+ err1 := ConnectionIdentifierValidator(tc.id)
+ err2 := ChannelIdentifierValidator(tc.id)
+ err3 := PortIdentifierValidator(tc.id)
+ if tc.expPass {
+ require.NoError(t, err, tc.msg)
+ require.NoError(t, err1, tc.msg)
+ require.NoError(t, err2, tc.msg)
+ require.NoError(t, err3, tc.msg)
+ } else {
+ require.Error(t, err, tc.msg)
+ require.Error(t, err1, tc.msg)
+ require.Error(t, err2, tc.msg)
+ require.Error(t, err3, tc.msg)
+ }
+ }
+}
+
+func TestPathValidator(t *testing.T) {
+ testCases := []testCase{
+ {"valid lowercase", "p/lowercaseid", true},
+ {"numeric path", "p/239123", true},
+ {"valid id special chars", "p/._+-#[]<>._+-#[]<>", true},
+ {"valid id lower and special chars", "lower/._+-#[]<>", true},
+ {"id length out of range", "p/l", true},
+ {"uppercase id", "p/NOTLOWERCASE", true},
+ {"invalid path", "lowercaseid", false},
+ {"blank id", "p/ ", false},
+ {"id length out of range", "p/12345678901234567890123456789012345678901234567890123456789012345", false},
+ {"invalid id", "p/(clientid)", false},
+ {"empty string", "", false},
+ {"separators only", "////", false},
+ {"just separator", "/", false},
+ {"begins with separator", "/id", false},
+ {"blank before separator", " /id", false},
+ {"ends with separator", "id/", false},
+ {"blank after separator", "id/ ", false},
+ {"blanks with separator", " / ", false},
+ }
+
+ for _, tc := range testCases {
+ f := NewPathValidator(func(path string) error {
+ return nil
+ })
+
+ err := f(tc.id)
+
+ if tc.expPass {
+ seps := strings.Count(tc.id, "/")
+ require.Equal(t, 1, seps)
+ require.NoError(t, err, tc.msg)
+ } else {
+ require.Error(t, err, tc.msg)
+ }
+ }
+}
+
+func TestCustomPathValidator(t *testing.T) {
+ validateFn := NewPathValidator(func(path string) error {
+ if !strings.HasPrefix(path, "id_") {
+			return fmt.Errorf("identifier %s must start with 'id_'", path)
+ }
+ return nil
+ })
+
+ testCases := []testCase{
+ {"valid custom path", "id_client/id_one", true},
+ {"invalid path", "client", false},
+ {"invalid custom path", "id_one/client", false},
+ {"invalid identifier", "id_client/id_1234567890123456789012345678901234567890123457890123456789012345", false},
+ {"separators only", "////", false},
+ {"just separator", "/", false},
+ {"ends with separator", "id_client/id_one/", false},
+		{"begins with separator", "/id_client/id_one", false},
+ }
+
+ for _, tc := range testCases {
+ err := validateFn(tc.id)
+ if tc.expPass {
+ require.NoError(t, err, tc.msg)
+ } else {
+ require.Error(t, err, tc.msg)
+ }
+ }
+}
diff --git a/core/client/cli/cli.go b/core/client/cli/cli.go
new file mode 100644
index 00000000..bda4123b
--- /dev/null
+++ b/core/client/cli/cli.go
@@ -0,0 +1,50 @@
+package cli
+
+import (
+ "github.com/spf13/cobra"
+
+ "github.com/cosmos/cosmos-sdk/client"
+ ibcclient "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client"
+ connection "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection"
+ channel "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel"
+ host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+)
+
+// GetTxCmd returns the transaction commands for this module
+func GetTxCmd() *cobra.Command {
+ ibcTxCmd := &cobra.Command{
+ Use: host.ModuleName,
+ Short: "IBC transaction subcommands",
+ DisableFlagParsing: true,
+ SuggestionsMinimumDistance: 2,
+ RunE: client.ValidateCmd,
+ }
+
+ ibcTxCmd.AddCommand(
+ ibcclient.GetTxCmd(),
+ connection.GetTxCmd(),
+ channel.GetTxCmd(),
+ )
+
+ return ibcTxCmd
+}
+
+// GetQueryCmd returns the cli query commands for this module
+func GetQueryCmd() *cobra.Command {
+ // Group ibc queries under a subcommand
+ ibcQueryCmd := &cobra.Command{
+ Use: host.ModuleName,
+ Short: "Querying commands for the IBC module",
+ DisableFlagParsing: true,
+ SuggestionsMinimumDistance: 2,
+ RunE: client.ValidateCmd,
+ }
+
+ ibcQueryCmd.AddCommand(
+ ibcclient.GetQueryCmd(),
+ connection.GetQueryCmd(),
+ channel.GetQueryCmd(),
+ )
+
+ return ibcQueryCmd
+}
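+
+// Illustrative sketch (not part of the original patch): how an application's
+// CLI might mount the IBC commands. The txCmd and queryCmd parameters stand in
+// for the app's existing "tx" and "query" parent commands and are assumptions
+// made for this example.
+func registerIBCCommands(txCmd, queryCmd *cobra.Command) {
+	// Both command trees live under the "ibc" namespace defined by host.ModuleName.
+	txCmd.AddCommand(GetTxCmd())
+	queryCmd.AddCommand(GetQueryCmd())
+}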
diff --git a/core/client/query.go b/core/client/query.go
new file mode 100644
index 00000000..7055f1c7
--- /dev/null
+++ b/core/client/query.go
@@ -0,0 +1,67 @@
+package client
+
+import (
+ "fmt"
+
+ abci "github.com/tendermint/tendermint/abci/types"
+
+ "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/codec"
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
+ host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+)
+
+// QueryTendermintProof performs an ABCI query with the given key and returns
+// the value of the query, the proto encoded merkle proof, and the height of
+// the Tendermint block containing the state root. The desired tendermint height
+// to perform the query should be set in the client context. The query will be
+// performed at one below this height (at the IAVL version) in order to obtain
+// the correct merkle proof. Proof queries at height less than or equal to 2 are
+// not supported. Queries with a client context height of 0 will perform a query
+// at the latest state available.
+// Issue: https://github.com/cosmos/cosmos-sdk/issues/6567
+func QueryTendermintProof(clientCtx client.Context, key []byte) ([]byte, []byte, clienttypes.Height, error) {
+ height := clientCtx.Height
+
+ // ABCI queries at heights 1, 2 or less than or equal to 0 are not supported.
+ // Base app does not support queries for height less than or equal to 1.
+ // Therefore, a query at height 2 would be equivalent to a query at height 3.
+	// A height of 0 will query with the latest state.
+ if height != 0 && height <= 2 {
+ return nil, nil, clienttypes.Height{}, fmt.Errorf("proof queries at height <= 2 are not supported")
+ }
+
+ // Use the IAVL height if a valid tendermint height is passed in.
+ // A height of 0 will query with the latest state.
+ if height != 0 {
+ height--
+ }
+
+ req := abci.RequestQuery{
+ Path: fmt.Sprintf("store/%s/key", host.StoreKey),
+ Height: height,
+ Data: key,
+ Prove: true,
+ }
+
+ res, err := clientCtx.QueryABCI(req)
+ if err != nil {
+ return nil, nil, clienttypes.Height{}, err
+ }
+
+ merkleProof, err := commitmenttypes.ConvertProofs(res.ProofOps)
+ if err != nil {
+ return nil, nil, clienttypes.Height{}, err
+ }
+
+ cdc := codec.NewProtoCodec(clientCtx.InterfaceRegistry)
+
+ proofBz, err := cdc.MarshalBinaryBare(&merkleProof)
+ if err != nil {
+ return nil, nil, clienttypes.Height{}, err
+ }
+
+ revision := clienttypes.ParseChainID(clientCtx.ChainID)
+ return res.Value, proofBz, clienttypes.NewHeight(revision, uint64(res.Height)+1), nil
+}
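+
+// Illustrative sketch (not part of the original patch): a relayer-style helper
+// that fetches a connection end together with its merkle proof. The use of
+// host.ConnectionKey as the store key is an assumption of this example; only
+// QueryTendermintProof above is taken as given.
+func queryConnectionWithProof(clientCtx client.Context, connectionID string) ([]byte, []byte, clienttypes.Height, error) {
+	// A client context height of 0 queries the latest state; any other height is
+	// decremented internally so the proof is taken at the matching IAVL version.
+	key := host.ConnectionKey(connectionID)
+	return QueryTendermintProof(clientCtx, key)
+}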
diff --git a/core/exported/channel.go b/core/exported/channel.go
new file mode 100644
index 00000000..6a0d542c
--- /dev/null
+++ b/core/exported/channel.go
@@ -0,0 +1,32 @@
+package exported
+
+// ChannelI defines the standard interface for a channel end.
+type ChannelI interface {
+ GetState() int32
+ GetOrdering() int32
+ GetCounterparty() CounterpartyChannelI
+ GetConnectionHops() []string
+ GetVersion() string
+ ValidateBasic() error
+}
+
+// CounterpartyChannelI defines the standard interface for a channel end's
+// counterparty.
+type CounterpartyChannelI interface {
+ GetPortID() string
+ GetChannelID() string
+ ValidateBasic() error
+}
+
+// PacketI defines the standard interface for IBC packets
+type PacketI interface {
+ GetSequence() uint64
+ GetTimeoutHeight() Height
+ GetTimeoutTimestamp() uint64
+ GetSourcePort() string
+ GetSourceChannel() string
+ GetDestPort() string
+ GetDestChannel() string
+ GetData() []byte
+ ValidateBasic() error
+}
diff --git a/core/exported/client.go b/core/exported/client.go
new file mode 100644
index 00000000..3d552b07
--- /dev/null
+++ b/core/exported/client.go
@@ -0,0 +1,223 @@
+package exported
+
+import (
+ ics23 "github.com/confio/ics23/go"
+ proto "github.com/gogo/protobuf/proto"
+
+ "github.com/cosmos/cosmos-sdk/codec"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+)
+
+const (
+ // TypeClientMisbehaviour is the shared evidence misbehaviour type
+ TypeClientMisbehaviour string = "client_misbehaviour"
+
+ // Solomachine is used to indicate that the light client is a solo machine.
+ Solomachine string = "06-solomachine"
+
+ // Tendermint is used to indicate that the client uses the Tendermint Consensus Algorithm.
+ Tendermint string = "07-tendermint"
+
+ // Localhost is the client type for a localhost client. It is also used as the clientID
+ // for the localhost client.
+ Localhost string = "09-localhost"
+)
+
+// ClientState defines the required common functions for light clients.
+type ClientState interface {
+ proto.Message
+
+ ClientType() string
+ GetLatestHeight() Height
+ IsFrozen() bool
+ GetFrozenHeight() Height
+ Validate() error
+ GetProofSpecs() []*ics23.ProofSpec
+
+ // Initialization function
+ // Clients must validate the initial consensus state, and may store any client-specific metadata
+ // necessary for correct light client operation
+ Initialize(sdk.Context, codec.BinaryMarshaler, sdk.KVStore, ConsensusState) error
+
+ // Genesis function
+ ExportMetadata(sdk.KVStore) []GenesisMetadata
+
+ // Update and Misbehaviour functions
+
+ CheckHeaderAndUpdateState(sdk.Context, codec.BinaryMarshaler, sdk.KVStore, Header) (ClientState, ConsensusState, error)
+ CheckMisbehaviourAndUpdateState(sdk.Context, codec.BinaryMarshaler, sdk.KVStore, Misbehaviour) (ClientState, error)
+ CheckSubstituteAndUpdateState(ctx sdk.Context, cdc codec.BinaryMarshaler, subjectClientStore, substituteClientStore sdk.KVStore, substituteClient ClientState, height Height) (ClientState, error)
+
+ // Upgrade functions
+ // NOTE: proof heights are not included as upgrade to a new revision is expected to pass only on the last
+ // height committed by the current revision. Clients are responsible for ensuring that the planned last
+ // height of the current revision is somehow encoded in the proof verification process.
+ // This is to ensure that no premature upgrades occur, since upgrade plans committed to by the counterparty
+ // may be cancelled or modified before the last planned height.
+ VerifyUpgradeAndUpdateState(
+ ctx sdk.Context,
+ cdc codec.BinaryMarshaler,
+ store sdk.KVStore,
+ newClient ClientState,
+ newConsState ConsensusState,
+ proofUpgradeClient,
+ proofUpgradeConsState []byte,
+ ) (ClientState, ConsensusState, error)
+ // Utility function that zeroes out any client customizable fields in client state
+ // Ledger enforced fields are maintained while all custom fields are zero values
+ // Used to verify upgrades
+ ZeroCustomFields() ClientState
+
+ // State verification functions
+
+ VerifyClientState(
+ store sdk.KVStore,
+ cdc codec.BinaryMarshaler,
+ height Height,
+ prefix Prefix,
+ counterpartyClientIdentifier string,
+ proof []byte,
+ clientState ClientState,
+ ) error
+ VerifyClientConsensusState(
+ store sdk.KVStore,
+ cdc codec.BinaryMarshaler,
+ height Height,
+ counterpartyClientIdentifier string,
+ consensusHeight Height,
+ prefix Prefix,
+ proof []byte,
+ consensusState ConsensusState,
+ ) error
+ VerifyConnectionState(
+ store sdk.KVStore,
+ cdc codec.BinaryMarshaler,
+ height Height,
+ prefix Prefix,
+ proof []byte,
+ connectionID string,
+ connectionEnd ConnectionI,
+ ) error
+ VerifyChannelState(
+ store sdk.KVStore,
+ cdc codec.BinaryMarshaler,
+ height Height,
+ prefix Prefix,
+ proof []byte,
+ portID,
+ channelID string,
+ channel ChannelI,
+ ) error
+ VerifyPacketCommitment(
+ store sdk.KVStore,
+ cdc codec.BinaryMarshaler,
+ height Height,
+ currentTimestamp uint64,
+ delayPeriod uint64,
+ prefix Prefix,
+ proof []byte,
+ portID,
+ channelID string,
+ sequence uint64,
+ commitmentBytes []byte,
+ ) error
+ VerifyPacketAcknowledgement(
+ store sdk.KVStore,
+ cdc codec.BinaryMarshaler,
+ height Height,
+ currentTimestamp uint64,
+ delayPeriod uint64,
+ prefix Prefix,
+ proof []byte,
+ portID,
+ channelID string,
+ sequence uint64,
+ acknowledgement []byte,
+ ) error
+ VerifyPacketReceiptAbsence(
+ store sdk.KVStore,
+ cdc codec.BinaryMarshaler,
+ height Height,
+ currentTimestamp uint64,
+ delayPeriod uint64,
+ prefix Prefix,
+ proof []byte,
+ portID,
+ channelID string,
+ sequence uint64,
+ ) error
+ VerifyNextSequenceRecv(
+ store sdk.KVStore,
+ cdc codec.BinaryMarshaler,
+ height Height,
+ currentTimestamp uint64,
+ delayPeriod uint64,
+ prefix Prefix,
+ proof []byte,
+ portID,
+ channelID string,
+ nextSequenceRecv uint64,
+ ) error
+}
+
+// ConsensusState is the state of the consensus process
+type ConsensusState interface {
+ proto.Message
+
+ ClientType() string // Consensus kind
+
+ // GetRoot returns the commitment root of the consensus state,
+ // which is used for key-value pair verification.
+ GetRoot() Root
+
+ // GetTimestamp returns the timestamp (in nanoseconds) of the consensus state
+ GetTimestamp() uint64
+
+ ValidateBasic() error
+}
+
+// Misbehaviour defines counterparty misbehaviour for a specific consensus type
+type Misbehaviour interface {
+ proto.Message
+
+ ClientType() string
+ GetClientID() string
+ ValidateBasic() error
+
+ // Height at which the infraction occurred
+ GetHeight() Height
+}
+
+// Header is the consensus state update information
+type Header interface {
+ proto.Message
+
+ ClientType() string
+ GetHeight() Height
+ ValidateBasic() error
+}
+
+// Height is a wrapper interface over clienttypes.Height.
+// All clients must use the concrete implementation in types.
+type Height interface {
+ IsZero() bool
+ LT(Height) bool
+ LTE(Height) bool
+ EQ(Height) bool
+ GT(Height) bool
+ GTE(Height) bool
+ GetRevisionNumber() uint64
+ GetRevisionHeight() uint64
+ Increment() Height
+ Decrement() (Height, bool)
+ String() string
+}
+
+// GenesisMetadata is a wrapper interface over clienttypes.GenesisMetadata.
+// All clients must use the concrete implementation in types.
+type GenesisMetadata interface {
+	// GetKey returns the store key that contains the metadata, without the clientID prefix
+	GetKey() []byte
+	// GetValue returns the metadata value
+	GetValue() []byte
+}
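+
+// Illustrative sketch (not part of the original patch): a helper showing how the
+// Height interface is intended to be compared. Callers should compare through
+// these methods (revision number takes precedence over revision height in the
+// concrete clienttypes implementation) rather than inspecting raw fields.
+func isNewerHeight(candidate, latest Height) bool {
+	// A zero height can never advance a client.
+	if candidate.IsZero() {
+		return false
+	}
+	return candidate.GT(latest)
+}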
diff --git a/core/exported/commitment.go b/core/exported/commitment.go
new file mode 100644
index 00000000..b4f2c0c1
--- /dev/null
+++ b/core/exported/commitment.go
@@ -0,0 +1,45 @@
+package exported
+
+import ics23 "github.com/confio/ics23/go"
+
+// ICS 023 Types Implementation
+//
+// This file includes types defined under
+// https://github.com/cosmos/ics/tree/master/spec/ics-023-vector-commitments
+
+// spec:Path and spec:Value are defined as bytestring
+
+// Root implements spec:CommitmentRoot.
+// A root is constructed from a set of key-value pairs,
+// and the inclusion or non-inclusion of an arbitrary key-value pair
+// can be proven with the proof.
+type Root interface {
+ GetHash() []byte
+ Empty() bool
+}
+
+// Prefix implements spec:CommitmentPrefix.
+// Prefix represents the common "prefix" that a set of keys shares.
+type Prefix interface {
+ Bytes() []byte
+ Empty() bool
+}
+
+// Path implements spec:CommitmentPath.
+// A path is the additional information provided to the verification function.
+type Path interface {
+ String() string
+ Empty() bool
+}
+
+// Proof implements spec:CommitmentProof.
+// Proof can prove whether the key-value pair is a part of the Root or not.
+// Each proof has a designated key-value pair it is able to prove.
+// Proofs include the key, but the value is provided dynamically at verification time.
+type Proof interface {
+ VerifyMembership([]*ics23.ProofSpec, Root, Path, []byte) error
+ VerifyNonMembership([]*ics23.ProofSpec, Root, Path) error
+ Empty() bool
+
+ ValidateBasic() error
+}
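+
+// Illustrative sketch (not part of the original patch): how the commitment
+// interfaces compose at verification time. The specs, root, path and value are
+// supplied by the caller (typically a light client); nothing here assumes a
+// concrete proof format.
+func verifyKeyValue(proof Proof, specs []*ics23.ProofSpec, root Root, path Path, value []byte) error {
+	// An empty value is proven by absence, a non-empty value by inclusion.
+	if len(value) == 0 {
+		return proof.VerifyNonMembership(specs, root, path)
+	}
+	return proof.VerifyMembership(specs, root, path, value)
+}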
diff --git a/core/exported/connection.go b/core/exported/connection.go
new file mode 100644
index 00000000..8f705daf
--- /dev/null
+++ b/core/exported/connection.go
@@ -0,0 +1,26 @@
+package exported
+
+// ConnectionI describes the required methods for a connection.
+type ConnectionI interface {
+ GetClientID() string
+ GetState() int32
+ GetCounterparty() CounterpartyConnectionI
+ GetVersions() []Version
+ GetDelayPeriod() uint64
+ ValidateBasic() error
+}
+
+// CounterpartyConnectionI describes the required methods for a counterparty connection.
+type CounterpartyConnectionI interface {
+ GetClientID() string
+ GetConnectionID() string
+ GetPrefix() Prefix
+ ValidateBasic() error
+}
+
+// Version defines an IBC version used in connection handshake negotiation.
+type Version interface {
+ GetIdentifier() string
+ GetFeatures() []string
+ VerifyProposedVersion(Version) error
+}
diff --git a/core/genesis.go b/core/genesis.go
new file mode 100644
index 00000000..7d5d60b9
--- /dev/null
+++ b/core/genesis.go
@@ -0,0 +1,27 @@
+package ibc
+
+import (
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ client "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client"
+ connection "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection"
+ channel "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/keeper"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/types"
+)
+
+// InitGenesis initializes the ibc state from a provided genesis
+// state.
+func InitGenesis(ctx sdk.Context, k keeper.Keeper, createLocalhost bool, gs *types.GenesisState) {
+ client.InitGenesis(ctx, k.ClientKeeper, gs.ClientGenesis)
+ connection.InitGenesis(ctx, k.ConnectionKeeper, gs.ConnectionGenesis)
+ channel.InitGenesis(ctx, k.ChannelKeeper, gs.ChannelGenesis)
+}
+
+// ExportGenesis returns the ibc exported genesis.
+func ExportGenesis(ctx sdk.Context, k keeper.Keeper) *types.GenesisState {
+ return &types.GenesisState{
+ ClientGenesis: client.ExportGenesis(ctx, k.ClientKeeper),
+ ConnectionGenesis: connection.ExportGenesis(ctx, k.ConnectionKeeper),
+ ChannelGenesis: channel.ExportGenesis(ctx, k.ChannelKeeper),
+ }
+}
diff --git a/core/genesis_test.go b/core/genesis_test.go
new file mode 100644
index 00000000..c29feef7
--- /dev/null
+++ b/core/genesis_test.go
@@ -0,0 +1,370 @@
+package ibc_test
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/suite"
+ tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
+
+ "github.com/cosmos/cosmos-sdk/codec"
+ "github.com/cosmos/cosmos-sdk/simapp"
+ ibc "github.com/cosmos/cosmos-sdk/x/ibc/core"
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
+ channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+ commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/types"
+ ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
+ localhosttypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/09-localhost/types"
+ ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+)
+
+const (
+ connectionID = "connection-0"
+ clientID = "07-tendermint-0"
+ connectionID2 = "connection-1"
+ clientID2 = "07-tendermin-1"
+ localhostID = exported.Localhost + "-1"
+
+ port1 = "firstport"
+ port2 = "secondport"
+
+ channel1 = "channel-0"
+ channel2 = "channel-1"
+)
+
+var clientHeight = clienttypes.NewHeight(0, 10)
+
+type IBCTestSuite struct {
+ suite.Suite
+
+ coordinator *ibctesting.Coordinator
+
+ chainA *ibctesting.TestChain
+ chainB *ibctesting.TestChain
+}
+
+// SetupTest creates a coordinator with 2 test chains.
+func (suite *IBCTestSuite) SetupTest() {
+ suite.coordinator = ibctesting.NewCoordinator(suite.T(), 2)
+
+ suite.chainA = suite.coordinator.GetChain(ibctesting.GetChainID(0))
+ suite.chainB = suite.coordinator.GetChain(ibctesting.GetChainID(1))
+}
+
+func TestIBCTestSuite(t *testing.T) {
+ suite.Run(t, new(IBCTestSuite))
+}
+
+func (suite *IBCTestSuite) TestValidateGenesis() {
+ header := suite.chainA.CreateTMClientHeader(suite.chainA.ChainID, suite.chainA.CurrentHeader.Height, clienttypes.NewHeight(0, uint64(suite.chainA.CurrentHeader.Height-1)), suite.chainA.CurrentHeader.Time, suite.chainA.Vals, suite.chainA.Vals, suite.chainA.Signers)
+
+ testCases := []struct {
+ name string
+ genState *types.GenesisState
+ expPass bool
+ }{
+ {
+ name: "default",
+ genState: types.DefaultGenesisState(),
+ expPass: true,
+ },
+ {
+ name: "valid genesis",
+ genState: &types.GenesisState{
+ ClientGenesis: clienttypes.NewGenesisState(
+ []clienttypes.IdentifiedClientState{
+ clienttypes.NewIdentifiedClientState(
+ clientID, ibctmtypes.NewClientState(suite.chainA.ChainID, ibctmtypes.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false),
+ ),
+ clienttypes.NewIdentifiedClientState(
+ localhostID, localhosttypes.NewClientState("chaindID", clientHeight),
+ ),
+ },
+ []clienttypes.ClientConsensusStates{
+ clienttypes.NewClientConsensusStates(
+ clientID,
+ []clienttypes.ConsensusStateWithHeight{
+ clienttypes.NewConsensusStateWithHeight(
+ header.GetHeight().(clienttypes.Height),
+ ibctmtypes.NewConsensusState(
+ header.GetTime(), commitmenttypes.NewMerkleRoot(header.Header.AppHash), header.Header.NextValidatorsHash,
+ ),
+ ),
+ },
+ ),
+ },
+ []clienttypes.IdentifiedGenesisMetadata{
+ clienttypes.NewIdentifiedGenesisMetadata(
+ clientID,
+ []clienttypes.GenesisMetadata{
+ clienttypes.NewGenesisMetadata([]byte("key1"), []byte("val1")),
+ clienttypes.NewGenesisMetadata([]byte("key2"), []byte("val2")),
+ },
+ ),
+ },
+ clienttypes.NewParams(exported.Tendermint, exported.Localhost),
+ true,
+ 2,
+ ),
+ ConnectionGenesis: connectiontypes.NewGenesisState(
+ []connectiontypes.IdentifiedConnection{
+ connectiontypes.NewIdentifiedConnection(connectionID, connectiontypes.NewConnectionEnd(connectiontypes.INIT, clientID, connectiontypes.NewCounterparty(clientID2, connectionID2, commitmenttypes.NewMerklePrefix([]byte("prefix"))), []*connectiontypes.Version{ibctesting.ConnectionVersion}, 0)),
+ },
+ []connectiontypes.ConnectionPaths{
+ connectiontypes.NewConnectionPaths(clientID, []string{connectionID}),
+ },
+ 0,
+ ),
+ ChannelGenesis: channeltypes.NewGenesisState(
+ []channeltypes.IdentifiedChannel{
+ channeltypes.NewIdentifiedChannel(
+ port1, channel1, channeltypes.NewChannel(
+ channeltypes.INIT, channeltypes.ORDERED,
+ channeltypes.NewCounterparty(port2, channel2), []string{connectionID}, ibctesting.DefaultChannelVersion,
+ ),
+ ),
+ },
+ []channeltypes.PacketState{
+ channeltypes.NewPacketState(port2, channel2, 1, []byte("ack")),
+ },
+ []channeltypes.PacketState{
+ channeltypes.NewPacketState(port2, channel2, 1, []byte("")),
+ },
+ []channeltypes.PacketState{
+ channeltypes.NewPacketState(port1, channel1, 1, []byte("commit_hash")),
+ },
+ []channeltypes.PacketSequence{
+ channeltypes.NewPacketSequence(port1, channel1, 1),
+ },
+ []channeltypes.PacketSequence{
+ channeltypes.NewPacketSequence(port2, channel2, 1),
+ },
+ []channeltypes.PacketSequence{
+ channeltypes.NewPacketSequence(port2, channel2, 1),
+ },
+ 0,
+ ),
+ },
+ expPass: true,
+ },
+ {
+ name: "invalid client genesis",
+ genState: &types.GenesisState{
+ ClientGenesis: clienttypes.NewGenesisState(
+ []clienttypes.IdentifiedClientState{
+ clienttypes.NewIdentifiedClientState(
+ clientID, ibctmtypes.NewClientState(suite.chainA.ChainID, ibctmtypes.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false),
+ ),
+ clienttypes.NewIdentifiedClientState(
+ localhostID, localhosttypes.NewClientState("(chaindID)", clienttypes.ZeroHeight()),
+ ),
+ },
+ nil,
+ []clienttypes.IdentifiedGenesisMetadata{
+ clienttypes.NewIdentifiedGenesisMetadata(
+ clientID,
+ []clienttypes.GenesisMetadata{
+ clienttypes.NewGenesisMetadata([]byte(""), []byte("val1")),
+ clienttypes.NewGenesisMetadata([]byte("key2"), []byte("")),
+ },
+ ),
+ },
+ clienttypes.NewParams(exported.Tendermint),
+ false,
+ 2,
+ ),
+ ConnectionGenesis: connectiontypes.DefaultGenesisState(),
+ },
+ expPass: false,
+ },
+ {
+ name: "invalid connection genesis",
+ genState: &types.GenesisState{
+ ClientGenesis: clienttypes.DefaultGenesisState(),
+ ConnectionGenesis: connectiontypes.NewGenesisState(
+ []connectiontypes.IdentifiedConnection{
+ connectiontypes.NewIdentifiedConnection(connectionID, connectiontypes.NewConnectionEnd(connectiontypes.INIT, "(CLIENTIDONE)", connectiontypes.NewCounterparty(clientID, connectionID2, commitmenttypes.NewMerklePrefix([]byte("prefix"))), []*connectiontypes.Version{connectiontypes.NewVersion("1.1", nil)}, 0)),
+ },
+ []connectiontypes.ConnectionPaths{
+ connectiontypes.NewConnectionPaths(clientID, []string{connectionID}),
+ },
+ 0,
+ ),
+ },
+ expPass: false,
+ },
+ {
+ name: "invalid channel genesis",
+ genState: &types.GenesisState{
+ ClientGenesis: clienttypes.DefaultGenesisState(),
+ ConnectionGenesis: connectiontypes.DefaultGenesisState(),
+ ChannelGenesis: channeltypes.GenesisState{
+ Acknowledgements: []channeltypes.PacketState{
+ channeltypes.NewPacketState("(portID)", channel1, 1, []byte("ack")),
+ },
+ },
+ },
+ expPass: false,
+ },
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+ err := tc.genState.Validate()
+ if tc.expPass {
+ suite.Require().NoError(err, tc.name)
+ } else {
+ suite.Require().Error(err, tc.name)
+ }
+ }
+}
+
+func (suite *IBCTestSuite) TestInitGenesis() {
+ header := suite.chainA.CreateTMClientHeader(suite.chainA.ChainID, suite.chainA.CurrentHeader.Height, clienttypes.NewHeight(0, uint64(suite.chainA.CurrentHeader.Height-1)), suite.chainA.CurrentHeader.Time, suite.chainA.Vals, suite.chainA.Vals, suite.chainA.Signers)
+
+ testCases := []struct {
+ name string
+ genState *types.GenesisState
+ }{
+ {
+ name: "default",
+ genState: types.DefaultGenesisState(),
+ },
+ {
+ name: "valid genesis",
+ genState: &types.GenesisState{
+ ClientGenesis: clienttypes.NewGenesisState(
+ []clienttypes.IdentifiedClientState{
+ clienttypes.NewIdentifiedClientState(
+ clientID, ibctmtypes.NewClientState(suite.chainA.ChainID, ibctmtypes.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false),
+ ),
+ clienttypes.NewIdentifiedClientState(
+ exported.Localhost, localhosttypes.NewClientState("chaindID", clientHeight),
+ ),
+ },
+ []clienttypes.ClientConsensusStates{
+ clienttypes.NewClientConsensusStates(
+ clientID,
+ []clienttypes.ConsensusStateWithHeight{
+ clienttypes.NewConsensusStateWithHeight(
+ header.GetHeight().(clienttypes.Height),
+ ibctmtypes.NewConsensusState(
+ header.GetTime(), commitmenttypes.NewMerkleRoot(header.Header.AppHash), header.Header.NextValidatorsHash,
+ ),
+ ),
+ },
+ ),
+ },
+ []clienttypes.IdentifiedGenesisMetadata{
+ clienttypes.NewIdentifiedGenesisMetadata(
+ clientID,
+ []clienttypes.GenesisMetadata{
+ clienttypes.NewGenesisMetadata([]byte("key1"), []byte("val1")),
+ clienttypes.NewGenesisMetadata([]byte("key2"), []byte("val2")),
+ },
+ ),
+ },
+ clienttypes.NewParams(exported.Tendermint, exported.Localhost),
+ true,
+ 0,
+ ),
+ ConnectionGenesis: connectiontypes.NewGenesisState(
+ []connectiontypes.IdentifiedConnection{
+ connectiontypes.NewIdentifiedConnection(connectionID, connectiontypes.NewConnectionEnd(connectiontypes.INIT, clientID, connectiontypes.NewCounterparty(clientID2, connectionID2, commitmenttypes.NewMerklePrefix([]byte("prefix"))), []*connectiontypes.Version{ibctesting.ConnectionVersion}, 0)),
+ },
+ []connectiontypes.ConnectionPaths{
+ connectiontypes.NewConnectionPaths(clientID, []string{connectionID}),
+ },
+ 0,
+ ),
+ ChannelGenesis: channeltypes.NewGenesisState(
+ []channeltypes.IdentifiedChannel{
+ channeltypes.NewIdentifiedChannel(
+ port1, channel1, channeltypes.NewChannel(
+ channeltypes.INIT, channeltypes.ORDERED,
+ channeltypes.NewCounterparty(port2, channel2), []string{connectionID}, ibctesting.DefaultChannelVersion,
+ ),
+ ),
+ },
+ []channeltypes.PacketState{
+ channeltypes.NewPacketState(port2, channel2, 1, []byte("ack")),
+ },
+ []channeltypes.PacketState{
+ channeltypes.NewPacketState(port2, channel2, 1, []byte("")),
+ },
+ []channeltypes.PacketState{
+ channeltypes.NewPacketState(port1, channel1, 1, []byte("commit_hash")),
+ },
+ []channeltypes.PacketSequence{
+ channeltypes.NewPacketSequence(port1, channel1, 1),
+ },
+ []channeltypes.PacketSequence{
+ channeltypes.NewPacketSequence(port2, channel2, 1),
+ },
+ []channeltypes.PacketSequence{
+ channeltypes.NewPacketSequence(port2, channel2, 1),
+ },
+ 0,
+ ),
+ },
+ },
+ }
+
+ for _, tc := range testCases {
+ app := simapp.Setup(false)
+
+ suite.NotPanics(func() {
+ ibc.InitGenesis(app.BaseApp.NewContext(false, tmproto.Header{Height: 1}), *app.IBCKeeper, true, tc.genState)
+ })
+ }
+}
+
+func (suite *IBCTestSuite) TestExportGenesis() {
+ testCases := []struct {
+ msg string
+ malleate func()
+ }{
+ {
+ "success",
+ func() {
+ // creates clients
+ suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
+ // create extra clients
+ suite.coordinator.CreateClient(suite.chainA, suite.chainB, exported.Tendermint)
+ suite.coordinator.CreateClient(suite.chainA, suite.chainB, exported.Tendermint)
+ },
+ },
+ }
+
+ for _, tc := range testCases {
+ suite.Run(fmt.Sprintf("Case %s", tc.msg), func() {
+ suite.SetupTest()
+
+ tc.malleate()
+
+ var gs *types.GenesisState
+ suite.NotPanics(func() {
+ gs = ibc.ExportGenesis(suite.chainA.GetContext(), *suite.chainA.App.IBCKeeper)
+ })
+
+ // init genesis based on export
+ suite.NotPanics(func() {
+ ibc.InitGenesis(suite.chainA.GetContext(), *suite.chainA.App.IBCKeeper, true, gs)
+ })
+
+ suite.NotPanics(func() {
+ cdc := codec.NewProtoCodec(suite.chainA.App.InterfaceRegistry())
+ genState := cdc.MustMarshalJSON(gs)
+ cdc.MustUnmarshalJSON(genState, gs)
+ })
+
+ // init genesis based on marshal and unmarshal
+ suite.NotPanics(func() {
+ ibc.InitGenesis(suite.chainA.GetContext(), *suite.chainA.App.IBCKeeper, true, gs)
+ })
+ })
+ }
+}
diff --git a/core/handler.go b/core/handler.go
new file mode 100644
index 00000000..c8e4dfc8
--- /dev/null
+++ b/core/handler.go
@@ -0,0 +1,98 @@
+package ibc
+
+import (
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
+ channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/keeper"
+)
+
+// NewHandler defines the IBC handler
+func NewHandler(k keeper.Keeper) sdk.Handler {
+ return func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) {
+ ctx = ctx.WithEventManager(sdk.NewEventManager())
+
+ switch msg := msg.(type) {
+ // IBC client msg interface types
+ case *clienttypes.MsgCreateClient:
+ res, err := k.CreateClient(sdk.WrapSDKContext(ctx), msg)
+ return sdk.WrapServiceResult(ctx, res, err)
+
+ case *clienttypes.MsgUpdateClient:
+ res, err := k.UpdateClient(sdk.WrapSDKContext(ctx), msg)
+ return sdk.WrapServiceResult(ctx, res, err)
+
+ case *clienttypes.MsgUpgradeClient:
+ res, err := k.UpgradeClient(sdk.WrapSDKContext(ctx), msg)
+ return sdk.WrapServiceResult(ctx, res, err)
+
+ case *clienttypes.MsgSubmitMisbehaviour:
+ res, err := k.SubmitMisbehaviour(sdk.WrapSDKContext(ctx), msg)
+ return sdk.WrapServiceResult(ctx, res, err)
+
+ // IBC connection msgs
+ case *connectiontypes.MsgConnectionOpenInit:
+ res, err := k.ConnectionOpenInit(sdk.WrapSDKContext(ctx), msg)
+ return sdk.WrapServiceResult(ctx, res, err)
+
+ case *connectiontypes.MsgConnectionOpenTry:
+ res, err := k.ConnectionOpenTry(sdk.WrapSDKContext(ctx), msg)
+ return sdk.WrapServiceResult(ctx, res, err)
+
+ case *connectiontypes.MsgConnectionOpenAck:
+ res, err := k.ConnectionOpenAck(sdk.WrapSDKContext(ctx), msg)
+ return sdk.WrapServiceResult(ctx, res, err)
+
+ case *connectiontypes.MsgConnectionOpenConfirm:
+ res, err := k.ConnectionOpenConfirm(sdk.WrapSDKContext(ctx), msg)
+ return sdk.WrapServiceResult(ctx, res, err)
+
+ // IBC channel msgs
+ case *channeltypes.MsgChannelOpenInit:
+ res, err := k.ChannelOpenInit(sdk.WrapSDKContext(ctx), msg)
+ return sdk.WrapServiceResult(ctx, res, err)
+
+ case *channeltypes.MsgChannelOpenTry:
+ res, err := k.ChannelOpenTry(sdk.WrapSDKContext(ctx), msg)
+ return sdk.WrapServiceResult(ctx, res, err)
+
+ case *channeltypes.MsgChannelOpenAck:
+ res, err := k.ChannelOpenAck(sdk.WrapSDKContext(ctx), msg)
+ return sdk.WrapServiceResult(ctx, res, err)
+
+ case *channeltypes.MsgChannelOpenConfirm:
+ res, err := k.ChannelOpenConfirm(sdk.WrapSDKContext(ctx), msg)
+ return sdk.WrapServiceResult(ctx, res, err)
+
+ case *channeltypes.MsgChannelCloseInit:
+ res, err := k.ChannelCloseInit(sdk.WrapSDKContext(ctx), msg)
+ return sdk.WrapServiceResult(ctx, res, err)
+
+ case *channeltypes.MsgChannelCloseConfirm:
+ res, err := k.ChannelCloseConfirm(sdk.WrapSDKContext(ctx), msg)
+ return sdk.WrapServiceResult(ctx, res, err)
+
+ // IBC packet msgs get routed to the appropriate module callback
+ case *channeltypes.MsgRecvPacket:
+ res, err := k.RecvPacket(sdk.WrapSDKContext(ctx), msg)
+ return sdk.WrapServiceResult(ctx, res, err)
+
+ case *channeltypes.MsgAcknowledgement:
+ res, err := k.Acknowledgement(sdk.WrapSDKContext(ctx), msg)
+ return sdk.WrapServiceResult(ctx, res, err)
+
+ case *channeltypes.MsgTimeout:
+ res, err := k.Timeout(sdk.WrapSDKContext(ctx), msg)
+ return sdk.WrapServiceResult(ctx, res, err)
+
+ case *channeltypes.MsgTimeoutOnClose:
+ res, err := k.TimeoutOnClose(sdk.WrapSDKContext(ctx), msg)
+ return sdk.WrapServiceResult(ctx, res, err)
+
+ default:
+ return nil, sdkerrors.Wrapf(sdkerrors.ErrUnknownRequest, "unrecognized IBC message type: %T", msg)
+ }
+ }
+}
diff --git a/core/keeper/grpc_query.go b/core/keeper/grpc_query.go
new file mode 100644
index 00000000..f406d2e8
--- /dev/null
+++ b/core/keeper/grpc_query.go
@@ -0,0 +1,124 @@
+package keeper
+
+import (
+ "context"
+
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
+ channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+)
+
+// ClientState implements the IBC QueryServer interface
+func (q Keeper) ClientState(c context.Context, req *clienttypes.QueryClientStateRequest) (*clienttypes.QueryClientStateResponse, error) {
+ return q.ClientKeeper.ClientState(c, req)
+}
+
+// ClientStates implements the IBC QueryServer interface
+func (q Keeper) ClientStates(c context.Context, req *clienttypes.QueryClientStatesRequest) (*clienttypes.QueryClientStatesResponse, error) {
+ return q.ClientKeeper.ClientStates(c, req)
+}
+
+// ConsensusState implements the IBC QueryServer interface
+func (q Keeper) ConsensusState(c context.Context, req *clienttypes.QueryConsensusStateRequest) (*clienttypes.QueryConsensusStateResponse, error) {
+ return q.ClientKeeper.ConsensusState(c, req)
+}
+
+// ConsensusStates implements the IBC QueryServer interface
+func (q Keeper) ConsensusStates(c context.Context, req *clienttypes.QueryConsensusStatesRequest) (*clienttypes.QueryConsensusStatesResponse, error) {
+ return q.ClientKeeper.ConsensusStates(c, req)
+}
+
+// ClientParams implements the IBC QueryServer interface
+func (q Keeper) ClientParams(c context.Context, req *clienttypes.QueryClientParamsRequest) (*clienttypes.QueryClientParamsResponse, error) {
+ return q.ClientKeeper.ClientParams(c, req)
+}
+
+// Connection implements the IBC QueryServer interface
+func (q Keeper) Connection(c context.Context, req *connectiontypes.QueryConnectionRequest) (*connectiontypes.QueryConnectionResponse, error) {
+ return q.ConnectionKeeper.Connection(c, req)
+}
+
+// Connections implements the IBC QueryServer interface
+func (q Keeper) Connections(c context.Context, req *connectiontypes.QueryConnectionsRequest) (*connectiontypes.QueryConnectionsResponse, error) {
+ return q.ConnectionKeeper.Connections(c, req)
+}
+
+// ClientConnections implements the IBC QueryServer interface
+func (q Keeper) ClientConnections(c context.Context, req *connectiontypes.QueryClientConnectionsRequest) (*connectiontypes.QueryClientConnectionsResponse, error) {
+ return q.ConnectionKeeper.ClientConnections(c, req)
+}
+
+// ConnectionClientState implements the IBC QueryServer interface
+func (q Keeper) ConnectionClientState(c context.Context, req *connectiontypes.QueryConnectionClientStateRequest) (*connectiontypes.QueryConnectionClientStateResponse, error) {
+ return q.ConnectionKeeper.ConnectionClientState(c, req)
+}
+
+// ConnectionConsensusState implements the IBC QueryServer interface
+func (q Keeper) ConnectionConsensusState(c context.Context, req *connectiontypes.QueryConnectionConsensusStateRequest) (*connectiontypes.QueryConnectionConsensusStateResponse, error) {
+ return q.ConnectionKeeper.ConnectionConsensusState(c, req)
+}
+
+// Channel implements the IBC QueryServer interface
+func (q Keeper) Channel(c context.Context, req *channeltypes.QueryChannelRequest) (*channeltypes.QueryChannelResponse, error) {
+ return q.ChannelKeeper.Channel(c, req)
+}
+
+// Channels implements the IBC QueryServer interface
+func (q Keeper) Channels(c context.Context, req *channeltypes.QueryChannelsRequest) (*channeltypes.QueryChannelsResponse, error) {
+ return q.ChannelKeeper.Channels(c, req)
+}
+
+// ConnectionChannels implements the IBC QueryServer interface
+func (q Keeper) ConnectionChannels(c context.Context, req *channeltypes.QueryConnectionChannelsRequest) (*channeltypes.QueryConnectionChannelsResponse, error) {
+ return q.ChannelKeeper.ConnectionChannels(c, req)
+}
+
+// ChannelClientState implements the IBC QueryServer interface
+func (q Keeper) ChannelClientState(c context.Context, req *channeltypes.QueryChannelClientStateRequest) (*channeltypes.QueryChannelClientStateResponse, error) {
+ return q.ChannelKeeper.ChannelClientState(c, req)
+}
+
+// ChannelConsensusState implements the IBC QueryServer interface
+func (q Keeper) ChannelConsensusState(c context.Context, req *channeltypes.QueryChannelConsensusStateRequest) (*channeltypes.QueryChannelConsensusStateResponse, error) {
+ return q.ChannelKeeper.ChannelConsensusState(c, req)
+}
+
+// PacketCommitment implements the IBC QueryServer interface
+func (q Keeper) PacketCommitment(c context.Context, req *channeltypes.QueryPacketCommitmentRequest) (*channeltypes.QueryPacketCommitmentResponse, error) {
+ return q.ChannelKeeper.PacketCommitment(c, req)
+}
+
+// PacketCommitments implements the IBC QueryServer interface
+func (q Keeper) PacketCommitments(c context.Context, req *channeltypes.QueryPacketCommitmentsRequest) (*channeltypes.QueryPacketCommitmentsResponse, error) {
+ return q.ChannelKeeper.PacketCommitments(c, req)
+}
+
+// PacketReceipt implements the IBC QueryServer interface
+func (q Keeper) PacketReceipt(c context.Context, req *channeltypes.QueryPacketReceiptRequest) (*channeltypes.QueryPacketReceiptResponse, error) {
+ return q.ChannelKeeper.PacketReceipt(c, req)
+}
+
+// PacketAcknowledgement implements the IBC QueryServer interface
+func (q Keeper) PacketAcknowledgement(c context.Context, req *channeltypes.QueryPacketAcknowledgementRequest) (*channeltypes.QueryPacketAcknowledgementResponse, error) {
+ return q.ChannelKeeper.PacketAcknowledgement(c, req)
+}
+
+// PacketAcknowledgements implements the IBC QueryServer interface
+func (q Keeper) PacketAcknowledgements(c context.Context, req *channeltypes.QueryPacketAcknowledgementsRequest) (*channeltypes.QueryPacketAcknowledgementsResponse, error) {
+ return q.ChannelKeeper.PacketAcknowledgements(c, req)
+}
+
+// UnreceivedPackets implements the IBC QueryServer interface
+func (q Keeper) UnreceivedPackets(c context.Context, req *channeltypes.QueryUnreceivedPacketsRequest) (*channeltypes.QueryUnreceivedPacketsResponse, error) {
+ return q.ChannelKeeper.UnreceivedPackets(c, req)
+}
+
+// UnreceivedAcks implements the IBC QueryServer interface
+func (q Keeper) UnreceivedAcks(c context.Context, req *channeltypes.QueryUnreceivedAcksRequest) (*channeltypes.QueryUnreceivedAcksResponse, error) {
+ return q.ChannelKeeper.UnreceivedAcks(c, req)
+}
+
+// NextSequenceReceive implements the IBC QueryServer interface
+func (q Keeper) NextSequenceReceive(c context.Context, req *channeltypes.QueryNextSequenceReceiveRequest) (*channeltypes.QueryNextSequenceReceiveResponse, error) {
+ return q.ChannelKeeper.NextSequenceReceive(c, req)
+}
diff --git a/core/keeper/keeper.go b/core/keeper/keeper.go
new file mode 100644
index 00000000..5f9abc38
--- /dev/null
+++ b/core/keeper/keeper.go
@@ -0,0 +1,65 @@
+package keeper
+
+import (
+ "github.com/cosmos/cosmos-sdk/codec"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ capabilitykeeper "github.com/cosmos/cosmos-sdk/x/capability/keeper"
+ clientkeeper "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/keeper"
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ connectionkeeper "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/keeper"
+ channelkeeper "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/keeper"
+ portkeeper "github.com/cosmos/cosmos-sdk/x/ibc/core/05-port/keeper"
+ porttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/05-port/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/types"
+ paramtypes "github.com/cosmos/cosmos-sdk/x/params/types"
+)
+
+var _ types.QueryServer = (*Keeper)(nil)
+
+// Keeper defines each ICS keeper for IBC
+type Keeper struct {
+ // implements gRPC QueryServer interface
+ types.QueryServer
+
+ cdc codec.BinaryMarshaler
+
+ ClientKeeper clientkeeper.Keeper
+ ConnectionKeeper connectionkeeper.Keeper
+ ChannelKeeper channelkeeper.Keeper
+ PortKeeper portkeeper.Keeper
+ Router *porttypes.Router
+}
+
+// NewKeeper creates a new ibc Keeper
+func NewKeeper(
+ cdc codec.BinaryMarshaler, key sdk.StoreKey, paramSpace paramtypes.Subspace,
+ stakingKeeper clienttypes.StakingKeeper, scopedKeeper capabilitykeeper.ScopedKeeper,
+) *Keeper {
+ clientKeeper := clientkeeper.NewKeeper(cdc, key, paramSpace, stakingKeeper)
+ connectionKeeper := connectionkeeper.NewKeeper(cdc, key, clientKeeper)
+ portKeeper := portkeeper.NewKeeper(scopedKeeper)
+ channelKeeper := channelkeeper.NewKeeper(cdc, key, clientKeeper, connectionKeeper, portKeeper, scopedKeeper)
+
+ return &Keeper{
+ cdc: cdc,
+ ClientKeeper: clientKeeper,
+ ConnectionKeeper: connectionKeeper,
+ ChannelKeeper: channelKeeper,
+ PortKeeper: portKeeper,
+ }
+}
+
+// Codec returns the IBC module codec.
+func (k Keeper) Codec() codec.BinaryMarshaler {
+ return k.cdc
+}
+
+// SetRouter sets the Router in IBC Keeper and seals it. The method panics if
+// there is an existing router that's already sealed.
+func (k *Keeper) SetRouter(rtr *porttypes.Router) {
+ if k.Router != nil && k.Router.Sealed() {
+ panic("cannot reset a sealed router")
+ }
+ k.Router = rtr
+ k.Router.Seal()
+}
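+
+// Illustrative sketch (not part of the original patch): typical application
+// wiring of the port router. The moduleName and ibcModule arguments stand in
+// for an IBC application (e.g. transfer) and are assumptions of this example.
+func wireRouter(k *Keeper, moduleName string, ibcModule porttypes.IBCModule) {
+	// All routes must be added before SetRouter is called, since SetRouter
+	// seals the router and further AddRoute calls would panic.
+	router := porttypes.NewRouter()
+	router.AddRoute(moduleName, ibcModule)
+	k.SetRouter(router)
+}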
diff --git a/core/keeper/msg_server.go b/core/keeper/msg_server.go
new file mode 100644
index 00000000..dcddcaed
--- /dev/null
+++ b/core/keeper/msg_server.go
@@ -0,0 +1,616 @@
+package keeper
+
+import (
+ "context"
+
+ "github.com/armon/go-metrics"
+
+ "github.com/cosmos/cosmos-sdk/telemetry"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
+ channel "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel"
+ channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+ porttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/05-port/types"
+)
+
+var _ clienttypes.MsgServer = Keeper{}
+var _ connectiontypes.MsgServer = Keeper{}
+var _ channeltypes.MsgServer = Keeper{}
+
+// CreateClient defines an rpc handler method for MsgCreateClient.
+func (k Keeper) CreateClient(goCtx context.Context, msg *clienttypes.MsgCreateClient) (*clienttypes.MsgCreateClientResponse, error) {
+ ctx := sdk.UnwrapSDKContext(goCtx)
+
+ clientState, err := clienttypes.UnpackClientState(msg.ClientState)
+ if err != nil {
+ return nil, err
+ }
+
+ consensusState, err := clienttypes.UnpackConsensusState(msg.ConsensusState)
+ if err != nil {
+ return nil, err
+ }
+
+ clientID, err := k.ClientKeeper.CreateClient(ctx, clientState, consensusState)
+ if err != nil {
+ return nil, err
+ }
+
+ ctx.EventManager().EmitEvents(sdk.Events{
+ sdk.NewEvent(
+ clienttypes.EventTypeCreateClient,
+ sdk.NewAttribute(clienttypes.AttributeKeyClientID, clientID),
+ sdk.NewAttribute(clienttypes.AttributeKeyClientType, clientState.ClientType()),
+ sdk.NewAttribute(clienttypes.AttributeKeyConsensusHeight, clientState.GetLatestHeight().String()),
+ ),
+ sdk.NewEvent(
+ sdk.EventTypeMessage,
+ sdk.NewAttribute(sdk.AttributeKeyModule, clienttypes.AttributeValueCategory),
+ ),
+ })
+
+ return &clienttypes.MsgCreateClientResponse{}, nil
+}
+
+// UpdateClient defines an rpc handler method for MsgUpdateClient.
+func (k Keeper) UpdateClient(goCtx context.Context, msg *clienttypes.MsgUpdateClient) (*clienttypes.MsgUpdateClientResponse, error) {
+ ctx := sdk.UnwrapSDKContext(goCtx)
+
+ header, err := clienttypes.UnpackHeader(msg.Header)
+ if err != nil {
+ return nil, err
+ }
+
+ if err = k.ClientKeeper.UpdateClient(ctx, msg.ClientId, header); err != nil {
+ return nil, err
+ }
+
+ ctx.EventManager().EmitEvent(
+ sdk.NewEvent(
+ sdk.EventTypeMessage,
+ sdk.NewAttribute(sdk.AttributeKeyModule, clienttypes.AttributeValueCategory),
+ ),
+ )
+
+ return &clienttypes.MsgUpdateClientResponse{}, nil
+}
+
+// UpgradeClient defines an rpc handler method for MsgUpgradeClient.
+func (k Keeper) UpgradeClient(goCtx context.Context, msg *clienttypes.MsgUpgradeClient) (*clienttypes.MsgUpgradeClientResponse, error) {
+ ctx := sdk.UnwrapSDKContext(goCtx)
+
+ upgradedClient, err := clienttypes.UnpackClientState(msg.ClientState)
+ if err != nil {
+ return nil, err
+ }
+ upgradedConsState, err := clienttypes.UnpackConsensusState(msg.ConsensusState)
+ if err != nil {
+ return nil, err
+ }
+
+ if err = k.ClientKeeper.UpgradeClient(ctx, msg.ClientId, upgradedClient, upgradedConsState,
+ msg.ProofUpgradeClient, msg.ProofUpgradeConsensusState); err != nil {
+ return nil, err
+ }
+
+ ctx.EventManager().EmitEvent(
+ sdk.NewEvent(
+ sdk.EventTypeMessage,
+ sdk.NewAttribute(sdk.AttributeKeyModule, clienttypes.AttributeValueCategory),
+ ),
+ )
+
+ return &clienttypes.MsgUpgradeClientResponse{}, nil
+}
+
+// SubmitMisbehaviour defines an rpc handler method for MsgSubmitMisbehaviour.
+func (k Keeper) SubmitMisbehaviour(goCtx context.Context, msg *clienttypes.MsgSubmitMisbehaviour) (*clienttypes.MsgSubmitMisbehaviourResponse, error) {
+ ctx := sdk.UnwrapSDKContext(goCtx)
+
+ misbehaviour, err := clienttypes.UnpackMisbehaviour(msg.Misbehaviour)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := k.ClientKeeper.CheckMisbehaviourAndUpdateState(ctx, misbehaviour); err != nil {
+ return nil, sdkerrors.Wrap(err, "failed to process misbehaviour for IBC client")
+ }
+
+ ctx.EventManager().EmitEvent(
+ sdk.NewEvent(
+ clienttypes.EventTypeSubmitMisbehaviour,
+ sdk.NewAttribute(clienttypes.AttributeKeyClientID, msg.ClientId),
+ sdk.NewAttribute(clienttypes.AttributeKeyClientType, misbehaviour.ClientType()),
+ sdk.NewAttribute(clienttypes.AttributeKeyConsensusHeight, misbehaviour.GetHeight().String()),
+ ),
+ )
+
+ return &clienttypes.MsgSubmitMisbehaviourResponse{}, nil
+}
+
+// ConnectionOpenInit defines an rpc handler method for MsgConnectionOpenInit.
+func (k Keeper) ConnectionOpenInit(goCtx context.Context, msg *connectiontypes.MsgConnectionOpenInit) (*connectiontypes.MsgConnectionOpenInitResponse, error) {
+ ctx := sdk.UnwrapSDKContext(goCtx)
+
+ connectionID, err := k.ConnectionKeeper.ConnOpenInit(ctx, msg.ClientId, msg.Counterparty, msg.Version, msg.DelayPeriod)
+ if err != nil {
+ return nil, sdkerrors.Wrap(err, "connection handshake open init failed")
+ }
+
+ ctx.EventManager().EmitEvents(sdk.Events{
+ sdk.NewEvent(
+ connectiontypes.EventTypeConnectionOpenInit,
+ sdk.NewAttribute(connectiontypes.AttributeKeyConnectionID, connectionID),
+ sdk.NewAttribute(connectiontypes.AttributeKeyClientID, msg.ClientId),
+ sdk.NewAttribute(connectiontypes.AttributeKeyCounterpartyClientID, msg.Counterparty.ClientId),
+ sdk.NewAttribute(connectiontypes.AttributeKeyCounterpartyConnectionID, msg.Counterparty.ConnectionId),
+ ),
+ sdk.NewEvent(
+ sdk.EventTypeMessage,
+ sdk.NewAttribute(sdk.AttributeKeyModule, connectiontypes.AttributeValueCategory),
+ ),
+ })
+
+ return &connectiontypes.MsgConnectionOpenInitResponse{}, nil
+}
+
+// ConnectionOpenTry defines an rpc handler method for MsgConnectionOpenTry.
+func (k Keeper) ConnectionOpenTry(goCtx context.Context, msg *connectiontypes.MsgConnectionOpenTry) (*connectiontypes.MsgConnectionOpenTryResponse, error) {
+ ctx := sdk.UnwrapSDKContext(goCtx)
+
+ targetClient, err := clienttypes.UnpackClientState(msg.ClientState)
+ if err != nil {
+ return nil, sdkerrors.Wrapf(err, "client in msg is not exported.ClientState. invalid client: %v.", targetClient)
+ }
+
+ connectionID, err := k.ConnectionKeeper.ConnOpenTry(
+ ctx, msg.PreviousConnectionId, msg.Counterparty, msg.DelayPeriod, msg.ClientId, targetClient,
+ connectiontypes.ProtoVersionsToExported(msg.CounterpartyVersions), msg.ProofInit, msg.ProofClient, msg.ProofConsensus,
+ msg.ProofHeight, msg.ConsensusHeight,
+ )
+ if err != nil {
+ return nil, sdkerrors.Wrap(err, "connection handshake open try failed")
+ }
+
+ ctx.EventManager().EmitEvents(sdk.Events{
+ sdk.NewEvent(
+ connectiontypes.EventTypeConnectionOpenTry,
+ sdk.NewAttribute(connectiontypes.AttributeKeyConnectionID, connectionID),
+ sdk.NewAttribute(connectiontypes.AttributeKeyClientID, msg.ClientId),
+ sdk.NewAttribute(connectiontypes.AttributeKeyCounterpartyClientID, msg.Counterparty.ClientId),
+ sdk.NewAttribute(connectiontypes.AttributeKeyCounterpartyConnectionID, msg.Counterparty.ConnectionId),
+ ),
+ sdk.NewEvent(
+ sdk.EventTypeMessage,
+ sdk.NewAttribute(sdk.AttributeKeyModule, connectiontypes.AttributeValueCategory),
+ ),
+ })
+
+ return &connectiontypes.MsgConnectionOpenTryResponse{}, nil
+}
+
+// ConnectionOpenAck defines an rpc handler method for MsgConnectionOpenAck.
+func (k Keeper) ConnectionOpenAck(goCtx context.Context, msg *connectiontypes.MsgConnectionOpenAck) (*connectiontypes.MsgConnectionOpenAckResponse, error) {
+ ctx := sdk.UnwrapSDKContext(goCtx)
+ targetClient, err := clienttypes.UnpackClientState(msg.ClientState)
+ if err != nil {
+ return nil, sdkerrors.Wrapf(err, "client in msg is not exported.ClientState. invalid client: %v", targetClient)
+ }
+
+ if err := k.ConnectionKeeper.ConnOpenAck(
+ ctx, msg.ConnectionId, targetClient, msg.Version, msg.CounterpartyConnectionId,
+ msg.ProofTry, msg.ProofClient, msg.ProofConsensus,
+ msg.ProofHeight, msg.ConsensusHeight,
+ ); err != nil {
+ return nil, sdkerrors.Wrap(err, "connection handshake open ack failed")
+ }
+
+ connectionEnd, _ := k.ConnectionKeeper.GetConnection(ctx, msg.ConnectionId)
+
+ ctx.EventManager().EmitEvents(sdk.Events{
+ sdk.NewEvent(
+ connectiontypes.EventTypeConnectionOpenAck,
+ sdk.NewAttribute(connectiontypes.AttributeKeyConnectionID, msg.ConnectionId),
+ sdk.NewAttribute(connectiontypes.AttributeKeyClientID, connectionEnd.ClientId),
+ sdk.NewAttribute(connectiontypes.AttributeKeyCounterpartyClientID, connectionEnd.Counterparty.ClientId),
+ sdk.NewAttribute(connectiontypes.AttributeKeyCounterpartyConnectionID, connectionEnd.Counterparty.ConnectionId),
+ ),
+ sdk.NewEvent(
+ sdk.EventTypeMessage,
+ sdk.NewAttribute(sdk.AttributeKeyModule, connectiontypes.AttributeValueCategory),
+ ),
+ })
+
+ return &connectiontypes.MsgConnectionOpenAckResponse{}, nil
+}
+
+// ConnectionOpenConfirm defines an rpc handler method for MsgConnectionOpenConfirm.
+func (k Keeper) ConnectionOpenConfirm(goCtx context.Context, msg *connectiontypes.MsgConnectionOpenConfirm) (*connectiontypes.MsgConnectionOpenConfirmResponse, error) {
+ ctx := sdk.UnwrapSDKContext(goCtx)
+
+ if err := k.ConnectionKeeper.ConnOpenConfirm(
+ ctx, msg.ConnectionId, msg.ProofAck, msg.ProofHeight,
+ ); err != nil {
+ return nil, sdkerrors.Wrap(err, "connection handshake open confirm failed")
+ }
+
+ connectionEnd, _ := k.ConnectionKeeper.GetConnection(ctx, msg.ConnectionId)
+
+ ctx.EventManager().EmitEvents(sdk.Events{
+ sdk.NewEvent(
+ connectiontypes.EventTypeConnectionOpenConfirm,
+ sdk.NewAttribute(connectiontypes.AttributeKeyConnectionID, msg.ConnectionId),
+ sdk.NewAttribute(connectiontypes.AttributeKeyClientID, connectionEnd.ClientId),
+ sdk.NewAttribute(connectiontypes.AttributeKeyCounterpartyClientID, connectionEnd.Counterparty.ClientId),
+ sdk.NewAttribute(connectiontypes.AttributeKeyCounterpartyConnectionID, connectionEnd.Counterparty.ConnectionId),
+ ),
+ sdk.NewEvent(
+ sdk.EventTypeMessage,
+ sdk.NewAttribute(sdk.AttributeKeyModule, connectiontypes.AttributeValueCategory),
+ ),
+ })
+
+ return &connectiontypes.MsgConnectionOpenConfirmResponse{}, nil
+}
+
+// ChannelOpenInit defines an rpc handler method for MsgChannelOpenInit.
+func (k Keeper) ChannelOpenInit(goCtx context.Context, msg *channeltypes.MsgChannelOpenInit) (*channeltypes.MsgChannelOpenInitResponse, error) {
+ ctx := sdk.UnwrapSDKContext(goCtx)
+
+ // Lookup module by port capability
+ module, portCap, err := k.PortKeeper.LookupModuleByPort(ctx, msg.PortId)
+ if err != nil {
+ return nil, sdkerrors.Wrap(err, "could not retrieve module from port-id")
+ }
+
+ _, channelID, cap, err := channel.HandleMsgChannelOpenInit(ctx, k.ChannelKeeper, portCap, msg)
+ if err != nil {
+ return nil, err
+ }
+
+ // Retrieve callbacks from router
+ cbs, ok := k.Router.GetRoute(module)
+ if !ok {
+ return nil, sdkerrors.Wrapf(porttypes.ErrInvalidRoute, "route not found to module: %s", module)
+ }
+
+ if err = cbs.OnChanOpenInit(ctx, msg.Channel.Ordering, msg.Channel.ConnectionHops, msg.PortId, channelID, cap, msg.Channel.Counterparty, msg.Channel.Version); err != nil {
+ return nil, sdkerrors.Wrap(err, "channel open init callback failed")
+ }
+
+ return &channeltypes.MsgChannelOpenInitResponse{}, nil
+}
+
+// ChannelOpenTry defines an rpc handler method for MsgChannelOpenTry.
+func (k Keeper) ChannelOpenTry(goCtx context.Context, msg *channeltypes.MsgChannelOpenTry) (*channeltypes.MsgChannelOpenTryResponse, error) {
+ ctx := sdk.UnwrapSDKContext(goCtx)
+ // Lookup module by port capability
+ module, portCap, err := k.PortKeeper.LookupModuleByPort(ctx, msg.PortId)
+ if err != nil {
+ return nil, sdkerrors.Wrap(err, "could not retrieve module from port-id")
+ }
+
+ _, channelID, cap, err := channel.HandleMsgChannelOpenTry(ctx, k.ChannelKeeper, portCap, msg)
+ if err != nil {
+ return nil, err
+ }
+
+ // Retrieve callbacks from router
+ cbs, ok := k.Router.GetRoute(module)
+ if !ok {
+ return nil, sdkerrors.Wrapf(porttypes.ErrInvalidRoute, "route not found to module: %s", module)
+ }
+
+ if err = cbs.OnChanOpenTry(ctx, msg.Channel.Ordering, msg.Channel.ConnectionHops, msg.PortId, channelID, cap, msg.Channel.Counterparty, msg.Channel.Version, msg.CounterpartyVersion); err != nil {
+ return nil, sdkerrors.Wrap(err, "channel open try callback failed")
+ }
+
+ return &channeltypes.MsgChannelOpenTryResponse{}, nil
+}
+
+// ChannelOpenAck defines an rpc handler method for MsgChannelOpenAck.
+func (k Keeper) ChannelOpenAck(goCtx context.Context, msg *channeltypes.MsgChannelOpenAck) (*channeltypes.MsgChannelOpenAckResponse, error) {
+ ctx := sdk.UnwrapSDKContext(goCtx)
+
+ // Lookup module by channel capability
+ module, cap, err := k.ChannelKeeper.LookupModuleByChannel(ctx, msg.PortId, msg.ChannelId)
+ if err != nil {
+ return nil, sdkerrors.Wrap(err, "could not retrieve module from port-id")
+ }
+
+ // Retrieve callbacks from router
+ cbs, ok := k.Router.GetRoute(module)
+ if !ok {
+ return nil, sdkerrors.Wrapf(porttypes.ErrInvalidRoute, "route not found to module: %s", module)
+ }
+
+ _, err = channel.HandleMsgChannelOpenAck(ctx, k.ChannelKeeper, cap, msg)
+ if err != nil {
+ return nil, err
+ }
+
+ if err = cbs.OnChanOpenAck(ctx, msg.PortId, msg.ChannelId, msg.CounterpartyVersion); err != nil {
+ return nil, sdkerrors.Wrap(err, "channel open ack callback failed")
+ }
+
+ return &channeltypes.MsgChannelOpenAckResponse{}, nil
+}
+
+// ChannelOpenConfirm defines an rpc handler method for MsgChannelOpenConfirm.
+func (k Keeper) ChannelOpenConfirm(goCtx context.Context, msg *channeltypes.MsgChannelOpenConfirm) (*channeltypes.MsgChannelOpenConfirmResponse, error) {
+ ctx := sdk.UnwrapSDKContext(goCtx)
+
+ // Lookup module by channel capability
+ module, cap, err := k.ChannelKeeper.LookupModuleByChannel(ctx, msg.PortId, msg.ChannelId)
+ if err != nil {
+ return nil, sdkerrors.Wrap(err, "could not retrieve module from port-id")
+ }
+
+ // Retrieve callbacks from router
+ cbs, ok := k.Router.GetRoute(module)
+ if !ok {
+ return nil, sdkerrors.Wrapf(porttypes.ErrInvalidRoute, "route not found to module: %s", module)
+ }
+
+ _, err = channel.HandleMsgChannelOpenConfirm(ctx, k.ChannelKeeper, cap, msg)
+ if err != nil {
+ return nil, err
+ }
+
+ if err = cbs.OnChanOpenConfirm(ctx, msg.PortId, msg.ChannelId); err != nil {
+ return nil, sdkerrors.Wrap(err, "channel open confirm callback failed")
+ }
+
+ return &channeltypes.MsgChannelOpenConfirmResponse{}, nil
+}
+
+// ChannelCloseInit defines an rpc handler method for MsgChannelCloseInit.
+func (k Keeper) ChannelCloseInit(goCtx context.Context, msg *channeltypes.MsgChannelCloseInit) (*channeltypes.MsgChannelCloseInitResponse, error) {
+ ctx := sdk.UnwrapSDKContext(goCtx)
+ // Lookup module by channel capability
+ module, cap, err := k.ChannelKeeper.LookupModuleByChannel(ctx, msg.PortId, msg.ChannelId)
+ if err != nil {
+ return nil, sdkerrors.Wrap(err, "could not retrieve module from port-id")
+ }
+
+ // Retrieve callbacks from router
+ cbs, ok := k.Router.GetRoute(module)
+ if !ok {
+ return nil, sdkerrors.Wrapf(porttypes.ErrInvalidRoute, "route not found to module: %s", module)
+ }
+
+ if err = cbs.OnChanCloseInit(ctx, msg.PortId, msg.ChannelId); err != nil {
+ return nil, sdkerrors.Wrap(err, "channel close init callback failed")
+ }
+
+ _, err = channel.HandleMsgChannelCloseInit(ctx, k.ChannelKeeper, cap, msg)
+ if err != nil {
+ return nil, err
+ }
+
+ return &channeltypes.MsgChannelCloseInitResponse{}, nil
+}
+
+// ChannelCloseConfirm defines an rpc handler method for MsgChannelCloseConfirm.
+func (k Keeper) ChannelCloseConfirm(goCtx context.Context, msg *channeltypes.MsgChannelCloseConfirm) (*channeltypes.MsgChannelCloseConfirmResponse, error) {
+ ctx := sdk.UnwrapSDKContext(goCtx)
+
+ // Lookup module by channel capability
+ module, cap, err := k.ChannelKeeper.LookupModuleByChannel(ctx, msg.PortId, msg.ChannelId)
+ if err != nil {
+ return nil, sdkerrors.Wrap(err, "could not retrieve module from port-id")
+ }
+
+ // Retrieve callbacks from router
+ cbs, ok := k.Router.GetRoute(module)
+ if !ok {
+ return nil, sdkerrors.Wrapf(porttypes.ErrInvalidRoute, "route not found to module: %s", module)
+ }
+
+ if err = cbs.OnChanCloseConfirm(ctx, msg.PortId, msg.ChannelId); err != nil {
+ return nil, sdkerrors.Wrap(err, "channel close confirm callback failed")
+ }
+
+ _, err = channel.HandleMsgChannelCloseConfirm(ctx, k.ChannelKeeper, cap, msg)
+ if err != nil {
+ return nil, err
+ }
+
+ return &channeltypes.MsgChannelCloseConfirmResponse{}, nil
+}
+
+// RecvPacket defines an rpc handler method for MsgRecvPacket.
+func (k Keeper) RecvPacket(goCtx context.Context, msg *channeltypes.MsgRecvPacket) (*channeltypes.MsgRecvPacketResponse, error) {
+ ctx := sdk.UnwrapSDKContext(goCtx)
+
+ // Lookup module by channel capability
+ module, cap, err := k.ChannelKeeper.LookupModuleByChannel(ctx, msg.Packet.DestinationPort, msg.Packet.DestinationChannel)
+ if err != nil {
+ return nil, sdkerrors.Wrap(err, "could not retrieve module from port-id")
+ }
+
+ // Retrieve callbacks from router
+ cbs, ok := k.Router.GetRoute(module)
+ if !ok {
+ return nil, sdkerrors.Wrapf(porttypes.ErrInvalidRoute, "route not found to module: %s", module)
+ }
+
+ // Perform TAO verification
+ if err := k.ChannelKeeper.RecvPacket(ctx, cap, msg.Packet, msg.ProofCommitment, msg.ProofHeight); err != nil {
+ return nil, sdkerrors.Wrap(err, "receive packet verification failed")
+ }
+
+ // Perform application logic callback
+ _, ack, err := cbs.OnRecvPacket(ctx, msg.Packet)
+ if err != nil {
+ return nil, sdkerrors.Wrap(err, "receive packet callback failed")
+ }
+
+ // Set packet acknowledgement only if the acknowledgement is not nil.
+	// NOTE: IBC application modules may call WriteAcknowledgement asynchronously if the
+	// acknowledgement is nil.
+ if ack != nil {
+ if err := k.ChannelKeeper.WriteAcknowledgement(ctx, cap, msg.Packet, ack); err != nil {
+ return nil, err
+ }
+ }
+
+ defer func() {
+ telemetry.IncrCounterWithLabels(
+ []string{"tx", "msg", "ibc", msg.Type()},
+ 1,
+ []metrics.Label{
+ telemetry.NewLabel("source-port", msg.Packet.SourcePort),
+ telemetry.NewLabel("source-channel", msg.Packet.SourceChannel),
+ telemetry.NewLabel("destination-port", msg.Packet.DestinationPort),
+ telemetry.NewLabel("destination-channel", msg.Packet.DestinationChannel),
+ },
+ )
+ }()
+
+ return &channeltypes.MsgRecvPacketResponse{}, nil
+}
+
+// Timeout defines a rpc handler method for MsgTimeout.
+func (k Keeper) Timeout(goCtx context.Context, msg *channeltypes.MsgTimeout) (*channeltypes.MsgTimeoutResponse, error) {
+ ctx := sdk.UnwrapSDKContext(goCtx)
+ // Lookup module by channel capability
+ module, cap, err := k.ChannelKeeper.LookupModuleByChannel(ctx, msg.Packet.SourcePort, msg.Packet.SourceChannel)
+ if err != nil {
+ return nil, sdkerrors.Wrap(err, "could not retrieve module from port-id")
+ }
+
+ // Retrieve callbacks from router
+ cbs, ok := k.Router.GetRoute(module)
+ if !ok {
+ return nil, sdkerrors.Wrapf(porttypes.ErrInvalidRoute, "route not found to module: %s", module)
+ }
+
+ // Perform TAO verification
+ if err := k.ChannelKeeper.TimeoutPacket(ctx, msg.Packet, msg.ProofUnreceived, msg.ProofHeight, msg.NextSequenceRecv); err != nil {
+ return nil, sdkerrors.Wrap(err, "timeout packet verification failed")
+ }
+
+ // Perform application logic callback
+ _, err = cbs.OnTimeoutPacket(ctx, msg.Packet)
+ if err != nil {
+ return nil, sdkerrors.Wrap(err, "timeout packet callback failed")
+ }
+
+ // Delete packet commitment
+ if err = k.ChannelKeeper.TimeoutExecuted(ctx, cap, msg.Packet); err != nil {
+ return nil, err
+ }
+
+ defer func() {
+ telemetry.IncrCounterWithLabels(
+ []string{"ibc", "timeout", "packet"},
+ 1,
+ []metrics.Label{
+ telemetry.NewLabel("source-port", msg.Packet.SourcePort),
+ telemetry.NewLabel("source-channel", msg.Packet.SourceChannel),
+ telemetry.NewLabel("destination-port", msg.Packet.DestinationPort),
+ telemetry.NewLabel("destination-channel", msg.Packet.DestinationChannel),
+ telemetry.NewLabel("timeout-type", "height"),
+ },
+ )
+ }()
+
+ return &channeltypes.MsgTimeoutResponse{}, nil
+}
+
+// TimeoutOnClose defines a rpc handler method for MsgTimeoutOnClose.
+func (k Keeper) TimeoutOnClose(goCtx context.Context, msg *channeltypes.MsgTimeoutOnClose) (*channeltypes.MsgTimeoutOnCloseResponse, error) {
+ ctx := sdk.UnwrapSDKContext(goCtx)
+
+ // Lookup module by channel capability
+ module, cap, err := k.ChannelKeeper.LookupModuleByChannel(ctx, msg.Packet.SourcePort, msg.Packet.SourceChannel)
+ if err != nil {
+ return nil, sdkerrors.Wrap(err, "could not retrieve module from port-id")
+ }
+
+ // Retrieve callbacks from router
+ cbs, ok := k.Router.GetRoute(module)
+ if !ok {
+ return nil, sdkerrors.Wrapf(porttypes.ErrInvalidRoute, "route not found to module: %s", module)
+ }
+
+ // Perform TAO verification
+ if err := k.ChannelKeeper.TimeoutOnClose(ctx, cap, msg.Packet, msg.ProofUnreceived, msg.ProofClose, msg.ProofHeight, msg.NextSequenceRecv); err != nil {
+ return nil, sdkerrors.Wrap(err, "timeout on close packet verification failed")
+ }
+
+ // Perform application logic callback
+ // NOTE: MsgTimeout and MsgTimeoutOnClose use the same "OnTimeoutPacket"
+ // application logic callback.
+ _, err = cbs.OnTimeoutPacket(ctx, msg.Packet)
+ if err != nil {
+ return nil, sdkerrors.Wrap(err, "timeout packet callback failed")
+ }
+
+ // Delete packet commitment
+ if err = k.ChannelKeeper.TimeoutExecuted(ctx, cap, msg.Packet); err != nil {
+ return nil, err
+ }
+
+ defer func() {
+ telemetry.IncrCounterWithLabels(
+ []string{"ibc", "timeout", "packet"},
+ 1,
+ []metrics.Label{
+ telemetry.NewLabel("source-port", msg.Packet.SourcePort),
+ telemetry.NewLabel("source-channel", msg.Packet.SourceChannel),
+ telemetry.NewLabel("destination-port", msg.Packet.DestinationPort),
+ telemetry.NewLabel("destination-channel", msg.Packet.DestinationChannel),
+ telemetry.NewLabel("timeout-type", "channel-closed"),
+ },
+ )
+ }()
+
+ return &channeltypes.MsgTimeoutOnCloseResponse{}, nil
+}
+
+// Acknowledgement defines a rpc handler method for MsgAcknowledgement.
+func (k Keeper) Acknowledgement(goCtx context.Context, msg *channeltypes.MsgAcknowledgement) (*channeltypes.MsgAcknowledgementResponse, error) {
+ ctx := sdk.UnwrapSDKContext(goCtx)
+
+ // Lookup module by channel capability
+ module, cap, err := k.ChannelKeeper.LookupModuleByChannel(ctx, msg.Packet.SourcePort, msg.Packet.SourceChannel)
+ if err != nil {
+ return nil, sdkerrors.Wrap(err, "could not retrieve module from port-id")
+ }
+
+ // Retrieve callbacks from router
+ cbs, ok := k.Router.GetRoute(module)
+ if !ok {
+ return nil, sdkerrors.Wrapf(porttypes.ErrInvalidRoute, "route not found to module: %s", module)
+ }
+
+ // Perform TAO verification
+ if err := k.ChannelKeeper.AcknowledgePacket(ctx, cap, msg.Packet, msg.Acknowledgement, msg.ProofAcked, msg.ProofHeight); err != nil {
+ return nil, sdkerrors.Wrap(err, "acknowledge packet verification failed")
+ }
+
+ // Perform application logic callback
+ _, err = cbs.OnAcknowledgementPacket(ctx, msg.Packet, msg.Acknowledgement)
+ if err != nil {
+ return nil, sdkerrors.Wrap(err, "acknowledge packet callback failed")
+ }
+
+ defer func() {
+ telemetry.IncrCounterWithLabels(
+ []string{"tx", "msg", "ibc", msg.Type()},
+ 1,
+ []metrics.Label{
+ telemetry.NewLabel("source-port", msg.Packet.SourcePort),
+ telemetry.NewLabel("source-channel", msg.Packet.SourceChannel),
+ telemetry.NewLabel("destination-port", msg.Packet.DestinationPort),
+ telemetry.NewLabel("destination-channel", msg.Packet.DestinationChannel),
+ },
+ )
+ }()
+
+ return &channeltypes.MsgAcknowledgementResponse{}, nil
+}
diff --git a/core/keeper/msg_server_test.go b/core/keeper/msg_server_test.go
new file mode 100644
index 00000000..1af4cdc1
--- /dev/null
+++ b/core/keeper/msg_server_test.go
@@ -0,0 +1,714 @@
+package keeper_test
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/suite"
+
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+ commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
+ host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/keeper"
+ ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
+ ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+ ibcmock "github.com/cosmos/cosmos-sdk/x/ibc/testing/mock"
+ upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
+)
+
+const height = 10
+
+var (
+ timeoutHeight = clienttypes.NewHeight(0, 10000)
+ maxSequence = uint64(10)
+)
+
+type KeeperTestSuite struct {
+ suite.Suite
+
+ coordinator *ibctesting.Coordinator
+
+ chainA *ibctesting.TestChain
+ chainB *ibctesting.TestChain
+}
+
+// SetupTest creates a coordinator with 2 test chains.
+func (suite *KeeperTestSuite) SetupTest() {
+ suite.coordinator = ibctesting.NewCoordinator(suite.T(), 2)
+
+ suite.chainA = suite.coordinator.GetChain(ibctesting.GetChainID(0))
+ suite.chainB = suite.coordinator.GetChain(ibctesting.GetChainID(1))
+ // commit some blocks so that QueryProof returns valid proof (cannot return valid query if height <= 1)
+ suite.coordinator.CommitNBlocks(suite.chainA, 2)
+ suite.coordinator.CommitNBlocks(suite.chainB, 2)
+}
+
+func TestIBCTestSuite(t *testing.T) {
+ suite.Run(t, new(KeeperTestSuite))
+}
+
+// tests the IBC handler receiving a packet on ordered and unordered channels.
+// It verifies that an acknowledgement is written on success. It
+// tests high level properties like ordering and basic sanity checks. More
+// rigorous testing of 'RecvPacket' can be found in the
+// 04-channel/keeper/packet_test.go.
+func (suite *KeeperTestSuite) TestHandleRecvPacket() {
+ var (
+ packet channeltypes.Packet
+ )
+
+ testCases := []struct {
+ name string
+ malleate func()
+ expPass bool
+ }{
+ {"success: ORDERED", func() {
+ _, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.ORDERED)
+ packet = channeltypes.NewPacket(ibctesting.MockCommitment, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
+
+ err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ suite.Require().NoError(err)
+ }, true},
+ {"success: UNORDERED", func() {
+ _, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
+ packet = channeltypes.NewPacket(ibctesting.MockCommitment, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
+
+ err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ suite.Require().NoError(err)
+ }, true},
+ {"success: UNORDERED out of order packet", func() {
+ // setup uses an UNORDERED channel
+ _, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
+
+ // attempts to receive packet with sequence 10 without receiving packet with sequence 1
+ for i := uint64(1); i < 10; i++ {
+ packet = channeltypes.NewPacket(ibctesting.MockCommitment, i, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
+
+ err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ suite.Require().NoError(err)
+ }
+ }, true},
+ {"failure: ORDERED out of order packet", func() {
+ _, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.ORDERED)
+
+ // attempts to receive packet with sequence 10 without receiving packet with sequence 1
+ for i := uint64(1); i < 10; i++ {
+ packet = channeltypes.NewPacket(ibctesting.MockCommitment, i, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
+
+ err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ suite.Require().NoError(err)
+ }
+ }, false},
+ {"channel does not exist", func() {
+ // any non-nil value of packet is valid
+ suite.Require().NotNil(packet)
+ }, false},
+ {"packet not sent", func() {
+ _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
+ packet = channeltypes.NewPacket(ibctesting.MockCommitment, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
+ }, false},
+ {"ORDERED: packet already received (replay)", func() {
+ clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.ORDERED)
+ packet = channeltypes.NewPacket(ibctesting.MockCommitment, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
+
+ err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ suite.Require().NoError(err)
+
+ err = suite.coordinator.RecvPacket(suite.chainA, suite.chainB, clientA, packet)
+ suite.Require().NoError(err)
+ }, false},
+ {"UNORDERED: packet already received (replay)", func() {
+ clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
+
+ packet = channeltypes.NewPacket(ibctesting.MockCommitment, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
+
+ err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ suite.Require().NoError(err)
+
+ err = suite.coordinator.RecvPacket(suite.chainA, suite.chainB, clientA, packet)
+ suite.Require().NoError(err)
+ }, false},
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+
+ suite.Run(tc.name, func() {
+ suite.SetupTest() // reset
+
+ tc.malleate()
+
+ // get proof of packet commitment from chainA
+ packetKey := host.PacketCommitmentKey(packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence())
+ proof, proofHeight := suite.chainA.QueryProof(packetKey)
+
+ msg := channeltypes.NewMsgRecvPacket(packet, proof, proofHeight, suite.chainB.SenderAccount.GetAddress())
+
+ // ante-handle RecvPacket
+ _, err := keeper.Keeper.RecvPacket(*suite.chainB.App.IBCKeeper, sdk.WrapSDKContext(suite.chainB.GetContext()), msg)
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+
+ // replay should fail since state changes occur
+ _, err := keeper.Keeper.RecvPacket(*suite.chainB.App.IBCKeeper, sdk.WrapSDKContext(suite.chainB.GetContext()), msg)
+ suite.Require().Error(err)
+
+ // verify ack was written
+ ack, found := suite.chainB.App.IBCKeeper.ChannelKeeper.GetPacketAcknowledgement(suite.chainB.GetContext(), packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence())
+ suite.Require().NotNil(ack)
+ suite.Require().True(found)
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+}
+
+// tests the IBC handler acknowledgement of a packet on ordered and unordered
+// channels. It verifies that the deletion of packet commitments from state
+// occurs. It tests high level properties like ordering and basic sanity
+// checks. More rigorous testing of 'AcknowledgePacket'
+// can be found in the 04-channel/keeper/packet_test.go.
+func (suite *KeeperTestSuite) TestHandleAcknowledgePacket() {
+ var (
+ packet channeltypes.Packet
+ )
+
+ testCases := []struct {
+ name string
+ malleate func()
+ expPass bool
+ }{
+ {"success: ORDERED", func() {
+ clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.ORDERED)
+ packet = channeltypes.NewPacket(ibctesting.MockCommitment, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
+
+ err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ suite.Require().NoError(err)
+
+ err = suite.coordinator.RecvPacket(suite.chainA, suite.chainB, clientA, packet)
+ suite.Require().NoError(err)
+ }, true},
+ {"success: UNORDERED", func() {
+ clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
+ packet = channeltypes.NewPacket(ibctesting.MockCommitment, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
+
+ err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ suite.Require().NoError(err)
+
+ err = suite.coordinator.RecvPacket(suite.chainA, suite.chainB, clientA, packet)
+ suite.Require().NoError(err)
+ }, true},
+ {"success: UNORDERED acknowledge out of order packet", func() {
+ // setup uses an UNORDERED channel
+ clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
+
+ // attempts to acknowledge ack with sequence 10 without acknowledging ack with sequence 1 (removing packet commitment)
+ for i := uint64(1); i < 10; i++ {
+ packet = channeltypes.NewPacket(ibctesting.MockCommitment, i, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
+
+ err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ suite.Require().NoError(err)
+
+ err = suite.coordinator.RecvPacket(suite.chainA, suite.chainB, clientA, packet)
+ suite.Require().NoError(err)
+ }
+ }, true},
+ {"failure: ORDERED acknowledge out of order packet", func() {
+ clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.ORDERED)
+
+			// attempts to acknowledge ack with sequence 10 without acknowledging ack with sequence 1 (removing packet commitment)
+ for i := uint64(1); i < 10; i++ {
+ packet = channeltypes.NewPacket(ibctesting.MockCommitment, i, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
+
+ err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ suite.Require().NoError(err)
+
+ err = suite.coordinator.RecvPacket(suite.chainA, suite.chainB, clientA, packet)
+ suite.Require().NoError(err)
+ }
+ }, false},
+ {"channel does not exist", func() {
+ // any non-nil value of packet is valid
+ suite.Require().NotNil(packet)
+ }, false},
+ {"packet not received", func() {
+ _, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
+ packet = channeltypes.NewPacket(ibctesting.MockCommitment, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
+
+ err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ suite.Require().NoError(err)
+ }, false},
+ {"ORDERED: packet already acknowledged (replay)", func() {
+ clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.ORDERED)
+ packet = channeltypes.NewPacket(ibctesting.MockCommitment, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
+
+ err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ suite.Require().NoError(err)
+
+ err = suite.coordinator.RecvPacket(suite.chainA, suite.chainB, clientA, packet)
+ suite.Require().NoError(err)
+
+ err = suite.coordinator.AcknowledgePacket(suite.chainA, suite.chainB, clientB, packet, ibctesting.TestHash)
+ suite.Require().NoError(err)
+ }, false},
+ {"UNORDERED: packet already acknowledged (replay)", func() {
+ clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
+
+ packet = channeltypes.NewPacket(ibctesting.MockCommitment, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
+
+ err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ suite.Require().NoError(err)
+
+ err = suite.coordinator.RecvPacket(suite.chainA, suite.chainB, clientA, packet)
+ suite.Require().NoError(err)
+
+ err = suite.coordinator.AcknowledgePacket(suite.chainA, suite.chainB, clientB, packet, ibctesting.TestHash)
+ suite.Require().NoError(err)
+ }, false},
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+
+ suite.Run(tc.name, func() {
+ suite.SetupTest() // reset
+ ibctesting.TestHash = ibctesting.MockAcknowledgement
+
+ tc.malleate()
+
+ packetKey := host.PacketAcknowledgementKey(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence())
+ proof, proofHeight := suite.chainB.QueryProof(packetKey)
+
+ msg := channeltypes.NewMsgAcknowledgement(packet, ibcmock.MockAcknowledgement, proof, proofHeight, suite.chainA.SenderAccount.GetAddress())
+
+ _, err := keeper.Keeper.Acknowledgement(*suite.chainA.App.IBCKeeper, sdk.WrapSDKContext(suite.chainA.GetContext()), msg)
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+
+				// replay should return an error
+ _, err := keeper.Keeper.Acknowledgement(*suite.chainA.App.IBCKeeper, sdk.WrapSDKContext(suite.chainA.GetContext()), msg)
+ suite.Require().Error(err)
+
+ // verify packet commitment was deleted on source chain
+ has := suite.chainA.App.IBCKeeper.ChannelKeeper.HasPacketCommitment(suite.chainA.GetContext(), packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence())
+ suite.Require().False(has)
+
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+}
+
+// tests the IBC handler timing out a packet on ordered and unordered channels.
+// It verifies that the deletion of a packet commitment occurs. It tests
+// high level properties like ordering and basic sanity checks. More
+// rigorous testing of 'TimeoutPacket' and 'TimeoutExecuted' can be found in
+// the 04-channel/keeper/timeout_test.go.
+func (suite *KeeperTestSuite) TestHandleTimeoutPacket() {
+ var (
+ packet channeltypes.Packet
+ packetKey []byte
+ )
+
+ testCases := []struct {
+ name string
+ malleate func()
+ expPass bool
+ }{
+ {"success: ORDERED", func() {
+ clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.ORDERED)
+ packet = channeltypes.NewPacket(ibctesting.MockCommitment, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), uint64(suite.chainB.GetContext().BlockTime().UnixNano()))
+
+ // create packet commitment
+ err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ suite.Require().NoError(err)
+
+ // need to update chainA client to prove missing ack
+ suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+
+ packetKey = host.NextSequenceRecvKey(packet.GetDestPort(), packet.GetDestChannel())
+ }, true},
+ {"success: UNORDERED", func() {
+ clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
+ packet = channeltypes.NewPacket(ibctesting.MockCommitment, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), uint64(suite.chainB.GetContext().BlockTime().UnixNano()))
+
+ // create packet commitment
+ err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ suite.Require().NoError(err)
+
+ // need to update chainA client to prove missing ack
+ suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+
+ packetKey = host.PacketReceiptKey(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence())
+ }, true},
+ {"success: UNORDERED timeout out of order packet", func() {
+ // setup uses an UNORDERED channel
+ clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
+
+ // attempts to timeout the last packet sent without timing out the first packet
+ // packet sequences begin at 1
+ for i := uint64(1); i < maxSequence; i++ {
+ packet = channeltypes.NewPacket(ibctesting.MockCommitment, i, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), 0)
+
+ // create packet commitment
+ err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ suite.Require().NoError(err)
+ }
+
+ suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ packetKey = host.PacketReceiptKey(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence())
+ }, true},
+ {"success: ORDERED timeout out of order packet", func() {
+ clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.ORDERED)
+
+ // attempts to timeout the last packet sent without timing out the first packet
+ // packet sequences begin at 1
+ for i := uint64(1); i < maxSequence; i++ {
+ packet = channeltypes.NewPacket(ibctesting.MockCommitment, i, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), 0)
+
+ // create packet commitment
+ err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ suite.Require().NoError(err)
+ }
+
+ suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ packetKey = host.NextSequenceRecvKey(packet.GetDestPort(), packet.GetDestChannel())
+
+ }, true},
+ {"channel does not exist", func() {
+ // any non-nil value of packet is valid
+ suite.Require().NotNil(packet)
+
+ packetKey = host.NextSequenceRecvKey(packet.GetDestPort(), packet.GetDestChannel())
+ }, false},
+ {"UNORDERED: packet not sent", func() {
+ _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
+ packet = channeltypes.NewPacket(ibctesting.MockCommitment, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
+ packetKey = host.PacketReceiptKey(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence())
+ }, false},
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+
+ suite.Run(tc.name, func() {
+ suite.SetupTest() // reset
+
+ tc.malleate()
+
+ proof, proofHeight := suite.chainB.QueryProof(packetKey)
+
+ msg := channeltypes.NewMsgTimeout(packet, 1, proof, proofHeight, suite.chainA.SenderAccount.GetAddress())
+
+ _, err := keeper.Keeper.Timeout(*suite.chainA.App.IBCKeeper, sdk.WrapSDKContext(suite.chainA.GetContext()), msg)
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+
+ // replay should return an error
+ _, err := keeper.Keeper.Timeout(*suite.chainA.App.IBCKeeper, sdk.WrapSDKContext(suite.chainA.GetContext()), msg)
+ suite.Require().Error(err)
+
+ // verify packet commitment was deleted on source chain
+ has := suite.chainA.App.IBCKeeper.ChannelKeeper.HasPacketCommitment(suite.chainA.GetContext(), packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence())
+ suite.Require().False(has)
+
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+}
+
+// tests the IBC handler timing out a packet via channel closure on ordered
+// and unordered channels. It verifies that the deletion of a packet
+// commitment occurs. It tests high level properties like ordering and basic
+// sanity checks. More rigorous testing of 'TimeoutOnClose' and
+//'TimeoutExecuted' can be found in the 04-channel/keeper/timeout_test.go.
+func (suite *KeeperTestSuite) TestHandleTimeoutOnClosePacket() {
+ var (
+ packet channeltypes.Packet
+ packetKey []byte
+ counterpartyChannel ibctesting.TestChannel
+ )
+
+ testCases := []struct {
+ name string
+ malleate func()
+ expPass bool
+ }{
+ {"success: ORDERED", func() {
+ clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.ORDERED)
+ packet = channeltypes.NewPacket(ibctesting.MockCommitment, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
+ counterpartyChannel = ibctesting.TestChannel{
+ PortID: channelB.PortID,
+ ID: channelB.ID,
+ CounterpartyClientID: clientA,
+ }
+
+ // create packet commitment
+ err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ suite.Require().NoError(err)
+
+ // need to update chainA client to prove missing ack
+ suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+
+ packetKey = host.NextSequenceRecvKey(packet.GetDestPort(), packet.GetDestChannel())
+
+ // close counterparty channel
+ suite.coordinator.SetChannelClosed(suite.chainB, suite.chainA, counterpartyChannel)
+ }, true},
+ {"success: UNORDERED", func() {
+ clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
+ packet = channeltypes.NewPacket(ibctesting.MockCommitment, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
+ counterpartyChannel = ibctesting.TestChannel{
+ PortID: channelB.PortID,
+ ID: channelB.ID,
+ CounterpartyClientID: clientA,
+ }
+
+ // create packet commitment
+ err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ suite.Require().NoError(err)
+
+ // need to update chainA client to prove missing ack
+ suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+
+ packetKey = host.PacketReceiptKey(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence())
+
+ // close counterparty channel
+ suite.coordinator.SetChannelClosed(suite.chainB, suite.chainA, counterpartyChannel)
+ }, true},
+ {"success: UNORDERED timeout out of order packet", func() {
+ // setup uses an UNORDERED channel
+ clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
+ counterpartyChannel = ibctesting.TestChannel{
+ PortID: channelB.PortID,
+ ID: channelB.ID,
+ CounterpartyClientID: clientA,
+ }
+
+ // attempts to timeout the last packet sent without timing out the first packet
+ // packet sequences begin at 1
+ for i := uint64(1); i < maxSequence; i++ {
+ packet = channeltypes.NewPacket(ibctesting.MockCommitment, i, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
+
+ // create packet commitment
+ err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ suite.Require().NoError(err)
+ }
+
+ suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ packetKey = host.PacketReceiptKey(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence())
+
+ // close counterparty channel
+ suite.coordinator.SetChannelClosed(suite.chainB, suite.chainA, counterpartyChannel)
+ }, true},
+ {"success: ORDERED timeout out of order packet", func() {
+ clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.ORDERED)
+ counterpartyChannel = ibctesting.TestChannel{
+ PortID: channelB.PortID,
+ ID: channelB.ID,
+ CounterpartyClientID: clientA,
+ }
+
+ // attempts to timeout the last packet sent without timing out the first packet
+ // packet sequences begin at 1
+ for i := uint64(1); i < maxSequence; i++ {
+ packet = channeltypes.NewPacket(ibctesting.MockCommitment, i, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
+
+ // create packet commitment
+ err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ suite.Require().NoError(err)
+ }
+
+ suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ packetKey = host.NextSequenceRecvKey(packet.GetDestPort(), packet.GetDestChannel())
+
+ // close counterparty channel
+ suite.coordinator.SetChannelClosed(suite.chainB, suite.chainA, counterpartyChannel)
+ }, true},
+ {"channel does not exist", func() {
+ // any non-nil value of packet is valid
+ suite.Require().NotNil(packet)
+
+ packetKey = host.NextSequenceRecvKey(packet.GetDestPort(), packet.GetDestChannel())
+ }, false},
+ {"UNORDERED: packet not sent", func() {
+ clientA, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
+ packet = channeltypes.NewPacket(ibctesting.MockCommitment, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
+ packetKey = host.PacketAcknowledgementKey(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence())
+ counterpartyChannel = ibctesting.TestChannel{
+ PortID: channelB.PortID,
+ ID: channelB.ID,
+ CounterpartyClientID: clientA,
+ }
+
+ // close counterparty channel
+ suite.coordinator.SetChannelClosed(suite.chainB, suite.chainA, counterpartyChannel)
+ }, false},
+ {"ORDERED: channel not closed", func() {
+ clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.ORDERED)
+ packet = channeltypes.NewPacket(ibctesting.MockCommitment, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
+ counterpartyChannel = ibctesting.TestChannel{
+ PortID: channelB.PortID,
+ ID: channelB.ID,
+ CounterpartyClientID: clientA,
+ }
+
+ // create packet commitment
+ err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ suite.Require().NoError(err)
+
+ // need to update chainA client to prove missing ack
+ suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+
+ packetKey = host.NextSequenceRecvKey(packet.GetDestPort(), packet.GetDestChannel())
+ }, false},
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+
+ suite.Run(tc.name, func() {
+ suite.SetupTest() // reset
+
+ tc.malleate()
+
+ proof, proofHeight := suite.chainB.QueryProof(packetKey)
+
+ channelKey := host.ChannelKey(counterpartyChannel.PortID, counterpartyChannel.ID)
+ proofClosed, _ := suite.chainB.QueryProof(channelKey)
+
+ msg := channeltypes.NewMsgTimeoutOnClose(packet, 1, proof, proofClosed, proofHeight, suite.chainA.SenderAccount.GetAddress())
+
+ _, err := keeper.Keeper.TimeoutOnClose(*suite.chainA.App.IBCKeeper, sdk.WrapSDKContext(suite.chainA.GetContext()), msg)
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+
+ // replay should return an error
+ _, err := keeper.Keeper.TimeoutOnClose(*suite.chainA.App.IBCKeeper, sdk.WrapSDKContext(suite.chainA.GetContext()), msg)
+ suite.Require().Error(err)
+
+ // verify packet commitment was deleted on source chain
+ has := suite.chainA.App.IBCKeeper.ChannelKeeper.HasPacketCommitment(suite.chainA.GetContext(), packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence())
+ suite.Require().False(has)
+
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+}
+
+func (suite *KeeperTestSuite) TestUpgradeClient() {
+ var (
+ clientA string
+ upgradedClient exported.ClientState
+ upgradedConsState exported.ConsensusState
+ lastHeight exported.Height
+ msg *clienttypes.MsgUpgradeClient
+ )
+
+ newClientHeight := clienttypes.NewHeight(1, 1)
+
+ cases := []struct {
+ name string
+ setup func()
+ expPass bool
+ }{
+ {
+ name: "successful upgrade",
+ setup: func() {
+
+ upgradedClient = ibctmtypes.NewClientState("newChainId", ibctmtypes.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod+ibctesting.TrustingPeriod, ibctesting.MaxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false)
+ // Call ZeroCustomFields on upgraded clients to clear any client-chosen parameters in test-case upgradedClient
+ upgradedClient = upgradedClient.ZeroCustomFields()
+
+ upgradedConsState = &ibctmtypes.ConsensusState{
+ NextValidatorsHash: []byte("nextValsHash"),
+ }
+
+ // last Height is at next block
+ lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1))
+
+ // zero custom fields and store in upgrade store
+ suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClient)
+ suite.chainB.App.UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsState)
+
+ // commit upgrade store changes and update clients
+
+ suite.coordinator.CommitBlock(suite.chainB)
+ err := suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ suite.Require().NoError(err)
+
+ cs, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), clientA)
+ suite.Require().True(found)
+
+ proofUpgradeClient, _ := suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight())
+ proofUpgradedConsState, _ := suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight())
+
+ msg, err = clienttypes.NewMsgUpgradeClient(clientA, upgradedClient, upgradedConsState,
+ proofUpgradeClient, proofUpgradedConsState, suite.chainA.SenderAccount.GetAddress())
+ suite.Require().NoError(err)
+ },
+ expPass: true,
+ },
+ {
+ name: "VerifyUpgrade fails",
+ setup: func() {
+
+ upgradedClient = ibctmtypes.NewClientState("newChainId", ibctmtypes.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod+ibctesting.TrustingPeriod, ibctesting.MaxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false)
+ // Call ZeroCustomFields on upgraded clients to clear any client-chosen parameters in test-case upgradedClient
+ upgradedClient = upgradedClient.ZeroCustomFields()
+
+ upgradedConsState = &ibctmtypes.ConsensusState{
+ NextValidatorsHash: []byte("nextValsHash"),
+ }
+
+ // last Height is at next block
+ lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1))
+
+ // zero custom fields and store in upgrade store
+ suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClient)
+ suite.chainB.App.UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsState)
+
+ // commit upgrade store changes and update clients
+
+ suite.coordinator.CommitBlock(suite.chainB)
+ err := suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ suite.Require().NoError(err)
+
+ msg, err = clienttypes.NewMsgUpgradeClient(clientA, upgradedClient, upgradedConsState, nil, nil, suite.chainA.SenderAccount.GetAddress())
+ suite.Require().NoError(err)
+ },
+ expPass: false,
+ },
+ }
+
+ for _, tc := range cases {
+ tc := tc
+ clientA, _ = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+
+ tc.setup()
+
+ _, err := keeper.Keeper.UpgradeClient(*suite.chainA.App.IBCKeeper, sdk.WrapSDKContext(suite.chainA.GetContext()), msg)
+
+ if tc.expPass {
+ suite.Require().NoError(err, "upgrade handler failed on valid case: %s", tc.name)
+ newClient, ok := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), clientA)
+ suite.Require().True(ok)
+ newChainSpecifiedClient := newClient.ZeroCustomFields()
+ suite.Require().Equal(upgradedClient, newChainSpecifiedClient)
+ } else {
+ suite.Require().Error(err, "upgrade handler passed on invalid case: %s", tc.name)
+ }
+ }
+}
diff --git a/core/module.go b/core/module.go
new file mode 100644
index 00000000..6527ab71
--- /dev/null
+++ b/core/module.go
@@ -0,0 +1,200 @@
+package ibc
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "math/rand"
+
+ "github.com/gorilla/mux"
+ "github.com/grpc-ecosystem/grpc-gateway/runtime"
+ "github.com/spf13/cobra"
+
+ abci "github.com/tendermint/tendermint/abci/types"
+
+ "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/codec"
+ codectypes "github.com/cosmos/cosmos-sdk/codec/types"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/types/module"
+ simtypes "github.com/cosmos/cosmos-sdk/types/simulation"
+ ibcclient "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client"
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
+ channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+ host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/client/cli"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/keeper"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/simulation"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/types"
+)
+
+var (
+ _ module.AppModule = AppModule{}
+ _ module.AppModuleBasic = AppModuleBasic{}
+ _ module.AppModuleSimulation = AppModule{}
+)
+
+// AppModuleBasic defines the basic application module used by the ibc module.
+type AppModuleBasic struct{}
+
+var _ module.AppModuleBasic = AppModuleBasic{}
+
+// Name returns the ibc module's name.
+func (AppModuleBasic) Name() string {
+ return host.ModuleName
+}
+
+// RegisterLegacyAminoCodec does nothing. IBC does not support amino.
+func (AppModuleBasic) RegisterLegacyAminoCodec(*codec.LegacyAmino) {}
+
+// DefaultGenesis returns default genesis state as raw bytes for the ibc
+// module.
+func (AppModuleBasic) DefaultGenesis(cdc codec.JSONMarshaler) json.RawMessage {
+ return cdc.MustMarshalJSON(types.DefaultGenesisState())
+}
+
+// ValidateGenesis performs genesis state validation for the ibc module.
+func (AppModuleBasic) ValidateGenesis(cdc codec.JSONMarshaler, config client.TxEncodingConfig, bz json.RawMessage) error {
+ var gs types.GenesisState
+ if err := cdc.UnmarshalJSON(bz, &gs); err != nil {
+ return fmt.Errorf("failed to unmarshal %s genesis state: %w", host.ModuleName, err)
+ }
+
+ return gs.Validate()
+}
+
+// RegisterRESTRoutes does nothing. IBC does not support legacy REST routes.
+func (AppModuleBasic) RegisterRESTRoutes(client.Context, *mux.Router) {}
+
+// RegisterGRPCGatewayRoutes registers the gRPC Gateway routes for the ibc module.
+func (AppModuleBasic) RegisterGRPCGatewayRoutes(clientCtx client.Context, mux *runtime.ServeMux) {
+ clienttypes.RegisterQueryHandlerClient(context.Background(), mux, clienttypes.NewQueryClient(clientCtx))
+ connectiontypes.RegisterQueryHandlerClient(context.Background(), mux, connectiontypes.NewQueryClient(clientCtx))
+ channeltypes.RegisterQueryHandlerClient(context.Background(), mux, channeltypes.NewQueryClient(clientCtx))
+}
+
+// GetTxCmd returns the root tx command for the ibc module.
+func (AppModuleBasic) GetTxCmd() *cobra.Command {
+ return cli.GetTxCmd()
+}
+
+// GetQueryCmd returns the root query command for the ibc module.
+func (AppModuleBasic) GetQueryCmd() *cobra.Command {
+ return cli.GetQueryCmd()
+}
+
+// RegisterInterfaces registers module concrete types into protobuf Any.
+func (AppModuleBasic) RegisterInterfaces(registry codectypes.InterfaceRegistry) {
+ types.RegisterInterfaces(registry)
+}
+
+// AppModule implements an application module for the ibc module.
+type AppModule struct {
+ AppModuleBasic
+ keeper *keeper.Keeper
+
+ // create localhost by default
+ createLocalhost bool
+}
+
+// NewAppModule creates a new AppModule object
+func NewAppModule(k *keeper.Keeper) AppModule {
+ return AppModule{
+ keeper: k,
+ }
+}
+
+// Name returns the ibc module's name.
+func (AppModule) Name() string {
+ return host.ModuleName
+}
+
+// RegisterInvariants registers the ibc module invariants.
+func (am AppModule) RegisterInvariants(ir sdk.InvariantRegistry) {
+ // TODO:
+}
+
+// Route returns the message routing key for the ibc module.
+func (am AppModule) Route() sdk.Route {
+ return sdk.NewRoute(host.RouterKey, NewHandler(*am.keeper))
+}
+
+// QuerierRoute returns the ibc module's querier route name.
+func (AppModule) QuerierRoute() string {
+ return host.QuerierRoute
+}
+
+// LegacyQuerierHandler returns nil. IBC does not support the legacy querier.
+func (am AppModule) LegacyQuerierHandler(legacyQuerierCdc *codec.LegacyAmino) sdk.Querier {
+ return nil
+}
+
+// RegisterServices registers module services.
+func (am AppModule) RegisterServices(cfg module.Configurator) {
+ clienttypes.RegisterMsgServer(cfg.MsgServer(), am.keeper)
+ connectiontypes.RegisterMsgServer(cfg.MsgServer(), am.keeper)
+ channeltypes.RegisterMsgServer(cfg.MsgServer(), am.keeper)
+ types.RegisterQueryService(cfg.QueryServer(), am.keeper)
+}
+
+// InitGenesis performs genesis initialization for the ibc module. It returns
+// no validator updates.
+func (am AppModule) InitGenesis(ctx sdk.Context, cdc codec.JSONMarshaler, bz json.RawMessage) []abci.ValidatorUpdate {
+ var gs types.GenesisState
+ err := cdc.UnmarshalJSON(bz, &gs)
+ if err != nil {
+ panic(fmt.Sprintf("failed to unmarshal %s genesis state: %s", host.ModuleName, err))
+ }
+ InitGenesis(ctx, *am.keeper, am.createLocalhost, &gs)
+ return []abci.ValidatorUpdate{}
+}
+
+// ExportGenesis returns the exported genesis state as raw bytes for the ibc
+// module.
+func (am AppModule) ExportGenesis(ctx sdk.Context, cdc codec.JSONMarshaler) json.RawMessage {
+ return cdc.MustMarshalJSON(ExportGenesis(ctx, *am.keeper))
+}
+
+// ConsensusVersion implements AppModule/ConsensusVersion.
+func (AppModule) ConsensusVersion() uint64 { return 1 }
+
+// BeginBlock returns the begin blocker for the ibc module.
+func (am AppModule) BeginBlock(ctx sdk.Context, req abci.RequestBeginBlock) {
+ ibcclient.BeginBlocker(ctx, am.keeper.ClientKeeper)
+}
+
+// EndBlock returns the end blocker for the ibc module. It returns no validator
+// updates.
+func (am AppModule) EndBlock(ctx sdk.Context, req abci.RequestEndBlock) []abci.ValidatorUpdate {
+ return []abci.ValidatorUpdate{}
+}
+
+//____________________________________________________________________________
+
+// AppModuleSimulation functions
+
+// GenerateGenesisState creates a randomized GenState of the ibc module.
+func (AppModule) GenerateGenesisState(simState *module.SimulationState) {
+ simulation.RandomizedGenState(simState)
+}
+
+// ProposalContents doesn't return any content functions for governance proposals.
+func (AppModule) ProposalContents(_ module.SimulationState) []simtypes.WeightedProposalContent {
+ return nil
+}
+
+// RandomizedParams returns nil since IBC doesn't register parameter changes.
+func (AppModule) RandomizedParams(_ *rand.Rand) []simtypes.ParamChange {
+ return nil
+}
+
+// RegisterStoreDecoder registers a decoder for the ibc module's types
+func (am AppModule) RegisterStoreDecoder(sdr sdk.StoreDecoderRegistry) {
+ sdr[host.StoreKey] = simulation.NewDecodeStore(*am.keeper)
+}
+
+// WeightedOperations returns all the ibc module operations with their respective weights.
+func (am AppModule) WeightedOperations(_ module.SimulationState) []simtypes.WeightedOperation {
+ return nil
+}
diff --git a/core/simulation/decoder.go b/core/simulation/decoder.go
new file mode 100644
index 00000000..459eebb8
--- /dev/null
+++ b/core/simulation/decoder.go
@@ -0,0 +1,32 @@
+package simulation
+
+import (
+ "fmt"
+
+ "github.com/cosmos/cosmos-sdk/types/kv"
+ clientsim "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/simulation"
+ connectionsim "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/simulation"
+ channelsim "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/simulation"
+ host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/keeper"
+)
+
+// NewDecodeStore returns a decoder function closure that unmarshals the KVPair's
+// Value to the corresponding ibc type.
+func NewDecodeStore(k keeper.Keeper) func(kvA, kvB kv.Pair) string {
+ return func(kvA, kvB kv.Pair) string {
+ if res, found := clientsim.NewDecodeStore(k.ClientKeeper, kvA, kvB); found {
+ return res
+ }
+
+ if res, found := connectionsim.NewDecodeStore(k.Codec(), kvA, kvB); found {
+ return res
+ }
+
+ if res, found := channelsim.NewDecodeStore(k.Codec(), kvA, kvB); found {
+ return res
+ }
+
+ panic(fmt.Sprintf("invalid %s key prefix: %s", host.ModuleName, string(kvA.Key)))
+ }
+}
diff --git a/core/simulation/decoder_test.go b/core/simulation/decoder_test.go
new file mode 100644
index 00000000..09515727
--- /dev/null
+++ b/core/simulation/decoder_test.go
@@ -0,0 +1,80 @@
+package simulation_test
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/cosmos/cosmos-sdk/simapp"
+ "github.com/cosmos/cosmos-sdk/types/kv"
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
+ channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+ host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/simulation"
+ ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
+)
+
+func TestDecodeStore(t *testing.T) {
+ app := simapp.Setup(false)
+ dec := simulation.NewDecodeStore(*app.IBCKeeper)
+
+ clientID := "clientidone"
+ connectionID := "connectionidone"
+ channelID := "channelidone"
+ portID := "portidone"
+
+ clientState := &ibctmtypes.ClientState{
+ FrozenHeight: clienttypes.NewHeight(0, 10),
+ }
+ connection := connectiontypes.ConnectionEnd{
+ ClientId: "clientidone",
+ Versions: []*connectiontypes.Version{connectiontypes.NewVersion("1", nil)},
+ }
+ channel := channeltypes.Channel{
+ State: channeltypes.OPEN,
+ Version: "1.0",
+ }
+
+ kvPairs := kv.Pairs{
+ Pairs: []kv.Pair{
+ {
+ Key: host.FullClientStateKey(clientID),
+ Value: app.IBCKeeper.ClientKeeper.MustMarshalClientState(clientState),
+ },
+ {
+ Key: host.ConnectionKey(connectionID),
+ Value: app.IBCKeeper.Codec().MustMarshalBinaryBare(&connection),
+ },
+ {
+ Key: host.ChannelKey(portID, channelID),
+ Value: app.IBCKeeper.Codec().MustMarshalBinaryBare(&channel),
+ },
+ {
+ Key: []byte{0x99},
+ Value: []byte{0x99},
+ },
+ },
+ }
+ tests := []struct {
+ name string
+ expectedLog string
+ }{
+ {"ClientState", fmt.Sprintf("ClientState A: %v\nClientState B: %v", clientState, clientState)},
+ {"ConnectionEnd", fmt.Sprintf("ConnectionEnd A: %v\nConnectionEnd B: %v", connection, connection)},
+ {"Channel", fmt.Sprintf("Channel A: %v\nChannel B: %v", channel, channel)},
+ {"other", ""},
+ }
+
+ for i, tt := range tests {
+ i, tt := i, tt
+ t.Run(tt.name, func(t *testing.T) {
+ if i == len(tests)-1 {
+ require.Panics(t, func() { dec(kvPairs.Pairs[i], kvPairs.Pairs[i]) }, tt.name)
+ } else {
+ require.Equal(t, tt.expectedLog, dec(kvPairs.Pairs[i], kvPairs.Pairs[i]), tt.name)
+ }
+ })
+ }
+}
diff --git a/core/simulation/genesis.go b/core/simulation/genesis.go
new file mode 100644
index 00000000..d71f4492
--- /dev/null
+++ b/core/simulation/genesis.go
@@ -0,0 +1,63 @@
+package simulation
+
+// DONTCOVER
+
+import (
+ "encoding/json"
+ "fmt"
+ "math/rand"
+
+ "github.com/cosmos/cosmos-sdk/types/module"
+ clientsims "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/simulation"
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ connectionsims "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/simulation"
+ connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
+ channelsims "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/simulation"
+ channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+ host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/types"
+)
+
+// Simulation parameter constants
+const (
+ clientGenesis = "client_genesis"
+ connectionGenesis = "connection_genesis"
+ channelGenesis = "channel_genesis"
+)
+
+// RandomizedGenState generates a random GenesisState for the ibc module
+func RandomizedGenState(simState *module.SimulationState) {
+ var (
+ clientGenesisState clienttypes.GenesisState
+ connectionGenesisState connectiontypes.GenesisState
+ channelGenesisState channeltypes.GenesisState
+ )
+
+ simState.AppParams.GetOrGenerate(
+ simState.Cdc, clientGenesis, &clientGenesisState, simState.Rand,
+ func(r *rand.Rand) { clientGenesisState = clientsims.GenClientGenesis(r, simState.Accounts) },
+ )
+
+ simState.AppParams.GetOrGenerate(
+ simState.Cdc, connectionGenesis, &connectionGenesisState, simState.Rand,
+ func(r *rand.Rand) { connectionGenesisState = connectionsims.GenConnectionGenesis(r, simState.Accounts) },
+ )
+
+ simState.AppParams.GetOrGenerate(
+ simState.Cdc, channelGenesis, &channelGenesisState, simState.Rand,
+ func(r *rand.Rand) { channelGenesisState = channelsims.GenChannelGenesis(r, simState.Accounts) },
+ )
+
+ ibcGenesis := types.GenesisState{
+ ClientGenesis: clientGenesisState,
+ ConnectionGenesis: connectionGenesisState,
+ ChannelGenesis: channelGenesisState,
+ }
+
+ bz, err := json.MarshalIndent(&ibcGenesis, "", " ")
+ if err != nil {
+ panic(err)
+ }
+ fmt.Printf("Selected randomly generated %s parameters:\n%s\n", host.ModuleName, bz)
+ simState.GenState[host.ModuleName] = simState.Cdc.MustMarshalJSON(&ibcGenesis)
+}
diff --git a/core/simulation/genesis_test.go b/core/simulation/genesis_test.go
new file mode 100644
index 00000000..54aff75a
--- /dev/null
+++ b/core/simulation/genesis_test.go
@@ -0,0 +1,49 @@
+package simulation_test
+
+import (
+ "encoding/json"
+ "math/rand"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/cosmos/cosmos-sdk/codec"
+ codectypes "github.com/cosmos/cosmos-sdk/codec/types"
+ "github.com/cosmos/cosmos-sdk/types/module"
+ simtypes "github.com/cosmos/cosmos-sdk/types/simulation"
+ host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/simulation"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/types"
+)
+
+// TestRandomizedGenState tests the normal scenario of applying RandomizedGenState.
+// Abnormal scenarios are not tested here.
+func TestRandomizedGenState(t *testing.T) {
+ interfaceRegistry := codectypes.NewInterfaceRegistry()
+ cdc := codec.NewProtoCodec(interfaceRegistry)
+
+ s := rand.NewSource(1)
+ r := rand.New(s)
+
+ simState := module.SimulationState{
+ AppParams: make(simtypes.AppParams),
+ Cdc: cdc,
+ Rand: r,
+ NumBonded: 3,
+ Accounts: simtypes.RandomAccounts(r, 3),
+ InitialStake: 1000,
+ GenState: make(map[string]json.RawMessage),
+ }
+
+ // Remark: the current RandomizedGenState function
+	// is actually not random as it does not concretely utilize the random value r.
+	// This test will pass for any value of r.
+ simulation.RandomizedGenState(&simState)
+
+ var ibcGenesis types.GenesisState
+ simState.Cdc.MustUnmarshalJSON(simState.GenState[host.ModuleName], &ibcGenesis)
+
+ require.NotNil(t, ibcGenesis.ClientGenesis)
+ require.NotNil(t, ibcGenesis.ConnectionGenesis)
+ require.NotNil(t, ibcGenesis.ChannelGenesis)
+}
diff --git a/core/spec/01_concepts.md b/core/spec/01_concepts.md
new file mode 100644
index 00000000..4347fb67
--- /dev/null
+++ b/core/spec/01_concepts.md
@@ -0,0 +1,405 @@
+
+
+# Concepts
+
+> NOTE: if you are not familiar with the IBC terminology and concepts, please read
+this [document](https://github.com/cosmos/ics/blob/master/ibc/1_IBC_TERMINOLOGY.md) as a prerequisite.
+
+## Client Creation, Updates, and Upgrades
+
+IBC clients are on-chain light clients. The light client is responsible for verifying
+counterparty state. A light client can be created by any user submitting a valid initial
+`ClientState` and `ConsensusState`. The client identifier is auto-generated using the
+client type and the global client counter appended in the format: `{client-type}-{N}`.
+Clients are given a client-identifier-prefixed store to store their associated client
+state and consensus states. Consensus states are stored using their associated height.
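+
+As a minimal sketch of this identifier scheme (the helper below is illustrative
+only and is not the actual keeper API), generating an identifier amounts to
+appending the current value of the global client counter to the client type:
+
+```go
+import "fmt"
+
+// formatClientIdentifier sketches the {client-type}-{N} format described above;
+// the real keeper supplies N from its global client counter.
+func formatClientIdentifier(clientType string, counter uint64) string {
+	return fmt.Sprintf("%s-%d", clientType, counter)
+}
+
+// formatClientIdentifier("07-tendermint", 0) == "07-tendermint-0"
+```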
+
+Clients can be updated by any user submitting a valid `Header`. The client state callback
+to `CheckHeaderAndUpdateState` is responsible for verifying the header against previously
+stored state. The function should also return the updated client state and consensus state
+if the header is considered a valid update. A light client, such as Tendermint, may have
+client-specific parameters like `TrustLevel`, which must be considered valid in relation
+to the `Header`. The update height is not necessarily the latest height of the light
+client. Updates may fill in missing consensus state heights.
+
+Clients may be upgraded. The upgrade should be verified using `VerifyUpgrade`. It is not
+a requirement to allow for light client upgrades. For example, the solo machine client
+will simply return an error on `VerifyUpgrade`. Clients which implement upgrades
+are expected to account for, but not necessarily support, planned and unplanned upgrades.
+
+## Client Misbehaviour
+
+IBC clients must freeze when the counterparty chain becomes byzantine and
+takes actions that could fool the light client into accepting invalid state
+transitions. Thus, relayers are able to submit Misbehaviour proofs that prove
+that a counterparty chain has signed two Headers for the same height. This
+constitutes misbehaviour as the IBC client could have accepted either header
+as valid. Upon verifying the misbehaviour the IBC client must freeze at that
+height so that any proof verifications for the frozen height or later fail.
+
+Note, there is a difference between the chain-level Misbehaviour that IBC is
+concerned with and the validator-level Evidence that Tendermint is concerned
+with. Tendermint must be able to detect, submit, and punish any evidence of
+individual validators breaking the Tendermint consensus protocol and attempting
+to mount an attack. IBC clients must only act when an attack is successful
+and the chain has successfully forked. In this case, valid Headers submitted
+to the IBC client can no longer be trusted and the client must freeze.
+
+Governance may then choose to override a frozen client and provide the correct,
+canonical Header so that the client can continue operating after the Misbehaviour
+submission.
+
+## ClientUpdateProposal
+
+A governance proposal may be passed to update a specified client using another client
+known as the "substitute client". This is useful in unfreezing clients or updating
+expired clients, thereby making the affected channels active again. Each client is
+expected to implement this functionality. A client may choose to disallow an update
+by a governance proposal by returning an error in the client state function `CheckSubstituteAndUpdateState`.
+
+The localhost client cannot be updated by a governance proposal.
+
+The solo machine client requires the boolean flag `AllowUpdateAfterProposal` to be set
+to true in order to be updated by a proposal. This is set upon client creation and cannot
+be updated later.
+
+The Tendermint client has two update flags, `AllowUpdateAfterExpiry` and
+`AllowUpdateAfterMisbehaviour`. The former flag can only be used to unexpire clients. The
+latter flag can be used to unfreeze a client and, if necessary, it will also unexpire the client.
+It is best practice to initialize a new substitute client instead of using an existing one.
+This avoids potential issues of the substitute becoming frozen due to misbehaviour or the
+subject client becoming refrozen due to misbehaviour that had not expired at the time the
+proposal passes. These boolean flags are set upon client creation and cannot be updated later.
+
+The `CheckSubstituteAndUpdateState` function provides the light client with its own client
+store, the client store of the substitute, the substitute client state, and the initial
+height that should be used when referring to the substitute client. Most light client
+implementations should copy consensus states from the substitute to the subject, but
+are not required to do so. Light clients may copy information as they deem necessary.
+
+It is not recommended to use a substitute client in normal operations since the subject
+light client will be given unrestricted access to the substitute client store. Governance
+should not pass proposals which would allow a byzantine light client module to modify the
+state of the substitute.
+
+## IBC Client Heights
+
+IBC Client Heights are represented by the struct:
+
+```go
+type Height struct {
+ RevisionNumber uint64
+ RevisionHeight uint64
+}
+```
+
+The `RevisionNumber` represents the revision of the chain that the height is representing.
+A revision typically represents a continuous, monotonically increasing range of block-heights.
+The `RevisionHeight` represents the height of the chain within the given revision.
+
+On any reset of the `RevisionHeight`, for example, when hard-forking a Tendermint chain,
+the `RevisionNumber` will get incremented. This allows IBC clients to distinguish between a
+block-height `n` of a previous revision of the chain (at revision `p`) and block-height `n` of the current
+revision of the chain (at revision `e`).
+
+`Heights` that share the same revision number can be compared by simply comparing their respective `RevisionHeights`.
+Heights that do not share the same revision number will only be compared using their respective `RevisionNumbers`.
+Thus a height `h` with revision number `e+1` will always be greater than a height `g` with revision number `e`,
+**REGARDLESS** of the difference in revision heights.
+
+Ex:
+
+```go
+Height{RevisionNumber: 3, RevisionHeight: 0} > Height{RevisionNumber: 2, RevisionHeight: 100000000000}
+```
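+
+A minimal sketch of this comparison rule (revision number first, revision height only within
+the same revision); illustrative only, and not necessarily the exact method set defined in
+`02-client/types`:
+
+```go
+// GT reports whether height h is strictly greater than height g, following the
+// comparison rule described above.
+func (h Height) GT(g Height) bool {
+    if h.RevisionNumber != g.RevisionNumber {
+        return h.RevisionNumber > g.RevisionNumber
+    }
+    return h.RevisionHeight > g.RevisionHeight
+}
+```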
+
+When a Tendermint chain is running a particular revision, relayers can simply submit headers and proofs with the revision number
+given by the chain's chainID, and the revision height given by the Tendermint block height. When a chain updates using a hard-fork
+and resets its block-height, it is responsible for updating its chain-id to increment the revision number.
+IBC Tendermint clients then verify the revision number against their `ChainId` and treat the `RevisionHeight` as the Tendermint block-height.
+
+Tendermint chains wishing to use revisions to maintain persistent IBC connections even across height-resetting upgrades must format their chain-ids
+in the following manner: `{chainID}-{revision_number}`. On any height-resetting upgrade, the chainID **MUST** be updated with a higher revision number
+than the previous value.
+
+Ex:
+
+- Before upgrade ChainID: `gaiamainnet-3`
+- After upgrade ChainID: `gaiamainnet-4`
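+
+A hedged sketch of extracting the revision number from a chain ID that follows the
+`{chainID}-{revision_number}` convention. The helper name is an assumption for this sketch;
+the actual parsing logic lives in `02-client/types`, and only the standard library `strings`
+and `strconv` packages are used here:
+
+```go
+// parseRevisionNumber is a hypothetical helper returning the revision number
+// encoded in a chain ID of the form `{chainID}-{revision_number}`, or 0 if the
+// chain ID does not follow the convention.
+func parseRevisionNumber(chainID string) uint64 {
+    i := strings.LastIndex(chainID, "-")
+    if i == -1 {
+        return 0
+    }
+    revision, err := strconv.ParseUint(chainID[i+1:], 10, 64)
+    if err != nil {
+        return 0
+    }
+    return revision
+}
+
+// parseRevisionNumber("gaiamainnet-4") == 4
+```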
+
+Clients that do not require revisions, such as the solo-machine client, simply hardcode `0` into the revision number whenever they
+need to return an IBC height when implementing IBC interfaces and use the `RevisionHeight` exclusively.
+
+Other client-types may implement their own logic to verify the IBC Heights that relayers provide in their `Update`, `Misbehavior`, and
+`Verify` functions respectively.
+
+The IBC interfaces expect an `ibcexported.Height` interface; however, all clients should use the concrete implementation provided in
+`02-client/types` and reproduced above.
+
+## Connection Handshake
+
+The connection handshake occurs in 4 steps as defined in [ICS 03](https://github.com/cosmos/ics/tree/master/spec/ics-003-connection-semantics).
+
+`ConnOpenInit` is the first attempt to initialize a connection on the executing chain.
+The handshake is expected to succeed if the version selected is supported. The connection
+identifier for the counterparty connection must be left empty indicating that the counterparty
+must select its own identifier. The connection identifier is auto derived in the format:
+`connection{N}` where N is the next sequence to be used. The counter begins at 0 and increments
+by 1. The connection is set and stored in the INIT state upon success.
+
+`ConnOpenTry` is a response to a chain executing `ConnOpenInit`. The executing chain will validate
+the chain level parameters the counterparty has stored such as its chainID. The executing chain
+will also verify that, if a previous connection exists for the specified connection identifier,
+all of the parameters match and its previous state was INIT. This may occur when both
+chains execute `ConnOpenInit` simultaneously. If the connection does not exist then a connection
+identifier is generated in the same format done in `ConnOpenInit`. The executing chain will verify
+that the counterparty created a connection in INIT state. The executing chain will also verify
+the `ClientState` and `ConsensusState` the counterparty stores for the executing chain. The
+executing chain will select a version from the intersection of its supported versions and the
+versions set by the counterparty. The connection is set and stored in the TRYOPEN state upon
+success.
+
+`ConnOpenAck` may be called on a chain when the counterparty connection has entered TRYOPEN. A
+previous connection on the executing chain must exist in either INIT or TRYOPEN. The executing
+chain will verify the version the counterparty selected. If the counterparty selected its own
+connection identifier, it will be validated in the basic validation of a `MsgConnOpenAck`.
+The counterparty connection state is verified along with the `ClientState` and `ConsensusState`
+stored for the executing chain. The connection is set and stored in the OPEN state upon success.
+
+`ConnOpenConfirm` is a response to a chain executing `ConnOpenAck`. The executing chain's connection
+must be in TRYOPEN. The counterparty connection state is verified to be in the OPEN state. The
+connection is set and stored in the OPEN state upon success.
+
+## Connection Version Negotiation
+
+During the handshake procedure for connections a version is agreed
+upon between the two parties. This occurs during the first 3 steps of the
+handshake.
+
+During `ConnOpenInit`, party A is expected to set all the versions they wish
+to support within their connection state. It is expected that this set of
+versions is ordered from most preferred to least preferred. This is not a strict
+requirement for the SDK implementation of IBC because the party calling
+`ConnOpenTry` will greedily select the latest version it supports that the
+counterparty supports as well. A specific version can optionally be passed
+as `Version` to ensure that the handshake will either complete with that
+version or fail.
+
+During `ConnOpenTry`, party B will select a version from the counterparty's
+supported versions. Priority will be placed on the latest supported version.
+If a matching version cannot be found an error is returned.
+
+During `ConnOpenAck`, party A will verify that they can support the version
+party B selected. If they do not support the selected version an error is
+returned. After this step, the connection version is considered agreed upon.
+
+
+A `Version` is defined as follows:
+
+```go
+type Version struct {
+ // unique version identifier
+ Identifier string
+ // list of features compatible with the specified identifier
+ Features []string
+}
+```
+
+A version must contain a non-empty identifier. Empty feature sets are allowed, but each
+feature must be a non-empty string.
+
+::: warning
+A set of versions should not contain two versions with the same
+identifier, but differing feature sets. This will result in undefined behavior
+with regards to version selection in `ConnOpenTry`. Each version in a set of
+versions should have a unique version identifier.
+:::
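+
+A sketch of the greedy selection performed in `ConnOpenTry` as described above: walk the
+local supported versions in preference order and return the first one the counterparty also
+proposed. Matching on the identifier alone is an assumption of this sketch; the real
+implementation also reconciles feature sets:
+
+```go
+// pickVersion is an illustrative helper selecting the most preferred locally
+// supported version whose identifier was also proposed by the counterparty.
+func pickVersion(supported, counterpartyVersions []Version) (Version, error) {
+    for _, v := range supported {
+        for _, cv := range counterpartyVersions {
+            if v.Identifier == cv.Identifier {
+                return v, nil
+            }
+        }
+    }
+    return Version{}, errors.New("no matching connection version found")
+}
+```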
+
+## Channel Handshake
+
+The channel handshake occurs in 4 steps as defined in [ICS 04](https://github.com/cosmos/ics/tree/master/spec/ics-004-channel-and-packet-semantics).
+
+`ChanOpenInit` is the first attempt to initialize a channel on top of an existing connection.
+The handshake is expected to succeed if the version selected for the existing connection is a
+supported IBC version. The portID must correspond to a port already bound upon `InitChain`.
+The channel identifier for the counterparty channel must be left empty indicating that the
+counterparty must select its own identifier. The channel identifier is auto derived in the
+format: `channel{N}` where N is the next sequence to be used. The channel is set and stored
+in the INIT state upon success. The channel parameters `NextSequenceSend`, `NextSequenceRecv`,
+and `NextSequenceAck` are all set to 1 and a channel capability is created for the given
+portID and channelID path.
+
+`ChanOpenTry` is a response to a chain executing `ChanOpenInit`. If the executing chain is calling
+`ChanOpenTry` after previously executing `ChanOpenInit` then the provided channel parameters must
+match the previously selected parameters. If the previous channel does not exist then a channel
+identifier is generated in the same format as done in `ChanOpenInit`. The connection the channel
+is created on top of must be in the OPEN state and its IBC version must support the desired channel
+type being created (ORDERED, UNORDERED, etc). The executing chain will verify that the channel
+state of the counterparty is in INIT. The executing chain will set and store the channel state
+in TRYOPEN. The channel parameters `NextSequenceSend`, `NextSequenceRecv`, and `NextSequenceAck`
+are all set to 1 and a channel capability is created for the given portID and channelID path only
+if the channel did not previously exist.
+
+`ChanOpenAck` may be called on a chain when the counterparty channel has entered TRYOPEN. A
+previous channel on the executing chain must exist and be in either the INIT or TRYOPEN state. If the
+counterparty selected its own channel identifier, it will be validated in the basic validation
+of `MsgChanOpenAck`. The executing chain verifies that the counterparty channel state is in
+TRYOPEN. The channel is set and stored in the OPEN state upon success.
+
+`ChanOpenConfirm` is a response to a chain executing `ChanOpenAck`. The executing chain's
+previous channel state must be in TRYOPEN. The executing chain verifies that the counterparty
+channel state is OPEN. The channel is set and stored in the OPEN state upon success.
+
+## Channel Version Negotiation
+
+During the channel handshake procedure a version must be agreed upon between
+the two parties. The selection process is largely left to the callers and
+the verification of valid versioning must be handled by application developers
+in the channel handshake callbacks.
+
+During `ChanOpenInit`, a version string is passed in and set in party A's
+channel state.
+
+During `ChanOpenTry`, a version string for party A and for party B are passed
+in. The party A version string must match the version string used in
+`ChanOpenInit` otherwise channel state verification will fail. The party B
+version string could be anything (even different from the one proposed by
+party A). However, the proposed version by party B is expected to be fully
+supported by party A.
+
+During the `ChanOpenAck` callback, the application module is expected to verify
+the version proposed by party B using the `MsgChanOpenAck` `CounterpartyVersion`
+field. The application module should throw an error if the version string is
+not valid.
+
+In general empty version strings are to be considered valid options for an
+application module.
+
+Application modules may implement their own versioning system, such as semantic
+versioning, or they may lean upon the versioning system used in connection
+version negotiation. To use the connection version semantics the application
+would simply pass the proto encoded version into each of the handshake calls
+and decode the version string into a `Version` instance to do version verification
+in the handshake callbacks.
+
+Implementations which do not feel they would benefit from versioning can do
+basic string matching using a single compatible version.
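+
+As a sketch of such basic string matching, an application callback can compare the proposed
+version against its single compatible version. The constant and function names here are
+assumptions for illustration, not a prescribed API:
+
+```go
+// AppVersion is the single channel version this hypothetical application supports.
+const AppVersion = "myapp-1"
+
+// validateVersion rejects any proposed channel version other than the single
+// compatible version. An application would call this from its channel
+// handshake callbacks (e.g. when checking `CounterpartyVersion`).
+func validateVersion(proposed string) error {
+    if proposed != AppVersion {
+        return fmt.Errorf("invalid channel version: expected %s, got %s", AppVersion, proposed)
+    }
+    return nil
+}
+```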
+
+## Sending, Receiving, Acknowledging Packets
+
+Terminology:
+**Packet Commitment** A hash of the packet stored on the sending chain.
+**Packet Receipt** A single bit indicating that a packet has been received.
+Used for timeouts.
+**Acknowledgement** Data written to indicate the result of receiving a packet.
+Typically conveying either success or failure of the receive.
+
+A packet may be associated with one of the following states:
+
+- the packet does not exist (i.e. it has not been sent)
+- the packet has been sent but not received (the packet commitment exists on the
+sending chain, but no receipt exists on the receiving chain)
+- the packet has been received but not acknowledged (packet commitment exists
+on the sending chain, a receipt exists on the receiving chain, but no acknowledgement
+exists on the receiving chain)
+- the packet has been acknowledged but the acknowledgement has not been relayed
+(the packet commitment exists on the sending chain, the receipt and acknowledgement
+exist on the receiving chain)
+- the packet has completed its life cycle (the packet commitment does not exist on
+the sending chain, but a receipt and acknowledgement exist on the receiving chain)
+
+Sending of a packet is initiated by a call to the `ChannelKeeper.SendPacket`
+function by an application module. Packets being sent will be verified for
+correctness (core logic only). If the packet is valid, a hash of the packet
+will be stored as a packet commitment using the packet sequence in the key.
+Packet commitments are stored on the sending chain.
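+
+A rough sketch of this storage step. The key layout follows the state table in
+`02_state.md`, while the exact set of fields hashed into the commitment is an assumption of
+this sketch rather than the canonical commitment format; only the standard library `fmt`,
+`encoding/binary`, and `crypto/sha256` packages are used:
+
+```go
+// packetCommitmentKey builds the store key for a packet commitment, using the
+// packet sequence as described above.
+func packetCommitmentKey(portID, channelID string, sequence uint64) string {
+    return fmt.Sprintf("commitments/ports/%s/channels/%s/sequences/%d", portID, channelID, sequence)
+}
+
+// commitPacket returns a hash derived from the packet; the sending chain stores
+// this hash rather than the full packet data. The hashed fields shown here
+// (timeout timestamp and data) are illustrative only.
+func commitPacket(data []byte, timeoutTimestamp uint64) []byte {
+    buf := make([]byte, 8)
+    binary.BigEndian.PutUint64(buf, timeoutTimestamp)
+    hash := sha256.Sum256(append(buf, data...))
+    return hash[:]
+}
+```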
+
+A message should be sent to the receiving chain indicating that the packet
+has been committed on the sending chain and should be received on the
+receiving chain. The light client on the receiving chain, which verifies
+the sending chain's state, should be updated to the latest sending chain
+state if possible. The verification will fail if the latest state of the
+light client does not include the packet commitment. The receiving chain
+is responsible for verifying that the counterparty set the hash of the
+packet. If verification of the packet to be received is successful, the
+receiving chain should store a receipt of the packet and call application
+logic if necessary. An acknowledgement may be processed and stored at this time (synchronously)
+or at another point in the future (asynchronously).
+
+Acknowledgements written on the receiving chain may be verified on the
+sending chain. If the sending chain successfully verifies the acknowledgement
+then it may delete the packet commitment stored at that sequence. There is
+no requirement for acknowledgements to be written. Only the hash of the
+acknowledgement is stored on the chain. Application logic may be executed
+in conjunction with verifying an acknowledgement. For example, in fungible
+cross-chain token transfer, a failed acknowledgement results in locked or
+burned funds being refunded.
+
+Relayers are responsible for reconstructing packets between the sending,
+receiving, and acknowledging steps.
+
+IBC applications sending and receiving packets are expected to appropriately
+handle data contained within a packet. For example, cross-chain token
+transfers will unmarshal the data into proto definitions representing
+a token transfer.
+
+Future optimizations may allow for storage cleanup. Stored packet
+commitments could be removed from channels which do not write
+packet acknowledgements and acknowledgements could be removed
+when a packet has completed its life cycle.
+
+## Timing out Packets
+
+A packet may be timed out on the sending chain if the packet timeout height or timestamp has
+been surpassed on the receiving chain or the channel has closed. A timed out
+packet can only occur if the packet has never been received on the receiving
+chain. ORDERED channels will verify that the packet sequence is greater than
+the `NextSequenceRecv` on the receiving chain. UNORDERED channels will verify
+that the packet receipt has not been written on the receiving chain. A timeout
+on channel closure will additionally verify that the counterparty channel has
+been closed. A successful timeout may execute application logic as appropriate.
+
+Both the packet's timeout timestamp and the timeout height must have been
+surpassed on the receiving chain for a timeout to be valid. A timeout timestamp
+or timeout height with a 0 value indicates the timeout field may be ignored.
+Each packet is required to have at least one valid timeout field.
+
+## Closing Channels
+
+Closing a channel occurs in 2 handshake steps as defined in [ICS 04](https://github.com/cosmos/ics/tree/master/spec/ics-004-channel-and-packet-semantics).
+
+`ChanCloseInit` will close a channel on the executing chain if the channel exists, it is not
+already closed and the connection it exists upon is OPEN. Channels can only be closed by a
+calling module or in the case of a packet timeout on an ORDERED channel.
+
+`ChanCloseConfirm` is a response to a counterparty channel executing `ChanCloseInit`. The channel
+on the executing chain will be closed if the channel exists, the channel is not already closed,
+the connection the channel exists upon is OPEN and the executing chain successfully verifies
+that the counterparty channel has been closed.
+
+## Port and Channel Capabilities
+
+## Hostname Validation
+
+Hostname validation is implemented as defined in [ICS 24](https://github.com/cosmos/ics/tree/master/spec/ics-024-host-requirements).
+
+The 24-host sub-module parses and validates identifiers. It also builds
+the key paths used to store IBC related information.
+
+A valid identifier must contain only alphanumeric characters or the
+following list of allowed characters (see the validation sketch after the length requirements below):
+".", "\_", "+", "-", "#", "[", "]", "<", ">"
+
+- Client identifiers must contain between 9 and 64 characters.
+- Connection identifiers must contain between 10 and 64 characters.
+- Channel identifiers must contain between 10 and 64 characters.
+- Port identifiers must contain between 2 and 64 characters.
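+
+A minimal validation sketch of the rules above; it is illustrative and not the 24-host
+implementation itself (only the standard library `fmt` and `strings` packages are used):
+
+```go
+// defaultIdentifierValidator checks that an identifier contains only the
+// characters listed above and that its length falls within [min, max].
+func defaultIdentifierValidator(id string, min, max int) error {
+    if len(id) < min || len(id) > max {
+        return fmt.Errorf("identifier %s has %d characters, expected between %d and %d", id, len(id), min, max)
+    }
+    for _, c := range id {
+        isAlphanumeric := (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9')
+        if !isAlphanumeric && !strings.ContainsRune("._+-#[]<>", c) {
+            return fmt.Errorf("identifier %s contains invalid character %c", id, c)
+        }
+    }
+    return nil
+}
+
+// Per the list above, client identifiers would be checked with
+// defaultIdentifierValidator(id, 9, 64), ports with (id, 2, 64), and so on.
+```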
+
+## Proofs
+
+Proofs for counterparty state validation are provided as bytes. These bytes
+can be unmarshaled into proto definitions as necessary by light clients.
+For example, the Tendermint light client will use the bytes as a merkle
+proof whereas the solo machine client will unmarshal the proof into
+several layers of proto definitions used for signature verification.
diff --git a/core/spec/02_state.md b/core/spec/02_state.md
new file mode 100644
index 00000000..2c85a525
--- /dev/null
+++ b/core/spec/02_state.md
@@ -0,0 +1,28 @@
+
+
+# State
+
+The paths for the values stored in state are defined [here](https://github.com/cosmos/ics/blob/master/spec/ics-024-host-requirements/README.md#path-space).
+Additionally, the SDK adds a prefix to the path to be able to aggregate the values for querying purposes.
+The client type is not stored since it can be obtained through the client state.
+
+| Prefix | Path | Value type |
+|--------|-----------------------------------------------------------------------------|----------------|
+| "0/" | "clients/{identifier}/clientState" | ClientState |
+| "0/" | "clients/{identifier}/consensusStates/{height}" | ConsensusState |
+| "0/" | "clients/{identifier}/connections" | []string |
+| "0/" | "nextClientSequence | uint64 |
+| "0/" | "connections/{identifier}" | ConnectionEnd |
+| "0/" | "nextConnectionSequence" | uint64 |
+| "0/" | "ports/{identifier}" | CapabilityKey |
+| "0/" | "channelEnds/ports/{identifier}/channels/{identifier}" | ChannelEnd |
+| "0/" | "nextChannelSequence" | uint64 |
+| "0/" | "capabilities/ports/{identifier}/channels/{identifier}" | CapabilityKey |
+| "0/" | "nextSequenceSend/ports/{identifier}/channels/{identifier}" | uint64 |
+| "0/" | "nextSequenceRecv/ports/{identifier}/channels/{identifier}" | uint64 |
+| "0/" | "nextSequenceAck/ports/{identifier}/channels/{identifier}" | uint64 |
+| "0/" | "commitments/ports/{identifier}/channels/{identifier}/sequences/{sequence}" | bytes |
+| "0/" | "receipts/ports/{identifier}/channels/{identifier}/sequences/{sequence}" | bytes |
+| "0/" | "acks/ports/{identifier}/channels/{identifier}/sequences/{sequence}" | bytes |
diff --git a/core/spec/03_state_transitions.md b/core/spec/03_state_transitions.md
new file mode 100644
index 00000000..518ff924
--- /dev/null
+++ b/core/spec/03_state_transitions.md
@@ -0,0 +1,106 @@
+
+
+# State Transitions
+
+The described state transitions assume successful message execution.
+
+## Create Client
+
+`MsgCreateClient` will initialize and store a `ClientState` and `ConsensusState` in the sub-store
+created using a generated client identifier.
+
+## Update Client
+
+`MsgUpdateClient` will update the `ClientState` and create a new `ConsensusState` for the
+update height.
+
+## Misbehaviour
+
+`MsgSubmitMisbehaviour` will freeze a client.
+
+## Upgrade Client
+
+`MsgUpgradeClient` will upgrade the `ClientState` and `ConsensusState` to the updated chain level
+parameters and, if applicable, will update to the new light client implementation.
+
+## Client Update Proposal
+
+An Update Client Proposal will unfreeze a client (if necessary) and set an updated `ClientState`.
+The light client may make optional modifications to the client prefixed store of the subject client
+including copying `ConsensusStates` from the substitute to the subject.
+
+## Connection Open Init
+
+`MsgConnectionOpenInit` will initialize a connection state in INIT.
+
+## Connection Open Try
+
+`MsgConnectionOpenTry` will initialize or update a connection state to be in TRYOPEN.
+
+## Connection Open Ack
+
+`MsgConnectionOpenAck` will update a connection state from INIT or TRYOPEN to be in OPEN.
+
+## Connection Open Confirm
+
+`MsgConnectionOpenConfirm` will update a connection state from TRYOPEN to OPEN.
+
+## Channel Open Init
+
+`MsgChannelOpenInit` will initialize a channel state in INIT. It will create a channel capability
+and set all Send, Receive and Ack Sequences to 1 for the channel.
+
+## Channel Open Try
+
+`MsgChannelOpenTry` will initialize or update a channel state to be in TRYOPEN. If the channel
+is being initialized, it will create a channel capability and set all Send, Receive and Ack
+Sequences to 1 for the channel.
+
+## Channel Open Ack
+
+`MsgChannelOpenAck` will update the channel state to OPEN. It will set the version and channel
+identifier for its counterparty.
+
+## Channel Open Confirm
+
+`MsgChannelOpenConfirm` will update the channel state to OPEN.
+
+## Channel Close Init
+
+`MsgChannelCloseInit` will update the channel state to CLOSED.
+
+## Channel Close Confirm
+
+`MsgChannelCloseConfirm` will update the channel state to CLOSED.
+
+## Send Packet
+
+An application calling `ChannelKeeper.SendPacket` will increment the next sequence send and set
+a hash of the packet as the packet commitment.
+
+## Receive Packet
+
+`MsgRecvPacket` will increment the next sequence receive for ORDERED channels and set a packet
+receipt for UNORDERED channels.
+
+## Write Acknowledgement
+
+`WriteAcknowledgement` may be executed synchronously during the execution of `MsgRecvPacket` or
+asynchronously by an application module. It writes an acknowledgement to the store.
+
+## Acknowledge Packet
+
+`MsgAcknowledgePacket` deletes the packet commitment and, for ORDERED channels, increments the
+next sequence ack.
+
+## Timeout Packet
+
+`MsgTimeoutPacket` deletes the packet commitment and for ORDERED channels sets the channel state
+to CLOSED.
+
+## Timeout Packet on Channel Closure
+
+`MsgTimeoutOnClose` deletes the packet commitment and for ORDERED channels sets the channel state
+to CLOSED.
diff --git a/core/spec/04_messages.md b/core/spec/04_messages.md
new file mode 100644
index 00000000..3728e6d6
--- /dev/null
+++ b/core/spec/04_messages.md
@@ -0,0 +1,497 @@
+
+
+# Messages
+
+In this section we describe the processing of the IBC messages and the corresponding updates to the state.
+
+## ICS 02 - Client
+
+### MsgCreateClient
+
+A light client is created using the `MsgCreateClient`.
+
+```go
+type MsgCreateClient struct {
+ ClientState *types.Any // proto-packed client state
+ ConsensusState *types.Any // proto-packed consensus state
+ Signer sdk.AccAddress
+}
+```
+
+This message is expected to fail if:
+
+- `ClientState` is empty or invalid
+- `ConsensusState` is empty or invalid
+- `Signer` is empty
+
+The message creates and stores a light client with an initial consensus state using a generated client
+identifier.
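+
+A hedged sketch of filling in the proto-packed fields above, assuming `clientState` and
+`consensusState` are concrete proto messages implementing the client and consensus state
+interfaces; `codectypes.NewAnyWithValue` from the SDK codec types package performs the
+packing, and the constructor name is an assumption of this sketch:
+
+```go
+// newMsgCreateClient is an illustrative constructor packing the concrete states
+// into protobuf Any values before building the message.
+func newMsgCreateClient(clientState, consensusState proto.Message, signer sdk.AccAddress) (*MsgCreateClient, error) {
+    anyClientState, err := codectypes.NewAnyWithValue(clientState)
+    if err != nil {
+        return nil, err
+    }
+    anyConsensusState, err := codectypes.NewAnyWithValue(consensusState)
+    if err != nil {
+        return nil, err
+    }
+    return &MsgCreateClient{
+        ClientState:    anyClientState,
+        ConsensusState: anyConsensusState,
+        Signer:         signer,
+    }, nil
+}
+```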
+
+### MsgUpdateClient
+
+A light client is updated with a new header using the `MsgUpdateClient`.
+
+```go
+type MsgUpdateClient struct {
+ ClientId string
+ Header *types.Any // proto-packed header
+ Signer sdk.AccAddress
+}
+```
+
+This message is expected to fail if:
+
+- `ClientId` is invalid (not alphanumeric or not within 10-20 characters)
+- `Header` is empty or invalid
+- `Signer` is empty
+- A `ClientState` hasn't been created for the given ID
+- The client is frozen due to misbehaviour and cannot be updated
+- The header fails to provide a valid update for the client
+
+The message validates the header and updates the client state and consensus state for the
+header height.
+
+### MsgUpgradeClient
+```go
+type MsgUpgradeClient struct {
+ ClientId string
+ ClientState *types.Any // proto-packed client state
+ UpgradeHeight *Height
+ ProofUpgrade []byte
+ Signer string
+}
+```
+
+This message is expected to fail if:
+
+- `ClientId` is invalid (not alphanumeric or not within 10-20 characters)
+- `ClientState` is empty or invalid
+- `UpgradeHeight` is empty or zero
+- `ProofUpgrade` is empty
+- `Signer` is empty
+- A `ClientState` hasn't been created for the given ID
+- The client is frozen due to misbehaviour and cannot be upgraded
+- The upgrade proof fails
+
+The message upgrades the client state and consensus state upon successful validation of a
+chain upgrade.
+
+### MsgSubmitMisbehaviour
+
+Submit evidence of light client misbehaviour to freeze the client state and prevent additional packets from being relayed.
+
+```go
+type MsgSubmitMisbehaviour struct {
+ ClientId string
+ Misbehaviour *types.Any // proto-packed misbehaviour
+ Signer sdk.AccAddress
+}
+```
+
+This message is expected to fail if:
+
+- `ClientId` is invalid (not alphanumeric or not within 10-20 characters)
+- `Misbehaviour` is empty or invalid
+- `Signer` is empty
+- A `ClientState` hasn't been created for the given ID
+- `Misbehaviour` check failed
+
+The message verifies the misbehaviour and freezes the client.
+
+## ICS 03 - Connection
+
+### MsgConnectionOpenInit
+
+A connection is initialized on a light client using the `MsgConnectionOpenInit`.
+
+```go
+type MsgConnectionOpenInit struct {
+ ClientId string
+ Counterparty Counterparty
+ Version string
+ Signer sdk.AccAddress
+}
+```
+
+This message is expected to fail if:
+
+- `ClientId` is invalid (see naming requirements)
+- `Counterparty` is empty
+- `Version` is not empty and invalid
+- `Signer` is empty
+- A Client hasn't been created for the given ID
+- A Connection for the given ID already exists
+
+The message creates a connection for the given ID with an INIT state.
+
+### MsgConnectionOpenTry
+
+When a counterparty connection is initialized then a connection is initialized on a light client
+using the `MsgConnectionOpenTry`.
+
+```go
+type MsgConnectionOpenTry struct {
+ ClientId string
+ PreviousConnectionId string
+ ClientState *types.Any // proto-packed counterparty client
+ Counterparty Counterparty
+ CounterpartyVersions []string
+ ProofHeight Height
+ ProofInit []byte
+ ProofClient []byte
+ ProofConsensus []byte
+ ConsensusHeight Height
+ Signer sdk.AccAddress
+}
+```
+
+This message is expected to fail if:
+
+- `ClientId` is invalid (see naming requirements)
+- `PreviousConnectionId` is not empty and invalid (see naming requirements)
+- `ClientState` is not a valid client of the executing chain
+- `Counterparty` is empty
+- `CounterpartyVersions` is empty
+- `ProofHeight` is zero
+- `ProofInit` is empty
+- `ProofClient` is empty
+- `ProofConsensus` is empty
+- `ConsensusHeight` is zero
+- `Signer` is empty
+- A Client hasn't been created for the given ID
+- If a previous connection exists but does not match the supplied parameters.
+- `ProofInit` does not prove that the counterparty connection is in state INIT
+- `ProofClient` does not prove that the counterparty has stored the `ClientState` provided in message
+- `ProofConsensus` does not prove that the counterparty has the correct consensus state for this chain
+
+The message creates a connection for a generated connection ID with a TRYOPEN state. If a previous
+connection already exists, it updates the connection state from INIT to TRYOPEN.
+
+### MsgConnectionOpenAck
+
+When a counterparty connection is initialized then a connection is opened on a light client
+using the `MsgConnectionOpenAck`.
+
+```go
+type MsgConnectionOpenAck struct {
+ ConnectionId string
+ CounterpartyConnectionId string
+ Version string
+ ClientState *types.Any // proto-packed counterparty client
+ ProofHeight Height
+ ProofTry []byte
+ ProofClient []byte
+ ProofConsensus []byte
+ ConsensusHeight Height
+ Signer sdk.AccAddress
+}
+```
+
+This message is expected to fail if:
+
+- `ConnectionId` is invalid (see naming requirements)
+- `CounterpartyConnectionId` is invalid (see naming requirements)
+- `Version` is empty
+- `ClientState` is not a valid client of the executing chain
+- `ProofHeight` is zero
+- `ProofTry` is empty
+- `ProofClient` is empty
+- `ProofConsensus` is empty
+- `ConsensusHeight` is zero
+- `Signer` is empty
+- `ProofTry` does not prove that the counterparty connection is in state TRYOPEN
+- `ProofClient` does not prove that the counterparty has stored the `ClientState` provided by message
+- `ProofConsensus` does not prove that the counterparty has the correct consensus state for this chain
+
+The message sets the connection state for the given ID to OPEN. `CounterpartyConnectionId`
+should be the `ConnectionId` used by the counterparty connection.
+
+### MsgConnectionOpenConfirm
+
+When a counterparty connection is opened then a connection is opened on a light client using
+the `MsgConnectionOpenConfirm`.
+
+```go
+type MsgConnectionOpenConfirm struct {
+ ConnectionId string
+ ProofAck []byte
+ ProofHeight Height
+ Signer sdk.AccAddress
+}
+```
+
+This message is expected to fail if:
+
+- `ConnectionId` is invalid (see naming requirements)
+- `ProofAck` is empty
+- `ProofHeight` is zero
+- `Signer` is empty
+- A Connection with the given ID does not exist
+- `ProofAck` does not prove that the counterparty connection is in state OPEN
+
+The message sets the connection state for the given ID to OPEN.
+
+## ICS 04 - Channels
+
+### MsgChannelOpenInit
+
+A channel handshake is initiated by a chain A using the `MsgChannelOpenInit`
+message.
+
+```go
+type MsgChannelOpenInit struct {
+ PortId string
+ Channel Channel
+ Signer sdk.AccAddress
+}
+```
+
+This message is expected to fail if:
+
+- `PortId` is invalid (see naming requirements)
+- `Channel` is empty
+- `Signer` is empty
+- A Channel End exists for the given Channel ID and Port ID
+
+The message creates a channel on chain A in the INIT state for a generated Channel ID
+and the given Port ID.
+
+### MsgChannelOpenTry
+
+A channel handshake initialization attempt is acknowledged by a chain B using
+the `MsgChannelOpenTry` message.
+
+```go
+type MsgChannelOpenTry struct {
+ PortId string
+ PreviousChannelId string
+ Channel Channel
+ CounterpartyVersion string
+ ProofInit []byte
+ ProofHeight Height
+ Signer sdk.AccAddress
+}
+```
+
+This message is expected to fail if:
+
+- `PortId` is invalid (see naming requirements)
+- `PreviousChannelId` is not empty and invalid (see naming requirements)
+- `Channel` is empty
+- `CounterpartyVersion` is empty
+- `ProofInit` is empty
+- `ProofHeight` is zero
+- `Signer` is empty
+- A previous channel exists and does not match the provided parameters.
+- `ProofInit` does not prove that the counterparty's Channel state is in INIT
+
+The message creates a channel on chain B in the TRYOPEN state using a generated Channel ID
+and the given Port ID if the previous channel does not already exist. Otherwise it updates the
+previous channel state from INIT to TRYOPEN.
+
+
+### MsgChannelOpenAck
+
+A channel handshake is opened by a chain A using the `MsgChannelOpenAck` message.
+
+```go
+type MsgChannelOpenAck struct {
+ PortId string
+ ChannelId string
+ CounterpartyChannelId string
+ CounterpartyVersion string
+ ProofTry []byte
+ ProofHeight Height
+ Signer sdk.AccAddress
+}
+```
+
+This message is expected to fail if:
+
+- `PortId` is invalid (see naming requirements)
+- `ChannelId` is invalid (see naming requirements)
+- `CounterpartyChannelId` is invalid (see naming requirements)
+- `CounterpartyVersion` is empty
+- `ProofTry` is empty
+- `ProofHeight` is zero
+- `Signer` is empty
+- `ProofTry` does not prove that the counterparty's Channel state is in TRYOPEN
+
+The message sets a channel on chain A to state OPEN for the given Channel ID and Port ID.
+`CounterpartyChannelId` should be the `ChannelId` used by the counterparty channel.
+
+### MsgChannelOpenConfirm
+
+A channel handshake is confirmed and opened by a chain B using the `MsgChannelOpenConfirm`
+message.
+
+```go
+type MsgChannelOpenConfirm struct {
+ PortId string
+ ChannelId string
+ ProofAck []byte
+ ProofHeight Height
+ Signer sdk.AccAddress
+}
+```
+
+This message is expected to fail if:
+
+- `PortId` is invalid (see naming requirements)
+- `ChannelId` is invalid (see naming requirements)
+- `ProofAck` is empty
+- `ProofHeight` is zero
+- `Signer` is empty
+- `ProofAck` does not prove that the counterparty's Channel state is in OPEN
+
+The message sets a channel on chain B to state OPEN for the given Channel ID and Port ID.
+
+### MsgChannelCloseInit
+
+A channel is closed on chain A using the `MsgChannelCloseInit`.
+
+```go
+type MsgChannelCloseInit struct {
+ PortId string
+ ChannelId string
+ Signer sdk.AccAddress
+}
+```
+
+This message is expected to fail if:
+
+- `PortId` is invalid (see naming requirements)
+- `ChannelId` is invalid (see naming requirements)
+- `Signer` is empty
+- A Channel for the given Port ID and Channel ID does not exist or is already closed
+
+The message closes a channel on chain A for the given Port ID and Channel ID.
+
+### MsgChannelCloseConfirm
+
+A channel is closed on chain B using the `MsgChannelCloseConfirm`.
+
+```go
+type MsgChannelCloseConfirm struct {
+ PortId string
+ ChannelId string
+ ProofInit []byte
+ ProofHeight Height
+ Signer sdk.AccAddress
+}
+```
+
+This message is expected to fail if:
+
+- `PortId` is invalid (see naming requirements)
+- `ChannelId` is invalid (see naming requirements)
+- `ProofInit` is empty
+- `ProofHeight` is zero
+- `Signer` is empty
+- A Channel for the given Port ID and Channel ID does not exist or is already closed
+- `ProofInit` does not prove that the counterparty set its channel to state CLOSED
+
+The message closes a channel on chain B for the given Port ID and Channel ID.
+
+### MsgRecvPacket
+
+A packet is received on chain B using the `MsgRecvPacket`.
+
+```go
+type MsgRecvPacket struct {
+ Packet Packet
+ Proof []byte
+ ProofHeight Height
+ Signer sdk.AccAddress
+}
+```
+
+This message is expected to fail if:
+
+- `Proof` is empty
+- `ProofHeight` is zero
+- `Signer` is empty
+- `Packet` fails basic validation
+- `Proof` does not prove that the counterparty sent the `Packet`.
+
+The message receives a packet on chain B.
+
+### MsgTimeout
+
+A packet is timed out on chain A using the `MsgTimeout`.
+
+```go
+type MsgTimeout struct {
+ Packet Packet
+ Proof []byte
+ ProofHeight Height
+ NextSequenceRecv uint64
+ Signer sdk.AccAddress
+}
+```
+
+This message is expected to fail if:
+
+- `Proof` is empty
+- `ProofHeight` is zero
+- `NextSequenceRecv` is zero
+- `Signer` is empty
+- `Packet` fails basic validation
+- `Proof` does not prove that the packet has not been received on the counterparty chain.
+
+The message times out a packet that was sent on chain A and never received on chain B.
+
+### MsgTimeoutOnClose
+
+A packet is timed out on chain A due to the closure of the channel end on chain B using
+the `MsgTimeoutOnClose`.
+
+```go
+type MsgTimeoutOnClose struct {
+ Packet Packet
+ Proof []byte
+ ProofClose []byte
+ ProofHeight Height
+ NextSequenceRecv uint64
+ Signer sdk.AccAddress
+}
+```
+
+This message is expected to fail if:
+
+- `Proof` is empty
+- `ProofClose` is empty
+- `ProofHeight` is zero
+- `NextSequenceRecv` is zero
+- `Signer` is empty
+- `Packet` fails basic validation
+- `Proof` does not prove that the packet has not been received on the counterparty chain.
+- `ProofClose` does not prove that the counterparty channel end has been closed.
+
+The message times out a packet that was sent on chain A and never received on chain B.
+
+### MsgAcknowledgement
+
+A packet is acknowledged on chain A using the `MsgAcknowledgement`.
+
+```go
+type MsgAcknowledgement struct {
+ Packet Packet
+ Acknowledgement []byte
+ Proof []byte
+ ProofHeight Height
+ Signer sdk.AccAddress
+}
+```
+
+This message is expected to fail if:
+
+- `Proof` is empty
+- `ProofHeight` is zero
+- `Signer` is empty
+- `Packet` fails basic validation
+- `Acknowledgement` is empty
+- `Proof` does not prove that the counterparty received the `Packet`.
+
+The message acknowledges that the packet sent from chain A was received on chain B.
diff --git a/core/spec/05_callbacks.md b/core/spec/05_callbacks.md
new file mode 100644
index 00000000..dd747380
--- /dev/null
+++ b/core/spec/05_callbacks.md
@@ -0,0 +1,80 @@
+
+
+# Callbacks
+
+Application modules implementing the IBC module must implement the following callbacks as found in [05-port](../05-port/types/module.go).
+More information on how to implement these callbacks can be found in the [implementation guide](../../../../docs/ibc/custom.md).
+
+```go
+// IBCModule defines an interface that implements all the callbacks
+// that modules must define as specified in ICS-26
+type IBCModule interface {
+ OnChanOpenInit(
+ ctx sdk.Context,
+ order channeltypes.Order,
+ connectionHops []string,
+ portId string,
+ channelId string,
+ channelCap *capability.Capability,
+ counterparty channeltypes.Counterparty,
+ version string,
+ ) error
+
+ OnChanOpenTry(
+ ctx sdk.Context,
+ order channeltypes.Order,
+ connectionHops []string,
+ portId,
+ channelId string,
+ channelCap *capability.Capability,
+ counterparty channeltypes.Counterparty,
+ version,
+ counterpartyVersion string,
+ ) error
+
+ OnChanOpenAck(
+ ctx sdk.Context,
+ portId,
+ channelId string,
+ counterpartyVersion string,
+ ) error
+
+ OnChanOpenConfirm(
+ ctx sdk.Context,
+ portId,
+ channelId string,
+ ) error
+
+ OnChanCloseInit(
+ ctx sdk.Context,
+ portId,
+ channelId string,
+ ) error
+
+ OnChanCloseConfirm(
+ ctx sdk.Context,
+ portId,
+ channelId string,
+ ) error
+
+ // OnRecvPacket must return the acknowledgement bytes
+ // In the case of an asynchronous acknowledgement, nil should be returned.
+ OnRecvPacket(
+ ctx sdk.Context,
+ packet channeltypes.Packet,
+ ) (*sdk.Result, []byte, error)
+
+ OnAcknowledgementPacket(
+ ctx sdk.Context,
+ packet channeltypes.Packet,
+ acknowledgement []byte,
+ ) (*sdk.Result, error)
+
+ OnTimeoutPacket(
+ ctx sdk.Context,
+ packet channeltypes.Packet,
+ ) (*sdk.Result, error)
+}
+```
diff --git a/core/spec/06_events.md b/core/spec/06_events.md
new file mode 100644
index 00000000..528a30cf
--- /dev/null
+++ b/core/spec/06_events.md
@@ -0,0 +1,241 @@
+
+
+# Events
+
+The IBC module emits the following events. It can be expected that the type `message`,
+with an attribute key of `action`, will represent the first event for each message
+being processed as emitted by the SDK's baseapp. Each IBC TAO message will
+also emit its module name in the format `ibc_sub-modulename`.
+
+All the events for the Channel handshakes, `SendPacket`, `RecvPacket`, `AcknowledgePacket`,
+`TimeoutPacket` and `TimeoutOnClose` will emit additional events not specified here due to
+callbacks to IBC applications.
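+
+For illustration, a sketch of how such events could be emitted from a handler with the SDK
+event manager. The attribute keys follow the tables below, and the surrounding context
+(`ctx`, `clientID`, `clientType`, `consensusHeight`) is assumed:
+
+```go
+ctx.EventManager().EmitEvents(sdk.Events{
+    sdk.NewEvent(
+        "create_client",
+        sdk.NewAttribute("client_id", clientID),
+        sdk.NewAttribute("client_type", clientType),
+        sdk.NewAttribute("consensus_height", consensusHeight.String()),
+    ),
+    sdk.NewEvent(
+        sdk.EventTypeMessage,
+        sdk.NewAttribute(sdk.AttributeKeyModule, "ibc_client"),
+    ),
+})
+```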
+
+## ICS 02 - Client
+
+### MsgCreateClient
+
+| Type | Attribute Key | Attribute Value |
+|---------------|------------------|-------------------|
+| create_client | client_id | {clientId} |
+| create_client | client_type | {clientType} |
+| create_client | consensus_height | {consensusHeight} |
+| message | action | create_client |
+| message | module | ibc_client |
+
+### MsgUpdateClient
+
+| Type | Attribute Key | Attribute Value |
+|---------------|------------------|-------------------|
+| update_client | client_id | {clientId} |
+| update_client | client_type | {clientType} |
+| update_client | consensus_height | {consensusHeight} |
+| message | action | update_client |
+| message | module | ibc_client |
+
+### MsgSubmitMisbehaviour
+
+| Type | Attribute Key | Attribute Value |
+|---------------------|------------------|---------------------|
+| client_misbehaviour | client_id | {clientId} |
+| client_misbehaviour | client_type | {clientType} |
+| client_misbehaviour | consensus_height | {consensusHeight} |
+| message | action | client_misbehaviour |
+| message | module | evidence |
+| message | sender | {senderAddress} |
+| submit_evidence | evidence_hash | {evidenceHash} |
+
+### UpdateClientProposal
+
+| Type | Attribute Key | Attribute Value |
+|------------------------|------------------|-------------------|
+| update_client_proposal | client_id | {clientId} |
+| update_client_proposal | client_type | {clientType} |
+| update_client_proposal | consensus_height | {consensusHeight} |
+
+
+
+## ICS 03 - Connection
+
+### MsgConnectionOpenInit
+
+| Type | Attribute Key | Attribute Value |
+|----------------------|----------------------------|-----------------------------|
+| connection_open_init | connection_id | {connectionId} |
+| connection_open_init | client_id | {clientId} |
+| connection_open_init | counterparty_client_id | {counterparty.clientId} |
+| message | action | connection_open_init |
+| message | module | ibc_connection |
+
+### MsgConnectionOpenTry
+
+| Type | Attribute Key | Attribute Value |
+|---------------------|----------------------------|-----------------------------|
+| connection_open_try | connection_id | {connectionId} |
+| connection_open_try | client_id | {clientId} |
+| connection_open_try | counterparty_client_id     | {counterparty.clientId}     |
+| connection_open_try | counterparty_connection_id | {counterparty.connectionId} |
+| message | action | connection_open_try |
+| message | module | ibc_connection |
+
+### MsgConnectionOpenAck
+
+| Type | Attribute Key | Attribute Value |
+|----------------------|----------------------------|-----------------------------|
+| connection_open_ack | connection_id | {connectionId} |
+| connection_open_ack | client_id | {clientId} |
+| connection_open_ack | counterparty_client_id | {counterparty.clientId} |
+| connection_open_ack | counterparty_connection_id | {counterparty.connectionId} |
+| message | module | ibc_connection |
+| message | action | connection_open_ack |
+
+### MsgConnectionOpenConfirm
+
+| Type | Attribute Key | Attribute Value |
+|-------------------------|----------------------------|-----------------------------|
+| connection_open_confirm | connection_id | {connectionId} |
+| connection_open_confirm | client_id | {clientId} |
+| connection_open_confirm | counterparty_client_id | {counterparty.clientId} |
+| connection_open_confirm | counterparty_connection_id | {counterparty.connectionId} |
+| message | action | connection_open_confirm |
+| message | module | ibc_connection |
+
+## ICS 04 - Channel
+
+### MsgChannelOpenInit
+
+| Type | Attribute Key | Attribute Value |
+|-------------------|-------------------------|----------------------------------|
+| channel_open_init | port_id | {portId} |
+| channel_open_init | channel_id | {channelId} |
+| channel_open_init | counterparty_port_id | {channel.counterparty.portId} |
+| channel_open_init | connection_id | {channel.connectionHops} |
+| message | action | channel_open_init |
+| message | module | ibc_channel |
+
+### MsgChannelOpenTry
+
+| Type | Attribute Key | Attribute Value |
+|------------------|-------------------------|----------------------------------|
+| channel_open_try | port_id | {portId} |
+| channel_open_try | channel_id | {channelId} |
+| channel_open_try | counterparty_port_id | {channel.counterparty.portId} |
+| channel_open_try | counterparty_channel_id | {channel.counterparty.channelId} |
+| channel_open_try | connection_id | {channel.connectionHops} |
+| message | action | channel_open_try |
+| message | module | ibc_channel |
+
+### MsgChannelOpenAck
+
+| Type | Attribute Key | Attribute Value |
+|------------------|-------------------------|----------------------------------|
+| channel_open_ack | port_id | {portId} |
+| channel_open_ack | channel_id | {channelId} |
+| channel_open_ack | counterparty_port_id | {channel.counterparty.portId} |
+| channel_open_ack | counterparty_channel_id | {channel.counterparty.channelId} |
+| channel_open_ack | connection_id | {channel.connectionHops} |
+| message | action | channel_open_ack |
+| message | module | ibc_channel |
+
+### MsgChannelOpenConfirm
+
+| Type | Attribute Key | Attribute Value |
+|----------------------|-------------------------|----------------------------------|
+| channel_open_confirm | port_id | {portId} |
+| channel_open_confirm | channel_id | {channelId} |
+| channel_open_confirm | counterparty_port_id | {channel.counterparty.portId} |
+| channel_open_confirm | counterparty_channel_id | {channel.counterparty.channelId} |
+| channel_open_confirm | connection_id | {channel.connectionHops} |
+| message | module | ibc_channel |
+| message | action | channel_open_confirm |
+
+### MsgChannelCloseInit
+
+| Type | Attribute Key | Attribute Value |
+|--------------------|-------------------------|----------------------------------|
+| channel_close_init | port_id | {portId} |
+| channel_close_init | channel_id | {channelId} |
+| channel_close_init | counterparty_port_id | {channel.counterparty.portId} |
+| channel_close_init | counterparty_channel_id | {channel.counterparty.channelId} |
+| channel_close_init | connection_id | {channel.connectionHops} |
+| message | action | channel_close_init |
+| message | module | ibc_channel |
+
+### MsgChannelCloseConfirm
+
+| Type | Attribute Key | Attribute Value |
+|-----------------------|-------------------------|----------------------------------|
+| channel_close_confirm | port_id | {portId} |
+| channel_close_confirm | channel_id | {channelId} |
+| channel_close_confirm | counterparty_port_id | {channel.counterparty.portId} |
+| channel_close_confirm | counterparty_channel_id | {channel.counterparty.channelId} |
+| channel_close_confirm | connection_id | {channel.connectionHops} |
+| message | action | channel_close_confirm |
+| message | module | ibc_channel |
+
+### SendPacket (application module call)
+
+| Type | Attribute Key | Attribute Value |
+|-------------|--------------------------|----------------------------------|
+| send_packet | packet_data | {data} |
+| send_packet | packet_timeout_height | {timeoutHeight} |
+| send_packet | packet_timeout_timestamp | {timeoutTimestamp} |
+| send_packet | packet_sequence | {sequence} |
+| send_packet | packet_src_port | {sourcePort} |
+| send_packet | packet_src_channel | {sourceChannel} |
+| send_packet | packet_dst_port | {destinationPort} |
+| send_packet | packet_dst_channel | {destinationChannel} |
+| send_packet | packet_channel_ordering | {channel.Ordering} |
+| message | action | application-module-defined-field |
+| message | module | ibc-channel |
+
+### MsgRecvPacket
+
+| Type | Attribute Key | Attribute Value |
+|-------------|--------------------------|----------------------|
+| recv_packet | packet_data | {data} |
+| recv_packet | packet_ack | {acknowledgement} |
+| recv_packet | packet_timeout_height | {timeoutHeight} |
+| recv_packet | packet_timeout_timestamp | {timeoutTimestamp} |
+| recv_packet | packet_sequence | {sequence} |
+| recv_packet | packet_src_port | {sourcePort} |
+| recv_packet | packet_src_channel | {sourceChannel} |
+| recv_packet | packet_dst_port | {destinationPort} |
+| recv_packet | packet_dst_channel | {destinationChannel} |
+| recv_packet | packet_channel_ordering | {channel.Ordering} |
+| message | action | recv_packet |
+| message | module | ibc-channel |
+
+### MsgAcknowledgePacket
+
+| Type | Attribute Key | Attribute Value |
+|--------------------|--------------------------|----------------------|
+| acknowledge_packet | packet_timeout_height | {timeoutHeight} |
+| acknowledge_packet | packet_timeout_timestamp | {timeoutTimestamp} |
+| acknowledge_packet | packet_sequence | {sequence} |
+| acknowledge_packet | packet_src_port | {sourcePort} |
+| acknowledge_packet | packet_src_channel | {sourceChannel} |
+| acknowledge_packet | packet_dst_port | {destinationPort} |
+| acknowledge_packet | packet_dst_channel | {destinationChannel} |
+| acknowledge_packet | packet_channel_ordering | {channel.Ordering} |
+| message | action | acknowledge_packet |
+| message | module | ibc-channel |
+
+### MsgTimeoutPacket & MsgTimeoutOnClose
+
+| Type | Attribute Key | Attribute Value |
+|----------------|--------------------------|----------------------|
+| timeout_packet | packet_timeout_height | {timeoutHeight} |
+| timeout_packet | packet_timeout_timestamp | {timeoutTimestamp} |
+| timeout_packet | packet_sequence | {sequence} |
+| timeout_packet | packet_src_port | {sourcePort} |
+| timeout_packet | packet_src_channel | {sourceChannel} |
+| timeout_packet | packet_dst_port | {destinationPort} |
+| timeout_packet | packet_dst_channel | {destinationChannel} |
+| timeout_packet | packet_channel_ordering | {channel.Ordering} |
+| message | action | timeout_packet |
+| message | module | ibc-channel |
+
+
diff --git a/core/spec/07_params.md b/core/spec/07_params.md
new file mode 100644
index 00000000..67e79ef8
--- /dev/null
+++ b/core/spec/07_params.md
@@ -0,0 +1,21 @@
+
+
+# Parameters
+
+## Clients
+
+The ibc clients contain the following parameters:
+
+| Key | Type | Default Value |
+|------------------|------|---------------|
+| `AllowedClients` | []string | `"06-solomachine","07-tendermint"` |
+
+### AllowedClients
+
+The allowed clients parameter defines an allowlist of client types supported by the chain. A client
+that is not registered on this list will fail upon creation or on genesis validation. Note that,
+since the client type is an arbitrary string, chains must not register two light clients which
+return the same value for the `ClientType()` function, otherwise the allowlist check can be
+bypassed.
diff --git a/core/spec/README.md b/core/spec/README.md
new file mode 100644
index 00000000..f6de9749
--- /dev/null
+++ b/core/spec/README.md
@@ -0,0 +1,26 @@
+
+
+# `ibc core`
+
+## Abstract
+
+This paper defines the implementation of the IBC protocol on the Cosmos SDK, the
+changes made to the specification and where to find each specific ICS spec within
+the module.
+
+For the general specification please refer to the [Interchain Standards](https://github.com/cosmos/ics).
+
+## Contents
+
+1. **[Concepts](01_concepts.md)**
+2. **[State](02_state.md)**
+3. **[State Transitions](03_state_transitions.md)**
+4. **[Messages](04_messages.md)**
+5. **[Callbacks](05_callbacks.md)**
+6. **[Events](06_events.md)**
+7. **[Params](07_params.md)**
diff --git a/core/types/codec.go b/core/types/codec.go
new file mode 100644
index 00000000..db110ac9
--- /dev/null
+++ b/core/types/codec.go
@@ -0,0 +1,23 @@
+package types
+
+import (
+ codectypes "github.com/cosmos/cosmos-sdk/codec/types"
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
+ channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+ commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
+ solomachinetypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/06-solomachine/types"
+ ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
+ localhosttypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/09-localhost/types"
+)
+
+// RegisterInterfaces registers x/ibc interfaces into protobuf Any.
+func RegisterInterfaces(registry codectypes.InterfaceRegistry) {
+ clienttypes.RegisterInterfaces(registry)
+ connectiontypes.RegisterInterfaces(registry)
+ channeltypes.RegisterInterfaces(registry)
+ solomachinetypes.RegisterInterfaces(registry)
+ ibctmtypes.RegisterInterfaces(registry)
+ localhosttypes.RegisterInterfaces(registry)
+ commitmenttypes.RegisterInterfaces(registry)
+}
diff --git a/core/types/genesis.go b/core/types/genesis.go
new file mode 100644
index 00000000..f7d78e5c
--- /dev/null
+++ b/core/types/genesis.go
@@ -0,0 +1,38 @@
+package types
+
+import (
+ codectypes "github.com/cosmos/cosmos-sdk/codec/types"
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
+ channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+)
+
+var _ codectypes.UnpackInterfacesMessage = GenesisState{}
+
+// DefaultGenesisState returns the ibc module's default genesis state.
+func DefaultGenesisState() *GenesisState {
+ return &GenesisState{
+ ClientGenesis: clienttypes.DefaultGenesisState(),
+ ConnectionGenesis: connectiontypes.DefaultGenesisState(),
+ ChannelGenesis: channeltypes.DefaultGenesisState(),
+ }
+}
+
+// UnpackInterfaces implements UnpackInterfacesMessage.UnpackInterfaces
+func (gs GenesisState) UnpackInterfaces(unpacker codectypes.AnyUnpacker) error {
+ return gs.ClientGenesis.UnpackInterfaces(unpacker)
+}
+
+// Validate performs basic genesis state validation returning an error upon any
+// failure.
+func (gs *GenesisState) Validate() error {
+ if err := gs.ClientGenesis.Validate(); err != nil {
+ return err
+ }
+
+ if err := gs.ConnectionGenesis.Validate(); err != nil {
+ return err
+ }
+
+ return gs.ChannelGenesis.Validate()
+}
diff --git a/core/types/genesis.pb.go b/core/types/genesis.pb.go
new file mode 100644
index 00000000..fc52b6f1
--- /dev/null
+++ b/core/types/genesis.pb.go
@@ -0,0 +1,440 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: ibcgo/core/types/v1/genesis.proto
+
+package types
+
+import (
+ fmt "fmt"
+ types "github.com/cosmos/ibc-go/core/02-client/types"
+ types1 "github.com/cosmos/ibc-go/core/03-connection/types"
+ types2 "github.com/cosmos/ibc-go/core/04-channel/types"
+ _ "github.com/gogo/protobuf/gogoproto"
+ proto "github.com/gogo/protobuf/proto"
+ io "io"
+ math "math"
+ math_bits "math/bits"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+// GenesisState defines the ibc module's genesis state.
+type GenesisState struct {
+ // ICS002 - Clients genesis state
+ ClientGenesis types.GenesisState `protobuf:"bytes,1,opt,name=client_genesis,json=clientGenesis,proto3" json:"client_genesis" yaml:"client_genesis"`
+ // ICS003 - Connections genesis state
+ ConnectionGenesis types1.GenesisState `protobuf:"bytes,2,opt,name=connection_genesis,json=connectionGenesis,proto3" json:"connection_genesis" yaml:"connection_genesis"`
+ // ICS004 - Channel genesis state
+ ChannelGenesis types2.GenesisState `protobuf:"bytes,3,opt,name=channel_genesis,json=channelGenesis,proto3" json:"channel_genesis" yaml:"channel_genesis"`
+}
+
+func (m *GenesisState) Reset() { *m = GenesisState{} }
+func (m *GenesisState) String() string { return proto.CompactTextString(m) }
+func (*GenesisState) ProtoMessage() {}
+func (*GenesisState) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f0cf35a95987cc01, []int{0}
+}
+func (m *GenesisState) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *GenesisState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_GenesisState.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *GenesisState) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GenesisState.Merge(m, src)
+}
+func (m *GenesisState) XXX_Size() int {
+ return m.Size()
+}
+func (m *GenesisState) XXX_DiscardUnknown() {
+ xxx_messageInfo_GenesisState.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GenesisState proto.InternalMessageInfo
+
+func (m *GenesisState) GetClientGenesis() types.GenesisState {
+ if m != nil {
+ return m.ClientGenesis
+ }
+ return types.GenesisState{}
+}
+
+func (m *GenesisState) GetConnectionGenesis() types1.GenesisState {
+ if m != nil {
+ return m.ConnectionGenesis
+ }
+ return types1.GenesisState{}
+}
+
+func (m *GenesisState) GetChannelGenesis() types2.GenesisState {
+ if m != nil {
+ return m.ChannelGenesis
+ }
+ return types2.GenesisState{}
+}
+
+func init() {
+ proto.RegisterType((*GenesisState)(nil), "ibcgo.core.types.v1.GenesisState")
+}
+
+func init() { proto.RegisterFile("ibcgo/core/types/v1/genesis.proto", fileDescriptor_f0cf35a95987cc01) }
+
+var fileDescriptor_f0cf35a95987cc01 = []byte{
+ // 313 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x92, 0xcf, 0x4a, 0xc3, 0x30,
+ 0x1c, 0xc7, 0xdb, 0x09, 0x1e, 0xaa, 0x4e, 0xac, 0x7f, 0xd0, 0x81, 0x99, 0x4b, 0x61, 0x78, 0x31,
+ 0xa1, 0x7a, 0x13, 0xbc, 0xec, 0xe2, 0xbd, 0xde, 0xbc, 0x48, 0x1b, 0x42, 0x1b, 0x68, 0x93, 0xb1,
+ 0xc6, 0xe2, 0xde, 0xc2, 0xc7, 0xda, 0x71, 0x47, 0x4f, 0x63, 0xb4, 0x6f, 0xe0, 0x13, 0xc8, 0x92,
+ 0xd8, 0xb5, 0xe4, 0x56, 0xbe, 0xfd, 0xfc, 0xbe, 0x9f, 0xfc, 0xf3, 0x26, 0x2c, 0x21, 0xa9, 0xc0,
+ 0x44, 0x2c, 0x28, 0x96, 0xcb, 0x39, 0x2d, 0x71, 0x15, 0xe2, 0x94, 0x72, 0x5a, 0xb2, 0x12, 0xcd,
+ 0x17, 0x42, 0x0a, 0xff, 0x5c, 0x21, 0x68, 0x87, 0x20, 0x85, 0xa0, 0x2a, 0x1c, 0x5d, 0xa4, 0x22,
+ 0x15, 0xea, 0x3f, 0xde, 0x7d, 0x69, 0x74, 0x04, 0x3b, 0x6d, 0x24, 0x67, 0x94, 0x4b, 0xab, 0x6e,
+ 0x34, 0xed, 0x32, 0x82, 0x73, 0x4a, 0x24, 0x13, 0xdc, 0xe6, 0x82, 0x2e, 0x97, 0xc5, 0x9c, 0xd3,
+ 0xdc, 0x82, 0xe0, 0x76, 0xe0, 0x1d, 0xbf, 0xea, 0xe4, 0x4d, 0xc6, 0x92, 0xfa, 0x99, 0x37, 0xd4,
+ 0xe2, 0x0f, 0x03, 0x5e, 0xbb, 0x77, 0xee, 0xfd, 0xd1, 0x23, 0x44, 0x9d, 0x5d, 0x68, 0x02, 0x55,
+ 0x21, 0xea, 0xce, 0xce, 0x6e, 0x57, 0x9b, 0xb1, 0xf3, 0xbb, 0x19, 0x5f, 0x2e, 0xe3, 0x22, 0x7f,
+ 0x86, 0xfd, 0x1e, 0x18, 0x9d, 0xe8, 0xc0, 0x8c, 0xf8, 0x5f, 0x9e, 0xbf, 0x5f, 0x7e, 0x6b, 0x1b,
+ 0x28, 0xdb, 0xb4, 0x67, 0x6b, 0x29, 0xcb, 0x38, 0x31, 0xc6, 0x1b, 0x63, 0xb4, 0xfa, 0x60, 0x74,
+ 0xb6, 0x0f, 0xff, 0xcd, 0xb9, 0x77, 0x6a, 0x0e, 0xa4, 0xd5, 0x1e, 0x28, 0x6d, 0xd0, 0xd3, 0x6a,
+ 0xc4, 0x72, 0x02, 0xe3, 0xbc, 0x32, 0xce, 0x7e, 0x13, 0x8c, 0x86, 0x26, 0x31, 0x43, 0xb3, 0x97,
+ 0x55, 0x0d, 0xdc, 0x75, 0x0d, 0xdc, 0x6d, 0x0d, 0xdc, 0xef, 0x06, 0x38, 0xeb, 0x06, 0x38, 0x3f,
+ 0x0d, 0x70, 0xde, 0x83, 0x94, 0xc9, 0xec, 0x33, 0x41, 0x44, 0x14, 0x98, 0x88, 0xb2, 0x10, 0x25,
+ 0x66, 0x09, 0x79, 0xe8, 0x3d, 0xa7, 0xe4, 0x50, 0x5d, 0xd4, 0xd3, 0x5f, 0x00, 0x00, 0x00, 0xff,
+ 0xff, 0x44, 0x1f, 0x35, 0xd8, 0x69, 0x02, 0x00, 0x00,
+}
+
+func (m *GenesisState) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *GenesisState) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.ChannelGenesis.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenesis(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ {
+ size, err := m.ConnectionGenesis.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenesis(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ClientGenesis.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenesis(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintGenesis(dAtA []byte, offset int, v uint64) int {
+ offset -= sovGenesis(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *GenesisState) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ClientGenesis.Size()
+ n += 1 + l + sovGenesis(uint64(l))
+ l = m.ConnectionGenesis.Size()
+ n += 1 + l + sovGenesis(uint64(l))
+ l = m.ChannelGenesis.Size()
+ n += 1 + l + sovGenesis(uint64(l))
+ return n
+}
+
+func sovGenesis(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozGenesis(x uint64) (n int) {
+ return sovGenesis(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *GenesisState) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenesis
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: GenesisState: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: GenesisState: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClientGenesis", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenesis
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenesis
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenesis
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ClientGenesis.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ConnectionGenesis", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenesis
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenesis
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenesis
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ConnectionGenesis.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ChannelGenesis", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenesis
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenesis
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenesis
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ChannelGenesis.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenesis(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenesis
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipGenesis(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenesis
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenesis
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenesis
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthGenesis
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupGenesis
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthGenesis
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthGenesis = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowGenesis = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupGenesis = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/core/types/query.go b/core/types/query.go
new file mode 100644
index 00000000..fba69b3a
--- /dev/null
+++ b/core/types/query.go
@@ -0,0 +1,26 @@
+package types
+
+import (
+ "github.com/gogo/protobuf/grpc"
+
+ client "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client"
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ connection "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection"
+ connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
+ channel "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel"
+ channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+)
+
+// QueryServer defines the IBC interfaces that the gRPC query server must implement
+type QueryServer interface {
+ clienttypes.QueryServer
+ connectiontypes.QueryServer
+ channeltypes.QueryServer
+}
+
+// RegisterQueryService registers each individual IBC submodule query service
+func RegisterQueryService(server grpc.Server, queryService QueryServer) {
+ client.RegisterQueryService(server, queryService)
+ connection.RegisterQueryService(server, queryService)
+ channel.RegisterQueryService(server, queryService)
+}
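As a usage sketch (assumed import path), any type that satisfies all three submodule `QueryServer` interfaces can be wired onto the app's gRPC query router in a single call:

```go
package example

import (
	"github.com/gogo/protobuf/grpc"

	ibctypes "github.com/cosmos/ibc-go/core/types" // assumed import path
)

// RegisterIBCQueries exposes the combined client/connection/channel query
// services; RegisterQueryService fans the single QueryServer out to the
// three submodule service registrations shown above.
func RegisterIBCQueries(server grpc.Server, qs ibctypes.QueryServer) {
	ibctypes.RegisterQueryService(server, qs)
}
```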
diff --git a/docs/README.md b/docs/README.md
new file mode 100644
index 00000000..a699c10a
--- /dev/null
+++ b/docs/README.md
@@ -0,0 +1,114 @@
+
+
+# `ibc`
+
+## Abstract
+
+This specification defines the implementation of the IBC protocol on the Cosmos SDK, the
+changes made to the specification and where to find each specific ICS spec within
+the module.
+
+For the general specification please refer to the [Interchain Standards](https://github.com/cosmos/ics).
+
+## Contents
+
+1. **Applications**
+
+ 1.1. [Transfer](./../applications/transfer/spec/README.md)
+2. **[Core](./../core/spec/README.md)**
+3. **Light Clients**
+
+ 3.1 [Solo Machine Client](./../light-clients/06-solomachine/spec/README.md)
+
+ 3.2 [Tendermint Client](./../light-clients/07-tendermint/spec/README.md)
+
+ 3.3 [Localhost Client](./../light-clients/09-localhost/spec/README.md)
+
+## Implementation Details
+
+As stated above, the IBC implementation on the Cosmos SDK introduces some changes
+to the general specification, in order to avoid code duplication and to take
+advantage of the SDK architectural components such as the transaction routing
+through `Handlers`.
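+
+As an illustration only (the keeper and message names below are assumptions,
+not a verbatim copy of `x/ibc/handler.go`), the routing pattern looks roughly
+like this:
+
+```go
+import (
+	sdk "github.com/cosmos/cosmos-sdk/types"
+	sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+
+	clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+)
+
+// NewHandler switches on the concrete sdk.Msg type and dispatches to the
+// submodule that owns it, e.g. 02-client for MsgCreateClient.
+func NewHandler(k Keeper) sdk.Handler {
+	return func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) {
+		switch msg := msg.(type) {
+		case *clienttypes.MsgCreateClient:
+			res, err := k.CreateClient(sdk.WrapSDKContext(ctx), msg)
+			return sdk.WrapServiceResult(ctx, res, err)
+		// ... connection, channel and packet messages follow the same pattern
+		default:
+			return nil, sdkerrors.Wrapf(sdkerrors.ErrUnknownRequest, "unrecognized IBC message type: %T", msg)
+		}
+	}
+}
+```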
+
+### Interchain Standards reference
+
+The following list is a mapping from each Interchain Standard to their implementation
+in the SDK's `x/ibc` module:
+
+* [ICS 002 - Client Semantics](https://github.com/cosmos/ics/tree/master/spec/ics-002-client-semantics): Implemented in [`x/ibc/core/02-client`](https://github.com/cosmos/tree/master/ibc/core/02-client)
+* [ICS 003 - Connection Semantics](https://github.com/cosmos/ics/blob/master/spec/ics-003-connection-semantics): Implemented in [`x/ibc/core/03-connection`](https://github.com/cosmos/tree/master/ibc/core/03-connection)
+* [ICS 004 - Channel and Packet Semantics](https://github.com/cosmos/ics/blob/master/spec/ics-004-channel-and-packet-semantics): Implemented in [`x/ibc/core/04-channel`](https://github.com/cosmos/tree/master/ibc/core/04-channel)
+* [ICS 005 - Port Allocation](https://github.com/cosmos/ics/blob/master/spec/ics-005-port-allocation): Implemented in [`x/ibc/core/05-port`](https://github.com/cosmos/tree/master/ibc/core/05-port)
+* [ICS 006 - Solo Machine Client](https://github.com/cosmos/ics/blob/master/spec/ics-006-solo-machine-client): Implemented in [`x/ibc/light-clients/06-solomachine`](https://github.com/cosmos/tree/master/ibc/solomachine)
+* [ICS 007 - Tendermint Client](https://github.com/cosmos/ics/blob/master/spec/ics-007-tendermint-client): Implemented in [`x/ibc/light-clients/07-tendermint`](https://github.com/cosmos/tree/master/ibc/light-clients/07-tendermint)
+* [ICS 009 - Loopback Client](https://github.com/cosmos/ics/blob/master/spec/ics-009-loopback-client): Implemented in [`x/ibc/light-clients/09-localhost`](https://github.com/cosmos/tree/master/ibc/light-clients/09-localhost)
+* [ICS 018 - Relayer Algorithms](https://github.com/cosmos/ics/tree/master/spec/ics-018-relayer-algorithms): Implemented in its own [relayer repository](https://github.com/cosmos/relayer)
+* [ICS 020 - Fungible Token Transfer](https://github.com/cosmos/ics/tree/master/spec/ics-020-fungible-token-transfer): Implemented in [`x/ibc/applications/transfer`](https://github.com/cosmos/tree/master/ibc/applications/transfer)
+* [ICS 023 - Vector Commitments](https://github.com/cosmos/ics/tree/master/spec/ics-023-vector-commitments): Implemented in [`x/ibc/core/23-commitment`](https://github.com/cosmos/tree/master/ibc/core/23-commitment)
+* [ICS 024 - Host Requirements](https://github.com/cosmos/ics/tree/master/spec/ics-024-host-requirements): Implemented in [`x/ibc/core/24-host`](https://github.com/cosmos/tree/master/ibc/core/24-host)
+* [ICS 025 - Handler Interface](https://github.com/cosmos/ics/tree/master/spec/ics-025-handler-interface): `Handler` interfaces are implemented at the top level in `x/ibc/handler.go`,
+which call each ICS submodule's handlers (i.e `x/ibc/*/{XX-ICS}/handler.go`).
+* [ICS 026 - Routing Module](https://github.com/cosmos/ics/blob/master/spec/ics-026-routing-module): Replaced by [ADR 15 - IBC Packet Receiver](../../../docs/architecture/adr-015-ibc-packet-receiver.md).
+
+### Architecture Decision Records (ADR)
+
+The following ADRs provide the design and architecture decisions for IBC-related components.
+
+* [ADR 001 - Coin Source Tracing](../../../docs/architecture/adr-001-coin-source-tracing.md): standard to hash the ICS20's fungible token
+denomination trace path in order to support special characters and limit the maximum denomination length.
+* [ADR 17 - Historical Header Module](../../../docs/architecture/adr-017-historical-header-module.md): Introduces the ability to introspect past
+consensus states in order to verify their membership in the counterparty clients.
+* [ADR 19 - Protobuf State Encoding](../../../docs/architecture/adr-019-protobuf-state-encoding.md): Migration from Amino to Protobuf for state encoding.
+* [ADR 020 - Protocol Buffer Transaction Encoding](./../../docs/architecture/adr-020-protobuf-transaction-encoding.md): Client side migration to Protobuf.
+* [ADR 021 - Protocol Buffer Query Encoding](../../../docs/architecture/adr-020-protobuf-query-encoding.md): Queries migration to Protobuf.
+* [ADR 026 - IBC Client Recovery Mechanisms](../../../docs/architecture/adr-026-ibc-client-recovery-mechanisms.md): Allows IBC Clients to be recovered after freezing or expiry.
+
+### SDK Modules
+
+* [`x/capability`](https://github.com/cosmos/tree/master/x/capability): The capability module provides object-capability keys support through scoped keepers in order to authenticate usage of ports or channels. Check [ADR 3 - Dynamic Capability Store](../../../docs/architecture/adr-003-dynamic-capability-store.md) for more details.
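+
+  A hedged sketch of the pattern (the keeper fields and port identifier are
+  assumptions for illustration):
+
+  ```go
+  // BindAndClaimPort binds the module's port and claims the returned
+  // capability with the module-scoped capability keeper, so that only this
+  // module may later authenticate against the port.
+  func (k Keeper) BindAndClaimPort(ctx sdk.Context, portID string) error {
+      capability := k.portKeeper.BindPort(ctx, portID)
+      return k.scopedKeeper.ClaimCapability(ctx, capability, host.PortPath(portID))
+  }
+  ```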
+
+## IBC module architecture
+
+> **NOTE for auditors**: If you're not familiar with the overall module structure from
+the SDK modules, please check this [document](../../../docs/building-modules/structure.md) as
+prerequisite reading.
+
+For ease of auditing, every Interchain Standard has been developed in its own
+package. The development team separated the IBC TAO (Transport, Authentication, Ordering) ICS specifications from the IBC application level
+specification. The following tree describes the directory layout used by
+the `ibc` (TAO) and `ibc-transfer` ([ICS20](https://github.com/cosmos/ics/tree/master/spec/ics-020-fungible-token-transfer)) modules:
+
+```shell
+x/ibc
+├── applications/
+│ └──transfer/
+├── core/
+│ ├── 02-client/
+│ ├── 03-connection/
+│ ├── 04-channel/
+│ ├── 05-port/
+│ ├── 23-commitment/
+│ ├── 24-host/
+│ ├── client
+│ │ └── cli
+│ │ └── cli.go
+│ ├── keeper
+│ │ ├── keeper.go
+│ │ └── querier.go
+│ ├── types
+│ │ ├── errors.go
+│ │ └── keys.go
+│ ├── handler.go
+│ └── module.go
+├── light-clients/
+│ ├── 06-solomachine/
+│ ├── 07-tendermint/
+│ └── 09-localhost/
+└── testing/
+```
diff --git a/docs/ibc/proto-docs.md b/docs/ibc/proto-docs.md
new file mode 100644
index 00000000..1c9f5c29
--- /dev/null
+++ b/docs/ibc/proto-docs.md
@@ -0,0 +1,7521 @@
+# Protocol Documentation
+
+## Table of Contents
+
+## ibcgo/apps/transfer/v1/transfer.proto
+
+
+
+
+ DenomTrace
+ DenomTrace contains the base denomination for ICS20 fungible tokens and the
source tracing information path.
+
+
+
+
+ Field Type Label Description
+
+
+
+
+ path
+ string
+
+ path defines the chain of port/channel identifiers used for tracing the
+source of the fungible token.
+
+
+
+ base_denom
+ string
+
+ base denomination of the relayed fungible token.
+
+
+
+
+
+
+
+
+
+ FungibleTokenPacketData
+ FungibleTokenPacketData defines a struct for the packet payload
See FungibleTokenPacketData spec:
https://github.com/cosmos/ics/tree/master/spec/ics-020-fungible-token-transfer#data-structures
+
+
+
+
+ Field Type Label Description
+
+
+
+
+ denom
+ string
+
+ the token denomination to be transferred
+
+
+
+ amount
+ uint64
+
+ the token amount to be transferred
+
+
+
+ sender
+ string
+
+ the sender address
+
+
+
+ receiver
+ string
+
+ the recipient address on the destination chain
+
+
+
+
+
+
+
+
+
+ Params
+ Params defines the set of IBC transfer parameters.
NOTE: To prevent a single token from being transferred, set the
TransfersEnabled parameter to true and then set the bank module's SendEnabled
parameter for the denomination to false.
+
+
+
+
+ Field Type Label Description
+
+
+
+
+ send_enabled
+ bool
+
+ send_enabled enables or disables all cross-chain token transfers from this
+chain.
+
+
+
+ receive_enabled
+ bool
+
+ receive_enabled enables or disables all cross-chain token transfers to this
+chain.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+## ibcgo/apps/transfer/v1/genesis.proto
+
+
+
+
+ GenesisState
+ GenesisState defines the ibc-transfer genesis state
+
+
+
+
+ Field Type Label Description
+
+
+
+
+ port_id
+ string
+
+
+
+
+
+ denom_traces
+ DenomTrace
+ repeated
+
+
+
+
+ params
+ Params
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+## ibcgo/apps/transfer/v1/query.proto
+
+
+
+
+ QueryDenomTraceRequest
+ QueryDenomTraceRequest is the request type for the Query/DenomTrace RPC
method
+
+
+
+
+ Field Type Label Description
+
+
+
+
+ hash
+ string
+
+ hash (in hex format) of the denomination trace information.
+
+
+
+
+
+
+
+
+
+ QueryDenomTraceResponse
+ QueryDenomTraceResponse is the response type for the Query/DenomTrace RPC
method.
+
+
+
+
+ Field Type Label Description
+
+
+
+
+ denom_trace
+ DenomTrace
+
+ denom_trace returns the requested denomination trace information.
+
+
+
+
+
+
+
+
+
+ QueryDenomTracesRequest
+ QueryDenomTracesRequest is the request type for the Query/DenomTraces RPC
method
+
+
+
+
+
+
+
+
+ QueryDenomTracesResponse
+ QueryDenomTracesResponse is the response type for the Query/DenomTraces RPC
method.
+
+
+
+
+ Field Type Label Description
+
+
+
+
+ denom_traces
+ DenomTrace
+ repeated
+ denom_traces returns all denominations trace information.
+
+
+
+ pagination
+ cosmos.base.query.v1beta1.PageResponse
+
+ pagination defines the pagination in the response.
+
+
+
+
+
+
+
+
+
+ QueryParamsRequest
+ QueryParamsRequest is the request type for the Query/Params RPC method.
+
+
+
+
+
+ QueryParamsResponse
+ QueryParamsResponse is the response type for the Query/Params RPC method.
+
+
+
+
+ Field Type Label Description
+
+
+
+
+ params
+ Params
+
+ params defines the parameters of the module.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Query
+ Query defines the gRPC querier service.
+
+
+
+
+
+ Methods with HTTP bindings
+
+
+
+ Method Name
+ Method
+ Pattern
+ Body
+
+
+
+
+
+
+
+ DenomTrace
+ GET
+ /ibc/apps/transfer/v1/denom_traces/{hash}
+
+
+
+
+
+
+
+ DenomTraces
+ GET
+ /ibc/apps/transfer/v1/denom_traces
+
+
+
+
+
+
+
+ Params
+ GET
+ /ibc/apps/transfer/v1/params
+
+
+
+
+
+
+
+
+
+
+
+
+## ibcgo/core/client/v1/client.proto
+
+
+
+
+ ClientConsensusStates
+ ClientConsensusStates defines all the stored consensus states for a given
client.
+
+
+
+
+ Field Type Label Description
+
+
+
+
+ client_id
+ string
+
+ client identifier
+
+
+
+ consensus_states
+ ConsensusStateWithHeight
+ repeated
+ consensus states and their heights associated with the client
+
+
+
+
+
+
+
+
+
+ ClientUpdateProposal
+ ClientUpdateProposal is a governance proposal. If it passes, the substitute
client's consensus states starting from the 'initial height' are copied over
to the subject's client state. The proposal handler may fail if the subject
and the substitute do not match in client and chain parameters (with
exception to latest height, frozen height, and chain-id). The updated client
must also be valid (cannot be expired).
+
+
+
+
+ Field Type Label Description
+
+
+
+
+ title
+ string
+
+ the title of the update proposal
+
+
+
+ description
+ string
+
+ the description of the proposal
+
+
+
+ subject_client_id
+ string
+
+ the client identifier for the client to be updated if the proposal passes
+
+
+
+ substitute_client_id
+ string
+
+ the substitute client identifier for the client standing in for the subject
+client
+
+
+
+ initial_height
+ Height
+
+ the initial height to copy consensus states from the substitute to the
+subject
+
+
+
+
+
+
+
+
+
+ ConsensusStateWithHeight
+ ConsensusStateWithHeight defines a consensus state with an additional height
field.
+
+
+
+
+ Field Type Label Description
+
+
+
+
+ height
+ Height
+
+ consensus state height
+
+
+
+ consensus_state
+ google.protobuf.Any
+
+ consensus state
+
+
+
+
+
+
+
+
+
+ Height
+ Height is a monotonically increasing data type
that can be compared against another Height for the purposes of updating and
freezing clients.
Normally the RevisionHeight is incremented at each height while keeping
RevisionNumber the same. However, some consensus algorithms may choose to
reset the height in certain conditions, e.g. hard forks or state-machine
breaking changes. In these cases, the RevisionNumber is incremented so that
height continues to be monotonically increasing even as the RevisionHeight
gets reset.
+
+
+
+
+ Field Type Label Description
+
+
+
+
+ revision_number
+ uint64
+
+ the revision that the client is currently on
+
+
+
+ revision_height
+ uint64
+
+ the height within the given revision
+
+
+
+
+
+
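+ A small illustrative sketch (not the generated type) of the ordering these
+ two fields imply, comparing first by revision number and then by revision
+ height:
+
+ ```go
+ type Height struct {
+ 	RevisionNumber uint64 // the revision that the client is currently on
+ 	RevisionHeight uint64 // the height within the given revision
+ }
+
+ // LT reports whether h is strictly lower than other.
+ func (h Height) LT(other Height) bool {
+ 	return h.RevisionNumber < other.RevisionNumber ||
+ 		(h.RevisionNumber == other.RevisionNumber && h.RevisionHeight < other.RevisionHeight)
+ }
+ ```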
+
+
+
+ IdentifiedClientState
+ IdentifiedClientState defines a client state with an additional client
identifier field.
+
+
+
+
+ Field Type Label Description
+
+
+
+
+ client_id
+ string
+
+ client identifier
+
+
+
+ client_state
+ google.protobuf.Any
+
+ client state
+
+
+
+
+
+
+
+
+
+ Params
+ Params defines the set of IBC light client parameters.
+
+
+
+
+ Field Type Label Description
+
+
+
+
+ allowed_clients
+ string
+ repeated
+ allowed_clients defines the list of allowed client state types.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+## ibcgo/apps/transfer/v1/tx.proto
+
+
+
+
+ MsgTransfer
+ MsgTransfer defines a msg to transfer fungible tokens (i.e Coins) between
ICS20 enabled chains. See ICS Spec here:
https://github.com/cosmos/ics/tree/master/spec/ics-020-fungible-token-transfer#data-structures
+
+
+
+
+ Field Type Label Description
+
+
+
+
+ source_port
+ string
+
+ the port on which the packet will be sent
+
+
+
+ source_channel
+ string
+
+ the channel by which the packet will be sent
+
+
+
+ token
+ cosmos.base.v1beta1.Coin
+
+ the tokens to be transferred
+
+
+
+ sender
+ string
+
+ the sender address
+
+
+
+ receiver
+ string
+
+ the recipient address on the destination chain
+
+
+
+ timeout_height
+ ibcgo.core.client.v1.Height
+
+ Timeout height relative to the current block height.
+The timeout is disabled when set to 0.
+
+
+
+ timeout_timestamp
+ uint64
+
+ Timeout timestamp (in nanoseconds) relative to the current block timestamp.
+The timeout is disabled when set to 0.
+
+
+
+
+
+
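+ A hedged construction sketch (the Go import aliases, identifiers and
+ addresses are assumptions; the field names follow the generated type):
+
+ ```go
+ msg := &transfertypes.MsgTransfer{
+ 	SourcePort:    "transfer",
+ 	SourceChannel: "channel-0",
+ 	Token:         sdk.NewCoin("uatom", sdk.NewInt(100)),
+ 	Sender:        sender.String(),
+ 	Receiver:      receiver,
+ 	// timeout once the counterparty reaches revision 1, height 1000;
+ 	// the timestamp-based timeout is disabled by leaving it at 0
+ 	TimeoutHeight:    clienttypes.Height{RevisionNumber: 1, RevisionHeight: 1000},
+ 	TimeoutTimestamp: 0,
+ }
+ ```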
+
+
+
+ MsgTransferResponse
+ MsgTransferResponse defines the Msg/Transfer response type.
+
+
+
+
+
+
+
+
+
+
+
+ Msg
+ Msg defines the ibc/transfer Msg service.
+
+
+ Method Name Request Type Response Type Description
+
+
+
+
+ Transfer
+ MsgTransfer
+ MsgTransferResponse
+ Transfer defines a rpc handler method for MsgTransfer.
+
+
+
+
+
+
+
+
+
+
+## ibcgo/core/channel/v1/channel.proto
+
+
+
+
+ Acknowledgement
+ Acknowledgement is the recommended acknowledgement format to be used by
app-specific protocols.
NOTE: The field numbers 21 and 22 were explicitly chosen to avoid accidental
conflicts with other protobuf message formats used for acknowledgements.
The first byte of any message with this format will be the non-ASCII values
`0xaa` (result) or `0xb2` (error). Implemented as defined by ICS:
https://github.com/cosmos/ics/tree/master/spec/ics-004-channel-and-packet-semantics#acknowledgement-envelope
+
+
+
+
+ Field Type Label Description
+
+
+
+
+ result
+ bytes
+
+
+
+
+
+ error
+ string
+
+
+
+
+
+
+
+
+
+
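+ A quick sanity check of the first-byte claim above: for a length-delimited
+ protobuf field the tag byte is `(field_number << 3) | 2`, so field 21
+ (`result`) yields `0xaa` and field 22 (`error`) yields `0xb2`.
+
+ ```go
+ package main
+
+ import "fmt"
+
+ func main() {
+ 	fmt.Printf("result (field 21): %#x\n", 21<<3|2) // 0xaa
+ 	fmt.Printf("error  (field 22): %#x\n", 22<<3|2) // 0xb2
+ }
+ ```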
+
+ Channel
+ Channel defines a pipeline for exactly-once packet delivery between specific
modules on separate blockchains, which has at least one end capable of
sending packets and one end capable of receiving packets.
+
+
+
+
+ Field Type Label Description
+
+
+
+
+ state
+ State
+
+ current state of the channel end
+
+
+
+ ordering
+ Order
+
+ whether the channel is ordered or unordered
+
+
+
+ counterparty
+ Counterparty
+
+ counterparty channel end
+
+
+
+ connection_hops
+ string
+ repeated
+ list of connection identifiers, in order, along which packets sent on
+this channel will travel
+
+
+
+ version
+ string
+
+ opaque channel version, which is agreed upon during the handshake
+
+
+
+
+
+
+
+
+
+ Counterparty
+ Counterparty defines a channel end counterparty
+
+
+
+
+ Field Type Label Description
+
+
+
+
+ port_id
+ string
+
+ port on the counterparty chain which owns the other end of the channel.
+
+
+
+ channel_id
+ string
+
+ channel end on the counterparty chain
+
+
+
+
+
+
+
+
+
+ IdentifiedChannel
+ IdentifiedChannel defines a channel with additional port and channel
identifier fields.
+
+
+
+
+ Field Type Label Description
+
+
+
+
+ state
+ State
+
+ current state of the channel end
+
+
+
+ ordering
+ Order
+
+ whether the channel is ordered or unordered
+
+
+
+ counterparty
+ Counterparty
+
+ counterparty channel end
+
+
+
+ connection_hops
+ string
+ repeated
+ list of connection identifiers, in order, along which packets sent on
+this channel will travel
+
+
+
+ version
+ string
+
+ opaque channel version, which is agreed upon during the handshake
+
+
+
+ port_id
+ string
+
+ port identifier
+
+
+
+ channel_id
+ string
+
+ channel identifier
+
+
+
+
+
+
+
+
+
+ Packet
+ Packet defines a type that carries data across different chains through IBC
+
+
+
+
+ Field Type Label Description
+
+
+
+
+ sequence
+ uint64
+
+ number corresponds to the order of sends and receives, where a Packet
+with an earlier sequence number must be sent and received before a Packet
+with a later sequence number.
+
+
+
+ source_port
+ string
+
+ identifies the port on the sending chain.
+
+
+
+ source_channel
+ string
+
+ identifies the channel end on the sending chain.
+
+
+
+ destination_port
+ string
+
+ identifies the port on the receiving chain.
+
+
+
+ destination_channel
+ string
+
+ identifies the channel end on the receiving chain.
+
+
+
+ data
+ bytes
+
+ actual opaque bytes transferred directly to the application module
+
+
+
+ timeout_height
+ ibcgo.core.client.v1.Height
+
+ block height after which the packet times out
+
+
+
+ timeout_timestamp
+ uint64
+
+ block timestamp (in nanoseconds) after which the packet times out
+
+
+
+
+
+
+
+
+
+ PacketState
+ PacketState defines the generic type necessary to retrieve and store
packet commitments, acknowledgements, and receipts.
Caller is responsible for knowing the context necessary to interpret this
state as a commitment, acknowledgement, or a receipt.
+
+
+
+
+ Field Type Label Description
+
+
+
+
+ port_id
+ string
+
+ channel port identifier.
+
+
+
+ channel_id
+ string
+
+ channel unique identifier.
+
+
+
+ sequence
+ uint64
+
+ packet sequence.
+
+
+
+ data
+ bytes
+
+ embedded data that represents packet state.
+
+
+
+
+
+
+
+
+
+
+
+ Order
+ Order defines if a channel is ORDERED or UNORDERED
+
+
+ Name Number Description
+
+
+
+
+ ORDER_NONE_UNSPECIFIED
+ 0
+ zero-value for channel ordering
+
+
+
+ ORDER_UNORDERED
+ 1
+ packets can be delivered in any order, which may differ from the order in
+which they were sent.
+
+
+
+ ORDER_ORDERED
+ 2
+ packets are delivered exactly in the order which they were sent
+
+
+
+
+
+ State
+ State defines if a channel is in one of the following states:
CLOSED, INIT, TRYOPEN, OPEN or UNINITIALIZED.
+
+
+ Name Number Description
+
+
+
+
+ STATE_UNINITIALIZED_UNSPECIFIED
+ 0
+ Default State
+
+
+
+ STATE_INIT
+ 1
+ A channel has just started the opening handshake.
+
+
+
+ STATE_TRYOPEN
+ 2
+ A channel has acknowledged the handshake step on the counterparty chain.
+
+
+
+ STATE_OPEN
+ 3
+ A channel has completed the handshake. Open channels are
+ready to send and receive packets.
+
+
+
+ STATE_CLOSED
+ 4
+ A channel has been closed and can no longer be used to send or receive
+packets.
+
+
+
+
+
+
+
+
+
+
+
+
+
+## ibcgo/core/channel/v1/genesis.proto
+
+
+
+
+ GenesisState
+ GenesisState defines the ibc channel submodule's genesis state.
+
+
+
+
+
+
+
+
+ PacketSequence
+ PacketSequence defines the genesis type necessary to retrieve and store
next send and receive sequences.
+
+
+
+
+ Field Type Label Description
+
+
+
+
+ port_id
+ string
+
+
+
+
+
+ channel_id
+ string
+
+
+
+
+
+ sequence
+ uint64
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+## ibcgo/core/channel/v1/query.proto
+
+
+
+
+ QueryChannelClientStateRequest
+ QueryChannelClientStateRequest is the request type for the Query/ClientState
RPC method
+
+
+
+
+ Field Type Label Description
+
+
+
+
+ port_id
+ string
+
+ port unique identifier
+
+
+
+ channel_id
+ string
+
+ channel unique identifier
+
+
+
+
+
+
+
+
+
+ QueryChannelClientStateResponse
+ QueryChannelClientStateResponse is the Response type for the
Query/QueryChannelClientState RPC method
+
+
+
+
+
+
+
+
+ QueryChannelConsensusStateRequest
+ QueryChannelConsensusStateRequest is the request type for the
Query/ConsensusState RPC method
+
+
+
+
+ Field Type Label Description
+
+
+
+
+ port_id
+ string
+
+ port unique identifier
+
+
+
+ channel_id
+ string
+
+ channel unique identifier
+
+
+
+ revision_number
+ uint64
+
+ revision number of the consensus state
+
+
+
+ revision_height
+ uint64
+
+ revision height of the consensus state
+
+
+
+
+
+
+
+
+
+ QueryChannelConsensusStateResponse
+ QueryChannelConsensusStateResponse is the response type for the
 Query/ChannelConsensusState RPC method
+
+
+
+
+ Field Type Label Description
+
+
+
+
+ consensus_state
+ google.protobuf.Any
+
+ consensus state associated with the channel
+
+
+
+ client_id
+ string
+
+ client ID associated with the consensus state
+
+
+
+ proof
+ bytes
+
+ merkle proof of existence
+
+
+
+ proof_height
+ ibcgo.core.client.v1.Height
+
+ height at which the proof was retrieved
+
+
+
+
+
+
+
+
+
+ QueryChannelRequest
+ QueryChannelRequest is the request type for the Query/Channel RPC method
+
+
+
+
+ Field Type Label Description
+
+
+
+
+ port_id
+ string
+
+ port unique identifier
+
+
+
+ channel_id
+ string
+
+ channel unique identifier
+
+
+
+
+
+
+
+
+
+ QueryChannelResponse
+ QueryChannelResponse is the response type for the Query/Channel RPC method.
Besides the Channel end, it includes a proof and the height from which the
proof was retrieved.
+
+
+
+
+ Field Type Label Description
+
+
+
+
+ channel
+ Channel
+
+ channel associated with the request identifiers
+
+
+
+ proof
+ bytes
+
+ merkle proof of existence
+
+
+
+ proof_height
+ ibcgo.core.client.v1.Height
+
+ height at which the proof was retrieved
+
+
+
+
+
+
+
+
+
+ QueryChannelsRequest
+ QueryChannelsRequest is the request type for the Query/Channels RPC method
+
+
+
+
+
+
+
+
+ QueryChannelsResponse
+ QueryChannelsResponse is the response type for the Query/Channels RPC method.
+
+
+
+
+
+
+
+
+ QueryConnectionChannelsRequest
+ QueryConnectionChannelsRequest is the request type for the
Query/QueryConnectionChannels RPC method
+
+
+
+
+
+
+
+
+ QueryConnectionChannelsResponse
+ QueryConnectionChannelsResponse is the Response type for the
Query/QueryConnectionChannels RPC method
+
+
+
+
+
+
+
+
+ QueryNextSequenceReceiveRequest
+ QueryNextSequenceReceiveRequest is the request type for the
Query/QueryNextSequenceReceiveRequest RPC method
+
+
+
+
+ Field Type Label Description
+
+
+
+
+ port_id
+ string
+
+ port unique identifier
+
+
+
+ channel_id
+ string
+
+ channel unique identifier
+
+
+
+
+
+
+
+
+
+ QueryNextSequenceReceiveResponse
+ QueryNextSequenceReceiveResponse is the response type for the
 Query/NextSequenceReceive RPC method
+
+
+
+
+ Field Type Label Description
+
+
+
+
+ next_sequence_receive
+ uint64
+
+ next sequence receive number
+
+
+
+ proof
+ bytes
+
+ merkle proof of existence
+
+
+
+ proof_height
+ ibcgo.core.client.v1.Height
+
+ height at which the proof was retrieved
+
+
+
+
+
+
+
+
+
+ QueryPacketAcknowledgementRequest
+ QueryPacketAcknowledgementRequest is the request type for the
Query/PacketAcknowledgement RPC method
+
+
+
+
+ Field Type Label Description
+
+
+
+
+ port_id
+ string
+
+ port unique identifier
+
+
+
+ channel_id
+ string
+
+ channel unique identifier
+
+
+
+ sequence
+ uint64
+
+ packet sequence
+
+
+
+
+
+
+
+
+
+ QueryPacketAcknowledgementResponse
+ QueryPacketAcknowledgementResponse defines the client query response for a
packet which also includes a proof and the height from which the
proof was retrieved
+
+
+
+
+ Field Type Label Description
+
+
+
+
+ acknowledgement
+ bytes
+
+ packet associated with the request fields
+
+
+
+ proof
+ bytes
+
+ merkle proof of existence
+
+
+
+ proof_height
+ ibcgo.core.client.v1.Height
+
+ height at which the proof was retrieved
+
+
+
+
+
+
+
+
+
+ QueryPacketAcknowledgementsRequest
+ QueryPacketAcknowledgementsRequest is the request type for the
 Query/PacketAcknowledgements RPC method
+
+
+
+
+
+
+
+
+ QueryPacketAcknowledgementsResponse
+ QueryPacketAcknowledgementsResponse is the response type for the
 Query/PacketAcknowledgements RPC method
+
+
+
+
+
+
+
+
+ QueryPacketCommitmentRequest
+ QueryPacketCommitmentRequest is the request type for the
Query/PacketCommitment RPC method
+
+
+
+
+ Field Type Label Description
+
+
+
+
+ port_id
+ string
+
+ port unique identifier
+
+
+
+ channel_id
+ string
+
+ channel unique identifier
+
+
+
+ sequence
+ uint64
+
+ packet sequence
+
+
+
+
+
+
+
+
+
+ QueryPacketCommitmentResponse
+ QueryPacketCommitmentResponse defines the client query response for a packet
which also includes a proof and the height from which the proof was
retrieved
+
+
+
+
+ Field Type Label Description
+
+
+
+
+ commitment
+ bytes
+
+ packet associated with the request fields
+
+
+
+ proof
+ bytes
+
+ merkle proof of existence
+
+
+
+ proof_height
+ ibcgo.core.client.v1.Height
+
+ height at which the proof was retrieved
+
+
+
+
+
+
+
+
+
+ QueryPacketCommitmentsRequest
+ QueryPacketCommitmentsRequest is the request type for the
Query/QueryPacketCommitments RPC method
+
+
+
+
+
+
+
+
+ QueryPacketCommitmentsResponse
+ QueryPacketCommitmentsResponse is the response type for the
Query/QueryPacketCommitments RPC method
+
+
+
+
+
+
+
+
+ QueryPacketReceiptRequest
+ QueryPacketReceiptRequest is the request type for the
Query/PacketReceipt RPC method
+
+
+
+
+ Field Type Label Description
+
+
+
+
+ port_id
+ string
+
+ port unique identifier
+
+
+
+ channel_id
+ string
+
+ channel unique identifier
+
+
+
+ sequence
+ uint64
+
+ packet sequence
+
+
+
+
+
+
+
+
+
+ QueryPacketReceiptResponse
+ QueryPacketReceiptResponse defines the client query response for a packet
receipt which also includes a proof, and the height from which the proof was
retrieved
+
+
+
+
+ Field Type Label Description
+
+
+
+
+ received
+ bool
+
+ success flag for if receipt exists
+
+
+
+ proof
+ bytes
+
+ merkle proof of existence
+
+
+
+ proof_height
+ ibcgo.core.client.v1.Height
+
+ height at which the proof was retrieved
+
+
+
+
+
+
+
+
+
+ QueryUnreceivedAcksRequest
+ QueryUnreceivedAcksRequest is the request type for the
Query/UnreceivedAcks RPC method
+
+
+
+
+ Field Type Label Description
+
+
+
+
+ port_id
+ string
+
+ port unique identifier
+
+
+
+ channel_id
+ string
+
+ channel unique identifier
+
+
+
+ packet_ack_sequences
+ uint64
+ repeated
+ list of acknowledgement sequences
+
+
+
+
+
+
+
+
+
+ QueryUnreceivedAcksResponse
+ QueryUnreceivedAcksResponse is the response type for the
Query/UnreceivedAcks RPC method
+
+
+
+
+ Field Type Label Description
+
+
+
+
+ sequences
+ uint64
+ repeated
+ list of unreceived acknowledgement sequences
+
+
+
+ height
+ ibcgo.core.client.v1.Height
+
+ query block height
+
+
+
+
+
+
+
+
+
+ QueryUnreceivedPacketsRequest
+ QueryUnreceivedPacketsRequest is the request type for the
Query/UnreceivedPackets RPC method
+
+
+
+
+ Field Type Label Description
+
+
+
+
+ port_id
+ string
+
+ port unique identifier
+
+
+
+ channel_id
+ string
+
+ channel unique identifier
+
+
+
+ packet_commitment_sequences
+ uint64
+ repeated
+ list of packet sequences
+
+
+
+
+
+
+
+
+
+ QueryUnreceivedPacketsResponse
+ QueryUnreceivedPacketsResponse is the response type for the
 Query/UnreceivedPackets RPC method
+
+
+
+
+ Field Type Label Description
+
+
+
+
+ sequences
+ uint64
+ repeated
+ list of unreceived packet sequences
+
+
+
+ height
+ ibcgo.core.client.v1.Height
+
+ query block height
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Query
+ Query defines the gRPC querier service
+
+
+
+
+
+ Methods with HTTP bindings
+
+
+
+ Method Name
+ Method
+ Pattern
+ Body
+
+
+
+
+
+
+
+ Channel
+ GET
+ /ibc/core/channel/v1/channels/{channel_id}/ports/{port_id}
+
+
+
+
+
+
+
+ Channels
+ GET
+ /ibc/core/channel/v1/channels
+
+
+
+
+
+
+
+ ConnectionChannels
+ GET
+ /ibc/core/channel/v1/connections/{connection}/channels
+
+
+
+
+
+
+
+ ChannelClientState
+ GET
+ /ibc/core/channel/v1/channels/{channel_id}/ports/{port_id}/client_state
+
+
+
+
+
+
+
+ ChannelConsensusState
+ GET
+ /ibc/core/channel/v1/channels/{channel_id}/ports/{port_id}/consensus_state/revision/{revision_number}/height/{revision_height}
+
+
+
+
+
+
+
+ PacketCommitment
+ GET
+ /ibc/core/channel/v1/channels/{channel_id}/ports/{port_id}/packet_commitments/{sequence}
+
+
+
+
+
+
+
+ PacketCommitments
+ GET
+ /ibc/core/channel/v1/channels/{channel_id}/ports/{port_id}/packet_commitments
+
+
+
+
+
+
+
+ PacketReceipt
+ GET
+ /ibc/core/channel/v1/channels/{channel_id}/ports/{port_id}/packet_receipts/{sequence}
+
+
+
+
+
+
+
+ PacketAcknowledgement
+ GET
+ /ibc/core/channel/v1/channels/{channel_id}/ports/{port_id}/packet_acks/{sequence}
+
+
+
+
+
+
+
+ PacketAcknowledgements
+ GET
+ /ibc/core/channel/v1/channels/{channel_id}/ports/{port_id}/packet_acknowledgements
+
+
+
+
+
+
+
+ UnreceivedPackets
+ GET
+ /ibc/core/channel/v1/channels/{channel_id}/ports/{port_id}/packet_commitments/{packet_commitment_sequences}/unreceived_packets
+
+
+
+
+
+
+
+ UnreceivedAcks
+ GET
+ /ibc/core/channel/v1/channels/{channel_id}/ports/{port_id}/packet_commitments/{packet_ack_sequences}/unreceived_acks
+
+
+
+
+
+
+
+ NextSequenceReceive
+ GET
+ /ibc/core/channel/v1/channels/{channel_id}/ports/{port_id}/next_sequence
+
+
+
+
+
+
+
+
+
+
+
+
+## ibcgo/core/channel/v1/tx.proto
+
+
+
+
+ MsgAcknowledgement
+ MsgAcknowledgement receives incoming IBC acknowledgement
+
+
+
+
+
+
+
+
+ MsgAcknowledgementResponse
+ MsgAcknowledgementResponse defines the Msg/Acknowledgement response type.
+
+
+
+
+
+ MsgChannelCloseConfirm
+ MsgChannelCloseConfirm defines a msg sent by a Relayer to Chain B
to acknowledge the change of channel state to CLOSED on Chain A.
+
+
+
+
+
+
+
+
+ MsgChannelCloseConfirmResponse
+ MsgChannelCloseConfirmResponse defines the Msg/ChannelCloseConfirm response
type.
+
+
+
+
+
+ MsgChannelCloseInit
+ MsgChannelCloseInit defines a msg sent by a Relayer to Chain A
to close a channel with Chain B.
+
+
+
+
+ Field Type Label Description
+
+
+
+
+ port_id
+ string
+
+
+
+
+
+ channel_id
+ string
+
+
+
+
+
+ signer
+ string
+
+
+
+
+
+
+
+
+
+
+
+ MsgChannelCloseInitResponse
+ MsgChannelCloseInitResponse defines the Msg/ChannelCloseInit response type.
+
+
+
+
+
+ MsgChannelOpenAck
+ MsgChannelOpenAck defines a msg sent by a Relayer to Chain A to acknowledge
the change of channel state to TRYOPEN on Chain B.
+
+
+
+
+
+
+
+
+ MsgChannelOpenAckResponse
+ MsgChannelOpenAckResponse defines the Msg/ChannelOpenAck response type.
+
+
+
+
+
+ MsgChannelOpenConfirm
+ MsgChannelOpenConfirm defines a msg sent by a Relayer to Chain B to
acknowledge the change of channel state to OPEN on Chain A.
+
+
+
+
+
+
+
+
+ MsgChannelOpenConfirmResponse
+ MsgChannelOpenConfirmResponse defines the Msg/ChannelOpenConfirm response
type.
+
+
+
+
+
+ MsgChannelOpenInit
+ MsgChannelOpenInit defines an sdk.Msg to initialize a channel handshake. It
is called by a relayer on Chain A.
+
+
+
+
+ Field Type Label Description
+
+
+
+
+ port_id
+ string
+
+
+
+
+
+ channel
+ Channel
+
+
+
+
+
+ signer
+ string
+
+
+
+
+
+
+
+
+
+
+
+ MsgChannelOpenInitResponse
+ MsgChannelOpenInitResponse defines the Msg/ChannelOpenInit response type.
+
+
+
+
+
+ MsgChannelOpenTry
+ MsgChannelOpenTry defines a msg sent by a Relayer to try to open a channel
on Chain B.
+
+
+
+
+ Field Type Label Description
+
+
+
+
+ port_id
+ string
+
+
+
+
+
+ previous_channel_id
+ string
+
+ in the case of crossing hellos, when both chains call OpenInit, we need
+the channel identifier of the previous channel in state INIT
+
+
+
+ channel
+ Channel
+
+
+
+
+
+ counterparty_version
+ string
+
+
+
+
+
+ proof_init
+ bytes
+
+
+
+
+
+ proof_height
+ ibcgo.core.client.v1.Height
+
+
+
+
+
+ signer
+ string
+
+
+
+
+
+
+
+
+
+
+
+ MsgChannelOpenTryResponse
+ MsgChannelOpenTryResponse defines the Msg/ChannelOpenTry response type.
+
+
+
+
+
+ MsgRecvPacket
+ MsgRecvPacket receives incoming IBC packet
+
+
+
+
+
+
+
+
+ MsgRecvPacketResponse
+ MsgRecvPacketResponse defines the Msg/RecvPacket response type.
+
+
+
+
+
+ MsgTimeout
+ MsgTimeout receives timed-out packet
+
+
+
+
+
+
+
+
+ MsgTimeoutOnClose
+ MsgTimeoutOnClose receives a timed-out packet upon counterparty channel closure.
+
+
+
+
+
+
+
+
+ MsgTimeoutOnCloseResponse
+ MsgTimeoutOnCloseResponse defines the Msg/TimeoutOnClose response type.
+
+
+
+
+
+ MsgTimeoutResponse
+ MsgTimeoutResponse defines the Msg/Timeout response type.
+
+
+
+
+
+
+
+
+
+
+
+ Msg
+ Msg defines the ibc/channel Msg service.
+
+
+
+
+
+
+
+## ibcgo/core/client/v1/genesis.proto
+
+
+
+
+
+ GenesisMetadata
+ GenesisMetadata defines the genesis type for metadata that clients may return
with ExportMetadata
+
+
+
+
+ Field Type Label Description
+
+
+
+
+ key
+ bytes
+
+ store key of metadata without clientID-prefix
+
+
+
+ value
+ bytes
+
+ metadata value
+
+
+
+
+
+
+
+
+
+ GenesisState
+ GenesisState defines the ibc client submodule's genesis state.
+
+
+
+
+ Field Type Label Description
+
+
+
+
+ clients
+ IdentifiedClientState
+ repeated
+ client states with their corresponding identifiers
+
+
+
+ clients_consensus
+ ClientConsensusStates
+ repeated
+ consensus states from each client
+
+
+
+ clients_metadata
+ IdentifiedGenesisMetadata
+ repeated
+ metadata from each client
+
+
+
+ params
+ Params
+
+
+
+
+
+ create_localhost
+ bool
+
+ create localhost on initialization
+
+
+
+ next_client_sequence
+ uint64
+
+ the sequence for the next generated client identifier
+
+
+
+
+
+
+
+
+
+
+ IdentifiedGenesisMetadata
+ IdentifiedGenesisMetadata has the client metadata with the corresponding
client id.
+
+
+
+
+ Field Type Label Description
+
+
+
+
+ client_id
+ string
+
+
+
+
+
+ client_metadata
+ GenesisMetadata
+ repeated
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+## ibcgo/core/client/v1/query.proto
+
+
+
+
+ QueryClientParamsRequest
+ QueryClientParamsRequest is the request type for the Query/ClientParams RPC
method.
+
+
+
+
+
+ QueryClientParamsResponse
+ QueryClientParamsResponse is the response type for the Query/ClientParams RPC
method.
+
+
+
+
+ Field Type Label Description
+
+
+
+
+ params
+ Params
+
+ params defines the parameters of the module.
+
+
+
+
+
+
+
+
+
+ QueryClientStateRequest
+ QueryClientStateRequest is the request type for the Query/ClientState RPC
method
+
+
+
+
+ Field Type Label Description
+
+
+
+
+ client_id
+ string
+
+ client state unique identifier
+
+
+
+
+
+
+
+
+
+ QueryClientStateResponse
+ QueryClientStateResponse is the response type for the Query/ClientState RPC
method. Besides the client state, it includes a proof and the height from
which the proof was retrieved.
+
+
+
+
+ Field Type Label Description
+
+
+
+
+ client_state
+ google.protobuf.Any
+
+ client state associated with the request identifier
+
+
+
+ proof
+ bytes
+
+ merkle proof of existence
+
+
+
+ proof_height
+ Height
+
+ height at which the proof was retrieved
+
+
+
+
+
+
+
+
+
+ QueryClientStatesRequest
+ QueryClientStatesRequest is the request type for the Query/ClientStates RPC
method
+
+
+
+
+
+
+
+
+ QueryClientStatesResponse
+ QueryClientStatesResponse is the response type for the Query/ClientStates RPC
method.
+
+
+
+
+
+
+
+
+ QueryConsensusStateRequest
+ QueryConsensusStateRequest is the request type for the Query/ConsensusState
RPC method. Besides the consensus state, it includes a proof and the height
from which the proof was retrieved.
+
+
+
+
+ Field Type Label Description
+
+
+
+
+ client_id
+ string
+
+ client identifier
+
+
+
+ revision_number
+ uint64
+
+ consensus state revision number
+
+
+
+ revision_height
+ uint64
+
+ consensus state revision height
+
+
+
+ latest_height
+ bool
+
+ latest_height overrides the height field and queries the latest stored
+ConsensusState
+
+
+
+
+
+
+
+
+
+ QueryConsensusStateResponse
+ QueryConsensusStateResponse is the response type for the Query/ConsensusState
RPC method
+
+
+
+
+ Field Type Label Description
+
+
+
+
+ consensus_state
+ google.protobuf.Any
+
+ consensus state associated with the client identifier at the given height
+
+
+
+ proof
+ bytes
+
+ merkle proof of existence
+
+
+
+ proof_height
+ Height
+
+ height at which the proof was retrieved
+
+
+
+
+
+
+
+
+
+ QueryConsensusStatesRequest
+ QueryConsensusStatesRequest is the request type for the Query/ConsensusStates
RPC method.
+
+
+
+
+
+
+
+
+ QueryConsensusStatesResponse
+ QueryConsensusStatesResponse is the response type for the
Query/ConsensusStates RPC method
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Query
+ Query defines the gRPC querier service
+
+
+
+
+
+ Methods with HTTP bindings
+
+
+
+ Method Name
+ Method
+ Pattern
+ Body
+
+
+
+
+
+
+
+ ClientState
+ GET
+ /ibc/core/client/v1/client_states/{client_id}
+
+
+
+
+
+
+
+ ClientStates
+ GET
+ /ibc/core/client/v1/client_states
+
+
+
+
+
+
+
+ ConsensusState
+ GET
+ /ibc/core/client/v1/consensus_states/{client_id}/revision/{revision_number}/height/{revision_height}
+
+
+
+
+
+
+
+ ConsensusStates
+ GET
+ /ibc/core/client/v1/consensus_states/{client_id}
+
+
+
+
+
+
+
+ ClientParams
+ GET
+ /ibc/client/v1/params
+
+
+
+
+
+
+
+
+
+
+
+
+## ibcgo/core/client/v1/tx.proto
+
+
+
+
+ MsgCreateClient
+ MsgCreateClient defines a message to create an IBC client
+
+
+
+
+ Field Type Label Description
+
+
+
+
+ client_state
+ google.protobuf.Any
+
+ light client state
+
+
+
+ consensus_state
+ google.protobuf.Any
+
+ consensus state associated with the client that corresponds to a given
+height.
+
+
+
+ signer
+ string
+
+ signer address
+
+
+
+
+
+
+
+
+
+ MsgCreateClientResponse
+ MsgCreateClientResponse defines the Msg/CreateClient response type.
+
+
+
+
+
+ MsgSubmitMisbehaviour
+ MsgSubmitMisbehaviour defines an sdk.Msg type that submits Evidence for
light client misbehaviour.
+
+
+
+
+ Field Type Label Description
+
+
+
+
+ client_id
+ string
+
+ client unique identifier
+
+
+
+ misbehaviour
+ google.protobuf.Any
+
+ misbehaviour used for freezing the light client
+
+
+
+ signer
+ string
+
+ signer address
+
+
+
+
+
+
+
+
+
+ MsgSubmitMisbehaviourResponse
+ MsgSubmitMisbehaviourResponse defines the Msg/SubmitMisbehaviour response
type.
+
+
+
+
+
+ MsgUpdateClient
+ MsgUpdateClient defines an sdk.Msg to update an IBC client state using
the given header.
+
+
+
+
+ Field Type Label Description
+
+
+
+
+ client_id
+ string
+
+ client unique identifier
+
+
+
+ header
+ google.protobuf.Any
+
+ header to update the light client
+
+
+
+ signer
+ string
+
+ signer address
+
+
+
+
+
+
+
+
+
+ MsgUpdateClientResponse
+ MsgUpdateClientResponse defines the Msg/UpdateClient response type.
+
+
+
+
+
+ MsgUpgradeClient
+ MsgUpgradeClient defines an sdk.Msg to upgrade an IBC client to a new client
state
+
+
+
+
+ Field Type Label Description
+
+
+
+
+ client_id
+ string
+
+ client unique identifier
+
+
+
+ client_state
+ google.protobuf.Any
+
+ upgraded client state
+
+
+
+ consensus_state
+ google.protobuf.Any
+
+ upgraded consensus state, only contains enough information to serve as a
+basis of trust in update logic
+
+
+
+ proof_upgrade_client
+ bytes
+
+ proof that old chain committed to new client
+
+
+
+ proof_upgrade_consensus_state
+ bytes
+
+ proof that old chain committed to new consensus state
+
+
+
+ signer
+ string
+
+ signer address
+
+
+
+
+
+
+
+
+
+ MsgUpgradeClientResponse
+ MsgUpgradeClientResponse defines the Msg/UpgradeClient response type.
+
+
+
+
+
+
+
+
+
+
+
+ Msg
+ Msg defines the ibc/client Msg service.
+
+
+
+
+
+
+
+## ibcgo/core/commitment/v1/commitment.proto
+
+
+
+
+ MerklePath
+ MerklePath is the path used to verify commitment proofs, which can be an
arbitrary structured object (defined by a commitment type).
MerklePath is represented from root-to-leaf
+
+
+
+
+ Field Type Label Description
+
+
+
+
+ key_path
+ string
+ repeated
+
+
+
+
+
+
+
+
+
+
+ MerklePrefix
+ MerklePrefix is merkle path prefixed to the key.
The constructed key from the Path and the key will be append(Path.KeyPath,
append(Path.KeyPrefix, key...))
+
+
+
+
+ Field Type Label Description
+
+
+
+
+ key_prefix
+ bytes
+
+
+
+
+
+
+
+
+
+
+
+ MerkleProof
+ MerkleProof is a wrapper type over a chain of CommitmentProofs.
It demonstrates membership or non-membership for an element or set of
elements, verifiable in conjunction with a known commitment root. Proofs
should be succinct.
MerkleProofs are ordered from leaf-to-root
+
+
+
+
+
+
+
+
+ MerkleRoot
+ MerkleRoot defines a merkle root hash.
In the Cosmos SDK, the AppHash of a block header becomes the root.
+
+
+
+
+ Field Type Label Description
+
+
+
+
+ hash
+ bytes
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+## ibcgo/core/connection/v1/connection.proto
+
+
+
+
+ ClientPaths
+ ClientPaths define all the connection paths for a client state.
+
+
+
+
+ Field Type Label Description
+
+
+
+
+ paths
+ string
+ repeated
+ list of connection paths
+
+
+
+
+
+
+
+
+
+ ConnectionEnd
+ ConnectionEnd defines a stateful object on a chain connected to another
separate one.
NOTE: there must only be 2 defined ConnectionEnds to establish
a connection between two chains.
+
+
+
+
+ Field Type Label Description
+
+
+
+
+ client_id
+ string
+
+ client associated with this connection.
+
+
+
+ versions
+ Version
+ repeated
+ IBC version which can be utilised to determine encodings or protocols for
+channels or packets utilising this connection.
+
+
+
+ state
+ State
+
+ current state of the connection end.
+
+
+
+ counterparty
+ Counterparty
+
+ counterparty chain associated with this connection.
+
+
+
+ delay_period
+ uint64
+
+ delay period that must pass before a consensus state can be used for
+packet-verification. NOTE: delay period logic is only implemented by some
+clients.
+
+
+
+
+
+
+
+
+
+ ConnectionPaths
+ ConnectionPaths define all the connection paths for a given client state.
+
+
+
+
+ Field Type Label Description
+
+
+
+
+ client_id
+ string
+
+ client state unique identifier
+
+
+
+ paths
+ string
+ repeated
+ list of connection paths
+
+
+
+
+
+
+
+
+
+ Counterparty
+ Counterparty defines the counterparty chain associated with a connection end.
+
+
+
+
+ Field Type Label Description
+
+
+
+
+ client_id
+ string
+
+ identifies the client on the counterparty chain associated with a given
+connection.
+
+
+
+ connection_id
+ string
+
+ identifies the connection end on the counterparty chain associated with a
+given connection.
+
+
+
+ prefix
+ ibcgo.core.commitment.v1.MerklePrefix
+
+ commitment merkle prefix of the counterparty chain.
+
+
+
+
+
+
+
+
+
+ IdentifiedConnection
+ IdentifiedConnection defines a connection with additional connection
identifier field.
+
+
+
+
+ Field Type Label Description
+
+
+
+
+ id
+ string
+
+ connection identifier.
+
+
+
+ client_id
+ string
+
+ client associated with this connection.
+
+
+
+ versions
+ Version
+ repeated
+ IBC version which can be utilised to determine encodings or protocols for
+channels or packets utilising this connection
+
+
+
+ state
+ State
+
+ current state of the connection end.
+
+
+
+ counterparty
+ Counterparty
+
+ counterparty chain associated with this connection.
+
+
+
+ delay_period
+ uint64
+
+ delay period associated with this connection.
+
+
+
+
+
+
+
+
+
+ Version
+ Version defines the versioning scheme used to negotiate the IBC version in
the connection handshake.
+
+
+
+
+ Field Type Label Description
+
+
+
+
+ identifier
+ string
+
+ unique version identifier
+
+
+
+ features
+ string
+ repeated
+ list of features compatible with the specified identifier
+
+
+
+
+
+
+
+
+
+
+
+ State
+ State defines if a connection is in one of the following states:
INIT, TRYOPEN, OPEN or UNINITIALIZED.
+
+
+ Name Number Description
+
+
+
+
+ STATE_UNINITIALIZED_UNSPECIFIED
+ 0
+ Default State
+
+
+
+ STATE_INIT
+ 1
+ A connection end has just started the opening handshake.
+
+
+
+ STATE_TRYOPEN
+ 2
+ A connection end has acknowledged the handshake step on the counterparty
+chain.
+
+
+
+ STATE_OPEN
+ 3
+ A connection end has completed the handshake.
+
+
+
+
+
+
+
+
+
+
+
+
+
+## ibcgo/core/connection/v1/genesis.proto
+
+
+
+
+ GenesisState
+ GenesisState defines the ibc connection submodule's genesis state.
+
+
+
+
+ Field Type Label Description
+
+
+
+
+ connections
+ IdentifiedConnection
+ repeated
+
+
+
+
+ client_connection_paths
+ ConnectionPaths
+ repeated
+
+
+
+
+ next_connection_sequence
+ uint64
+
+ the sequence for the next generated connection identifier
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ ibcgo/core/connection/v1/query.proto
+
+ QueryClientConnectionsRequest
+ QueryClientConnectionsRequest is the request type for the
+ Query/ClientConnections RPC method.
+
+ Field | Type | Label | Description
+ ----- | ---- | ----- | -----------
+ client_id | string | | client identifier associated with a connection
+
+
+ QueryClientConnectionsResponse
+ QueryClientConnectionsResponse is the response type for the
+ Query/ClientConnections RPC method.
+
+ Field | Type | Label | Description
+ ----- | ---- | ----- | -----------
+ connection_paths | string | repeated | slice of all the connection paths associated with a client.
+ proof | bytes | | merkle proof of existence
+ proof_height | ibcgo.core.client.v1.Height | | height at which the proof was generated
+
+
+ QueryConnectionClientStateRequest
+ QueryConnectionClientStateRequest is the request type for the
+ Query/ConnectionClientState RPC method.
+
+ Field | Type | Label | Description
+ ----- | ---- | ----- | -----------
+ connection_id | string | | connection identifier
+
+
+ QueryConnectionClientStateResponse
+ QueryConnectionClientStateResponse is the response type for the
+ Query/ConnectionClientState RPC method.
+
+
+ QueryConnectionConsensusStateRequest
+ QueryConnectionConsensusStateRequest is the request type for the
+ Query/ConnectionConsensusState RPC method.
+
+ Field | Type | Label | Description
+ ----- | ---- | ----- | -----------
+ connection_id | string | | connection identifier
+ revision_number | uint64 | |
+ revision_height | uint64 | |
+
+
+ QueryConnectionConsensusStateResponse
+ QueryConnectionConsensusStateResponse is the response type for the
+ Query/ConnectionConsensusState RPC method.
+
+ Field | Type | Label | Description
+ ----- | ---- | ----- | -----------
+ consensus_state | google.protobuf.Any | | consensus state associated with the channel
+ client_id | string | | client ID associated with the consensus state
+ proof | bytes | | merkle proof of existence
+ proof_height | ibcgo.core.client.v1.Height | | height at which the proof was retrieved
+
+
+ QueryConnectionRequest
+ QueryConnectionRequest is the request type for the Query/Connection RPC
+ method.
+
+ Field | Type | Label | Description
+ ----- | ---- | ----- | -----------
+ connection_id | string | | connection unique identifier
+
+
+ QueryConnectionResponse
+ QueryConnectionResponse is the response type for the Query/Connection RPC
+ method. Besides the connection end, it includes a proof and the height from
+ which the proof was retrieved.
+
+ Field | Type | Label | Description
+ ----- | ---- | ----- | -----------
+ connection | ConnectionEnd | | connection associated with the request identifier
+ proof | bytes | | merkle proof of existence
+ proof_height | ibcgo.core.client.v1.Height | | height at which the proof was retrieved
+
+
+ QueryConnectionsRequest
+ QueryConnectionsRequest is the request type for the Query/Connections RPC
+ method.
+
+
+ QueryConnectionsResponse
+ QueryConnectionsResponse is the response type for the Query/Connections RPC
+ method.
+
+
+ Query
+ Query defines the gRPC querier service.
+
+ Methods with HTTP bindings
+
+ Method Name | Method | Pattern | Body
+ ----------- | ------ | ------- | ----
+ Connection | GET | /ibc/core/connection/v1/connections/{connection_id} |
+ Connections | GET | /ibc/core/connection/v1/connections |
+ ClientConnections | GET | /ibc/core/connection/v1/client_connections/{client_id} |
+ ConnectionClientState | GET | /ibc/core/connection/v1/connections/{connection_id}/client_state |
+ ConnectionConsensusState | GET | /ibc/core/connection/v1/connections/{connection_id}/consensus_state/revision/{revision_number}/height/{revision_height} |
+
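Because the Query service above is exposed through gRPC-gateway HTTP bindings, a connection end can be fetched with a plain HTTP GET against the documented pattern. A minimal sketch follows; only the URL path comes from the bindings table, while the host/port (a locally running node with its API server enabled) and the generic JSON decoding are assumptions for illustration.

```go
package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	// Path taken from the HTTP bindings table above; localhost:1317 is an
	// assumed API endpoint of a locally running node.
	url := "http://localhost:1317/ibc/core/connection/v1/connections/connection-0"

	resp, err := http.Get(url)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}

	// Decode into a generic map rather than the generated
	// QueryConnectionResponse type, to keep the sketch self-contained.
	var out map[string]interface{}
	if err := json.Unmarshal(body, &out); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", out)
}
```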
+ ibcgo/core/connection/v1/tx.proto
+
+ MsgConnectionOpenAck
+ MsgConnectionOpenAck defines a msg sent by a Relayer to Chain A to
+ acknowledge the change of connection state to TRYOPEN on Chain B.
+
+ Field | Type | Label | Description
+ ----- | ---- | ----- | -----------
+ connection_id | string | |
+ counterparty_connection_id | string | |
+ version | Version | |
+ client_state | google.protobuf.Any | |
+ proof_height | ibcgo.core.client.v1.Height | |
+ proof_try | bytes | | proof of the initialization of the connection on Chain B: `UNINITIALIZED -> TRYOPEN`
+ proof_client | bytes | | proof of client state included in message
+ proof_consensus | bytes | | proof of client consensus state
+ consensus_height | ibcgo.core.client.v1.Height | |
+ signer | string | |
+
+
+ MsgConnectionOpenAckResponse
+ MsgConnectionOpenAckResponse defines the Msg/ConnectionOpenAck response type.
+
+
+ MsgConnectionOpenConfirm
+ MsgConnectionOpenConfirm defines a msg sent by a Relayer to Chain B to
+ acknowledge the change of connection state to OPEN on Chain A.
+
+ Field | Type | Label | Description
+ ----- | ---- | ----- | -----------
+ connection_id | string | |
+ proof_ack | bytes | | proof for the change of the connection state on Chain A: `INIT -> OPEN`
+ proof_height | ibcgo.core.client.v1.Height | |
+ signer | string | |
+
+
+ MsgConnectionOpenConfirmResponse
+ MsgConnectionOpenConfirmResponse defines the Msg/ConnectionOpenConfirm
+ response type.
+
+
+ MsgConnectionOpenInit
+ MsgConnectionOpenInit defines the msg sent by an account on Chain A to
+ initialize a connection with Chain B.
+
+ Field | Type | Label | Description
+ ----- | ---- | ----- | -----------
+ client_id | string | |
+ counterparty | Counterparty | |
+ version | Version | |
+ delay_period | uint64 | |
+ signer | string | |
+
+
+ MsgConnectionOpenInitResponse
+ MsgConnectionOpenInitResponse defines the Msg/ConnectionOpenInit response
+ type.
+
+
+ MsgConnectionOpenTry
+ MsgConnectionOpenTry defines a msg sent by a Relayer to try to open a
+ connection on Chain B.
+
+ Field | Type | Label | Description
+ ----- | ---- | ----- | -----------
+ client_id | string | |
+ previous_connection_id | string | | in the case of crossing hellos, when both chains call OpenInit, we need the connection identifier of the previous connection in state INIT
+ client_state | google.protobuf.Any | |
+ counterparty | Counterparty | |
+ delay_period | uint64 | |
+ counterparty_versions | Version | repeated |
+ proof_height | ibcgo.core.client.v1.Height | |
+ proof_init | bytes | | proof of the initialization of the connection on Chain A: `UNINITIALIZED -> INIT`
+ proof_client | bytes | | proof of client state included in message
+ proof_consensus | bytes | | proof of client consensus state
+ consensus_height | ibcgo.core.client.v1.Height | |
+ signer | string | |
+
+
+ MsgConnectionOpenTryResponse
+ MsgConnectionOpenTryResponse defines the Msg/ConnectionOpenTry response type.
+
+
+ Msg
+ Msg defines the ibc/connection Msg service.
+
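Taken together, the four Msg types above describe the connection opening handshake. The following sketch only restates, in order, which chain each documented message is submitted to; the step list is derived directly from the message descriptions above, and the relayer logic that actually builds and submits the messages is not shown.

```go
package main

import "fmt"

// handshakeStep records one connection-handshake message as documented above:
// which chain it is submitted to and what it accomplishes.
type handshakeStep struct {
	msg   string
	chain string
	note  string
}

func main() {
	steps := []handshakeStep{
		{"MsgConnectionOpenInit", "Chain A", "initialize a connection with Chain B"},
		{"MsgConnectionOpenTry", "Chain B", "try to open a connection on Chain B"},
		{"MsgConnectionOpenAck", "Chain A", "acknowledge the TRYOPEN state on Chain B"},
		{"MsgConnectionOpenConfirm", "Chain B", "acknowledge the OPEN state on Chain A"},
	}
	for i, s := range steps {
		fmt.Printf("%d. %-24s -> %s: %s\n", i+1, s.msg, s.chain, s.note)
	}
}
```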
+ ibcgo/core/types/v1/genesis.proto
+
+ GenesisState
+ GenesisState defines the ibc module's genesis state.
+
+
+ ibcgo/lightclients/localhost/v1/localhost.proto
+
+ ClientState
+ ClientState defines a loopback (localhost) client. It requires (read-only)
+ access to keys outside the client prefix.
+
+
+ ibcgo/lightclients/solomachine/v1/solomachine.proto
+
+ ChannelStateData
+ ChannelStateData returns the SignBytes data for channel state verification.
+
+
+ ClientState
+ ClientState defines a solo machine client that tracks the current consensus
+ state and if the client is frozen.
+
+ Field | Type | Label | Description
+ ----- | ---- | ----- | -----------
+ sequence | uint64 | | latest sequence of the client state
+ frozen_sequence | uint64 | | frozen sequence of the solo machine
+ consensus_state | ConsensusState | |
+ allow_update_after_proposal | bool | | when set to true, will allow governance to update a solo machine client. The client will be unfrozen if it is frozen.
+
+
+ ClientStateData
+ ClientStateData returns the SignBytes data for client state verification.
+
+
+ ConnectionStateData
+ ConnectionStateData returns the SignBytes data for connection state
+ verification.
+
+
+ ConsensusState
+ ConsensusState defines a solo machine consensus state. The sequence of a
+ consensus state is contained in the "height" key used in storing the
+ consensus state.
+
+ Field | Type | Label | Description
+ ----- | ---- | ----- | -----------
+ public_key | google.protobuf.Any | | public key of the solo machine
+ diversifier | string | | diversifier allows the same public key to be re-used across different solo machine clients (potentially on different chains) without being considered misbehaviour.
+ timestamp | uint64 | |
+
+
+ ConsensusStateData
+ ConsensusStateData returns the SignBytes data for consensus state
+ verification.
+
+
+ Header
+ Header defines a solo machine consensus header.
+
+ Field | Type | Label | Description
+ ----- | ---- | ----- | -----------
+ sequence | uint64 | | sequence to update solo machine public key at
+ timestamp | uint64 | |
+ signature | bytes | |
+ new_public_key | google.protobuf.Any | |
+ new_diversifier | string | |
+
+
+ HeaderData
+ HeaderData returns the SignBytes data for update verification.
+
+ Field | Type | Label | Description
+ ----- | ---- | ----- | -----------
+ new_pub_key | google.protobuf.Any | | header public key
+ new_diversifier | string | | header diversifier
+
+
+ Misbehaviour
+ Misbehaviour defines misbehaviour for a solo machine which consists
+ of a sequence and two signatures over different messages at that sequence.
+
+
+ NextSequenceRecvData
+ NextSequenceRecvData returns the SignBytes data for verification of the next
+ sequence to be received.
+
+ Field | Type | Label | Description
+ ----- | ---- | ----- | -----------
+ path | bytes | |
+ next_seq_recv | uint64 | |
+
+
+ PacketAcknowledgementData
+ PacketAcknowledgementData returns the SignBytes data for acknowledgement
+ verification.
+
+ Field | Type | Label | Description
+ ----- | ---- | ----- | -----------
+ path | bytes | |
+ acknowledgement | bytes | |
+
+
+ PacketCommitmentData
+ PacketCommitmentData returns the SignBytes data for packet commitment
+ verification.
+
+ Field | Type | Label | Description
+ ----- | ---- | ----- | -----------
+ path | bytes | |
+ commitment | bytes | |
+
+
+ PacketReceiptAbsenceData
+ PacketReceiptAbsenceData returns the SignBytes data for
+ packet receipt absence verification.
+
+ Field | Type | Label | Description
+ ----- | ---- | ----- | -----------
+ path | bytes | |
+
+
+ SignBytes
+ SignBytes defines the signed bytes used for signature verification.
+
+ Field | Type | Label | Description
+ ----- | ---- | ----- | -----------
+ sequence | uint64 | |
+ timestamp | uint64 | |
+ diversifier | string | |
+ data_type | DataType | | type of the data used
+ data | bytes | | marshaled data
+
+
+ SignatureAndData
+ SignatureAndData contains a signature and the data signed over to create that
+ signature.
+
+ Field | Type | Label | Description
+ ----- | ---- | ----- | -----------
+ signature | bytes | |
+ data_type | DataType | |
+ data | bytes | |
+ timestamp | uint64 | |
+
+
+ TimestampedSignatureData
+ TimestampedSignatureData contains the signature data and the timestamp of the
+ signature.
+
+ Field | Type | Label | Description
+ ----- | ---- | ----- | -----------
+ signature_data | bytes | |
+ timestamp | uint64 | |
+
+
+ DataType
+ DataType defines the type of solo machine proof being created. This is done
+ to preserve uniqueness of different data sign byte encodings.
+
+ Name | Number | Description
+ ---- | ------ | -----------
+ DATA_TYPE_UNINITIALIZED_UNSPECIFIED | 0 | Default State
+ DATA_TYPE_CLIENT_STATE | 1 | Data type for client state verification
+ DATA_TYPE_CONSENSUS_STATE | 2 | Data type for consensus state verification
+ DATA_TYPE_CONNECTION_STATE | 3 | Data type for connection state verification
+ DATA_TYPE_CHANNEL_STATE | 4 | Data type for channel state verification
+ DATA_TYPE_PACKET_COMMITMENT | 5 | Data type for packet commitment verification
+ DATA_TYPE_PACKET_ACKNOWLEDGEMENT | 6 | Data type for packet acknowledgement verification
+ DATA_TYPE_PACKET_RECEIPT_ABSENCE | 7 | Data type for packet receipt absence verification
+ DATA_TYPE_NEXT_SEQUENCE_RECV | 8 | Data type for next sequence recv verification
+ DATA_TYPE_HEADER | 9 | Data type for header verification
+
+
+ ibcgo/lightclients/tendermint/v1/tendermint.proto
+
+ ClientState
+ ClientState from Tendermint tracks the current validator set, latest height,
+ and a possible frozen height.
+
+ Field | Type | Label | Description
+ ----- | ---- | ----- | -----------
+ chain_id | string | |
+ trust_level | Fraction | |
+ trusting_period | google.protobuf.Duration | | duration of the period since the LatestTimestamp during which the submitted headers are valid for upgrade
+ unbonding_period | google.protobuf.Duration | | duration of the staking unbonding period
+ max_clock_drift | google.protobuf.Duration | | defines how much a new (untrusted) header's Time can drift into the future.
+ frozen_height | ibcgo.core.client.v1.Height | | Block height when the client was frozen due to a misbehaviour
+ latest_height | ibcgo.core.client.v1.Height | | Latest height the client was updated to
+ proof_specs | ics23.ProofSpec | repeated | Proof specifications used in verifying counterparty state
+ upgrade_path | string | repeated | Path at which the next upgraded client will be committed. Each element corresponds to the key for a single CommitmentProof in the chained proof. NOTE: ClientState must be stored under `{upgradePath}/{upgradeHeight}/clientState` and ConsensusState must be stored under `{upgradePath}/{upgradeHeight}/consensusState`. For SDK chains using the default upgrade module, upgrade_path should be []string{"upgrade", "upgradedIBCState"}
+ allow_update_after_expiry | bool | | This flag, when set to true, will allow governance to recover a client which has expired
+ allow_update_after_misbehaviour | bool | | This flag, when set to true, will allow governance to unfreeze a client whose chain has experienced a misbehaviour event
+
+
+ ConsensusState
+ ConsensusState defines the consensus state from Tendermint.
+
+
+ Fraction
+ Fraction defines the protobuf message type for tmmath.Fraction that only
+ supports positive values.
+
+ Field | Type | Label | Description
+ ----- | ---- | ----- | -----------
+ numerator | uint64 | |
+ denominator | uint64 | |
+
+
+ Header
+ Header defines the Tendermint client consensus Header.
+ It encapsulates all the information necessary to update from a trusted
+ Tendermint ConsensusState. The inclusion of TrustedHeight and
+ TrustedValidators allows this update to process correctly, so long as the
+ ConsensusState for the TrustedHeight exists; this removes race conditions
+ among relayers. The SignedHeader and ValidatorSet are the new untrusted update
+ fields for the client. The TrustedHeight is the height of a stored
+ ConsensusState on the client that will be used to verify the new untrusted
+ header. The Trusted ConsensusState must be within the unbonding period of
+ current time in order to correctly verify, and the TrustedValidators must
+ hash to TrustedConsensusState.NextValidatorsHash since that is the last
+ trusted validator set at the TrustedHeight.
+
+
+ Misbehaviour
+ Misbehaviour is a wrapper over two conflicting Headers
+ that implements the Misbehaviour interface expected by ICS-02.
+
+ Field | Type | Label | Description
+ ----- | ---- | ----- | -----------
+ client_id | string | |
+ header_1 | Header | |
+ header_2 | Header | |
+
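To illustrate how the duration fields above are typically used, here is a hedged Go sketch of an expiry-style check built only from the documented trusting_period and max_clock_drift fields. The exact rules the Tendermint light client applies live in the client implementation, not here; the values and the two checks below are assumptions made for illustration.

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Placeholder values standing in for the documented Duration fields.
	trustingPeriod := 14 * 24 * time.Hour // trusting_period
	maxClockDrift := 10 * time.Second     // max_clock_drift

	// Placeholder timestamps: the latest stored consensus state and the
	// time carried by a newly submitted header.
	latestConsensusTime := time.Now().Add(-13 * 24 * time.Hour)
	headerTime := time.Now().Add(5 * time.Second)

	// A trusted consensus state older than trusting_period can no longer
	// serve as the basis for verifying new headers.
	expired := time.Since(latestConsensusTime) > trustingPeriod

	// A header whose time lies beyond now + max_clock_drift is too far in
	// the future to accept.
	tooFarInFuture := headerTime.After(time.Now().Add(maxClockDrift))

	fmt.Println("trusted state expired:", expired)
	fmt.Println("header beyond clock drift:", tooFarInFuture)
}
```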
+ Scalar Value Types
+
+ .proto Type | Notes | C++ | Java | Python | Go | C# | PHP | Ruby
+ ----------- | ----- | --- | ---- | ------ | -- | -- | --- | ----
+ double | | double | double | float | float64 | double | float | Float
+ float | | float | float | float | float32 | float | float | Float
+ int32 | Uses variable-length encoding. Inefficient for encoding negative numbers – if your field is likely to have negative values, use sint32 instead. | int32 | int | int | int32 | int | integer | Bignum or Fixnum (as required)
+ int64 | Uses variable-length encoding. Inefficient for encoding negative numbers – if your field is likely to have negative values, use sint64 instead. | int64 | long | int/long | int64 | long | integer/string | Bignum
+ uint32 | Uses variable-length encoding. | uint32 | int | int/long | uint32 | uint | integer | Bignum or Fixnum (as required)
+ uint64 | Uses variable-length encoding. | uint64 | long | int/long | uint64 | ulong | integer/string | Bignum or Fixnum (as required)
+ sint32 | Uses variable-length encoding. Signed int value. These more efficiently encode negative numbers than regular int32s. | int32 | int | int | int32 | int | integer | Bignum or Fixnum (as required)
+ sint64 | Uses variable-length encoding. Signed int value. These more efficiently encode negative numbers than regular int64s. | int64 | long | int/long | int64 | long | integer/string | Bignum
+ fixed32 | Always four bytes. More efficient than uint32 if values are often greater than 2^28. | uint32 | int | int | uint32 | uint | integer | Bignum or Fixnum (as required)
+ fixed64 | Always eight bytes. More efficient than uint64 if values are often greater than 2^56. | uint64 | long | int/long | uint64 | ulong | integer/string | Bignum
+ sfixed32 | Always four bytes. | int32 | int | int | int32 | int | integer | Bignum or Fixnum (as required)
+ sfixed64 | Always eight bytes. | int64 | long | int/long | int64 | long | integer/string | Bignum
+ bool | | bool | boolean | boolean | bool | bool | boolean | TrueClass/FalseClass
+ string | A string must always contain UTF-8 encoded or 7-bit ASCII text. | string | String | str/unicode | string | string | string | String (UTF-8)
+ bytes | May contain any arbitrary sequence of bytes. | string | ByteString | str | []byte | ByteString | string | String (ASCII-8BIT)
+
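The int32/int64 rows above note that plain varints encode negative numbers inefficiently, which is why the ZigZag-encoded sint32/sint64 types exist. A small self-contained Go sketch of the size difference, using hand-rolled varint and ZigZag helpers rather than a protobuf library:

```go
package main

import "fmt"

// varintLen returns how many bytes a value occupies in base-128 varint form.
func varintLen(v uint64) int {
	n := 1
	for v >= 0x80 {
		v >>= 7
		n++
	}
	return n
}

// zigzag maps signed integers to unsigned ones so that values near zero stay
// small: 0, -1, 1, -2, 2, ... -> 0, 1, 2, 3, 4, ...
func zigzag(v int64) uint64 {
	return uint64(v<<1) ^ uint64(v>>63)
}

func main() {
	v := int64(-1)
	// int64 fields encode the two's-complement value as a varint: 10 bytes.
	fmt.Println("int64  -1 varint bytes:", varintLen(uint64(v)))
	// sint64 fields ZigZag-encode first, so -1 becomes 1: a single byte.
	fmt.Println("sint64 -1 varint bytes:", varintLen(zigzag(v)))
}
```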
diff --git a/docs/protodoc-markdown.tmpl b/docs/protodoc-markdown.tmpl
new file mode 100644
index 00000000..e69de29b
diff --git a/go.mod b/go.mod
new file mode 100644
index 00000000..34d93acc
--- /dev/null
+++ b/go.mod
@@ -0,0 +1,23 @@
+go 1.15
+
+module github.com/cosmos/ibc-go
+
+replace github.com/gogo/protobuf => github.com/regen-network/protobuf v1.3.3-alpha.regen.1
+
+require (
+ github.com/armon/go-metrics v0.3.6
+ github.com/confio/ics23/go v0.6.3
+ github.com/cosmos/cosmos-sdk v0.40.0-rc7.0.20210222142146-c2be53a44733
+ github.com/gogo/protobuf v1.3.3
+ github.com/golang/protobuf v1.4.3
+ github.com/gorilla/mux v1.8.0
+ github.com/grpc-ecosystem/grpc-gateway v1.16.0
+ github.com/pkg/errors v0.9.1
+ github.com/spf13/cobra v1.1.3
+ github.com/spf13/pflag v1.0.5
+ github.com/stretchr/testify v1.7.0
+ github.com/tendermint/tendermint v0.34.7
+ github.com/tendermint/tm-db v0.6.4
+ google.golang.org/genproto v0.0.0-20210114201628-6edceaf6022f
+ google.golang.org/grpc v1.35.0
+)
diff --git a/go.sum b/go.sum
new file mode 100644
index 00000000..667a3c40
--- /dev/null
+++ b/go.sum
@@ -0,0 +1,1035 @@
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
+cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
+cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
+cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
+cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
+cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
+cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
+cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
+cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
+dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
+github.com/99designs/keyring v1.1.6 h1:kVDC2uCgVwecxCk+9zoCt2uEL6dt+dfVzMvGgnVcIuM=
+github.com/99designs/keyring v1.1.6/go.mod h1:16e0ds7LGQQcT59QqkTg72Hh5ShM51Byv5PEmW6uoRU=
+github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4=
+github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc=
+github.com/Azure/azure-storage-blob-go v0.7.0/go.mod h1:f9YQKtsG1nMisotuTPpO0tjNuEjKRYAcJU8/ydDI++4=
+github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
+github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
+github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc=
+github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
+github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g=
+github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
+github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
+github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM=
+github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
+github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
+github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/ChainSafe/go-schnorrkel v0.0.0-20200405005733-88cbf1b4c40d h1:nalkkPQcITbvhmL4+C4cKA87NW0tfm3Kl9VXRoPywFg=
+github.com/ChainSafe/go-schnorrkel v0.0.0-20200405005733-88cbf1b4c40d/go.mod h1:URdX5+vg25ts3aCh8H5IFZybJYKWhJHYMTnf+ULtoC4=
+github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
+github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
+github.com/DataDog/zstd v1.4.5 h1:EndNeuB0l9syBZhut0wns3gV1hL8zX8LIu6ZiVHWLIQ=
+github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
+github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
+github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE=
+github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
+github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
+github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
+github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
+github.com/VictoriaMetrics/fastcache v1.5.7/go.mod h1:ptDBkNMQI4RtmVo8VS/XwRY6RoTu1dAWCbrk+6WsEM8=
+github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE=
+github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
+github.com/Workiva/go-datastructures v1.0.52 h1:PLSK6pwn8mYdaoaCZEMsXBpBotr4HHn9abU0yMQt0NI=
+github.com/Workiva/go-datastructures v1.0.52/go.mod h1:Z+F2Rca0qCsVYDS8z7bAGm8f3UkzuWYS/oBZz5a7VVA=
+github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
+github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c=
+github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
+github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
+github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
+github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
+github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
+github.com/aristanetworks/goarista v0.0.0-20170210015632-ea17b1a17847/go.mod h1:D/tb0zPVXnP7fmsLZjtdUhSsumbK/ij54UXjjVgMGxQ=
+github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
+github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
+github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
+github.com/armon/go-metrics v0.3.6 h1:x/tmtOF9cDBoXH7XoAGOz2qqm1DknFD1590XmD/DUJ8=
+github.com/armon/go-metrics v0.3.6/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc=
+github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
+github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A=
+github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU=
+github.com/aws/aws-sdk-go v1.25.48/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
+github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
+github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
+github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
+github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
+github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY=
+github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
+github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
+github.com/btcsuite/btcd v0.0.0-20171128150713-2e60448ffcc6/go.mod h1:Dmm/EzmjnCiweXmzRIAiUWCInVmPgjkzgv5k4tVyXiQ=
+github.com/btcsuite/btcd v0.0.0-20190115013929-ed77733ec07d/go.mod h1:d3C0AkH6BRcvO8T0UEPu53cnw4IbV63x1bEjildYhO0=
+github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
+github.com/btcsuite/btcd v0.21.0-beta h1:At9hIZdJW0s9E/fAz28nrz6AmcNlSVucCH796ZteX1M=
+github.com/btcsuite/btcd v0.21.0-beta/go.mod h1:ZSWyehm27aAuS9bvkATT+Xte3hjHZ+MRgMY/8NJ7K94=
+github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
+github.com/btcsuite/btcutil v0.0.0-20180706230648-ab6388e0c60a/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
+github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
+github.com/btcsuite/btcutil v1.0.2 h1:9iZ1Terx9fMIOtq1VrwdqfsATL9MC2l8ZrUY6YZ2uts=
+github.com/btcsuite/btcutil v1.0.2/go.mod h1:j9HUFwoQRsZL3V4n+qG+CUnEGHOarIxfC3Le2Yhbcts=
+github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg=
+github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY=
+github.com/btcsuite/goleveldb v1.0.0/go.mod h1:QiK9vBlgftBg6rWQIj6wFzbPfRjiykIEhBH4obrXJ/I=
+github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
+github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
+github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
+github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
+github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=
+github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
+github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
+github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
+github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
+github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
+github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
+github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
+github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cloudflare/cloudflare-go v0.10.2-0.20190916151808-a80f83b9add9/go.mod h1:1MxXX1Ux4x6mqPmjkUgTP1CdXIBXKX7T+Jk9Gxrmx+U=
+github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
+github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
+github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
+github.com/coinbase/rosetta-sdk-go v0.5.8/go.mod h1:xd4wYUhV3LkY78SPH8BUhc88rXfn2jYgN9BfiSjbcvM=
+github.com/coinbase/rosetta-sdk-go v0.5.9 h1:CuGQE3HFmYwdEACJnuOtVI9cofqPsGvq6FdFIzaOPKI=
+github.com/coinbase/rosetta-sdk-go v0.5.9/go.mod h1:xd4wYUhV3LkY78SPH8BUhc88rXfn2jYgN9BfiSjbcvM=
+github.com/confio/ics23/go v0.0.0-20200817220745-f173e6211efb/go.mod h1:E45NqnlpxGnpfTWL/xauN7MRwEE28T4Dd4uraToOaKg=
+github.com/confio/ics23/go v0.6.3 h1:PuGK2V1NJWZ8sSkNDq91jgT/cahFEW9RGp4Y5jxulf0=
+github.com/confio/ics23/go v0.6.3/go.mod h1:E45NqnlpxGnpfTWL/xauN7MRwEE28T4Dd4uraToOaKg=
+github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
+github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
+github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
+github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
+github.com/cosmos/cosmos-sdk v0.40.0-rc7.0.20210222142146-c2be53a44733 h1:C6B8uY33CgpL3yJt1vxOUFRPDtAjEIjbaFLL0av/8Y0=
+github.com/cosmos/cosmos-sdk v0.40.0-rc7.0.20210222142146-c2be53a44733/go.mod h1:J7BQ+xrmuiF5xG+F/Ep+d30XUQmlpIjcPX4Lp0u4oks=
+github.com/cosmos/go-bip39 v0.0.0-20180819234021-555e2067c45d/go.mod h1:tSxLoYXyBmiFeKpvmq4dzayMdCjCnu8uqmCysIGBT2Y=
+github.com/cosmos/go-bip39 v1.0.0 h1:pcomnQdrdH22njcAatO0yWojsUnCO3y2tNoV1cb6hHY=
+github.com/cosmos/go-bip39 v1.0.0/go.mod h1:RNJv0H/pOIVgxw6KS7QeX2a0Uo0aKUlfhZ4xuwvCdJw=
+github.com/cosmos/iavl v0.15.0-rc3.0.20201009144442-230e9bdf52cd/go.mod h1:3xOIaNNX19p0QrX0VqWa6voPRoJRGGYtny+DH8NEPvE=
+github.com/cosmos/iavl v0.15.0-rc5/go.mod h1:WqoPL9yPTQ85QBMT45OOUzPxG/U/JcJoN7uMjgxke/I=
+github.com/cosmos/iavl v0.15.3 h1:xE9r6HW8GeKeoYJN4zefpljZ1oukVScP/7M8oj6SUts=
+github.com/cosmos/iavl v0.15.3/go.mod h1:OLjQiAQ4fGD2KDZooyJG9yz+p2ao2IAYSbke8mVvSA4=
+github.com/cosmos/ledger-cosmos-go v0.11.1 h1:9JIYsGnXP613pb2vPjFeMMjBI5lEDsEaF6oYorTy6J4=
+github.com/cosmos/ledger-cosmos-go v0.11.1/go.mod h1:J8//BsAGTo3OC/vDLjMRFLW6q0WAaXvHnVc7ZmE8iUY=
+github.com/cosmos/ledger-go v0.9.2 h1:Nnao/dLwaVTk1Q5U9THldpUMMXU94BOTWPddSmVB6pI=
+github.com/cosmos/ledger-go v0.9.2/go.mod h1:oZJ2hHAZROdlHiwTg4t7kP+GKIIkBT+o6c9QWFanOyI=
+github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
+github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
+github.com/danieljoos/wincred v1.0.2 h1:zf4bhty2iLuwgjgpraD2E9UbvO+fe54XXGJbOwe23fU=
+github.com/danieljoos/wincred v1.0.2/go.mod h1:SnuYRW9lp1oJrZX/dXJqr0cPK5gYXqx3EJbmjhLdK9U=
+github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ=
+github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218=
+github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f h1:U5y3Y5UE0w7amNe7Z5G/twsBW0KEalRQXZzf8ufSh9I=
+github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f/go.mod h1:xH/i4TFMt8koVQZ6WFms69WAsDWr2XsYL3Hkl7jkoLE=
+github.com/dgraph-io/badger/v2 v2.2007.1/go.mod h1:26P/7fbL4kUZVEVKLAKXkBXKOydDmM2p1e+NhhnBCAE=
+github.com/dgraph-io/badger/v2 v2.2007.2 h1:EjjK0KqwaFMlPin1ajhP943VPENHJdEz1KLIegjaI3k=
+github.com/dgraph-io/badger/v2 v2.2007.2/go.mod h1:26P/7fbL4kUZVEVKLAKXkBXKOydDmM2p1e+NhhnBCAE=
+github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E=
+github.com/dgraph-io/ristretto v0.0.3 h1:jh22xisGBjrEVnRZ1DVTpBVQm0Xndu8sMl0CWDzSIBI=
+github.com/dgraph-io/ristretto v0.0.3/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
+github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y=
+github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
+github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
+github.com/dlclark/regexp2 v1.2.0/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc=
+github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/dop251/goja v0.0.0-20200721192441-a695b0cdd498/go.mod h1:Mw6PkjjMXWbTj+nnj4s3QPXq1jaT0s5pC0iFD4+BOAA=
+github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
+github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/dvsekhvalnov/jose2go v0.0.0-20200901110807-248326c1351b h1:HBah4D48ypg3J7Np4N+HY/ZR76fx3HEUGxDU6Uk39oQ=
+github.com/dvsekhvalnov/jose2go v0.0.0-20200901110807-248326c1351b/go.mod h1:7BvyPhdbLxMXIYTFPLsyJRFMsKmOZnQmzh6Gb+uquuM=
+github.com/dvyukov/go-fuzz v0.0.0-20200318091601-be3528f3a813/go.mod h1:11Gm+ccJnvAhCNLlf5+cS9KjtbaD5I5zaZpFMsTHWTw=
+github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
+github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
+github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
+github.com/edsrzf/mmap-go v0.0.0-20160512033002-935e0e8a636c/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
+github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
+github.com/enigmampc/btcutil v1.0.3-0.20200723161021-e2fb6adb2a25 h1:2vLKys4RBU4pn2T/hjXMbvwTr1Cvy5THHrQkbeY9HRk=
+github.com/enigmampc/btcutil v1.0.3-0.20200723161021-e2fb6adb2a25/go.mod h1:hTr8+TLQmkUkgcuh3mcr5fjrT9c64ZzsBCdCEC6UppY=
+github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g=
+github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/ethereum/go-ethereum v1.9.23/go.mod h1:JIfVb6esrqALTExdz9hRYvrP0xBDf6wCncIu1hNwHpM=
+github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51 h1:0JZ+dUmQeA8IIVUMzysrX4/AKuQwWhV2dYQuPZdvdSQ=
+github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51/go.mod h1:Yg+htXGokKKdzcwhuNDwVvN+uBxDGXJ7G/VN1d8fa64=
+github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 h1:JWuenKqqX8nojtoVVWjGfOF9635RETekkoH6Cc9SX0A=
+github.com/facebookgo/stack v0.0.0-20160209184415-751773369052/go.mod h1:UbMTZqLaRiH3MsBH8va0n7s1pQYcu3uTb8G4tygF4Zg=
+github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870 h1:E2s37DuLxFhQDg5gKsWoLBOB0n+ZW8s599zru8FJ2/Y=
+github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0=
+github.com/fatih/color v1.3.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
+github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
+github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
+github.com/felixge/httpsnoop v1.0.1 h1:lvB5Jl89CsZtGIWuTcDM1E/vkVs49/Ml7JJe07l8SPQ=
+github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
+github.com/fjl/memsize v0.0.0-20180418122429-ca190fb6ffbc/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
+github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw=
+github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
+github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4=
+github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
+github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
+github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww=
+github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
+github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
+github.com/gin-gonic/gin v1.6.3 h1:ahKqKTFpO5KTPHxWZjEdPScmYaGtLo8Y4DMHoEsnp14=
+github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M=
+github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/kit v0.10.0 h1:dXFJfIHVvUcpSgDOV+Ne6t7jXri8Tfv2uOLHUZ2XNuo=
+github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o=
+github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
+github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
+github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4=
+github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
+github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8=
+github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A=
+github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
+github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q=
+github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8=
+github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no=
+github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA=
+github.com/go-playground/validator/v10 v10.2.0 h1:KgJ0snyC2R9VXYN2rneOtQcw5aHQB1Vv0sFl1UcHBOY=
+github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI=
+github.com/go-sourcemap/sourcemap v2.1.2+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg=
+github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
+github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
+github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee h1:s+21KNqlpePfkah2I+gwHF8xmJWRjooY+5248k6m4A0=
+github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo=
+github.com/gobwas/pool v0.2.0 h1:QEmUOlnSjWtnpRGHF3SauEiOsy82Cup83Vf2LcMlnc8=
+github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
+github.com/gobwas/ws v1.0.2 h1:CoAavW/wd/kulfZmSIBt6p24n4j7tHgNVCjsfHVNUbo=
+github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM=
+github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 h1:ZpnhV/YsD2/4cESfV5+Hoeu/iUR3ruzNvZ+yQfO03a0=
+github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
+github.com/gogo/gateway v1.1.0 h1:u0SuhL9+Il+UbjM9VIE3ntfRujKbvVpFvNB4HbjeVQ0=
+github.com/gogo/gateway v1.1.0/go.mod h1:S7rR8FRQyG3QFESeSv4l2WnsyzlCLG0CzBbUUo/mbic=
+github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
+github.com/golang/mock v1.4.4 h1:l75CXGRSwbaYNpl/Z2X1XIIAMSCquvXgpVZDhwEIJsc=
+github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.5 h1:F768QJ1E9tib+q5Sc8MkdJi1RxLTbRcTf8LJV56aRls=
+github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM=
+github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.2-0.20200707131729-196ae77b8a26/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.2 h1:aeE13tS0IiQgFjYdoL8qN3K1N2bXXtI6Vi51/y7BpMw=
+github.com/golang/snappy v0.0.2/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=
+github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w=
+github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM=
+github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
+github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/gofuzz v1.1.1-0.20200604201612-c04b05f3adfa h1:Q75Upo5UN4JbPFURXZ8nLKYUvF85dyFRop/vQ0Rv+64=
+github.com/google/gofuzz v1.1.1-0.20200604201612-c04b05f3adfa/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
+github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
+github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
+github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
+github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
+github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4=
+github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q=
+github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
+github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
+github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
+github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
+github.com/gorilla/websocket v1.4.1-0.20190629185528-ae1634f6a989/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
+github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
+github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/graph-gophers/graphql-go v0.0.0-20191115155744-f33e81362277/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc=
+github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
+github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
+github.com/grpc-ecosystem/go-grpc-middleware v1.2.1/go.mod h1:EaizFBKfUKtMIF5iaDEhniwNedqGo9FuLFzppDr3uwI=
+github.com/grpc-ecosystem/go-grpc-middleware v1.2.2 h1:FlFbCRLd5Jr4iYXZufAvgWN6Ao0JrI5chLINnUXDDr0=
+github.com/grpc-ecosystem/go-grpc-middleware v1.2.2/go.mod h1:EaizFBKfUKtMIF5iaDEhniwNedqGo9FuLFzppDr3uwI=
+github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
+github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/grpc-ecosystem/grpc-gateway v1.14.7/go.mod h1:oYZKL012gGh6LMyg/xA7Q2yq6j8bu0wa+9w14EEthWU=
+github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
+github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
+github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c h1:6rhixN/i8ZofjG1Y75iExal34USq5p+wiN1tpie8IrU=
+github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c/go.mod h1:NMPJylDgVpX0MLRlPy15sqSwOFv/U1GZ2m21JhFfek0=
+github.com/gtank/merlin v0.1.1-0.20191105220539-8318aed1a79f/go.mod h1:T86dnYJhcGOh5BjZFCJWTDeTK7XW8uE+E21Cy/bIQ+s=
+github.com/gtank/merlin v0.1.1 h1:eQ90iG7K9pOhtereWsmyRJ6RAwcP4tHTDBHXNg+u5is=
+github.com/gtank/merlin v0.1.1/go.mod h1:T86dnYJhcGOh5BjZFCJWTDeTK7XW8uE+E21Cy/bIQ+s=
+github.com/gtank/ristretto255 v0.1.2 h1:JEqUCPA1NvLq5DwYtuzigd7ss8fwbYay9fi4/5uMzcc=
+github.com/gtank/ristretto255 v0.1.2/go.mod h1:Ph5OpO6c7xKUGROZfWVLiJf9icMDwUeIvY4OmlYW69o=
+github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
+github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=
+github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
+github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
+github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0=
+github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
+github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
+github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
+github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
+github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
+github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
+github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE=
+github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
+github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
+github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
+github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
+github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
+github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
+github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
+github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
+github.com/holiman/uint256 v1.1.1/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw=
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
+github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc=
+github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o=
+github.com/improbable-eng/grpc-web v0.14.0 h1:GdoK+cXABdB+1keuqsV1drSFO2XLYIxqt/4Rj8SWGBk=
+github.com/improbable-eng/grpc-web v0.14.0/go.mod h1:6hRR09jOEG81ADP5wCQju1z71g6OL4eEvELdran/3cs=
+github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
+github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
+github.com/influxdata/influxdb v1.2.3-0.20180221223340-01288bdb0883/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY=
+github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
+github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
+github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
+github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
+github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
+github.com/jmhodges/levigo v1.0.0 h1:q5EC36kV79HWeTBWsod3mG11EgStG3qArTKcvlksN1U=
+github.com/jmhodges/levigo v1.0.0/go.mod h1:Q6Qx+uH3RAqyK4rFQroq9RL7mdkABMcfhEI+nNuzMJQ=
+github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
+github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
+github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
+github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68=
+github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
+github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
+github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
+github.com/julienschmidt/httprouter v1.1.1-0.20170430222011-975b5c4c7c21/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
+github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
+github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
+github.com/karalabe/usb v0.0.0-20190919080040-51dc0efba356/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU=
+github.com/keybase/go-keychain v0.0.0-20190712205309-48d3d31d256d h1:Z+RDyXzjKE0i2sTjZ/b1uxiGtPhFy34Ou/Tk0qwN0kM=
+github.com/keybase/go-keychain v0.0.0-20190712205309-48d3d31d256d/go.mod h1:JJNrCn9otv/2QP4D7SMJBgaleKpOf66PnW6F5WGNRIc=
+github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
+github.com/kkdai/bstream v1.0.0/go.mod h1:FDnDOHt5Yx4p3FaHcioFT0QjDOtgUpvjeZqAs+NVZZA=
+github.com/klauspost/compress v1.10.3 h1:OP96hzwJVBIHYU52pVTI6CczrxPvrGfgqF9N5eTO0Q8=
+github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
+github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
+github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y=
+github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
+github.com/libp2p/go-buffer-pool v0.0.2 h1:QNK2iAFa8gjAe1SPz6mHSMuCcjs+X1wlHzeOSqcmlfs=
+github.com/libp2p/go-buffer-pool v0.0.2/go.mod h1:MvaB6xw5vOrDl8rYZGLFdKAuk/hRoRZd1Vi32+RXyFM=
+github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
+github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
+github.com/lucasjones/reggen v0.0.0-20180717132126-cdb49ff09d77/go.mod h1:5ELEyG+X8f+meRWHuqUOewBOhvHkl7M76pdGEansxW4=
+github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
+github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/magiconair/properties v1.8.4 h1:8KGKTcQQGm0Kv7vEbKFErAoAOFyyacLStRtQSeYtvkY=
+github.com/magiconair/properties v1.8.4/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
+github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
+github.com/mattn/go-colorable v0.1.0/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
+github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
+github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
+github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
+github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.5-0.20180830101745-3fb116b82035/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
+github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
+github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
+github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
+github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
+github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
+github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
+github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
+github.com/mimoo/StrobeGo v0.0.0-20181016162300-f8f6d4d2b643 h1:hLDRPB66XQT/8+wG9WsDpiCvZf1yKO7sz7scAjSlBa0=
+github.com/mimoo/StrobeGo v0.0.0-20181016162300-f8f6d4d2b643/go.mod h1:43+3pMjjKimDBf5Kr4ZFNGbLql1zKkbImw+fZbw3geM=
+github.com/minio/highwayhash v1.0.1 h1:dZ6IIu8Z14VlC0VpfKofAhCy74wu/Qb5gcn52yWoz/0=
+github.com/minio/highwayhash v1.0.1/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY=
+github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
+github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
+github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
+github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
+github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
+github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
+github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/mapstructure v1.3.3 h1:SzB1nHZ2Xi+17FP0zVQBHIZqvwRN9408fJO8h+eeNA8=
+github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
+github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/mtibben/percent v0.2.1 h1:5gssi8Nqo8QU/r2pynCm+hBQHpkB/uNK7BJCFogWdzs=
+github.com/mtibben/percent v0.2.1/go.mod h1:KG9uO+SZkUp+VkRHsCdYQV3XSZrrSpR3O9ibNBTZrns=
+github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU=
+github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0=
+github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E=
+github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg=
+github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU=
+github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k=
+github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w=
+github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
+github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
+github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
+github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
+github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
+github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs=
+github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
+github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
+github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
+github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
+github.com/olekukonko/tablewriter v0.0.2-0.20190409134802-7e037d187b0c/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
+github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
+github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA=
+github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
+github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
+github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
+github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE=
+github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
+github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
+github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis=
+github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74=
+github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
+github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
+github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA=
+github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
+github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
+github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
+github.com/otiai10/copy v1.5.0 h1:SoXDGnlTUZoqB/wSuj/Y5L6T5i6iN4YRAcMCd+JnLNU=
+github.com/otiai10/copy v1.5.0/go.mod h1:XWfuS3CrI0R6IE0FbgHsEazaXO8G0LpMp9o8tos0x4E=
+github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE=
+github.com/otiai10/curr v1.0.0 h1:TJIWdbX0B+kpNagQrjgq8bCMrbhiuX73M2XwgtDMoOI=
+github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs=
+github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo=
+github.com/otiai10/mint v1.3.2 h1:VYWnrP5fXmz1MXvjuUvcBrXSjGE6xjON+axB/UrpO3E=
+github.com/otiai10/mint v1.3.2/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc=
+github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM=
+github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
+github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
+github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
+github.com/pborman/uuid v0.0.0-20170112150404-1b00554d8222/go.mod h1:VyrYX9gd7irzKovcSS6BIIEwPRkP2Wm2m9ufcdFSJ34=
+github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
+github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
+github.com/pelletier/go-toml v1.8.1 h1:1Nf83orprkJyknT6h7zbuEGUEjcyVlCxSUGTENmNCRM=
+github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc=
+github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac=
+github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0=
+github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 h1:q2e307iGHPdTGp0hoxKjt1H5pDo6utceo3dQVK3I5XQ=
+github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o=
+github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
+github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
+github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
+github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
+github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
+github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
+github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
+github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og=
+github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
+github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
+github.com/prometheus/client_golang v1.8.0 h1:zvJNkoCFAnYFNC24FV8nW4JdRJ3GIFcLbg65lL/JDcw=
+github.com/prometheus/client_golang v1.8.0/go.mod h1:O9VU6huf47PktckDQfMTX0Y8tY0/7TSWwj+ITvv0TnM=
+github.com/prometheus/client_golang v1.9.0 h1:Rrch9mh17XcxvEu9D9DEpb4isxjGBtcevQjKvxPRQIU=
+github.com/prometheus/client_golang v1.9.0/go.mod h1:FqZLKOZnGdFAhOK4nqGHa7D66IdsO+O441Eve7ptJDU=
+github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
+github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA=
+github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
+github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
+github.com/prometheus/common v0.14.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s=
+github.com/prometheus/common v0.15.0 h1:4fgOnadei3EZvgRwxJ7RMpG1k1pOZth5Pc13tyspaKM=
+github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s=
+github.com/prometheus/common v0.17.0 h1:kDIZLI74SS+3tedSvEkykgBkD7txMxaJAPj8DtJUKYA=
+github.com/prometheus/common v0.17.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s=
+github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
+github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
+github.com/prometheus/procfs v0.2.0 h1:wH4vA7pcjKuZzjF7lM8awk4fnuJO6idemZXoKnULUx4=
+github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
+github.com/prometheus/tsdb v0.6.2-0.20190402121629-4f204dcbc150/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
+github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
+github.com/rakyll/statik v0.1.7 h1:OF3QCZUuyPxuGEP7B4ypUa7sB/iHtqOTDYZXGM8KOdQ=
+github.com/rakyll/statik v0.1.7/go.mod h1:AlZONWzMtEnMs7W4e/1LURLiI49pIMmp6V9Unghqrcc=
+github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
+github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 h1:MkV+77GLUNo5oJ0jf870itWm3D0Sjh7+Za9gazKc5LQ=
+github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
+github.com/regen-network/cosmos-proto v0.3.1 h1:rV7iM4SSFAagvy8RiyhiACbWEGotmqzywPxOvwMdxcg=
+github.com/regen-network/cosmos-proto v0.3.1/go.mod h1:jO0sVX6a1B36nmE8C9xBFXpNwWejXC7QqCOnH3O0+YM=
+github.com/regen-network/protobuf v1.3.3-alpha.regen.1 h1:OHEc+q5iIAXpqiqFKeLpu5NwTIkVXUs48vFMwzqpqY4=
+github.com/regen-network/protobuf v1.3.3-alpha.regen.1/go.mod h1:2DjTFR1HhMQhiWC5sZ4OhQ3+NtdbZ6oBDKQwq5Ou+FI=
+github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho=
+github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
+github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
+github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rs/cors v0.0.0-20160617231935-a62a804a8a00/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
+github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
+github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
+github.com/rs/xhandler v0.0.0-20160618193221-ed27b6fd6521/go.mod h1:RvLn4FgxWubrpZHtQLnOf6EwhN2hEMusxZOhcW9H3UQ=
+github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
+github.com/rs/zerolog v1.20.0 h1:38k9hgtUBdxFwE34yS8rTHmHBa4eN16E4DJlv177LNs=
+github.com/rs/zerolog v1.20.0/go.mod h1:IzD0RJ65iWH0w97OQQebJEvTZYvsCUm9WVLWBQrJRjo=
+github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
+github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
+github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
+github.com/sasha-s/go-deadlock v0.2.0/go.mod h1:StQn567HiB1fF2yJ44N9au7wOhrPS3iZqiDbRupzT10=
+github.com/sasha-s/go-deadlock v0.2.1-0.20190427202633-1595213edefa h1:0U2s5loxrTy6/VgfVoLuVLFJcURKLH49ie0zSch7gh4=
+github.com/sasha-s/go-deadlock v0.2.1-0.20190427202633-1595213edefa/go.mod h1:F73l+cr82YSh10GxyRI6qZiCgK64VaZjwesgfQ1/iLM=
+github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
+github.com/shirou/gopsutil v2.20.5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
+github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
+github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
+github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
+github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
+github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
+github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
+github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa/go.mod h1:oJyF+mSPHbB5mVY2iO9KV3pTt/QbIkGaO8gQ2WrDbP4=
+github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
+github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY=
+github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
+github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
+github.com/spf13/afero v1.3.4 h1:8q6vk3hthlpb2SouZcnBVKboxWQWMDNF38bwholZrJc=
+github.com/spf13/afero v1.3.4/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
+github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng=
+github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
+github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
+github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
+github.com/spf13/cobra v1.1.1 h1:KfztREH0tPxJJ+geloSLaAkaPkr4ki2Er5quFV1TDo4=
+github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI=
+github.com/spf13/cobra v1.1.3 h1:xghbfqPkxzxP3C/f3n5DdpAbdKLj4ZE4BWQI362l53M=
+github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
+github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
+github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
+github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
+github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
+github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
+github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
+github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
+github.com/spf13/viper v1.7.1 h1:pM5oEahlgWv/WnHXpgbKz7iLIxRf65tye2Ci+XFK5sk=
+github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
+github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q=
+github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570/go.mod h1:8OR4w3TdeIHIh1g6EMY5p0gVNOovcWC+1vpc7naMuAw=
+github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3/go.mod h1:hpGUWaI9xL8pRQCTXQgocU38Qw1g0Us7n5PxxTwTCYU=
+github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
+github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
+github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48=
+github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
+github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
+github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca h1:Ld/zXl5t4+D69SiV4JoN7kkfvJdOWlPpfxrzxpLMoUk=
+github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca/go.mod h1:u2MKkTVTVJWe5D1rCvame8WqhBd88EuIwODJZ1VHCPM=
+github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c h1:g+WoO5jjkqGAzHWCjJB1zZfXPIAaDpzXIEJ0eS6B5Ok=
+github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c/go.mod h1:ahpPrc7HpcfEWDQRZEmnXMzHY03mLDYMCxeDzy46i+8=
+github.com/tendermint/btcd v0.1.1 h1:0VcxPfflS2zZ3RiOAHkBiFUcPvbtRj5O7zHmcJWHV7s=
+github.com/tendermint/btcd v0.1.1/go.mod h1:DC6/m53jtQzr/NFmMNEu0rxf18/ktVoVtMrnDD5pN+U=
+github.com/tendermint/cosmos-rosetta-gateway v0.3.0-rc2 h1:crekJuQ57yIBDuKd3/dMJ00ZvOHURuv9RGJSi2hWTW4=
+github.com/tendermint/cosmos-rosetta-gateway v0.3.0-rc2/go.mod h1:gBPw8WV2Erm4UGHlBRiM3zaEBst4bsuihmMCNQdgP/s=
+github.com/tendermint/crypto v0.0.0-20191022145703-50d29ede1e15 h1:hqAk8riJvK4RMWx1aInLzndwxKalgi5rTqgfXxOxbEI=
+github.com/tendermint/crypto v0.0.0-20191022145703-50d29ede1e15/go.mod h1:z4YtwM70uOnk8h0pjJYlj3zdYwi9l03By6iAIF5j/Pk=
+github.com/tendermint/go-amino v0.16.0 h1:GyhmgQKvqF82e2oZeuMSp9JTN0N09emoSZlb2lyGa2E=
+github.com/tendermint/go-amino v0.16.0/go.mod h1:TQU0M1i/ImAo+tYpZi73AU3V/dKeCoMC9Sphe2ZwGME=
+github.com/tendermint/tendermint v0.34.0-rc4/go.mod h1:yotsojf2C1QBOw4dZrTcxbyxmPUrT4hNuOQWX9XUwB4=
+github.com/tendermint/tendermint v0.34.0-rc6/go.mod h1:ugzyZO5foutZImv0Iyx/gOFCX6mjJTgbLHTwi17VDVg=
+github.com/tendermint/tendermint v0.34.0/go.mod h1:Aj3PIipBFSNO21r+Lq3TtzQ+uKESxkbA3yo/INM4QwQ=
+github.com/tendermint/tendermint v0.34.7 h1:lvBJFNqpDuEzKfLZKtUXOL5dMOpqHonHlO6LCujyl6E=
+github.com/tendermint/tendermint v0.34.7/go.mod h1:JVuu3V1ZexOaZG8VJMRl8lnfrGw6hEB2TVnoUwKRbss=
+github.com/tendermint/tm-db v0.6.2/go.mod h1:GYtQ67SUvATOcoY8/+x6ylk8Qo02BQyLrAs+yAcLvGI=
+github.com/tendermint/tm-db v0.6.3/go.mod h1:lfA1dL9/Y/Y8wwyPp2NMLyn5P5Ptr/gvDFNWtrCWSf8=
+github.com/tendermint/tm-db v0.6.4 h1:3N2jlnYQkXNQclQwd/eKV/NzlqPlfK21cpRRIx80XXQ=
+github.com/tendermint/tm-db v0.6.4/go.mod h1:dptYhIpJ2M5kUuenLr+Yyf3zQOv1SgBZcl8/BmWlMBw=
+github.com/tidwall/gjson v1.6.1/go.mod h1:BaHyNc5bjzYkPqgLq7mdVzeiRtULKULXLgZFKsxEHI0=
+github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E=
+github.com/tidwall/pretty v1.0.2/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
+github.com/tidwall/sjson v1.1.2/go.mod h1:SEzaDwxiPzKzNfUEO4HbYF/m4UCSJDsGgNqsS1LvdoY=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
+github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs=
+github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
+github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo=
+github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
+github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
+github.com/ugorji/go/codec v1.1.7 h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs=
+github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
+github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
+github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
+github.com/vmihailenco/msgpack/v5 v5.0.0-beta.9/go.mod h1:HVxBVPUK/+fZMonk4bi1islLa8V3cfnBug0+4dykPzo=
+github.com/vmihailenco/tagparser v0.1.2/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI=
+github.com/wsddn/go-ecdh v0.0.0-20161211032359-48726bab9208/go.mod h1:IotVbo4F+mw0EzQ08zFqg7pK3FebNXpaMsRy2RT+Ees=
+github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
+github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
+github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/zondax/hid v0.9.0 h1:eiT3P6vNxAEVxXMw66eZUAAnU2zD33JBkfG/EnfAKl8=
+github.com/zondax/hid v0.9.0/go.mod h1:l5wttcP0jwtdLjqjMMWFVEE7d1zO0jvSPA9OPZxWpEM=
+go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
+go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
+go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0=
+go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
+go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
+go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
+go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
+go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
+go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
+go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
+go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
+go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
+go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
+go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
+go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
+golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200709230013-948cd5f35899/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20201117144127-c1f2f97bffc9/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
+golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad h1:DN0cp81fZ3njFcrLCytUHRSUkqBjfTo4Tx9RJTWs0EY=
+golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
+golang.org/x/exp v0.0.0-20190731235908-ec7cb31e5a56/go.mod h1:JhuoJpWY28nO4Vef9tZUw9qufEGTyX1+7lmHxV5q5G4=
+golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
+golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
+golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw=
+golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
+golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
+golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
+golang.org/x/mobile v0.0.0-20200801112145-973feb4309de/go.mod h1:skQtrUTUwhdJvXM/2KKJzY8pDgNr9I/FOMqDVRPBUS4=
+golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
+golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
+golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.1.1-0.20191209134235-331c550502dd/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20201021035429-f5854403a974 h1:IX6qOQeG5uLjB/hjjwjedwfjND0hgjPMMyO1RoIXQNI=
+golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190712062909-fae7ac547cb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200824131525-c12d262b63d8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f h1:+Nyd8tzPX9R7BWHguqsrbFdRx3WQ/1ib8I44HXV5yTA=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211 h1:9UQO31fZ+0aKQOFldThf7BKPMJTiBfWycGh/u3UoO88=
+golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e h1:AyodaIpKjppX+cBfTASF2E1US3H2JFBj920Ot3rtDjs=
+golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/term v0.0.0-20201117132131-f5c789dd3221 h1:/ZHdbVpdR/jk3g30/d4yUL0JU9kksj8+F/bnQUVLGDM=
+golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190828213141-aed303cbaa74/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200117012304-6edc0a871e69/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
+google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
+google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
+google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s=
+google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
+google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200324203455-a04cca1dde73 h1:+yTMTeazSO5iBqU9NR53hgriivQQbYa5Uuaj8r3qKII=
+google.golang.org/genproto v0.0.0-20200324203455-a04cca1dde73/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
+google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201111145450-ac7456db90a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201119123407-9b1e624d6bc4/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210114201628-6edceaf6022f h1:izedQ6yVIc5mZsRuXzmSreCOlzI0lCU1HpG8yEdMiKw=
+google.golang.org/genproto v0.0.0-20210114201628-6edceaf6022f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
+google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
+google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
+google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
+google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
+google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.28.0 h1:bO/TA4OxCOummhSf10siHuG7vJOiwh7SpRpFZDkOgl4=
+google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
+google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
+google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.32.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
+google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
+google.golang.org/grpc v1.35.0 h1:TwIQcH3es+MojMVojxxfQ3l3OF2KzlRxML2xZq0kRo8=
+google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
+google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
+google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
+google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
+google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
+google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
+google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c=
+google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
+gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b h1:QRR6H1YWRnHb4Y/HeNFCTJLFVxaq6wH4YuVdsUOr75U=
+gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
+gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
+gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
+gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o=
+gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno=
+gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/ini.v1 v1.61.0 h1:LBCdW4FmFYL4s/vDZD1RQYX7oAR6IjujCYgMdbHBR10=
+gopkg.in/ini.v1 v1.61.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c=
+gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6/go.mod h1:uAJfkITjFhyEEuUfm7bsmCZRbW5WRq8s9EY8HZ6hCns=
+gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
+gopkg.in/urfave/cli.v1 v1.20.0/go.mod h1:vuBzUtMdQeixQj8LVd+/98pzhxNGQoyuPBlsXHOQNO0=
+gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
+gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclpTYkz2zFM+lzLJFO4gQ=
+gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
+honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
+nhooyr.io/websocket v1.8.6 h1:s+C3xAMLwGmlI31Nyn/eAehUlZPwfYZu2JXM621Q5/k=
+nhooyr.io/websocket v1.8.6/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0=
+rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
+sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
+sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU=
diff --git a/light-clients/06-solomachine/doc.go b/light-clients/06-solomachine/doc.go
new file mode 100644
index 00000000..3673f3c3
--- /dev/null
+++ b/light-clients/06-solomachine/doc.go
@@ -0,0 +1,7 @@
+/*
+Package solomachine implements concrete `ConsensusState`, `Header`,
+`Misbehaviour` and `Equivocation` types for the Solo Machine light client.
+This implementation is based on the ICS 06 specification:
+https://github.com/cosmos/ics/tree/master/spec/ics-006-solo-machine-client
+*/
+package solomachine
diff --git a/light-clients/06-solomachine/module.go b/light-clients/06-solomachine/module.go
new file mode 100644
index 00000000..bafbd015
--- /dev/null
+++ b/light-clients/06-solomachine/module.go
@@ -0,0 +1,10 @@
+package solomachine
+
+import (
+ "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/06-solomachine/types"
+)
+
+// Name returns the solo machine client name.
+func Name() string {
+ return types.SubModuleName
+}
diff --git a/light-clients/06-solomachine/spec/01_concepts.md b/light-clients/06-solomachine/spec/01_concepts.md
new file mode 100644
index 00000000..de486b71
--- /dev/null
+++ b/light-clients/06-solomachine/spec/01_concepts.md
@@ -0,0 +1,160 @@
+
+
+# Concepts
+
+## Client State
+
+The `ClientState` for a solo machine light client stores the latest sequence, the frozen sequence,
+the latest consensus state, and a flag indicating whether the client may be updated
+after a governance proposal.
+
+If the client is not frozen then the frozen sequence is 0.
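+
+As a rough illustration, the fields described above correspond to the following shape (the
+concrete type is generated from the protobuf definition, so this is only a sketch):
+
+```go
+// ClientState sketch: field names match those used elsewhere in this module.
+type ClientState struct {
+    Sequence                 uint64          // latest sequence number
+    FrozenSequence           uint64          // 0 when the client is not frozen
+    ConsensusState           *ConsensusState // latest consensus state
+    AllowUpdateAfterProposal bool            // whether a governance proposal may update the client
+}
+```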
+
+## Consensus State
+
+The consensus state stores the public key, diversifier, and timestamp of the solo machine light client.
+
+The diversifier is used to prevent accidental misbehaviour if the same public key is used across
+different chains with the same client identifier. It should be unique to the chain the light client
+is used on.
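+
+Illustratively, the consensus state has the following shape (again only a sketch of the
+protobuf-generated type; `codectypes` refers to `github.com/cosmos/cosmos-sdk/codec/types`,
+and the public key is stored as a protobuf `Any`, see below):
+
+```go
+// ConsensusState sketch: PublicKey, Diversifier and Timestamp as described above.
+type ConsensusState struct {
+    PublicKey   *codectypes.Any // public key packed as a protobuf Any
+    Diversifier string          // unique per chain to prevent accidental misbehaviour
+    Timestamp   uint64          // timestamp of the solo machine
+}
+```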
+
+## Public Key
+
+The public key can be a single public key or a multi-signature public key. The public key type used
+must fulfill the Tendermint public key interface (this will become the SDK public key interface in the
+near future). The public key must be registered on the application codec; otherwise, encoding/decoding
+errors will arise. The public key stored in the consensus state is represented as a protobuf `Any`.
+This allows for flexibility in what other public key types can be supported in the future.
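+
+For example, a consensus state could be constructed by packing a registered public key into an
+`Any` with the SDK helper `codectypes.NewAnyWithValue` (a hedged sketch; `pubKey`, `diversifier`,
+and `timestamp` are placeholders supplied by the caller):
+
+```go
+// pack the public key (e.g. secp256k1 or a multisig key) into a protobuf Any
+pkAny, err := codectypes.NewAnyWithValue(pubKey)
+if err != nil {
+    return err
+}
+
+consensusState := &ConsensusState{
+    PublicKey:   pkAny,
+    Diversifier: diversifier,
+    Timestamp:   timestamp,
+}
+```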
+
+## Counterparty Verification
+
+The solo machine light client can verify counterparty client state, consensus state, connection state,
+channel state, packet commitments, packet acknowledgements, packet receipt absence,
+and the next sequence receive. At the end of each successful verification call the light
+client sequence number will be incremented.
+
+Successful verification requires the current public key to sign over the proof.
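+
+For example, a caller could verify the counterparty client state as follows (a hedged sketch;
+`store`, `cdc`, `prefix`, `proof`, and the counterparty values are assumed to be provided by the
+caller, and `smClientState` is the stored solo machine client state):
+
+```go
+// the solo machine sequence is encoded in the revision height; the revision number must be 0
+proofHeight := clienttypes.NewHeight(0, smClientState.Sequence)
+
+if err := smClientState.VerifyClientState(
+    store, cdc, proofHeight, prefix,
+    counterpartyClientIdentifier, proof, counterpartyClientState,
+); err != nil {
+    return err // invalid signature, frozen client, or sequence mismatch
+}
+// on success the stored client state sequence is incremented by 1
+```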
+
+## Proofs
+
+A solo machine proof should verify that the solomachine public key signed
+over some specified data. The format for generating marshaled proofs for
+the SDK's implementation of solo machine is as follows:
+
+1. Construct the data using the associated protobuf definition and marshal it.
+
+For example:
+
+```go
+data := &ClientStateData{
+ Path: []byte(path.String()),
+ ClientState: any,
+}
+
+dataBz, err := cdc.MarshalBinaryBare(data)
+```
+
+The helper functions `...DataBytes()` in [proofs.go](../types/proofs.go) handle this
+functionality.
+
+2. Construct the `SignBytes` and marshal it.
+
+For example:
+
+```go
+signBytes := &SignBytes{
+ Sequence: sequence,
+ Timestamp: timestamp,
+ Diversifier: diversifier,
+ DataType: CLIENT,
+ Data: dataBz,
+}
+
+signBz, err := cdc.MarshalBinaryBare(signBytes)
+```
+
+The helper functions `...SignBytes()` in [proofs.go](../types/proofs.go) handle this functionality.
+The `DataType` field is used to disambiguate what type of data was signed to prevent potential
+proto encoding overlap.
+
+3. Sign the sign bytes. Embed the signatures into either `SingleSignatureData` or `MultiSignatureData`.
+Convert the `SignatureData` to proto and marshal it.
+
+For example:
+
+```go
+sig, err := key.Sign(signBz)
+sigData := &signing.SingleSignatureData{
+ Signature: sig,
+}
+
+protoSigData := signing.SignatureDataToProto(sigData)
+bz, err := cdc.MarshalBinaryBare(protoSigData)
+```
+
+4. Construct a `TimestampedSignatureData` and marshal it. The marshaled result can be passed in
+as the proof parameter to the verification functions.
+
+For example:
+
+```go
+timestampedSignatureData := &types.TimestampedSignatureData{
+ SignatureData: sigData,
+ Timestamp: solomachine.Time,
+}
+
+proof, err := cdc.MarshalBinaryBare(timestampedSignatureData)
+```
+
+## Updates By Header
+
+An update by a header will only succeed if:
+
+- the header provided is parseable to a solo machine header
+- the header sequence matches the current sequence
+- the header timestamp is greater than or equal to the consensus state timestamp
+- the currently registered public key generated the proof
+
+If the update is successful:
+
+- the public key is updated
+- the diversifier is updated
+- the timestamp is updated
+- the sequence is incremented by 1
+- the new consensus state is set in the client state
+
+## Updates By Proposal
+
+An update by a governance proposal will only succeed if:
+
+- the substitute provided is parseable to a solo machine client state
+- the `AllowUpdateAfterProposal` client parameter is set to `true`
+- the new consensus state public key does not equal the current consensus state public key
+
+If the update is successful:
+
+- the subject client state is updated to the substitute client state
+- the subject consensus state is updated to the substitute consensus state
+- the client is unfrozen (if it was previously frozen)
+
+## Misbehaviour
+
+Misbehaviour handling will only succeed if:
+
+- the misbehaviour provided is parseable to a solo machine misbehaviour
+- the client is not already frozen
+- the current public key signed over two unique data messages at the same sequence and diversifier
+
+If the misbehaviour is successfully processed:
+
+- the client is frozen by setting the frozen sequence to the misbehaviour sequence
+
+NOTE: Misbehaviour processing depends on the order in which data is processed. A misbehaving solo machine
+could update to a new public key before the misbehaviour is submitted in order to avoid being frozen.
+
+## Upgrades
+
+Upgrades to solo machine light clients are not supported since an entirely different type of
+public key can be set using normal client updates.
diff --git a/light-clients/06-solomachine/spec/02_state.md b/light-clients/06-solomachine/spec/02_state.md
new file mode 100644
index 00000000..a9ff4ea5
--- /dev/null
+++ b/light-clients/06-solomachine/spec/02_state.md
@@ -0,0 +1,12 @@
+
+
+# State
+
+The solo machine light client only stores a consensus state for each update by a header
+or a governance proposal. The latest client state is also maintained in the store.
+
+These values can be found under the light client paths defined in the IBC
+[core store specs](../../../core/spec/02_state.md).
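+
+For example, the latest client state is written under the standard client state key
+(mirroring the `setClientState` helper added in this module):
+
+```go
+bz := clienttypes.MustMarshalClientState(cdc, clientState)
+store.Set([]byte(host.KeyClientState), bz)
+```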
+
diff --git a/light-clients/06-solomachine/spec/03_state_transitions.md b/light-clients/06-solomachine/spec/03_state_transitions.md
new file mode 100644
index 00000000..48a1e18f
--- /dev/null
+++ b/light-clients/06-solomachine/spec/03_state_transitions.md
@@ -0,0 +1,39 @@
+
+
+# State Transitions
+
+## Client State Verification Functions
+
+Successful state verification by a solo machine light client will result in:
+
+- the sequence being incremented by 1.
+
+## Update By Header
+
+A successful update of a solo machine light client by a header will result in:
+
+- the public key being updated to the new public key provided by the header.
+- the diversifier being updated to the new diversifier provided by the header.
+- the timestamp being updated to the new timestamp provided by the header.
+- the sequence being incremented by 1
+- the consensus state being updated (consensus state stores the public key, diversifier, and timestamp)
+
+## Update By Governance Proposal
+
+A successful update of a solo machine light client by a governance proposal will result in:
+
+- the client state being updated to the substitute client state
+- the consensus state being updated to the substitute consensus state (consensus state stores the public key, diversifier, and timestamp)
+- the frozen sequence being set to zero (client is unfrozen if it was previously frozen).
+
+## Upgrade
+
+Client upgrades are not supported for the solo machine light client. No state transition occurs.
+
+## Misbehaviour
+
+Successful misbehaviour processing of a solo machine light client will result in:
+
+- the frozen sequence being set to the sequence the misbehaviour occurred at
diff --git a/light-clients/06-solomachine/spec/04_messages.md b/light-clients/06-solomachine/spec/04_messages.md
new file mode 100644
index 00000000..465ea622
--- /dev/null
+++ b/light-clients/06-solomachine/spec/04_messages.md
@@ -0,0 +1,8 @@
+
+
+# Messages
+
+The messages used to initialize a solo machine light client are defined in the
+core sub-module [02-client](../../../core/spec/04_messages.md).
diff --git a/light-clients/06-solomachine/spec/README.md b/light-clients/06-solomachine/spec/README.md
new file mode 100644
index 00000000..77db1bfe
--- /dev/null
+++ b/light-clients/06-solomachine/spec/README.md
@@ -0,0 +1,26 @@
+
+
+# `solomachine`
+
+## Abstract
+
+This document defines the implementation of the ICS06 protocol on the Cosmos SDK. For the general
+specification please refer to the [ICS06 Specification](https://github.com/cosmos/ics/tree/master/spec/ics-006-solo-machine-client).
+
+This implementation of a solo machine light client supports single and multi-signature public
+keys. The client is capable of handling public key updates by header and governance proposals.
+The light client is capable of processing client misbehaviour. Proofs of the counterparty state
+are generated by the solo machine client by signing over the desired state with a certain sequence,
+diversifier, and timestamp.
+
+## Contents
+
+1. **[Concepts](01_concepts.md)**
+2. **[State](02_state.md)**
+3. **[State Transitions](03_state_transitions.md)**
+4. **[Messages](04_messages.md)**
diff --git a/light-clients/06-solomachine/types/client_state.go b/light-clients/06-solomachine/types/client_state.go
new file mode 100644
index 00000000..24a6582f
--- /dev/null
+++ b/light-clients/06-solomachine/types/client_state.go
@@ -0,0 +1,491 @@
+package types
+
+import (
+ "reflect"
+
+ ics23 "github.com/confio/ics23/go"
+
+ "github.com/cosmos/cosmos-sdk/codec"
+ cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ "github.com/cosmos/cosmos-sdk/types/tx/signing"
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
+ host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+)
+
+var _ exported.ClientState = (*ClientState)(nil)
+
+// NewClientState creates a new ClientState instance.
+func NewClientState(latestSequence uint64, consensusState *ConsensusState, allowUpdateAfterProposal bool) *ClientState {
+ return &ClientState{
+ Sequence: latestSequence,
+ FrozenSequence: 0,
+ ConsensusState: consensusState,
+ AllowUpdateAfterProposal: allowUpdateAfterProposal,
+ }
+}
+
+// ClientType is Solo Machine.
+func (cs ClientState) ClientType() string {
+ return exported.Solomachine
+}
+
+// GetLatestHeight returns the latest sequence number.
+// Return exported.Height to satisfy ClientState interface
+// Revision number is always 0 for a solo-machine.
+func (cs ClientState) GetLatestHeight() exported.Height {
+ return clienttypes.NewHeight(0, cs.Sequence)
+}
+
+// IsFrozen returns true if the client is frozen.
+func (cs ClientState) IsFrozen() bool {
+ return cs.FrozenSequence != 0
+}
+
+// GetFrozenHeight returns the frozen sequence of the client.
+// Return exported.Height to satisfy interface
+// Revision number is always 0 for a solo-machine
+func (cs ClientState) GetFrozenHeight() exported.Height {
+ return clienttypes.NewHeight(0, cs.FrozenSequence)
+}
+
+// GetProofSpecs returns nil proof specs since client state verification uses signatures.
+func (cs ClientState) GetProofSpecs() []*ics23.ProofSpec {
+ return nil
+}
+
+// Validate performs basic validation of the client state fields.
+func (cs ClientState) Validate() error {
+ if cs.Sequence == 0 {
+ return sdkerrors.Wrap(clienttypes.ErrInvalidClient, "sequence cannot be 0")
+ }
+ if cs.ConsensusState == nil {
+ return sdkerrors.Wrap(clienttypes.ErrInvalidConsensus, "consensus state cannot be nil")
+ }
+ return cs.ConsensusState.ValidateBasic()
+}
+
+// ZeroCustomFields returns the solomachine client state with the client-specific fields
+// FrozenSequence and AllowUpdateAfterProposal zeroed out.
+func (cs ClientState) ZeroCustomFields() exported.ClientState {
+ return NewClientState(
+ cs.Sequence, cs.ConsensusState, false,
+ )
+}
+
+// Initialize will check that initial consensus state is equal to the latest consensus state of the initial client.
+func (cs ClientState) Initialize(_ sdk.Context, _ codec.BinaryMarshaler, _ sdk.KVStore, consState exported.ConsensusState) error {
+ if !reflect.DeepEqual(cs.ConsensusState, consState) {
+ return sdkerrors.Wrapf(clienttypes.ErrInvalidConsensus, "consensus state in initial client does not equal initial consensus state. expected: %s, got: %s",
+ cs.ConsensusState, consState)
+ }
+ return nil
+}
+
+// ExportMetadata is a no-op since solomachine does not store any metadata in client store
+func (cs ClientState) ExportMetadata(_ sdk.KVStore) []exported.GenesisMetadata {
+ return nil
+}
+
+// VerifyUpgradeAndUpdateState returns an error since solomachine client does not support upgrades
+func (cs ClientState) VerifyUpgradeAndUpdateState(
+ _ sdk.Context, _ codec.BinaryMarshaler, _ sdk.KVStore,
+ _ exported.ClientState, _ exported.ConsensusState, _, _ []byte,
+) (exported.ClientState, exported.ConsensusState, error) {
+ return nil, nil, sdkerrors.Wrap(clienttypes.ErrInvalidUpgradeClient, "cannot upgrade solomachine client")
+}
+
+// VerifyClientState verifies a proof of the client state of the running chain
+// stored on the solo machine.
+func (cs ClientState) VerifyClientState(
+ store sdk.KVStore,
+ cdc codec.BinaryMarshaler,
+ height exported.Height,
+ prefix exported.Prefix,
+ counterpartyClientIdentifier string,
+ proof []byte,
+ clientState exported.ClientState,
+) error {
+ publicKey, sigData, timestamp, sequence, err := produceVerificationArgs(cdc, cs, height, prefix, proof)
+ if err != nil {
+ return err
+ }
+
+ clientPrefixedPath := commitmenttypes.NewMerklePath(host.FullClientStatePath(counterpartyClientIdentifier))
+ path, err := commitmenttypes.ApplyPrefix(prefix, clientPrefixedPath)
+ if err != nil {
+ return err
+ }
+
+ signBz, err := ClientStateSignBytes(cdc, sequence, timestamp, cs.ConsensusState.Diversifier, path, clientState)
+ if err != nil {
+ return err
+ }
+
+ if err := VerifySignature(publicKey, signBz, sigData); err != nil {
+ return err
+ }
+
+ cs.Sequence++
+ cs.ConsensusState.Timestamp = timestamp
+ setClientState(store, cdc, &cs)
+ return nil
+}
+
+// VerifyClientConsensusState verifies a proof of the consensus state of the
+// running chain stored on the solo machine.
+func (cs ClientState) VerifyClientConsensusState(
+ store sdk.KVStore,
+ cdc codec.BinaryMarshaler,
+ height exported.Height,
+ counterpartyClientIdentifier string,
+ consensusHeight exported.Height,
+ prefix exported.Prefix,
+ proof []byte,
+ consensusState exported.ConsensusState,
+) error {
+ publicKey, sigData, timestamp, sequence, err := produceVerificationArgs(cdc, cs, height, prefix, proof)
+ if err != nil {
+ return err
+ }
+
+ clientPrefixedPath := commitmenttypes.NewMerklePath(host.FullConsensusStatePath(counterpartyClientIdentifier, consensusHeight))
+ path, err := commitmenttypes.ApplyPrefix(prefix, clientPrefixedPath)
+ if err != nil {
+ return err
+ }
+
+ signBz, err := ConsensusStateSignBytes(cdc, sequence, timestamp, cs.ConsensusState.Diversifier, path, consensusState)
+ if err != nil {
+ return err
+ }
+
+ if err := VerifySignature(publicKey, signBz, sigData); err != nil {
+ return err
+ }
+
+ cs.Sequence++
+ cs.ConsensusState.Timestamp = timestamp
+ setClientState(store, cdc, &cs)
+ return nil
+}
+
+// VerifyConnectionState verifies a proof of the connection state of the
+// specified connection end stored on the target machine.
+func (cs ClientState) VerifyConnectionState(
+ store sdk.KVStore,
+ cdc codec.BinaryMarshaler,
+ height exported.Height,
+ prefix exported.Prefix,
+ proof []byte,
+ connectionID string,
+ connectionEnd exported.ConnectionI,
+) error {
+ publicKey, sigData, timestamp, sequence, err := produceVerificationArgs(cdc, cs, height, prefix, proof)
+ if err != nil {
+ return err
+ }
+
+ connectionPath := commitmenttypes.NewMerklePath(host.ConnectionPath(connectionID))
+ path, err := commitmenttypes.ApplyPrefix(prefix, connectionPath)
+ if err != nil {
+ return err
+ }
+
+ signBz, err := ConnectionStateSignBytes(cdc, sequence, timestamp, cs.ConsensusState.Diversifier, path, connectionEnd)
+ if err != nil {
+ return err
+ }
+
+ if err := VerifySignature(publicKey, signBz, sigData); err != nil {
+ return err
+ }
+
+ cs.Sequence++
+ cs.ConsensusState.Timestamp = timestamp
+ setClientState(store, cdc, &cs)
+ return nil
+}
+
+// VerifyChannelState verifies a proof of the channel state of the specified
+// channel end, under the specified port, stored on the target machine.
+func (cs ClientState) VerifyChannelState(
+ store sdk.KVStore,
+ cdc codec.BinaryMarshaler,
+ height exported.Height,
+ prefix exported.Prefix,
+ proof []byte,
+ portID,
+ channelID string,
+ channel exported.ChannelI,
+) error {
+ publicKey, sigData, timestamp, sequence, err := produceVerificationArgs(cdc, cs, height, prefix, proof)
+ if err != nil {
+ return err
+ }
+
+ channelPath := commitmenttypes.NewMerklePath(host.ChannelPath(portID, channelID))
+ path, err := commitmenttypes.ApplyPrefix(prefix, channelPath)
+ if err != nil {
+ return err
+ }
+
+ signBz, err := ChannelStateSignBytes(cdc, sequence, timestamp, cs.ConsensusState.Diversifier, path, channel)
+ if err != nil {
+ return err
+ }
+
+ if err := VerifySignature(publicKey, signBz, sigData); err != nil {
+ return err
+ }
+
+ cs.Sequence++
+ cs.ConsensusState.Timestamp = timestamp
+ setClientState(store, cdc, &cs)
+ return nil
+}
+
+// VerifyPacketCommitment verifies a proof of an outgoing packet commitment at
+// the specified port, specified channel, and specified sequence.
+func (cs ClientState) VerifyPacketCommitment(
+ store sdk.KVStore,
+ cdc codec.BinaryMarshaler,
+ height exported.Height,
+ _ uint64,
+ _ uint64,
+ prefix exported.Prefix,
+ proof []byte,
+ portID,
+ channelID string,
+ packetSequence uint64,
+ commitmentBytes []byte,
+) error {
+ publicKey, sigData, timestamp, sequence, err := produceVerificationArgs(cdc, cs, height, prefix, proof)
+ if err != nil {
+ return err
+ }
+
+ commitmentPath := commitmenttypes.NewMerklePath(host.PacketCommitmentPath(portID, channelID, packetSequence))
+ path, err := commitmenttypes.ApplyPrefix(prefix, commitmentPath)
+ if err != nil {
+ return err
+ }
+
+ signBz, err := PacketCommitmentSignBytes(cdc, sequence, timestamp, cs.ConsensusState.Diversifier, path, commitmentBytes)
+ if err != nil {
+ return err
+ }
+
+ if err := VerifySignature(publicKey, signBz, sigData); err != nil {
+ return err
+ }
+
+ cs.Sequence++
+ cs.ConsensusState.Timestamp = timestamp
+ setClientState(store, cdc, &cs)
+ return nil
+}
+
+// VerifyPacketAcknowledgement verifies a proof of an incoming packet
+// acknowledgement at the specified port, specified channel, and specified sequence.
+func (cs ClientState) VerifyPacketAcknowledgement(
+ store sdk.KVStore,
+ cdc codec.BinaryMarshaler,
+ height exported.Height,
+ _ uint64,
+ _ uint64,
+ prefix exported.Prefix,
+ proof []byte,
+ portID,
+ channelID string,
+ packetSequence uint64,
+ acknowledgement []byte,
+) error {
+ publicKey, sigData, timestamp, sequence, err := produceVerificationArgs(cdc, cs, height, prefix, proof)
+ if err != nil {
+ return err
+ }
+
+ ackPath := commitmenttypes.NewMerklePath(host.PacketAcknowledgementPath(portID, channelID, packetSequence))
+ path, err := commitmenttypes.ApplyPrefix(prefix, ackPath)
+ if err != nil {
+ return err
+ }
+
+ signBz, err := PacketAcknowledgementSignBytes(cdc, sequence, timestamp, cs.ConsensusState.Diversifier, path, acknowledgement)
+ if err != nil {
+ return err
+ }
+
+ if err := VerifySignature(publicKey, signBz, sigData); err != nil {
+ return err
+ }
+
+ cs.Sequence++
+ cs.ConsensusState.Timestamp = timestamp
+ setClientState(store, cdc, &cs)
+ return nil
+}
+
+// VerifyPacketReceiptAbsence verifies a proof of the absence of an
+// incoming packet receipt at the specified port, specified channel, and
+// specified sequence.
+func (cs ClientState) VerifyPacketReceiptAbsence(
+ store sdk.KVStore,
+ cdc codec.BinaryMarshaler,
+ height exported.Height,
+ _ uint64,
+ _ uint64,
+ prefix exported.Prefix,
+ proof []byte,
+ portID,
+ channelID string,
+ packetSequence uint64,
+) error {
+ publicKey, sigData, timestamp, sequence, err := produceVerificationArgs(cdc, cs, height, prefix, proof)
+ if err != nil {
+ return err
+ }
+
+ receiptPath := commitmenttypes.NewMerklePath(host.PacketReceiptPath(portID, channelID, packetSequence))
+ path, err := commitmenttypes.ApplyPrefix(prefix, receiptPath)
+ if err != nil {
+ return err
+ }
+
+ signBz, err := PacketReceiptAbsenceSignBytes(cdc, sequence, timestamp, cs.ConsensusState.Diversifier, path)
+ if err != nil {
+ return err
+ }
+
+ if err := VerifySignature(publicKey, signBz, sigData); err != nil {
+ return err
+ }
+
+ cs.Sequence++
+ cs.ConsensusState.Timestamp = timestamp
+ setClientState(store, cdc, &cs)
+ return nil
+}
+
+// VerifyNextSequenceRecv verifies a proof of the next sequence number to be
+// received of the specified channel at the specified port.
+func (cs ClientState) VerifyNextSequenceRecv(
+ store sdk.KVStore,
+ cdc codec.BinaryMarshaler,
+ height exported.Height,
+ _ uint64,
+ _ uint64,
+ prefix exported.Prefix,
+ proof []byte,
+ portID,
+ channelID string,
+ nextSequenceRecv uint64,
+) error {
+ publicKey, sigData, timestamp, sequence, err := produceVerificationArgs(cdc, cs, height, prefix, proof)
+ if err != nil {
+ return err
+ }
+
+ nextSequenceRecvPath := commitmenttypes.NewMerklePath(host.NextSequenceRecvPath(portID, channelID))
+ path, err := commitmenttypes.ApplyPrefix(prefix, nextSequenceRecvPath)
+ if err != nil {
+ return err
+ }
+
+ signBz, err := NextSequenceRecvSignBytes(cdc, sequence, timestamp, cs.ConsensusState.Diversifier, path, nextSequenceRecv)
+ if err != nil {
+ return err
+ }
+
+ if err := VerifySignature(publicKey, signBz, sigData); err != nil {
+ return err
+ }
+
+ cs.Sequence++
+ cs.ConsensusState.Timestamp = timestamp
+ setClientState(store, cdc, &cs)
+ return nil
+}
+
+// produceVerificationArgs performs the basic checks on the arguments that are
+// shared between the verification functions and returns the public key of the
+// consensus state, the unmarshalled proof representing the signature and timestamp
+// along with the solo-machine sequence encoded in the proofHeight.
+func produceVerificationArgs(
+ cdc codec.BinaryMarshaler,
+ cs ClientState,
+ height exported.Height,
+ prefix exported.Prefix,
+ proof []byte,
+) (cryptotypes.PubKey, signing.SignatureData, uint64, uint64, error) {
+ if revision := height.GetRevisionNumber(); revision != 0 {
+ return nil, nil, 0, 0, sdkerrors.Wrapf(sdkerrors.ErrInvalidHeight, "revision must be 0 for solomachine, got revision-number: %d", revision)
+ }
+ // sequence is encoded in the revision height of height struct
+ sequence := height.GetRevisionHeight()
+ if cs.IsFrozen() {
+ return nil, nil, 0, 0, clienttypes.ErrClientFrozen
+ }
+
+ if prefix == nil {
+ return nil, nil, 0, 0, sdkerrors.Wrap(commitmenttypes.ErrInvalidPrefix, "prefix cannot be empty")
+ }
+
+ _, ok := prefix.(commitmenttypes.MerklePrefix)
+ if !ok {
+ return nil, nil, 0, 0, sdkerrors.Wrapf(commitmenttypes.ErrInvalidPrefix, "invalid prefix type %T, expected MerklePrefix", prefix)
+ }
+
+ if proof == nil {
+ return nil, nil, 0, 0, sdkerrors.Wrap(ErrInvalidProof, "proof cannot be empty")
+ }
+
+ timestampedSigData := &TimestampedSignatureData{}
+ if err := cdc.UnmarshalBinaryBare(proof, timestampedSigData); err != nil {
+ return nil, nil, 0, 0, sdkerrors.Wrapf(err, "failed to unmarshal proof into type %T", timestampedSigData)
+ }
+
+ timestamp := timestampedSigData.Timestamp
+
+ if len(timestampedSigData.SignatureData) == 0 {
+ return nil, nil, 0, 0, sdkerrors.Wrap(ErrInvalidProof, "signature data cannot be empty")
+ }
+
+ sigData, err := UnmarshalSignatureData(cdc, timestampedSigData.SignatureData)
+ if err != nil {
+ return nil, nil, 0, 0, err
+ }
+
+ if cs.ConsensusState == nil {
+ return nil, nil, 0, 0, sdkerrors.Wrap(clienttypes.ErrInvalidConsensus, "consensus state cannot be empty")
+ }
+
+ latestSequence := cs.GetLatestHeight().GetRevisionHeight()
+ if latestSequence != sequence {
+ return nil, nil, 0, 0, sdkerrors.Wrapf(
+ sdkerrors.ErrInvalidHeight,
+ "client state sequence != proof sequence (%d != %d)", latestSequence, sequence,
+ )
+ }
+
+ if cs.ConsensusState.GetTimestamp() > timestamp {
+ return nil, nil, 0, 0, sdkerrors.Wrapf(ErrInvalidProof, "the consensus state timestamp is greater than the signature timestamp (%d >= %d)", cs.ConsensusState.GetTimestamp(), timestamp)
+ }
+
+ publicKey, err := cs.ConsensusState.GetPubKey()
+ if err != nil {
+ return nil, nil, 0, 0, err
+ }
+
+ return publicKey, sigData, timestamp, sequence, nil
+}
+
+// sets the client state to the store
+func setClientState(store sdk.KVStore, cdc codec.BinaryMarshaler, clientState exported.ClientState) {
+ bz := clienttypes.MustMarshalClientState(cdc, clientState)
+ store.Set([]byte(host.KeyClientState), bz)
+}
diff --git a/light-clients/06-solomachine/types/client_state_test.go b/light-clients/06-solomachine/types/client_state_test.go
new file mode 100644
index 00000000..4f6c195c
--- /dev/null
+++ b/light-clients/06-solomachine/types/client_state_test.go
@@ -0,0 +1,912 @@
+package types_test
+
+import (
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
+ channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+ commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/06-solomachine/types"
+ ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
+ ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+)
+
+const (
+ counterpartyClientIdentifier = "chainA"
+ testConnectionID = "connectionid"
+ testChannelID = "testchannelid"
+ testPortID = "testportid"
+)
+
+var (
+ prefix = commitmenttypes.NewMerklePrefix([]byte("ibc"))
+ consensusHeight = clienttypes.ZeroHeight()
+)
+
+func (suite *SoloMachineTestSuite) TestClientStateValidateBasic() {
+ // test singlesig and multisig public keys
+ for _, solomachine := range []*ibctesting.Solomachine{suite.solomachine, suite.solomachineMulti} {
+
+ testCases := []struct {
+ name string
+ clientState *types.ClientState
+ expPass bool
+ }{
+ {
+ "valid client state",
+ solomachine.ClientState(),
+ true,
+ },
+ {
+ "empty ClientState",
+ &types.ClientState{},
+ false,
+ },
+ {
+ "sequence is zero",
+ types.NewClientState(0, &types.ConsensusState{solomachine.ConsensusState().PublicKey, solomachine.Diversifier, solomachine.Time}, false),
+ false,
+ },
+ {
+ "timestamp is zero",
+ types.NewClientState(1, &types.ConsensusState{solomachine.ConsensusState().PublicKey, solomachine.Diversifier, 0}, false),
+ false,
+ },
+ {
+ "diversifier is blank",
+ types.NewClientState(1, &types.ConsensusState{solomachine.ConsensusState().PublicKey, " ", 1}, false),
+ false,
+ },
+ {
+ "pubkey is empty",
+ types.NewClientState(1, &types.ConsensusState{nil, solomachine.Diversifier, solomachine.Time}, false),
+ false,
+ },
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+
+ suite.Run(tc.name, func() {
+
+ err := tc.clientState.Validate()
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+ }
+}
+
+func (suite *SoloMachineTestSuite) TestInitialize() {
+ // test singlesig and multisig public keys
+ for _, solomachine := range []*ibctesting.Solomachine{suite.solomachine, suite.solomachineMulti} {
+ malleatedConsensus := solomachine.ClientState().ConsensusState
+ malleatedConsensus.Timestamp = malleatedConsensus.Timestamp + 10
+
+ testCases := []struct {
+ name string
+ consState exported.ConsensusState
+ expPass bool
+ }{
+ {
+ "valid consensus state",
+ solomachine.ConsensusState(),
+ true,
+ },
+ {
+ "nil consensus state",
+ nil,
+ false,
+ },
+ {
+ "invalid consensus state: Tendermint consensus state",
+ &ibctmtypes.ConsensusState{},
+ false,
+ },
+ {
+ "invalid consensus state: consensus state does not match consensus state in client",
+ malleatedConsensus,
+ false,
+ },
+ }
+
+ for _, tc := range testCases {
+ err := solomachine.ClientState().Initialize(
+ suite.chainA.GetContext(), suite.chainA.Codec,
+ suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), "solomachine"),
+ tc.consState,
+ )
+
+ if tc.expPass {
+ suite.Require().NoError(err, "valid testcase: %s failed", tc.name)
+ } else {
+ suite.Require().Error(err, "invalid testcase: %s passed", tc.name)
+ }
+ }
+ }
+}
+
+func (suite *SoloMachineTestSuite) TestVerifyClientState() {
+ // create client for tendermint so we can use client state for verification
+ clientA, _ := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+ clientState := suite.chainA.GetClientState(clientA)
+ path := suite.solomachine.GetClientStatePath(counterpartyClientIdentifier)
+
+ // test singlesig and multisig public keys
+ for _, solomachine := range []*ibctesting.Solomachine{suite.solomachine, suite.solomachineMulti} {
+
+ value, err := types.ClientStateSignBytes(suite.chainA.Codec, solomachine.Sequence, solomachine.Time, solomachine.Diversifier, path, clientState)
+ suite.Require().NoError(err)
+
+ sig := solomachine.GenerateSignature(value)
+
+ signatureDoc := &types.TimestampedSignatureData{
+ SignatureData: sig,
+ Timestamp: solomachine.Time,
+ }
+
+ proof, err := suite.chainA.Codec.MarshalBinaryBare(signatureDoc)
+ suite.Require().NoError(err)
+
+ testCases := []struct {
+ name string
+ clientState *types.ClientState
+ prefix exported.Prefix
+ proof []byte
+ expPass bool
+ }{
+ {
+ "successful verification",
+ solomachine.ClientState(),
+ prefix,
+ proof,
+ true,
+ },
+ {
+ "ApplyPrefix failed",
+ solomachine.ClientState(),
+ nil,
+ proof,
+ false,
+ },
+ {
+ "client is frozen",
+ &types.ClientState{
+ Sequence: 1,
+ FrozenSequence: 1,
+ ConsensusState: solomachine.ConsensusState(),
+ AllowUpdateAfterProposal: false,
+ },
+ prefix,
+ proof,
+ false,
+ },
+ {
+ "consensus state in client state is nil",
+ types.NewClientState(1, nil, false),
+ prefix,
+ proof,
+ false,
+ },
+ {
+ "client state latest height is less than sequence",
+ types.NewClientState(solomachine.Sequence-1,
+ &types.ConsensusState{
+ Timestamp: solomachine.Time,
+ PublicKey: solomachine.ConsensusState().PublicKey,
+ }, false),
+ prefix,
+ proof,
+ false,
+ },
+ {
+ "consensus state timestamp is greater than signature",
+ types.NewClientState(solomachine.Sequence,
+ &types.ConsensusState{
+ Timestamp: solomachine.Time + 1,
+ PublicKey: solomachine.ConsensusState().PublicKey,
+ }, false),
+ prefix,
+ proof,
+ false,
+ },
+
+ {
+ "proof is nil",
+ solomachine.ClientState(),
+ prefix,
+ nil,
+ false,
+ },
+ {
+ "proof verification failed",
+ solomachine.ClientState(),
+ prefix,
+ suite.GetInvalidProof(),
+ false,
+ },
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+
+ suite.Run(tc.name, func() {
+
+ var expSeq uint64
+ if tc.clientState.ConsensusState != nil {
+ expSeq = tc.clientState.Sequence + 1
+ }
+
+ err := tc.clientState.VerifyClientState(
+ suite.store, suite.chainA.Codec, solomachine.GetHeight(), tc.prefix, counterpartyClientIdentifier, tc.proof, clientState,
+ )
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ suite.Require().Equal(expSeq, suite.GetSequenceFromStore(), "sequence not updated in the store (%d) on valid test case %s", suite.GetSequenceFromStore(), tc.name)
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+ }
+}
+
+func (suite *SoloMachineTestSuite) TestVerifyClientConsensusState() {
+ // create client for tendermint so we can use consensus state for verification
+ clientA, _ := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+ clientState := suite.chainA.GetClientState(clientA)
+ consensusState, found := suite.chainA.GetConsensusState(clientA, clientState.GetLatestHeight())
+ suite.Require().True(found)
+
+ path := suite.solomachine.GetConsensusStatePath(counterpartyClientIdentifier, consensusHeight)
+
+ // test singlesig and multisig public keys
+ for _, solomachine := range []*ibctesting.Solomachine{suite.solomachine, suite.solomachineMulti} {
+
+ value, err := types.ConsensusStateSignBytes(suite.chainA.Codec, solomachine.Sequence, solomachine.Time, solomachine.Diversifier, path, consensusState)
+ suite.Require().NoError(err)
+
+ sig := solomachine.GenerateSignature(value)
+ signatureDoc := &types.TimestampedSignatureData{
+ SignatureData: sig,
+ Timestamp: solomachine.Time,
+ }
+
+ proof, err := suite.chainA.Codec.MarshalBinaryBare(signatureDoc)
+ suite.Require().NoError(err)
+
+ testCases := []struct {
+ name string
+ clientState *types.ClientState
+ prefix exported.Prefix
+ proof []byte
+ expPass bool
+ }{
+ {
+ "successful verification",
+ solomachine.ClientState(),
+ prefix,
+ proof,
+ true,
+ },
+ {
+ "ApplyPrefix failed",
+ solomachine.ClientState(),
+ nil,
+ proof,
+ false,
+ },
+ {
+ "client is frozen",
+ &types.ClientState{
+ Sequence: 1,
+ FrozenSequence: 1,
+ ConsensusState: solomachine.ConsensusState(),
+ AllowUpdateAfterProposal: false,
+ },
+ prefix,
+ proof,
+ false,
+ },
+ {
+ "consensus state in client state is nil",
+ types.NewClientState(1, nil, false),
+ prefix,
+ proof,
+ false,
+ },
+ {
+ "client state latest height is less than sequence",
+ types.NewClientState(solomachine.Sequence-1,
+ &types.ConsensusState{
+ Timestamp: solomachine.Time,
+ PublicKey: solomachine.ConsensusState().PublicKey,
+ }, false),
+ prefix,
+ proof,
+ false,
+ },
+ {
+ "consensus state timestamp is greater than signature",
+ types.NewClientState(solomachine.Sequence,
+ &types.ConsensusState{
+ Timestamp: solomachine.Time + 1,
+ PublicKey: solomachine.ConsensusState().PublicKey,
+ }, false),
+ prefix,
+ proof,
+ false,
+ },
+
+ {
+ "proof is nil",
+ solomachine.ClientState(),
+ prefix,
+ nil,
+ false,
+ },
+ {
+ "proof verification failed",
+ solomachine.ClientState(),
+ prefix,
+ suite.GetInvalidProof(),
+ false,
+ },
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+
+ suite.Run(tc.name, func() {
+
+ var expSeq uint64
+ if tc.clientState.ConsensusState != nil {
+ expSeq = tc.clientState.Sequence + 1
+ }
+
+ err := tc.clientState.VerifyClientConsensusState(
+ suite.store, suite.chainA.Codec, solomachine.GetHeight(), counterpartyClientIdentifier, consensusHeight, tc.prefix, tc.proof, consensusState,
+ )
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ suite.Require().Equal(expSeq, suite.GetSequenceFromStore(), "sequence not updated in the store (%d) on valid test case %s", suite.GetSequenceFromStore(), tc.name)
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+ }
+}
+
+func (suite *SoloMachineTestSuite) TestVerifyConnectionState() {
+ counterparty := connectiontypes.NewCounterparty("clientB", testConnectionID, prefix)
+ conn := connectiontypes.NewConnectionEnd(connectiontypes.OPEN, "clientA", counterparty, connectiontypes.ExportedVersionsToProto(connectiontypes.GetCompatibleVersions()), 0)
+
+ path := suite.solomachine.GetConnectionStatePath(testConnectionID)
+
+ // test singlesig and multisig public keys
+ for _, solomachine := range []*ibctesting.Solomachine{suite.solomachine, suite.solomachineMulti} {
+
+ value, err := types.ConnectionStateSignBytes(suite.chainA.Codec, solomachine.Sequence, solomachine.Time, solomachine.Diversifier, path, conn)
+ suite.Require().NoError(err)
+
+ sig := solomachine.GenerateSignature(value)
+ signatureDoc := &types.TimestampedSignatureData{
+ SignatureData: sig,
+ Timestamp: solomachine.Time,
+ }
+
+ proof, err := suite.chainA.Codec.MarshalBinaryBare(signatureDoc)
+ suite.Require().NoError(err)
+
+ testCases := []struct {
+ name string
+ clientState *types.ClientState
+ prefix exported.Prefix
+ proof []byte
+ expPass bool
+ }{
+ {
+ "successful verification",
+ solomachine.ClientState(),
+ prefix,
+ proof,
+ true,
+ },
+ {
+ "ApplyPrefix failed",
+ solomachine.ClientState(),
+ commitmenttypes.NewMerklePrefix([]byte{}),
+ proof,
+ false,
+ },
+ {
+ "client is frozen",
+ &types.ClientState{
+ Sequence: 1,
+ FrozenSequence: 1,
+ ConsensusState: solomachine.ConsensusState(),
+ AllowUpdateAfterProposal: false,
+ },
+ prefix,
+ proof,
+ false,
+ },
+ {
+ "proof is nil",
+ solomachine.ClientState(),
+ prefix,
+ nil,
+ false,
+ },
+ {
+ "proof verification failed",
+ solomachine.ClientState(),
+ prefix,
+ suite.GetInvalidProof(),
+ false,
+ },
+ }
+
+ for i, tc := range testCases {
+ tc := tc
+
+ expSeq := tc.clientState.Sequence + 1
+
+ err := tc.clientState.VerifyConnectionState(
+ suite.store, suite.chainA.Codec, solomachine.GetHeight(), tc.prefix, tc.proof, testConnectionID, conn,
+ )
+
+ if tc.expPass {
+ suite.Require().NoError(err, "valid test case %d failed: %s", i, tc.name)
+ suite.Require().Equal(expSeq, suite.GetSequenceFromStore(), "sequence not updated in the store (%d) on valid test case %d: %s", suite.GetSequenceFromStore(), i, tc.name)
+ } else {
+ suite.Require().Error(err, "invalid test case %d passed: %s", i, tc.name)
+ }
+ }
+ }
+}
+
+func (suite *SoloMachineTestSuite) TestVerifyChannelState() {
+ counterparty := channeltypes.NewCounterparty(testPortID, testChannelID)
+ ch := channeltypes.NewChannel(channeltypes.OPEN, channeltypes.ORDERED, counterparty, []string{testConnectionID}, "1.0.0")
+
+ path := suite.solomachine.GetChannelStatePath(testPortID, testChannelID)
+
+ // test singlesig and multisig public keys
+ for _, solomachine := range []*ibctesting.Solomachine{suite.solomachine, suite.solomachineMulti} {
+
+ value, err := types.ChannelStateSignBytes(suite.chainA.Codec, solomachine.Sequence, solomachine.Time, solomachine.Diversifier, path, ch)
+ suite.Require().NoError(err)
+
+ sig := solomachine.GenerateSignature(value)
+ signatureDoc := &types.TimestampedSignatureData{
+ SignatureData: sig,
+ Timestamp: solomachine.Time,
+ }
+
+ proof, err := suite.chainA.Codec.MarshalBinaryBare(signatureDoc)
+ suite.Require().NoError(err)
+
+ testCases := []struct {
+ name string
+ clientState *types.ClientState
+ prefix exported.Prefix
+ proof []byte
+ expPass bool
+ }{
+ {
+ "successful verification",
+ solomachine.ClientState(),
+ prefix,
+ proof,
+ true,
+ },
+ {
+ "ApplyPrefix failed",
+ solomachine.ClientState(),
+ nil,
+ proof,
+ false,
+ },
+ {
+ "client is frozen",
+ &types.ClientState{
+ Sequence: 1,
+ FrozenSequence: 1,
+ ConsensusState: solomachine.ConsensusState(),
+ AllowUpdateAfterProposal: false,
+ },
+ prefix,
+ proof,
+ false,
+ },
+ {
+ "proof is nil",
+ solomachine.ClientState(),
+ prefix,
+ nil,
+ false,
+ },
+ {
+ "proof verification failed",
+ solomachine.ClientState(),
+ prefix,
+ suite.GetInvalidProof(),
+ false,
+ },
+ }
+
+ for i, tc := range testCases {
+ tc := tc
+
+ expSeq := tc.clientState.Sequence + 1
+
+ err := tc.clientState.VerifyChannelState(
+ suite.store, suite.chainA.Codec, solomachine.GetHeight(), tc.prefix, tc.proof, testPortID, testChannelID, ch,
+ )
+
+ if tc.expPass {
+ suite.Require().NoError(err, "valid test case %d failed: %s", i, tc.name)
+ suite.Require().Equal(expSeq, suite.GetSequenceFromStore(), "sequence not updated in the store (%d) on valid test case %d: %s", suite.GetSequenceFromStore(), i, tc.name)
+ } else {
+ suite.Require().Error(err, "invalid test case %d passed: %s", i, tc.name)
+ }
+ }
+ }
+}
+
+func (suite *SoloMachineTestSuite) TestVerifyPacketCommitment() {
+ commitmentBytes := []byte("COMMITMENT BYTES")
+
+ // test singlesig and multisig public keys
+ for _, solomachine := range []*ibctesting.Solomachine{suite.solomachine, suite.solomachineMulti} {
+
+ path := solomachine.GetPacketCommitmentPath(testPortID, testChannelID)
+
+ value, err := types.PacketCommitmentSignBytes(suite.chainA.Codec, solomachine.Sequence, solomachine.Time, solomachine.Diversifier, path, commitmentBytes)
+ suite.Require().NoError(err)
+
+ sig := solomachine.GenerateSignature(value)
+ signatureDoc := &types.TimestampedSignatureData{
+ SignatureData: sig,
+ Timestamp: solomachine.Time,
+ }
+
+ proof, err := suite.chainA.Codec.MarshalBinaryBare(signatureDoc)
+ suite.Require().NoError(err)
+
+ testCases := []struct {
+ name string
+ clientState *types.ClientState
+ prefix exported.Prefix
+ proof []byte
+ expPass bool
+ }{
+ {
+ "successful verification",
+ solomachine.ClientState(),
+ prefix,
+ proof,
+ true,
+ },
+ {
+ "ApplyPrefix failed",
+ solomachine.ClientState(),
+ commitmenttypes.NewMerklePrefix([]byte{}),
+ proof,
+ false,
+ },
+ {
+ "client is frozen",
+ &types.ClientState{
+ Sequence: 1,
+ FrozenSequence: 1,
+ ConsensusState: solomachine.ConsensusState(),
+ AllowUpdateAfterProposal: false,
+ },
+ prefix,
+ proof,
+ false,
+ },
+ {
+ "proof is nil",
+ solomachine.ClientState(),
+ prefix,
+ nil,
+ false,
+ },
+ {
+ "proof verification failed",
+ solomachine.ClientState(),
+ prefix,
+ suite.GetInvalidProof(),
+ false,
+ },
+ }
+
+ for i, tc := range testCases {
+ tc := tc
+
+ expSeq := tc.clientState.Sequence + 1
+
+ err := tc.clientState.VerifyPacketCommitment(
+ suite.store, suite.chainA.Codec, solomachine.GetHeight(), 0, 0, tc.prefix, tc.proof, testPortID, testChannelID, solomachine.Sequence, commitmentBytes,
+ )
+
+ if tc.expPass {
+ suite.Require().NoError(err, "valid test case %d failed: %s", i, tc.name)
+ suite.Require().Equal(expSeq, suite.GetSequenceFromStore(), "sequence not updated in the store (%d) on valid test case %d: %s", suite.GetSequenceFromStore(), i, tc.name)
+ } else {
+ suite.Require().Error(err, "invalid test case %d passed: %s", i, tc.name)
+ }
+ }
+ }
+}
+
+func (suite *SoloMachineTestSuite) TestVerifyPacketAcknowledgement() {
+ ack := []byte("ACK")
+ // test singlesig and multisig public keys
+ for _, solomachine := range []*ibctesting.Solomachine{suite.solomachine, suite.solomachineMulti} {
+
+ path := solomachine.GetPacketAcknowledgementPath(testPortID, testChannelID)
+
+ value, err := types.PacketAcknowledgementSignBytes(suite.chainA.Codec, solomachine.Sequence, solomachine.Time, solomachine.Diversifier, path, ack)
+ suite.Require().NoError(err)
+
+ sig := solomachine.GenerateSignature(value)
+ signatureDoc := &types.TimestampedSignatureData{
+ SignatureData: sig,
+ Timestamp: solomachine.Time,
+ }
+
+ proof, err := suite.chainA.Codec.MarshalBinaryBare(signatureDoc)
+ suite.Require().NoError(err)
+
+ testCases := []struct {
+ name string
+ clientState *types.ClientState
+ prefix exported.Prefix
+ proof []byte
+ expPass bool
+ }{
+ {
+ "successful verification",
+ solomachine.ClientState(),
+ prefix,
+ proof,
+ true,
+ },
+ {
+ "ApplyPrefix failed",
+ solomachine.ClientState(),
+ commitmenttypes.NewMerklePrefix([]byte{}),
+ proof,
+ false,
+ },
+ {
+ "client is frozen",
+ &types.ClientState{
+ Sequence: 1,
+ FrozenSequence: 1,
+ ConsensusState: solomachine.ConsensusState(),
+ AllowUpdateAfterProposal: false,
+ },
+ prefix,
+ proof,
+ false,
+ },
+ {
+ "proof is nil",
+ solomachine.ClientState(),
+ prefix,
+ nil,
+ false,
+ },
+ {
+ "proof verification failed",
+ solomachine.ClientState(),
+ prefix,
+ suite.GetInvalidProof(),
+ false,
+ },
+ }
+
+ for i, tc := range testCases {
+ tc := tc
+
+ expSeq := tc.clientState.Sequence + 1
+
+ err := tc.clientState.VerifyPacketAcknowledgement(
+ suite.store, suite.chainA.Codec, solomachine.GetHeight(), 0, 0, tc.prefix, tc.proof, testPortID, testChannelID, solomachine.Sequence, ack,
+ )
+
+ if tc.expPass {
+ suite.Require().NoError(err, "valid test case %d failed: %s", i, tc.name)
+ suite.Require().Equal(expSeq, suite.GetSequenceFromStore(), "sequence not updated in the store (%d) on valid test case %d: %s", suite.GetSequenceFromStore(), i, tc.name)
+ } else {
+ suite.Require().Error(err, "invalid test case %d passed: %s", i, tc.name)
+ }
+ }
+ }
+}
+
+func (suite *SoloMachineTestSuite) TestVerifyPacketReceiptAbsence() {
+ // test singlesig and multisig public keys
+ for _, solomachine := range []*ibctesting.Solomachine{suite.solomachine, suite.solomachineMulti} {
+
+ // absence uses receipt path as well
+ path := solomachine.GetPacketReceiptPath(testPortID, testChannelID)
+
+ value, err := types.PacketReceiptAbsenceSignBytes(suite.chainA.Codec, solomachine.Sequence, solomachine.Time, solomachine.Diversifier, path)
+ suite.Require().NoError(err)
+
+ sig := solomachine.GenerateSignature(value)
+ signatureDoc := &types.TimestampedSignatureData{
+ SignatureData: sig,
+ Timestamp: solomachine.Time,
+ }
+
+ proof, err := suite.chainA.Codec.MarshalBinaryBare(signatureDoc)
+ suite.Require().NoError(err)
+
+ testCases := []struct {
+ name string
+ clientState *types.ClientState
+ prefix exported.Prefix
+ proof []byte
+ expPass bool
+ }{
+ {
+ "successful verification",
+ solomachine.ClientState(),
+ prefix,
+ proof,
+ true,
+ },
+ {
+ "ApplyPrefix failed",
+ solomachine.ClientState(),
+ commitmenttypes.NewMerklePrefix([]byte{}),
+ proof,
+ false,
+ },
+ {
+ "client is frozen",
+ &types.ClientState{
+ Sequence: 1,
+ FrozenSequence: 1,
+ ConsensusState: solomachine.ConsensusState(),
+ AllowUpdateAfterProposal: false,
+ },
+ prefix,
+ proof,
+ false,
+ },
+ {
+ "proof is nil",
+ solomachine.ClientState(),
+ prefix,
+ nil,
+ false,
+ },
+ {
+ "proof verification failed",
+ solomachine.ClientState(),
+ prefix,
+ suite.GetInvalidProof(),
+ false,
+ },
+ }
+
+ for i, tc := range testCases {
+ tc := tc
+
+ expSeq := tc.clientState.Sequence + 1
+
+ err := tc.clientState.VerifyPacketReceiptAbsence(
+ suite.store, suite.chainA.Codec, solomachine.GetHeight(), 0, 0, tc.prefix, tc.proof, testPortID, testChannelID, solomachine.Sequence,
+ )
+
+ if tc.expPass {
+ suite.Require().NoError(err, "valid test case %d failed: %s", i, tc.name)
+ suite.Require().Equal(expSeq, suite.GetSequenceFromStore(), "sequence not updated in the store (%d) on valid test case %d: %s", suite.GetSequenceFromStore(), i, tc.name)
+ } else {
+ suite.Require().Error(err, "invalid test case %d passed: %s", i, tc.name)
+ }
+ }
+ }
+}
+
+func (suite *SoloMachineTestSuite) TestVerifyNextSeqRecv() {
+ // test singlesig and multisig public keys
+ for _, solomachine := range []*ibctesting.Solomachine{suite.solomachine, suite.solomachineMulti} {
+
+ nextSeqRecv := solomachine.Sequence + 1
+ path := solomachine.GetNextSequenceRecvPath(testPortID, testChannelID)
+
+ value, err := types.NextSequenceRecvSignBytes(suite.chainA.Codec, solomachine.Sequence, solomachine.Time, solomachine.Diversifier, path, nextSeqRecv)
+ suite.Require().NoError(err)
+
+ sig := solomachine.GenerateSignature(value)
+ signatureDoc := &types.TimestampedSignatureData{
+ SignatureData: sig,
+ Timestamp: solomachine.Time,
+ }
+
+ proof, err := suite.chainA.Codec.MarshalBinaryBare(signatureDoc)
+ suite.Require().NoError(err)
+
+ testCases := []struct {
+ name string
+ clientState *types.ClientState
+ prefix exported.Prefix
+ proof []byte
+ expPass bool
+ }{
+ {
+ "successful verification",
+ solomachine.ClientState(),
+ prefix,
+ proof,
+ true,
+ },
+ {
+ "ApplyPrefix failed",
+ solomachine.ClientState(),
+ commitmenttypes.NewMerklePrefix([]byte{}),
+ proof,
+ false,
+ },
+ {
+ "client is frozen",
+ &types.ClientState{
+ Sequence: 1,
+ FrozenSequence: 1,
+ ConsensusState: solomachine.ConsensusState(),
+ AllowUpdateAfterProposal: false,
+ },
+ prefix,
+ proof,
+ false,
+ },
+ {
+ "proof is nil",
+ solomachine.ClientState(),
+ prefix,
+ nil,
+ false,
+ },
+ {
+ "proof verification failed",
+ solomachine.ClientState(),
+ prefix,
+ suite.GetInvalidProof(),
+ false,
+ },
+ }
+
+ for i, tc := range testCases {
+ tc := tc
+
+ expSeq := tc.clientState.Sequence + 1
+
+ err := tc.clientState.VerifyNextSequenceRecv(
+ suite.store, suite.chainA.Codec, solomachine.GetHeight(), 0, 0, tc.prefix, tc.proof, testPortID, testChannelID, nextSeqRecv,
+ )
+
+ if tc.expPass {
+ suite.Require().NoError(err, "valid test case %d failed: %s", i, tc.name)
+ suite.Require().Equal(expSeq, suite.GetSequenceFromStore(), "sequence not updated in the store (%d) on valid test case %d: %s", suite.GetSequenceFromStore(), i, tc.name)
+ } else {
+ suite.Require().Error(err, "invalid test case %d passed: %s", i, tc.name)
+ }
+ }
+ }
+}
diff --git a/light-clients/06-solomachine/types/codec.go b/light-clients/06-solomachine/types/codec.go
new file mode 100644
index 00000000..313a910c
--- /dev/null
+++ b/light-clients/06-solomachine/types/codec.go
@@ -0,0 +1,130 @@
+package types
+
+import (
+ "github.com/cosmos/cosmos-sdk/codec"
+ codectypes "github.com/cosmos/cosmos-sdk/codec/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ "github.com/cosmos/cosmos-sdk/types/tx/signing"
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+)
+
+// RegisterInterfaces registers the solo machine client implementations with the
+// protobuf Any interface registry.
+func RegisterInterfaces(registry codectypes.InterfaceRegistry) {
+ registry.RegisterImplementations(
+ (*exported.ClientState)(nil),
+ &ClientState{},
+ )
+ registry.RegisterImplementations(
+ (*exported.ConsensusState)(nil),
+ &ConsensusState{},
+ )
+ registry.RegisterImplementations(
+ (*exported.Header)(nil),
+ &Header{},
+ )
+ registry.RegisterImplementations(
+ (*exported.Misbehaviour)(nil),
+ &Misbehaviour{},
+ )
+}
+
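+// UnmarshalSignatureData unmarshals the provided bytes into a protobuf
+// SignatureDescriptor_Data and converts it to the signing.SignatureData
+// interface type. An error is returned if unmarshalling fails.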
+func UnmarshalSignatureData(cdc codec.BinaryMarshaler, data []byte) (signing.SignatureData, error) {
+ protoSigData := &signing.SignatureDescriptor_Data{}
+ if err := cdc.UnmarshalBinaryBare(data, protoSigData); err != nil {
+ return nil, sdkerrors.Wrapf(err, "failed to unmarshal proof into type %T", protoSigData)
+ }
+
+ sigData := signing.SignatureDataFromProto(protoSigData)
+
+ return sigData, nil
+}
+
+// UnmarshalDataByType attempts to unmarshal the data into the specified data type.
+// An error is returned if it fails.
+func UnmarshalDataByType(cdc codec.BinaryMarshaler, dataType DataType, data []byte) (Data, error) {
+ if len(data) == 0 {
+ return nil, sdkerrors.Wrap(ErrInvalidSignatureAndData, "data cannot be empty")
+ }
+
+ switch dataType {
+ case UNSPECIFIED:
+ return nil, sdkerrors.Wrap(ErrInvalidDataType, "data type cannot be UNSPECIFIED")
+
+ case CLIENT:
+ clientData := &ClientStateData{}
+ if err := cdc.UnmarshalBinaryBare(data, clientData); err != nil {
+ return nil, err
+ }
+
+ // unpack any
+ if _, err := clienttypes.UnpackClientState(clientData.ClientState); err != nil {
+ return nil, err
+ }
+ return clientData, nil
+
+ case CONSENSUS:
+ consensusData := &ConsensusStateData{}
+ if err := cdc.UnmarshalBinaryBare(data, consensusData); err != nil {
+ return nil, err
+ }
+
+ // unpack any
+ if _, err := clienttypes.UnpackConsensusState(consensusData.ConsensusState); err != nil {
+ return nil, err
+ }
+ return consensusData, nil
+
+ case CONNECTION:
+ connectionData := &ConnectionStateData{}
+ if err := cdc.UnmarshalBinaryBare(data, connectionData); err != nil {
+ return nil, err
+ }
+
+ return connectionData, nil
+
+ case CHANNEL:
+ channelData := &ChannelStateData{}
+ if err := cdc.UnmarshalBinaryBare(data, channelData); err != nil {
+ return nil, err
+ }
+
+ return channelData, nil
+
+ case PACKETCOMMITMENT:
+ commitmentData := &PacketCommitmentData{}
+ if err := cdc.UnmarshalBinaryBare(data, commitmentData); err != nil {
+ return nil, err
+ }
+
+ return commitmentData, nil
+
+ case PACKETACKNOWLEDGEMENT:
+ ackData := &PacketAcknowledgementData{}
+ if err := cdc.UnmarshalBinaryBare(data, ackData); err != nil {
+ return nil, err
+ }
+
+ return ackData, nil
+
+ case PACKETRECEIPTABSENCE:
+ receiptAbsenceData := &PacketReceiptAbsenceData{}
+ if err := cdc.UnmarshalBinaryBare(data, receiptAbsenceData); err != nil {
+ return nil, err
+ }
+
+ return receiptAbsenceData, nil
+
+ case NEXTSEQUENCERECV:
+ nextSeqRecvData := &NextSequenceRecvData{}
+ if err := cdc.UnmarshalBinaryBare(data, nextSeqRecvData); err != nil {
+ return nil, err
+ }
+
+ return nextSeqRecvData, nil
+
+ default:
+ return nil, sdkerrors.Wrapf(ErrInvalidDataType, "unsupported data type %T", dataType)
+ }
+}
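+
+// Illustrative usage sketch (for exposition only; cdc and proofBz are assumed
+// to be in scope, with proofBz holding a marshaled ChannelStateData): decode
+// proof data and type-assert it to the concrete type for the given DataType.
+//
+//   data, err := UnmarshalDataByType(cdc, CHANNEL, proofBz)
+//   if err != nil {
+//       return err
+//   }
+//   channelData, ok := data.(*ChannelStateData)
+//   if !ok {
+//       return sdkerrors.Wrap(ErrInvalidDataType, "unexpected concrete type for CHANNEL data")
+//   }
+//   _ = channelData.Channel // the channel end contained in the proof data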
diff --git a/light-clients/06-solomachine/types/codec_test.go b/light-clients/06-solomachine/types/codec_test.go
new file mode 100644
index 00000000..70be186a
--- /dev/null
+++ b/light-clients/06-solomachine/types/codec_test.go
@@ -0,0 +1,190 @@
+package types_test
+
+import (
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
+ channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/06-solomachine/types"
+ ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+)
+
+func (suite *SoloMachineTestSuite) TestUnmarshalDataByType() {
+ var (
+ data []byte
+ err error
+ )
+
+ // test singlesig and multisig public keys
+ for _, solomachine := range []*ibctesting.Solomachine{suite.solomachine, suite.solomachineMulti} {
+
+ cdc := suite.chainA.App.AppCodec()
+ cases := []struct {
+ name string
+ dataType types.DataType
+ malleate func()
+ expPass bool
+ }{
+ {
+ "empty data", types.CLIENT, func() {
+ data = []byte{}
+ }, false,
+ },
+ {
+ "unspecified", types.UNSPECIFIED, func() {
+ path := solomachine.GetClientStatePath(counterpartyClientIdentifier)
+ data, err = types.ClientStateDataBytes(cdc, path, solomachine.ClientState())
+ suite.Require().NoError(err)
+ }, false,
+ },
+ {
+ "client", types.CLIENT, func() {
+ path := solomachine.GetClientStatePath(counterpartyClientIdentifier)
+ data, err = types.ClientStateDataBytes(cdc, path, solomachine.ClientState())
+ suite.Require().NoError(err)
+ }, true,
+ },
+ {
+ "bad client (provides consensus state data)", types.CLIENT, func() {
+ path := solomachine.GetConsensusStatePath(counterpartyClientIdentifier, clienttypes.NewHeight(0, 5))
+ data, err = types.ConsensusStateDataBytes(cdc, path, solomachine.ConsensusState())
+ suite.Require().NoError(err)
+ }, false,
+ },
+ {
+ "consensus", types.CONSENSUS, func() {
+ path := solomachine.GetConsensusStatePath(counterpartyClientIdentifier, clienttypes.NewHeight(0, 5))
+ data, err = types.ConsensusStateDataBytes(cdc, path, solomachine.ConsensusState())
+ suite.Require().NoError(err)
+
+ }, true,
+ },
+ {
+ "bad consensus (provides client state data)", types.CONSENSUS, func() {
+ path := solomachine.GetClientStatePath(counterpartyClientIdentifier)
+ data, err = types.ClientStateDataBytes(cdc, path, solomachine.ClientState())
+ suite.Require().NoError(err)
+ }, false,
+ },
+ {
+ "connection", types.CONNECTION, func() {
+ counterparty := connectiontypes.NewCounterparty("clientB", testConnectionID, prefix)
+ conn := connectiontypes.NewConnectionEnd(connectiontypes.OPEN, "clientA", counterparty, connectiontypes.ExportedVersionsToProto(connectiontypes.GetCompatibleVersions()), 0)
+ path := solomachine.GetConnectionStatePath("connectionID")
+
+ data, err = types.ConnectionStateDataBytes(cdc, path, conn)
+ suite.Require().NoError(err)
+
+ }, true,
+ },
+ {
+ "bad connection (uses channel data)", types.CONNECTION, func() {
+ counterparty := channeltypes.NewCounterparty(testPortID, testChannelID)
+ ch := channeltypes.NewChannel(channeltypes.OPEN, channeltypes.ORDERED, counterparty, []string{testConnectionID}, "1.0.0")
+ path := solomachine.GetChannelStatePath("portID", "channelID")
+
+ data, err = types.ChannelStateDataBytes(cdc, path, ch)
+ suite.Require().NoError(err)
+ }, false,
+ },
+ {
+ "channel", types.CHANNEL, func() {
+ counterparty := channeltypes.NewCounterparty(testPortID, testChannelID)
+ ch := channeltypes.NewChannel(channeltypes.OPEN, channeltypes.ORDERED, counterparty, []string{testConnectionID}, "1.0.0")
+ path := solomachine.GetChannelStatePath("portID", "channelID")
+
+ data, err = types.ChannelStateDataBytes(cdc, path, ch)
+ suite.Require().NoError(err)
+ }, true,
+ },
+ {
+ "bad channel (uses connection data)", types.CHANNEL, func() {
+ counterparty := connectiontypes.NewCounterparty("clientB", testConnectionID, prefix)
+ conn := connectiontypes.NewConnectionEnd(connectiontypes.OPEN, "clientA", counterparty, connectiontypes.ExportedVersionsToProto(connectiontypes.GetCompatibleVersions()), 0)
+ path := solomachine.GetConnectionStatePath("connectionID")
+
+ data, err = types.ConnectionStateDataBytes(cdc, path, conn)
+ suite.Require().NoError(err)
+
+ }, false,
+ },
+ {
+ "packet commitment", types.PACKETCOMMITMENT, func() {
+ commitment := []byte("packet commitment")
+ path := solomachine.GetPacketCommitmentPath("portID", "channelID")
+
+ data, err = types.PacketCommitmentDataBytes(cdc, path, commitment)
+ suite.Require().NoError(err)
+ }, true,
+ },
+ {
+ "bad packet commitment (uses next seq recv)", types.PACKETCOMMITMENT, func() {
+ path := solomachine.GetNextSequenceRecvPath("portID", "channelID")
+
+ data, err = types.NextSequenceRecvDataBytes(cdc, path, 10)
+ suite.Require().NoError(err)
+ }, false,
+ },
+ {
+ "packet acknowledgement", types.PACKETACKNOWLEDGEMENT, func() {
+ commitment := []byte("packet acknowledgement")
+ path := solomachine.GetPacketAcknowledgementPath("portID", "channelID")
+
+ data, err = types.PacketAcknowledgementDataBytes(cdc, path, commitment)
+ suite.Require().NoError(err)
+ }, true,
+ },
+ {
+ "bad packet acknowledgement (uses next sequence recv)", types.PACKETACKNOWLEDGEMENT, func() {
+ path := solomachine.GetNextSequenceRecvPath("portID", "channelID")
+
+ data, err = types.NextSequenceRecvDataBytes(cdc, path, 10)
+ suite.Require().NoError(err)
+ }, false,
+ },
+ {
+ "packet acknowledgement absence", types.PACKETRECEIPTABSENCE, func() {
+ path := solomachine.GetPacketReceiptPath("portID", "channelID")
+
+ data, err = types.PacketReceiptAbsenceDataBytes(cdc, path)
+ suite.Require().NoError(err)
+ }, true,
+ },
+ {
+ "next sequence recv", types.NEXTSEQUENCERECV, func() {
+ path := solomachine.GetNextSequenceRecvPath("portID", "channelID")
+
+ data, err = types.NextSequenceRecvDataBytes(cdc, path, 10)
+ suite.Require().NoError(err)
+ }, true,
+ },
+ {
+ "bad next sequence recv (uses packet commitment)", types.NEXTSEQUENCERECV, func() {
+ commitment := []byte("packet commitment")
+ path := solomachine.GetPacketCommitmentPath("portID", "channelID")
+
+ data, err = types.PacketCommitmentDataBytes(cdc, path, commitment)
+ suite.Require().NoError(err)
+ }, false,
+ },
+ }
+
+ for _, tc := range cases {
+ tc := tc
+
+ suite.Run(tc.name, func() {
+ tc.malleate()
+
+ data, err := types.UnmarshalDataByType(cdc, tc.dataType, data)
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ suite.Require().NotNil(data)
+ } else {
+ suite.Require().Error(err)
+ suite.Require().Nil(data)
+ }
+ })
+ }
+ }
+}
diff --git a/light-clients/06-solomachine/types/consensus_state.go b/light-clients/06-solomachine/types/consensus_state.go
new file mode 100644
index 00000000..7d6d09cd
--- /dev/null
+++ b/light-clients/06-solomachine/types/consensus_state.go
@@ -0,0 +1,60 @@
+package types
+
+import (
+ "strings"
+
+ cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+)
+
+var _ exported.ConsensusState = &ConsensusState{}
+
+// ClientType returns the Solo Machine client type.
+func (ConsensusState) ClientType() string {
+ return exported.Solomachine
+}
+
+// GetTimestamp returns the timestamp of the consensus state.
+func (cs ConsensusState) GetTimestamp() uint64 {
+ return cs.Timestamp
+}
+
+// GetRoot returns nil since solo machines do not have roots.
+func (cs ConsensusState) GetRoot() exported.Root {
+ return nil
+}
+
+// GetPubKey unmarshals the public key into a cryptotypes.PubKey type.
+// An error is returned if the public key is nil or the cached value
+// is not a PubKey.
+func (cs ConsensusState) GetPubKey() (cryptotypes.PubKey, error) {
+ if cs.PublicKey == nil {
+ return nil, sdkerrors.Wrap(clienttypes.ErrInvalidConsensus, "consensus state PublicKey cannot be nil")
+ }
+
+ publicKey, ok := cs.PublicKey.GetCachedValue().(cryptotypes.PubKey)
+ if !ok {
+ return nil, sdkerrors.Wrap(clienttypes.ErrInvalidConsensus, "consensus state PublicKey is not cryptotypes.PubKey")
+ }
+
+ return publicKey, nil
+}
+
+// ValidateBasic defines basic validation for the solo machine consensus state.
+func (cs ConsensusState) ValidateBasic() error {
+ if cs.Timestamp == 0 {
+ return sdkerrors.Wrap(clienttypes.ErrInvalidConsensus, "timestamp cannot be 0")
+ }
+ if cs.Diversifier != "" && strings.TrimSpace(cs.Diversifier) == "" {
+ return sdkerrors.Wrap(clienttypes.ErrInvalidConsensus, "diversifier cannot contain only spaces")
+ }
+
+ publicKey, err := cs.GetPubKey()
+ if err != nil || publicKey == nil || len(publicKey.Bytes()) == 0 {
+ return sdkerrors.Wrap(clienttypes.ErrInvalidConsensus, "public key cannot be empty")
+ }
+
+ return nil
+}
diff --git a/light-clients/06-solomachine/types/consensus_state_test.go b/light-clients/06-solomachine/types/consensus_state_test.go
new file mode 100644
index 00000000..e0c22f95
--- /dev/null
+++ b/light-clients/06-solomachine/types/consensus_state_test.go
@@ -0,0 +1,75 @@
+package types_test
+
+import (
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/06-solomachine/types"
+ ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+)
+
+func (suite *SoloMachineTestSuite) TestConsensusState() {
+ consensusState := suite.solomachine.ConsensusState()
+
+ suite.Require().Equal(exported.Solomachine, consensusState.ClientType())
+ suite.Require().Equal(suite.solomachine.Time, consensusState.GetTimestamp())
+ suite.Require().Nil(consensusState.GetRoot())
+}
+
+func (suite *SoloMachineTestSuite) TestConsensusStateValidateBasic() {
+ // test singlesig and multisig public keys
+ for _, solomachine := range []*ibctesting.Solomachine{suite.solomachine, suite.solomachineMulti} {
+
+ testCases := []struct {
+ name string
+ consensusState *types.ConsensusState
+ expPass bool
+ }{
+ {
+ "valid consensus state",
+ solomachine.ConsensusState(),
+ true,
+ },
+ {
+ "timestamp is zero",
+ &types.ConsensusState{
+ PublicKey: solomachine.ConsensusState().PublicKey,
+ Timestamp: 0,
+ Diversifier: solomachine.Diversifier,
+ },
+ false,
+ },
+ {
+ "diversifier is blank",
+ &types.ConsensusState{
+ PublicKey: solomachine.ConsensusState().PublicKey,
+ Timestamp: solomachine.Time,
+ Diversifier: " ",
+ },
+ false,
+ },
+ {
+ "pubkey is nil",
+ &types.ConsensusState{
+ Timestamp: solomachine.Time,
+ Diversifier: solomachine.Diversifier,
+ PublicKey: nil,
+ },
+ false,
+ },
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+
+ suite.Run(tc.name, func() {
+
+ err := tc.consensusState.ValidateBasic()
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+ }
+}
diff --git a/light-clients/06-solomachine/types/errors.go b/light-clients/06-solomachine/types/errors.go
new file mode 100644
index 00000000..3e27f607
--- /dev/null
+++ b/light-clients/06-solomachine/types/errors.go
@@ -0,0 +1,18 @@
+package types
+
+import (
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+)
+
+const (
+ SubModuleName = "solo machine"
+)
+
+var (
+ ErrInvalidHeader = sdkerrors.Register(SubModuleName, 2, "invalid header")
+ ErrInvalidSequence = sdkerrors.Register(SubModuleName, 3, "invalid sequence")
+ ErrInvalidSignatureAndData = sdkerrors.Register(SubModuleName, 4, "invalid signature and data")
+ ErrSignatureVerificationFailed = sdkerrors.Register(SubModuleName, 5, "signature verification failed")
+ ErrInvalidProof = sdkerrors.Register(SubModuleName, 6, "invalid solo machine proof")
+ ErrInvalidDataType = sdkerrors.Register(SubModuleName, 7, "invalid data type")
+)
diff --git a/light-clients/06-solomachine/types/header.go b/light-clients/06-solomachine/types/header.go
new file mode 100644
index 00000000..f9c5f176
--- /dev/null
+++ b/light-clients/06-solomachine/types/header.go
@@ -0,0 +1,67 @@
+package types
+
+import (
+ "strings"
+
+ cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+)
+
+var _ exported.Header = &Header{}
+
+// ClientType defines that the Header is a Solo Machine.
+func (Header) ClientType() string {
+ return exported.Solomachine
+}
+
+// GetHeight returns the current sequence number as the height.
+// It returns an exported.Height to satisfy the interface;
+// the revision number is always 0 for a solo machine.
+func (h Header) GetHeight() exported.Height {
+ return clienttypes.NewHeight(0, h.Sequence)
+}
+
+// GetPubKey unmarshals the new public key into a cryptotypes.PubKey type.
+// An error is returned if the new public key is nil or the cached value
+// is not a PubKey.
+func (h Header) GetPubKey() (cryptotypes.PubKey, error) {
+ if h.NewPublicKey == nil {
+ return nil, sdkerrors.Wrap(ErrInvalidHeader, "header NewPublicKey cannot be nil")
+ }
+
+ publicKey, ok := h.NewPublicKey.GetCachedValue().(cryptotypes.PubKey)
+ if !ok {
+ return nil, sdkerrors.Wrap(ErrInvalidHeader, "header NewPublicKey is not cryptotypes.PubKey")
+ }
+
+ return publicKey, nil
+}
+
+// ValidateBasic ensures that the sequence, signature and public key have all
+// been initialized.
+func (h Header) ValidateBasic() error {
+ if h.Sequence == 0 {
+ return sdkerrors.Wrap(clienttypes.ErrInvalidHeader, "sequence number cannot be zero")
+ }
+
+ if h.Timestamp == 0 {
+ return sdkerrors.Wrap(clienttypes.ErrInvalidHeader, "timestamp cannot be zero")
+ }
+
+ if h.NewDiversifier != "" && strings.TrimSpace(h.NewDiversifier) == "" {
+ return sdkerrors.Wrap(clienttypes.ErrInvalidHeader, "diversifier cannot contain only spaces")
+ }
+
+ if len(h.Signature) == 0 {
+ return sdkerrors.Wrap(clienttypes.ErrInvalidHeader, "signature cannot be empty")
+ }
+
+ newPublicKey, err := h.GetPubKey()
+ if err != nil || newPublicKey == nil || len(newPublicKey.Bytes()) == 0 {
+ return sdkerrors.Wrap(clienttypes.ErrInvalidHeader, "new public key cannot be empty")
+ }
+
+ return nil
+}
diff --git a/light-clients/06-solomachine/types/header_test.go b/light-clients/06-solomachine/types/header_test.go
new file mode 100644
index 00000000..a5ca45e8
--- /dev/null
+++ b/light-clients/06-solomachine/types/header_test.go
@@ -0,0 +1,98 @@
+package types_test
+
+import (
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/06-solomachine/types"
+ ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+)
+
+func (suite *SoloMachineTestSuite) TestHeaderValidateBasic() {
+ // test singlesig and multisig public keys
+ for _, solomachine := range []*ibctesting.Solomachine{suite.solomachine, suite.solomachineMulti} {
+
+ header := solomachine.CreateHeader()
+
+ cases := []struct {
+ name string
+ header *types.Header
+ expPass bool
+ }{
+ {
+ "valid header",
+ header,
+ true,
+ },
+ {
+ "sequence is zero",
+ &types.Header{
+ Sequence: 0,
+ Timestamp: header.Timestamp,
+ Signature: header.Signature,
+ NewPublicKey: header.NewPublicKey,
+ NewDiversifier: header.NewDiversifier,
+ },
+ false,
+ },
+ {
+ "timestamp is zero",
+ &types.Header{
+ Sequence: header.Sequence,
+ Timestamp: 0,
+ Signature: header.Signature,
+ NewPublicKey: header.NewPublicKey,
+ NewDiversifier: header.NewDiversifier,
+ },
+ false,
+ },
+ {
+ "signature is empty",
+ &types.Header{
+ Sequence: header.Sequence,
+ Timestamp: header.Timestamp,
+ Signature: []byte{},
+ NewPublicKey: header.NewPublicKey,
+ NewDiversifier: header.NewDiversifier,
+ },
+ false,
+ },
+ {
+ "diversifier contains only spaces",
+ &types.Header{
+ Sequence: header.Sequence,
+ Timestamp: header.Timestamp,
+ Signature: header.Signature,
+ NewPublicKey: header.NewPublicKey,
+ NewDiversifier: " ",
+ },
+ false,
+ },
+ {
+ "public key is nil",
+ &types.Header{
+ Sequence: header.Sequence,
+ Timestamp: header.Timestamp,
+ Signature: header.Signature,
+ NewPublicKey: nil,
+ NewDiversifier: header.NewDiversifier,
+ },
+ false,
+ },
+ }
+
+ suite.Require().Equal(exported.Solomachine, header.ClientType())
+
+ for _, tc := range cases {
+ tc := tc
+
+ suite.Run(tc.name, func() {
+ err := tc.header.ValidateBasic()
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+ }
+}
diff --git a/light-clients/06-solomachine/types/misbehaviour.go b/light-clients/06-solomachine/types/misbehaviour.go
new file mode 100644
index 00000000..f5b218cc
--- /dev/null
+++ b/light-clients/06-solomachine/types/misbehaviour.go
@@ -0,0 +1,83 @@
+package types
+
+import (
+ "bytes"
+
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+)
+
+var _ exported.Misbehaviour = &Misbehaviour{}
+
+// ClientType returns the Solo Machine client type.
+func (misbehaviour Misbehaviour) ClientType() string {
+ return exported.Solomachine
+}
+
+// GetClientID returns the ID of the client that committed a misbehaviour.
+func (misbehaviour Misbehaviour) GetClientID() string {
+ return misbehaviour.ClientId
+}
+
+// Type implements Evidence interface.
+func (misbehaviour Misbehaviour) Type() string {
+ return exported.TypeClientMisbehaviour
+}
+
+// GetHeight returns the sequence at which the misbehaviour occurred.
+// It returns an exported.Height to satisfy the interface;
+// the revision number is always 0 for a solo machine.
+func (misbehaviour Misbehaviour) GetHeight() exported.Height {
+ return clienttypes.NewHeight(0, misbehaviour.Sequence)
+}
+
+// ValidateBasic implements Evidence interface.
+func (misbehaviour Misbehaviour) ValidateBasic() error {
+ if err := host.ClientIdentifierValidator(misbehaviour.ClientId); err != nil {
+ return sdkerrors.Wrap(err, "invalid client identifier for solo machine")
+ }
+
+ if misbehaviour.Sequence == 0 {
+ return sdkerrors.Wrap(clienttypes.ErrInvalidMisbehaviour, "sequence cannot be 0")
+ }
+
+ if err := misbehaviour.SignatureOne.ValidateBasic(); err != nil {
+ return sdkerrors.Wrap(err, "signature one failed basic validation")
+ }
+
+ if err := misbehaviour.SignatureTwo.ValidateBasic(); err != nil {
+ return sdkerrors.Wrap(err, "signature two failed basic validation")
+ }
+
+ // misbehaviour signatures cannot be identical
+ if bytes.Equal(misbehaviour.SignatureOne.Signature, misbehaviour.SignatureTwo.Signature) {
+ return sdkerrors.Wrap(clienttypes.ErrInvalidMisbehaviour, "misbehaviour signatures cannot be equal")
+ }
+
+ // message data signed cannot be identical
+ if bytes.Equal(misbehaviour.SignatureOne.Data, misbehaviour.SignatureTwo.Data) {
+ return sdkerrors.Wrap(clienttypes.ErrInvalidMisbehaviour, "misbehaviour signature data must be signed over different messages")
+ }
+
+ return nil
+}
+
+// ValidateBasic ensures that the signature, data, data type and timestamp fields are all set.
+func (sd SignatureAndData) ValidateBasic() error {
+ if len(sd.Signature) == 0 {
+ return sdkerrors.Wrap(ErrInvalidSignatureAndData, "signature cannot be empty")
+ }
+ if len(sd.Data) == 0 {
+ return sdkerrors.Wrap(ErrInvalidSignatureAndData, "data for signature cannot be empty")
+ }
+ if sd.DataType == UNSPECIFIED {
+ return sdkerrors.Wrap(ErrInvalidSignatureAndData, "data type cannot be UNSPECIFIED")
+ }
+ if sd.Timestamp == 0 {
+ return sdkerrors.Wrap(ErrInvalidSignatureAndData, "timestamp cannot be 0")
+ }
+
+ return nil
+}
diff --git a/light-clients/06-solomachine/types/misbehaviour_handle.go b/light-clients/06-solomachine/types/misbehaviour_handle.go
new file mode 100644
index 00000000..ce5d6351
--- /dev/null
+++ b/light-clients/06-solomachine/types/misbehaviour_handle.go
@@ -0,0 +1,92 @@
+package types
+
+import (
+ "github.com/cosmos/cosmos-sdk/codec"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+)
+
+// CheckMisbehaviourAndUpdateState determines whether the currently registered
+// public key signed over two different messages with the same sequence. If so,
+// the client state is updated to a frozen status.
+// NOTE: Misbehaviour is not tracked for previous public keys; a solo machine may
+// update to a new public key before the misbehaviour is processed. Misbehaviour
+// handling is therefore dependent on the order in which data is processed.
+func (cs ClientState) CheckMisbehaviourAndUpdateState(
+ ctx sdk.Context,
+ cdc codec.BinaryMarshaler,
+ clientStore sdk.KVStore,
+ misbehaviour exported.Misbehaviour,
+) (exported.ClientState, error) {
+
+ soloMisbehaviour, ok := misbehaviour.(*Misbehaviour)
+ if !ok {
+ return nil, sdkerrors.Wrapf(
+ clienttypes.ErrInvalidClientType,
+ "misbehaviour type %T, expected %T", misbehaviour, &Misbehaviour{},
+ )
+ }
+
+ if cs.IsFrozen() {
+ return nil, sdkerrors.Wrapf(clienttypes.ErrClientFrozen, "client is already frozen")
+ }
+
+ // NOTE: a check that the misbehaviour message data are not equal is done by
+ // misbehaviour.ValidateBasic which is called by the 02-client keeper.
+
+ // verify first signature
+ if err := verifySignatureAndData(cdc, cs, soloMisbehaviour, soloMisbehaviour.SignatureOne); err != nil {
+ return nil, sdkerrors.Wrap(err, "failed to verify signature one")
+ }
+
+ // verify second signature
+ if err := verifySignatureAndData(cdc, cs, soloMisbehaviour, soloMisbehaviour.SignatureTwo); err != nil {
+ return nil, sdkerrors.Wrap(err, "failed to verify signature two")
+ }
+
+ cs.FrozenSequence = soloMisbehaviour.Sequence
+ return &cs, nil
+}
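+
+// Note on the effect of freezing (for exposition only): the returned client
+// state has FrozenSequence set to the misbehaviour sequence, so subsequent
+// state verification calls (VerifyChannelState, VerifyPacketCommitment,
+// VerifyNextSequenceRecv, etc.) fail their frozen-client check, as exercised
+// by the "client is frozen" test cases in this patch.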
+
+// verifySignatureAndData verifies that the currently registered public key has signed
+// over the provided data and that the data is valid. The data is valid if it can be
+// unmarshaled into the specified data type.
+func verifySignatureAndData(cdc codec.BinaryMarshaler, clientState ClientState, misbehaviour *Misbehaviour, sigAndData *SignatureAndData) error {
+
+ // do not check misbehaviour timestamp since we want to allow processing of past misbehaviour
+
+ // ensure data can be unmarshaled to the specified data type
+ if _, err := UnmarshalDataByType(cdc, sigAndData.DataType, sigAndData.Data); err != nil {
+ return err
+ }
+
+ data, err := MisbehaviourSignBytes(
+ cdc,
+ misbehaviour.Sequence, sigAndData.Timestamp,
+ clientState.ConsensusState.Diversifier,
+ sigAndData.DataType,
+ sigAndData.Data,
+ )
+ if err != nil {
+ return err
+ }
+
+ sigData, err := UnmarshalSignatureData(cdc, sigAndData.Signature)
+ if err != nil {
+ return err
+ }
+
+ publicKey, err := clientState.ConsensusState.GetPubKey()
+ if err != nil {
+ return err
+ }
+
+ if err := VerifySignature(publicKey, data, sigData); err != nil {
+ return err
+ }
+
+ return nil
+}
diff --git a/light-clients/06-solomachine/types/misbehaviour_handle_test.go b/light-clients/06-solomachine/types/misbehaviour_handle_test.go
new file mode 100644
index 00000000..97ce22a3
--- /dev/null
+++ b/light-clients/06-solomachine/types/misbehaviour_handle_test.go
@@ -0,0 +1,275 @@
+package types_test
+
+import (
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/06-solomachine/types"
+ ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
+ ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+)
+
+func (suite *SoloMachineTestSuite) TestCheckMisbehaviourAndUpdateState() {
+ var (
+ clientState exported.ClientState
+ misbehaviour exported.Misbehaviour
+ )
+
+ // test singlesig and multisig public keys
+ for _, solomachine := range []*ibctesting.Solomachine{suite.solomachine, suite.solomachineMulti} {
+
+ testCases := []struct {
+ name string
+ setup func()
+ expPass bool
+ }{
+ {
+ "valid misbehaviour",
+ func() {
+ clientState = solomachine.ClientState()
+ misbehaviour = solomachine.CreateMisbehaviour()
+ },
+ true,
+ },
+ {
+ "old misbehaviour is successful (timestamp is less than current consensus state)",
+ func() {
+ clientState = solomachine.ClientState()
+ solomachine.Time = solomachine.Time - 5
+ misbehaviour = solomachine.CreateMisbehaviour()
+ }, true,
+ },
+ {
+ "client is frozen",
+ func() {
+ cs := solomachine.ClientState()
+ cs.FrozenSequence = 1
+ clientState = cs
+ misbehaviour = solomachine.CreateMisbehaviour()
+ },
+ false,
+ },
+ {
+ "wrong client state type",
+ func() {
+ clientState = &ibctmtypes.ClientState{}
+ misbehaviour = solomachine.CreateMisbehaviour()
+ },
+ false,
+ },
+ {
+ "invalid misbehaviour type",
+ func() {
+ clientState = solomachine.ClientState()
+ misbehaviour = &ibctmtypes.Misbehaviour{}
+ },
+ false,
+ },
+ {
+ "invalid SignatureOne SignatureData",
+ func() {
+ clientState = solomachine.ClientState()
+ m := solomachine.CreateMisbehaviour()
+
+ m.SignatureOne.Signature = suite.GetInvalidProof()
+ misbehaviour = m
+ }, false,
+ },
+ {
+ "invalid SignatureTwo SignatureData",
+ func() {
+ clientState = solomachine.ClientState()
+ m := solomachine.CreateMisbehaviour()
+
+ m.SignatureTwo.Signature = suite.GetInvalidProof()
+ misbehaviour = m
+ }, false,
+ },
+ {
+ "invalid SignatureOne timestamp",
+ func() {
+ clientState = solomachine.ClientState()
+ m := solomachine.CreateMisbehaviour()
+
+ m.SignatureOne.Timestamp = 1000000000000
+ misbehaviour = m
+ }, false,
+ },
+ {
+ "invalid SignatureTwo timestamp",
+ func() {
+ clientState = solomachine.ClientState()
+ m := solomachine.CreateMisbehaviour()
+
+ m.SignatureTwo.Timestamp = 1000000000000
+ misbehaviour = m
+ }, false,
+ },
+ {
+ "invalid first signature data",
+ func() {
+ clientState = solomachine.ClientState()
+
+ // store in temp before assigning to interface type
+ m := solomachine.CreateMisbehaviour()
+
+ msg := []byte("DATA ONE")
+ signBytes := &types.SignBytes{
+ Sequence: solomachine.Sequence + 1,
+ Timestamp: solomachine.Time,
+ Diversifier: solomachine.Diversifier,
+ DataType: types.CLIENT,
+ Data: msg,
+ }
+
+ data, err := suite.chainA.Codec.MarshalBinaryBare(signBytes)
+ suite.Require().NoError(err)
+
+ sig := solomachine.GenerateSignature(data)
+
+ m.SignatureOne.Signature = sig
+ m.SignatureOne.Data = msg
+ misbehaviour = m
+ },
+ false,
+ },
+ {
+ "invalid second signature data",
+ func() {
+ clientState = solomachine.ClientState()
+
+ // store in temp before assigning to interface type
+ m := solomachine.CreateMisbehaviour()
+
+ msg := []byte("DATA TWO")
+ signBytes := &types.SignBytes{
+ Sequence: solomachine.Sequence + 1,
+ Timestamp: solomachine.Time,
+ Diversifier: solomachine.Diversifier,
+ DataType: types.CLIENT,
+ Data: msg,
+ }
+
+ data, err := suite.chainA.Codec.MarshalBinaryBare(signBytes)
+ suite.Require().NoError(err)
+
+ sig := solomachine.GenerateSignature(data)
+
+ m.SignatureTwo.Signature = sig
+ m.SignatureTwo.Data = msg
+ misbehaviour = m
+ },
+ false,
+ },
+ {
+ "wrong pubkey generates first signature",
+ func() {
+ clientState = solomachine.ClientState()
+ badMisbehaviour := solomachine.CreateMisbehaviour()
+
+ // update public key to a new one
+ solomachine.CreateHeader()
+ m := solomachine.CreateMisbehaviour()
+
+ // set SignatureOne to use the wrong signature
+ m.SignatureOne = badMisbehaviour.SignatureOne
+ misbehaviour = m
+ }, false,
+ },
+ {
+ "wrong pubkey generates second signature",
+ func() {
+ clientState = solomachine.ClientState()
+ badMisbehaviour := solomachine.CreateMisbehaviour()
+
+ // update public key to a new one
+ solomachine.CreateHeader()
+ m := solomachine.CreateMisbehaviour()
+
+ // set SignatureTwo to use the wrong signature
+ m.SignatureTwo = badMisbehaviour.SignatureTwo
+ misbehaviour = m
+ }, false,
+ },
+
+ {
+ "signatures sign over different sequence",
+ func() {
+ clientState = solomachine.ClientState()
+
+ // store in temp before assigning to interface type
+ m := solomachine.CreateMisbehaviour()
+
+ // Signature One
+ msg := []byte("DATA ONE")
+ // sequence used is plus 1
+ signBytes := &types.SignBytes{
+ Sequence: solomachine.Sequence + 1,
+ Timestamp: solomachine.Time,
+ Diversifier: solomachine.Diversifier,
+ DataType: types.CLIENT,
+ Data: msg,
+ }
+
+ data, err := suite.chainA.Codec.MarshalBinaryBare(signBytes)
+ suite.Require().NoError(err)
+
+ sig := solomachine.GenerateSignature(data)
+
+ m.SignatureOne.Signature = sig
+ m.SignatureOne.Data = msg
+
+ // Signature Two
+ msg = []byte("DATA TWO")
+ // sequence used is minus 1
+
+ signBytes = &types.SignBytes{
+ Sequence: solomachine.Sequence - 1,
+ Timestamp: solomachine.Time,
+ Diversifier: solomachine.Diversifier,
+ DataType: types.CLIENT,
+ Data: msg,
+ }
+ data, err = suite.chainA.Codec.MarshalBinaryBare(signBytes)
+ suite.Require().NoError(err)
+
+ sig = solomachine.GenerateSignature(data)
+
+ m.SignatureTwo.Signature = sig
+ m.SignatureTwo.Data = msg
+
+ misbehaviour = m
+
+ },
+ false,
+ },
+ {
+ "consensus state pubkey is nil",
+ func() {
+ cs := solomachine.ClientState()
+ cs.ConsensusState.PublicKey = nil
+ clientState = cs
+ misbehaviour = solomachine.CreateMisbehaviour()
+ },
+ false,
+ },
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+
+ suite.Run(tc.name, func() {
+ // setup test
+ tc.setup()
+
+ clientState, err := clientState.CheckMisbehaviourAndUpdateState(suite.chainA.GetContext(), suite.chainA.App.AppCodec(), suite.store, misbehaviour)
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ suite.Require().True(clientState.IsFrozen(), "client not frozen")
+ } else {
+ suite.Require().Error(err)
+ suite.Require().Nil(clientState)
+ }
+ })
+ }
+ }
+}
diff --git a/light-clients/06-solomachine/types/misbehaviour_test.go b/light-clients/06-solomachine/types/misbehaviour_test.go
new file mode 100644
index 00000000..7c1f9168
--- /dev/null
+++ b/light-clients/06-solomachine/types/misbehaviour_test.go
@@ -0,0 +1,132 @@
+package types_test
+
+import (
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/06-solomachine/types"
+ ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+)
+
+func (suite *SoloMachineTestSuite) TestMisbehaviour() {
+ misbehaviour := suite.solomachine.CreateMisbehaviour()
+
+ suite.Require().Equal(exported.Solomachine, misbehaviour.ClientType())
+ suite.Require().Equal(suite.solomachine.ClientID, misbehaviour.GetClientID())
+ suite.Require().Equal(uint64(0), misbehaviour.GetHeight().GetRevisionNumber())
+ suite.Require().Equal(suite.solomachine.Sequence, misbehaviour.GetHeight().GetRevisionHeight())
+}
+
+func (suite *SoloMachineTestSuite) TestMisbehaviourValidateBasic() {
+ // test singlesig and multisig public keys
+ for _, solomachine := range []*ibctesting.Solomachine{suite.solomachine, suite.solomachineMulti} {
+
+ testCases := []struct {
+ name string
+ malleateMisbehaviour func(misbehaviour *types.Misbehaviour)
+ expPass bool
+ }{
+ {
+ "valid misbehaviour",
+ func(*types.Misbehaviour) {},
+ true,
+ },
+ {
+ "invalid client ID",
+ func(misbehaviour *types.Misbehaviour) {
+ misbehaviour.ClientId = "(badclientid)"
+ },
+ false,
+ },
+ {
+ "sequence is zero",
+ func(misbehaviour *types.Misbehaviour) {
+ misbehaviour.Sequence = 0
+ },
+ false,
+ },
+ {
+ "signature one sig is empty",
+ func(misbehaviour *types.Misbehaviour) {
+ misbehaviour.SignatureOne.Signature = []byte{}
+ },
+ false,
+ },
+ {
+ "signature two sig is empty",
+ func(misbehaviour *types.Misbehaviour) {
+ misbehaviour.SignatureTwo.Signature = []byte{}
+ },
+ false,
+ },
+ {
+ "signature one data is empty",
+ func(misbehaviour *types.Misbehaviour) {
+ misbehaviour.SignatureOne.Data = nil
+ },
+ false,
+ },
+ {
+ "signature two data is empty",
+ func(misbehaviour *types.Misbehaviour) {
+ misbehaviour.SignatureTwo.Data = []byte{}
+ },
+ false,
+ },
+ {
+ "signatures are identical",
+ func(misbehaviour *types.Misbehaviour) {
+ misbehaviour.SignatureTwo.Signature = misbehaviour.SignatureOne.Signature
+ },
+ false,
+ },
+ {
+ "data signed is identical",
+ func(misbehaviour *types.Misbehaviour) {
+ misbehaviour.SignatureTwo.Data = misbehaviour.SignatureOne.Data
+ },
+ false,
+ },
+ {
+ "data type for SignatureOne is unspecified",
+ func(misbehaviour *types.Misbehaviour) {
+ misbehaviour.SignatureOne.DataType = types.UNSPECIFIED
+ }, false,
+ },
+ {
+ "data type for SignatureTwo is unspecified",
+ func(misbehaviour *types.Misbehaviour) {
+ misbehaviour.SignatureTwo.DataType = types.UNSPECIFIED
+ }, false,
+ },
+ {
+ "timestamp for SignatureOne is zero",
+ func(misbehaviour *types.Misbehaviour) {
+ misbehaviour.SignatureOne.Timestamp = 0
+ }, false,
+ },
+ {
+ "timestamp for SignatureTwo is zero",
+ func(misbehaviour *types.Misbehaviour) {
+ misbehaviour.SignatureTwo.Timestamp = 0
+ }, false,
+ },
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+
+ suite.Run(tc.name, func() {
+
+ misbehaviour := solomachine.CreateMisbehaviour()
+ tc.malleateMisbehaviour(misbehaviour)
+
+ err := misbehaviour.ValidateBasic()
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+ }
+}
diff --git a/light-clients/06-solomachine/types/proof.go b/light-clients/06-solomachine/types/proof.go
new file mode 100644
index 00000000..6c2e0b84
--- /dev/null
+++ b/light-clients/06-solomachine/types/proof.go
@@ -0,0 +1,475 @@
+package types
+
+import (
+ "github.com/cosmos/cosmos-sdk/codec"
+ cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types"
+ "github.com/cosmos/cosmos-sdk/crypto/types/multisig"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ "github.com/cosmos/cosmos-sdk/types/tx/signing"
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
+ channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+ commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+)
+
+// VerifySignature verifies that the provided public key generated the signature
+// over the given data. Both single and multi signature public keys are supported.
+// The signature data type must correspond to the public key type. An error is
+// returned if signature verification fails or an invalid SignatureData type is
+// provided.
+func VerifySignature(pubKey cryptotypes.PubKey, signBytes []byte, sigData signing.SignatureData) error {
+ switch pubKey := pubKey.(type) {
+ case multisig.PubKey:
+ data, ok := sigData.(*signing.MultiSignatureData)
+ if !ok {
+ return sdkerrors.Wrapf(ErrSignatureVerificationFailed, "invalid signature data type, expected %T, got %T", (*signing.MultiSignatureData)(nil), data)
+ }
+
+ // The function supplied fulfills the VerifyMultisignature interface. No special
+ // adjustments need to be made to the sign bytes based on the sign mode.
+ if err := pubKey.VerifyMultisignature(func(signing.SignMode) ([]byte, error) {
+ return signBytes, nil
+ }, data); err != nil {
+ return err
+ }
+
+ default:
+ data, ok := sigData.(*signing.SingleSignatureData)
+ if !ok {
+ return sdkerrors.Wrapf(ErrSignatureVerificationFailed, "invalid signature data type, expected %T, got %T", (*signing.SingleSignatureData)(nil), data)
+ }
+
+ if !pubKey.VerifySignature(signBytes, data.Signature) {
+ return ErrSignatureVerificationFailed
+ }
+ }
+
+ return nil
+}
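+
+// Illustrative usage sketch (for exposition only; cdc, pubKey, signBytes and
+// proofBz are assumed to be in scope): a verifier typically unmarshals the
+// signature data from the proof bytes and then calls VerifySignature with the
+// currently registered public key.
+//
+//   sigData, err := UnmarshalSignatureData(cdc, proofBz)
+//   if err != nil {
+//       return err
+//   }
+//   if err := VerifySignature(pubKey, signBytes, sigData); err != nil {
+//       return sdkerrors.Wrap(ErrSignatureVerificationFailed, err.Error())
+//   }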
+
+// MisbehaviourSignBytes returns the sign bytes for verification of misbehaviour.
+func MisbehaviourSignBytes(
+ cdc codec.BinaryMarshaler,
+ sequence, timestamp uint64,
+ diversifier string,
+ dataType DataType,
+ data []byte) ([]byte, error) {
+ signBytes := &SignBytes{
+ Sequence: sequence,
+ Timestamp: timestamp,
+ Diversifier: diversifier,
+ DataType: dataType,
+ Data: data,
+ }
+
+ return cdc.MarshalBinaryBare(signBytes)
+}
+
+// HeaderSignBytes returns the sign bytes for verification of the header.
+func HeaderSignBytes(
+ cdc codec.BinaryMarshaler,
+ header *Header,
+) ([]byte, error) {
+ data := &HeaderData{
+ NewPubKey: header.NewPublicKey,
+ NewDiversifier: header.NewDiversifier,
+ }
+
+ dataBz, err := cdc.MarshalBinaryBare(data)
+ if err != nil {
+ return nil, err
+ }
+
+ signBytes := &SignBytes{
+ Sequence: header.Sequence,
+ Timestamp: header.Timestamp,
+ Diversifier: header.NewDiversifier,
+ DataType: HEADER,
+ Data: dataBz,
+ }
+
+ return cdc.MarshalBinaryBare(signBytes)
+}
+
+// ClientStateSignBytes returns the sign bytes for verification of the
+// client state.
+func ClientStateSignBytes(
+ cdc codec.BinaryMarshaler,
+ sequence, timestamp uint64,
+ diversifier string,
+ path commitmenttypes.MerklePath,
+ clientState exported.ClientState,
+) ([]byte, error) {
+ dataBz, err := ClientStateDataBytes(cdc, path, clientState)
+ if err != nil {
+ return nil, err
+ }
+
+ signBytes := &SignBytes{
+ Sequence: sequence,
+ Timestamp: timestamp,
+ Diversifier: diversifier,
+ DataType: CLIENT,
+ Data: dataBz,
+ }
+
+ return cdc.MarshalBinaryBare(signBytes)
+}
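+
+// Illustrative proof-construction sketch (for exposition only; cdc, sequence,
+// timestamp, diversifier, path, clientState and sigBz are assumed to be in
+// scope). This and the other *SignBytes helpers in this file follow the same
+// pattern exercised by the light client tests earlier in this patch: build the
+// sign bytes, have the solo machine sign them, then marshal a
+// TimestampedSignatureData as the proof.
+//
+//   value, err := ClientStateSignBytes(cdc, sequence, timestamp, diversifier, path, clientState)
+//   // ... the solo machine signs value; sigBz is the marshaled signature data ...
+//   proof, err := cdc.MarshalBinaryBare(&TimestampedSignatureData{
+//       SignatureData: sigBz,
+//       Timestamp:     timestamp,
+//   })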
+
+// ClientStateDataBytes returns the client state data bytes used in constructing
+// SignBytes.
+func ClientStateDataBytes(
+ cdc codec.BinaryMarshaler,
+ path commitmenttypes.MerklePath, // nolint: interfacer
+ clientState exported.ClientState,
+) ([]byte, error) {
+ any, err := clienttypes.PackClientState(clientState)
+ if err != nil {
+ return nil, err
+ }
+
+ data := &ClientStateData{
+ Path: []byte(path.String()),
+ ClientState: any,
+ }
+
+ dataBz, err := cdc.MarshalBinaryBare(data)
+ if err != nil {
+ return nil, err
+ }
+
+ return dataBz, nil
+}
+
+// ConsensusStateSignBytes returns the sign bytes for verification of the
+// consensus state.
+func ConsensusStateSignBytes(
+ cdc codec.BinaryMarshaler,
+ sequence, timestamp uint64,
+ diversifier string,
+ path commitmenttypes.MerklePath,
+ consensusState exported.ConsensusState,
+) ([]byte, error) {
+ dataBz, err := ConsensusStateDataBytes(cdc, path, consensusState)
+ if err != nil {
+ return nil, err
+ }
+
+ signBytes := &SignBytes{
+ Sequence: sequence,
+ Timestamp: timestamp,
+ Diversifier: diversifier,
+ DataType: CONSENSUS,
+ Data: dataBz,
+ }
+
+ return cdc.MarshalBinaryBare(signBytes)
+}
+
+// ConsensusStateDataBytes returns the consensus state data bytes used in constructing
+// SignBytes.
+func ConsensusStateDataBytes(
+ cdc codec.BinaryMarshaler,
+ path commitmenttypes.MerklePath, // nolint: interfacer
+ consensusState exported.ConsensusState,
+) ([]byte, error) {
+ any, err := clienttypes.PackConsensusState(consensusState)
+ if err != nil {
+ return nil, err
+ }
+
+ data := &ConsensusStateData{
+ Path: []byte(path.String()),
+ ConsensusState: any,
+ }
+
+ dataBz, err := cdc.MarshalBinaryBare(data)
+ if err != nil {
+ return nil, err
+ }
+
+ return dataBz, nil
+}
+
+// ConnectionStateSignBytes returns the sign bytes for verification of the
+// connection state.
+func ConnectionStateSignBytes(
+ cdc codec.BinaryMarshaler,
+ sequence, timestamp uint64,
+ diversifier string,
+ path commitmenttypes.MerklePath,
+ connectionEnd exported.ConnectionI,
+) ([]byte, error) {
+ dataBz, err := ConnectionStateDataBytes(cdc, path, connectionEnd)
+ if err != nil {
+ return nil, err
+ }
+
+ signBytes := &SignBytes{
+ Sequence: sequence,
+ Timestamp: timestamp,
+ Diversifier: diversifier,
+ DataType: CONNECTION,
+ Data: dataBz,
+ }
+
+ return cdc.MarshalBinaryBare(signBytes)
+}
+
+// ConnectionStateDataBytes returns the connection state data bytes used in constructing
+// SignBytes.
+func ConnectionStateDataBytes(
+ cdc codec.BinaryMarshaler,
+ path commitmenttypes.MerklePath, // nolint: interfacer
+ connectionEnd exported.ConnectionI,
+) ([]byte, error) {
+ connection, ok := connectionEnd.(connectiontypes.ConnectionEnd)
+ if !ok {
+ return nil, sdkerrors.Wrapf(
+ connectiontypes.ErrInvalidConnection,
+ "expected type %T, got %T", connectiontypes.ConnectionEnd{}, connectionEnd,
+ )
+ }
+
+ data := &ConnectionStateData{
+ Path: []byte(path.String()),
+ Connection: &connection,
+ }
+
+ dataBz, err := cdc.MarshalBinaryBare(data)
+ if err != nil {
+ return nil, err
+ }
+
+ return dataBz, nil
+}
+
+// ChannelStateSignBytes returns the sign bytes for verification of the
+// channel state.
+func ChannelStateSignBytes(
+ cdc codec.BinaryMarshaler,
+ sequence, timestamp uint64,
+ diversifier string,
+ path commitmenttypes.MerklePath,
+ channelEnd exported.ChannelI,
+) ([]byte, error) {
+ dataBz, err := ChannelStateDataBytes(cdc, path, channelEnd)
+ if err != nil {
+ return nil, err
+ }
+
+ signBytes := &SignBytes{
+ Sequence: sequence,
+ Timestamp: timestamp,
+ Diversifier: diversifier,
+ DataType: CHANNEL,
+ Data: dataBz,
+ }
+
+ return cdc.MarshalBinaryBare(signBytes)
+}
+
+// ChannelStateDataBytes returns the channel state data bytes used in constructing
+// SignBytes.
+func ChannelStateDataBytes(
+ cdc codec.BinaryMarshaler,
+ path commitmenttypes.MerklePath, // nolint: interfacer
+ channelEnd exported.ChannelI,
+) ([]byte, error) {
+ channel, ok := channelEnd.(channeltypes.Channel)
+ if !ok {
+ return nil, sdkerrors.Wrapf(
+ channeltypes.ErrInvalidChannel,
+ "expected channel type %T, got %T", channeltypes.Channel{}, channelEnd)
+ }
+
+ data := &ChannelStateData{
+ Path: []byte(path.String()),
+ Channel: &channel,
+ }
+
+ dataBz, err := cdc.MarshalBinaryBare(data)
+ if err != nil {
+ return nil, err
+ }
+
+ return dataBz, nil
+}
+
+// PacketCommitmentSignBytes returns the sign bytes for verification of the
+// packet commitment.
+func PacketCommitmentSignBytes(
+ cdc codec.BinaryMarshaler,
+ sequence, timestamp uint64,
+ diversifier string,
+ path commitmenttypes.MerklePath,
+ commitmentBytes []byte,
+) ([]byte, error) {
+ dataBz, err := PacketCommitmentDataBytes(cdc, path, commitmentBytes)
+ if err != nil {
+ return nil, err
+ }
+
+ signBytes := &SignBytes{
+ Sequence: sequence,
+ Timestamp: timestamp,
+ Diversifier: diversifier,
+ DataType: PACKETCOMMITMENT,
+ Data: dataBz,
+ }
+
+ return cdc.MarshalBinaryBare(signBytes)
+}
+
+// PacketCommitmentDataBytes returns the packet commitment data bytes used in constructing
+// SignBytes.
+func PacketCommitmentDataBytes(
+ cdc codec.BinaryMarshaler,
+ path commitmenttypes.MerklePath, // nolint: interfacer
+ commitmentBytes []byte,
+) ([]byte, error) {
+ data := &PacketCommitmentData{
+ Path: []byte(path.String()),
+ Commitment: commitmentBytes,
+ }
+
+ dataBz, err := cdc.MarshalBinaryBare(data)
+ if err != nil {
+ return nil, err
+ }
+
+ return dataBz, nil
+}
+
+// PacketAcknowledgementSignBytes returns the sign bytes for verification of
+// the acknowledgement.
+func PacketAcknowledgementSignBytes(
+ cdc codec.BinaryMarshaler,
+ sequence, timestamp uint64,
+ diversifier string,
+ path commitmenttypes.MerklePath,
+ acknowledgement []byte,
+) ([]byte, error) {
+ dataBz, err := PacketAcknowledgementDataBytes(cdc, path, acknowledgement)
+ if err != nil {
+ return nil, err
+ }
+
+ signBytes := &SignBytes{
+ Sequence: sequence,
+ Timestamp: timestamp,
+ Diversifier: diversifier,
+ DataType: PACKETACKNOWLEDGEMENT,
+ Data: dataBz,
+ }
+
+ return cdc.MarshalBinaryBare(signBytes)
+}
+
+// PacketAcknowledgementDataBytes returns the packet acknowledgement data bytes used in constructing
+// SignBytes.
+func PacketAcknowledgementDataBytes(
+ cdc codec.BinaryMarshaler,
+ path commitmenttypes.MerklePath, // nolint: interfacer
+ acknowledgement []byte,
+) ([]byte, error) {
+ data := &PacketAcknowledgementData{
+ Path: []byte(path.String()),
+ Acknowledgement: acknowledgement,
+ }
+
+ dataBz, err := cdc.MarshalBinaryBare(data)
+ if err != nil {
+ return nil, err
+ }
+
+ return dataBz, nil
+}
+
+// PacketReceiptAbsenceSignBytes returns the sign bytes for verification
+// of the absence of a receipt.
+func PacketReceiptAbsenceSignBytes(
+ cdc codec.BinaryMarshaler,
+ sequence, timestamp uint64,
+ diversifier string,
+ path commitmenttypes.MerklePath,
+) ([]byte, error) {
+ dataBz, err := PacketReceiptAbsenceDataBytes(cdc, path)
+ if err != nil {
+ return nil, err
+ }
+
+ signBytes := &SignBytes{
+ Sequence: sequence,
+ Timestamp: timestamp,
+ Diversifier: diversifier,
+ DataType: PACKETRECEIPTABSENCE,
+ Data: dataBz,
+ }
+
+ return cdc.MarshalBinaryBare(signBytes)
+}
+
+// PacketReceiptAbsenceDataBytes returns the packet receipt absence data bytes
+// used in constructing SignBytes.
+func PacketReceiptAbsenceDataBytes(
+ cdc codec.BinaryMarshaler,
+ path commitmenttypes.MerklePath, // nolint: interfacer
+) ([]byte, error) {
+ data := &PacketReceiptAbsenceData{
+ Path: []byte(path.String()),
+ }
+
+ dataBz, err := cdc.MarshalBinaryBare(data)
+ if err != nil {
+ return nil, err
+ }
+
+ return dataBz, nil
+}
+
+// NextSequenceRecvSignBytes returns the sign bytes for verification of the next
+// sequence to be received.
+func NextSequenceRecvSignBytes(
+ cdc codec.BinaryMarshaler,
+ sequence, timestamp uint64,
+ diversifier string,
+ path commitmenttypes.MerklePath,
+ nextSequenceRecv uint64,
+) ([]byte, error) {
+ dataBz, err := NextSequenceRecvDataBytes(cdc, path, nextSequenceRecv)
+ if err != nil {
+ return nil, err
+ }
+
+ signBytes := &SignBytes{
+ Sequence: sequence,
+ Timestamp: timestamp,
+ Diversifier: diversifier,
+ DataType: NEXTSEQUENCERECV,
+ Data: dataBz,
+ }
+
+ return cdc.MarshalBinaryBare(signBytes)
+}
+
+// NextSequenceRecvDataBytes returns the next sequence recv data bytes used in constructing
+// SignBytes.
+func NextSequenceRecvDataBytes(
+ cdc codec.BinaryMarshaler,
+ path commitmenttypes.MerklePath, // nolint: interfacer
+ nextSequenceRecv uint64,
+) ([]byte, error) {
+ data := &NextSequenceRecvData{
+ Path: []byte(path.String()),
+ NextSeqRecv: nextSequenceRecv,
+ }
+
+ dataBz, err := cdc.MarshalBinaryBare(data)
+ if err != nil {
+ return nil, err
+ }
+
+ return dataBz, nil
+}
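The helpers above share one shape: marshal the typed *Data message for the path being proven, wrap it in a SignBytes envelope tagged with the matching DataType, and marshal the envelope. As a hedged sketch (not taken from the patch; cdc, seq, ts, diversifier, path, ack and privKey are assumed to be in scope), producing the bytes a solo machine signs for a packet acknowledgement looks like:

signBz, err := PacketAcknowledgementSignBytes(cdc, seq, ts, diversifier, path, ack)
if err != nil {
	return nil, err
}
// Sign the canonical SignBytes encoding; the resulting signature, encoded as
// SDK signature data and paired with its timestamp, is what the client's
// Verify* methods expect as proof.
sig, err := privKey.Sign(signBz)
if err != nil {
	return nil, err
}
_ = sig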
diff --git a/light-clients/06-solomachine/types/proof_test.go b/light-clients/06-solomachine/types/proof_test.go
new file mode 100644
index 00000000..e2ba679a
--- /dev/null
+++ b/light-clients/06-solomachine/types/proof_test.go
@@ -0,0 +1,102 @@
+package types_test
+
+import (
+ cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types"
+ "github.com/cosmos/cosmos-sdk/types/tx/signing"
+ "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/06-solomachine/types"
+ solomachinetypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/06-solomachine/types"
+ ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+)
+
+func (suite *SoloMachineTestSuite) TestVerifySignature() {
+ cdc := suite.chainA.App.AppCodec()
+ signBytes := []byte("sign bytes")
+
+ singleSignature := suite.solomachine.GenerateSignature(signBytes)
+ singleSigData, err := solomachinetypes.UnmarshalSignatureData(cdc, singleSignature)
+ suite.Require().NoError(err)
+
+ multiSignature := suite.solomachineMulti.GenerateSignature(signBytes)
+ multiSigData, err := solomachinetypes.UnmarshalSignatureData(cdc, multiSignature)
+ suite.Require().NoError(err)
+
+ testCases := []struct {
+ name string
+ publicKey cryptotypes.PubKey
+ sigData signing.SignatureData
+ expPass bool
+ }{
+ {
+ "single signature with regular public key",
+ suite.solomachine.PublicKey,
+ singleSigData,
+ true,
+ },
+ {
+ "multi signature with multisig public key",
+ suite.solomachineMulti.PublicKey,
+ multiSigData,
+ true,
+ },
+ {
+ "single signature with multisig public key",
+ suite.solomachineMulti.PublicKey,
+ singleSigData,
+ false,
+ },
+ {
+ "multi signature with regular public key",
+ suite.solomachine.PublicKey,
+ multiSigData,
+ false,
+ },
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+
+ suite.Run(tc.name, func() {
+ err := solomachinetypes.VerifySignature(tc.publicKey, signBytes, tc.sigData)
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+}
+
+func (suite *SoloMachineTestSuite) TestClientStateSignBytes() {
+ cdc := suite.chainA.App.AppCodec()
+
+ for _, solomachine := range []*ibctesting.Solomachine{suite.solomachine, suite.solomachineMulti} {
+ // success
+ path := solomachine.GetClientStatePath(counterpartyClientIdentifier)
+ bz, err := types.ClientStateSignBytes(cdc, solomachine.Sequence, solomachine.Time, solomachine.Diversifier, path, solomachine.ClientState())
+ suite.Require().NoError(err)
+ suite.Require().NotNil(bz)
+
+ // nil client state
+ bz, err = types.ClientStateSignBytes(cdc, solomachine.Sequence, solomachine.Time, solomachine.Diversifier, path, nil)
+ suite.Require().Error(err)
+ suite.Require().Nil(bz)
+ }
+}
+
+func (suite *SoloMachineTestSuite) TestConsensusStateSignBytes() {
+ cdc := suite.chainA.App.AppCodec()
+
+ for _, solomachine := range []*ibctesting.Solomachine{suite.solomachine, suite.solomachineMulti} {
+ // success
+ path := solomachine.GetConsensusStatePath(counterpartyClientIdentifier, consensusHeight)
+ bz, err := types.ConsensusStateSignBytes(cdc, solomachine.Sequence, solomachine.Time, solomachine.Diversifier, path, solomachine.ConsensusState())
+ suite.Require().NoError(err)
+ suite.Require().NotNil(bz)
+
+ // nil consensus state
+ bz, err = types.ConsensusStateSignBytes(cdc, solomachine.Sequence, solomachine.Time, solomachine.Diversifier, path, nil)
+ suite.Require().Error(err)
+ suite.Require().Nil(bz)
+ }
+}
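The same helpers can be exercised outside the suite. A hedged sketch of the verification half, assuming cdc, pubKey, signBytes and a proto-marshalled signature sigBz are already available:

sigData, err := types.UnmarshalSignatureData(cdc, sigBz)
if err != nil {
	return err
}
// VerifySignature handles both single and multisig public keys, exactly as the
// table-driven cases above exercise.
if err := types.VerifySignature(pubKey, signBytes, sigData); err != nil {
	return err
}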
diff --git a/light-clients/06-solomachine/types/proposal_handle.go b/light-clients/06-solomachine/types/proposal_handle.go
new file mode 100644
index 00000000..e38155b2
--- /dev/null
+++ b/light-clients/06-solomachine/types/proposal_handle.go
@@ -0,0 +1,64 @@
+package types
+
+import (
+ "reflect"
+
+ "github.com/cosmos/cosmos-sdk/codec"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+)
+
+// CheckSubstituteAndUpdateState verifies that the subject is allowed to be updated by
+// a governance proposal and that the substitute client is a solo machine.
+// It will update the consensus state to the substitute's consensus state and
+// the sequence to the substitute's current sequence. An error is returned if
+// the client is not allowed to be updated by a governance proposal,
+// the substitute is not a solo machine, or the subject's public key equals
+// the substitute's public key.
+func (cs ClientState) CheckSubstituteAndUpdateState(
+ ctx sdk.Context, cdc codec.BinaryMarshaler, subjectClientStore,
+ _ sdk.KVStore, substituteClient exported.ClientState,
+ _ exported.Height,
+) (exported.ClientState, error) {
+
+ if !cs.AllowUpdateAfterProposal {
+ return nil, sdkerrors.Wrapf(
+ clienttypes.ErrUpdateClientFailed,
+			"solo machine client is not allowed to be updated with a proposal",
+ )
+ }
+
+ substituteClientState, ok := substituteClient.(*ClientState)
+ if !ok {
+ return nil, sdkerrors.Wrapf(
+ clienttypes.ErrInvalidClientType, "substitute client state type %T, expected %T", substituteClient, &ClientState{},
+ )
+ }
+
+ subjectPublicKey, err := cs.ConsensusState.GetPubKey()
+ if err != nil {
+ return nil, sdkerrors.Wrap(err, "failed to get consensus public key")
+ }
+
+ substitutePublicKey, err := substituteClientState.ConsensusState.GetPubKey()
+ if err != nil {
+ return nil, sdkerrors.Wrap(err, "failed to get substitute client public key")
+ }
+
+ if reflect.DeepEqual(subjectPublicKey, substitutePublicKey) {
+ return nil, sdkerrors.Wrapf(
+ clienttypes.ErrInvalidHeader, "subject and substitute have the same public key",
+ )
+ }
+
+ clientState := &cs
+
+ // update to substitute parameters
+ clientState.Sequence = substituteClientState.Sequence
+ clientState.ConsensusState = substituteClientState.ConsensusState
+ clientState.FrozenSequence = 0
+
+ return clientState, nil
+}
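A hedged sketch of how the handler above is driven (variable names are assumptions; the test that follows wires up the real stores): the proposal flow hands the method the substitute client, and on success the returned client state carries the substitute's sequence and consensus state with the frozen sequence cleared.

updated, err := subjectClientState.CheckSubstituteAndUpdateState(
	ctx, cdc, subjectStore, substituteStore, substituteClientState, nil,
)
if err != nil {
	return err
}
solo := updated.(*ClientState)
_ = solo.Sequence       // adopted from the substitute
_ = solo.ConsensusState // adopted from the substitute
_ = solo.FrozenSequence // reset to 0, i.e. unfrozen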
diff --git a/light-clients/06-solomachine/types/proposal_handle_test.go b/light-clients/06-solomachine/types/proposal_handle_test.go
new file mode 100644
index 00000000..0113da10
--- /dev/null
+++ b/light-clients/06-solomachine/types/proposal_handle_test.go
@@ -0,0 +1,88 @@
+package types_test
+
+import (
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/06-solomachine/types"
+ ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
+ ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+)
+
+func (suite *SoloMachineTestSuite) TestCheckSubstituteAndUpdateState() {
+ var (
+ subjectClientState *types.ClientState
+ substituteClientState exported.ClientState
+ )
+
+ // test singlesig and multisig public keys
+ for _, solomachine := range []*ibctesting.Solomachine{suite.solomachine, suite.solomachineMulti} {
+
+ testCases := []struct {
+ name string
+ malleate func()
+ expPass bool
+ }{
+ {
+ "valid substitute", func() {
+ subjectClientState.AllowUpdateAfterProposal = true
+ }, true,
+ },
+ {
+ "subject not allowed to be updated", func() {
+ subjectClientState.AllowUpdateAfterProposal = false
+ }, false,
+ },
+ {
+ "substitute is not the solo machine", func() {
+ substituteClientState = &ibctmtypes.ClientState{}
+ }, false,
+ },
+ {
+ "subject public key is nil", func() {
+ subjectClientState.ConsensusState.PublicKey = nil
+ }, false,
+ },
+
+ {
+ "substitute public key is nil", func() {
+ substituteClientState.(*types.ClientState).ConsensusState.PublicKey = nil
+ }, false,
+ },
+ {
+ "subject and substitute use the same public key", func() {
+ substituteClientState.(*types.ClientState).ConsensusState.PublicKey = subjectClientState.ConsensusState.PublicKey
+ }, false,
+ },
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+
+ suite.Run(tc.name, func() {
+ suite.SetupTest()
+
+ subjectClientState = solomachine.ClientState()
+ subjectClientState.AllowUpdateAfterProposal = true
+ substitute := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "substitute", "testing", 5)
+ substituteClientState = substitute.ClientState()
+
+ tc.malleate()
+
+ subjectClientStore := suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), solomachine.ClientID)
+ substituteClientStore := suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), substitute.ClientID)
+
+ updatedClient, err := subjectClientState.CheckSubstituteAndUpdateState(suite.chainA.GetContext(), suite.chainA.App.AppCodec(), subjectClientStore, substituteClientStore, substituteClientState, nil)
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+
+ suite.Require().Equal(substituteClientState.(*types.ClientState).ConsensusState, updatedClient.(*types.ClientState).ConsensusState)
+ suite.Require().Equal(substituteClientState.(*types.ClientState).Sequence, updatedClient.(*types.ClientState).Sequence)
+ suite.Require().Equal(uint64(0), updatedClient.(*types.ClientState).FrozenSequence)
+ } else {
+ suite.Require().Error(err)
+ suite.Require().Nil(updatedClient)
+ }
+ })
+ }
+ }
+}
diff --git a/light-clients/06-solomachine/types/solomachine.go b/light-clients/06-solomachine/types/solomachine.go
new file mode 100644
index 00000000..d3936ef4
--- /dev/null
+++ b/light-clients/06-solomachine/types/solomachine.go
@@ -0,0 +1,43 @@
+package types
+
+import (
+ codectypes "github.com/cosmos/cosmos-sdk/codec/types"
+ cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+)
+
+// Interface implementation checks.
+var _, _, _, _ codectypes.UnpackInterfacesMessage = &ClientState{}, &ConsensusState{}, &Header{}, &HeaderData{}
+
+// Data is an interface used for all the signature data bytes proto definitions.
+type Data interface{}
+
+// UnpackInterfaces implements the UnpackInterfacesMessage.UnpackInterfaces method
+func (cs ClientState) UnpackInterfaces(unpacker codectypes.AnyUnpacker) error {
+ return cs.ConsensusState.UnpackInterfaces(unpacker)
+}
+
+// UnpackInterfaces implements the UnpackInterfacesMessage.UnpackInterfaces method
+func (cs ConsensusState) UnpackInterfaces(unpacker codectypes.AnyUnpacker) error {
+ return unpacker.UnpackAny(cs.PublicKey, new(cryptotypes.PubKey))
+}
+
+// UnpackInterfaces implements the UnpackInterfacesMessage.UnpackInterfaces method
+func (h Header) UnpackInterfaces(unpacker codectypes.AnyUnpacker) error {
+ return unpacker.UnpackAny(h.NewPublicKey, new(cryptotypes.PubKey))
+}
+
+// UnpackInterfaces implements the UnpackInterfacesMessage.UnpackInterfaces method
+func (hd HeaderData) UnpackInterfaces(unpacker codectypes.AnyUnpacker) error {
+ return unpacker.UnpackAny(hd.NewPubKey, new(cryptotypes.PubKey))
+}
+
+// UnpackInterfaces implements the UnpackInterfacesMessage.UnpackInterfaces method
+func (csd ClientStateData) UnpackInterfaces(unpacker codectypes.AnyUnpacker) error {
+ return unpacker.UnpackAny(csd.ClientState, new(exported.ClientState))
+}
+
+// UnpackInterfaces implements the UnpackInterfacesMessage.UnpackInterfaces method
+func (csd ConsensusStateData) UnpackInterfaces(unpacker codectypes.AnyUnpacker) error {
+ return unpacker.UnpackAny(csd.ConsensusState, new(exported.ConsensusState))
+}
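The UnpackInterfaces implementations above exist so that the Any fields are resolved into concrete types when a message is decoded with an interface-aware codec. A hedged sketch of the effect, assuming cdc is a codec.ProtoCodec and bz holds a marshalled ConsensusState:

var cs ConsensusState
if err := cdc.UnmarshalBinaryBare(bz, &cs); err != nil {
	return err
}
// The ProtoCodec runs UnpackInterfaces while unmarshalling, so the Any's cached
// value is populated and can be asserted to a public key without another decode.
pk, ok := cs.PublicKey.GetCachedValue().(cryptotypes.PubKey)
if !ok {
	return fmt.Errorf("public key was not unpacked")
}
_ = pk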
diff --git a/light-clients/06-solomachine/types/solomachine.pb.go b/light-clients/06-solomachine/types/solomachine.pb.go
new file mode 100644
index 00000000..90c4110d
--- /dev/null
+++ b/light-clients/06-solomachine/types/solomachine.pb.go
@@ -0,0 +1,4121 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: ibcgo/lightclients/solomachine/v1/solomachine.proto
+
+package types
+
+import (
+ fmt "fmt"
+ types "github.com/cosmos/cosmos-sdk/codec/types"
+ types1 "github.com/cosmos/ibc-go/core/03-connection/types"
+ types2 "github.com/cosmos/ibc-go/core/04-channel/types"
+ _ "github.com/gogo/protobuf/gogoproto"
+ proto "github.com/gogo/protobuf/proto"
+ io "io"
+ math "math"
+ math_bits "math/bits"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+// DataType defines the type of solo machine proof being created. This is done
+// to preserve uniqueness of different data sign byte encodings.
+type DataType int32
+
+const (
+ // Default State
+ UNSPECIFIED DataType = 0
+ // Data type for client state verification
+ CLIENT DataType = 1
+ // Data type for consensus state verification
+ CONSENSUS DataType = 2
+ // Data type for connection state verification
+ CONNECTION DataType = 3
+ // Data type for channel state verification
+ CHANNEL DataType = 4
+ // Data type for packet commitment verification
+ PACKETCOMMITMENT DataType = 5
+ // Data type for packet acknowledgement verification
+ PACKETACKNOWLEDGEMENT DataType = 6
+ // Data type for packet receipt absence verification
+ PACKETRECEIPTABSENCE DataType = 7
+ // Data type for next sequence recv verification
+ NEXTSEQUENCERECV DataType = 8
+ // Data type for header verification
+ HEADER DataType = 9
+)
+
+var DataType_name = map[int32]string{
+ 0: "DATA_TYPE_UNINITIALIZED_UNSPECIFIED",
+ 1: "DATA_TYPE_CLIENT_STATE",
+ 2: "DATA_TYPE_CONSENSUS_STATE",
+ 3: "DATA_TYPE_CONNECTION_STATE",
+ 4: "DATA_TYPE_CHANNEL_STATE",
+ 5: "DATA_TYPE_PACKET_COMMITMENT",
+ 6: "DATA_TYPE_PACKET_ACKNOWLEDGEMENT",
+ 7: "DATA_TYPE_PACKET_RECEIPT_ABSENCE",
+ 8: "DATA_TYPE_NEXT_SEQUENCE_RECV",
+ 9: "DATA_TYPE_HEADER",
+}
+
+var DataType_value = map[string]int32{
+ "DATA_TYPE_UNINITIALIZED_UNSPECIFIED": 0,
+ "DATA_TYPE_CLIENT_STATE": 1,
+ "DATA_TYPE_CONSENSUS_STATE": 2,
+ "DATA_TYPE_CONNECTION_STATE": 3,
+ "DATA_TYPE_CHANNEL_STATE": 4,
+ "DATA_TYPE_PACKET_COMMITMENT": 5,
+ "DATA_TYPE_PACKET_ACKNOWLEDGEMENT": 6,
+ "DATA_TYPE_PACKET_RECEIPT_ABSENCE": 7,
+ "DATA_TYPE_NEXT_SEQUENCE_RECV": 8,
+ "DATA_TYPE_HEADER": 9,
+}
+
+func (x DataType) String() string {
+ return proto.EnumName(DataType_name, int32(x))
+}
+
+func (DataType) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_39862ff634781870, []int{0}
+}
+
+// ClientState defines a solo machine client that tracks the current consensus
+// state and if the client is frozen.
+type ClientState struct {
+ // latest sequence of the client state
+ Sequence uint64 `protobuf:"varint,1,opt,name=sequence,proto3" json:"sequence,omitempty"`
+ // frozen sequence of the solo machine
+ FrozenSequence uint64 `protobuf:"varint,2,opt,name=frozen_sequence,json=frozenSequence,proto3" json:"frozen_sequence,omitempty" yaml:"frozen_sequence"`
+ ConsensusState *ConsensusState `protobuf:"bytes,3,opt,name=consensus_state,json=consensusState,proto3" json:"consensus_state,omitempty" yaml:"consensus_state"`
+ // when set to true, will allow governance to update a solo machine client.
+ // The client will be unfrozen if it is frozen.
+ AllowUpdateAfterProposal bool `protobuf:"varint,4,opt,name=allow_update_after_proposal,json=allowUpdateAfterProposal,proto3" json:"allow_update_after_proposal,omitempty" yaml:"allow_update_after_proposal"`
+}
+
+func (m *ClientState) Reset() { *m = ClientState{} }
+func (m *ClientState) String() string { return proto.CompactTextString(m) }
+func (*ClientState) ProtoMessage() {}
+func (*ClientState) Descriptor() ([]byte, []int) {
+ return fileDescriptor_39862ff634781870, []int{0}
+}
+func (m *ClientState) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ClientState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_ClientState.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *ClientState) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ClientState.Merge(m, src)
+}
+func (m *ClientState) XXX_Size() int {
+ return m.Size()
+}
+func (m *ClientState) XXX_DiscardUnknown() {
+ xxx_messageInfo_ClientState.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ClientState proto.InternalMessageInfo
+
+// ConsensusState defines a solo machine consensus state. The sequence of a
+// consensus state is contained in the "height" key used in storing the
+// consensus state.
+type ConsensusState struct {
+ // public key of the solo machine
+ PublicKey *types.Any `protobuf:"bytes,1,opt,name=public_key,json=publicKey,proto3" json:"public_key,omitempty" yaml:"public_key"`
+ // diversifier allows the same public key to be re-used across different solo
+ // machine clients (potentially on different chains) without being considered
+ // misbehaviour.
+ Diversifier string `protobuf:"bytes,2,opt,name=diversifier,proto3" json:"diversifier,omitempty"`
+ Timestamp uint64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
+}
+
+func (m *ConsensusState) Reset() { *m = ConsensusState{} }
+func (m *ConsensusState) String() string { return proto.CompactTextString(m) }
+func (*ConsensusState) ProtoMessage() {}
+func (*ConsensusState) Descriptor() ([]byte, []int) {
+ return fileDescriptor_39862ff634781870, []int{1}
+}
+func (m *ConsensusState) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ConsensusState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_ConsensusState.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *ConsensusState) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ConsensusState.Merge(m, src)
+}
+func (m *ConsensusState) XXX_Size() int {
+ return m.Size()
+}
+func (m *ConsensusState) XXX_DiscardUnknown() {
+ xxx_messageInfo_ConsensusState.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ConsensusState proto.InternalMessageInfo
+
+// Header defines a solo machine consensus header
+type Header struct {
+ // sequence to update solo machine public key at
+ Sequence uint64 `protobuf:"varint,1,opt,name=sequence,proto3" json:"sequence,omitempty"`
+ Timestamp uint64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
+ Signature []byte `protobuf:"bytes,3,opt,name=signature,proto3" json:"signature,omitempty"`
+ NewPublicKey *types.Any `protobuf:"bytes,4,opt,name=new_public_key,json=newPublicKey,proto3" json:"new_public_key,omitempty" yaml:"new_public_key"`
+ NewDiversifier string `protobuf:"bytes,5,opt,name=new_diversifier,json=newDiversifier,proto3" json:"new_diversifier,omitempty" yaml:"new_diversifier"`
+}
+
+func (m *Header) Reset() { *m = Header{} }
+func (m *Header) String() string { return proto.CompactTextString(m) }
+func (*Header) ProtoMessage() {}
+func (*Header) Descriptor() ([]byte, []int) {
+ return fileDescriptor_39862ff634781870, []int{2}
+}
+func (m *Header) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Header) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_Header.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *Header) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Header.Merge(m, src)
+}
+func (m *Header) XXX_Size() int {
+ return m.Size()
+}
+func (m *Header) XXX_DiscardUnknown() {
+ xxx_messageInfo_Header.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Header proto.InternalMessageInfo
+
+// Misbehaviour defines misbehaviour for a solo machine which consists
+// of a sequence and two signatures over different messages at that sequence.
+type Misbehaviour struct {
+ ClientId string `protobuf:"bytes,1,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty" yaml:"client_id"`
+ Sequence uint64 `protobuf:"varint,2,opt,name=sequence,proto3" json:"sequence,omitempty"`
+ SignatureOne *SignatureAndData `protobuf:"bytes,3,opt,name=signature_one,json=signatureOne,proto3" json:"signature_one,omitempty" yaml:"signature_one"`
+ SignatureTwo *SignatureAndData `protobuf:"bytes,4,opt,name=signature_two,json=signatureTwo,proto3" json:"signature_two,omitempty" yaml:"signature_two"`
+}
+
+func (m *Misbehaviour) Reset() { *m = Misbehaviour{} }
+func (m *Misbehaviour) String() string { return proto.CompactTextString(m) }
+func (*Misbehaviour) ProtoMessage() {}
+func (*Misbehaviour) Descriptor() ([]byte, []int) {
+ return fileDescriptor_39862ff634781870, []int{3}
+}
+func (m *Misbehaviour) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Misbehaviour) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_Misbehaviour.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *Misbehaviour) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Misbehaviour.Merge(m, src)
+}
+func (m *Misbehaviour) XXX_Size() int {
+ return m.Size()
+}
+func (m *Misbehaviour) XXX_DiscardUnknown() {
+ xxx_messageInfo_Misbehaviour.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Misbehaviour proto.InternalMessageInfo
+
+// SignatureAndData contains a signature and the data signed over to create that
+// signature.
+type SignatureAndData struct {
+ Signature []byte `protobuf:"bytes,1,opt,name=signature,proto3" json:"signature,omitempty"`
+ DataType DataType `protobuf:"varint,2,opt,name=data_type,json=dataType,proto3,enum=ibcgo.lightclients.solomachine.v1.DataType" json:"data_type,omitempty" yaml:"data_type"`
+ Data []byte `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"`
+ Timestamp uint64 `protobuf:"varint,4,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
+}
+
+func (m *SignatureAndData) Reset() { *m = SignatureAndData{} }
+func (m *SignatureAndData) String() string { return proto.CompactTextString(m) }
+func (*SignatureAndData) ProtoMessage() {}
+func (*SignatureAndData) Descriptor() ([]byte, []int) {
+ return fileDescriptor_39862ff634781870, []int{4}
+}
+func (m *SignatureAndData) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SignatureAndData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_SignatureAndData.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *SignatureAndData) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SignatureAndData.Merge(m, src)
+}
+func (m *SignatureAndData) XXX_Size() int {
+ return m.Size()
+}
+func (m *SignatureAndData) XXX_DiscardUnknown() {
+ xxx_messageInfo_SignatureAndData.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SignatureAndData proto.InternalMessageInfo
+
+// TimestampedSignatureData contains the signature data and the timestamp of the
+// signature.
+type TimestampedSignatureData struct {
+ SignatureData []byte `protobuf:"bytes,1,opt,name=signature_data,json=signatureData,proto3" json:"signature_data,omitempty" yaml:"signature_data"`
+ Timestamp uint64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
+}
+
+func (m *TimestampedSignatureData) Reset() { *m = TimestampedSignatureData{} }
+func (m *TimestampedSignatureData) String() string { return proto.CompactTextString(m) }
+func (*TimestampedSignatureData) ProtoMessage() {}
+func (*TimestampedSignatureData) Descriptor() ([]byte, []int) {
+ return fileDescriptor_39862ff634781870, []int{5}
+}
+func (m *TimestampedSignatureData) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *TimestampedSignatureData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_TimestampedSignatureData.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *TimestampedSignatureData) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_TimestampedSignatureData.Merge(m, src)
+}
+func (m *TimestampedSignatureData) XXX_Size() int {
+ return m.Size()
+}
+func (m *TimestampedSignatureData) XXX_DiscardUnknown() {
+ xxx_messageInfo_TimestampedSignatureData.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TimestampedSignatureData proto.InternalMessageInfo
+
+// SignBytes defines the signed bytes used for signature verification.
+type SignBytes struct {
+ Sequence uint64 `protobuf:"varint,1,opt,name=sequence,proto3" json:"sequence,omitempty"`
+ Timestamp uint64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
+ Diversifier string `protobuf:"bytes,3,opt,name=diversifier,proto3" json:"diversifier,omitempty"`
+ // type of the data used
+ DataType DataType `protobuf:"varint,4,opt,name=data_type,json=dataType,proto3,enum=ibcgo.lightclients.solomachine.v1.DataType" json:"data_type,omitempty" yaml:"data_type"`
+ // marshaled data
+ Data []byte `protobuf:"bytes,5,opt,name=data,proto3" json:"data,omitempty"`
+}
+
+func (m *SignBytes) Reset() { *m = SignBytes{} }
+func (m *SignBytes) String() string { return proto.CompactTextString(m) }
+func (*SignBytes) ProtoMessage() {}
+func (*SignBytes) Descriptor() ([]byte, []int) {
+ return fileDescriptor_39862ff634781870, []int{6}
+}
+func (m *SignBytes) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SignBytes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_SignBytes.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *SignBytes) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SignBytes.Merge(m, src)
+}
+func (m *SignBytes) XXX_Size() int {
+ return m.Size()
+}
+func (m *SignBytes) XXX_DiscardUnknown() {
+ xxx_messageInfo_SignBytes.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SignBytes proto.InternalMessageInfo
+
+// HeaderData returns the SignBytes data for update verification.
+type HeaderData struct {
+ // header public key
+ NewPubKey *types.Any `protobuf:"bytes,1,opt,name=new_pub_key,json=newPubKey,proto3" json:"new_pub_key,omitempty" yaml:"new_pub_key"`
+ // header diversifier
+ NewDiversifier string `protobuf:"bytes,2,opt,name=new_diversifier,json=newDiversifier,proto3" json:"new_diversifier,omitempty" yaml:"new_diversifier"`
+}
+
+func (m *HeaderData) Reset() { *m = HeaderData{} }
+func (m *HeaderData) String() string { return proto.CompactTextString(m) }
+func (*HeaderData) ProtoMessage() {}
+func (*HeaderData) Descriptor() ([]byte, []int) {
+ return fileDescriptor_39862ff634781870, []int{7}
+}
+func (m *HeaderData) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *HeaderData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_HeaderData.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *HeaderData) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_HeaderData.Merge(m, src)
+}
+func (m *HeaderData) XXX_Size() int {
+ return m.Size()
+}
+func (m *HeaderData) XXX_DiscardUnknown() {
+ xxx_messageInfo_HeaderData.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_HeaderData proto.InternalMessageInfo
+
+// ClientStateData returns the SignBytes data for client state verification.
+type ClientStateData struct {
+ Path []byte `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
+ ClientState *types.Any `protobuf:"bytes,2,opt,name=client_state,json=clientState,proto3" json:"client_state,omitempty" yaml:"client_state"`
+}
+
+func (m *ClientStateData) Reset() { *m = ClientStateData{} }
+func (m *ClientStateData) String() string { return proto.CompactTextString(m) }
+func (*ClientStateData) ProtoMessage() {}
+func (*ClientStateData) Descriptor() ([]byte, []int) {
+ return fileDescriptor_39862ff634781870, []int{8}
+}
+func (m *ClientStateData) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ClientStateData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_ClientStateData.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *ClientStateData) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ClientStateData.Merge(m, src)
+}
+func (m *ClientStateData) XXX_Size() int {
+ return m.Size()
+}
+func (m *ClientStateData) XXX_DiscardUnknown() {
+ xxx_messageInfo_ClientStateData.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ClientStateData proto.InternalMessageInfo
+
+// ConsensusStateData returns the SignBytes data for consensus state
+// verification.
+type ConsensusStateData struct {
+ Path []byte `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
+ ConsensusState *types.Any `protobuf:"bytes,2,opt,name=consensus_state,json=consensusState,proto3" json:"consensus_state,omitempty" yaml:"consensus_state"`
+}
+
+func (m *ConsensusStateData) Reset() { *m = ConsensusStateData{} }
+func (m *ConsensusStateData) String() string { return proto.CompactTextString(m) }
+func (*ConsensusStateData) ProtoMessage() {}
+func (*ConsensusStateData) Descriptor() ([]byte, []int) {
+ return fileDescriptor_39862ff634781870, []int{9}
+}
+func (m *ConsensusStateData) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ConsensusStateData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_ConsensusStateData.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *ConsensusStateData) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ConsensusStateData.Merge(m, src)
+}
+func (m *ConsensusStateData) XXX_Size() int {
+ return m.Size()
+}
+func (m *ConsensusStateData) XXX_DiscardUnknown() {
+ xxx_messageInfo_ConsensusStateData.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ConsensusStateData proto.InternalMessageInfo
+
+// ConnectionStateData returns the SignBytes data for connection state
+// verification.
+type ConnectionStateData struct {
+ Path []byte `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
+ Connection *types1.ConnectionEnd `protobuf:"bytes,2,opt,name=connection,proto3" json:"connection,omitempty"`
+}
+
+func (m *ConnectionStateData) Reset() { *m = ConnectionStateData{} }
+func (m *ConnectionStateData) String() string { return proto.CompactTextString(m) }
+func (*ConnectionStateData) ProtoMessage() {}
+func (*ConnectionStateData) Descriptor() ([]byte, []int) {
+ return fileDescriptor_39862ff634781870, []int{10}
+}
+func (m *ConnectionStateData) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ConnectionStateData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_ConnectionStateData.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *ConnectionStateData) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ConnectionStateData.Merge(m, src)
+}
+func (m *ConnectionStateData) XXX_Size() int {
+ return m.Size()
+}
+func (m *ConnectionStateData) XXX_DiscardUnknown() {
+ xxx_messageInfo_ConnectionStateData.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ConnectionStateData proto.InternalMessageInfo
+
+// ChannelStateData returns the SignBytes data for channel state
+// verification.
+type ChannelStateData struct {
+ Path []byte `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
+ Channel *types2.Channel `protobuf:"bytes,2,opt,name=channel,proto3" json:"channel,omitempty"`
+}
+
+func (m *ChannelStateData) Reset() { *m = ChannelStateData{} }
+func (m *ChannelStateData) String() string { return proto.CompactTextString(m) }
+func (*ChannelStateData) ProtoMessage() {}
+func (*ChannelStateData) Descriptor() ([]byte, []int) {
+ return fileDescriptor_39862ff634781870, []int{11}
+}
+func (m *ChannelStateData) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ChannelStateData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_ChannelStateData.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *ChannelStateData) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ChannelStateData.Merge(m, src)
+}
+func (m *ChannelStateData) XXX_Size() int {
+ return m.Size()
+}
+func (m *ChannelStateData) XXX_DiscardUnknown() {
+ xxx_messageInfo_ChannelStateData.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ChannelStateData proto.InternalMessageInfo
+
+// PacketCommitmentData returns the SignBytes data for packet commitment
+// verification.
+type PacketCommitmentData struct {
+ Path []byte `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
+ Commitment []byte `protobuf:"bytes,2,opt,name=commitment,proto3" json:"commitment,omitempty"`
+}
+
+func (m *PacketCommitmentData) Reset() { *m = PacketCommitmentData{} }
+func (m *PacketCommitmentData) String() string { return proto.CompactTextString(m) }
+func (*PacketCommitmentData) ProtoMessage() {}
+func (*PacketCommitmentData) Descriptor() ([]byte, []int) {
+ return fileDescriptor_39862ff634781870, []int{12}
+}
+func (m *PacketCommitmentData) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *PacketCommitmentData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_PacketCommitmentData.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *PacketCommitmentData) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_PacketCommitmentData.Merge(m, src)
+}
+func (m *PacketCommitmentData) XXX_Size() int {
+ return m.Size()
+}
+func (m *PacketCommitmentData) XXX_DiscardUnknown() {
+ xxx_messageInfo_PacketCommitmentData.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PacketCommitmentData proto.InternalMessageInfo
+
+func (m *PacketCommitmentData) GetPath() []byte {
+ if m != nil {
+ return m.Path
+ }
+ return nil
+}
+
+func (m *PacketCommitmentData) GetCommitment() []byte {
+ if m != nil {
+ return m.Commitment
+ }
+ return nil
+}
+
+// PacketAcknowledgementData returns the SignBytes data for acknowledgement
+// verification.
+type PacketAcknowledgementData struct {
+ Path []byte `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
+ Acknowledgement []byte `protobuf:"bytes,2,opt,name=acknowledgement,proto3" json:"acknowledgement,omitempty"`
+}
+
+func (m *PacketAcknowledgementData) Reset() { *m = PacketAcknowledgementData{} }
+func (m *PacketAcknowledgementData) String() string { return proto.CompactTextString(m) }
+func (*PacketAcknowledgementData) ProtoMessage() {}
+func (*PacketAcknowledgementData) Descriptor() ([]byte, []int) {
+ return fileDescriptor_39862ff634781870, []int{13}
+}
+func (m *PacketAcknowledgementData) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *PacketAcknowledgementData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_PacketAcknowledgementData.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *PacketAcknowledgementData) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_PacketAcknowledgementData.Merge(m, src)
+}
+func (m *PacketAcknowledgementData) XXX_Size() int {
+ return m.Size()
+}
+func (m *PacketAcknowledgementData) XXX_DiscardUnknown() {
+ xxx_messageInfo_PacketAcknowledgementData.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PacketAcknowledgementData proto.InternalMessageInfo
+
+func (m *PacketAcknowledgementData) GetPath() []byte {
+ if m != nil {
+ return m.Path
+ }
+ return nil
+}
+
+func (m *PacketAcknowledgementData) GetAcknowledgement() []byte {
+ if m != nil {
+ return m.Acknowledgement
+ }
+ return nil
+}
+
+// PacketReceiptAbsenceData returns the SignBytes data for
+// packet receipt absence verification.
+type PacketReceiptAbsenceData struct {
+ Path []byte `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
+}
+
+func (m *PacketReceiptAbsenceData) Reset() { *m = PacketReceiptAbsenceData{} }
+func (m *PacketReceiptAbsenceData) String() string { return proto.CompactTextString(m) }
+func (*PacketReceiptAbsenceData) ProtoMessage() {}
+func (*PacketReceiptAbsenceData) Descriptor() ([]byte, []int) {
+ return fileDescriptor_39862ff634781870, []int{14}
+}
+func (m *PacketReceiptAbsenceData) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *PacketReceiptAbsenceData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_PacketReceiptAbsenceData.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *PacketReceiptAbsenceData) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_PacketReceiptAbsenceData.Merge(m, src)
+}
+func (m *PacketReceiptAbsenceData) XXX_Size() int {
+ return m.Size()
+}
+func (m *PacketReceiptAbsenceData) XXX_DiscardUnknown() {
+ xxx_messageInfo_PacketReceiptAbsenceData.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PacketReceiptAbsenceData proto.InternalMessageInfo
+
+func (m *PacketReceiptAbsenceData) GetPath() []byte {
+ if m != nil {
+ return m.Path
+ }
+ return nil
+}
+
+// NextSequenceRecvData returns the SignBytes data for verification of the next
+// sequence to be received.
+type NextSequenceRecvData struct {
+ Path []byte `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
+ NextSeqRecv uint64 `protobuf:"varint,2,opt,name=next_seq_recv,json=nextSeqRecv,proto3" json:"next_seq_recv,omitempty" yaml:"next_seq_recv"`
+}
+
+func (m *NextSequenceRecvData) Reset() { *m = NextSequenceRecvData{} }
+func (m *NextSequenceRecvData) String() string { return proto.CompactTextString(m) }
+func (*NextSequenceRecvData) ProtoMessage() {}
+func (*NextSequenceRecvData) Descriptor() ([]byte, []int) {
+ return fileDescriptor_39862ff634781870, []int{15}
+}
+func (m *NextSequenceRecvData) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *NextSequenceRecvData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_NextSequenceRecvData.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *NextSequenceRecvData) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_NextSequenceRecvData.Merge(m, src)
+}
+func (m *NextSequenceRecvData) XXX_Size() int {
+ return m.Size()
+}
+func (m *NextSequenceRecvData) XXX_DiscardUnknown() {
+ xxx_messageInfo_NextSequenceRecvData.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NextSequenceRecvData proto.InternalMessageInfo
+
+func (m *NextSequenceRecvData) GetPath() []byte {
+ if m != nil {
+ return m.Path
+ }
+ return nil
+}
+
+func (m *NextSequenceRecvData) GetNextSeqRecv() uint64 {
+ if m != nil {
+ return m.NextSeqRecv
+ }
+ return 0
+}
+
+func init() {
+ proto.RegisterEnum("ibcgo.lightclients.solomachine.v1.DataType", DataType_name, DataType_value)
+ proto.RegisterType((*ClientState)(nil), "ibcgo.lightclients.solomachine.v1.ClientState")
+ proto.RegisterType((*ConsensusState)(nil), "ibcgo.lightclients.solomachine.v1.ConsensusState")
+ proto.RegisterType((*Header)(nil), "ibcgo.lightclients.solomachine.v1.Header")
+ proto.RegisterType((*Misbehaviour)(nil), "ibcgo.lightclients.solomachine.v1.Misbehaviour")
+ proto.RegisterType((*SignatureAndData)(nil), "ibcgo.lightclients.solomachine.v1.SignatureAndData")
+ proto.RegisterType((*TimestampedSignatureData)(nil), "ibcgo.lightclients.solomachine.v1.TimestampedSignatureData")
+ proto.RegisterType((*SignBytes)(nil), "ibcgo.lightclients.solomachine.v1.SignBytes")
+ proto.RegisterType((*HeaderData)(nil), "ibcgo.lightclients.solomachine.v1.HeaderData")
+ proto.RegisterType((*ClientStateData)(nil), "ibcgo.lightclients.solomachine.v1.ClientStateData")
+ proto.RegisterType((*ConsensusStateData)(nil), "ibcgo.lightclients.solomachine.v1.ConsensusStateData")
+ proto.RegisterType((*ConnectionStateData)(nil), "ibcgo.lightclients.solomachine.v1.ConnectionStateData")
+ proto.RegisterType((*ChannelStateData)(nil), "ibcgo.lightclients.solomachine.v1.ChannelStateData")
+ proto.RegisterType((*PacketCommitmentData)(nil), "ibcgo.lightclients.solomachine.v1.PacketCommitmentData")
+ proto.RegisterType((*PacketAcknowledgementData)(nil), "ibcgo.lightclients.solomachine.v1.PacketAcknowledgementData")
+ proto.RegisterType((*PacketReceiptAbsenceData)(nil), "ibcgo.lightclients.solomachine.v1.PacketReceiptAbsenceData")
+ proto.RegisterType((*NextSequenceRecvData)(nil), "ibcgo.lightclients.solomachine.v1.NextSequenceRecvData")
+}
+
+func init() {
+ proto.RegisterFile("ibcgo/lightclients/solomachine/v1/solomachine.proto", fileDescriptor_39862ff634781870)
+}
+
+var fileDescriptor_39862ff634781870 = []byte{
+ // 1361 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x57, 0xdd, 0x8e, 0xda, 0xd6,
+ 0x13, 0x5f, 0x13, 0xb2, 0x59, 0x86, 0x0d, 0xcb, 0xdf, 0x21, 0x09, 0xeb, 0x44, 0xe0, 0xbf, 0x23,
+ 0xa5, 0xdb, 0x8f, 0x40, 0x37, 0x51, 0xa3, 0x28, 0xad, 0xda, 0x1a, 0xe3, 0x26, 0x24, 0xbb, 0x5e,
+ 0x6a, 0x4c, 0xdb, 0xe4, 0xa2, 0x96, 0x31, 0x67, 0xc1, 0x0a, 0xd8, 0x14, 0x1b, 0x08, 0x95, 0x2a,
+ 0x55, 0xbd, 0x4a, 0x51, 0x2f, 0xfa, 0x02, 0x48, 0x55, 0xab, 0xbe, 0x4b, 0xa4, 0xde, 0x44, 0xea,
+ 0x4d, 0xaf, 0x50, 0x9b, 0xbc, 0x01, 0x4f, 0x50, 0xd9, 0xe7, 0x18, 0xdb, 0xec, 0x2e, 0x49, 0xbf,
+ 0xee, 0xce, 0x99, 0xf9, 0xcd, 0x6f, 0xe6, 0xcc, 0x8c, 0xe7, 0x1c, 0xc3, 0x0d, 0xa3, 0xa1, 0xb7,
+ 0xac, 0x62, 0xc7, 0x68, 0xb5, 0x1d, 0xbd, 0x63, 0x20, 0xd3, 0xb1, 0x8b, 0xb6, 0xd5, 0xb1, 0xba,
+ 0x9a, 0xde, 0x36, 0x4c, 0x54, 0x1c, 0xee, 0x86, 0xb7, 0x85, 0x5e, 0xdf, 0x72, 0x2c, 0xfa, 0xff,
+ 0x9e, 0x51, 0x21, 0x6c, 0x54, 0x08, 0xa3, 0x86, 0xbb, 0xcc, 0xeb, 0x98, 0x57, 0xb7, 0xfa, 0xa8,
+ 0xa8, 0x5b, 0xa6, 0x89, 0x74, 0xc7, 0xb0, 0x4c, 0x97, 0x2e, 0xd8, 0x61, 0x36, 0xe6, 0x4a, 0x18,
+ 0xda, 0xd6, 0x4c, 0x13, 0x75, 0x3c, 0x1c, 0x5e, 0x12, 0x50, 0xa6, 0x65, 0xb5, 0x2c, 0x6f, 0x59,
+ 0x74, 0x57, 0x44, 0xba, 0xdd, 0xb2, 0xac, 0x56, 0x07, 0x15, 0xbd, 0x5d, 0x63, 0x70, 0x58, 0xd4,
+ 0xcc, 0x31, 0x56, 0x71, 0xbf, 0xc6, 0x20, 0x29, 0x78, 0xb1, 0xd5, 0x1c, 0xcd, 0x41, 0x34, 0x03,
+ 0x1b, 0x36, 0xfa, 0x62, 0x80, 0x4c, 0x1d, 0x65, 0x29, 0x96, 0xda, 0x89, 0xcb, 0x8b, 0x3d, 0x2d,
+ 0xc0, 0xd6, 0x61, 0xdf, 0xfa, 0x12, 0x99, 0xea, 0x02, 0x12, 0x73, 0x21, 0x25, 0x66, 0x3e, 0xcb,
+ 0x5f, 0x18, 0x6b, 0xdd, 0xce, 0x6d, 0x6e, 0x09, 0xc0, 0xc9, 0x29, 0x2c, 0xa9, 0xf9, 0x24, 0x43,
+ 0xd8, 0xd2, 0x2d, 0xd3, 0x46, 0xa6, 0x3d, 0xb0, 0x55, 0xdb, 0xf5, 0x99, 0x3d, 0xc5, 0x52, 0x3b,
+ 0xc9, 0xeb, 0xbb, 0x85, 0x97, 0xa6, 0xab, 0x20, 0xf8, 0x96, 0x5e, 0xb0, 0x61, 0xbf, 0x4b, 0x9c,
+ 0x9c, 0x9c, 0xd2, 0x23, 0x58, 0x1a, 0xc1, 0x25, 0xad, 0xd3, 0xb1, 0x46, 0xea, 0xa0, 0xd7, 0xd4,
+ 0x1c, 0xa4, 0x6a, 0x87, 0x0e, 0xea, 0xab, 0xbd, 0xbe, 0xd5, 0xb3, 0x6c, 0xad, 0x93, 0x8d, 0xb3,
+ 0xd4, 0xce, 0x46, 0xe9, 0xea, 0x7c, 0x96, 0xe7, 0x30, 0xe1, 0x0a, 0x30, 0x27, 0x67, 0x3d, 0x6d,
+ 0xdd, 0x53, 0xf2, 0xae, 0xae, 0x4a, 0x54, 0xb7, 0xe3, 0x4f, 0x7e, 0xc8, 0xaf, 0x71, 0x3f, 0x52,
+ 0x90, 0x8a, 0xc6, 0x4a, 0xdf, 0x03, 0xe8, 0x0d, 0x1a, 0x1d, 0x43, 0x57, 0x1f, 0xa1, 0xb1, 0x97,
+ 0xda, 0xe4, 0xf5, 0x4c, 0x01, 0x17, 0xa6, 0xe0, 0x17, 0xa6, 0xc0, 0x9b, 0xe3, 0xd2, 0xf9, 0xf9,
+ 0x2c, 0xff, 0x3f, 0x1c, 0x44, 0x60, 0xc1, 0xc9, 0x09, 0xbc, 0xb9, 0x8f, 0xc6, 0x34, 0x0b, 0xc9,
+ 0xa6, 0x31, 0x44, 0x7d, 0xdb, 0x38, 0x34, 0x50, 0xdf, 0x2b, 0x42, 0x42, 0x0e, 0x8b, 0xe8, 0xcb,
+ 0x90, 0x70, 0x8c, 0x2e, 0xb2, 0x1d, 0xad, 0xdb, 0xf3, 0xf2, 0x1b, 0x97, 0x03, 0x01, 0x09, 0xf2,
+ 0x9b, 0x18, 0xac, 0xdf, 0x45, 0x5a, 0x13, 0xf5, 0x57, 0x56, 0x3d, 0x42, 0x15, 0x5b, 0xa2, 0x72,
+ 0xb5, 0xb6, 0xd1, 0x32, 0x35, 0x67, 0xd0, 0xc7, 0x85, 0xdc, 0x94, 0x03, 0x01, 0x5d, 0x87, 0x94,
+ 0x89, 0x46, 0x6a, 0xe8, 0xe0, 0xf1, 0x15, 0x07, 0xdf, 0x9e, 0xcf, 0xf2, 0xe7, 0xf1, 0xc1, 0xa3,
+ 0x56, 0x9c, 0xbc, 0x69, 0xa2, 0x51, 0x75, 0x71, 0x7e, 0x01, 0xb6, 0x5c, 0x40, 0x38, 0x07, 0xa7,
+ 0xdd, 0x1c, 0x84, 0x1b, 0x62, 0x09, 0xc0, 0xc9, 0x6e, 0x24, 0xe5, 0x40, 0x40, 0x92, 0xf0, 0x4b,
+ 0x0c, 0x36, 0xf7, 0x0d, 0xbb, 0x81, 0xda, 0xda, 0xd0, 0xb0, 0x06, 0x7d, 0x7a, 0x17, 0x12, 0xb8,
+ 0xf9, 0x54, 0xa3, 0xe9, 0xe5, 0x22, 0x51, 0xca, 0xcc, 0x67, 0xf9, 0x34, 0x69, 0x33, 0x5f, 0xc5,
+ 0xc9, 0x1b, 0x78, 0x5d, 0x69, 0x46, 0xb2, 0x17, 0x5b, 0xca, 0x5e, 0x1f, 0xce, 0x2e, 0xd2, 0xa1,
+ 0x5a, 0xa6, 0xdf, 0xec, 0x37, 0x5e, 0xa1, 0xd9, 0x6b, 0xbe, 0x1d, 0x6f, 0x36, 0xcb, 0x9a, 0xa3,
+ 0x95, 0xb2, 0xf3, 0x59, 0x3e, 0x83, 0xe3, 0x88, 0x70, 0x72, 0xf2, 0xe6, 0x62, 0x7f, 0x60, 0x2e,
+ 0xf9, 0x74, 0x46, 0x16, 0x49, 0xfa, 0xbf, 0xe7, 0xd3, 0x19, 0x59, 0x61, 0x9f, 0xca, 0xc8, 0x22,
+ 0xd9, 0x7c, 0x4a, 0x41, 0x7a, 0x99, 0x22, 0xda, 0x22, 0xd4, 0x72, 0x8b, 0x7c, 0x0e, 0x89, 0xa6,
+ 0xe6, 0x68, 0xaa, 0x33, 0xee, 0xe1, 0xec, 0xa5, 0xae, 0xbf, 0xf9, 0x0a, 0x81, 0xba, 0xcc, 0xca,
+ 0xb8, 0x87, 0xc2, 0xc5, 0x59, 0xf0, 0x70, 0xf2, 0x46, 0x93, 0xe8, 0x69, 0x1a, 0xe2, 0xee, 0x9a,
+ 0xf4, 0xa6, 0xb7, 0x8e, 0xb6, 0x74, 0xfc, 0xf8, 0xaf, 0xe3, 0x6b, 0x0a, 0xb2, 0x8a, 0x2f, 0x43,
+ 0xcd, 0xc5, 0xa9, 0xbc, 0x23, 0x7d, 0x08, 0xa9, 0x20, 0x1b, 0x1e, 0xbd, 0x77, 0xae, 0x70, 0x07,
+ 0x47, 0xf5, 0x9c, 0x1c, 0x94, 0xa4, 0x7c, 0x24, 0x84, 0xd8, 0xf1, 0x21, 0xfc, 0x41, 0x41, 0xc2,
+ 0xf5, 0x5b, 0x1a, 0x3b, 0xc8, 0xfe, 0x07, 0xdf, 0xe8, 0xd2, 0xb8, 0x38, 0x75, 0x74, 0x5c, 0x44,
+ 0x8a, 0x10, 0xff, 0xef, 0x8a, 0x70, 0x3a, 0x28, 0x02, 0x39, 0xe3, 0xcf, 0x14, 0x00, 0x1e, 0x42,
+ 0x5e, 0x5a, 0xf6, 0x20, 0x49, 0x3e, 0xfd, 0x97, 0x8e, 0xc9, 0x0b, 0xf3, 0x59, 0x9e, 0x8e, 0x4c,
+ 0x0b, 0x32, 0x27, 0xf1, 0xa8, 0x38, 0x61, 0x4e, 0xc4, 0xfe, 0xe6, 0x9c, 0xf8, 0x0a, 0xb6, 0x42,
+ 0xd7, 0xa4, 0x17, 0x2b, 0x0d, 0xf1, 0x9e, 0xe6, 0xb4, 0x49, 0x4b, 0x7b, 0x6b, 0xba, 0x0a, 0x9b,
+ 0x64, 0x44, 0xe0, 0xab, 0x2d, 0xb6, 0xe2, 0x00, 0x17, 0xe7, 0xb3, 0xfc, 0xb9, 0xc8, 0x58, 0x21,
+ 0x57, 0x57, 0x52, 0x0f, 0x3c, 0x11, 0xf7, 0xdf, 0x52, 0x40, 0x47, 0x2f, 0x94, 0x13, 0x43, 0x78,
+ 0x70, 0xf4, 0x82, 0x5d, 0x15, 0xc5, 0x5f, 0xb8, 0x43, 0x49, 0x2c, 0x8f, 0xe1, 0x9c, 0xb0, 0x78,
+ 0x9c, 0xac, 0x8e, 0xe5, 0x0e, 0x40, 0xf0, 0x8e, 0x21, 0x61, 0xbc, 0x46, 0x1a, 0xcb, 0x7d, 0xc8,
+ 0x14, 0x42, 0xaf, 0x1c, 0x7c, 0xbd, 0x93, 0x9d, 0x68, 0x36, 0xe5, 0x90, 0x29, 0xf1, 0x7c, 0x08,
+ 0x69, 0x01, 0x3f, 0x77, 0x56, 0xbb, 0xbd, 0x05, 0x67, 0xc8, 0xb3, 0x88, 0xf8, 0xcc, 0x45, 0x7c,
+ 0x92, 0x17, 0x93, 0xeb, 0x10, 0x2f, 0x65, 0x1f, 0x4e, 0xfc, 0xdc, 0x83, 0x4c, 0x55, 0xd3, 0x1f,
+ 0x21, 0x47, 0xb0, 0xba, 0x5d, 0xc3, 0xe9, 0x22, 0xd3, 0x39, 0xd1, 0x57, 0xce, 0x3d, 0xa2, 0x8f,
+ 0xf2, 0xdc, 0x6d, 0xca, 0x21, 0x09, 0xf7, 0x00, 0xb6, 0x31, 0x17, 0xaf, 0x3f, 0x32, 0xad, 0x51,
+ 0x07, 0x35, 0x5b, 0x68, 0x25, 0xe1, 0x0e, 0x6c, 0x69, 0x51, 0x28, 0x61, 0x5d, 0x16, 0x73, 0x05,
+ 0xc8, 0x62, 0x6a, 0x19, 0xe9, 0xc8, 0xe8, 0x39, 0x7c, 0xc3, 0x76, 0xa7, 0xc1, 0x49, 0xcc, 0x5c,
+ 0x1b, 0x32, 0x12, 0x7a, 0xec, 0xf8, 0x4f, 0x31, 0x19, 0xe9, 0xc3, 0x13, 0xa3, 0x78, 0x0f, 0xce,
+ 0x9a, 0xe8, 0xb1, 0xe3, 0x3e, 0xe4, 0xd4, 0x3e, 0xd2, 0x87, 0xe4, 0xa5, 0x17, 0xba, 0x0e, 0x22,
+ 0x6a, 0x4e, 0x4e, 0x9a, 0x98, 0xda, 0x65, 0x7d, 0xe3, 0xbb, 0x38, 0x6c, 0xf8, 0xc3, 0x81, 0xbe,
+ 0x05, 0x57, 0xca, 0xbc, 0xc2, 0xab, 0xca, 0x83, 0xaa, 0xa8, 0xd6, 0xa5, 0x8a, 0x54, 0x51, 0x2a,
+ 0xfc, 0x5e, 0xe5, 0xa1, 0x58, 0x56, 0xeb, 0x52, 0xad, 0x2a, 0x0a, 0x95, 0x8f, 0x2a, 0x62, 0x39,
+ 0xbd, 0xc6, 0x6c, 0x4d, 0xa6, 0x6c, 0x32, 0x24, 0xa2, 0xaf, 0xc2, 0x85, 0xc0, 0x52, 0xd8, 0xab,
+ 0x88, 0x92, 0xa2, 0xd6, 0x14, 0x5e, 0x11, 0xd3, 0x14, 0x03, 0x93, 0x29, 0xbb, 0x8e, 0x65, 0xf4,
+ 0x5b, 0xb0, 0x1d, 0xc2, 0x1d, 0x48, 0x35, 0x51, 0xaa, 0xd5, 0x6b, 0x04, 0x1a, 0x63, 0xce, 0x4e,
+ 0xa6, 0x6c, 0x62, 0x21, 0xa6, 0x0b, 0xc0, 0x44, 0xd0, 0x92, 0x28, 0x28, 0x95, 0x03, 0x89, 0xc0,
+ 0x4f, 0x31, 0xa9, 0xc9, 0x94, 0x85, 0x40, 0x4e, 0xef, 0xc0, 0xc5, 0x10, 0xfe, 0x2e, 0x2f, 0x49,
+ 0xe2, 0x1e, 0x01, 0xc7, 0x99, 0xe4, 0x64, 0xca, 0x9e, 0x21, 0x42, 0xfa, 0x1d, 0xb8, 0x14, 0x20,
+ 0xab, 0xbc, 0x70, 0x5f, 0x54, 0x54, 0xe1, 0x60, 0x7f, 0xbf, 0xa2, 0xec, 0x8b, 0x92, 0x92, 0x3e,
+ 0xcd, 0x64, 0x26, 0x53, 0x36, 0x8d, 0x15, 0x81, 0x9c, 0xfe, 0x00, 0xd8, 0x23, 0x66, 0xbc, 0x70,
+ 0x5f, 0x3a, 0xf8, 0x74, 0x4f, 0x2c, 0xdf, 0x11, 0x3d, 0xdb, 0x75, 0x66, 0x7b, 0x32, 0x65, 0xcf,
+ 0x63, 0xed, 0x92, 0x92, 0x7e, 0xff, 0x18, 0x02, 0x59, 0x14, 0xc4, 0x4a, 0x55, 0x51, 0xf9, 0x52,
+ 0x4d, 0x94, 0x04, 0x31, 0x7d, 0x86, 0xc9, 0x4e, 0xa6, 0x6c, 0x06, 0x6b, 0x89, 0x92, 0xe8, 0xe8,
+ 0x9b, 0x70, 0x39, 0xb0, 0x97, 0xc4, 0xcf, 0x14, 0xb5, 0x26, 0x7e, 0x5c, 0x77, 0x55, 0x2e, 0xcd,
+ 0x27, 0xe9, 0x0d, 0x1c, 0xb8, 0xab, 0xf1, 0x15, 0xae, 0x9c, 0x66, 0x21, 0x1d, 0xd8, 0xdd, 0x15,
+ 0xf9, 0xb2, 0x28, 0xa7, 0x13, 0xb8, 0x32, 0x78, 0xc7, 0xc4, 0x9f, 0xfc, 0x94, 0x5b, 0x2b, 0xd5,
+ 0x9f, 0x3e, 0xcf, 0x51, 0xcf, 0x9e, 0xe7, 0xa8, 0xdf, 0x9f, 0xe7, 0xa8, 0xef, 0x5f, 0xe4, 0xd6,
+ 0x9e, 0xbd, 0xc8, 0xad, 0xfd, 0xf6, 0x22, 0xb7, 0xf6, 0xf0, 0xdd, 0x96, 0xe1, 0xb4, 0x07, 0x8d,
+ 0x82, 0x6e, 0x75, 0x8b, 0xba, 0x65, 0x77, 0x2d, 0xbb, 0x68, 0x34, 0xf4, 0x6b, 0xfe, 0xaf, 0xd6,
+ 0x35, 0xff, 0x5f, 0xeb, 0xed, 0x9b, 0xd7, 0xc2, 0xbf, 0x5b, 0xee, 0xfd, 0x62, 0x37, 0xd6, 0xbd,
+ 0x41, 0x76, 0xe3, 0xcf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xb9, 0xb1, 0x1d, 0x54, 0x9d, 0x0d, 0x00,
+ 0x00,
+}
+
+func (m *ClientState) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ClientState) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ClientState) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.AllowUpdateAfterProposal {
+ i--
+ if m.AllowUpdateAfterProposal {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x20
+ }
+ if m.ConsensusState != nil {
+ {
+ size, err := m.ConsensusState.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintSolomachine(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.FrozenSequence != 0 {
+ i = encodeVarintSolomachine(dAtA, i, uint64(m.FrozenSequence))
+ i--
+ dAtA[i] = 0x10
+ }
+ if m.Sequence != 0 {
+ i = encodeVarintSolomachine(dAtA, i, uint64(m.Sequence))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ConsensusState) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ConsensusState) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ConsensusState) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Timestamp != 0 {
+ i = encodeVarintSolomachine(dAtA, i, uint64(m.Timestamp))
+ i--
+ dAtA[i] = 0x18
+ }
+ if len(m.Diversifier) > 0 {
+ i -= len(m.Diversifier)
+ copy(dAtA[i:], m.Diversifier)
+ i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Diversifier)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.PublicKey != nil {
+ {
+ size, err := m.PublicKey.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintSolomachine(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Header) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Header) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Header) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.NewDiversifier) > 0 {
+ i -= len(m.NewDiversifier)
+ copy(dAtA[i:], m.NewDiversifier)
+ i = encodeVarintSolomachine(dAtA, i, uint64(len(m.NewDiversifier)))
+ i--
+ dAtA[i] = 0x2a
+ }
+ if m.NewPublicKey != nil {
+ {
+ size, err := m.NewPublicKey.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintSolomachine(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ if len(m.Signature) > 0 {
+ i -= len(m.Signature)
+ copy(dAtA[i:], m.Signature)
+ i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Signature)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.Timestamp != 0 {
+ i = encodeVarintSolomachine(dAtA, i, uint64(m.Timestamp))
+ i--
+ dAtA[i] = 0x10
+ }
+ if m.Sequence != 0 {
+ i = encodeVarintSolomachine(dAtA, i, uint64(m.Sequence))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Misbehaviour) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Misbehaviour) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Misbehaviour) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.SignatureTwo != nil {
+ {
+ size, err := m.SignatureTwo.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintSolomachine(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.SignatureOne != nil {
+ {
+ size, err := m.SignatureOne.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintSolomachine(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.Sequence != 0 {
+ i = encodeVarintSolomachine(dAtA, i, uint64(m.Sequence))
+ i--
+ dAtA[i] = 0x10
+ }
+ if len(m.ClientId) > 0 {
+ i -= len(m.ClientId)
+ copy(dAtA[i:], m.ClientId)
+ i = encodeVarintSolomachine(dAtA, i, uint64(len(m.ClientId)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *SignatureAndData) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SignatureAndData) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SignatureAndData) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Timestamp != 0 {
+ i = encodeVarintSolomachine(dAtA, i, uint64(m.Timestamp))
+ i--
+ dAtA[i] = 0x20
+ }
+ if len(m.Data) > 0 {
+ i -= len(m.Data)
+ copy(dAtA[i:], m.Data)
+ i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Data)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.DataType != 0 {
+ i = encodeVarintSolomachine(dAtA, i, uint64(m.DataType))
+ i--
+ dAtA[i] = 0x10
+ }
+ if len(m.Signature) > 0 {
+ i -= len(m.Signature)
+ copy(dAtA[i:], m.Signature)
+ i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Signature)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *TimestampedSignatureData) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *TimestampedSignatureData) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *TimestampedSignatureData) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Timestamp != 0 {
+ i = encodeVarintSolomachine(dAtA, i, uint64(m.Timestamp))
+ i--
+ dAtA[i] = 0x10
+ }
+ if len(m.SignatureData) > 0 {
+ i -= len(m.SignatureData)
+ copy(dAtA[i:], m.SignatureData)
+ i = encodeVarintSolomachine(dAtA, i, uint64(len(m.SignatureData)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *SignBytes) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SignBytes) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SignBytes) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Data) > 0 {
+ i -= len(m.Data)
+ copy(dAtA[i:], m.Data)
+ i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Data)))
+ i--
+ dAtA[i] = 0x2a
+ }
+ if m.DataType != 0 {
+ i = encodeVarintSolomachine(dAtA, i, uint64(m.DataType))
+ i--
+ dAtA[i] = 0x20
+ }
+ if len(m.Diversifier) > 0 {
+ i -= len(m.Diversifier)
+ copy(dAtA[i:], m.Diversifier)
+ i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Diversifier)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.Timestamp != 0 {
+ i = encodeVarintSolomachine(dAtA, i, uint64(m.Timestamp))
+ i--
+ dAtA[i] = 0x10
+ }
+ if m.Sequence != 0 {
+ i = encodeVarintSolomachine(dAtA, i, uint64(m.Sequence))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *HeaderData) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *HeaderData) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *HeaderData) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.NewDiversifier) > 0 {
+ i -= len(m.NewDiversifier)
+ copy(dAtA[i:], m.NewDiversifier)
+ i = encodeVarintSolomachine(dAtA, i, uint64(len(m.NewDiversifier)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.NewPubKey != nil {
+ {
+ size, err := m.NewPubKey.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintSolomachine(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ClientStateData) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ClientStateData) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ClientStateData) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.ClientState != nil {
+ {
+ size, err := m.ClientState.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintSolomachine(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Path) > 0 {
+ i -= len(m.Path)
+ copy(dAtA[i:], m.Path)
+ i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Path)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ConsensusStateData) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ConsensusStateData) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ConsensusStateData) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.ConsensusState != nil {
+ {
+ size, err := m.ConsensusState.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintSolomachine(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Path) > 0 {
+ i -= len(m.Path)
+ copy(dAtA[i:], m.Path)
+ i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Path)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ConnectionStateData) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ConnectionStateData) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ConnectionStateData) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Connection != nil {
+ {
+ size, err := m.Connection.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintSolomachine(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Path) > 0 {
+ i -= len(m.Path)
+ copy(dAtA[i:], m.Path)
+ i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Path)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ChannelStateData) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ChannelStateData) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ChannelStateData) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Channel != nil {
+ {
+ size, err := m.Channel.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintSolomachine(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Path) > 0 {
+ i -= len(m.Path)
+ copy(dAtA[i:], m.Path)
+ i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Path)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *PacketCommitmentData) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *PacketCommitmentData) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PacketCommitmentData) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Commitment) > 0 {
+ i -= len(m.Commitment)
+ copy(dAtA[i:], m.Commitment)
+ i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Commitment)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Path) > 0 {
+ i -= len(m.Path)
+ copy(dAtA[i:], m.Path)
+ i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Path)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *PacketAcknowledgementData) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *PacketAcknowledgementData) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PacketAcknowledgementData) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Acknowledgement) > 0 {
+ i -= len(m.Acknowledgement)
+ copy(dAtA[i:], m.Acknowledgement)
+ i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Acknowledgement)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Path) > 0 {
+ i -= len(m.Path)
+ copy(dAtA[i:], m.Path)
+ i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Path)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *PacketReceiptAbsenceData) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *PacketReceiptAbsenceData) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PacketReceiptAbsenceData) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Path) > 0 {
+ i -= len(m.Path)
+ copy(dAtA[i:], m.Path)
+ i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Path)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *NextSequenceRecvData) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *NextSequenceRecvData) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *NextSequenceRecvData) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.NextSeqRecv != 0 {
+ i = encodeVarintSolomachine(dAtA, i, uint64(m.NextSeqRecv))
+ i--
+ dAtA[i] = 0x10
+ }
+ if len(m.Path) > 0 {
+ i -= len(m.Path)
+ copy(dAtA[i:], m.Path)
+ i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Path)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
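+// encodeVarintSolomachine writes v as a protobuf varint ending just before
+// offset (the buffer is filled back to front) and returns the index of the
+// varint's first byte.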
+func encodeVarintSolomachine(dAtA []byte, offset int, v uint64) int {
+ offset -= sovSolomachine(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *ClientState) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Sequence != 0 {
+ n += 1 + sovSolomachine(uint64(m.Sequence))
+ }
+ if m.FrozenSequence != 0 {
+ n += 1 + sovSolomachine(uint64(m.FrozenSequence))
+ }
+ if m.ConsensusState != nil {
+ l = m.ConsensusState.Size()
+ n += 1 + l + sovSolomachine(uint64(l))
+ }
+ if m.AllowUpdateAfterProposal {
+ n += 2
+ }
+ return n
+}
+
+func (m *ConsensusState) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.PublicKey != nil {
+ l = m.PublicKey.Size()
+ n += 1 + l + sovSolomachine(uint64(l))
+ }
+ l = len(m.Diversifier)
+ if l > 0 {
+ n += 1 + l + sovSolomachine(uint64(l))
+ }
+ if m.Timestamp != 0 {
+ n += 1 + sovSolomachine(uint64(m.Timestamp))
+ }
+ return n
+}
+
+func (m *Header) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Sequence != 0 {
+ n += 1 + sovSolomachine(uint64(m.Sequence))
+ }
+ if m.Timestamp != 0 {
+ n += 1 + sovSolomachine(uint64(m.Timestamp))
+ }
+ l = len(m.Signature)
+ if l > 0 {
+ n += 1 + l + sovSolomachine(uint64(l))
+ }
+ if m.NewPublicKey != nil {
+ l = m.NewPublicKey.Size()
+ n += 1 + l + sovSolomachine(uint64(l))
+ }
+ l = len(m.NewDiversifier)
+ if l > 0 {
+ n += 1 + l + sovSolomachine(uint64(l))
+ }
+ return n
+}
+
+func (m *Misbehaviour) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.ClientId)
+ if l > 0 {
+ n += 1 + l + sovSolomachine(uint64(l))
+ }
+ if m.Sequence != 0 {
+ n += 1 + sovSolomachine(uint64(m.Sequence))
+ }
+ if m.SignatureOne != nil {
+ l = m.SignatureOne.Size()
+ n += 1 + l + sovSolomachine(uint64(l))
+ }
+ if m.SignatureTwo != nil {
+ l = m.SignatureTwo.Size()
+ n += 1 + l + sovSolomachine(uint64(l))
+ }
+ return n
+}
+
+func (m *SignatureAndData) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Signature)
+ if l > 0 {
+ n += 1 + l + sovSolomachine(uint64(l))
+ }
+ if m.DataType != 0 {
+ n += 1 + sovSolomachine(uint64(m.DataType))
+ }
+ l = len(m.Data)
+ if l > 0 {
+ n += 1 + l + sovSolomachine(uint64(l))
+ }
+ if m.Timestamp != 0 {
+ n += 1 + sovSolomachine(uint64(m.Timestamp))
+ }
+ return n
+}
+
+func (m *TimestampedSignatureData) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.SignatureData)
+ if l > 0 {
+ n += 1 + l + sovSolomachine(uint64(l))
+ }
+ if m.Timestamp != 0 {
+ n += 1 + sovSolomachine(uint64(m.Timestamp))
+ }
+ return n
+}
+
+func (m *SignBytes) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Sequence != 0 {
+ n += 1 + sovSolomachine(uint64(m.Sequence))
+ }
+ if m.Timestamp != 0 {
+ n += 1 + sovSolomachine(uint64(m.Timestamp))
+ }
+ l = len(m.Diversifier)
+ if l > 0 {
+ n += 1 + l + sovSolomachine(uint64(l))
+ }
+ if m.DataType != 0 {
+ n += 1 + sovSolomachine(uint64(m.DataType))
+ }
+ l = len(m.Data)
+ if l > 0 {
+ n += 1 + l + sovSolomachine(uint64(l))
+ }
+ return n
+}
+
+func (m *HeaderData) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.NewPubKey != nil {
+ l = m.NewPubKey.Size()
+ n += 1 + l + sovSolomachine(uint64(l))
+ }
+ l = len(m.NewDiversifier)
+ if l > 0 {
+ n += 1 + l + sovSolomachine(uint64(l))
+ }
+ return n
+}
+
+func (m *ClientStateData) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Path)
+ if l > 0 {
+ n += 1 + l + sovSolomachine(uint64(l))
+ }
+ if m.ClientState != nil {
+ l = m.ClientState.Size()
+ n += 1 + l + sovSolomachine(uint64(l))
+ }
+ return n
+}
+
+func (m *ConsensusStateData) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Path)
+ if l > 0 {
+ n += 1 + l + sovSolomachine(uint64(l))
+ }
+ if m.ConsensusState != nil {
+ l = m.ConsensusState.Size()
+ n += 1 + l + sovSolomachine(uint64(l))
+ }
+ return n
+}
+
+func (m *ConnectionStateData) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Path)
+ if l > 0 {
+ n += 1 + l + sovSolomachine(uint64(l))
+ }
+ if m.Connection != nil {
+ l = m.Connection.Size()
+ n += 1 + l + sovSolomachine(uint64(l))
+ }
+ return n
+}
+
+func (m *ChannelStateData) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Path)
+ if l > 0 {
+ n += 1 + l + sovSolomachine(uint64(l))
+ }
+ if m.Channel != nil {
+ l = m.Channel.Size()
+ n += 1 + l + sovSolomachine(uint64(l))
+ }
+ return n
+}
+
+func (m *PacketCommitmentData) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Path)
+ if l > 0 {
+ n += 1 + l + sovSolomachine(uint64(l))
+ }
+ l = len(m.Commitment)
+ if l > 0 {
+ n += 1 + l + sovSolomachine(uint64(l))
+ }
+ return n
+}
+
+func (m *PacketAcknowledgementData) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Path)
+ if l > 0 {
+ n += 1 + l + sovSolomachine(uint64(l))
+ }
+ l = len(m.Acknowledgement)
+ if l > 0 {
+ n += 1 + l + sovSolomachine(uint64(l))
+ }
+ return n
+}
+
+func (m *PacketReceiptAbsenceData) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Path)
+ if l > 0 {
+ n += 1 + l + sovSolomachine(uint64(l))
+ }
+ return n
+}
+
+func (m *NextSequenceRecvData) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Path)
+ if l > 0 {
+ n += 1 + l + sovSolomachine(uint64(l))
+ }
+ if m.NextSeqRecv != 0 {
+ n += 1 + sovSolomachine(uint64(m.NextSeqRecv))
+ }
+ return n
+}
+
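+// sovSolomachine returns the number of bytes needed to encode x as a varint;
+// sozSolomachine does the same for the zig-zag (sint) encoding.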
+func sovSolomachine(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozSolomachine(x uint64) (n int) {
+ return sovSolomachine(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *ClientState) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ClientState: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ClientState: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Sequence", wireType)
+ }
+ m.Sequence = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Sequence |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field FrozenSequence", wireType)
+ }
+ m.FrozenSequence = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.FrozenSequence |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ConsensusState", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ConsensusState == nil {
+ m.ConsensusState = &ConsensusState{}
+ }
+ if err := m.ConsensusState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AllowUpdateAfterProposal", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.AllowUpdateAfterProposal = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipSolomachine(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ConsensusState) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ConsensusState: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ConsensusState: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PublicKey", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.PublicKey == nil {
+ m.PublicKey = &types.Any{}
+ }
+ if err := m.PublicKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Diversifier", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Diversifier = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType)
+ }
+ m.Timestamp = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Timestamp |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipSolomachine(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Header) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Header: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Header: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Sequence", wireType)
+ }
+ m.Sequence = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Sequence |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType)
+ }
+ m.Timestamp = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Timestamp |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Signature", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Signature = append(m.Signature[:0], dAtA[iNdEx:postIndex]...)
+ if m.Signature == nil {
+ m.Signature = []byte{}
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NewPublicKey", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.NewPublicKey == nil {
+ m.NewPublicKey = &types.Any{}
+ }
+ if err := m.NewPublicKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NewDiversifier", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.NewDiversifier = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipSolomachine(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Misbehaviour) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Misbehaviour: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Misbehaviour: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClientId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ClientId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Sequence", wireType)
+ }
+ m.Sequence = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Sequence |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SignatureOne", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.SignatureOne == nil {
+ m.SignatureOne = &SignatureAndData{}
+ }
+ if err := m.SignatureOne.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SignatureTwo", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.SignatureTwo == nil {
+ m.SignatureTwo = &SignatureAndData{}
+ }
+ if err := m.SignatureTwo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipSolomachine(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SignatureAndData) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SignatureAndData: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SignatureAndData: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Signature", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Signature = append(m.Signature[:0], dAtA[iNdEx:postIndex]...)
+ if m.Signature == nil {
+ m.Signature = []byte{}
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DataType", wireType)
+ }
+ m.DataType = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.DataType |= DataType(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...)
+ if m.Data == nil {
+ m.Data = []byte{}
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType)
+ }
+ m.Timestamp = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Timestamp |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipSolomachine(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *TimestampedSignatureData) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: TimestampedSignatureData: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: TimestampedSignatureData: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SignatureData", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.SignatureData = append(m.SignatureData[:0], dAtA[iNdEx:postIndex]...)
+ if m.SignatureData == nil {
+ m.SignatureData = []byte{}
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType)
+ }
+ m.Timestamp = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Timestamp |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipSolomachine(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SignBytes) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SignBytes: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SignBytes: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Sequence", wireType)
+ }
+ m.Sequence = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Sequence |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType)
+ }
+ m.Timestamp = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Timestamp |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Diversifier", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Diversifier = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DataType", wireType)
+ }
+ m.DataType = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.DataType |= DataType(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...)
+ if m.Data == nil {
+ m.Data = []byte{}
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipSolomachine(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *HeaderData) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: HeaderData: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: HeaderData: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NewPubKey", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.NewPubKey == nil {
+ m.NewPubKey = &types.Any{}
+ }
+ if err := m.NewPubKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NewDiversifier", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.NewDiversifier = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipSolomachine(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ClientStateData) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ClientStateData: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ClientStateData: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Path = append(m.Path[:0], dAtA[iNdEx:postIndex]...)
+ if m.Path == nil {
+ m.Path = []byte{}
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClientState", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ClientState == nil {
+ m.ClientState = &types.Any{}
+ }
+ if err := m.ClientState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipSolomachine(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ConsensusStateData) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ConsensusStateData: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ConsensusStateData: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Path = append(m.Path[:0], dAtA[iNdEx:postIndex]...)
+ if m.Path == nil {
+ m.Path = []byte{}
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ConsensusState", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ConsensusState == nil {
+ m.ConsensusState = &types.Any{}
+ }
+ if err := m.ConsensusState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipSolomachine(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ConnectionStateData) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ConnectionStateData: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ConnectionStateData: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Path = append(m.Path[:0], dAtA[iNdEx:postIndex]...)
+ if m.Path == nil {
+ m.Path = []byte{}
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Connection", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Connection == nil {
+ m.Connection = &types1.ConnectionEnd{}
+ }
+ if err := m.Connection.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipSolomachine(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ChannelStateData) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ChannelStateData: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ChannelStateData: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Path = append(m.Path[:0], dAtA[iNdEx:postIndex]...)
+ if m.Path == nil {
+ m.Path = []byte{}
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Channel", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Channel == nil {
+ m.Channel = &types2.Channel{}
+ }
+ if err := m.Channel.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipSolomachine(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PacketCommitmentData) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PacketCommitmentData: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PacketCommitmentData: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Path = append(m.Path[:0], dAtA[iNdEx:postIndex]...)
+ if m.Path == nil {
+ m.Path = []byte{}
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Commitment", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Commitment = append(m.Commitment[:0], dAtA[iNdEx:postIndex]...)
+ if m.Commitment == nil {
+ m.Commitment = []byte{}
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipSolomachine(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PacketAcknowledgementData) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PacketAcknowledgementData: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PacketAcknowledgementData: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Path = append(m.Path[:0], dAtA[iNdEx:postIndex]...)
+ if m.Path == nil {
+ m.Path = []byte{}
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Acknowledgement", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Acknowledgement = append(m.Acknowledgement[:0], dAtA[iNdEx:postIndex]...)
+ if m.Acknowledgement == nil {
+ m.Acknowledgement = []byte{}
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipSolomachine(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PacketReceiptAbsenceData) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PacketReceiptAbsenceData: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PacketReceiptAbsenceData: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Path = append(m.Path[:0], dAtA[iNdEx:postIndex]...)
+ if m.Path == nil {
+ m.Path = []byte{}
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipSolomachine(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *NextSequenceRecvData) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: NextSequenceRecvData: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: NextSequenceRecvData: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Path = append(m.Path[:0], dAtA[iNdEx:postIndex]...)
+ if m.Path == nil {
+ m.Path = []byte{}
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NextSeqRecv", wireType)
+ }
+ m.NextSeqRecv = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.NextSeqRecv |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipSolomachine(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipSolomachine(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthSolomachine
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupSolomachine
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthSolomachine
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthSolomachine = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowSolomachine = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupSolomachine = fmt.Errorf("proto: unexpected end of group")
+)
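Editorial note (not part of the patch): the generated Unmarshal functions above all hand-decode the protobuf wire format — read a varint tag, dispatch on the field number, and for length-delimited fields (wire type 2) read a varint length prefix followed by that many payload bytes. As an illustrative sketch only, the same length-delimited decoding step looks like this with the standard library; decodeBytesField is a name chosen here for illustration:

package main

import (
	"encoding/binary"
	"fmt"
)

// decodeBytesField reads one length-delimited protobuf field payload:
// a varint length prefix followed by that many bytes. It returns the
// payload and the total number of bytes consumed.
func decodeBytesField(buf []byte) ([]byte, int, error) {
	length, n := binary.Uvarint(buf)
	if n <= 0 {
		return nil, 0, fmt.Errorf("invalid varint length prefix")
	}
	end := n + int(length)
	if end > len(buf) || end < n {
		return nil, 0, fmt.Errorf("length prefix exceeds buffer")
	}
	return append([]byte{}, buf[n:end]...), end, nil
}

func main() {
	// 0x02 is the varint length prefix for the two-byte payload "hi".
	payload, consumed, err := decodeBytesField([]byte{0x02, 'h', 'i'})
	fmt.Println(string(payload), consumed, err) // hi 3 <nil>
}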
diff --git a/light-clients/06-solomachine/types/solomachine_test.go b/light-clients/06-solomachine/types/solomachine_test.go
new file mode 100644
index 00000000..50555e45
--- /dev/null
+++ b/light-clients/06-solomachine/types/solomachine_test.go
@@ -0,0 +1,113 @@
+package types_test
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+ "github.com/stretchr/testify/suite"
+
+ codectypes "github.com/cosmos/cosmos-sdk/codec/types"
+ cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec"
+ "github.com/cosmos/cosmos-sdk/crypto/keys/secp256k1"
+ cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types"
+ "github.com/cosmos/cosmos-sdk/testutil/testdata"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/06-solomachine/types"
+ ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+)
+
+type SoloMachineTestSuite struct {
+ suite.Suite
+
+ solomachine *ibctesting.Solomachine // singlesig public key
+ solomachineMulti *ibctesting.Solomachine // multisig public key
+ coordinator *ibctesting.Coordinator
+
+ // testing chain used for convenience and readability
+ chainA *ibctesting.TestChain
+ chainB *ibctesting.TestChain
+
+ store sdk.KVStore
+}
+
+func (suite *SoloMachineTestSuite) SetupTest() {
+ suite.coordinator = ibctesting.NewCoordinator(suite.T(), 2)
+ suite.chainA = suite.coordinator.GetChain(ibctesting.GetChainID(0))
+ suite.chainB = suite.coordinator.GetChain(ibctesting.GetChainID(1))
+
+ suite.solomachine = ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "solomachinesingle", "testing", 1)
+ suite.solomachineMulti = ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "solomachinemulti", "testing", 4)
+
+ suite.store = suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), exported.Solomachine)
+}
+
+func TestSoloMachineTestSuite(t *testing.T) {
+ suite.Run(t, new(SoloMachineTestSuite))
+}
+
+func (suite *SoloMachineTestSuite) GetSequenceFromStore() uint64 {
+ bz := suite.store.Get(host.ClientStateKey())
+ suite.Require().NotNil(bz)
+
+ var clientState exported.ClientState
+ err := suite.chainA.Codec.UnmarshalInterface(bz, &clientState)
+ suite.Require().NoError(err)
+ return clientState.GetLatestHeight().GetRevisionHeight()
+}
+
+func (suite *SoloMachineTestSuite) GetInvalidProof() []byte {
+ invalidProof, err := suite.chainA.Codec.MarshalBinaryBare(&types.TimestampedSignatureData{Timestamp: suite.solomachine.Time})
+ suite.Require().NoError(err)
+
+ return invalidProof
+}
+
+func TestUnpackInterfaces_Header(t *testing.T) {
+ registry := testdata.NewTestInterfaceRegistry()
+ cryptocodec.RegisterInterfaces(registry)
+
+ pk := secp256k1.GenPrivKey().PubKey().(cryptotypes.PubKey)
+ any, err := codectypes.NewAnyWithValue(pk)
+ require.NoError(t, err)
+
+ header := types.Header{
+ NewPublicKey: any,
+ }
+ bz, err := header.Marshal()
+ require.NoError(t, err)
+
+ var header2 types.Header
+ err = header2.Unmarshal(bz)
+ require.NoError(t, err)
+
+ err = codectypes.UnpackInterfaces(header2, registry)
+ require.NoError(t, err)
+
+ require.Equal(t, pk, header2.NewPublicKey.GetCachedValue())
+}
+
+func TestUnpackInterfaces_HeaderData(t *testing.T) {
+ registry := testdata.NewTestInterfaceRegistry()
+ cryptocodec.RegisterInterfaces(registry)
+
+ pk := secp256k1.GenPrivKey().PubKey().(cryptotypes.PubKey)
+ any, err := codectypes.NewAnyWithValue(pk)
+ require.NoError(t, err)
+
+ hd := types.HeaderData{
+ NewPubKey: any,
+ }
+ bz, err := hd.Marshal()
+ require.NoError(t, err)
+
+ var hd2 types.HeaderData
+ err = hd2.Unmarshal(bz)
+ require.NoError(t, err)
+
+ err = codectypes.UnpackInterfaces(hd2, registry)
+ require.NoError(t, err)
+
+ require.Equal(t, pk, hd2.NewPubKey.GetCachedValue())
+}
diff --git a/light-clients/06-solomachine/types/update.go b/light-clients/06-solomachine/types/update.go
new file mode 100644
index 00000000..4cf31fd9
--- /dev/null
+++ b/light-clients/06-solomachine/types/update.go
@@ -0,0 +1,89 @@
+package types
+
+import (
+ "github.com/cosmos/cosmos-sdk/codec"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+)
+
+// CheckHeaderAndUpdateState checks if the provided header is valid and updates
+// the consensus state if appropriate. It returns an error if:
+// - the header provided is not parseable to a solo machine header
+// - the header sequence does not match the current sequence
+// - the header timestamp is less than the consensus state timestamp
+// - the currently registered public key did not provide the update signature
+func (cs ClientState) CheckHeaderAndUpdateState(
+ ctx sdk.Context, cdc codec.BinaryMarshaler, clientStore sdk.KVStore,
+ header exported.Header,
+) (exported.ClientState, exported.ConsensusState, error) {
+ smHeader, ok := header.(*Header)
+ if !ok {
+ return nil, nil, sdkerrors.Wrapf(
+ clienttypes.ErrInvalidHeader, "header type %T, expected %T", header, &Header{},
+ )
+ }
+
+ if err := checkHeader(cdc, &cs, smHeader); err != nil {
+ return nil, nil, err
+ }
+
+ clientState, consensusState := update(&cs, smHeader)
+ return clientState, consensusState, nil
+}
+
+// checkHeader checks if the Solo Machine update signature is valid.
+func checkHeader(cdc codec.BinaryMarshaler, clientState *ClientState, header *Header) error {
+ // assert update sequence is current sequence
+ if header.Sequence != clientState.Sequence {
+ return sdkerrors.Wrapf(
+ clienttypes.ErrInvalidHeader,
+ "header sequence does not match the client state sequence (%d != %d)", header.Sequence, clientState.Sequence,
+ )
+ }
+
+ // assert update timestamp is not less than current consensus state timestamp
+ if header.Timestamp < clientState.ConsensusState.Timestamp {
+ return sdkerrors.Wrapf(
+ clienttypes.ErrInvalidHeader,
+ "header timestamp is less than the consensus state timestamp (%d < %d)", header.Timestamp, clientState.ConsensusState.Timestamp,
+ )
+ }
+
+ // assert currently registered public key signed over the new public key with correct sequence
+ data, err := HeaderSignBytes(cdc, header)
+ if err != nil {
+ return err
+ }
+
+ sigData, err := UnmarshalSignatureData(cdc, header.Signature)
+ if err != nil {
+ return err
+ }
+
+ publicKey, err := clientState.ConsensusState.GetPubKey()
+ if err != nil {
+ return err
+ }
+
+ if err := VerifySignature(publicKey, data, sigData); err != nil {
+ return sdkerrors.Wrap(ErrInvalidHeader, err.Error())
+ }
+
+ return nil
+}
+
+// update the consensus state to the new public key and an incremented sequence
+func update(clientState *ClientState, header *Header) (*ClientState, *ConsensusState) {
+ consensusState := &ConsensusState{
+ PublicKey: header.NewPublicKey,
+ Diversifier: header.NewDiversifier,
+ Timestamp: header.Timestamp,
+ }
+
+ // increment sequence number
+ clientState.Sequence++
+ clientState.ConsensusState = consensusState
+ return clientState, consensusState
+}
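Editorial note (not part of the patch): the signature verification in checkHeader is preceded by two metadata checks — the header sequence must equal the client state sequence, and the header timestamp may not be earlier than the consensus state timestamp. The sketch below restates just those two checks on plain values; validHeaderMetadata is a hypothetical helper used for illustration only:

package main

import "fmt"

// validHeaderMetadata mirrors the sequence and timestamp checks in
// checkHeader, stated on plain uint64 values.
func validHeaderMetadata(clientSeq, headerSeq, consTimestamp, headerTimestamp uint64) error {
	if headerSeq != clientSeq {
		return fmt.Errorf("header sequence %d does not match client state sequence %d", headerSeq, clientSeq)
	}
	if headerTimestamp < consTimestamp {
		return fmt.Errorf("header timestamp %d is less than consensus state timestamp %d", headerTimestamp, consTimestamp)
	}
	return nil
}

func main() {
	fmt.Println(validHeaderMetadata(5, 5, 100, 100)) // <nil>
	fmt.Println(validHeaderMetadata(5, 6, 100, 100)) // sequence mismatch error
}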
diff --git a/light-clients/06-solomachine/types/update_test.go b/light-clients/06-solomachine/types/update_test.go
new file mode 100644
index 00000000..e49992cb
--- /dev/null
+++ b/light-clients/06-solomachine/types/update_test.go
@@ -0,0 +1,181 @@
+package types_test
+
+import (
+ codectypes "github.com/cosmos/cosmos-sdk/codec/types"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/06-solomachine/types"
+ ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
+ ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+)
+
+func (suite *SoloMachineTestSuite) TestCheckHeaderAndUpdateState() {
+ var (
+ clientState exported.ClientState
+ header exported.Header
+ )
+
+ // test singlesig and multisig public keys
+ for _, solomachine := range []*ibctesting.Solomachine{suite.solomachine, suite.solomachineMulti} {
+
+ testCases := []struct {
+ name string
+ setup func()
+ expPass bool
+ }{
+ {
+ "successful update",
+ func() {
+ clientState = solomachine.ClientState()
+ header = solomachine.CreateHeader()
+ },
+ true,
+ },
+ {
+ "wrong client state type",
+ func() {
+ clientState = &ibctmtypes.ClientState{}
+ header = solomachine.CreateHeader()
+ },
+ false,
+ },
+ {
+ "invalid header type",
+ func() {
+ clientState = solomachine.ClientState()
+ header = &ibctmtypes.Header{}
+ },
+ false,
+ },
+ {
+ "wrong sequence in header",
+ func() {
+ clientState = solomachine.ClientState()
+ // store in temp before assigning to interface type
+ h := solomachine.CreateHeader()
+ h.Sequence++
+ header = h
+ },
+ false,
+ },
+ {
+ "invalid header Signature",
+ func() {
+ clientState = solomachine.ClientState()
+ h := solomachine.CreateHeader()
+ h.Signature = suite.GetInvalidProof()
+ header = h
+ }, false,
+ },
+ {
+ "invalid timestamp in header",
+ func() {
+ clientState = solomachine.ClientState()
+ h := solomachine.CreateHeader()
+ h.Timestamp--
+ header = h
+ }, false,
+ },
+ {
+ "signature uses wrong sequence",
+ func() {
+ clientState = solomachine.ClientState()
+ solomachine.Sequence++
+ header = solomachine.CreateHeader()
+ },
+ false,
+ },
+ {
+ "signature uses new pubkey to sign",
+ func() {
+ // store in temp before assigning to interface type
+ cs := solomachine.ClientState()
+ h := solomachine.CreateHeader()
+
+ publicKey, err := codectypes.NewAnyWithValue(solomachine.PublicKey)
+ suite.NoError(err)
+
+ data := &types.HeaderData{
+ NewPubKey: publicKey,
+ NewDiversifier: h.NewDiversifier,
+ }
+
+ dataBz, err := suite.chainA.Codec.MarshalBinaryBare(data)
+ suite.Require().NoError(err)
+
+ // generate invalid signature
+ signBytes := &types.SignBytes{
+ Sequence: cs.Sequence,
+ Timestamp: solomachine.Time,
+ Diversifier: solomachine.Diversifier,
+ DataType: types.CLIENT,
+ Data: dataBz,
+ }
+
+ signBz, err := suite.chainA.Codec.MarshalBinaryBare(signBytes)
+ suite.Require().NoError(err)
+
+ sig := solomachine.GenerateSignature(signBz)
+ suite.Require().NoError(err)
+ h.Signature = sig
+
+ clientState = cs
+ header = h
+
+ },
+ false,
+ },
+ {
+ "signature signs over old pubkey",
+ func() {
+ // store in temp before assigning to interface type
+ cs := solomachine.ClientState()
+ oldPubKey := solomachine.PublicKey
+ h := solomachine.CreateHeader()
+
+ // generate invalid signature
+ data := append(sdk.Uint64ToBigEndian(cs.Sequence), oldPubKey.Bytes()...)
+ sig := solomachine.GenerateSignature(data)
+ h.Signature = sig
+
+ clientState = cs
+ header = h
+ },
+ false,
+ },
+ {
+ "consensus state public key is nil",
+ func() {
+ cs := solomachine.ClientState()
+ cs.ConsensusState.PublicKey = nil
+ clientState = cs
+ header = solomachine.CreateHeader()
+ },
+ false,
+ },
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+
+ suite.Run(tc.name, func() {
+ // setup test
+ tc.setup()
+
+ clientState, consensusState, err := clientState.CheckHeaderAndUpdateState(suite.chainA.GetContext(), suite.chainA.Codec, suite.store, header)
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ suite.Require().Equal(header.(*types.Header).NewPublicKey, clientState.(*types.ClientState).ConsensusState.PublicKey)
+ suite.Require().Equal(uint64(0), clientState.(*types.ClientState).FrozenSequence)
+ suite.Require().Equal(header.(*types.Header).Sequence+1, clientState.(*types.ClientState).Sequence)
+ suite.Require().Equal(consensusState, clientState.(*types.ClientState).ConsensusState)
+ } else {
+ suite.Require().Error(err)
+ suite.Require().Nil(clientState)
+ suite.Require().Nil(consensusState)
+ }
+ })
+ }
+ }
+}
diff --git a/light-clients/07-tendermint/doc.go b/light-clients/07-tendermint/doc.go
new file mode 100644
index 00000000..26aa430a
--- /dev/null
+++ b/light-clients/07-tendermint/doc.go
@@ -0,0 +1,5 @@
+/*
+Package tendermint implements concrete `ConsensusState`, `Header`,
+`Misbehaviour` and `Equivocation` types for the Tendermint consensus light client.
+*/
+package tendermint
diff --git a/light-clients/07-tendermint/module.go b/light-clients/07-tendermint/module.go
new file mode 100644
index 00000000..4c5cc2f9
--- /dev/null
+++ b/light-clients/07-tendermint/module.go
@@ -0,0 +1,10 @@
+package tendermint
+
+import (
+ "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
+)
+
+// Name returns the IBC client name
+func Name() string {
+ return types.SubModuleName
+}
diff --git a/light-clients/07-tendermint/types/client_state.go b/light-clients/07-tendermint/types/client_state.go
new file mode 100644
index 00000000..c2bb5239
--- /dev/null
+++ b/light-clients/07-tendermint/types/client_state.go
@@ -0,0 +1,532 @@
+package types
+
+import (
+ "strings"
+ "time"
+
+ ics23 "github.com/confio/ics23/go"
+ "github.com/tendermint/tendermint/light"
+
+ "github.com/cosmos/cosmos-sdk/codec"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
+ channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+ commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
+ host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+)
+
+var _ exported.ClientState = (*ClientState)(nil)
+
+// NewClientState creates a new ClientState instance
+func NewClientState(
+ chainID string, trustLevel Fraction,
+ trustingPeriod, ubdPeriod, maxClockDrift time.Duration,
+ latestHeight clienttypes.Height, specs []*ics23.ProofSpec,
+ upgradePath []string, allowUpdateAfterExpiry, allowUpdateAfterMisbehaviour bool,
+) *ClientState {
+ return &ClientState{
+ ChainId: chainID,
+ TrustLevel: trustLevel,
+ TrustingPeriod: trustingPeriod,
+ UnbondingPeriod: ubdPeriod,
+ MaxClockDrift: maxClockDrift,
+ LatestHeight: latestHeight,
+ FrozenHeight: clienttypes.ZeroHeight(),
+ ProofSpecs: specs,
+ UpgradePath: upgradePath,
+ AllowUpdateAfterExpiry: allowUpdateAfterExpiry,
+ AllowUpdateAfterMisbehaviour: allowUpdateAfterMisbehaviour,
+ }
+}
+
+// GetChainID returns the chain-id
+func (cs ClientState) GetChainID() string {
+ return cs.ChainId
+}
+
+// ClientType is tendermint.
+func (cs ClientState) ClientType() string {
+ return exported.Tendermint
+}
+
+// GetLatestHeight returns latest block height.
+func (cs ClientState) GetLatestHeight() exported.Height {
+ return cs.LatestHeight
+}
+
+// IsFrozen returns true if the frozen height has been set.
+func (cs ClientState) IsFrozen() bool {
+ return !cs.FrozenHeight.IsZero()
+}
+
+// GetFrozenHeight returns the height at which client is frozen
+// NOTE: FrozenHeight is zero if client is unfrozen
+func (cs ClientState) GetFrozenHeight() exported.Height {
+ return cs.FrozenHeight
+}
+
+// IsExpired returns whether or not the client has passed the trusting period since the last
+// update (in which case no headers are considered valid).
+func (cs ClientState) IsExpired(latestTimestamp, now time.Time) bool {
+ expirationTime := latestTimestamp.Add(cs.TrustingPeriod)
+ return !expirationTime.After(now)
+}
+
+// Validate performs a basic validation of the client state fields.
+func (cs ClientState) Validate() error {
+ if strings.TrimSpace(cs.ChainId) == "" {
+ return sdkerrors.Wrap(ErrInvalidChainID, "chain id cannot be empty string")
+ }
+ if err := light.ValidateTrustLevel(cs.TrustLevel.ToTendermint()); err != nil {
+ return err
+ }
+ if cs.TrustingPeriod == 0 {
+ return sdkerrors.Wrap(ErrInvalidTrustingPeriod, "trusting period cannot be zero")
+ }
+ if cs.UnbondingPeriod == 0 {
+ return sdkerrors.Wrap(ErrInvalidUnbondingPeriod, "unbonding period cannot be zero")
+ }
+ if cs.MaxClockDrift == 0 {
+ return sdkerrors.Wrap(ErrInvalidMaxClockDrift, "max clock drift cannot be zero")
+ }
+ if cs.LatestHeight.RevisionHeight == 0 {
+ return sdkerrors.Wrapf(ErrInvalidHeaderHeight, "tendermint revision height cannot be zero")
+ }
+ if cs.TrustingPeriod >= cs.UnbondingPeriod {
+ return sdkerrors.Wrapf(
+ ErrInvalidTrustingPeriod,
+ "trusting period (%s) should be < unbonding period (%s)", cs.TrustingPeriod, cs.UnbondingPeriod,
+ )
+ }
+
+ if cs.ProofSpecs == nil {
+ return sdkerrors.Wrap(ErrInvalidProofSpecs, "proof specs cannot be nil for tm client")
+ }
+ for i, spec := range cs.ProofSpecs {
+ if spec == nil {
+ return sdkerrors.Wrapf(ErrInvalidProofSpecs, "proof spec cannot be nil at index: %d", i)
+ }
+ }
+ // UpgradePath may be empty, but if it isn't, each key must be non-empty
+ for i, k := range cs.UpgradePath {
+ if strings.TrimSpace(k) == "" {
+ return sdkerrors.Wrapf(clienttypes.ErrInvalidClient, "key in upgrade path at index %d cannot be empty", i)
+ }
+ }
+
+ return nil
+}
+
+// GetProofSpecs returns the proof specs the client expects for proof verification,
+// one spec for each position in the chained proof.
+func (cs ClientState) GetProofSpecs() []*ics23.ProofSpec {
+ return cs.ProofSpecs
+}
+
+// ZeroCustomFields returns a ClientState that is a copy of the current ClientState
+// with all client customizable fields zeroed out
+func (cs ClientState) ZeroCustomFields() exported.ClientState {
+ // copy over all chain-specified fields
+ // and leave custom fields empty
+ return &ClientState{
+ ChainId: cs.ChainId,
+ UnbondingPeriod: cs.UnbondingPeriod,
+ LatestHeight: cs.LatestHeight,
+ ProofSpecs: cs.ProofSpecs,
+ UpgradePath: cs.UpgradePath,
+ }
+}
+
+// Initialize will check that initial consensus state is a Tendermint consensus state
+// and will store ProcessedTime for initial consensus state as ctx.BlockTime()
+func (cs ClientState) Initialize(ctx sdk.Context, _ codec.BinaryMarshaler, clientStore sdk.KVStore, consState exported.ConsensusState) error {
+ if _, ok := consState.(*ConsensusState); !ok {
+ return sdkerrors.Wrapf(clienttypes.ErrInvalidConsensus, "invalid initial consensus state. expected type: %T, got: %T",
+ &ConsensusState{}, consState)
+ }
+ // set processed time with initial consensus state height equal to initial client state's latest height
+ SetProcessedTime(clientStore, cs.GetLatestHeight(), uint64(ctx.BlockTime().UnixNano()))
+ return nil
+}
+
+// VerifyClientState verifies a proof of the client state of the running chain
+// stored on the target machine
+func (cs ClientState) VerifyClientState(
+ store sdk.KVStore,
+ cdc codec.BinaryMarshaler,
+ height exported.Height,
+ prefix exported.Prefix,
+ counterpartyClientIdentifier string,
+ proof []byte,
+ clientState exported.ClientState,
+) error {
+ merkleProof, provingConsensusState, err := produceVerificationArgs(store, cdc, cs, height, prefix, proof)
+ if err != nil {
+ return err
+ }
+
+ clientPrefixedPath := commitmenttypes.NewMerklePath(host.FullClientStatePath(counterpartyClientIdentifier))
+ path, err := commitmenttypes.ApplyPrefix(prefix, clientPrefixedPath)
+ if err != nil {
+ return err
+ }
+
+ if clientState == nil {
+ return sdkerrors.Wrap(clienttypes.ErrInvalidClient, "client state cannot be empty")
+ }
+
+ _, ok := clientState.(*ClientState)
+ if !ok {
+ return sdkerrors.Wrapf(clienttypes.ErrInvalidClient, "invalid client type %T, expected %T", clientState, &ClientState{})
+ }
+
+ bz, err := cdc.MarshalInterface(clientState)
+ if err != nil {
+ return err
+ }
+
+ return merkleProof.VerifyMembership(cs.ProofSpecs, provingConsensusState.GetRoot(), path, bz)
+}
+
+// VerifyClientConsensusState verifies a proof of the consensus state of the
+// Tendermint client stored on the target machine.
+func (cs ClientState) VerifyClientConsensusState(
+ store sdk.KVStore,
+ cdc codec.BinaryMarshaler,
+ height exported.Height,
+ counterpartyClientIdentifier string,
+ consensusHeight exported.Height,
+ prefix exported.Prefix,
+ proof []byte,
+ consensusState exported.ConsensusState,
+) error {
+ merkleProof, provingConsensusState, err := produceVerificationArgs(store, cdc, cs, height, prefix, proof)
+ if err != nil {
+ return err
+ }
+
+ clientPrefixedPath := commitmenttypes.NewMerklePath(host.FullConsensusStatePath(counterpartyClientIdentifier, consensusHeight))
+ path, err := commitmenttypes.ApplyPrefix(prefix, clientPrefixedPath)
+ if err != nil {
+ return err
+ }
+
+ if consensusState == nil {
+ return sdkerrors.Wrap(clienttypes.ErrInvalidConsensus, "consensus state cannot be empty")
+ }
+
+ _, ok := consensusState.(*ConsensusState)
+ if !ok {
+ return sdkerrors.Wrapf(clienttypes.ErrInvalidConsensus, "invalid consensus type %T, expected %T", consensusState, &ConsensusState{})
+ }
+
+ bz, err := cdc.MarshalInterface(consensusState)
+ if err != nil {
+ return err
+ }
+
+ if err := merkleProof.VerifyMembership(cs.ProofSpecs, provingConsensusState.GetRoot(), path, bz); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// VerifyConnectionState verifies a proof of the connection state of the
+// specified connection end stored on the target machine.
+func (cs ClientState) VerifyConnectionState(
+ store sdk.KVStore,
+ cdc codec.BinaryMarshaler,
+ height exported.Height,
+ prefix exported.Prefix,
+ proof []byte,
+ connectionID string,
+ connectionEnd exported.ConnectionI,
+) error {
+ merkleProof, consensusState, err := produceVerificationArgs(store, cdc, cs, height, prefix, proof)
+ if err != nil {
+ return err
+ }
+
+ connectionPath := commitmenttypes.NewMerklePath(host.ConnectionPath(connectionID))
+ path, err := commitmenttypes.ApplyPrefix(prefix, connectionPath)
+ if err != nil {
+ return err
+ }
+
+ connection, ok := connectionEnd.(connectiontypes.ConnectionEnd)
+ if !ok {
+ return sdkerrors.Wrapf(sdkerrors.ErrInvalidType, "invalid connection type %T", connectionEnd)
+ }
+
+ bz, err := cdc.MarshalBinaryBare(&connection)
+ if err != nil {
+ return err
+ }
+
+ if err := merkleProof.VerifyMembership(cs.ProofSpecs, consensusState.GetRoot(), path, bz); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// VerifyChannelState verifies a proof of the channel state of the specified
+// channel end, under the specified port, stored on the target machine.
+func (cs ClientState) VerifyChannelState(
+ store sdk.KVStore,
+ cdc codec.BinaryMarshaler,
+ height exported.Height,
+ prefix exported.Prefix,
+ proof []byte,
+ portID,
+ channelID string,
+ channel exported.ChannelI,
+) error {
+ merkleProof, consensusState, err := produceVerificationArgs(store, cdc, cs, height, prefix, proof)
+ if err != nil {
+ return err
+ }
+
+ channelPath := commitmenttypes.NewMerklePath(host.ChannelPath(portID, channelID))
+ path, err := commitmenttypes.ApplyPrefix(prefix, channelPath)
+ if err != nil {
+ return err
+ }
+
+ channelEnd, ok := channel.(channeltypes.Channel)
+ if !ok {
+ return sdkerrors.Wrapf(sdkerrors.ErrInvalidType, "invalid channel type %T", channel)
+ }
+
+ bz, err := cdc.MarshalBinaryBare(&channelEnd)
+ if err != nil {
+ return err
+ }
+
+ if err := merkleProof.VerifyMembership(cs.ProofSpecs, consensusState.GetRoot(), path, bz); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// VerifyPacketCommitment verifies a proof of an outgoing packet commitment at
+// the specified port, specified channel, and specified sequence.
+func (cs ClientState) VerifyPacketCommitment(
+ store sdk.KVStore,
+ cdc codec.BinaryMarshaler,
+ height exported.Height,
+ currentTimestamp uint64,
+ delayPeriod uint64,
+ prefix exported.Prefix,
+ proof []byte,
+ portID,
+ channelID string,
+ sequence uint64,
+ commitmentBytes []byte,
+) error {
+ merkleProof, consensusState, err := produceVerificationArgs(store, cdc, cs, height, prefix, proof)
+ if err != nil {
+ return err
+ }
+
+ // check delay period has passed
+ if err := verifyDelayPeriodPassed(store, height, currentTimestamp, delayPeriod); err != nil {
+ return err
+ }
+
+ commitmentPath := commitmenttypes.NewMerklePath(host.PacketCommitmentPath(portID, channelID, sequence))
+ path, err := commitmenttypes.ApplyPrefix(prefix, commitmentPath)
+ if err != nil {
+ return err
+ }
+
+ if err := merkleProof.VerifyMembership(cs.ProofSpecs, consensusState.GetRoot(), path, commitmentBytes); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// VerifyPacketAcknowledgement verifies a proof of an incoming packet
+// acknowledgement at the specified port, specified channel, and specified sequence.
+func (cs ClientState) VerifyPacketAcknowledgement(
+ store sdk.KVStore,
+ cdc codec.BinaryMarshaler,
+ height exported.Height,
+ currentTimestamp uint64,
+ delayPeriod uint64,
+ prefix exported.Prefix,
+ proof []byte,
+ portID,
+ channelID string,
+ sequence uint64,
+ acknowledgement []byte,
+) error {
+ merkleProof, consensusState, err := produceVerificationArgs(store, cdc, cs, height, prefix, proof)
+ if err != nil {
+ return err
+ }
+
+ // check delay period has passed
+ if err := verifyDelayPeriodPassed(store, height, currentTimestamp, delayPeriod); err != nil {
+ return err
+ }
+
+ ackPath := commitmenttypes.NewMerklePath(host.PacketAcknowledgementPath(portID, channelID, sequence))
+ path, err := commitmenttypes.ApplyPrefix(prefix, ackPath)
+ if err != nil {
+ return err
+ }
+
+ if err := merkleProof.VerifyMembership(cs.ProofSpecs, consensusState.GetRoot(), path, channeltypes.CommitAcknowledgement(acknowledgement)); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// VerifyPacketReceiptAbsence verifies a proof of the absence of an
+// incoming packet receipt at the specified port, specified channel, and
+// specified sequence.
+func (cs ClientState) VerifyPacketReceiptAbsence(
+ store sdk.KVStore,
+ cdc codec.BinaryMarshaler,
+ height exported.Height,
+ currentTimestamp uint64,
+ delayPeriod uint64,
+ prefix exported.Prefix,
+ proof []byte,
+ portID,
+ channelID string,
+ sequence uint64,
+) error {
+ merkleProof, consensusState, err := produceVerificationArgs(store, cdc, cs, height, prefix, proof)
+ if err != nil {
+ return err
+ }
+
+ // check delay period has passed
+ if err := verifyDelayPeriodPassed(store, height, currentTimestamp, delayPeriod); err != nil {
+ return err
+ }
+
+ receiptPath := commitmenttypes.NewMerklePath(host.PacketReceiptPath(portID, channelID, sequence))
+ path, err := commitmenttypes.ApplyPrefix(prefix, receiptPath)
+ if err != nil {
+ return err
+ }
+
+ if err := merkleProof.VerifyNonMembership(cs.ProofSpecs, consensusState.GetRoot(), path); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// VerifyNextSequenceRecv verifies a proof of the next sequence number to be
+// received of the specified channel at the specified port.
+func (cs ClientState) VerifyNextSequenceRecv(
+ store sdk.KVStore,
+ cdc codec.BinaryMarshaler,
+ height exported.Height,
+ currentTimestamp uint64,
+ delayPeriod uint64,
+ prefix exported.Prefix,
+ proof []byte,
+ portID,
+ channelID string,
+ nextSequenceRecv uint64,
+) error {
+ merkleProof, consensusState, err := produceVerificationArgs(store, cdc, cs, height, prefix, proof)
+ if err != nil {
+ return err
+ }
+
+ // check delay period has passed
+ if err := verifyDelayPeriodPassed(store, height, currentTimestamp, delayPeriod); err != nil {
+ return err
+ }
+
+ nextSequenceRecvPath := commitmenttypes.NewMerklePath(host.NextSequenceRecvPath(portID, channelID))
+ path, err := commitmenttypes.ApplyPrefix(prefix, nextSequenceRecvPath)
+ if err != nil {
+ return err
+ }
+
+ bz := sdk.Uint64ToBigEndian(nextSequenceRecv)
+
+ if err := merkleProof.VerifyMembership(cs.ProofSpecs, consensusState.GetRoot(), path, bz); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// verifyDelayPeriodPassed ensures that at least delayPeriod has elapsed since the consensus state was submitted
+// before allowing verification to continue.
+func verifyDelayPeriodPassed(store sdk.KVStore, proofHeight exported.Height, currentTimestamp, delayPeriod uint64) error {
+ // check that executing chain's timestamp has passed consensusState's processed time + delay period
+ processedTime, ok := GetProcessedTime(store, proofHeight)
+ if !ok {
+ return sdkerrors.Wrapf(ErrProcessedTimeNotFound, "processed time not found for height: %s", proofHeight)
+ }
+ validTime := processedTime + delayPeriod
+ // NOTE: delay period is inclusive, so if currentTimestamp is validTime, then we return no error
+ if validTime > currentTimestamp {
+ return sdkerrors.Wrapf(ErrDelayPeriodNotPassed, "cannot verify packet until time: %d, current time: %d",
+ validTime, currentTimestamp)
+ }
+ return nil
+}
+
+// produceVerificationArgs performs the basic checks on the arguments that are
+// shared between the verification functions and returns the unmarshalled
+// merkle proof, the consensus state and an error if one occurred.
+func produceVerificationArgs(
+ store sdk.KVStore,
+ cdc codec.BinaryMarshaler,
+ cs ClientState,
+ height exported.Height,
+ prefix exported.Prefix,
+ proof []byte,
+) (merkleProof commitmenttypes.MerkleProof, consensusState *ConsensusState, err error) {
+ if cs.GetLatestHeight().LT(height) {
+ return commitmenttypes.MerkleProof{}, nil, sdkerrors.Wrapf(
+ sdkerrors.ErrInvalidHeight,
+ "client state height < proof height (%d < %d)", cs.GetLatestHeight(), height,
+ )
+ }
+
+ if cs.IsFrozen() && !cs.FrozenHeight.GT(height) {
+ return commitmenttypes.MerkleProof{}, nil, clienttypes.ErrClientFrozen
+ }
+
+ if prefix == nil {
+ return commitmenttypes.MerkleProof{}, nil, sdkerrors.Wrap(commitmenttypes.ErrInvalidPrefix, "prefix cannot be empty")
+ }
+
+ _, ok := prefix.(*commitmenttypes.MerklePrefix)
+ if !ok {
+ return commitmenttypes.MerkleProof{}, nil, sdkerrors.Wrapf(commitmenttypes.ErrInvalidPrefix, "invalid prefix type %T, expected *MerklePrefix", prefix)
+ }
+
+ if proof == nil {
+ return commitmenttypes.MerkleProof{}, nil, sdkerrors.Wrap(commitmenttypes.ErrInvalidProof, "proof cannot be empty")
+ }
+
+ if err = cdc.UnmarshalBinaryBare(proof, &merkleProof); err != nil {
+ return commitmenttypes.MerkleProof{}, nil, sdkerrors.Wrap(commitmenttypes.ErrInvalidProof, "failed to unmarshal proof into commitment merkle proof")
+ }
+
+ consensusState, err = GetConsensusState(store, cdc, height)
+ if err != nil {
+ return commitmenttypes.MerkleProof{}, nil, err
+ }
+
+ return merkleProof, consensusState, nil
+}
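Editorial note (not part of the patch): each packet verification method above follows the same shape — produceVerificationArgs unmarshals the proof and loads the consensus state, verifyDelayPeriodPassed gates on the stored processed time, the key path is prefixed, and the merkle proof is checked against the consensus state root. The delay gate reduces to unsigned arithmetic with an inclusive bound; the sketch below is illustration only, and delayPassed is a hypothetical helper:

package main

import "fmt"

// delayPassed restates verifyDelayPeriodPassed on plain nanosecond
// timestamps: verification is allowed once currentTimestamp reaches
// processedTime + delayPeriod (the bound is inclusive).
func delayPassed(processedTime, delayPeriod, currentTimestamp uint64) bool {
	return currentTimestamp >= processedTime+delayPeriod
}

func main() {
	fmt.Println(delayPassed(1_000, 500, 1_500)) // true: exactly at the inclusive bound
	fmt.Println(delayPassed(1_000, 500, 1_499)) // false: delay period not yet passed
}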
diff --git a/light-clients/07-tendermint/types/client_state_test.go b/light-clients/07-tendermint/types/client_state_test.go
new file mode 100644
index 00000000..744b4729
--- /dev/null
+++ b/light-clients/07-tendermint/types/client_state_test.go
@@ -0,0 +1,779 @@
+package types_test
+
+import (
+ "time"
+
+ ics23 "github.com/confio/ics23/go"
+
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+ commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
+ host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
+ ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+ ibcmock "github.com/cosmos/cosmos-sdk/x/ibc/testing/mock"
+)
+
+const (
+ testClientID = "clientidone"
+ testConnectionID = "connectionid"
+ testPortID = "testportid"
+ testChannelID = "testchannelid"
+ testSequence = 1
+)
+
+var (
+ invalidProof = []byte("invalid proof")
+)
+
+func (suite *TendermintTestSuite) TestValidate() {
+ testCases := []struct {
+ name string
+ clientState *types.ClientState
+ expPass bool
+ }{
+ {
+ name: "valid client",
+ clientState: types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false),
+ expPass: true,
+ },
+ {
+ name: "valid client with nil upgrade path",
+ clientState: types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), nil, false, false),
+ expPass: true,
+ },
+ {
+ name: "invalid chainID",
+ clientState: types.NewClientState(" ", types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false),
+ expPass: false,
+ },
+ {
+ name: "invalid trust level",
+ clientState: types.NewClientState(chainID, types.Fraction{Numerator: 0, Denominator: 1}, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false),
+ expPass: false,
+ },
+ {
+ name: "invalid trusting period",
+ clientState: types.NewClientState(chainID, types.DefaultTrustLevel, 0, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false),
+ expPass: false,
+ },
+ {
+ name: "invalid unbonding period",
+ clientState: types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, 0, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false),
+ expPass: false,
+ },
+ {
+ name: "invalid max clock drift",
+ clientState: types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, 0, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false),
+ expPass: false,
+ },
+ {
+ name: "invalid height",
+ clientState: types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, clienttypes.ZeroHeight(), commitmenttypes.GetSDKSpecs(), upgradePath, false, false),
+ expPass: false,
+ },
+ {
+ name: "trusting period not less than unbonding period",
+ clientState: types.NewClientState(chainID, types.DefaultTrustLevel, ubdPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false),
+ expPass: false,
+ },
+ {
+ name: "proof specs is nil",
+ clientState: types.NewClientState(chainID, types.DefaultTrustLevel, ubdPeriod, ubdPeriod, maxClockDrift, height, nil, upgradePath, false, false),
+ expPass: false,
+ },
+ {
+ name: "proof specs contains nil",
+ clientState: types.NewClientState(chainID, types.DefaultTrustLevel, ubdPeriod, ubdPeriod, maxClockDrift, height, []*ics23.ProofSpec{ics23.TendermintSpec, nil}, upgradePath, false, false),
+ expPass: false,
+ },
+ }
+
+ for _, tc := range testCases {
+ err := tc.clientState.Validate()
+ if tc.expPass {
+ suite.Require().NoError(err, tc.name)
+ } else {
+ suite.Require().Error(err, tc.name)
+ }
+ }
+}
+
+func (suite *TendermintTestSuite) TestInitialize() {
+
+ testCases := []struct {
+ name string
+ consensusState exported.ConsensusState
+ expPass bool
+ }{
+ {
+ name: "valid consensus",
+ consensusState: &types.ConsensusState{},
+ expPass: true,
+ },
+ {
+ name: "invalid consensus: consensus state is solomachine consensus",
+ consensusState: ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "solomachine", "", 2).ConsensusState(),
+ expPass: false,
+ },
+ }
+
+ clientA, err := suite.coordinator.CreateClient(suite.chainA, suite.chainB, exported.Tendermint)
+ suite.Require().NoError(err)
+
+ clientState := suite.chainA.GetClientState(clientA)
+ store := suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), clientA)
+
+ for _, tc := range testCases {
+ err := clientState.Initialize(suite.chainA.GetContext(), suite.chainA.Codec, store, tc.consensusState)
+ if tc.expPass {
+ suite.Require().NoError(err, "valid case returned an error")
+ } else {
+ suite.Require().Error(err, "invalid case didn't return an error")
+ }
+ }
+}
+
+func (suite *TendermintTestSuite) TestVerifyClientConsensusState() {
+ testCases := []struct {
+ name string
+ clientState *types.ClientState
+ consensusState *types.ConsensusState
+ prefix commitmenttypes.MerklePrefix
+ proof []byte
+ expPass bool
+ }{
+ // FIXME: uncomment
+ // {
+ // name: "successful verification",
+ // clientState: types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs()),
+ // consensusState: types.ConsensusState{
+ // Root: commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()),
+ // },
+ // prefix: commitmenttypes.NewMerklePrefix([]byte("ibc")),
+ // expPass: true,
+ // },
+ {
+ name: "ApplyPrefix failed",
+ clientState: types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false),
+ consensusState: &types.ConsensusState{
+ Root: commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()),
+ },
+ prefix: commitmenttypes.MerklePrefix{},
+ expPass: false,
+ },
+ {
+ name: "latest client height < height",
+ clientState: types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false),
+ consensusState: &types.ConsensusState{
+ Root: commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()),
+ },
+ prefix: commitmenttypes.NewMerklePrefix([]byte("ibc")),
+ expPass: false,
+ },
+ {
+ name: "client is frozen",
+ clientState: &types.ClientState{LatestHeight: height, FrozenHeight: clienttypes.NewHeight(height.RevisionNumber, height.RevisionHeight-1)},
+ consensusState: &types.ConsensusState{
+ Root: commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()),
+ },
+ prefix: commitmenttypes.NewMerklePrefix([]byte("ibc")),
+ expPass: false,
+ },
+ {
+ name: "proof verification failed",
+ clientState: types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false),
+ consensusState: &types.ConsensusState{
+ Root: commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()),
+ NextValidatorsHash: suite.valsHash,
+ },
+ prefix: commitmenttypes.NewMerklePrefix([]byte("ibc")),
+ proof: []byte{},
+ expPass: false,
+ },
+ }
+
+ for i, tc := range testCases {
+ tc := tc
+
+ err := tc.clientState.VerifyClientConsensusState(
+ nil, suite.cdc, height, "chainA", tc.clientState.LatestHeight, tc.prefix, tc.proof, tc.consensusState,
+ )
+
+ if tc.expPass {
+ suite.Require().NoError(err, "valid test case %d failed: %s", i, tc.name)
+ } else {
+ suite.Require().Error(err, "invalid test case %d passed: %s", i, tc.name)
+ }
+ }
+}
+
+// test verification of the connection on chainB being represented in the
+// light client on chainA
+func (suite *TendermintTestSuite) TestVerifyConnectionState() {
+ var (
+ clientState *types.ClientState
+ proof []byte
+ proofHeight exported.Height
+ prefix commitmenttypes.MerklePrefix
+ )
+
+ testCases := []struct {
+ name string
+ malleate func()
+ expPass bool
+ }{
+ {
+ "successful verification", func() {}, true,
+ },
+ {
+ "ApplyPrefix failed", func() {
+ prefix = commitmenttypes.MerklePrefix{}
+ }, false,
+ },
+ {
+ "latest client height < height", func() {
+ proofHeight = clientState.LatestHeight.Increment()
+ }, false,
+ },
+ {
+ "client is frozen", func() {
+ clientState.FrozenHeight = clienttypes.NewHeight(0, 1)
+ }, false,
+ },
+ {
+ "proof verification failed", func() {
+ proof = invalidProof
+ }, false,
+ },
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+
+ suite.Run(tc.name, func() {
+ suite.SetupTest() // reset
+
+ // setup testing conditions
+ clientA, _, _, connB, _, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
+ connection := suite.chainB.GetConnection(connB)
+
+ var ok bool
+ clientStateI := suite.chainA.GetClientState(clientA)
+ clientState, ok = clientStateI.(*types.ClientState)
+ suite.Require().True(ok)
+
+ prefix = suite.chainB.GetPrefix()
+
+ // make connection proof
+ connectionKey := host.ConnectionKey(connB.ID)
+ proof, proofHeight = suite.chainB.QueryProof(connectionKey)
+
+ tc.malleate() // make changes as necessary
+
+ store := suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), clientA)
+
+ err := clientState.VerifyConnectionState(
+ store, suite.chainA.Codec, proofHeight, &prefix, proof, connB.ID, connection,
+ )
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+}
+
+// test verification of the channel on chainB being represented in the light
+// client on chainA
+func (suite *TendermintTestSuite) TestVerifyChannelState() {
+ var (
+ clientState *types.ClientState
+ proof []byte
+ proofHeight exported.Height
+ prefix commitmenttypes.MerklePrefix
+ )
+
+ testCases := []struct {
+ name string
+ malleate func()
+ expPass bool
+ }{
+ {
+ "successful verification", func() {}, true,
+ },
+ {
+ "ApplyPrefix failed", func() {
+ prefix = commitmenttypes.MerklePrefix{}
+ }, false,
+ },
+ {
+ "latest client height < height", func() {
+ proofHeight = clientState.LatestHeight.Increment()
+ }, false,
+ },
+ {
+ "client is frozen", func() {
+ clientState.FrozenHeight = clienttypes.NewHeight(0, 1)
+ }, false,
+ },
+ {
+ "proof verification failed", func() {
+ proof = invalidProof
+ }, false,
+ },
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+
+ suite.Run(tc.name, func() {
+ suite.SetupTest() // reset
+
+ // setup testing conditions
+ clientA, _, _, _, _, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
+ channel := suite.chainB.GetChannel(channelB)
+
+ var ok bool
+ clientStateI := suite.chainA.GetClientState(clientA)
+ clientState, ok = clientStateI.(*types.ClientState)
+ suite.Require().True(ok)
+
+ prefix = suite.chainB.GetPrefix()
+
+ // make channel proof
+ channelKey := host.ChannelKey(channelB.PortID, channelB.ID)
+ proof, proofHeight = suite.chainB.QueryProof(channelKey)
+
+ tc.malleate() // make changes as necessary
+
+ store := suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), clientA)
+
+ err := clientState.VerifyChannelState(
+ store, suite.chainA.Codec, proofHeight, &prefix, proof,
+ channelB.PortID, channelB.ID, channel,
+ )
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+}
+
+// test verification of the packet commitment on chainB being represented
+// in the light client on chainA. A send from chainB to chainA is simulated.
+func (suite *TendermintTestSuite) TestVerifyPacketCommitment() {
+ var (
+ clientState *types.ClientState
+ proof []byte
+ delayPeriod uint64
+ proofHeight exported.Height
+ prefix commitmenttypes.MerklePrefix
+ )
+
+ testCases := []struct {
+ name string
+ malleate func()
+ expPass bool
+ }{
+ {
+ "successful verification", func() {}, true,
+ },
+ {
+ name: "delay period has passed",
+ malleate: func() {
+ delayPeriod = uint64(time.Second.Nanoseconds())
+ },
+ expPass: true,
+ },
+ {
+ name: "delay period has not passed",
+ malleate: func() {
+ delayPeriod = uint64(time.Hour.Nanoseconds())
+ },
+ expPass: false,
+ },
+ {
+ "ApplyPrefix failed", func() {
+ prefix = commitmenttypes.MerklePrefix{}
+ }, false,
+ },
+ {
+ "latest client height < height", func() {
+ proofHeight = clientState.LatestHeight.Increment()
+ }, false,
+ },
+ {
+ "client is frozen", func() {
+ clientState.FrozenHeight = clienttypes.NewHeight(0, 1)
+ }, false,
+ },
+ {
+ "proof verification failed", func() {
+ proof = invalidProof
+ }, false,
+ },
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+
+ suite.Run(tc.name, func() {
+ suite.SetupTest() // reset
+
+ // setup testing conditions
+ clientA, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
+ packet := channeltypes.NewPacket(ibctesting.TestHash, 1, channelB.PortID, channelB.ID, channelA.PortID, channelA.ID, clienttypes.NewHeight(0, 100), 0)
+ err := suite.coordinator.SendPacket(suite.chainB, suite.chainA, packet, clientA)
+ suite.Require().NoError(err)
+
+ var ok bool
+ clientStateI := suite.chainA.GetClientState(clientA)
+ clientState, ok = clientStateI.(*types.ClientState)
+ suite.Require().True(ok)
+
+ prefix = suite.chainB.GetPrefix()
+
+ // make packet commitment proof
+ packetKey := host.PacketCommitmentKey(packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence())
+ proof, proofHeight = suite.chainB.QueryProof(packetKey)
+
+ tc.malleate() // make changes as necessary
+
+ store := suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), clientA)
+
+ currentTime := uint64(suite.chainA.GetContext().BlockTime().UnixNano())
+ commitment := channeltypes.CommitPacket(suite.chainA.App.IBCKeeper.Codec(), packet)
+ err = clientState.VerifyPacketCommitment(
+ store, suite.chainA.Codec, proofHeight, currentTime, delayPeriod, &prefix, proof,
+ packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence(), commitment,
+ )
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+}
+
+// test verification of the acknowledgement on chainB being represented
+// in the light client on chainA. A send and ack from chainA to chainB
+// is simulated.
+func (suite *TendermintTestSuite) TestVerifyPacketAcknowledgement() {
+ var (
+ clientState *types.ClientState
+ proof []byte
+ delayPeriod uint64
+ proofHeight exported.Height
+ prefix commitmenttypes.MerklePrefix
+ )
+
+ testCases := []struct {
+ name string
+ malleate func()
+ expPass bool
+ }{
+ {
+ "successful verification", func() {}, true,
+ },
+ {
+ name: "delay period has passed",
+ malleate: func() {
+ delayPeriod = uint64(time.Second.Nanoseconds())
+ },
+ expPass: true,
+ },
+ {
+ name: "delay period has not passed",
+ malleate: func() {
+ delayPeriod = uint64(time.Hour.Nanoseconds())
+ },
+ expPass: false,
+ },
+ {
+ "ApplyPrefix failed", func() {
+ prefix = commitmenttypes.MerklePrefix{}
+ }, false,
+ },
+ {
+ "latest client height < height", func() {
+ proofHeight = clientState.LatestHeight.Increment()
+ }, false,
+ },
+ {
+ "client is frozen", func() {
+ clientState.FrozenHeight = clienttypes.NewHeight(0, 1)
+ }, false,
+ },
+ {
+ "proof verification failed", func() {
+ proof = invalidProof
+ }, false,
+ },
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+
+ suite.Run(tc.name, func() {
+ suite.SetupTest() // reset
+
+ // setup testing conditions
+ clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
+ packet := channeltypes.NewPacket(ibctesting.TestHash, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.NewHeight(0, 100), 0)
+
+ // send packet
+ err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ suite.Require().NoError(err)
+
+ // write receipt and ack
+ err = suite.coordinator.RecvPacket(suite.chainA, suite.chainB, clientA, packet)
+ suite.Require().NoError(err)
+
+ var ok bool
+ clientStateI := suite.chainA.GetClientState(clientA)
+ clientState, ok = clientStateI.(*types.ClientState)
+ suite.Require().True(ok)
+
+ prefix = suite.chainB.GetPrefix()
+
+ // make packet acknowledgement proof
+ acknowledgementKey := host.PacketAcknowledgementKey(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence())
+ proof, proofHeight = suite.chainB.QueryProof(acknowledgementKey)
+
+ tc.malleate() // make changes as necessary
+
+ store := suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), clientA)
+
+ currentTime := uint64(suite.chainA.GetContext().BlockTime().UnixNano())
+ err = clientState.VerifyPacketAcknowledgement(
+ store, suite.chainA.Codec, proofHeight, currentTime, delayPeriod, &prefix, proof,
+ packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence(), ibcmock.MockAcknowledgement,
+ )
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+}
+
+// test verification of the absent acknowledgement on chainB being represented
+// in the light client on chainA. A send from chainB to chainA is simulated, but
+// no receive.
+func (suite *TendermintTestSuite) TestVerifyPacketReceiptAbsence() {
+ var (
+ clientState *types.ClientState
+ proof []byte
+ delayPeriod uint64
+ proofHeight exported.Height
+ prefix commitmenttypes.MerklePrefix
+ )
+
+ testCases := []struct {
+ name string
+ malleate func()
+ expPass bool
+ }{
+ {
+ "successful verification", func() {}, true,
+ },
+ {
+ name: "delay period has passed",
+ malleate: func() {
+ delayPeriod = uint64(time.Second.Nanoseconds())
+ },
+ expPass: true,
+ },
+ {
+ name: "delay period has not passed",
+ malleate: func() {
+ delayPeriod = uint64(time.Hour.Nanoseconds())
+ },
+ expPass: false,
+ },
+ {
+ "ApplyPrefix failed", func() {
+ prefix = commitmenttypes.MerklePrefix{}
+ }, false,
+ },
+ {
+ "latest client height < height", func() {
+ proofHeight = clientState.LatestHeight.Increment()
+ }, false,
+ },
+ {
+ "client is frozen", func() {
+ clientState.FrozenHeight = clienttypes.NewHeight(0, 1)
+ }, false,
+ },
+ {
+ "proof verification failed", func() {
+ proof = invalidProof
+ }, false,
+ },
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+
+ suite.Run(tc.name, func() {
+ suite.SetupTest() // reset
+
+ // setup testing conditions
+ clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
+ packet := channeltypes.NewPacket(ibctesting.TestHash, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.NewHeight(0, 100), 0)
+
+ // send packet, but no recv
+ err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ suite.Require().NoError(err)
+
+ // need to update chainA's client representing chainB to prove missing ack
+ suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+
+ var ok bool
+ clientStateI := suite.chainA.GetClientState(clientA)
+ clientState, ok = clientStateI.(*types.ClientState)
+ suite.Require().True(ok)
+
+ prefix = suite.chainB.GetPrefix()
+
+ // make packet receipt absence proof
+ receiptKey := host.PacketReceiptKey(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence())
+ proof, proofHeight = suite.chainB.QueryProof(receiptKey)
+
+ tc.malleate() // make changes as necessary
+
+ store := suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), clientA)
+
+ currentTime := uint64(suite.chainA.GetContext().BlockTime().UnixNano())
+ err = clientState.VerifyPacketReceiptAbsence(
+ store, suite.chainA.Codec, proofHeight, currentTime, delayPeriod, &prefix, proof,
+ packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence(),
+ )
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+}
+
+// test verification of the next receive sequence on chainB being represented
+// in the light client on chainA. A send and receive from chainB to chainA is
+// simulated.
+func (suite *TendermintTestSuite) TestVerifyNextSeqRecv() {
+ var (
+ clientState *types.ClientState
+ proof []byte
+ delayPeriod uint64
+ proofHeight exported.Height
+ prefix commitmenttypes.MerklePrefix
+ )
+
+ testCases := []struct {
+ name string
+ malleate func()
+ expPass bool
+ }{
+ {
+ "successful verification", func() {}, true,
+ },
+ {
+ name: "delay period has passed",
+ malleate: func() {
+ delayPeriod = uint64(time.Second.Nanoseconds())
+ },
+ expPass: true,
+ },
+ {
+ name: "delay period has not passed",
+ malleate: func() {
+ delayPeriod = uint64(time.Hour.Nanoseconds())
+ },
+ expPass: false,
+ },
+ {
+ "ApplyPrefix failed", func() {
+ prefix = commitmenttypes.MerklePrefix{}
+ }, false,
+ },
+ {
+ "latest client height < height", func() {
+ proofHeight = clientState.LatestHeight.Increment()
+ }, false,
+ },
+ {
+ "client is frozen", func() {
+ clientState.FrozenHeight = clienttypes.NewHeight(0, 1)
+ }, false,
+ },
+ {
+ "proof verification failed", func() {
+ proof = invalidProof
+ }, false,
+ },
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+
+ suite.Run(tc.name, func() {
+ suite.SetupTest() // reset
+
+ // setup testing conditions
+ clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.ORDERED)
+ packet := channeltypes.NewPacket(ibctesting.TestHash, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.NewHeight(0, 100), 0)
+
+ // send packet
+ err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ suite.Require().NoError(err)
+
+ // next seq recv incremented
+ err = suite.coordinator.RecvPacket(suite.chainA, suite.chainB, clientA, packet)
+ suite.Require().NoError(err)
+
+ // need to update chainA's client representing chainB
+ suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+
+ var ok bool
+ clientStateI := suite.chainA.GetClientState(clientA)
+ clientState, ok = clientStateI.(*types.ClientState)
+ suite.Require().True(ok)
+
+ prefix = suite.chainB.GetPrefix()
+
+ // make next seq recv proof
+ nextSeqRecvKey := host.NextSequenceRecvKey(packet.GetDestPort(), packet.GetDestChannel())
+ proof, proofHeight = suite.chainB.QueryProof(nextSeqRecvKey)
+
+ tc.malleate() // make changes as necessary
+
+ store := suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), clientA)
+
+ currentTime := uint64(suite.chainA.GetContext().BlockTime().UnixNano())
+ err = clientState.VerifyNextSequenceRecv(
+ store, suite.chainA.Codec, proofHeight, currentTime, delayPeriod, &prefix, proof,
+ packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence()+1,
+ )
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+}
diff --git a/light-clients/07-tendermint/types/codec.go b/light-clients/07-tendermint/types/codec.go
new file mode 100644
index 00000000..5d876c8f
--- /dev/null
+++ b/light-clients/07-tendermint/types/codec.go
@@ -0,0 +1,27 @@
+package types
+
+import (
+ codectypes "github.com/cosmos/cosmos-sdk/codec/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+)
+
+// RegisterInterfaces registers the tendermint concrete client-related
+// implementations and interfaces.
+func RegisterInterfaces(registry codectypes.InterfaceRegistry) {
+ registry.RegisterImplementations(
+ (*exported.ClientState)(nil),
+ &ClientState{},
+ )
+ registry.RegisterImplementations(
+ (*exported.ConsensusState)(nil),
+ &ConsensusState{},
+ )
+ registry.RegisterImplementations(
+ (*exported.Header)(nil),
+ &Header{},
+ )
+ registry.RegisterImplementations(
+ (*exported.Misbehaviour)(nil),
+ &Misbehaviour{},
+ )
+}
diff --git a/light-clients/07-tendermint/types/consensus_state.go b/light-clients/07-tendermint/types/consensus_state.go
new file mode 100644
index 00000000..adb469a3
--- /dev/null
+++ b/light-clients/07-tendermint/types/consensus_state.go
@@ -0,0 +1,55 @@
+package types
+
+import (
+ "time"
+
+ tmbytes "github.com/tendermint/tendermint/libs/bytes"
+ tmtypes "github.com/tendermint/tendermint/types"
+
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+)
+
+// NewConsensusState creates a new ConsensusState instance.
+func NewConsensusState(
+ timestamp time.Time, root commitmenttypes.MerkleRoot, nextValsHash tmbytes.HexBytes,
+) *ConsensusState {
+ return &ConsensusState{
+ Timestamp: timestamp,
+ Root: root,
+ NextValidatorsHash: nextValsHash,
+ }
+}
+
+// ClientType returns Tendermint
+func (ConsensusState) ClientType() string {
+ return exported.Tendermint
+}
+
+// GetRoot returns the commitment Root for this specific consensus state
+func (cs ConsensusState) GetRoot() exported.Root {
+ return cs.Root
+}
+
+// GetTimestamp returns block time in nanoseconds of the header that created consensus state
+func (cs ConsensusState) GetTimestamp() uint64 {
+ return uint64(cs.Timestamp.UnixNano())
+}
+
+// ValidateBasic defines a basic validation for the tendermint consensus state.
+// NOTE: ProcessedTimestamp may be zero if this is an initial consensus state passed in by relayer
+// as opposed to a consensus state constructed by the chain.
+func (cs ConsensusState) ValidateBasic() error {
+ if cs.Root.Empty() {
+ return sdkerrors.Wrap(clienttypes.ErrInvalidConsensus, "root cannot be empty")
+ }
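+	// tmtypes.ValidateHash accepts an empty hash; a non-empty hash must be exactly tmhash.Size bytes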
+ if err := tmtypes.ValidateHash(cs.NextValidatorsHash); err != nil {
+ return sdkerrors.Wrap(err, "next validators hash is invalid")
+ }
+ if cs.Timestamp.Unix() <= 0 {
+ return sdkerrors.Wrap(clienttypes.ErrInvalidConsensus, "timestamp must be a positive Unix time")
+ }
+ return nil
+}
diff --git a/light-clients/07-tendermint/types/consensus_state_test.go b/light-clients/07-tendermint/types/consensus_state_test.go
new file mode 100644
index 00000000..313815d0
--- /dev/null
+++ b/light-clients/07-tendermint/types/consensus_state_test.go
@@ -0,0 +1,69 @@
+package types_test
+
+import (
+ "time"
+
+ commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
+)
+
+func (suite *TendermintTestSuite) TestConsensusStateValidateBasic() {
+ testCases := []struct {
+ msg string
+ consensusState *types.ConsensusState
+ expectPass bool
+ }{
+ {"success",
+ &types.ConsensusState{
+ Timestamp: suite.now,
+ Root: commitmenttypes.NewMerkleRoot([]byte("app_hash")),
+ NextValidatorsHash: suite.valsHash,
+ },
+ true},
+ {"root is nil",
+ &types.ConsensusState{
+ Timestamp: suite.now,
+ Root: commitmenttypes.MerkleRoot{},
+ NextValidatorsHash: suite.valsHash,
+ },
+ false},
+ {"root is empty",
+ &types.ConsensusState{
+ Timestamp: suite.now,
+ Root: commitmenttypes.MerkleRoot{},
+ NextValidatorsHash: suite.valsHash,
+ },
+ false},
+ {"nextvalshash is invalid",
+ &types.ConsensusState{
+ Timestamp: suite.now,
+ Root: commitmenttypes.NewMerkleRoot([]byte("app_hash")),
+ NextValidatorsHash: []byte("hi"),
+ },
+ false},
+
+ {"timestamp is zero",
+ &types.ConsensusState{
+ Timestamp: time.Time{},
+ Root: commitmenttypes.NewMerkleRoot([]byte("app_hash")),
+ NextValidatorsHash: suite.valsHash,
+ },
+ false},
+ }
+
+ for i, tc := range testCases {
+ tc := tc
+
+ // check just to increase coverage
+ suite.Require().Equal(exported.Tendermint, tc.consensusState.ClientType())
+ suite.Require().Equal(tc.consensusState.GetRoot(), tc.consensusState.Root)
+
+ err := tc.consensusState.ValidateBasic()
+ if tc.expectPass {
+ suite.Require().NoError(err, "valid test case %d failed: %s", i, tc.msg)
+ } else {
+ suite.Require().Error(err, "invalid test case %d passed: %s", i, tc.msg)
+ }
+ }
+}
diff --git a/light-clients/07-tendermint/types/errors.go b/light-clients/07-tendermint/types/errors.go
new file mode 100644
index 00000000..276c225b
--- /dev/null
+++ b/light-clients/07-tendermint/types/errors.go
@@ -0,0 +1,25 @@
+package types
+
+import (
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+)
+
+const (
+ SubModuleName = "tendermint-client"
+)
+
+// IBC tendermint client sentinel errors
+var (
+ ErrInvalidChainID = sdkerrors.Register(SubModuleName, 2, "invalid chain-id")
+ ErrInvalidTrustingPeriod = sdkerrors.Register(SubModuleName, 3, "invalid trusting period")
+ ErrInvalidUnbondingPeriod = sdkerrors.Register(SubModuleName, 4, "invalid unbonding period")
+ ErrInvalidHeaderHeight = sdkerrors.Register(SubModuleName, 5, "invalid header height")
+ ErrInvalidHeader = sdkerrors.Register(SubModuleName, 6, "invalid header")
+ ErrInvalidMaxClockDrift = sdkerrors.Register(SubModuleName, 7, "invalid max clock drift")
+ ErrProcessedTimeNotFound = sdkerrors.Register(SubModuleName, 8, "processed time not found")
+ ErrDelayPeriodNotPassed = sdkerrors.Register(SubModuleName, 9, "packet-specified delay period has not been reached")
+ ErrTrustingPeriodExpired = sdkerrors.Register(SubModuleName, 10, "time since latest trusted state has passed the trusting period")
+ ErrUnbondingPeriodExpired = sdkerrors.Register(SubModuleName, 11, "time since latest trusted state has passed the unbonding period")
+ ErrInvalidProofSpecs = sdkerrors.Register(SubModuleName, 12, "invalid proof specs")
+ ErrInvalidValidatorSet = sdkerrors.Register(SubModuleName, 13, "invalid validator set")
+)
diff --git a/light-clients/07-tendermint/types/fraction.go b/light-clients/07-tendermint/types/fraction.go
new file mode 100644
index 00000000..e445f19b
--- /dev/null
+++ b/light-clients/07-tendermint/types/fraction.go
@@ -0,0 +1,25 @@
+package types
+
+import (
+ tmmath "github.com/tendermint/tendermint/libs/math"
+ "github.com/tendermint/tendermint/light"
+)
+
+// DefaultTrustLevel is the tendermint light client default trust level
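+// With the current tendermint light package this evaluates to 1/3, i.e.
+// Fraction{Numerator: 1, Denominator: 3}.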
+var DefaultTrustLevel = NewFractionFromTm(light.DefaultTrustLevel)
+
+// NewFractionFromTm returns a new Fraction instance from a tmmath.Fraction
+func NewFractionFromTm(f tmmath.Fraction) Fraction {
+ return Fraction{
+ Numerator: f.Numerator,
+ Denominator: f.Denominator,
+ }
+}
+
+// ToTendermint converts Fraction to tmmath.Fraction
+func (f Fraction) ToTendermint() tmmath.Fraction {
+ return tmmath.Fraction{
+ Numerator: f.Numerator,
+ Denominator: f.Denominator,
+ }
+}
diff --git a/light-clients/07-tendermint/types/genesis.go b/light-clients/07-tendermint/types/genesis.go
new file mode 100644
index 00000000..7124643b
--- /dev/null
+++ b/light-clients/07-tendermint/types/genesis.go
@@ -0,0 +1,21 @@
+package types
+
+import (
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+)
+
+// ExportMetadata exports all the processed times in the client store so they can be included in clients genesis
+// and imported by a ClientKeeper
+func (cs ClientState) ExportMetadata(store sdk.KVStore) []exported.GenesisMetadata {
+ gm := make([]exported.GenesisMetadata, 0)
+ IterateProcessedTime(store, func(key, val []byte) bool {
+ gm = append(gm, clienttypes.NewGenesisMetadata(key, val))
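+		// returning false keeps iterating so that every stored processed time is exported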
+ return false
+ })
+ if len(gm) == 0 {
+ return nil
+ }
+ return gm
+}
diff --git a/light-clients/07-tendermint/types/genesis_test.go b/light-clients/07-tendermint/types/genesis_test.go
new file mode 100644
index 00000000..5732151e
--- /dev/null
+++ b/light-clients/07-tendermint/types/genesis_test.go
@@ -0,0 +1,38 @@
+package types_test
+
+import (
+ "time"
+
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
+)
+
+func (suite *TendermintTestSuite) TestExportMetadata() {
+ clientState := types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
+ suite.chainA.App.IBCKeeper.ClientKeeper.SetClientState(suite.chainA.GetContext(), "clientA", clientState)
+
+ gm := clientState.ExportMetadata(suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), "clientA"))
+ suite.Require().Nil(gm, "client with no metadata returned non-nil exported metadata")
+
+ clientStore := suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), "clientA")
+
+ // set some processed times
+ timestamp1 := uint64(time.Now().UnixNano())
+ timestamp2 := uint64(time.Now().Add(time.Minute).UnixNano())
+ timestampBz1 := sdk.Uint64ToBigEndian(timestamp1)
+ timestampBz2 := sdk.Uint64ToBigEndian(timestamp2)
+ types.SetProcessedTime(clientStore, clienttypes.NewHeight(0, 1), timestamp1)
+ types.SetProcessedTime(clientStore, clienttypes.NewHeight(0, 2), timestamp2)
+
+ gm = clientState.ExportMetadata(suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), "clientA"))
+ suite.Require().NotNil(gm, "client with metadata returned nil exported metadata")
+ suite.Require().Len(gm, 2, "exported metadata has unexpected length")
+
+ suite.Require().Equal(types.ProcessedTimeKey(clienttypes.NewHeight(0, 1)), gm[0].GetKey(), "metadata has unexpected key")
+ suite.Require().Equal(timestampBz1, gm[0].GetValue(), "metadata has unexpected value")
+
+ suite.Require().Equal(types.ProcessedTimeKey(clienttypes.NewHeight(0, 2)), gm[1].GetKey(), "metadata has unexpected key")
+ suite.Require().Equal(timestampBz2, gm[1].GetValue(), "metadata has unexpected value")
+}
diff --git a/light-clients/07-tendermint/types/header.go b/light-clients/07-tendermint/types/header.go
new file mode 100644
index 00000000..0b9cfa1d
--- /dev/null
+++ b/light-clients/07-tendermint/types/header.go
@@ -0,0 +1,83 @@
+package types
+
+import (
+ "bytes"
+ "time"
+
+ tmtypes "github.com/tendermint/tendermint/types"
+
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+)
+
+var _ exported.Header = &Header{}
+
+// ConsensusState returns the updated consensus state associated with the header
+func (h Header) ConsensusState() *ConsensusState {
+ return &ConsensusState{
+ Timestamp: h.GetTime(),
+ Root: commitmenttypes.NewMerkleRoot(h.Header.GetAppHash()),
+ NextValidatorsHash: h.Header.NextValidatorsHash,
+ }
+}
+
+// ClientType defines that the Header is a Tendermint consensus algorithm
+func (h Header) ClientType() string {
+ return exported.Tendermint
+}
+
+// GetHeight returns the current height. It returns 0 if the tendermint
+// header is nil.
+// NOTE: the header.Header is checked to be non nil in ValidateBasic.
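+// For a revision-formatted chain ID such as "testchain-2" the parsed revision
+// number is 2; chain IDs without a revision suffix parse to revision 0.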
+func (h Header) GetHeight() exported.Height {
+ revision := clienttypes.ParseChainID(h.Header.ChainID)
+ return clienttypes.NewHeight(revision, uint64(h.Header.Height))
+}
+
+// GetTime returns the current block timestamp. It returns a zero time if
+// the tendermint header is nil.
+// NOTE: the header.Header is checked to be non nil in ValidateBasic.
+func (h Header) GetTime() time.Time {
+ return h.Header.Time
+}
+
+// ValidateBasic calls the SignedHeader ValidateBasic function and checks
+// that validatorsets are not nil.
+// NOTE: TrustedHeight and TrustedValidators may be empty when creating client
+// with MsgCreateClient
+func (h Header) ValidateBasic() error {
+ if h.SignedHeader == nil {
+ return sdkerrors.Wrap(clienttypes.ErrInvalidHeader, "tendermint signed header cannot be nil")
+ }
+ if h.Header == nil {
+ return sdkerrors.Wrap(clienttypes.ErrInvalidHeader, "tendermint header cannot be nil")
+ }
+ tmSignedHeader, err := tmtypes.SignedHeaderFromProto(h.SignedHeader)
+ if err != nil {
+ return sdkerrors.Wrap(err, "header is not a tendermint header")
+ }
+ if err := tmSignedHeader.ValidateBasic(h.Header.GetChainID()); err != nil {
+ return sdkerrors.Wrap(err, "header failed basic validation")
+ }
+
+ // TrustedHeight is less than Header for updates
+ // and less than or equal to Header for misbehaviour
+ if h.TrustedHeight.GT(h.GetHeight()) {
+ return sdkerrors.Wrapf(ErrInvalidHeaderHeight, "TrustedHeight %d must be less than or equal to header height %d",
+ h.TrustedHeight, h.GetHeight())
+ }
+
+ if h.ValidatorSet == nil {
+ return sdkerrors.Wrap(clienttypes.ErrInvalidHeader, "validator set is nil")
+ }
+ tmValset, err := tmtypes.ValidatorSetFromProto(h.ValidatorSet)
+ if err != nil {
+ return sdkerrors.Wrap(err, "validator set is not tendermint validator set")
+ }
+ if !bytes.Equal(h.Header.ValidatorsHash, tmValset.Hash()) {
+ return sdkerrors.Wrap(clienttypes.ErrInvalidHeader, "validator set does not match hash")
+ }
+ return nil
+}
diff --git a/light-clients/07-tendermint/types/header_test.go b/light-clients/07-tendermint/types/header_test.go
new file mode 100644
index 00000000..97647f86
--- /dev/null
+++ b/light-clients/07-tendermint/types/header_test.go
@@ -0,0 +1,82 @@
+package types_test
+
+import (
+ "time"
+
+ tmprotocrypto "github.com/tendermint/tendermint/proto/tendermint/crypto"
+
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
+)
+
+func (suite *TendermintTestSuite) TestGetHeight() {
+ header := suite.chainA.LastHeader
+ suite.Require().NotEqual(uint64(0), header.GetHeight())
+}
+
+func (suite *TendermintTestSuite) TestGetTime() {
+ header := suite.chainA.LastHeader
+ suite.Require().NotEqual(time.Time{}, header.GetTime())
+}
+
+func (suite *TendermintTestSuite) TestHeaderValidateBasic() {
+ var (
+ header *types.Header
+ )
+ testCases := []struct {
+ name string
+ malleate func()
+ expPass bool
+ }{
+ {"valid header", func() {}, true},
+ {"header is nil", func() {
+ header.Header = nil
+ }, false},
+ {"signed header is nil", func() {
+ header.SignedHeader = nil
+ }, false},
+ {"SignedHeaderFromProto failed", func() {
+ header.SignedHeader.Commit.Height = -1
+ }, false},
+ {"signed header failed tendermint ValidateBasic", func() {
+ header = suite.chainA.LastHeader
+ header.SignedHeader.Commit = nil
+ }, false},
+ {"trusted height is greater than header height", func() {
+ header.TrustedHeight = header.GetHeight().(clienttypes.Height).Increment().(clienttypes.Height)
+ }, false},
+ {"validator set nil", func() {
+ header.ValidatorSet = nil
+ }, false},
+ {"ValidatorSetFromProto failed", func() {
+ header.ValidatorSet.Validators[0].PubKey = tmprotocrypto.PublicKey{}
+ }, false},
+ {"header validator hash does not equal hash of validator set", func() {
+ // use chainB's randomly generated validator set
+ header.ValidatorSet = suite.chainB.LastHeader.ValidatorSet
+ }, false},
+ }
+
+ suite.Require().Equal(exported.Tendermint, suite.header.ClientType())
+
+ for _, tc := range testCases {
+ tc := tc
+
+ suite.Run(tc.name, func() {
+ suite.SetupTest()
+
+ header = suite.chainA.LastHeader // must be explicitly changed in malleate
+
+ tc.malleate()
+
+ err := header.ValidateBasic()
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+}
diff --git a/light-clients/07-tendermint/types/misbehaviour.go b/light-clients/07-tendermint/types/misbehaviour.go
new file mode 100644
index 00000000..340130d2
--- /dev/null
+++ b/light-clients/07-tendermint/types/misbehaviour.go
@@ -0,0 +1,141 @@
+package types
+
+import (
+ "bytes"
+ "time"
+
+ tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
+ tmtypes "github.com/tendermint/tendermint/types"
+
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+)
+
+var _ exported.Misbehaviour = &Misbehaviour{}
+
+// NewMisbehaviour creates a new Misbehaviour instance.
+func NewMisbehaviour(clientID string, header1, header2 *Header) *Misbehaviour {
+ return &Misbehaviour{
+ ClientId: clientID,
+ Header1: header1,
+ Header2: header2,
+ }
+}
+
+// ClientType is Tendermint light client
+func (misbehaviour Misbehaviour) ClientType() string {
+ return exported.Tendermint
+}
+
+// GetClientID returns the ID of the client that committed a misbehaviour.
+func (misbehaviour Misbehaviour) GetClientID() string {
+ return misbehaviour.ClientId
+}
+
+// GetHeight returns the height at which misbehaviour occurred
+//
+// NOTE: assumes that misbehaviour headers have the same height
+func (misbehaviour Misbehaviour) GetHeight() exported.Height {
+ return misbehaviour.Header1.GetHeight()
+}
+
+// GetTime returns the timestamp at which misbehaviour occurred. It uses the
+// maximum value from both headers to prevent producing an invalid header outside
+// of the misbehaviour age range.
+func (misbehaviour Misbehaviour) GetTime() time.Time {
+ t1, t2 := misbehaviour.Header1.GetTime(), misbehaviour.Header2.GetTime()
+ if t1.After(t2) {
+ return t1
+ }
+ return t2
+}
+
+// ValidateBasic implements Misbehaviour interface
+func (misbehaviour Misbehaviour) ValidateBasic() error {
+ if misbehaviour.Header1 == nil {
+ return sdkerrors.Wrap(ErrInvalidHeader, "misbehaviour Header1 cannot be nil")
+ }
+ if misbehaviour.Header2 == nil {
+ return sdkerrors.Wrap(ErrInvalidHeader, "misbehaviour Header2 cannot be nil")
+ }
+ if misbehaviour.Header1.TrustedHeight.RevisionHeight == 0 {
+ return sdkerrors.Wrapf(ErrInvalidHeaderHeight, "misbehaviour Header1 cannot have zero revision height")
+ }
+ if misbehaviour.Header2.TrustedHeight.RevisionHeight == 0 {
+ return sdkerrors.Wrapf(ErrInvalidHeaderHeight, "misbehaviour Header2 cannot have zero revision height")
+ }
+ if misbehaviour.Header1.TrustedValidators == nil {
+ return sdkerrors.Wrap(ErrInvalidValidatorSet, "trusted validator set in Header1 cannot be empty")
+ }
+ if misbehaviour.Header2.TrustedValidators == nil {
+ return sdkerrors.Wrap(ErrInvalidValidatorSet, "trusted validator set in Header2 cannot be empty")
+ }
+ if misbehaviour.Header1.Header.ChainID != misbehaviour.Header2.Header.ChainID {
+ return sdkerrors.Wrap(clienttypes.ErrInvalidMisbehaviour, "headers must have identical chainIDs")
+ }
+
+ if err := host.ClientIdentifierValidator(misbehaviour.ClientId); err != nil {
+ return sdkerrors.Wrap(err, "misbehaviour client ID is invalid")
+ }
+
+	// ValidateBasic on both headers
+ if err := misbehaviour.Header1.ValidateBasic(); err != nil {
+ return sdkerrors.Wrap(
+ clienttypes.ErrInvalidMisbehaviour,
+ sdkerrors.Wrap(err, "header 1 failed validation").Error(),
+ )
+ }
+ if err := misbehaviour.Header2.ValidateBasic(); err != nil {
+ return sdkerrors.Wrap(
+ clienttypes.ErrInvalidMisbehaviour,
+ sdkerrors.Wrap(err, "header 2 failed validation").Error(),
+ )
+ }
+ // Ensure that Heights are the same
+ if misbehaviour.Header1.GetHeight() != misbehaviour.Header2.GetHeight() {
+ return sdkerrors.Wrapf(clienttypes.ErrInvalidMisbehaviour, "headers in misbehaviour are on different heights (%d ≠ %d)", misbehaviour.Header1.GetHeight(), misbehaviour.Header2.GetHeight())
+ }
+
+ blockID1, err := tmtypes.BlockIDFromProto(&misbehaviour.Header1.SignedHeader.Commit.BlockID)
+ if err != nil {
+ return sdkerrors.Wrap(err, "invalid block ID from header 1 in misbehaviour")
+ }
+ blockID2, err := tmtypes.BlockIDFromProto(&misbehaviour.Header2.SignedHeader.Commit.BlockID)
+ if err != nil {
+ return sdkerrors.Wrap(err, "invalid block ID from header 2 in misbehaviour")
+ }
+
+ // Ensure that Commit Hashes are different
+ if bytes.Equal(blockID1.Hash, blockID2.Hash) {
+ return sdkerrors.Wrap(clienttypes.ErrInvalidMisbehaviour, "headers block hashes are equal")
+ }
+ if err := validCommit(misbehaviour.Header1.Header.ChainID, *blockID1,
+ misbehaviour.Header1.Commit, misbehaviour.Header1.ValidatorSet); err != nil {
+ return err
+ }
+ if err := validCommit(misbehaviour.Header2.Header.ChainID, *blockID2,
+ misbehaviour.Header2.Commit, misbehaviour.Header2.ValidatorSet); err != nil {
+ return err
+ }
+ return nil
+}
+
+// validCommit checks if the given commit is a valid commit from the passed-in validatorset
+func validCommit(chainID string, blockID tmtypes.BlockID, commit *tmproto.Commit, valSet *tmproto.ValidatorSet) (err error) {
+ tmCommit, err := tmtypes.CommitFromProto(commit)
+ if err != nil {
+ return sdkerrors.Wrap(err, "commit is not tendermint commit type")
+ }
+ tmValset, err := tmtypes.ValidatorSetFromProto(valSet)
+ if err != nil {
+ return sdkerrors.Wrap(err, "validator set is not tendermint validator set type")
+ }
+
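+	// VerifyCommitLight succeeds only if +2/3 of the given validator set's voting power signed the commit for this block ID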
+ if err := tmValset.VerifyCommitLight(chainID, blockID, tmCommit.Height, tmCommit); err != nil {
+ return sdkerrors.Wrap(clienttypes.ErrInvalidMisbehaviour, "validator set did not commit to header")
+ }
+
+ return nil
+}
diff --git a/light-clients/07-tendermint/types/misbehaviour_handle.go b/light-clients/07-tendermint/types/misbehaviour_handle.go
new file mode 100644
index 00000000..4c55552d
--- /dev/null
+++ b/light-clients/07-tendermint/types/misbehaviour_handle.go
@@ -0,0 +1,119 @@
+package types
+
+import (
+ "time"
+
+ tmtypes "github.com/tendermint/tendermint/types"
+
+ "github.com/cosmos/cosmos-sdk/codec"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+)
+
+// CheckMisbehaviourAndUpdateState determines whether or not two conflicting
+// headers at the same height would have convinced the light client.
+//
+// NOTE: consensusState1 is the trusted consensus state that corresponds to the TrustedHeight
+// of misbehaviour.Header1
+// Similarly, consensusState2 is the trusted consensus state that corresponds
+// to misbehaviour.Header2
+func (cs ClientState) CheckMisbehaviourAndUpdateState(
+ ctx sdk.Context,
+ cdc codec.BinaryMarshaler,
+ clientStore sdk.KVStore,
+ misbehaviour exported.Misbehaviour,
+) (exported.ClientState, error) {
+ tmMisbehaviour, ok := misbehaviour.(*Misbehaviour)
+ if !ok {
+ return nil, sdkerrors.Wrapf(clienttypes.ErrInvalidClientType, "expected type %T, got %T", misbehaviour, &Misbehaviour{})
+ }
+
+	// If the client is already frozen at a height less than or equal to the misbehaviour height, return with error
+ if cs.IsFrozen() && cs.FrozenHeight.LTE(misbehaviour.GetHeight()) {
+ return nil, sdkerrors.Wrapf(clienttypes.ErrInvalidMisbehaviour,
+ "client is already frozen at earlier height %s than misbehaviour height %s", cs.FrozenHeight, misbehaviour.GetHeight())
+ }
+
+ // Retrieve trusted consensus states for each Header in misbehaviour
+ // and unmarshal from clientStore
+
+ // Get consensus bytes from clientStore
+ tmConsensusState1, err := GetConsensusState(clientStore, cdc, tmMisbehaviour.Header1.TrustedHeight)
+ if err != nil {
+		return nil, sdkerrors.Wrapf(err, "could not get trusted consensus state from clientStore for Header1 at TrustedHeight: %s", tmMisbehaviour.Header1.TrustedHeight)
+ }
+
+ // Get consensus bytes from clientStore
+ tmConsensusState2, err := GetConsensusState(clientStore, cdc, tmMisbehaviour.Header2.TrustedHeight)
+ if err != nil {
+		return nil, sdkerrors.Wrapf(err, "could not get trusted consensus state from clientStore for Header2 at TrustedHeight: %s", tmMisbehaviour.Header2.TrustedHeight)
+ }
+
+ // Check the validity of the two conflicting headers against their respective
+ // trusted consensus states
+ // NOTE: header height and commitment root assertions are checked in
+ // misbehaviour.ValidateBasic by the client keeper and msg.ValidateBasic
+ // by the base application.
+ if err := checkMisbehaviourHeader(
+ &cs, tmConsensusState1, tmMisbehaviour.Header1, ctx.BlockTime(),
+ ); err != nil {
+ return nil, sdkerrors.Wrap(err, "verifying Header1 in Misbehaviour failed")
+ }
+ if err := checkMisbehaviourHeader(
+ &cs, tmConsensusState2, tmMisbehaviour.Header2, ctx.BlockTime(),
+ ); err != nil {
+ return nil, sdkerrors.Wrap(err, "verifying Header2 in Misbehaviour failed")
+ }
+
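+	// both misbehaviour headers verified against their trusted consensus states, so freeze the client at the misbehaviour height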
+ cs.FrozenHeight = tmMisbehaviour.GetHeight().(clienttypes.Height)
+ return &cs, nil
+}
+
+// checkMisbehaviourHeader checks that a Header in Misbehaviour is valid misbehaviour given
+// a trusted ConsensusState
+func checkMisbehaviourHeader(
+ clientState *ClientState, consState *ConsensusState, header *Header, currentTimestamp time.Time,
+) error {
+
+ tmTrustedValset, err := tmtypes.ValidatorSetFromProto(header.TrustedValidators)
+ if err != nil {
+ return sdkerrors.Wrap(err, "trusted validator set is not tendermint validator set type")
+ }
+
+ tmCommit, err := tmtypes.CommitFromProto(header.Commit)
+ if err != nil {
+ return sdkerrors.Wrap(err, "commit is not tendermint commit type")
+ }
+
+ // check the trusted fields for the header against ConsensusState
+ if err := checkTrustedHeader(header, consState); err != nil {
+ return err
+ }
+
+ // assert that the age of the trusted consensus state is not older than the trusting period
+ if currentTimestamp.Sub(consState.Timestamp) >= clientState.TrustingPeriod {
+ return sdkerrors.Wrapf(
+ ErrTrustingPeriodExpired,
+ "current timestamp minus the latest consensus state timestamp is greater than or equal to the trusting period (%d >= %d)",
+ currentTimestamp.Sub(consState.Timestamp), clientState.TrustingPeriod,
+ )
+ }
+
+ chainID := clientState.GetChainID()
+ // If chainID is in revision format, then set revision number of chainID with the revision number
+ // of the misbehaviour header
+ if clienttypes.IsRevisionFormat(chainID) {
+ chainID, _ = clienttypes.SetRevisionNumber(chainID, header.GetHeight().GetRevisionNumber())
+ }
+
+	// verify that at least TrustLevel of the trusted validator set signed the
+	// commit of the misbehaviour header
+ if err := tmTrustedValset.VerifyCommitLightTrusting(
+ chainID, tmCommit, clientState.TrustLevel.ToTendermint(),
+ ); err != nil {
+ return sdkerrors.Wrapf(clienttypes.ErrInvalidMisbehaviour, "validator set in header has too much change from trusted validator set: %v", err)
+ }
+ return nil
+}
diff --git a/light-clients/07-tendermint/types/misbehaviour_handle_test.go b/light-clients/07-tendermint/types/misbehaviour_handle_test.go
new file mode 100644
index 00000000..3ca2e4dc
--- /dev/null
+++ b/light-clients/07-tendermint/types/misbehaviour_handle_test.go
@@ -0,0 +1,372 @@
+package types_test
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/tendermint/tendermint/crypto/tmhash"
+ tmtypes "github.com/tendermint/tendermint/types"
+
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
+ ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+ ibctestingmock "github.com/cosmos/cosmos-sdk/x/ibc/testing/mock"
+)
+
+func (suite *TendermintTestSuite) TestCheckMisbehaviourAndUpdateState() {
+ altPrivVal := ibctestingmock.NewPV()
+ altPubKey, err := altPrivVal.GetPubKey()
+ suite.Require().NoError(err)
+
+ altVal := tmtypes.NewValidator(altPubKey, 4)
+
+ // Create bothValSet with both suite validator and altVal
+ bothValSet := tmtypes.NewValidatorSet(append(suite.valSet.Validators, altVal))
+ bothValsHash := bothValSet.Hash()
+ // Create alternative validator set with only altVal
+ altValSet := tmtypes.NewValidatorSet([]*tmtypes.Validator{altVal})
+
+ _, suiteVal := suite.valSet.GetByIndex(0)
+
+ // Create signer array and ensure it is in same order as bothValSet
+ bothSigners := ibctesting.CreateSortedSignerArray(altPrivVal, suite.privVal, altVal, suiteVal)
+
+ altSigners := []tmtypes.PrivValidator{altPrivVal}
+
+ heightMinus1 := clienttypes.NewHeight(height.RevisionNumber, height.RevisionHeight-1)
+ heightMinus3 := clienttypes.NewHeight(height.RevisionNumber, height.RevisionHeight-3)
+
+ testCases := []struct {
+ name string
+ clientState exported.ClientState
+ consensusState1 exported.ConsensusState
+ height1 clienttypes.Height
+ consensusState2 exported.ConsensusState
+ height2 clienttypes.Height
+ misbehaviour exported.Misbehaviour
+ timestamp time.Time
+ expPass bool
+ }{
+ {
+			"valid misbehaviour",
+ types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false),
+ types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash),
+ height,
+ types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash),
+ height,
+ &types.Misbehaviour{
+ Header1: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), height, suite.now, bothValSet, bothValSet, bothSigners),
+ Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), height, suite.now.Add(time.Minute), bothValSet, bothValSet, bothSigners),
+ ClientId: chainID,
+ },
+ suite.now,
+ true,
+ },
+ {
+			"valid misbehaviour at height greater than last consensusState",
+ types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false),
+ types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash),
+ heightMinus1,
+ types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash),
+ heightMinus1,
+ &types.Misbehaviour{
+ Header1: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), heightMinus1, suite.now, bothValSet, bothValSet, bothSigners),
+ Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), heightMinus1, suite.now.Add(time.Minute), bothValSet, bothValSet, bothSigners),
+ ClientId: chainID,
+ },
+ suite.now,
+ true,
+ },
+ {
+ "valid misbehaviour with different trusted heights",
+ types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false),
+ types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash),
+ heightMinus1,
+ types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), suite.valsHash),
+ heightMinus3,
+ &types.Misbehaviour{
+ Header1: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), heightMinus1, suite.now, bothValSet, bothValSet, bothSigners),
+ Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), heightMinus3, suite.now.Add(time.Minute), bothValSet, suite.valSet, bothSigners),
+ ClientId: chainID,
+ },
+ suite.now,
+ true,
+ },
+ {
+ "valid misbehaviour at a previous revision",
+ types.NewClientState(chainIDRevision1, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, clienttypes.NewHeight(1, 1), commitmenttypes.GetSDKSpecs(), upgradePath, false, false),
+ types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash),
+ heightMinus1,
+ types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), suite.valsHash),
+ heightMinus3,
+ &types.Misbehaviour{
+ Header1: suite.chainA.CreateTMClientHeader(chainIDRevision0, int64(height.RevisionHeight), heightMinus1, suite.now, bothValSet, bothValSet, bothSigners),
+ Header2: suite.chainA.CreateTMClientHeader(chainIDRevision0, int64(height.RevisionHeight), heightMinus3, suite.now.Add(time.Minute), bothValSet, suite.valSet, bothSigners),
+ ClientId: chainID,
+ },
+ suite.now,
+ true,
+ },
+ {
+ "valid misbehaviour at a future revision",
+ types.NewClientState(chainIDRevision0, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false),
+ types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash),
+ heightMinus1,
+ types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), suite.valsHash),
+ heightMinus3,
+ &types.Misbehaviour{
+ Header1: suite.chainA.CreateTMClientHeader(chainIDRevision0, 3, heightMinus1, suite.now, bothValSet, bothValSet, bothSigners),
+ Header2: suite.chainA.CreateTMClientHeader(chainIDRevision0, 3, heightMinus3, suite.now.Add(time.Minute), bothValSet, suite.valSet, bothSigners),
+ ClientId: chainID,
+ },
+ suite.now,
+ true,
+ },
+ {
+ "valid misbehaviour with trusted heights at a previous revision",
+ types.NewClientState(chainIDRevision1, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, clienttypes.NewHeight(1, 1), commitmenttypes.GetSDKSpecs(), upgradePath, false, false),
+ types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash),
+ heightMinus1,
+ types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), suite.valsHash),
+ heightMinus3,
+ &types.Misbehaviour{
+ Header1: suite.chainA.CreateTMClientHeader(chainIDRevision1, 1, heightMinus1, suite.now, bothValSet, bothValSet, bothSigners),
+ Header2: suite.chainA.CreateTMClientHeader(chainIDRevision1, 1, heightMinus3, suite.now.Add(time.Minute), bothValSet, suite.valSet, bothSigners),
+ ClientId: chainID,
+ },
+ suite.now,
+ true,
+ },
+ {
+ "consensus state's valset hash different from misbehaviour should still pass",
+ types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false),
+ types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), suite.valsHash),
+ height,
+ types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), suite.valsHash),
+ height,
+ &types.Misbehaviour{
+ Header1: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), height, suite.now, bothValSet, suite.valSet, bothSigners),
+ Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), height, suite.now.Add(time.Minute), bothValSet, suite.valSet, bothSigners),
+ ClientId: chainID,
+ },
+ suite.now,
+ true,
+ },
+ {
+			"invalid misbehaviour from different chain",
+ types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false),
+ types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash),
+ height,
+ types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash),
+ height,
+ &types.Misbehaviour{
+ Header1: suite.chainA.CreateTMClientHeader("ethermint", int64(height.RevisionHeight), height, suite.now, bothValSet, bothValSet, bothSigners),
+ Header2: suite.chainA.CreateTMClientHeader("ethermint", int64(height.RevisionHeight), height, suite.now.Add(time.Minute), bothValSet, bothValSet, bothSigners),
+ ClientId: chainID,
+ },
+ suite.now,
+ false,
+ },
+ {
+			"invalid misbehaviour with trusted height different from trusted consensus state",
+ types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false),
+ types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash),
+ heightMinus1,
+ types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), suite.valsHash),
+ heightMinus3,
+ &types.Misbehaviour{
+ Header1: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), heightMinus1, suite.now, bothValSet, bothValSet, bothSigners),
+ Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), height, suite.now.Add(time.Minute), bothValSet, suite.valSet, bothSigners),
+ ClientId: chainID,
+ },
+ suite.now,
+ false,
+ },
+ {
+			"invalid misbehaviour with trusted validators different from trusted consensus state",
+ types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false),
+ types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash),
+ heightMinus1,
+ types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), suite.valsHash),
+ heightMinus3,
+ &types.Misbehaviour{
+ Header1: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), heightMinus1, suite.now, bothValSet, bothValSet, bothSigners),
+ Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), heightMinus3, suite.now.Add(time.Minute), bothValSet, bothValSet, bothSigners),
+ ClientId: chainID,
+ },
+ suite.now,
+ false,
+ },
+ {
+ "already frozen client state",
+ &types.ClientState{FrozenHeight: clienttypes.NewHeight(0, 1)},
+ types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash),
+ height,
+ types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash),
+ height,
+ &types.Misbehaviour{
+ Header1: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), height, suite.now, bothValSet, bothValSet, bothSigners),
+ Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), height, suite.now.Add(time.Minute), bothValSet, bothValSet, bothSigners),
+ ClientId: chainID,
+ },
+ suite.now,
+ false,
+ },
+ {
+ "trusted consensus state does not exist",
+ types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false),
+ nil, // consensus state for trusted height - 1 does not exist in store
+ clienttypes.Height{},
+ types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash),
+ height,
+ &types.Misbehaviour{
+ Header1: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), heightMinus1, suite.now, bothValSet, bothValSet, bothSigners),
+ Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), height, suite.now.Add(time.Minute), bothValSet, bothValSet, bothSigners),
+ ClientId: chainID,
+ },
+ suite.now,
+ false,
+ },
+ {
+ "invalid tendermint misbehaviour",
+ types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false),
+ types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash),
+ height,
+ types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash),
+ height,
+ nil,
+ suite.now,
+ false,
+ },
+ {
+ "provided height > header height",
+ types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false),
+ types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash),
+ height,
+ types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash),
+ height,
+ &types.Misbehaviour{
+ Header1: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), heightMinus1, suite.now, bothValSet, bothValSet, bothSigners),
+ Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), heightMinus1, suite.now.Add(time.Minute), bothValSet, bothValSet, bothSigners),
+ ClientId: chainID,
+ },
+ suite.now,
+ false,
+ },
+ {
+ "trusting period expired",
+ types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false),
+ types.NewConsensusState(time.Time{}, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash),
+ heightMinus1,
+ types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash),
+ height,
+ &types.Misbehaviour{
+ Header1: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), heightMinus1, suite.now, bothValSet, bothValSet, bothSigners),
+ Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), height, suite.now.Add(time.Minute), bothValSet, bothValSet, bothSigners),
+ ClientId: chainID,
+ },
+ suite.now.Add(trustingPeriod),
+ false,
+ },
+ {
+ "trusted validators is incorrect for given consensus state",
+ types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false),
+ types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash),
+ height,
+ types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash),
+ height,
+ &types.Misbehaviour{
+ Header1: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), height, suite.now, bothValSet, suite.valSet, bothSigners),
+ Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), height, suite.now.Add(time.Minute), bothValSet, suite.valSet, bothSigners),
+ ClientId: chainID,
+ },
+ suite.now,
+ false,
+ },
+ {
+ "first valset has too much change",
+ types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false),
+ types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash),
+ height,
+ types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash),
+ height,
+ &types.Misbehaviour{
+ Header1: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), height, suite.now, altValSet, bothValSet, altSigners),
+ Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), height, suite.now.Add(time.Minute), bothValSet, bothValSet, bothSigners),
+ ClientId: chainID,
+ },
+ suite.now,
+ false,
+ },
+ {
+ "second valset has too much change",
+ types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false),
+ types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash),
+ height,
+ types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash),
+ height,
+ &types.Misbehaviour{
+ Header1: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), height, suite.now, bothValSet, bothValSet, bothSigners),
+ Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), height, suite.now.Add(time.Minute), altValSet, bothValSet, altSigners),
+ ClientId: chainID,
+ },
+ suite.now,
+ false,
+ },
+ {
+ "both valsets have too much change",
+ types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false),
+ types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash),
+ height,
+ types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash),
+ height,
+ &types.Misbehaviour{
+ Header1: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), height, suite.now, altValSet, bothValSet, altSigners),
+ Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), height, suite.now.Add(time.Minute), altValSet, bothValSet, altSigners),
+ ClientId: chainID,
+ },
+ suite.now,
+ false,
+ },
+ }
+
+ for i, tc := range testCases {
+ tc := tc
+ suite.Run(fmt.Sprintf("Case: %s", tc.name), func() {
+ // reset suite to create fresh application state
+ suite.SetupTest()
+
+ // Set current timestamp in context
+ ctx := suite.chainA.GetContext().WithBlockTime(tc.timestamp)
+
+ // Set trusted consensus states in client store
+
+ if tc.consensusState1 != nil {
+ suite.chainA.App.IBCKeeper.ClientKeeper.SetClientConsensusState(ctx, clientID, tc.height1, tc.consensusState1)
+ }
+ if tc.consensusState2 != nil {
+ suite.chainA.App.IBCKeeper.ClientKeeper.SetClientConsensusState(ctx, clientID, tc.height2, tc.consensusState2)
+ }
+
+ clientState, err := tc.clientState.CheckMisbehaviourAndUpdateState(
+ ctx,
+ suite.cdc,
+ suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(ctx, clientID), // pass in clientID prefixed clientStore
+ tc.misbehaviour,
+ )
+
+ if tc.expPass {
+ suite.Require().NoError(err, "valid test case %d failed: %s", i, tc.name)
+ suite.Require().NotNil(clientState, "valid test case %d failed: %s", i, tc.name)
+ suite.Require().True(clientState.IsFrozen(), "valid test case %d failed: %s", i, tc.name)
+ suite.Require().Equal(tc.misbehaviour.GetHeight(), clientState.GetFrozenHeight(),
+					"valid test case %d failed: %s. Expected FrozenHeight %s got %s", i, tc.name, tc.misbehaviour.GetHeight(), clientState.GetFrozenHeight())
+ } else {
+ suite.Require().Error(err, "invalid test case %d passed: %s", i, tc.name)
+ suite.Require().Nil(clientState, "invalid test case %d passed: %s", i, tc.name)
+ }
+ })
+ }
+}
diff --git a/light-clients/07-tendermint/types/misbehaviour_test.go b/light-clients/07-tendermint/types/misbehaviour_test.go
new file mode 100644
index 00000000..dede4e60
--- /dev/null
+++ b/light-clients/07-tendermint/types/misbehaviour_test.go
@@ -0,0 +1,244 @@
+package types_test
+
+import (
+ "time"
+
+ "github.com/tendermint/tendermint/crypto/tmhash"
+ tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
+ tmtypes "github.com/tendermint/tendermint/types"
+
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
+ ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+ ibctestingmock "github.com/cosmos/cosmos-sdk/x/ibc/testing/mock"
+)
+
+func (suite *TendermintTestSuite) TestMisbehaviour() {
+ signers := []tmtypes.PrivValidator{suite.privVal}
+ heightMinus1 := clienttypes.NewHeight(0, height.RevisionHeight-1)
+
+ misbehaviour := &types.Misbehaviour{
+ Header1: suite.header,
+ Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), heightMinus1, suite.now, suite.valSet, suite.valSet, signers),
+ ClientId: clientID,
+ }
+
+ suite.Require().Equal(exported.Tendermint, misbehaviour.ClientType())
+ suite.Require().Equal(clientID, misbehaviour.GetClientID())
+ suite.Require().Equal(height, misbehaviour.GetHeight())
+}
+
+func (suite *TendermintTestSuite) TestMisbehaviourValidateBasic() {
+ altPrivVal := ibctestingmock.NewPV()
+ altPubKey, err := altPrivVal.GetPubKey()
+ suite.Require().NoError(err)
+
+ revisionHeight := int64(height.RevisionHeight)
+
+ altVal := tmtypes.NewValidator(altPubKey, revisionHeight)
+
+ // Create bothValSet with both suite validator and altVal
+ bothValSet := tmtypes.NewValidatorSet(append(suite.valSet.Validators, altVal))
+ // Create alternative validator set with only altVal
+ altValSet := tmtypes.NewValidatorSet([]*tmtypes.Validator{altVal})
+
+ signers := []tmtypes.PrivValidator{suite.privVal}
+
+ // Create signer array and ensure it is in same order as bothValSet
+ _, suiteVal := suite.valSet.GetByIndex(0)
+ bothSigners := ibctesting.CreateSortedSignerArray(altPrivVal, suite.privVal, altVal, suiteVal)
+
+ altSigners := []tmtypes.PrivValidator{altPrivVal}
+
+ heightMinus1 := clienttypes.NewHeight(0, height.RevisionHeight-1)
+
+ testCases := []struct {
+ name string
+ misbehaviour *types.Misbehaviour
+ malleateMisbehaviour func(misbehaviour *types.Misbehaviour) error
+ expPass bool
+ }{
+ {
+ "valid misbehaviour",
+ &types.Misbehaviour{
+ Header1: suite.header,
+ Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), heightMinus1, suite.now.Add(time.Minute), suite.valSet, suite.valSet, signers),
+ ClientId: clientID,
+ },
+ func(misbehaviour *types.Misbehaviour) error { return nil },
+ true,
+ },
+ {
+ "misbehaviour Header1 is nil",
+ types.NewMisbehaviour(clientID, nil, suite.header),
+ func(m *types.Misbehaviour) error { return nil },
+ false,
+ },
+ {
+ "misbehaviour Header2 is nil",
+ types.NewMisbehaviour(clientID, suite.header, nil),
+ func(m *types.Misbehaviour) error { return nil },
+ false,
+ },
+ {
+ "valid misbehaviour with different trusted headers",
+ &types.Misbehaviour{
+ Header1: suite.header,
+ Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), clienttypes.NewHeight(0, height.RevisionHeight-3), suite.now.Add(time.Minute), suite.valSet, bothValSet, signers),
+ ClientId: clientID,
+ },
+ func(misbehaviour *types.Misbehaviour) error { return nil },
+ true,
+ },
+ {
+ "trusted height is 0 in Header1",
+ &types.Misbehaviour{
+ Header1: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), clienttypes.ZeroHeight(), suite.now.Add(time.Minute), suite.valSet, suite.valSet, signers),
+ Header2: suite.header,
+ ClientId: clientID,
+ },
+ func(misbehaviour *types.Misbehaviour) error { return nil },
+ false,
+ },
+ {
+ "trusted height is 0 in Header2",
+ &types.Misbehaviour{
+ Header1: suite.header,
+ Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), clienttypes.ZeroHeight(), suite.now.Add(time.Minute), suite.valSet, suite.valSet, signers),
+ ClientId: clientID,
+ },
+ func(misbehaviour *types.Misbehaviour) error { return nil },
+ false,
+ },
+ {
+ "trusted valset is nil in Header1",
+ &types.Misbehaviour{
+ Header1: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), heightMinus1, suite.now.Add(time.Minute), suite.valSet, nil, signers),
+ Header2: suite.header,
+ ClientId: clientID,
+ },
+ func(misbehaviour *types.Misbehaviour) error { return nil },
+ false,
+ },
+ {
+ "trusted valset is nil in Header2",
+ &types.Misbehaviour{
+ Header1: suite.header,
+ Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), heightMinus1, suite.now.Add(time.Minute), suite.valSet, nil, signers),
+ ClientId: clientID,
+ },
+ func(misbehaviour *types.Misbehaviour) error { return nil },
+ false,
+ },
+ {
+			"invalid client ID",
+ &types.Misbehaviour{
+ Header1: suite.header,
+ Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), heightMinus1, suite.now, suite.valSet, suite.valSet, signers),
+ ClientId: "GAIA",
+ },
+ func(misbehaviour *types.Misbehaviour) error { return nil },
+ false,
+ },
+ {
+ "chainIDs do not match",
+ &types.Misbehaviour{
+ Header1: suite.header,
+ Header2: suite.chainA.CreateTMClientHeader("ethermint", int64(height.RevisionHeight), heightMinus1, suite.now, suite.valSet, suite.valSet, signers),
+ ClientId: clientID,
+ },
+ func(misbehaviour *types.Misbehaviour) error { return nil },
+ false,
+ },
+ {
+ "mismatched heights",
+ &types.Misbehaviour{
+ Header1: suite.header,
+ Header2: suite.chainA.CreateTMClientHeader(chainID, 6, clienttypes.NewHeight(0, 4), suite.now, suite.valSet, suite.valSet, signers),
+ ClientId: clientID,
+ },
+ func(misbehaviour *types.Misbehaviour) error { return nil },
+ false,
+ },
+ {
+ "same block id",
+ &types.Misbehaviour{
+ Header1: suite.header,
+ Header2: suite.header,
+ ClientId: clientID,
+ },
+ func(misbehaviour *types.Misbehaviour) error { return nil },
+ false,
+ },
+ {
+ "header 1 doesn't have 2/3 majority",
+ &types.Misbehaviour{
+ Header1: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), heightMinus1, suite.now, bothValSet, suite.valSet, bothSigners),
+ Header2: suite.header,
+ ClientId: clientID,
+ },
+ func(misbehaviour *types.Misbehaviour) error {
+				// voteSet contains only altVal, whose voting power is less than 2/3 of the total
+ wrongVoteSet := tmtypes.NewVoteSet(chainID, int64(misbehaviour.Header1.GetHeight().GetRevisionHeight()), 1, tmproto.PrecommitType, altValSet)
+ blockID, err := tmtypes.BlockIDFromProto(&misbehaviour.Header1.Commit.BlockID)
+ if err != nil {
+ return err
+ }
+
+ tmCommit, err := tmtypes.MakeCommit(*blockID, int64(misbehaviour.Header2.GetHeight().GetRevisionHeight()), misbehaviour.Header1.Commit.Round, wrongVoteSet, altSigners, suite.now)
+ misbehaviour.Header1.Commit = tmCommit.ToProto()
+ return err
+ },
+ false,
+ },
+ {
+ "header 2 doesn't have 2/3 majority",
+ &types.Misbehaviour{
+ Header1: suite.header,
+ Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), heightMinus1, suite.now, bothValSet, suite.valSet, bothSigners),
+ ClientId: clientID,
+ },
+ func(misbehaviour *types.Misbehaviour) error {
+ // voteSet contains only altVal which is less than 2/3 of total power (height/1height)
+				// voteSet contains only altVal, whose voting power is less than 2/3 of the total
+ blockID, err := tmtypes.BlockIDFromProto(&misbehaviour.Header2.Commit.BlockID)
+ if err != nil {
+ return err
+ }
+
+ tmCommit, err := tmtypes.MakeCommit(*blockID, int64(misbehaviour.Header2.GetHeight().GetRevisionHeight()), misbehaviour.Header2.Commit.Round, wrongVoteSet, altSigners, suite.now)
+ misbehaviour.Header2.Commit = tmCommit.ToProto()
+ return err
+ },
+ false,
+ },
+ {
+ "validators sign off on wrong commit",
+ &types.Misbehaviour{
+ Header1: suite.header,
+ Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), heightMinus1, suite.now, bothValSet, suite.valSet, bothSigners),
+ ClientId: clientID,
+ },
+ func(misbehaviour *types.Misbehaviour) error {
+ tmBlockID := ibctesting.MakeBlockID(tmhash.Sum([]byte("other_hash")), 3, tmhash.Sum([]byte("other_partset")))
+ misbehaviour.Header2.Commit.BlockID = tmBlockID.ToProto()
+ return nil
+ },
+ false,
+ },
+ }
+
+ for i, tc := range testCases {
+ tc := tc
+
+ err := tc.malleateMisbehaviour(tc.misbehaviour)
+ suite.Require().NoError(err)
+
+ if tc.expPass {
+ suite.Require().NoError(tc.misbehaviour.ValidateBasic(), "valid test case %d failed: %s", i, tc.name)
+ } else {
+ suite.Require().Error(tc.misbehaviour.ValidateBasic(), "invalid test case %d passed: %s", i, tc.name)
+ }
+ }
+}
diff --git a/light-clients/07-tendermint/types/proposal_handle.go b/light-clients/07-tendermint/types/proposal_handle.go
new file mode 100644
index 00000000..c64c52b3
--- /dev/null
+++ b/light-clients/07-tendermint/types/proposal_handle.go
@@ -0,0 +1,134 @@
+package types
+
+import (
+ "reflect"
+
+ "github.com/cosmos/cosmos-sdk/codec"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+)
+
+// CheckSubstituteAndUpdateState will try to update the client with the state of the
+// substitute if and only if the proposal passes and one of the following conditions is
+// satisfied:
+// 1) AllowUpdateAfterMisbehaviour is true and IsFrozen() returns true
+// 2) AllowUpdateAfterExpiry is true and Expire(ctx.BlockTime) returns true
+//
+// The following must always be true:
+// - The substitute client is the same type as the subject client
+// - The subject and substitute client states match in all parameters (except frozen height, latest height, and chain-id)
+//
+// In case 1), before updating the client, the client will be unfrozen by resetting
+// the FrozenHeight to the zero Height. If a client is frozen and AllowUpdateAfterMisbehaviour
+// is set to true, the client will be unexpired even if AllowUpdateAfterExpiry is set to false.
+// Note that even if the subject is updated to the state of the substitute, an error may be
+// returned if the updated client state is invalid or the client is expired.
+func (cs ClientState) CheckSubstituteAndUpdateState(
+ ctx sdk.Context, cdc codec.BinaryMarshaler, subjectClientStore,
+ substituteClientStore sdk.KVStore, substituteClient exported.ClientState,
+ initialHeight exported.Height,
+) (exported.ClientState, error) {
+ substituteClientState, ok := substituteClient.(*ClientState)
+ if !ok {
+ return nil, sdkerrors.Wrapf(
+ clienttypes.ErrInvalidClient, "expected type %T, got %T", &ClientState{}, substituteClient,
+ )
+ }
+
+ // substitute clients are not allowed to be upgraded during the voting period
+ // If an upgrade passes before the subject client has been updated, a new proposal must be created
+ // with an initial height that contains the new revision number.
+ if substituteClientState.GetLatestHeight().GetRevisionNumber() != initialHeight.GetRevisionNumber() {
+ return nil, sdkerrors.Wrapf(
+ clienttypes.ErrInvalidHeight, "substitute client revision number must equal initial height revision number (%d != %d)",
+ substituteClientState.GetLatestHeight().GetRevisionNumber(), initialHeight.GetRevisionNumber(),
+ )
+ }
+
+ if !IsMatchingClientState(cs, *substituteClientState) {
+ return nil, sdkerrors.Wrap(clienttypes.ErrInvalidSubstitute, "subject client state does not match substitute client state")
+ }
+
+ // get consensus state corresponding to client state to check if the client is expired
+ consensusState, err := GetConsensusState(subjectClientStore, cdc, cs.GetLatestHeight())
+ if err != nil {
+ return nil, sdkerrors.Wrapf(
+ err, "unexpected error: could not get consensus state from clientstore at height: %d", cs.GetLatestHeight(),
+ )
+ }
+
+ switch {
+
+ case cs.IsFrozen():
+ if !cs.AllowUpdateAfterMisbehaviour {
+ return nil, sdkerrors.Wrap(clienttypes.ErrUpdateClientFailed, "client is not allowed to be unfrozen")
+ }
+
+ // unfreeze the client
+ cs.FrozenHeight = clienttypes.ZeroHeight()
+
+ case cs.IsExpired(consensusState.Timestamp, ctx.BlockTime()):
+ if !cs.AllowUpdateAfterExpiry {
+ return nil, sdkerrors.Wrap(clienttypes.ErrUpdateClientFailed, "client is not allowed to be unexpired")
+ }
+
+ default:
+ return nil, sdkerrors.Wrap(clienttypes.ErrUpdateClientFailed, "client cannot be updated with proposal")
+ }
+
+ // copy consensus states and processed time from substitute to subject
+ // starting from initial height and ending on the latest height (inclusive)
+ for i := initialHeight.GetRevisionHeight(); i <= substituteClientState.GetLatestHeight().GetRevisionHeight(); i++ {
+ height := clienttypes.NewHeight(substituteClientState.GetLatestHeight().GetRevisionNumber(), i)
+
+ consensusState, err := GetConsensusState(substituteClientStore, cdc, height)
+ if err != nil {
+ // not all consensus states will be filled in
+ continue
+ }
+ SetConsensusState(subjectClientStore, cdc, consensusState, height)
+
+ processedTime, found := GetProcessedTime(substituteClientStore, height)
+ if !found {
+ continue
+ }
+ SetProcessedTime(subjectClientStore, height, processedTime)
+
+ }
+
+ cs.LatestHeight = substituteClientState.LatestHeight
+
+ // validate the updated client and ensure it isn't expired
+ if err := cs.Validate(); err != nil {
+ return nil, sdkerrors.Wrap(err, "unexpected error: updated subject client state is invalid")
+ }
+
+ latestConsensusState, err := GetConsensusState(subjectClientStore, cdc, cs.GetLatestHeight())
+ if err != nil {
+ return nil, sdkerrors.Wrapf(
+ err, "unexpected error: could not get consensus state for updated subject client from clientstore at height: %d", cs.GetLatestHeight(),
+ )
+ }
+
+ if cs.IsExpired(latestConsensusState.Timestamp, ctx.BlockTime()) {
+ return nil, sdkerrors.Wrap(clienttypes.ErrInvalidClient, "updated subject client is expired")
+ }
+
+ return &cs, nil
+}
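+
+// Example (illustrative sketch only): a proposal handler for updating a frozen or
+// expired client is assumed to look roughly like the snippet below. The proposal
+// fields, the clientKeeper variable, and the surrounding wiring are assumptions
+// for illustration and are not defined in this file.
+//
+//	subjectStore := clientKeeper.ClientStore(ctx, proposal.SubjectClientId)
+//	substituteStore := clientKeeper.ClientStore(ctx, proposal.SubstituteClientId)
+//	updatedClient, err := subjectClientState.CheckSubstituteAndUpdateState(
+//		ctx, cdc, subjectStore, substituteStore, substituteClientState, proposal.InitialHeight,
+//	)
+//	if err != nil {
+//		return err
+//	}
+//	clientKeeper.SetClientState(ctx, proposal.SubjectClientId, updatedClient)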
+
+// IsMatchingClientState returns true if all the client state parameters match
+// except for frozen height, latest height, and chain-id.
+func IsMatchingClientState(subject, substitute ClientState) bool {
+ // zero out parameters which do not need to match
+ subject.LatestHeight = clienttypes.ZeroHeight()
+ subject.FrozenHeight = clienttypes.ZeroHeight()
+ substitute.LatestHeight = clienttypes.ZeroHeight()
+ substitute.FrozenHeight = clienttypes.ZeroHeight()
+ subject.ChainId = ""
+ substitute.ChainId = ""
+
+ return reflect.DeepEqual(subject, substitute)
+}
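+
+// Example (illustrative sketch only, with made-up field values; it assumes the
+// time package is available): two client states that differ only in chain-id and
+// latest height are still considered matching, because those parameters are
+// zeroed out before comparison.
+//
+//	subject := ClientState{ChainId: "chain-A", TrustingPeriod: time.Hour, LatestHeight: clienttypes.NewHeight(0, 10)}
+//	substitute := ClientState{ChainId: "chain-B", TrustingPeriod: time.Hour, LatestHeight: clienttypes.NewHeight(0, 20)}
+//	matching := IsMatchingClientState(subject, substitute) // true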
diff --git a/light-clients/07-tendermint/types/proposal_handle_test.go b/light-clients/07-tendermint/types/proposal_handle_test.go
new file mode 100644
index 00000000..66a51203
--- /dev/null
+++ b/light-clients/07-tendermint/types/proposal_handle_test.go
@@ -0,0 +1,387 @@
+package types_test
+
+import (
+ "time"
+
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
+ ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+)
+
+var (
+ frozenHeight = clienttypes.NewHeight(0, 1)
+)
+
+func (suite *TendermintTestSuite) TestCheckSubstituteUpdateStateBasic() {
+ var (
+ substitute string
+ substituteClientState exported.ClientState
+ initialHeight clienttypes.Height
+ )
+ testCases := []struct {
+ name string
+ malleate func()
+ }{
+ {
+ "solo machine used for substitute", func() {
+ substituteClientState = ibctesting.NewSolomachine(suite.T(), suite.cdc, "solo machine", "", 1).ClientState()
+ },
+ },
+ {
+ "initial height and substitute revision numbers do not match", func() {
+ initialHeight = clienttypes.NewHeight(substituteClientState.GetLatestHeight().GetRevisionNumber()+1, 1)
+ },
+ },
+ {
+ "non-matching substitute", func() {
+				substitute, _ = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+ substituteClientState = suite.chainA.GetClientState(substitute).(*types.ClientState)
+ tmClientState, ok := substituteClientState.(*types.ClientState)
+ suite.Require().True(ok)
+
+ tmClientState.ChainId = tmClientState.ChainId + "different chain"
+ },
+ },
+ {
+ "updated client is invalid - revision height is zero", func() {
+				substitute, _ = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+ substituteClientState = suite.chainA.GetClientState(substitute).(*types.ClientState)
+ tmClientState, ok := substituteClientState.(*types.ClientState)
+ suite.Require().True(ok)
+ // match subject
+ tmClientState.AllowUpdateAfterMisbehaviour = true
+ tmClientState.AllowUpdateAfterExpiry = true
+
+				// set the initial height and the substitute's latest height to a height with a
+				// zero revision height so that the updated client state is invalid. This case
+				// should never occur in practice (it is caught by upstream checks).
+ initialHeight = clienttypes.NewHeight(5, 0)
+ tmClientState.LatestHeight = clienttypes.NewHeight(5, 0)
+ },
+ },
+ {
+ "updated client is expired", func() {
+ substitute, _ = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+ substituteClientState = suite.chainA.GetClientState(substitute).(*types.ClientState)
+ tmClientState, ok := substituteClientState.(*types.ClientState)
+ suite.Require().True(ok)
+ initialHeight = tmClientState.LatestHeight
+
+ // match subject
+ tmClientState.AllowUpdateAfterMisbehaviour = true
+ tmClientState.AllowUpdateAfterExpiry = true
+ suite.chainA.App.IBCKeeper.ClientKeeper.SetClientState(suite.chainA.GetContext(), substitute, tmClientState)
+
+ // update substitute a few times
+ err := suite.coordinator.UpdateClient(suite.chainA, suite.chainB, substitute, exported.Tendermint)
+ suite.Require().NoError(err)
+ substituteClientState = suite.chainA.GetClientState(substitute)
+
+ err = suite.coordinator.UpdateClient(suite.chainA, suite.chainB, substitute, exported.Tendermint)
+ suite.Require().NoError(err)
+
+ suite.chainA.ExpireClient(tmClientState.TrustingPeriod)
+ suite.chainB.ExpireClient(tmClientState.TrustingPeriod)
+ suite.coordinator.CommitBlock(suite.chainA, suite.chainB)
+
+ substituteClientState = suite.chainA.GetClientState(substitute)
+ },
+ },
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+
+ suite.Run(tc.name, func() {
+
+ suite.SetupTest() // reset
+
+ subject, _ := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+ subjectClientState := suite.chainA.GetClientState(subject).(*types.ClientState)
+ subjectClientState.AllowUpdateAfterMisbehaviour = true
+ subjectClientState.AllowUpdateAfterExpiry = true
+
+ // expire subject
+ suite.chainA.ExpireClient(subjectClientState.TrustingPeriod)
+ suite.chainB.ExpireClient(subjectClientState.TrustingPeriod)
+ suite.coordinator.CommitBlock(suite.chainA, suite.chainB)
+
+ tc.malleate()
+
+ subjectClientStore := suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), subject)
+ substituteClientStore := suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), substitute)
+
+ updatedClient, err := subjectClientState.CheckSubstituteAndUpdateState(suite.chainA.GetContext(), suite.chainA.App.AppCodec(), subjectClientStore, substituteClientStore, substituteClientState, initialHeight)
+ suite.Require().Error(err)
+ suite.Require().Nil(updatedClient)
+ })
+ }
+}
+
+// to expire clients, time needs to be fast forwarded on both chainA and chainB.
+// this is to prevent headers from failing when attempting to update later.
+func (suite *TendermintTestSuite) TestCheckSubstituteAndUpdateState() {
+ testCases := []struct {
+ name string
+ AllowUpdateAfterExpiry bool
+ AllowUpdateAfterMisbehaviour bool
+ FreezeClient bool
+ ExpireClient bool
+ expPass bool
+ }{
+ {
+ name: "not allowed to be updated, not frozen or expired",
+ AllowUpdateAfterExpiry: false,
+ AllowUpdateAfterMisbehaviour: false,
+ FreezeClient: false,
+ ExpireClient: false,
+ expPass: false,
+ },
+ {
+ name: "not allowed to be updated, client is frozen",
+ AllowUpdateAfterExpiry: false,
+ AllowUpdateAfterMisbehaviour: false,
+ FreezeClient: true,
+ ExpireClient: false,
+ expPass: false,
+ },
+ {
+ name: "not allowed to be updated, client is expired",
+ AllowUpdateAfterExpiry: false,
+ AllowUpdateAfterMisbehaviour: false,
+ FreezeClient: false,
+ ExpireClient: true,
+ expPass: false,
+ },
+ {
+ name: "not allowed to be updated, client is frozen and expired",
+ AllowUpdateAfterExpiry: false,
+ AllowUpdateAfterMisbehaviour: false,
+ FreezeClient: true,
+ ExpireClient: true,
+ expPass: false,
+ },
+ {
+ name: "allowed to be updated only after misbehaviour, not frozen or expired",
+ AllowUpdateAfterExpiry: false,
+ AllowUpdateAfterMisbehaviour: true,
+ FreezeClient: false,
+ ExpireClient: false,
+ expPass: false,
+ },
+ {
+ name: "allowed to be updated only after misbehaviour, client is expired",
+ AllowUpdateAfterExpiry: false,
+ AllowUpdateAfterMisbehaviour: true,
+ FreezeClient: false,
+ ExpireClient: true,
+ expPass: false,
+ },
+ {
+ name: "allowed to be updated only after expiry, not frozen or expired",
+ AllowUpdateAfterExpiry: true,
+ AllowUpdateAfterMisbehaviour: false,
+ FreezeClient: false,
+ ExpireClient: false,
+ expPass: false,
+ },
+ {
+ name: "allowed to be updated only after expiry, client is frozen",
+ AllowUpdateAfterExpiry: true,
+ AllowUpdateAfterMisbehaviour: false,
+ FreezeClient: true,
+ ExpireClient: false,
+ expPass: false,
+ },
+ {
+ name: "PASS: allowed to be updated only after misbehaviour, client is frozen",
+ AllowUpdateAfterExpiry: false,
+ AllowUpdateAfterMisbehaviour: true,
+ FreezeClient: true,
+ ExpireClient: false,
+ expPass: true,
+ },
+ {
+ name: "PASS: allowed to be updated only after misbehaviour, client is frozen and expired",
+ AllowUpdateAfterExpiry: false,
+ AllowUpdateAfterMisbehaviour: true,
+ FreezeClient: true,
+ ExpireClient: true,
+ expPass: true,
+ },
+ {
+ name: "PASS: allowed to be updated only after expiry, client is expired",
+ AllowUpdateAfterExpiry: true,
+ AllowUpdateAfterMisbehaviour: false,
+ FreezeClient: false,
+ ExpireClient: true,
+ expPass: true,
+ },
+ {
+ name: "allowed to be updated only after expiry, client is frozen and expired",
+ AllowUpdateAfterExpiry: true,
+ AllowUpdateAfterMisbehaviour: false,
+ FreezeClient: true,
+ ExpireClient: true,
+ expPass: false,
+ },
+ {
+ name: "allowed to be updated after expiry and misbehaviour, not frozen or expired",
+ AllowUpdateAfterExpiry: true,
+ AllowUpdateAfterMisbehaviour: true,
+ FreezeClient: false,
+ ExpireClient: false,
+ expPass: false,
+ },
+ {
+ name: "PASS: allowed to be updated after expiry and misbehaviour, client is frozen",
+ AllowUpdateAfterExpiry: true,
+ AllowUpdateAfterMisbehaviour: true,
+ FreezeClient: true,
+ ExpireClient: false,
+ expPass: true,
+ },
+ {
+ name: "PASS: allowed to be updated after expiry and misbehaviour, client is expired",
+ AllowUpdateAfterExpiry: true,
+ AllowUpdateAfterMisbehaviour: true,
+ FreezeClient: false,
+ ExpireClient: true,
+ expPass: true,
+ },
+ {
+ name: "PASS: allowed to be updated after expiry and misbehaviour, client is frozen and expired",
+ AllowUpdateAfterExpiry: true,
+ AllowUpdateAfterMisbehaviour: true,
+ FreezeClient: true,
+ ExpireClient: true,
+ expPass: true,
+ },
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+
+		// for each test case, a header used for unexpiring clients and a header used
+		// for unfreezing a client are each tested to ensure that an unexpiring header
+		// cannot update a client when an unfreezing header is required.
+ suite.Run(tc.name, func() {
+
+ // start by testing unexpiring the client
+ suite.SetupTest() // reset
+
+ // construct subject using test case parameters
+ subject, _ := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+ subjectClientState := suite.chainA.GetClientState(subject).(*types.ClientState)
+ subjectClientState.AllowUpdateAfterExpiry = tc.AllowUpdateAfterExpiry
+ subjectClientState.AllowUpdateAfterMisbehaviour = tc.AllowUpdateAfterMisbehaviour
+
+ // apply freezing or expiry as determined by the test case
+ if tc.FreezeClient {
+ subjectClientState.FrozenHeight = frozenHeight
+ }
+ if tc.ExpireClient {
+ suite.chainA.ExpireClient(subjectClientState.TrustingPeriod)
+ suite.chainB.ExpireClient(subjectClientState.TrustingPeriod)
+ suite.coordinator.CommitBlock(suite.chainA, suite.chainB)
+ }
+
+ // construct the substitute to match the subject client
+ // NOTE: the substitute is explicitly created after the freezing or expiry occurs,
+ // primarily to prevent the substitute from becoming frozen. It also should be
+ // the natural flow of events in practice. The subject will become frozen/expired
+ // and a substitute will be created along with a governance proposal as a response
+
+ substitute, _ := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+ substituteClientState := suite.chainA.GetClientState(substitute).(*types.ClientState)
+ substituteClientState.AllowUpdateAfterExpiry = tc.AllowUpdateAfterExpiry
+ substituteClientState.AllowUpdateAfterMisbehaviour = tc.AllowUpdateAfterMisbehaviour
+ suite.chainA.App.IBCKeeper.ClientKeeper.SetClientState(suite.chainA.GetContext(), substitute, substituteClientState)
+
+ initialHeight := substituteClientState.GetLatestHeight()
+
+ // update substitute a few times
+ for i := 0; i < 3; i++ {
+ err := suite.coordinator.UpdateClient(suite.chainA, suite.chainB, substitute, exported.Tendermint)
+ suite.Require().NoError(err)
+ // skip a block
+ suite.coordinator.CommitBlock(suite.chainA, suite.chainB)
+ }
+
+ // get updated substitute
+ substituteClientState = suite.chainA.GetClientState(substitute).(*types.ClientState)
+
+ subjectClientStore := suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), subject)
+ substituteClientStore := suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), substitute)
+ updatedClient, err := subjectClientState.CheckSubstituteAndUpdateState(suite.chainA.GetContext(), suite.chainA.App.AppCodec(), subjectClientStore, substituteClientStore, substituteClientState, initialHeight)
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ suite.Require().Equal(clienttypes.ZeroHeight(), updatedClient.GetFrozenHeight())
+ } else {
+ suite.Require().Error(err)
+ suite.Require().Nil(updatedClient)
+ }
+
+ })
+ }
+}
+
+func (suite *TendermintTestSuite) TestIsMatchingClientState() {
+ var (
+ subject, substitute string
+ subjectClientState, substituteClientState *types.ClientState
+ )
+
+ testCases := []struct {
+ name string
+ malleate func()
+ expPass bool
+ }{
+ {
+ "matching clients", func() {
+ subjectClientState = suite.chainA.GetClientState(subject).(*types.ClientState)
+ substituteClientState = suite.chainA.GetClientState(substitute).(*types.ClientState)
+ }, true,
+ },
+ {
+ "matching, frozen height is not used in check for equality", func() {
+ subjectClientState.FrozenHeight = frozenHeight
+ substituteClientState.FrozenHeight = clienttypes.ZeroHeight()
+ }, true,
+ },
+ {
+ "matching, latest height is not used in check for equality", func() {
+ subjectClientState.LatestHeight = clienttypes.NewHeight(0, 10)
+				substituteClientState.LatestHeight = clienttypes.NewHeight(0, 20)
+ }, true,
+ },
+ {
+ "matching, chain id is different", func() {
+ subjectClientState.ChainId = "bitcoin"
+ substituteClientState.ChainId = "ethereum"
+ }, true,
+ },
+ {
+ "not matching, trusting period is different", func() {
+ subjectClientState.TrustingPeriod = time.Duration(time.Hour * 10)
+ substituteClientState.TrustingPeriod = time.Duration(time.Hour * 1)
+ }, false,
+ },
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+
+ suite.Run(tc.name, func() {
+ suite.SetupTest() // reset
+
+ subject, _ = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+ substitute, _ = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+
+ tc.malleate()
+
+ suite.Require().Equal(tc.expPass, types.IsMatchingClientState(*subjectClientState, *substituteClientState))
+
+ })
+ }
+}
diff --git a/light-clients/07-tendermint/types/store.go b/light-clients/07-tendermint/types/store.go
new file mode 100644
index 00000000..7d6a841b
--- /dev/null
+++ b/light-clients/07-tendermint/types/store.go
@@ -0,0 +1,96 @@
+package types
+
+import (
+ "strings"
+
+ "github.com/cosmos/cosmos-sdk/codec"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+)
+
+// KeyProcessedTime is appended to consensus state key to store the processed time
+var KeyProcessedTime = []byte("/processedTime")
+
+// SetConsensusState stores the consensus state at the given height.
+func SetConsensusState(clientStore sdk.KVStore, cdc codec.BinaryMarshaler, consensusState *ConsensusState, height exported.Height) {
+ key := host.ConsensusStateKey(height)
+ val := clienttypes.MustMarshalConsensusState(cdc, consensusState)
+ clientStore.Set(key, val)
+}
+
+// GetConsensusState retrieves the consensus state from the client prefixed
+// store. An error is returned if the consensus state does not exist.
+func GetConsensusState(store sdk.KVStore, cdc codec.BinaryMarshaler, height exported.Height) (*ConsensusState, error) {
+ bz := store.Get(host.ConsensusStateKey(height))
+ if bz == nil {
+ return nil, sdkerrors.Wrapf(
+ clienttypes.ErrConsensusStateNotFound,
+ "consensus state does not exist for height %s", height,
+ )
+ }
+
+ consensusStateI, err := clienttypes.UnmarshalConsensusState(cdc, bz)
+ if err != nil {
+ return nil, sdkerrors.Wrapf(clienttypes.ErrInvalidConsensus, "unmarshal error: %v", err)
+ }
+
+ consensusState, ok := consensusStateI.(*ConsensusState)
+ if !ok {
+ return nil, sdkerrors.Wrapf(
+ clienttypes.ErrInvalidConsensus,
+ "invalid consensus type %T, expected %T", consensusState, &ConsensusState{},
+ )
+ }
+
+ return consensusState, nil
+}
+
+// IterateProcessedTime iterates through the prefix store and applies the callback.
+// If the cb returns true, then iterator will close and stop.
+func IterateProcessedTime(store sdk.KVStore, cb func(key, val []byte) bool) {
+ iterator := sdk.KVStorePrefixIterator(store, []byte(host.KeyConsensusStatePrefix))
+
+ defer iterator.Close()
+ for ; iterator.Valid(); iterator.Next() {
+ keySplit := strings.Split(string(iterator.Key()), "/")
+		// processed time keys in the prefix store have the format: "consensusStates/{height}/processedTime"
+ if len(keySplit) != 3 || keySplit[2] != "processedTime" {
+ // ignore all consensus state keys
+ continue
+ }
+
+ if cb(iterator.Key(), iterator.Value()) {
+ break
+ }
+ }
+}
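+
+// Example usage (illustrative sketch only; clientStore is assumed to be the
+// client prefixed store): collect every processed time in the store, decoding
+// the big-endian value written by SetProcessedTime.
+//
+//	processedTimes := make(map[string]uint64)
+//	IterateProcessedTime(clientStore, func(key, val []byte) bool {
+//		processedTimes[string(key)] = sdk.BigEndianToUint64(val)
+//		return false // continue iterating
+//	})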
+
+// ProcessedTime Store code
+
+// ProcessedTimeKey returns the key under which the processed time will be stored in the client store.
+func ProcessedTimeKey(height exported.Height) []byte {
+ return append(host.ConsensusStateKey(height), KeyProcessedTime...)
+}
+
+// SetProcessedTime stores the time at which a header was processed and the corresponding consensus state was created.
+// This is useful when validating whether a packet has reached the specified delay period in the tendermint client's
+// verification functions
+func SetProcessedTime(clientStore sdk.KVStore, height exported.Height, timeNs uint64) {
+ key := ProcessedTimeKey(height)
+ val := sdk.Uint64ToBigEndian(timeNs)
+ clientStore.Set(key, val)
+}
+
+// GetProcessedTime gets the time (in nanoseconds) at which this chain received and processed a tendermint header.
+// This is used to validate that a received packet has passed the delay period.
+func GetProcessedTime(clientStore sdk.KVStore, height exported.Height) (uint64, bool) {
+ key := ProcessedTimeKey(height)
+ bz := clientStore.Get(key)
+ if bz == nil {
+ return 0, false
+ }
+ return sdk.BigEndianToUint64(bz), true
+}
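+
+// Example (illustrative sketch only): a delay-period check built on top of
+// GetProcessedTime. The clientStore, proofHeight, delayPeriod, and ctx variables
+// are assumptions for illustration and are not defined in this file.
+//
+//	processedTime, found := GetProcessedTime(clientStore, proofHeight)
+//	if !found {
+//		return fmt.Errorf("processed time not found for height %s", proofHeight)
+//	}
+//	validTime := processedTime + delayPeriod
+//	currentTime := uint64(ctx.BlockTime().UnixNano())
+//	if currentTime < validTime {
+//		return fmt.Errorf("delay period has not passed: current time %d < valid time %d", currentTime, validTime)
+//	}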
diff --git a/light-clients/07-tendermint/types/store_test.go b/light-clients/07-tendermint/types/store_test.go
new file mode 100644
index 00000000..b8badc09
--- /dev/null
+++ b/light-clients/07-tendermint/types/store_test.go
@@ -0,0 +1,113 @@
+package types_test
+
+import (
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+ host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ solomachinetypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/06-solomachine/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
+ ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+)
+
+func (suite *TendermintTestSuite) TestGetConsensusState() {
+ var (
+ height exported.Height
+ clientA string
+ )
+
+ testCases := []struct {
+ name string
+ malleate func()
+ expPass bool
+ }{
+ {
+ "success", func() {}, true,
+ },
+ {
+ "consensus state not found", func() {
+ // use height with no consensus state set
+ height = height.(clienttypes.Height).Increment()
+ }, false,
+ },
+ {
+ "not a consensus state interface", func() {
+ // marshal an empty client state and set as consensus state
+ store := suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), clientA)
+ clientStateBz := suite.chainA.App.IBCKeeper.ClientKeeper.MustMarshalClientState(&types.ClientState{})
+ store.Set(host.ConsensusStateKey(height), clientStateBz)
+ }, false,
+ },
+ {
+ "invalid consensus state (solomachine)", func() {
+ // marshal and set solomachine consensus state
+ store := suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), clientA)
+ consensusStateBz := suite.chainA.App.IBCKeeper.ClientKeeper.MustMarshalConsensusState(&solomachinetypes.ConsensusState{})
+ store.Set(host.ConsensusStateKey(height), consensusStateBz)
+ }, false,
+ },
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+
+ suite.Run(tc.name, func() {
+ suite.SetupTest()
+
+ clientA, _, _, _, _, _ = suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
+ clientState := suite.chainA.GetClientState(clientA)
+ height = clientState.GetLatestHeight()
+
+ tc.malleate() // change vars as necessary
+
+ store := suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), clientA)
+ consensusState, err := types.GetConsensusState(store, suite.chainA.Codec, height)
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ expConsensusState, found := suite.chainA.GetConsensusState(clientA, height)
+ suite.Require().True(found)
+ suite.Require().Equal(expConsensusState, consensusState)
+ } else {
+ suite.Require().Error(err)
+ suite.Require().Nil(consensusState)
+ }
+ })
+ }
+}
+
+func (suite *TendermintTestSuite) TestGetProcessedTime() {
+ // Verify ProcessedTime on CreateClient
+ // coordinator increments time before creating client
+ expectedTime := suite.chainA.CurrentHeader.Time.Add(ibctesting.TimeIncrement)
+
+ clientA, err := suite.coordinator.CreateClient(suite.chainA, suite.chainB, exported.Tendermint)
+ suite.Require().NoError(err)
+
+ clientState := suite.chainA.GetClientState(clientA)
+ height := clientState.GetLatestHeight()
+
+ store := suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), clientA)
+ actualTime, ok := types.GetProcessedTime(store, height)
+ suite.Require().True(ok, "could not retrieve processed time for stored consensus state")
+ suite.Require().Equal(uint64(expectedTime.UnixNano()), actualTime, "retrieved processed time is not expected value")
+
+ // Verify ProcessedTime on UpdateClient
+ // coordinator increments time before updating client
+ expectedTime = suite.chainA.CurrentHeader.Time.Add(ibctesting.TimeIncrement)
+
+ err = suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ suite.Require().NoError(err)
+
+ clientState = suite.chainA.GetClientState(clientA)
+ height = clientState.GetLatestHeight()
+
+ store = suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), clientA)
+ actualTime, ok = types.GetProcessedTime(store, height)
+ suite.Require().True(ok, "could not retrieve processed time for stored consensus state")
+ suite.Require().Equal(uint64(expectedTime.UnixNano()), actualTime, "retrieved processed time is not expected value")
+
+ // try to get processed time for height that doesn't exist in store
+ _, ok = types.GetProcessedTime(store, clienttypes.NewHeight(1, 1))
+ suite.Require().False(ok, "retrieved processed time for a non-existent consensus state")
+}
diff --git a/light-clients/07-tendermint/types/tendermint.pb.go b/light-clients/07-tendermint/types/tendermint.pb.go
new file mode 100644
index 00000000..aa53fb70
--- /dev/null
+++ b/light-clients/07-tendermint/types/tendermint.pb.go
@@ -0,0 +1,1917 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: ibcgo/lightclients/tendermint/v1/tendermint.proto
+
+package types
+
+import (
+ fmt "fmt"
+ _go "github.com/confio/ics23/go"
+ types "github.com/cosmos/ibc-go/core/02-client/types"
+ types1 "github.com/cosmos/ibc-go/core/23-commitment/types"
+ _ "github.com/gogo/protobuf/gogoproto"
+ proto "github.com/gogo/protobuf/proto"
+ github_com_gogo_protobuf_types "github.com/gogo/protobuf/types"
+ _ "github.com/golang/protobuf/ptypes/duration"
+ _ "github.com/golang/protobuf/ptypes/timestamp"
+ github_com_tendermint_tendermint_libs_bytes "github.com/tendermint/tendermint/libs/bytes"
+ types2 "github.com/tendermint/tendermint/proto/tendermint/types"
+ io "io"
+ math "math"
+ math_bits "math/bits"
+ time "time"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+var _ = time.Kitchen
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+// ClientState from Tendermint tracks the current validator set, latest height,
+// and a possible frozen height.
+type ClientState struct {
+ ChainId string `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"`
+ TrustLevel Fraction `protobuf:"bytes,2,opt,name=trust_level,json=trustLevel,proto3" json:"trust_level" yaml:"trust_level"`
+	// duration of the period since the LatestTimestamp during which the
+ // submitted headers are valid for upgrade
+ TrustingPeriod time.Duration `protobuf:"bytes,3,opt,name=trusting_period,json=trustingPeriod,proto3,stdduration" json:"trusting_period" yaml:"trusting_period"`
+ // duration of the staking unbonding period
+ UnbondingPeriod time.Duration `protobuf:"bytes,4,opt,name=unbonding_period,json=unbondingPeriod,proto3,stdduration" json:"unbonding_period" yaml:"unbonding_period"`
+ // defines how much new (untrusted) header's Time can drift into the future.
+ MaxClockDrift time.Duration `protobuf:"bytes,5,opt,name=max_clock_drift,json=maxClockDrift,proto3,stdduration" json:"max_clock_drift" yaml:"max_clock_drift"`
+ // Block height when the client was frozen due to a misbehaviour
+ FrozenHeight types.Height `protobuf:"bytes,6,opt,name=frozen_height,json=frozenHeight,proto3" json:"frozen_height" yaml:"frozen_height"`
+ // Latest height the client was updated to
+ LatestHeight types.Height `protobuf:"bytes,7,opt,name=latest_height,json=latestHeight,proto3" json:"latest_height" yaml:"latest_height"`
+ // Proof specifications used in verifying counterparty state
+ ProofSpecs []*_go.ProofSpec `protobuf:"bytes,8,rep,name=proof_specs,json=proofSpecs,proto3" json:"proof_specs,omitempty" yaml:"proof_specs"`
+ // Path at which next upgraded client will be committed.
+ // Each element corresponds to the key for a single CommitmentProof in the
+	// chained proof. NOTE: ClientState must be stored under
+	// `{upgradePath}/{upgradeHeight}/clientState`. ConsensusState must be stored
+	// under `{upgradePath}/{upgradeHeight}/consensusState`. For SDK chains using
+ // the default upgrade module, upgrade_path should be []string{"upgrade",
+ // "upgradedIBCState"}`
+ UpgradePath []string `protobuf:"bytes,9,rep,name=upgrade_path,json=upgradePath,proto3" json:"upgrade_path,omitempty" yaml:"upgrade_path"`
+ // This flag, when set to true, will allow governance to recover a client
+ // which has expired
+ AllowUpdateAfterExpiry bool `protobuf:"varint,10,opt,name=allow_update_after_expiry,json=allowUpdateAfterExpiry,proto3" json:"allow_update_after_expiry,omitempty" yaml:"allow_update_after_expiry"`
+ // This flag, when set to true, will allow governance to unfreeze a client
+ // whose chain has experienced a misbehaviour event
+ AllowUpdateAfterMisbehaviour bool `protobuf:"varint,11,opt,name=allow_update_after_misbehaviour,json=allowUpdateAfterMisbehaviour,proto3" json:"allow_update_after_misbehaviour,omitempty" yaml:"allow_update_after_misbehaviour"`
+}
+
+func (m *ClientState) Reset() { *m = ClientState{} }
+func (m *ClientState) String() string { return proto.CompactTextString(m) }
+func (*ClientState) ProtoMessage() {}
+func (*ClientState) Descriptor() ([]byte, []int) {
+ return fileDescriptor_868940ee8c1cf959, []int{0}
+}
+func (m *ClientState) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ClientState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_ClientState.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *ClientState) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ClientState.Merge(m, src)
+}
+func (m *ClientState) XXX_Size() int {
+ return m.Size()
+}
+func (m *ClientState) XXX_DiscardUnknown() {
+ xxx_messageInfo_ClientState.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ClientState proto.InternalMessageInfo
+
+// ConsensusState defines the consensus state from Tendermint.
+type ConsensusState struct {
+ // timestamp that corresponds to the block height in which the ConsensusState
+ // was stored.
+ Timestamp time.Time `protobuf:"bytes,1,opt,name=timestamp,proto3,stdtime" json:"timestamp"`
+ // commitment root (i.e app hash)
+ Root types1.MerkleRoot `protobuf:"bytes,2,opt,name=root,proto3" json:"root"`
+ NextValidatorsHash github_com_tendermint_tendermint_libs_bytes.HexBytes `protobuf:"bytes,3,opt,name=next_validators_hash,json=nextValidatorsHash,proto3,casttype=github.com/tendermint/tendermint/libs/bytes.HexBytes" json:"next_validators_hash,omitempty" yaml:"next_validators_hash"`
+}
+
+func (m *ConsensusState) Reset() { *m = ConsensusState{} }
+func (m *ConsensusState) String() string { return proto.CompactTextString(m) }
+func (*ConsensusState) ProtoMessage() {}
+func (*ConsensusState) Descriptor() ([]byte, []int) {
+ return fileDescriptor_868940ee8c1cf959, []int{1}
+}
+func (m *ConsensusState) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ConsensusState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_ConsensusState.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *ConsensusState) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ConsensusState.Merge(m, src)
+}
+func (m *ConsensusState) XXX_Size() int {
+ return m.Size()
+}
+func (m *ConsensusState) XXX_DiscardUnknown() {
+ xxx_messageInfo_ConsensusState.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ConsensusState proto.InternalMessageInfo
+
+// Misbehaviour is a wrapper over two conflicting Headers
+// that implements Misbehaviour interface expected by ICS-02
+type Misbehaviour struct {
+ ClientId string `protobuf:"bytes,1,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty" yaml:"client_id"`
+ Header1 *Header `protobuf:"bytes,2,opt,name=header_1,json=header1,proto3" json:"header_1,omitempty" yaml:"header_1"`
+ Header2 *Header `protobuf:"bytes,3,opt,name=header_2,json=header2,proto3" json:"header_2,omitempty" yaml:"header_2"`
+}
+
+func (m *Misbehaviour) Reset() { *m = Misbehaviour{} }
+func (m *Misbehaviour) String() string { return proto.CompactTextString(m) }
+func (*Misbehaviour) ProtoMessage() {}
+func (*Misbehaviour) Descriptor() ([]byte, []int) {
+ return fileDescriptor_868940ee8c1cf959, []int{2}
+}
+func (m *Misbehaviour) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Misbehaviour) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_Misbehaviour.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *Misbehaviour) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Misbehaviour.Merge(m, src)
+}
+func (m *Misbehaviour) XXX_Size() int {
+ return m.Size()
+}
+func (m *Misbehaviour) XXX_DiscardUnknown() {
+ xxx_messageInfo_Misbehaviour.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Misbehaviour proto.InternalMessageInfo
+
+// Header defines the Tendermint client consensus Header.
+// It encapsulates all the information necessary to update from a trusted
+// Tendermint ConsensusState. The inclusion of TrustedHeight and
+// TrustedValidators allows this update to process correctly, so long as the
+// ConsensusState for the TrustedHeight exists, this removes race conditions
+// among relayers. The SignedHeader and ValidatorSet are the new untrusted update
+// fields for the client. The TrustedHeight is the height of a stored
+// ConsensusState on the client that will be used to verify the new untrusted
+// header. The Trusted ConsensusState must be within the unbonding period of
+// current time in order to correctly verify, and the TrustedValidators must
+// hash to TrustedConsensusState.NextValidatorsHash since that is the last
+// trusted validator set at the TrustedHeight.
+type Header struct {
+ *types2.SignedHeader `protobuf:"bytes,1,opt,name=signed_header,json=signedHeader,proto3,embedded=signed_header" json:"signed_header,omitempty" yaml:"signed_header"`
+ ValidatorSet *types2.ValidatorSet `protobuf:"bytes,2,opt,name=validator_set,json=validatorSet,proto3" json:"validator_set,omitempty" yaml:"validator_set"`
+ TrustedHeight types.Height `protobuf:"bytes,3,opt,name=trusted_height,json=trustedHeight,proto3" json:"trusted_height" yaml:"trusted_height"`
+ TrustedValidators *types2.ValidatorSet `protobuf:"bytes,4,opt,name=trusted_validators,json=trustedValidators,proto3" json:"trusted_validators,omitempty" yaml:"trusted_validators"`
+}
+
+func (m *Header) Reset() { *m = Header{} }
+func (m *Header) String() string { return proto.CompactTextString(m) }
+func (*Header) ProtoMessage() {}
+func (*Header) Descriptor() ([]byte, []int) {
+ return fileDescriptor_868940ee8c1cf959, []int{3}
+}
+func (m *Header) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Header) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_Header.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *Header) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Header.Merge(m, src)
+}
+func (m *Header) XXX_Size() int {
+ return m.Size()
+}
+func (m *Header) XXX_DiscardUnknown() {
+ xxx_messageInfo_Header.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Header proto.InternalMessageInfo
+
+func (m *Header) GetValidatorSet() *types2.ValidatorSet {
+ if m != nil {
+ return m.ValidatorSet
+ }
+ return nil
+}
+
+func (m *Header) GetTrustedHeight() types.Height {
+ if m != nil {
+ return m.TrustedHeight
+ }
+ return types.Height{}
+}
+
+func (m *Header) GetTrustedValidators() *types2.ValidatorSet {
+ if m != nil {
+ return m.TrustedValidators
+ }
+ return nil
+}
+
+// Fraction defines the protobuf message type for tmmath.Fraction that only
+// supports positive values.
+type Fraction struct {
+ Numerator uint64 `protobuf:"varint,1,opt,name=numerator,proto3" json:"numerator,omitempty"`
+ Denominator uint64 `protobuf:"varint,2,opt,name=denominator,proto3" json:"denominator,omitempty"`
+}
+
+func (m *Fraction) Reset() { *m = Fraction{} }
+func (m *Fraction) String() string { return proto.CompactTextString(m) }
+func (*Fraction) ProtoMessage() {}
+func (*Fraction) Descriptor() ([]byte, []int) {
+ return fileDescriptor_868940ee8c1cf959, []int{4}
+}
+func (m *Fraction) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Fraction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_Fraction.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *Fraction) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Fraction.Merge(m, src)
+}
+func (m *Fraction) XXX_Size() int {
+ return m.Size()
+}
+func (m *Fraction) XXX_DiscardUnknown() {
+ xxx_messageInfo_Fraction.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Fraction proto.InternalMessageInfo
+
+func (m *Fraction) GetNumerator() uint64 {
+ if m != nil {
+ return m.Numerator
+ }
+ return 0
+}
+
+func (m *Fraction) GetDenominator() uint64 {
+ if m != nil {
+ return m.Denominator
+ }
+ return 0
+}
+
+func init() {
+ proto.RegisterType((*ClientState)(nil), "ibcgo.lightclients.tendermint.v1.ClientState")
+ proto.RegisterType((*ConsensusState)(nil), "ibcgo.lightclients.tendermint.v1.ConsensusState")
+ proto.RegisterType((*Misbehaviour)(nil), "ibcgo.lightclients.tendermint.v1.Misbehaviour")
+ proto.RegisterType((*Header)(nil), "ibcgo.lightclients.tendermint.v1.Header")
+ proto.RegisterType((*Fraction)(nil), "ibcgo.lightclients.tendermint.v1.Fraction")
+}
+
+func init() {
+ proto.RegisterFile("ibcgo/lightclients/tendermint/v1/tendermint.proto", fileDescriptor_868940ee8c1cf959)
+}
+
+var fileDescriptor_868940ee8c1cf959 = []byte{
+ // 1080 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x56, 0x41, 0x6f, 0xe3, 0x44,
+ 0x14, 0x6e, 0xda, 0xb2, 0x4d, 0x26, 0xe9, 0xb6, 0x78, 0x4b, 0x37, 0x2d, 0xd9, 0x38, 0x98, 0x15,
+ 0x0a, 0x2b, 0xd5, 0x26, 0x59, 0x24, 0xa4, 0x1e, 0x90, 0x70, 0x17, 0xd4, 0x22, 0x56, 0xaa, 0xdc,
+ 0x05, 0x24, 0x24, 0x64, 0x4d, 0xec, 0x89, 0x3d, 0x5a, 0xdb, 0x63, 0x3c, 0x93, 0xd0, 0xf2, 0x0b,
+ 0xe0, 0xb6, 0xdc, 0x38, 0x70, 0xe0, 0xc4, 0x6f, 0xd9, 0x63, 0x8f, 0x9c, 0x0c, 0x6a, 0xef, 0x1c,
+ 0x72, 0xe4, 0x84, 0x3c, 0x33, 0x76, 0x26, 0x6d, 0x57, 0x65, 0xb9, 0x44, 0xf3, 0xde, 0xfb, 0xde,
+ 0xf7, 0x65, 0xde, 0xbc, 0x79, 0x63, 0x30, 0xc0, 0x23, 0x2f, 0x20, 0x56, 0x84, 0x83, 0x90, 0x79,
+ 0x11, 0x46, 0x09, 0xa3, 0x16, 0x43, 0x89, 0x8f, 0xb2, 0x18, 0x27, 0xcc, 0x9a, 0x0e, 0x14, 0xcb,
+ 0x4c, 0x33, 0xc2, 0x88, 0xd6, 0xe3, 0x29, 0xa6, 0x9a, 0x62, 0x2a, 0xa0, 0xe9, 0x60, 0xb7, 0xa7,
+ 0x30, 0xb0, 0xb3, 0x14, 0x51, 0x6b, 0x0a, 0x23, 0xec, 0x43, 0x46, 0x32, 0xc1, 0xb1, 0xdb, 0xb9,
+ 0x86, 0xe0, 0xbf, 0x32, 0x7a, 0xcf, 0x23, 0xc9, 0x18, 0x13, 0x2b, 0xcd, 0x08, 0x19, 0x97, 0xce,
+ 0x6e, 0x40, 0x48, 0x10, 0x21, 0x8b, 0x5b, 0xa3, 0xc9, 0xd8, 0xf2, 0x27, 0x19, 0x64, 0x98, 0x24,
+ 0x32, 0xae, 0x5f, 0x8d, 0x33, 0x1c, 0x23, 0xca, 0x60, 0x9c, 0x4a, 0xc0, 0x3b, 0x62, 0xab, 0x1e,
+ 0xc9, 0x90, 0x25, 0xfe, 0x77, 0xb1, 0x3d, 0xb1, 0x92, 0x90, 0xf7, 0x55, 0x08, 0x89, 0x63, 0xcc,
+ 0xe2, 0x12, 0x56, 0x59, 0x12, 0xba, 0x15, 0x90, 0x80, 0xf0, 0xa5, 0x55, 0xac, 0x84, 0xd7, 0xf8,
+ 0x7b, 0x0d, 0x34, 0x0f, 0x38, 0xe3, 0x09, 0x83, 0x0c, 0x69, 0x3b, 0xa0, 0xee, 0x85, 0x10, 0x27,
+ 0x2e, 0xf6, 0xdb, 0xb5, 0x5e, 0xad, 0xdf, 0x70, 0xd6, 0xb8, 0x7d, 0xe4, 0x6b, 0x01, 0x68, 0xb2,
+ 0x6c, 0x42, 0x99, 0x1b, 0xa1, 0x29, 0x8a, 0xda, 0xcb, 0xbd, 0x5a, 0xbf, 0x39, 0x7c, 0x64, 0xde,
+ 0x56, 0x5c, 0xf3, 0xb3, 0x0c, 0x7a, 0xc5, 0xb6, 0xed, 0xdd, 0x97, 0xb9, 0xbe, 0x34, 0xcb, 0x75,
+ 0xed, 0x0c, 0xc6, 0xd1, 0xbe, 0xa1, 0x90, 0x19, 0x0e, 0xe0, 0xd6, 0x17, 0x85, 0xa1, 0x8d, 0xc1,
+ 0x06, 0xb7, 0x70, 0x12, 0xb8, 0x29, 0xca, 0x30, 0xf1, 0xdb, 0x2b, 0x5c, 0x6c, 0xc7, 0x14, 0x25,
+ 0x33, 0xcb, 0x92, 0x99, 0x4f, 0x64, 0x49, 0x6d, 0x43, 0x72, 0x6f, 0x2b, 0xdc, 0xf3, 0x7c, 0xe3,
+ 0x97, 0x3f, 0xf5, 0x9a, 0x73, 0xb7, 0xf4, 0x1e, 0x73, 0xa7, 0x86, 0xc1, 0xe6, 0x24, 0x19, 0x91,
+ 0xc4, 0x57, 0x84, 0x56, 0x6f, 0x13, 0x7a, 0x57, 0x0a, 0xdd, 0x17, 0x42, 0x57, 0x09, 0x84, 0xd2,
+ 0x46, 0xe5, 0x96, 0x52, 0x08, 0x6c, 0xc4, 0xf0, 0xd4, 0xf5, 0x22, 0xe2, 0x3d, 0x77, 0xfd, 0x0c,
+ 0x8f, 0x59, 0xfb, 0x8d, 0xd7, 0xdc, 0xd2, 0x95, 0x7c, 0x21, 0xb4, 0x1e, 0xc3, 0xd3, 0x83, 0xc2,
+ 0xf9, 0xa4, 0xf0, 0x69, 0x2e, 0x58, 0x1f, 0x67, 0xe4, 0x07, 0x94, 0xb8, 0x21, 0x2a, 0x0e, 0xa4,
+ 0x7d, 0x87, 0x8b, 0x74, 0xe4, 0x21, 0x15, 0x6d, 0x62, 0xca, 0xfe, 0x99, 0x0e, 0xcc, 0x43, 0x8e,
+ 0xb1, 0x3b, 0x52, 0x67, 0x4b, 0xe8, 0x2c, 0x10, 0x18, 0x4e, 0x4b, 0xd8, 0x02, 0x5b, 0x08, 0x44,
+ 0x90, 0x21, 0xca, 0x4a, 0x81, 0xb5, 0xd7, 0x17, 0x58, 0x20, 0x30, 0x9c, 0x96, 0xb0, 0xa5, 0xc0,
+ 0x11, 0x68, 0xf2, 0x4b, 0xe4, 0xd2, 0x14, 0x79, 0xb4, 0x5d, 0xef, 0xad, 0xf4, 0x9b, 0xc3, 0x4d,
+ 0x13, 0x7b, 0x74, 0xf8, 0xd8, 0x3c, 0x2e, 0x22, 0x27, 0x29, 0xf2, 0xec, 0xed, 0x79, 0x1b, 0x29,
+ 0x70, 0xc3, 0x01, 0x69, 0x09, 0xa1, 0xda, 0x3e, 0x68, 0x4d, 0xd2, 0x20, 0x83, 0x3e, 0x72, 0x53,
+ 0xc8, 0xc2, 0x76, 0xa3, 0xb7, 0xd2, 0x6f, 0xd8, 0xf7, 0x67, 0xb9, 0x7e, 0x4f, 0x9e, 0x9d, 0x12,
+ 0x35, 0x9c, 0xa6, 0x34, 0x8f, 0x21, 0x0b, 0x35, 0x17, 0xec, 0xc0, 0x28, 0x22, 0xdf, 0xbb, 0x93,
+ 0xd4, 0x87, 0x0c, 0xb9, 0x70, 0xcc, 0x50, 0xe6, 0xa2, 0xd3, 0x14, 0x67, 0x67, 0x6d, 0xd0, 0xab,
+ 0xf5, 0xeb, 0xf6, 0xc3, 0x59, 0xae, 0xf7, 0x04, 0xd1, 0x2b, 0xa1, 0x86, 0xb3, 0xcd, 0x63, 0x5f,
+ 0xf2, 0xd0, 0x27, 0x45, 0xe4, 0x53, 0x1e, 0xd0, 0xbe, 0x03, 0xfa, 0x0d, 0x59, 0x31, 0xa6, 0x23,
+ 0x14, 0xc2, 0x29, 0x26, 0x93, 0xac, 0xdd, 0xe4, 0x32, 0x8f, 0x66, 0xb9, 0xfe, 0xde, 0x2b, 0x65,
+ 0xd4, 0x04, 0xc3, 0xe9, 0x5c, 0x15, 0x7b, 0xaa, 0x84, 0xf7, 0x57, 0x7f, 0xfc, 0x4d, 0x5f, 0x32,
+ 0x7e, 0x5f, 0x06, 0x77, 0x0f, 0x48, 0x42, 0x51, 0x42, 0x27, 0x54, 0xdc, 0x79, 0x1b, 0x34, 0xaa,
+ 0xd1, 0xc3, 0x2f, 0x7d, 0x73, 0xb8, 0x7b, 0xad, 0x2d, 0x9f, 0x95, 0x08, 0xbb, 0x5e, 0x1c, 0xe7,
+ 0x8b, 0xa2, 0xfb, 0xe6, 0x69, 0xda, 0xc7, 0x60, 0x35, 0x23, 0x84, 0xc9, 0xa9, 0xf0, 0x70, 0xa1,
+ 0x1f, 0xe6, 0x93, 0x68, 0x3a, 0x30, 0x9f, 0xa2, 0xec, 0x79, 0x84, 0x1c, 0x42, 0x98, 0xbd, 0x5a,
+ 0x10, 0x39, 0x3c, 0x4f, 0xfb, 0xa9, 0x06, 0xb6, 0x12, 0x74, 0xca, 0xdc, 0x6a, 0xf0, 0x52, 0x37,
+ 0x84, 0x34, 0xe4, 0x37, 0xbf, 0x65, 0x7f, 0x3d, 0xcb, 0xf5, 0xb7, 0x45, 0x15, 0x6e, 0x42, 0x19,
+ 0xff, 0xe4, 0xfa, 0x87, 0x01, 0x66, 0xe1, 0x64, 0x54, 0xc8, 0xa9, 0x0f, 0x82, 0xb2, 0x8c, 0xf0,
+ 0x88, 0x5a, 0xa3, 0x33, 0x86, 0xa8, 0x79, 0x88, 0x4e, 0xed, 0x62, 0xe1, 0x68, 0x05, 0xdd, 0x57,
+ 0x15, 0xdb, 0x21, 0xa4, 0xa1, 0x2c, 0xd4, 0xcf, 0xcb, 0xa0, 0xa5, 0xd6, 0x4f, 0x1b, 0x80, 0x86,
+ 0x68, 0xed, 0x6a, 0x36, 0xda, 0x5b, 0xb3, 0x5c, 0xdf, 0x14, 0x7f, 0xab, 0x0a, 0x19, 0x4e, 0x5d,
+ 0xac, 0x8f, 0x7c, 0xcd, 0x03, 0xf5, 0x10, 0x41, 0x1f, 0x65, 0xee, 0x40, 0x56, 0xa6, 0x7f, 0xfb,
+ 0xbc, 0x3c, 0xe4, 0x19, 0x76, 0xf7, 0x22, 0xd7, 0xd7, 0xc4, 0x7a, 0x30, 0xcb, 0xf5, 0x0d, 0x21,
+ 0x53, 0xd2, 0x19, 0xce, 0x9a, 0x58, 0x0e, 0x14, 0x91, 0xa1, 0x9c, 0x93, 0xff, 0x4b, 0x64, 0x78,
+ 0x4d, 0x64, 0x58, 0x89, 0x0c, 0x65, 0x4d, 0x7e, 0x5d, 0x01, 0x77, 0x04, 0x5a, 0x83, 0x60, 0x9d,
+ 0xe2, 0x20, 0x41, 0xbe, 0x2b, 0x20, 0xb2, 0x71, 0xba, 0xaa, 0x8e, 0x78, 0x22, 0x4f, 0x38, 0x4c,
+ 0x0a, 0x76, 0xce, 0x73, 0xbd, 0x36, 0x9f, 0x05, 0x0b, 0x14, 0x86, 0xd3, 0xa2, 0x0a, 0x56, 0xfb,
+ 0x16, 0xac, 0x57, 0xe7, 0xec, 0x52, 0x54, 0x36, 0xd7, 0x0d, 0x12, 0xd5, 0x01, 0x9e, 0x20, 0x66,
+ 0xb7, 0xe7, 0xf4, 0x0b, 0xe9, 0x86, 0xd3, 0x9a, 0x2a, 0x38, 0x6d, 0x04, 0xc4, 0x83, 0xc0, 0xf5,
+ 0xf9, 0x30, 0x5b, 0xf9, 0x0f, 0xc3, 0xec, 0x81, 0x1c, 0x66, 0x6f, 0x29, 0x0f, 0x4d, 0xc5, 0x60,
+ 0x38, 0xeb, 0xd2, 0x21, 0xc7, 0x59, 0x04, 0xb4, 0x12, 0x31, 0x6f, 0x59, 0xf9, 0xc8, 0xdc, 0xb6,
+ 0x8f, 0x07, 0xb3, 0x5c, 0xdf, 0x59, 0x54, 0x99, 0x73, 0x18, 0xce, 0x9b, 0xd2, 0x39, 0x6f, 0x5e,
+ 0xe3, 0x73, 0x50, 0x2f, 0x1f, 0x5b, 0xad, 0x03, 0x1a, 0xc9, 0x24, 0x46, 0x59, 0x11, 0xe1, 0x67,
+ 0xb3, 0xea, 0xcc, 0x1d, 0x5a, 0x0f, 0x34, 0x7d, 0x94, 0x90, 0x18, 0x27, 0x3c, 0xbe, 0xcc, 0xe3,
+ 0xaa, 0xcb, 0x7e, 0xf6, 0xf2, 0xa2, 0x5b, 0x3b, 0xbf, 0xe8, 0xd6, 0xfe, 0xba, 0xe8, 0xd6, 0x5e,
+ 0x5c, 0x76, 0x97, 0xce, 0x2f, 0xbb, 0x4b, 0x7f, 0x5c, 0x76, 0x97, 0xbe, 0xd9, 0x57, 0x2e, 0x9a,
+ 0x47, 0x68, 0x4c, 0xa8, 0x85, 0x47, 0xde, 0x5e, 0xf9, 0x51, 0xb6, 0x57, 0x7e, 0x95, 0x7d, 0xf0,
+ 0xd1, 0xde, 0xd5, 0x8f, 0xa6, 0xd1, 0x1d, 0x3e, 0x4f, 0x1e, 0xff, 0x1b, 0x00, 0x00, 0xff, 0xff,
+ 0xf8, 0x0c, 0x1b, 0x17, 0xc6, 0x09, 0x00, 0x00,
+}
+
+func (m *ClientState) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ClientState) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ClientState) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.AllowUpdateAfterMisbehaviour {
+ i--
+ if m.AllowUpdateAfterMisbehaviour {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x58
+ }
+ if m.AllowUpdateAfterExpiry {
+ i--
+ if m.AllowUpdateAfterExpiry {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x50
+ }
+ if len(m.UpgradePath) > 0 {
+ for iNdEx := len(m.UpgradePath) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.UpgradePath[iNdEx])
+ copy(dAtA[i:], m.UpgradePath[iNdEx])
+ i = encodeVarintTendermint(dAtA, i, uint64(len(m.UpgradePath[iNdEx])))
+ i--
+ dAtA[i] = 0x4a
+ }
+ }
+ if len(m.ProofSpecs) > 0 {
+ for iNdEx := len(m.ProofSpecs) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.ProofSpecs[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTendermint(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x42
+ }
+ }
+ {
+ size, err := m.LatestHeight.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTendermint(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x3a
+ {
+ size, err := m.FrozenHeight.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTendermint(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x32
+ n3, err3 := github_com_gogo_protobuf_types.StdDurationMarshalTo(m.MaxClockDrift, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(m.MaxClockDrift):])
+ if err3 != nil {
+ return 0, err3
+ }
+ i -= n3
+ i = encodeVarintTendermint(dAtA, i, uint64(n3))
+ i--
+ dAtA[i] = 0x2a
+ n4, err4 := github_com_gogo_protobuf_types.StdDurationMarshalTo(m.UnbondingPeriod, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(m.UnbondingPeriod):])
+ if err4 != nil {
+ return 0, err4
+ }
+ i -= n4
+ i = encodeVarintTendermint(dAtA, i, uint64(n4))
+ i--
+ dAtA[i] = 0x22
+ n5, err5 := github_com_gogo_protobuf_types.StdDurationMarshalTo(m.TrustingPeriod, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(m.TrustingPeriod):])
+ if err5 != nil {
+ return 0, err5
+ }
+ i -= n5
+ i = encodeVarintTendermint(dAtA, i, uint64(n5))
+ i--
+ dAtA[i] = 0x1a
+ {
+ size, err := m.TrustLevel.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTendermint(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ if len(m.ChainId) > 0 {
+ i -= len(m.ChainId)
+ copy(dAtA[i:], m.ChainId)
+ i = encodeVarintTendermint(dAtA, i, uint64(len(m.ChainId)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ConsensusState) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ConsensusState) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ConsensusState) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.NextValidatorsHash) > 0 {
+ i -= len(m.NextValidatorsHash)
+ copy(dAtA[i:], m.NextValidatorsHash)
+ i = encodeVarintTendermint(dAtA, i, uint64(len(m.NextValidatorsHash)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ {
+ size, err := m.Root.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTendermint(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ n8, err8 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):])
+ if err8 != nil {
+ return 0, err8
+ }
+ i -= n8
+ i = encodeVarintTendermint(dAtA, i, uint64(n8))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *Misbehaviour) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Misbehaviour) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Misbehaviour) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Header2 != nil {
+ {
+ size, err := m.Header2.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTendermint(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.Header1 != nil {
+ {
+ size, err := m.Header1.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTendermint(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.ClientId) > 0 {
+ i -= len(m.ClientId)
+ copy(dAtA[i:], m.ClientId)
+ i = encodeVarintTendermint(dAtA, i, uint64(len(m.ClientId)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Header) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Header) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Header) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.TrustedValidators != nil {
+ {
+ size, err := m.TrustedValidators.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTendermint(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ {
+ size, err := m.TrustedHeight.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTendermint(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ if m.ValidatorSet != nil {
+ {
+ size, err := m.ValidatorSet.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTendermint(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.SignedHeader != nil {
+ {
+ size, err := m.SignedHeader.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintTendermint(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Fraction) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Fraction) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Fraction) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Denominator != 0 {
+ i = encodeVarintTendermint(dAtA, i, uint64(m.Denominator))
+ i--
+ dAtA[i] = 0x10
+ }
+ if m.Numerator != 0 {
+ i = encodeVarintTendermint(dAtA, i, uint64(m.Numerator))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintTendermint(dAtA []byte, offset int, v uint64) int {
+ offset -= sovTendermint(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *ClientState) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.ChainId)
+ if l > 0 {
+ n += 1 + l + sovTendermint(uint64(l))
+ }
+ l = m.TrustLevel.Size()
+ n += 1 + l + sovTendermint(uint64(l))
+ l = github_com_gogo_protobuf_types.SizeOfStdDuration(m.TrustingPeriod)
+ n += 1 + l + sovTendermint(uint64(l))
+ l = github_com_gogo_protobuf_types.SizeOfStdDuration(m.UnbondingPeriod)
+ n += 1 + l + sovTendermint(uint64(l))
+ l = github_com_gogo_protobuf_types.SizeOfStdDuration(m.MaxClockDrift)
+ n += 1 + l + sovTendermint(uint64(l))
+ l = m.FrozenHeight.Size()
+ n += 1 + l + sovTendermint(uint64(l))
+ l = m.LatestHeight.Size()
+ n += 1 + l + sovTendermint(uint64(l))
+ if len(m.ProofSpecs) > 0 {
+ for _, e := range m.ProofSpecs {
+ l = e.Size()
+ n += 1 + l + sovTendermint(uint64(l))
+ }
+ }
+ if len(m.UpgradePath) > 0 {
+ for _, s := range m.UpgradePath {
+ l = len(s)
+ n += 1 + l + sovTendermint(uint64(l))
+ }
+ }
+ if m.AllowUpdateAfterExpiry {
+ n += 2
+ }
+ if m.AllowUpdateAfterMisbehaviour {
+ n += 2
+ }
+ return n
+}
+
+func (m *ConsensusState) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp)
+ n += 1 + l + sovTendermint(uint64(l))
+ l = m.Root.Size()
+ n += 1 + l + sovTendermint(uint64(l))
+ l = len(m.NextValidatorsHash)
+ if l > 0 {
+ n += 1 + l + sovTendermint(uint64(l))
+ }
+ return n
+}
+
+func (m *Misbehaviour) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.ClientId)
+ if l > 0 {
+ n += 1 + l + sovTendermint(uint64(l))
+ }
+ if m.Header1 != nil {
+ l = m.Header1.Size()
+ n += 1 + l + sovTendermint(uint64(l))
+ }
+ if m.Header2 != nil {
+ l = m.Header2.Size()
+ n += 1 + l + sovTendermint(uint64(l))
+ }
+ return n
+}
+
+func (m *Header) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.SignedHeader != nil {
+ l = m.SignedHeader.Size()
+ n += 1 + l + sovTendermint(uint64(l))
+ }
+ if m.ValidatorSet != nil {
+ l = m.ValidatorSet.Size()
+ n += 1 + l + sovTendermint(uint64(l))
+ }
+ l = m.TrustedHeight.Size()
+ n += 1 + l + sovTendermint(uint64(l))
+ if m.TrustedValidators != nil {
+ l = m.TrustedValidators.Size()
+ n += 1 + l + sovTendermint(uint64(l))
+ }
+ return n
+}
+
+func (m *Fraction) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Numerator != 0 {
+ n += 1 + sovTendermint(uint64(m.Numerator))
+ }
+ if m.Denominator != 0 {
+ n += 1 + sovTendermint(uint64(m.Denominator))
+ }
+ return n
+}
+
+func sovTendermint(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozTendermint(x uint64) (n int) {
+ return sovTendermint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
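The size helpers above, together with encodeVarintTendermint, implement standard protobuf base-128 varints: sovTendermint returns the encoded byte length, sozTendermint the zig-zag (signed) variant, and encodeVarintTendermint writes the value backwards into the buffer. A minimal standalone sketch of the same encoding, independent of the generated code and included only for reference:

package main

import (
	"fmt"
	"math/bits"
)

// varintSize mirrors sovTendermint: the number of 7-bit groups needed for x.
func varintSize(x uint64) int { return (bits.Len64(x|1) + 6) / 7 }

// encodeVarint writes x forwards (the generated writer fills its buffer backwards).
func encodeVarint(x uint64) []byte {
	var out []byte
	for x >= 1<<7 {
		out = append(out, uint8(x&0x7f|0x80))
		x >>= 7
	}
	return append(out, uint8(x))
}

func main() {
	// 300 needs two 7-bit groups, so it encodes to 0xAC 0x02
	fmt.Println(varintSize(300), encodeVarint(300)) // 2 [172 2]
}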
+func (m *ClientState) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTendermint
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ClientState: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ClientState: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ChainId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTendermint
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTendermint
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTendermint
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ChainId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TrustLevel", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTendermint
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTendermint
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthTendermint
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.TrustLevel.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TrustingPeriod", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTendermint
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTendermint
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthTendermint
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := github_com_gogo_protobuf_types.StdDurationUnmarshal(&m.TrustingPeriod, dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UnbondingPeriod", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTendermint
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTendermint
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthTendermint
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := github_com_gogo_protobuf_types.StdDurationUnmarshal(&m.UnbondingPeriod, dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MaxClockDrift", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTendermint
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTendermint
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthTendermint
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := github_com_gogo_protobuf_types.StdDurationUnmarshal(&m.MaxClockDrift, dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field FrozenHeight", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTendermint
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTendermint
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthTendermint
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.FrozenHeight.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LatestHeight", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTendermint
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTendermint
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthTendermint
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.LatestHeight.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProofSpecs", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTendermint
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTendermint
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthTendermint
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ProofSpecs = append(m.ProofSpecs, &_go.ProofSpec{})
+ if err := m.ProofSpecs[len(m.ProofSpecs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 9:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UpgradePath", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTendermint
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTendermint
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTendermint
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.UpgradePath = append(m.UpgradePath, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 10:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AllowUpdateAfterExpiry", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTendermint
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.AllowUpdateAfterExpiry = bool(v != 0)
+ case 11:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AllowUpdateAfterMisbehaviour", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTendermint
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.AllowUpdateAfterMisbehaviour = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTendermint(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthTendermint
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ConsensusState) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTendermint
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ConsensusState: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ConsensusState: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTendermint
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTendermint
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthTendermint
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Root", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTendermint
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTendermint
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthTendermint
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Root.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NextValidatorsHash", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTendermint
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthTendermint
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTendermint
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.NextValidatorsHash = append(m.NextValidatorsHash[:0], dAtA[iNdEx:postIndex]...)
+ if m.NextValidatorsHash == nil {
+ m.NextValidatorsHash = []byte{}
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTendermint(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthTendermint
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Misbehaviour) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTendermint
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Misbehaviour: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Misbehaviour: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClientId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTendermint
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthTendermint
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthTendermint
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ClientId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header1", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTendermint
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTendermint
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthTendermint
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header1 == nil {
+ m.Header1 = &Header{}
+ }
+ if err := m.Header1.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header2", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTendermint
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTendermint
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthTendermint
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header2 == nil {
+ m.Header2 = &Header{}
+ }
+ if err := m.Header2.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTendermint(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthTendermint
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Header) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTendermint
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Header: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Header: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SignedHeader", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTendermint
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTendermint
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthTendermint
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.SignedHeader == nil {
+ m.SignedHeader = &types2.SignedHeader{}
+ }
+ if err := m.SignedHeader.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ValidatorSet", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTendermint
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTendermint
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthTendermint
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ValidatorSet == nil {
+ m.ValidatorSet = &types2.ValidatorSet{}
+ }
+ if err := m.ValidatorSet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TrustedHeight", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTendermint
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTendermint
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthTendermint
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.TrustedHeight.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TrustedValidators", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTendermint
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthTendermint
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthTendermint
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.TrustedValidators == nil {
+ m.TrustedValidators = &types2.ValidatorSet{}
+ }
+ if err := m.TrustedValidators.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTendermint(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthTendermint
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Fraction) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTendermint
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Fraction: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Fraction: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Numerator", wireType)
+ }
+ m.Numerator = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTendermint
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Numerator |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Denominator", wireType)
+ }
+ m.Denominator = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowTendermint
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Denominator |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipTendermint(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthTendermint
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipTendermint(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowTendermint
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowTendermint
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowTendermint
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthTendermint
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupTendermint
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthTendermint
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthTendermint = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowTendermint = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupTendermint = fmt.Errorf("proto: unexpected end of group")
+)
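As a quick sanity check of the generated (un)marshalers, a Fraction round-trips through its wire form. The sketch below is illustrative only, written as if it lived in this same types package; the example name is hypothetical:

func ExampleFraction_roundTrip() {
	f := Fraction{Numerator: 1, Denominator: 3}
	bz, _ := f.Marshal() // wire bytes 0x08 0x01 0x10 0x03: two varint-encoded fields
	var out Fraction
	_ = out.Unmarshal(bz)
	fmt.Println(out.Numerator, out.Denominator)
	// Output: 1 3
}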
diff --git a/light-clients/07-tendermint/types/tendermint_test.go b/light-clients/07-tendermint/types/tendermint_test.go
new file mode 100644
index 00000000..4f9b8142
--- /dev/null
+++ b/light-clients/07-tendermint/types/tendermint_test.go
@@ -0,0 +1,95 @@
+package types_test
+
+import (
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/suite"
+ tmbytes "github.com/tendermint/tendermint/libs/bytes"
+ tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
+ tmtypes "github.com/tendermint/tendermint/types"
+
+ "github.com/cosmos/cosmos-sdk/codec"
+ "github.com/cosmos/cosmos-sdk/simapp"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
+ ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+ ibctestingmock "github.com/cosmos/cosmos-sdk/x/ibc/testing/mock"
+)
+
+const (
+ chainID = "gaia"
+ chainIDRevision0 = "gaia-revision-0"
+ chainIDRevision1 = "gaia-revision-1"
+ clientID = "gaiamainnet"
+ trustingPeriod time.Duration = time.Hour * 24 * 7 * 2
+ ubdPeriod time.Duration = time.Hour * 24 * 7 * 3
+ maxClockDrift time.Duration = time.Second * 10
+)
+
+var (
+ height = clienttypes.NewHeight(0, 4)
+ newClientHeight = clienttypes.NewHeight(1, 1)
+ upgradePath = []string{"upgrade", "upgradedIBCState"}
+)
+
+type TendermintTestSuite struct {
+ suite.Suite
+
+ coordinator *ibctesting.Coordinator
+
+ // testing chains used for convenience and readability
+ chainA *ibctesting.TestChain
+ chainB *ibctesting.TestChain
+
+ // TODO: deprecate usage in favor of testing package
+ ctx sdk.Context
+ cdc codec.Marshaler
+ privVal tmtypes.PrivValidator
+ valSet *tmtypes.ValidatorSet
+ valsHash tmbytes.HexBytes
+ header *ibctmtypes.Header
+ now time.Time
+ headerTime time.Time
+ clientTime time.Time
+}
+
+func (suite *TendermintTestSuite) SetupTest() {
+ suite.coordinator = ibctesting.NewCoordinator(suite.T(), 2)
+ suite.chainA = suite.coordinator.GetChain(ibctesting.GetChainID(0))
+ suite.chainB = suite.coordinator.GetChain(ibctesting.GetChainID(1))
+	// commit some blocks so that QueryProof returns a valid proof (cannot return a valid proof if height <= 1)
+ suite.coordinator.CommitNBlocks(suite.chainA, 2)
+ suite.coordinator.CommitNBlocks(suite.chainB, 2)
+
+ // TODO: deprecate usage in favor of testing package
+ checkTx := false
+ app := simapp.Setup(checkTx)
+
+ suite.cdc = app.AppCodec()
+
+ // now is the time of the current chain, must be after the updating header
+ // mocks ctx.BlockTime()
+ suite.now = time.Date(2020, 1, 2, 0, 0, 0, 0, time.UTC)
+ suite.clientTime = time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC)
+ // Header time is intended to be time for any new header used for updates
+ suite.headerTime = time.Date(2020, 1, 2, 0, 0, 0, 0, time.UTC)
+
+ suite.privVal = ibctestingmock.NewPV()
+
+ pubKey, err := suite.privVal.GetPubKey()
+ suite.Require().NoError(err)
+
+ heightMinus1 := clienttypes.NewHeight(0, height.RevisionHeight-1)
+
+ val := tmtypes.NewValidator(pubKey, 10)
+ suite.valSet = tmtypes.NewValidatorSet([]*tmtypes.Validator{val})
+ suite.valsHash = suite.valSet.Hash()
+ suite.header = suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), heightMinus1, suite.now, suite.valSet, suite.valSet, []tmtypes.PrivValidator{suite.privVal})
+ suite.ctx = app.BaseApp.NewContext(checkTx, tmproto.Header{Height: 1, Time: suite.now})
+}
+
+func TestTendermintTestSuite(t *testing.T) {
+ suite.Run(t, new(TendermintTestSuite))
+}
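TestTendermintTestSuite wires the suite into the standard test binary, so the whole suite runs with the usual Go tooling, for example (package path as it appears in this patch):

	go test -run TestTendermintTestSuite ./light-clients/07-tendermint/types/...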
diff --git a/light-clients/07-tendermint/types/update.go b/light-clients/07-tendermint/types/update.go
new file mode 100644
index 00000000..e692e746
--- /dev/null
+++ b/light-clients/07-tendermint/types/update.go
@@ -0,0 +1,186 @@
+package types
+
+import (
+ "bytes"
+ "time"
+
+ "github.com/tendermint/tendermint/light"
+ tmtypes "github.com/tendermint/tendermint/types"
+
+ "github.com/cosmos/cosmos-sdk/codec"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+)
+
+// CheckHeaderAndUpdateState checks if the provided header is valid, and if valid it will:
+// create the consensus state for the header.Height
+// and update the client state if the header height is greater than the latest client state height
+// It returns an error if:
+// - the client or header provided are not parseable to tendermint types
+// - the header is invalid
+// - header height is less than or equal to the trusted header height
+// - header revision is not equal to trusted header revision
+// - header valset commit verification fails
+// - header timestamp is past the trusting period in relation to the consensus state
+// - header timestamp is less than or equal to the consensus state timestamp
+//
+// UpdateClient may be used to either create a consensus state for:
+// - a future height greater than the latest client state height
+// - a past height that was skipped during bisection
+// If we are updating to a past height, a consensus state is created for that height to be persisted in client store
+// If we are updating to a future height, the consensus state is created and the client state is updated to reflect
+// the new latest height
+// UpdateClient must only be used to update within a single revision, thus header revision number and trusted height's revision
+// number must be the same. To update to a new revision, use a separate upgrade path
+// Tendermint client validity checking uses the bisection algorithm described
+// in the [Tendermint spec](https://github.com/tendermint/spec/blob/master/spec/consensus/light-client.md).
+func (cs ClientState) CheckHeaderAndUpdateState(
+ ctx sdk.Context, cdc codec.BinaryMarshaler, clientStore sdk.KVStore,
+ header exported.Header,
+) (exported.ClientState, exported.ConsensusState, error) {
+ tmHeader, ok := header.(*Header)
+ if !ok {
+ return nil, nil, sdkerrors.Wrapf(
+ clienttypes.ErrInvalidHeader, "expected type %T, got %T", &Header{}, header,
+ )
+ }
+
+ // get consensus state from clientStore
+ tmConsState, err := GetConsensusState(clientStore, cdc, tmHeader.TrustedHeight)
+ if err != nil {
+ return nil, nil, sdkerrors.Wrapf(
+ err, "could not get consensus state from clientstore at TrustedHeight: %s", tmHeader.TrustedHeight,
+ )
+ }
+
+ if err := checkValidity(&cs, tmConsState, tmHeader, ctx.BlockTime()); err != nil {
+ return nil, nil, err
+ }
+
+ newClientState, consensusState := update(ctx, clientStore, &cs, tmHeader)
+ return newClientState, consensusState, nil
+}
+
+// checkTrustedHeader checks that consensus state matches trusted fields of Header
+func checkTrustedHeader(header *Header, consState *ConsensusState) error {
+ tmTrustedValidators, err := tmtypes.ValidatorSetFromProto(header.TrustedValidators)
+ if err != nil {
+		return sdkerrors.Wrap(err, "trusted validator set is not a tendermint validator set type")
+ }
+
+ // assert that trustedVals is NextValidators of last trusted header
+ // to do this, we check that trustedVals.Hash() == consState.NextValidatorsHash
+ tvalHash := tmTrustedValidators.Hash()
+ if !bytes.Equal(consState.NextValidatorsHash, tvalHash) {
+ return sdkerrors.Wrapf(
+ ErrInvalidValidatorSet,
+ "trusted validators %s, does not hash to latest trusted validators. Expected: %X, got: %X",
+ header.TrustedValidators, consState.NextValidatorsHash, tvalHash,
+ )
+ }
+ return nil
+}
+
+// checkValidity checks if the Tendermint header is valid.
+// CONTRACT: consState.Height == header.TrustedHeight
+func checkValidity(
+ clientState *ClientState, consState *ConsensusState,
+ header *Header, currentTimestamp time.Time,
+) error {
+ if err := checkTrustedHeader(header, consState); err != nil {
+ return err
+ }
+
+ // UpdateClient only accepts updates with a header at the same revision
+ // as the trusted consensus state
+ if header.GetHeight().GetRevisionNumber() != header.TrustedHeight.RevisionNumber {
+ return sdkerrors.Wrapf(
+ ErrInvalidHeaderHeight,
+ "header height revision %d does not match trusted header revision %d",
+ header.GetHeight().GetRevisionNumber(), header.TrustedHeight.RevisionNumber,
+ )
+ }
+
+ tmTrustedValidators, err := tmtypes.ValidatorSetFromProto(header.TrustedValidators)
+ if err != nil {
+		return sdkerrors.Wrap(err, "trusted validator set is not a tendermint validator set type")
+ }
+
+ tmSignedHeader, err := tmtypes.SignedHeaderFromProto(header.SignedHeader)
+ if err != nil {
+		return sdkerrors.Wrap(err, "signed header is not a tendermint signed header type")
+ }
+
+ tmValidatorSet, err := tmtypes.ValidatorSetFromProto(header.ValidatorSet)
+ if err != nil {
+		return sdkerrors.Wrap(err, "validator set is not a tendermint validator set type")
+ }
+
+ // assert header height is newer than consensus state
+ if header.GetHeight().LTE(header.TrustedHeight) {
+ return sdkerrors.Wrapf(
+ clienttypes.ErrInvalidHeader,
+ "header height ≤ consensus state height (%s ≤ %s)", header.GetHeight(), header.TrustedHeight,
+ )
+ }
+
+ chainID := clientState.GetChainID()
+ // If chainID is in revision format, then set revision number of chainID with the revision number
+ // of the header we are verifying
+ // This is useful if the update is at a previous revision rather than an update to the latest revision
+ // of the client.
+ // The chainID must be set correctly for the previous revision before attempting verification.
+ // Updates for previous revisions are not supported if the chainID is not in revision format.
+ if clienttypes.IsRevisionFormat(chainID) {
+ chainID, _ = clienttypes.SetRevisionNumber(chainID, header.GetHeight().GetRevisionNumber())
+ }
+
+ // Construct a trusted header using the fields in consensus state
+ // Only Height, Time, and NextValidatorsHash are necessary for verification
+ trustedHeader := tmtypes.Header{
+ ChainID: chainID,
+ Height: int64(header.TrustedHeight.RevisionHeight),
+ Time: consState.Timestamp,
+ NextValidatorsHash: consState.NextValidatorsHash,
+ }
+ signedHeader := tmtypes.SignedHeader{
+ Header: &trustedHeader,
+ }
+
+ // Verify next header with the passed-in trustedVals
+ // - asserts trusting period not passed
+ // - assert header timestamp is not past the trusting period
+ // - assert header timestamp is past latest stored consensus state timestamp
+ // - assert that a TrustLevel proportion of TrustedValidators signed new Commit
+ err = light.Verify(
+ &signedHeader,
+ tmTrustedValidators, tmSignedHeader, tmValidatorSet,
+ clientState.TrustingPeriod, currentTimestamp, clientState.MaxClockDrift, clientState.TrustLevel.ToTendermint(),
+ )
+ if err != nil {
+ return sdkerrors.Wrap(err, "failed to verify header")
+ }
+ return nil
+}
+
+// update the consensus state from a new header and set processed time metadata
+func update(ctx sdk.Context, clientStore sdk.KVStore, clientState *ClientState, header *Header) (*ClientState, *ConsensusState) {
+ height := header.GetHeight().(clienttypes.Height)
+ if height.GT(clientState.LatestHeight) {
+ clientState.LatestHeight = height
+ }
+ consensusState := &ConsensusState{
+ Timestamp: header.GetTime(),
+ Root: commitmenttypes.NewMerkleRoot(header.Header.GetAppHash()),
+ NextValidatorsHash: header.Header.NextValidatorsHash,
+ }
+
+ // set context time as processed time as this is state internal to tendermint client logic.
+ // client state and consensus state will be set by client keeper
+ SetProcessedTime(clientStore, header.GetHeight(), uint64(ctx.BlockTime().UnixNano()))
+
+ return clientState, consensusState
+}
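For orientation, CheckHeaderAndUpdateState is driven by the 02-client keeper when it handles a client update. A rough, hypothetical caller-side sketch (same package; keeper wiring and persistence omitted) might look like:

// hypothetical sketch of the caller side; the real keeper also persists both results
func updateClientSketch(
	ctx sdk.Context, cdc codec.BinaryMarshaler, clientStore sdk.KVStore,
	cs *ClientState, header exported.Header,
) error {
	newClientState, newConsState, err := cs.CheckHeaderAndUpdateState(ctx, cdc, clientStore, header)
	if err != nil {
		return err
	}
	_ = newClientState // the keeper stores the (possibly updated) client state
	_ = newConsState   // the keeper stores the new consensus state at header.GetHeight()
	return nil
}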
diff --git a/light-clients/07-tendermint/types/update_test.go b/light-clients/07-tendermint/types/update_test.go
new file mode 100644
index 00000000..d9e550ed
--- /dev/null
+++ b/light-clients/07-tendermint/types/update_test.go
@@ -0,0 +1,281 @@
+package types_test
+
+import (
+ "time"
+
+ tmtypes "github.com/tendermint/tendermint/types"
+
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
+ types "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
+ ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+ ibctestingmock "github.com/cosmos/cosmos-sdk/x/ibc/testing/mock"
+)
+
+func (suite *TendermintTestSuite) TestCheckHeaderAndUpdateState() {
+ var (
+ clientState *types.ClientState
+ consensusState *types.ConsensusState
+ consStateHeight clienttypes.Height
+ newHeader *types.Header
+ currentTime time.Time
+ )
+
+ // Setup different validators and signers for testing different types of updates
+ altPrivVal := ibctestingmock.NewPV()
+ altPubKey, err := altPrivVal.GetPubKey()
+ suite.Require().NoError(err)
+
+ revisionHeight := int64(height.RevisionHeight)
+
+ // create modified heights to use for test-cases
+ heightPlus1 := clienttypes.NewHeight(height.RevisionNumber, height.RevisionHeight+1)
+ heightMinus1 := clienttypes.NewHeight(height.RevisionNumber, height.RevisionHeight-1)
+ heightMinus3 := clienttypes.NewHeight(height.RevisionNumber, height.RevisionHeight-3)
+ heightPlus5 := clienttypes.NewHeight(height.RevisionNumber, height.RevisionHeight+5)
+
+ altVal := tmtypes.NewValidator(altPubKey, revisionHeight)
+
+ // Create bothValSet with both suite validator and altVal. Would be valid update
+ bothValSet := tmtypes.NewValidatorSet(append(suite.valSet.Validators, altVal))
+ // Create alternative validator set with only altVal, invalid update (too much change in valSet)
+ altValSet := tmtypes.NewValidatorSet([]*tmtypes.Validator{altVal})
+
+ signers := []tmtypes.PrivValidator{suite.privVal}
+
+ // Create signer array and ensure it is in same order as bothValSet
+ _, suiteVal := suite.valSet.GetByIndex(0)
+ bothSigners := ibctesting.CreateSortedSignerArray(altPrivVal, suite.privVal, altVal, suiteVal)
+
+ altSigners := []tmtypes.PrivValidator{altPrivVal}
+
+ testCases := []struct {
+ name string
+ setup func()
+ expPass bool
+ }{
+ {
+ name: "successful update with next height and same validator set",
+ setup: func() {
+ clientState = types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
+ consensusState = types.NewConsensusState(suite.clientTime, commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), suite.valsHash)
+ newHeader = suite.chainA.CreateTMClientHeader(chainID, int64(heightPlus1.RevisionHeight), height, suite.headerTime, suite.valSet, suite.valSet, signers)
+ currentTime = suite.now
+ },
+ expPass: true,
+ },
+ {
+ name: "successful update with future height and different validator set",
+ setup: func() {
+ clientState = types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
+ consensusState = types.NewConsensusState(suite.clientTime, commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), suite.valsHash)
+ newHeader = suite.chainA.CreateTMClientHeader(chainID, int64(heightPlus5.RevisionHeight), height, suite.headerTime, bothValSet, suite.valSet, bothSigners)
+ currentTime = suite.now
+ },
+ expPass: true,
+ },
+ {
+ name: "successful update with next height and different validator set",
+ setup: func() {
+ clientState = types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
+ consensusState = types.NewConsensusState(suite.clientTime, commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), bothValSet.Hash())
+ newHeader = suite.chainA.CreateTMClientHeader(chainID, int64(heightPlus1.RevisionHeight), height, suite.headerTime, bothValSet, bothValSet, bothSigners)
+ currentTime = suite.now
+ },
+ expPass: true,
+ },
+ {
+ name: "successful update for a previous height",
+ setup: func() {
+ clientState = types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
+ consensusState = types.NewConsensusState(suite.clientTime, commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), suite.valsHash)
+ consStateHeight = heightMinus3
+ newHeader = suite.chainA.CreateTMClientHeader(chainID, int64(heightMinus1.RevisionHeight), heightMinus3, suite.headerTime, bothValSet, suite.valSet, bothSigners)
+ currentTime = suite.now
+ },
+ expPass: true,
+ },
+ {
+ name: "successful update for a previous revision",
+ setup: func() {
+ clientState = types.NewClientState(chainIDRevision1, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
+ consensusState = types.NewConsensusState(suite.clientTime, commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), suite.valsHash)
+ newHeader = suite.chainA.CreateTMClientHeader(chainIDRevision0, int64(height.RevisionHeight), heightMinus3, suite.headerTime, bothValSet, suite.valSet, bothSigners)
+ currentTime = suite.now
+ },
+ expPass: true,
+ },
+ {
+ name: "unsuccessful update with incorrect header chain-id",
+ setup: func() {
+ clientState = types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
+ consensusState = types.NewConsensusState(suite.clientTime, commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), suite.valsHash)
+ newHeader = suite.chainA.CreateTMClientHeader("ethermint", int64(heightPlus1.RevisionHeight), height, suite.headerTime, suite.valSet, suite.valSet, signers)
+ currentTime = suite.now
+ },
+ expPass: false,
+ },
+ {
+ name: "unsuccessful update to a future revision",
+ setup: func() {
+ clientState = types.NewClientState(chainIDRevision0, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
+ consensusState = types.NewConsensusState(suite.clientTime, commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), suite.valsHash)
+ newHeader = suite.chainA.CreateTMClientHeader(chainIDRevision1, 1, height, suite.headerTime, suite.valSet, suite.valSet, signers)
+ currentTime = suite.now
+ },
+ expPass: false,
+ },
+ {
+ name: "unsuccessful update: header height revision and trusted height revision mismatch",
+ setup: func() {
+ clientState = types.NewClientState(chainIDRevision1, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, clienttypes.NewHeight(1, 1), commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
+ consensusState = types.NewConsensusState(suite.clientTime, commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), suite.valsHash)
+ newHeader = suite.chainA.CreateTMClientHeader(chainIDRevision1, 3, height, suite.headerTime, suite.valSet, suite.valSet, signers)
+ currentTime = suite.now
+ },
+ expPass: false,
+ },
+ {
+ name: "unsuccessful update with next height: update header mismatches nextValSetHash",
+ setup: func() {
+ clientState = types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
+ consensusState = types.NewConsensusState(suite.clientTime, commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), suite.valsHash)
+ newHeader = suite.chainA.CreateTMClientHeader(chainID, int64(heightPlus1.RevisionHeight), height, suite.headerTime, bothValSet, suite.valSet, bothSigners)
+ currentTime = suite.now
+ },
+ expPass: false,
+ },
+ {
+ name: "unsuccessful update with next height: update header mismatches different nextValSetHash",
+ setup: func() {
+ clientState = types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
+ consensusState = types.NewConsensusState(suite.clientTime, commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), bothValSet.Hash())
+ newHeader = suite.chainA.CreateTMClientHeader(chainID, int64(heightPlus1.RevisionHeight), height, suite.headerTime, suite.valSet, bothValSet, signers)
+ currentTime = suite.now
+ },
+ expPass: false,
+ },
+ {
+ name: "unsuccessful update with future height: too much change in validator set",
+ setup: func() {
+ clientState = types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
+ consensusState = types.NewConsensusState(suite.clientTime, commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), suite.valsHash)
+ newHeader = suite.chainA.CreateTMClientHeader(chainID, int64(heightPlus5.RevisionHeight), height, suite.headerTime, altValSet, suite.valSet, altSigners)
+ currentTime = suite.now
+ },
+ expPass: false,
+ },
+ {
+ name: "unsuccessful updates, passed in incorrect trusted validators for given consensus state",
+ setup: func() {
+ clientState = types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
+ consensusState = types.NewConsensusState(suite.clientTime, commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), suite.valsHash)
+ newHeader = suite.chainA.CreateTMClientHeader(chainID, int64(heightPlus5.RevisionHeight), height, suite.headerTime, bothValSet, bothValSet, bothSigners)
+ currentTime = suite.now
+ },
+ expPass: false,
+ },
+ {
+ name: "unsuccessful update: trusting period has passed since last client timestamp",
+ setup: func() {
+ clientState = types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
+ consensusState = types.NewConsensusState(suite.clientTime, commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), suite.valsHash)
+ newHeader = suite.chainA.CreateTMClientHeader(chainID, int64(heightPlus1.RevisionHeight), height, suite.headerTime, suite.valSet, suite.valSet, signers)
+ // make current time pass trusting period from last timestamp on clientstate
+ currentTime = suite.now.Add(trustingPeriod)
+ },
+ expPass: false,
+ },
+ {
+ name: "unsuccessful update: header timestamp is past current timestamp",
+ setup: func() {
+ clientState = types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
+ consensusState = types.NewConsensusState(suite.clientTime, commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), suite.valsHash)
+ newHeader = suite.chainA.CreateTMClientHeader(chainID, int64(heightPlus1.RevisionHeight), height, suite.now.Add(time.Minute), suite.valSet, suite.valSet, signers)
+ currentTime = suite.now
+ },
+ expPass: false,
+ },
+ {
+ name: "unsuccessful update: header timestamp is not past last client timestamp",
+ setup: func() {
+ clientState = types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
+ consensusState = types.NewConsensusState(suite.clientTime, commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), suite.valsHash)
+ newHeader = suite.chainA.CreateTMClientHeader(chainID, int64(heightPlus1.RevisionHeight), height, suite.clientTime, suite.valSet, suite.valSet, signers)
+ currentTime = suite.now
+ },
+ expPass: false,
+ },
+ {
+ name: "header basic validation failed",
+ setup: func() {
+ clientState = types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
+ consensusState = types.NewConsensusState(suite.clientTime, commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), suite.valsHash)
+ newHeader = suite.chainA.CreateTMClientHeader(chainID, int64(heightPlus1.RevisionHeight), height, suite.headerTime, suite.valSet, suite.valSet, signers)
+ // cause new header to fail validatebasic by changing commit height to mismatch header height
+ newHeader.SignedHeader.Commit.Height = revisionHeight - 1
+ currentTime = suite.now
+ },
+ expPass: false,
+ },
+ {
+ name: "header height < consensus height",
+ setup: func() {
+ clientState = types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, clienttypes.NewHeight(height.RevisionNumber, heightPlus5.RevisionHeight), commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
+ consensusState = types.NewConsensusState(suite.clientTime, commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), suite.valsHash)
+ // Make new header at height less than latest client state
+ newHeader = suite.chainA.CreateTMClientHeader(chainID, int64(heightMinus1.RevisionHeight), height, suite.headerTime, suite.valSet, suite.valSet, signers)
+ currentTime = suite.now
+ },
+ expPass: false,
+ },
+ }
+
+ for i, tc := range testCases {
+ tc := tc
+
+ consStateHeight = height // must be explicitly changed
+ // setup test
+ tc.setup()
+
+ // Set current timestamp in context
+ ctx := suite.chainA.GetContext().WithBlockTime(currentTime)
+
+ // Set trusted consensus state in client store
+ suite.chainA.App.IBCKeeper.ClientKeeper.SetClientConsensusState(ctx, clientID, consStateHeight, consensusState)
+
+ height := newHeader.GetHeight()
+ expectedConsensus := &types.ConsensusState{
+ Timestamp: newHeader.GetTime(),
+ Root: commitmenttypes.NewMerkleRoot(newHeader.Header.GetAppHash()),
+ NextValidatorsHash: newHeader.Header.NextValidatorsHash,
+ }
+
+ newClientState, consensusState, err := clientState.CheckHeaderAndUpdateState(
+ ctx,
+ suite.cdc,
+ suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), clientID), // pass in clientID prefixed clientStore
+ newHeader,
+ )
+
+ if tc.expPass {
+ suite.Require().NoError(err, "valid test case %d failed: %s", i, tc.name)
+
+ // Determine if clientState should be updated or not
+ // TODO: check the entire Height struct once GetLatestHeight returns clienttypes.Height
+ if height.GT(clientState.LatestHeight) {
+ // Header Height is greater than clientState latest Height, clientState should be updated with header.GetHeight()
+ suite.Require().Equal(height, newClientState.GetLatestHeight(), "clientstate height did not update")
+ } else {
+ // Update will add past consensus state, clientState should not be updated at all
+ suite.Require().Equal(clientState.LatestHeight, newClientState.GetLatestHeight(), "client state height updated for past header")
+ }
+
+ suite.Require().Equal(expectedConsensus, consensusState, "valid test case %d failed: %s", i, tc.name)
+ } else {
+ suite.Require().Error(err, "invalid test case %d passed: %s", i, tc.name)
+ suite.Require().Nil(newClientState, "invalid test case %d passed: %s", i, tc.name)
+ suite.Require().Nil(consensusState, "invalid test case %d passed: %s", i, tc.name)
+ }
+ }
+}
diff --git a/light-clients/07-tendermint/types/upgrade.go b/light-clients/07-tendermint/types/upgrade.go
new file mode 100644
index 00000000..397e9cfd
--- /dev/null
+++ b/light-clients/07-tendermint/types/upgrade.go
@@ -0,0 +1,156 @@
+package types
+
+import (
+ "fmt"
+
+ "github.com/cosmos/cosmos-sdk/codec"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
+)
+
+// VerifyUpgradeAndUpdateState checks if the upgraded client has been committed by the current client
+// It will zero out all client-specific fields (e.g. TrustingPeriod) and verify all data
+// in client state that must be the same across all valid Tendermint clients for the new chain.
+// VerifyUpgrade will return an error if:
+// - the upgradedClient is not a Tendermint ClientState
+// - the latest height of the client state does not have the same revision number or has a greater
+// height than the committed client.
+// - the height of upgraded client is not greater than that of current client
+// - the latest height of the new client does not match or is greater than the height in committed client
+// - any Tendermint chain specified parameter in upgraded client such as ChainID, UnbondingPeriod,
+// and ProofSpecs do not match parameters set by committed client
+func (cs ClientState) VerifyUpgradeAndUpdateState(
+ ctx sdk.Context, cdc codec.BinaryMarshaler, clientStore sdk.KVStore,
+ upgradedClient exported.ClientState, upgradedConsState exported.ConsensusState,
+ proofUpgradeClient, proofUpgradeConsState []byte,
+) (exported.ClientState, exported.ConsensusState, error) {
+ if len(cs.UpgradePath) == 0 {
+ return nil, nil, sdkerrors.Wrap(clienttypes.ErrInvalidUpgradeClient, "cannot upgrade client, no upgrade path set")
+ }
+
+ // last height of current counterparty chain must be client's latest height
+ lastHeight := cs.GetLatestHeight()
+
+ if !upgradedClient.GetLatestHeight().GT(lastHeight) {
+		return nil, nil, sdkerrors.Wrapf(sdkerrors.ErrInvalidHeight, "upgraded client height %s must be greater than current client height %s",
+ upgradedClient.GetLatestHeight(), lastHeight)
+ }
+
+ // counterparty chain must commit the upgraded client with all client-customizable fields zeroed out
+ // at the upgrade path specified by current client
+ // counterparty must also commit to the upgraded consensus state at a sub-path under the upgrade path specified
+ tmUpgradeClient, ok := upgradedClient.(*ClientState)
+ if !ok {
+ return nil, nil, sdkerrors.Wrapf(clienttypes.ErrInvalidClientType, "upgraded client must be Tendermint client. expected: %T got: %T",
+ &ClientState{}, upgradedClient)
+ }
+ tmUpgradeConsState, ok := upgradedConsState.(*ConsensusState)
+ if !ok {
+ return nil, nil, sdkerrors.Wrapf(clienttypes.ErrInvalidConsensus, "upgraded consensus state must be Tendermint consensus state. expected %T, got: %T",
+ &ConsensusState{}, upgradedConsState)
+ }
+
+ // unmarshal proofs
+ var merkleProofClient, merkleProofConsState commitmenttypes.MerkleProof
+ if err := cdc.UnmarshalBinaryBare(proofUpgradeClient, &merkleProofClient); err != nil {
+ return nil, nil, sdkerrors.Wrapf(commitmenttypes.ErrInvalidProof, "could not unmarshal client merkle proof: %v", err)
+ }
+ if err := cdc.UnmarshalBinaryBare(proofUpgradeConsState, &merkleProofConsState); err != nil {
+ return nil, nil, sdkerrors.Wrapf(commitmenttypes.ErrInvalidProof, "could not unmarshal consensus state merkle proof: %v", err)
+ }
+
+ // Must prove against latest consensus state to ensure we are verifying against latest upgrade plan
+ // This verifies that upgrade is intended for the provided revision, since committed client must exist
+ // at this consensus state
+ consState, err := GetConsensusState(clientStore, cdc, lastHeight)
+ if err != nil {
+ return nil, nil, sdkerrors.Wrap(err, "could not retrieve consensus state for lastHeight")
+ }
+
+ if cs.IsExpired(consState.Timestamp, ctx.BlockTime()) {
+ return nil, nil, sdkerrors.Wrap(clienttypes.ErrInvalidClient, "cannot upgrade an expired client")
+ }
+
+ // Verify client proof
+ bz, err := cdc.MarshalInterface(upgradedClient)
+ if err != nil {
+ return nil, nil, sdkerrors.Wrapf(clienttypes.ErrInvalidClient, "could not marshal client state: %v", err)
+ }
+ // construct clientState Merkle path
+ upgradeClientPath := constructUpgradeClientMerklePath(cs.UpgradePath, lastHeight)
+ if err := merkleProofClient.VerifyMembership(cs.ProofSpecs, consState.GetRoot(), upgradeClientPath, bz); err != nil {
+ return nil, nil, sdkerrors.Wrapf(err, "client state proof failed. Path: %s", upgradeClientPath.Pretty())
+ }
+
+ // Verify consensus state proof
+ bz, err = cdc.MarshalInterface(upgradedConsState)
+ if err != nil {
+ return nil, nil, sdkerrors.Wrapf(clienttypes.ErrInvalidConsensus, "could not marshal consensus state: %v", err)
+ }
+ // construct consensus state Merkle path
+ upgradeConsStatePath := constructUpgradeConsStateMerklePath(cs.UpgradePath, lastHeight)
+ if err := merkleProofConsState.VerifyMembership(cs.ProofSpecs, consState.GetRoot(), upgradeConsStatePath, bz); err != nil {
+ return nil, nil, sdkerrors.Wrapf(err, "consensus state proof failed. Path: %s", upgradeConsStatePath.Pretty())
+ }
+
+ // Construct new client state and consensus state
+ // Relayer chosen client parameters are ignored.
+ // All chain-chosen parameters come from committed client, all client-chosen parameters
+ // come from current client.
+ newClientState := NewClientState(
+ tmUpgradeClient.ChainId, cs.TrustLevel, cs.TrustingPeriod, tmUpgradeClient.UnbondingPeriod,
+ cs.MaxClockDrift, tmUpgradeClient.LatestHeight, tmUpgradeClient.ProofSpecs, tmUpgradeClient.UpgradePath,
+ cs.AllowUpdateAfterExpiry, cs.AllowUpdateAfterMisbehaviour,
+ )
+
+ if err := newClientState.Validate(); err != nil {
+ return nil, nil, sdkerrors.Wrap(err, "updated client state failed basic validation")
+ }
+
+ // The new consensus state is merely used as a trusted kernel against which headers on the new
+ // chain can be verified. The root is empty as it cannot be known in advance, thus no proof verification will pass.
+ // The timestamp and the NextValidatorsHash of the consensus state are the block time and NextValidatorsHash
+ // of the last block committed by the old chain. This will allow the first block of the new chain to be verified against
+ // the last validators of the old chain so long as it is submitted within the TrustingPeriod of this client.
+ // NOTE: We do not set processed time for this consensus state since this consensus state should not be used for packet verification
+ // as the root is empty. The next consensus state submitted using update will be usable for packet-verification.
+ newConsState := NewConsensusState(
+ tmUpgradeConsState.Timestamp, commitmenttypes.MerkleRoot{}, tmUpgradeConsState.NextValidatorsHash,
+ )
+
+ return newClientState, newConsState, nil
+}
+
+// construct MerklePath for the committed client from upgradePath
+func constructUpgradeClientMerklePath(upgradePath []string, lastHeight exported.Height) commitmenttypes.MerklePath {
+ // copy all elements from upgradePath except final element
+ clientPath := make([]string, len(upgradePath)-1)
+ copy(clientPath, upgradePath)
+
+ // append lastHeight and `upgradedClient` to the last key of upgradePath and use it as the last key of clientPath
+ // this creates the IAVL key that is used to store the upgraded client in the upgrade store
+ lastKey := upgradePath[len(upgradePath)-1]
+ appendedKey := fmt.Sprintf("%s/%d/%s", lastKey, lastHeight.GetRevisionHeight(), upgradetypes.KeyUpgradedClient)
+
+ clientPath = append(clientPath, appendedKey)
+ return commitmenttypes.NewMerklePath(clientPath...)
+}
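+
+// For illustration (assuming the default SDK upgrade path []string{"upgrade", "upgradedIBCState"}
+// and an upgrade planned at revision height 150): constructUpgradeClientMerklePath yields
+// "upgrade/upgradedIBCState/150/upgradedClient" and constructUpgradeConsStateMerklePath (below)
+// yields "upgrade/upgradedIBCState/150/upgradedConsState", since upgradetypes.KeyUpgradedClient and
+// upgradetypes.KeyUpgradedConsState are appended to the final upgrade-path element together with
+// the revision height.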
+
+// construct MerklePath for the committed consensus state from upgradePath
+func constructUpgradeConsStateMerklePath(upgradePath []string, lastHeight exported.Height) commitmenttypes.MerklePath {
+ // copy all elements from upgradePath except final element
+ consPath := make([]string, len(upgradePath)-1)
+ copy(consPath, upgradePath)
+
+ // append lastHeight and `upgradedConsState` to the last key of upgradePath and use it as the last key of consPath
+ // this creates the IAVL key that is used to store the upgraded consensus state in the upgrade store
+ lastKey := upgradePath[len(upgradePath)-1]
+ appendedKey := fmt.Sprintf("%s/%d/%s", lastKey, lastHeight.GetRevisionHeight(), upgradetypes.KeyUpgradedConsState)
+
+ consPath = append(consPath, appendedKey)
+ return commitmenttypes.NewMerklePath(consPath...)
+}
diff --git a/light-clients/07-tendermint/types/upgrade_test.go b/light-clients/07-tendermint/types/upgrade_test.go
new file mode 100644
index 00000000..7be3a494
--- /dev/null
+++ b/light-clients/07-tendermint/types/upgrade_test.go
@@ -0,0 +1,512 @@
+package types_test
+
+import (
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
+ upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
+)
+
+func (suite *TendermintTestSuite) TestVerifyUpgrade() {
+ var (
+ upgradedClient exported.ClientState
+ upgradedConsState exported.ConsensusState
+ lastHeight clienttypes.Height
+ clientA string
+ proofUpgradedClient, proofUpgradedConsState []byte
+ )
+
+ testCases := []struct {
+ name string
+ setup func()
+ expPass bool
+ }{
+ {
+ name: "successful upgrade",
+ setup: func() {
+
+ upgradedClient = types.NewClientState("newChainId", types.DefaultTrustLevel, trustingPeriod, ubdPeriod+trustingPeriod, maxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
+ upgradedConsState = &types.ConsensusState{
+ NextValidatorsHash: []byte("nextValsHash"),
+ }
+
+ // upgrade Height is at next block
+ lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1))
+
+ // zero custom fields and store in upgrade store
+ suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClient)
+ suite.chainB.App.UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsState)
+
+ // commit upgrade store changes and update clients
+
+ suite.coordinator.CommitBlock(suite.chainB)
+ err := suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ suite.Require().NoError(err)
+
+ cs, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), clientA)
+ suite.Require().True(found)
+
+ proofUpgradedClient, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight())
+ proofUpgradedConsState, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight())
+ },
+ expPass: true,
+ },
+ {
+ name: "successful upgrade to same revision",
+ setup: func() {
+ upgradedHeight := clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+2))
+ upgradedClient = types.NewClientState("newChainId", types.DefaultTrustLevel, trustingPeriod, ubdPeriod+trustingPeriod, maxClockDrift, upgradedHeight, commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
+ upgradedConsState = &types.ConsensusState{
+ NextValidatorsHash: []byte("nextValsHash"),
+ }
+
+ // upgrade Height is at next block
+ lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1))
+
+ // zero custom fields and store in upgrade store
+ suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClient)
+ suite.chainB.App.UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsState)
+
+ // commit upgrade store changes and update clients
+
+ suite.coordinator.CommitBlock(suite.chainB)
+ err := suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ suite.Require().NoError(err)
+
+ cs, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), clientA)
+ suite.Require().True(found)
+
+ proofUpgradedClient, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight())
+ proofUpgradedConsState, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight())
+ },
+ expPass: true,
+ },
+
+ {
+ name: "unsuccessful upgrade: upgrade height revision height is more than the current client revision height",
+ setup: func() {
+
+ upgradedClient = types.NewClientState("newChainId", types.DefaultTrustLevel, trustingPeriod, ubdPeriod+trustingPeriod, maxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
+ upgradedConsState = &types.ConsensusState{
+ NextValidatorsHash: []byte("nextValsHash"),
+ }
+
+ // upgrade Height is 10 blocks from now
+ lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+10))
+
+ // zero custom fields and store in upgrade store
+ suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClient)
+ suite.chainB.App.UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsState)
+
+ // commit upgrade store changes and update clients
+
+ suite.coordinator.CommitBlock(suite.chainB)
+ err := suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ suite.Require().NoError(err)
+
+ cs, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), clientA)
+ suite.Require().True(found)
+
+ proofUpgradedClient, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight())
+ proofUpgradedConsState, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight())
+ },
+ expPass: false,
+ },
+ {
+ name: "unsuccessful upgrade: chain-specified parameters do not match committed client",
+ setup: func() {
+
+ upgradedClient = types.NewClientState("newChainId", types.DefaultTrustLevel, trustingPeriod, ubdPeriod+trustingPeriod, maxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
+ upgradedConsState = &types.ConsensusState{
+ NextValidatorsHash: []byte("nextValsHash"),
+ }
+
+ // upgrade Height is at next block
+ lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1))
+
+ // zero custom fields and store in upgrade store
+ suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClient)
+ suite.chainB.App.UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsState)
+
+ // change upgradedClient client-specified parameters
+ upgradedClient = types.NewClientState("wrongchainID", types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), upgradePath, true, true)
+
+ suite.coordinator.CommitBlock(suite.chainB)
+ err := suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ suite.Require().NoError(err)
+
+ cs, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), clientA)
+ suite.Require().True(found)
+
+ proofUpgradedClient, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight())
+ proofUpgradedConsState, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight())
+ },
+ expPass: false,
+ },
+ {
+ name: "unsuccessful upgrade: client-specified parameters do not match previous client",
+ setup: func() {
+
+ upgradedClient = types.NewClientState("newChainId", types.DefaultTrustLevel, trustingPeriod, ubdPeriod+trustingPeriod, maxClockDrift, lastHeight, commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
+ upgradedConsState = &types.ConsensusState{
+ NextValidatorsHash: []byte("nextValsHash"),
+ }
+
+ // zero custom fields and store in upgrade store
+ suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClient)
+ suite.chainB.App.UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsState)
+
+ // change upgradedClient client-specified parameters
+ upgradedClient = types.NewClientState("newChainId", types.DefaultTrustLevel, ubdPeriod, ubdPeriod+trustingPeriod, maxClockDrift+5, lastHeight, commitmenttypes.GetSDKSpecs(), upgradePath, true, false)
+
+ suite.coordinator.CommitBlock(suite.chainB)
+ err := suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ suite.Require().NoError(err)
+
+ cs, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), clientA)
+ suite.Require().True(found)
+
+ proofUpgradedClient, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight())
+ proofUpgradedConsState, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight())
+ },
+ expPass: false,
+ },
+ {
+ name: "unsuccessful upgrade: relayer-submitted consensus state does not match counterparty-committed consensus state",
+ setup: func() {
+
+ upgradedClient = types.NewClientState("newChainId", types.DefaultTrustLevel, trustingPeriod, ubdPeriod+trustingPeriod, maxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
+ upgradedConsState = &types.ConsensusState{
+ NextValidatorsHash: []byte("nextValsHash"),
+ }
+
+ // upgrade Height is at next block
+ lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1))
+
+ // zero custom fields and store in upgrade store
+ suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClient)
+ suite.chainB.App.UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsState)
+
+ // change submitted upgradedConsensusState
+ upgradedConsState = &types.ConsensusState{
+ NextValidatorsHash: []byte("maliciousValidators"),
+ }
+
+ // commit upgrade store changes and update clients
+
+ suite.coordinator.CommitBlock(suite.chainB)
+ err := suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ suite.Require().NoError(err)
+
+ cs, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), clientA)
+ suite.Require().True(found)
+
+ proofUpgradedClient, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight())
+ proofUpgradedConsState, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight())
+ },
+ expPass: false,
+ },
+ {
+ name: "unsuccessful upgrade: client proof unmarshal failed",
+ setup: func() {
+ upgradedClient = types.NewClientState("newChainId", types.DefaultTrustLevel, trustingPeriod, ubdPeriod+trustingPeriod, maxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
+ upgradedConsState = &types.ConsensusState{
+ NextValidatorsHash: []byte("nextValsHash"),
+ }
+ suite.chainB.App.UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsState)
+
+ cs, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), clientA)
+ suite.Require().True(found)
+
+ proofUpgradedConsState, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight())
+
+ proofUpgradedClient = []byte("proof")
+ },
+ expPass: false,
+ },
+ {
+ name: "unsuccessful upgrade: consensus state proof unmarshal failed",
+ setup: func() {
+ upgradedClient = types.NewClientState("newChainId", types.DefaultTrustLevel, trustingPeriod, ubdPeriod+trustingPeriod, maxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
+ upgradedConsState = &types.ConsensusState{
+ NextValidatorsHash: []byte("nextValsHash"),
+ }
+
+ suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClient)
+
+ cs, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), clientA)
+ suite.Require().True(found)
+
+ proofUpgradedClient, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight())
+
+ proofUpgradedConsState = []byte("proof")
+ },
+ expPass: false,
+ },
+ {
+ name: "unsuccessful upgrade: client proof verification failed",
+ setup: func() {
+ // create but do not store upgraded client
+ upgradedClient = types.NewClientState("newChainId", types.DefaultTrustLevel, trustingPeriod, ubdPeriod+trustingPeriod, maxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
+ upgradedConsState = &types.ConsensusState{
+ NextValidatorsHash: []byte("nextValsHash"),
+ }
+
+ // upgrade Height is at next block
+ lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1))
+
+ suite.chainB.App.UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsState)
+
+ cs, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), clientA)
+ suite.Require().True(found)
+
+ proofUpgradedClient, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight())
+ proofUpgradedConsState, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight())
+ },
+ expPass: false,
+ },
+ {
+ name: "unsuccessful upgrade: consensus state proof verification failed",
+ setup: func() {
+ // create but do not store upgraded client
+ upgradedClient = types.NewClientState("newChainId", types.DefaultTrustLevel, trustingPeriod, ubdPeriod+trustingPeriod, maxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
+ upgradedConsState = &types.ConsensusState{
+ NextValidatorsHash: []byte("nextValsHash"),
+ }
+
+ // upgrade Height is at next block
+ lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1))
+
+ suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClient)
+
+ cs, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), clientA)
+ suite.Require().True(found)
+
+ proofUpgradedClient, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight())
+ proofUpgradedConsState, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight())
+ },
+ expPass: false,
+ },
+ {
+ name: "unsuccessful upgrade: upgrade path is empty",
+ setup: func() {
+
+ upgradedClient = types.NewClientState("newChainId", types.DefaultTrustLevel, trustingPeriod, ubdPeriod+trustingPeriod, maxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
+ upgradedConsState = &types.ConsensusState{
+ NextValidatorsHash: []byte("nextValsHash"),
+ }
+
+ // upgrade Height is at next block
+ lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1))
+
+ // zero custom fields and store in upgrade store
+ suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClient)
+
+ // commit upgrade store changes and update clients
+
+ suite.coordinator.CommitBlock(suite.chainB)
+ err := suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ suite.Require().NoError(err)
+
+ cs, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), clientA)
+ suite.Require().True(found)
+
+ proofUpgradedClient, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight())
+ proofUpgradedConsState, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight())
+
+ // SetClientState with empty upgrade path
+ tmClient, _ := cs.(*types.ClientState)
+ tmClient.UpgradePath = []string{""}
+ suite.chainA.App.IBCKeeper.ClientKeeper.SetClientState(suite.chainA.GetContext(), clientA, tmClient)
+ },
+ expPass: false,
+ },
+ {
+ name: "unsuccessful upgrade: upgraded height is not greater than current height",
+ setup: func() {
+
+ upgradedClient = types.NewClientState("newChainId", types.DefaultTrustLevel, trustingPeriod, ubdPeriod+trustingPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
+ upgradedConsState = &types.ConsensusState{
+ NextValidatorsHash: []byte("nextValsHash"),
+ }
+
+ // upgrade Height is at next block
+ lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1))
+
+ // zero custom fields and store in upgrade store
+ suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClient)
+
+ // commit upgrade store changes and update clients
+
+ suite.coordinator.CommitBlock(suite.chainB)
+ err := suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ suite.Require().NoError(err)
+
+ cs, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), clientA)
+ suite.Require().True(found)
+
+ proofUpgradedClient, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight())
+ proofUpgradedConsState, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight())
+ },
+ expPass: false,
+ },
+ {
+ name: "unsuccessful upgrade: consensus state for upgrade height cannot be found",
+ setup: func() {
+
+ upgradedClient = types.NewClientState("newChainId", types.DefaultTrustLevel, trustingPeriod, ubdPeriod+trustingPeriod, maxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
+ upgradedConsState = &types.ConsensusState{
+ NextValidatorsHash: []byte("nextValsHash"),
+ }
+
+ // upgrade Height is 100 blocks from now, so no consensus state will exist for it on the client
+ lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+100))
+
+ // zero custom fields and store in upgrade store
+ suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClient)
+
+ // commit upgrade store changes and update clients
+
+ suite.coordinator.CommitBlock(suite.chainB)
+ err := suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ suite.Require().NoError(err)
+
+ cs, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), clientA)
+ suite.Require().True(found)
+
+ proofUpgradedClient, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight())
+ proofUpgradedConsState, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight())
+ },
+ expPass: false,
+ },
+ {
+ name: "unsuccessful upgrade: client is expired",
+ setup: func() {
+
+ upgradedClient = types.NewClientState("newChainId", types.DefaultTrustLevel, trustingPeriod, ubdPeriod+trustingPeriod, maxClockDrift, lastHeight, commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
+ upgradedConsState = &types.ConsensusState{
+ NextValidatorsHash: []byte("nextValsHash"),
+ }
+
+ // zero custom fields and store in upgrade store
+ suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClient)
+
+ // commit upgrade store changes and update clients
+
+ suite.coordinator.CommitBlock(suite.chainB)
+ err := suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ suite.Require().NoError(err)
+
+ // expire chainB's client
+ suite.chainA.ExpireClient(ubdPeriod)
+
+ cs, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), clientA)
+ suite.Require().True(found)
+
+ proofUpgradedClient, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight())
+ proofUpgradedConsState, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight())
+ },
+ expPass: false,
+ },
+ {
+ name: "unsuccessful upgrade: updated unbonding period is equal to trusting period",
+ setup: func() {
+
+ upgradedClient = types.NewClientState("newChainId", types.DefaultTrustLevel, trustingPeriod, trustingPeriod, maxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
+ upgradedConsState = &types.ConsensusState{
+ NextValidatorsHash: []byte("nextValsHash"),
+ }
+
+ // upgrade Height is at next block
+ lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1))
+
+ // zero custom fields and store in upgrade store
+ suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClient)
+
+ // commit upgrade store changes and update clients
+
+ suite.coordinator.CommitBlock(suite.chainB)
+ err := suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ suite.Require().NoError(err)
+
+ cs, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), clientA)
+ suite.Require().True(found)
+
+ proofUpgradedClient, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight())
+ proofUpgradedConsState, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight())
+ },
+ expPass: false,
+ },
+ {
+ name: "unsuccessful upgrade: final client is not valid",
+ setup: func() {
+
+ // new client has smaller unbonding period such that old trusting period is no longer valid
+ upgradedClient = types.NewClientState("newChainId", types.DefaultTrustLevel, trustingPeriod, trustingPeriod, maxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
+ upgradedConsState = &types.ConsensusState{
+ NextValidatorsHash: []byte("nextValsHash"),
+ }
+
+ // upgrade Height is at next block
+ lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1))
+
+ // zero custom fields and store in upgrade store
+ suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClient)
+ suite.chainB.App.UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsState)
+
+ // commit upgrade store changes and update clients
+
+ suite.coordinator.CommitBlock(suite.chainB)
+ err := suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ suite.Require().NoError(err)
+
+ cs, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), clientA)
+ suite.Require().True(found)
+
+ proofUpgradedClient, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight())
+ proofUpgradedConsState, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight())
+ },
+ expPass: false,
+ },
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+
+ // reset suite
+ suite.SetupTest()
+
+ clientA, _ = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+
+ tc.setup()
+
+ cs := suite.chainA.GetClientState(clientA)
+ clientStore := suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), clientA)
+
+ // Call ZeroCustomFields on upgraded clients to clear any client-chosen parameters in test-case upgradedClient
+ upgradedClient = upgradedClient.ZeroCustomFields()
+
+ clientState, consensusState, err := cs.VerifyUpgradeAndUpdateState(
+ suite.chainA.GetContext(),
+ suite.cdc,
+ clientStore,
+ upgradedClient,
+ upgradedConsState,
+ proofUpgradedClient,
+ proofUpgradedConsState,
+ )
+
+ if tc.expPass {
+ suite.Require().NoError(err, "verify upgrade failed on valid case: %s", tc.name)
+ suite.Require().NotNil(clientState, "verify upgrade failed on valid case: %s", tc.name)
+ suite.Require().NotNil(consensusState, "verify upgrade failed on valid case: %s", tc.name)
+ } else {
+ suite.Require().Error(err, "verify upgrade passed on invalid case: %s", tc.name)
+ suite.Require().Nil(clientState, "verify upgrade passed on invalid case: %s", tc.name)
+ suite.Require().Nil(consensusState, "verify upgrade passed on invalid case: %s", tc.name)
+ }
+ }
+}
diff --git a/light-clients/09-localhost/doc.go b/light-clients/09-localhost/doc.go
new file mode 100644
index 00000000..40a0f060
--- /dev/null
+++ b/light-clients/09-localhost/doc.go
@@ -0,0 +1,5 @@
+/*
+Package localhost implements a concrete `ClientState` and supporting
+types for the loop-back (localhost) client.
+*/
+package localhost
diff --git a/light-clients/09-localhost/module.go b/light-clients/09-localhost/module.go
new file mode 100644
index 00000000..57b9c5bb
--- /dev/null
+++ b/light-clients/09-localhost/module.go
@@ -0,0 +1,10 @@
+package localhost
+
+import (
+ "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/09-localhost/types"
+)
+
+// Name returns the IBC client name
+func Name() string {
+ return types.SubModuleName
+}
diff --git a/light-clients/09-localhost/types/client_state.go b/light-clients/09-localhost/types/client_state.go
new file mode 100644
index 00000000..5a4a41a1
--- /dev/null
+++ b/light-clients/09-localhost/types/client_state.go
@@ -0,0 +1,346 @@
+package types
+
+import (
+ "bytes"
+ "encoding/binary"
+ "reflect"
+ "strings"
+
+ ics23 "github.com/confio/ics23/go"
+
+ "github.com/cosmos/cosmos-sdk/codec"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
+ channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+ host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+)
+
+var _ exported.ClientState = (*ClientState)(nil)
+
+// NewClientState creates a new ClientState instance
+func NewClientState(chainID string, height clienttypes.Height) *ClientState {
+ return &ClientState{
+ ChainId: chainID,
+ Height: height,
+ }
+}
+
+// GetChainID returns the chain-id of the running (local) chain
+func (cs ClientState) GetChainID() string {
+ return cs.ChainId
+}
+
+// ClientType is localhost.
+func (cs ClientState) ClientType() string {
+ return exported.Localhost
+}
+
+// GetLatestHeight returns the latest height stored.
+func (cs ClientState) GetLatestHeight() exported.Height {
+ return cs.Height
+}
+
+// IsFrozen returns false.
+func (cs ClientState) IsFrozen() bool {
+ return false
+}
+
+// GetFrozenHeight returns an uninitialized IBC Height.
+func (cs ClientState) GetFrozenHeight() exported.Height {
+ return clienttypes.ZeroHeight()
+}
+
+// Validate performs a basic validation of the client state fields.
+func (cs ClientState) Validate() error {
+ if strings.TrimSpace(cs.ChainId) == "" {
+ return sdkerrors.Wrap(sdkerrors.ErrInvalidChainID, "chain id cannot be blank")
+ }
+ if cs.Height.RevisionHeight == 0 {
+ return sdkerrors.Wrapf(sdkerrors.ErrInvalidHeight, "local revision height cannot be zero")
+ }
+ return nil
+}
+
+// GetProofSpecs returns nil since localhost does not have to verify proofs
+func (cs ClientState) GetProofSpecs() []*ics23.ProofSpec {
+ return nil
+}
+
+// ZeroCustomFields returns the same client state since there are no custom fields in localhost
+func (cs ClientState) ZeroCustomFields() exported.ClientState {
+ return &cs
+}
+
+// Initialize ensures that initial consensus state for localhost is nil
+func (cs ClientState) Initialize(_ sdk.Context, _ codec.BinaryMarshaler, _ sdk.KVStore, consState exported.ConsensusState) error {
+ if consState != nil {
+ return sdkerrors.Wrap(clienttypes.ErrInvalidConsensus, "initial consensus state for localhost must be nil.")
+ }
+ return nil
+}
+
+// ExportMetadata is a no-op for localhost client
+func (cs ClientState) ExportMetadata(_ sdk.KVStore) []exported.GenesisMetadata {
+ return nil
+}
+
+// CheckHeaderAndUpdateState updates the localhost client. It only needs access to the context
+func (cs *ClientState) CheckHeaderAndUpdateState(
+ ctx sdk.Context, _ codec.BinaryMarshaler, _ sdk.KVStore, _ exported.Header,
+) (exported.ClientState, exported.ConsensusState, error) {
+ // use the chain ID from context since the localhost client is for the running chain (i.e. self).
+ cs.ChainId = ctx.ChainID()
+ revision := clienttypes.ParseChainID(cs.ChainId)
+ cs.Height = clienttypes.NewHeight(revision, uint64(ctx.BlockHeight()))
+ return cs, nil, nil
+}
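+
+// For example (a sketch of the update above): on a chain with ID "testchain-1" at block height 25,
+// CheckHeaderAndUpdateState sets ChainId to "testchain-1" and Height to clienttypes.NewHeight(1, 25),
+// since ParseChainID extracts the revision number (1) from the chain-ID suffix.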
+
+// CheckMisbehaviourAndUpdateState implements ClientState
+// Since localhost is the client of the running chain, misbehaviour cannot be submitted to it
+// Thus, CheckMisbehaviourAndUpdateState returns an error for localhost
+func (cs ClientState) CheckMisbehaviourAndUpdateState(
+ _ sdk.Context, _ codec.BinaryMarshaler, _ sdk.KVStore, _ exported.Misbehaviour,
+) (exported.ClientState, error) {
+ return nil, sdkerrors.Wrap(clienttypes.ErrInvalidMisbehaviour, "cannot submit misbehaviour to localhost client")
+}
+
+// CheckSubstituteAndUpdateState returns an error. The localhost client cannot be modified by
+// proposals.
+func (cs ClientState) CheckSubstituteAndUpdateState(
+ ctx sdk.Context, _ codec.BinaryMarshaler, _, _ sdk.KVStore,
+ _ exported.ClientState, _ exported.Height,
+) (exported.ClientState, error) {
+ return nil, sdkerrors.Wrap(clienttypes.ErrUpdateClientFailed, "cannot update localhost client with a proposal")
+}
+
+// VerifyUpgradeAndUpdateState returns an error since localhost cannot be upgraded
+func (cs ClientState) VerifyUpgradeAndUpdateState(
+ _ sdk.Context, _ codec.BinaryMarshaler, _ sdk.KVStore,
+ _ exported.ClientState, _ exported.ConsensusState, _, _ []byte,
+) (exported.ClientState, exported.ConsensusState, error) {
+ return nil, nil, sdkerrors.Wrap(clienttypes.ErrInvalidUpgradeClient, "cannot upgrade localhost client")
+}
+
+// VerifyClientState verifies that the localhost client state is stored locally
+func (cs ClientState) VerifyClientState(
+ store sdk.KVStore, cdc codec.BinaryMarshaler,
+ _ exported.Height, _ exported.Prefix, _ string, _ []byte, clientState exported.ClientState,
+) error {
+ path := host.KeyClientState
+ bz := store.Get([]byte(path))
+ if bz == nil {
+ return sdkerrors.Wrapf(clienttypes.ErrFailedClientStateVerification,
+ "not found for path: %s", path)
+ }
+
+ selfClient := clienttypes.MustUnmarshalClientState(cdc, bz)
+
+ if !reflect.DeepEqual(selfClient, clientState) {
+ return sdkerrors.Wrapf(clienttypes.ErrFailedClientStateVerification,
+ "stored clientState != provided clientState: \n%v\n≠\n%v",
+ selfClient, clientState,
+ )
+ }
+ return nil
+}
+
+// VerifyClientConsensusState returns nil since a localhost client does not store consensus
+// states.
+func (cs ClientState) VerifyClientConsensusState(
+ sdk.KVStore, codec.BinaryMarshaler,
+ exported.Height, string, exported.Height, exported.Prefix,
+ []byte, exported.ConsensusState,
+) error {
+ return nil
+}
+
+// VerifyConnectionState verifies a proof of the connection state of the
+// specified connection end stored locally.
+func (cs ClientState) VerifyConnectionState(
+ store sdk.KVStore,
+ cdc codec.BinaryMarshaler,
+ _ exported.Height,
+ _ exported.Prefix,
+ _ []byte,
+ connectionID string,
+ connectionEnd exported.ConnectionI,
+) error {
+ path := host.ConnectionKey(connectionID)
+ bz := store.Get(path)
+ if bz == nil {
+ return sdkerrors.Wrapf(clienttypes.ErrFailedConnectionStateVerification, "not found for path %s", path)
+ }
+
+ var prevConnection connectiontypes.ConnectionEnd
+ err := cdc.UnmarshalBinaryBare(bz, &prevConnection)
+ if err != nil {
+ return err
+ }
+
+ if !reflect.DeepEqual(&prevConnection, connectionEnd) {
+ return sdkerrors.Wrapf(
+ clienttypes.ErrFailedConnectionStateVerification,
+ "connection end ≠ previous stored connection: \n%v\n≠\n%v", connectionEnd, prevConnection,
+ )
+ }
+
+ return nil
+}
+
+// VerifyChannelState verifies a proof of the channel state of the specified
+// channel end, under the specified port, stored on the local machine.
+func (cs ClientState) VerifyChannelState(
+ store sdk.KVStore,
+ cdc codec.BinaryMarshaler,
+ _ exported.Height,
+ prefix exported.Prefix,
+ _ []byte,
+ portID,
+ channelID string,
+ channel exported.ChannelI,
+) error {
+ path := host.ChannelKey(portID, channelID)
+ bz := store.Get(path)
+ if bz == nil {
+ return sdkerrors.Wrapf(clienttypes.ErrFailedChannelStateVerification, "not found for path %s", path)
+ }
+
+ var prevChannel channeltypes.Channel
+ err := cdc.UnmarshalBinaryBare(bz, &prevChannel)
+ if err != nil {
+ return err
+ }
+
+ if !reflect.DeepEqual(&prevChannel, channel) {
+ return sdkerrors.Wrapf(
+ clienttypes.ErrFailedChannelStateVerification,
+ "channel end ≠ previous stored channel: \n%v\n≠\n%v", channel, prevChannel,
+ )
+ }
+
+ return nil
+}
+
+// VerifyPacketCommitment verifies a proof of an outgoing packet commitment at
+// the specified port, specified channel, and specified sequence.
+func (cs ClientState) VerifyPacketCommitment(
+ store sdk.KVStore,
+ _ codec.BinaryMarshaler,
+ _ exported.Height,
+ _ uint64,
+ _ uint64,
+ _ exported.Prefix,
+ _ []byte,
+ portID,
+ channelID string,
+ sequence uint64,
+ commitmentBytes []byte,
+) error {
+ path := host.PacketCommitmentKey(portID, channelID, sequence)
+
+ data := store.Get(path)
+ if len(data) == 0 {
+ return sdkerrors.Wrapf(clienttypes.ErrFailedPacketCommitmentVerification, "not found for path %s", path)
+ }
+
+ if !bytes.Equal(data, commitmentBytes) {
+ return sdkerrors.Wrapf(
+ clienttypes.ErrFailedPacketCommitmentVerification,
+ "commitment ≠ previous commitment: \n%X\n≠\n%X", commitmentBytes, data,
+ )
+ }
+
+ return nil
+}
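+
+// Note (an assumption about the 24-host key layout): host.PacketCommitmentKey(portID, channelID, sequence)
+// resolves to "commitments/ports/{portID}/channels/{channelID}/sequences/{sequence}", so the localhost
+// client reads the commitment from the same key the core channel keeper writes it under.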
+
+// VerifyPacketAcknowledgement verifies a proof of an incoming packet
+// acknowledgement at the specified port, specified channel, and specified sequence.
+func (cs ClientState) VerifyPacketAcknowledgement(
+ store sdk.KVStore,
+ _ codec.BinaryMarshaler,
+ _ exported.Height,
+ _ uint64,
+ _ uint64,
+ _ exported.Prefix,
+ _ []byte,
+ portID,
+ channelID string,
+ sequence uint64,
+ acknowledgement []byte,
+) error {
+ path := host.PacketAcknowledgementKey(portID, channelID, sequence)
+
+ data := store.Get(path)
+ if len(data) == 0 {
+ return sdkerrors.Wrapf(clienttypes.ErrFailedPacketAckVerification, "not found for path %s", path)
+ }
+
+ if !bytes.Equal(data, acknowledgement) {
+ return sdkerrors.Wrapf(
+ clienttypes.ErrFailedPacketAckVerification,
+ "ak bytes ≠ previous ack: \n%X\n≠\n%X", acknowledgement, data,
+ )
+ }
+
+ return nil
+}
+
+// VerifyPacketReceiptAbsence verifies a proof of the absence of an
+// incoming packet receipt at the specified port, specified channel, and
+// specified sequence.
+func (cs ClientState) VerifyPacketReceiptAbsence(
+ store sdk.KVStore,
+ _ codec.BinaryMarshaler,
+ _ exported.Height,
+ _ uint64,
+ _ uint64,
+ _ exported.Prefix,
+ _ []byte,
+ portID,
+ channelID string,
+ sequence uint64,
+) error {
+ path := host.PacketReceiptKey(portID, channelID, sequence)
+
+ data := store.Get(path)
+ if data != nil {
+ return sdkerrors.Wrap(clienttypes.ErrFailedPacketReceiptVerification, "expected no packet receipt")
+ }
+
+ return nil
+}
+
+// VerifyNextSequenceRecv verifies a proof of the next sequence number to be
+// received of the specified channel at the specified port.
+func (cs ClientState) VerifyNextSequenceRecv(
+ store sdk.KVStore,
+ _ codec.BinaryMarshaler,
+ _ exported.Height,
+ _ uint64,
+ _ uint64,
+ _ exported.Prefix,
+ _ []byte,
+ portID,
+ channelID string,
+ nextSequenceRecv uint64,
+) error {
+ path := host.NextSequenceRecvKey(portID, channelID)
+
+ data := store.Get(path)
+ if len(data) == 0 {
+ return sdkerrors.Wrapf(clienttypes.ErrFailedNextSeqRecvVerification, "not found for path %s", path)
+ }
+
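+ // the stored value is written as a big-endian uint64 (e.g. via sdk.Uint64ToBigEndian in the
+ // core channel keeper), so decode it the same way before comparing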
+ prevSequenceRecv := binary.BigEndian.Uint64(data)
+ if prevSequenceRecv != nextSequenceRecv {
+ return sdkerrors.Wrapf(
+ clienttypes.ErrFailedNextSeqRecvVerification,
+ "next sequence receive ≠ previous stored sequence (%d ≠ %d)", nextSequenceRecv, prevSequenceRecv,
+ )
+ }
+
+ return nil
+}
diff --git a/light-clients/09-localhost/types/client_state_test.go b/light-clients/09-localhost/types/client_state_test.go
new file mode 100644
index 00000000..bc58f625
--- /dev/null
+++ b/light-clients/09-localhost/types/client_state_test.go
@@ -0,0 +1,520 @@
+package types_test
+
+import (
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
+ channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+ commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
+ host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/09-localhost/types"
+)
+
+const (
+ testConnectionID = "connectionid"
+ testPortID = "testportid"
+ testChannelID = "testchannelid"
+ testSequence = 1
+)
+
+func (suite *LocalhostTestSuite) TestValidate() {
+ testCases := []struct {
+ name string
+ clientState *types.ClientState
+ expPass bool
+ }{
+ {
+ name: "valid client",
+ clientState: types.NewClientState("chainID", clienttypes.NewHeight(3, 10)),
+ expPass: true,
+ },
+ {
+ name: "invalid chain id",
+ clientState: types.NewClientState(" ", clienttypes.NewHeight(3, 10)),
+ expPass: false,
+ },
+ {
+ name: "invalid height",
+ clientState: types.NewClientState("chainID", clienttypes.ZeroHeight()),
+ expPass: false,
+ },
+ }
+
+ for _, tc := range testCases {
+ err := tc.clientState.Validate()
+ if tc.expPass {
+ suite.Require().NoError(err, tc.name)
+ } else {
+ suite.Require().Error(err, tc.name)
+ }
+ }
+}
+
+func (suite *LocalhostTestSuite) TestInitialize() {
+ testCases := []struct {
+ name string
+ consState exported.ConsensusState
+ expPass bool
+ }{
+ {
+ "valid initialization",
+ nil,
+ true,
+ },
+ {
+ "invalid consenus state",
+ &ibctmtypes.ConsensusState{},
+ false,
+ },
+ }
+
+ clientState := types.NewClientState("chainID", clienttypes.NewHeight(3, 10))
+
+ for _, tc := range testCases {
+ err := clientState.Initialize(suite.ctx, suite.cdc, suite.store, tc.consState)
+
+ if tc.expPass {
+ suite.Require().NoError(err, "valid testcase: %s failed", tc.name)
+ } else {
+ suite.Require().Error(err, "invalid testcase: %s passed", tc.name)
+ }
+ }
+}
+
+func (suite *LocalhostTestSuite) TestVerifyClientState() {
+ clientState := types.NewClientState("chainID", clientHeight)
+ invalidClient := types.NewClientState("chainID", clienttypes.NewHeight(0, 12))
+
+ testCases := []struct {
+ name string
+ clientState *types.ClientState
+ malleate func()
+ counterparty *types.ClientState
+ expPass bool
+ }{
+ {
+ name: "proof verification success",
+ clientState: clientState,
+ malleate: func() {
+ bz := clienttypes.MustMarshalClientState(suite.cdc, clientState)
+ suite.store.Set(host.ClientStateKey(), bz)
+ },
+ counterparty: clientState,
+ expPass: true,
+ },
+ {
+ name: "proof verification failed: invalid client",
+ clientState: clientState,
+ malleate: func() {
+ bz := clienttypes.MustMarshalClientState(suite.cdc, clientState)
+ suite.store.Set(host.ClientStateKey(), bz)
+ },
+ counterparty: invalidClient,
+ expPass: false,
+ },
+ {
+ name: "proof verification failed: client not stored",
+ clientState: clientState,
+ malleate: func() {},
+ counterparty: clientState,
+ expPass: false,
+ },
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+
+ suite.Run(tc.name, func() {
+ suite.SetupTest()
+ tc.malleate()
+
+ err := tc.clientState.VerifyClientState(
+ suite.store, suite.cdc, clienttypes.NewHeight(0, 10), nil, "", []byte{}, tc.counterparty,
+ )
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+
+}
+
+func (suite *LocalhostTestSuite) TestVerifyClientConsensusState() {
+ clientState := types.NewClientState("chainID", clientHeight)
+ err := clientState.VerifyClientConsensusState(
+ nil, nil, nil, "", nil, nil, nil, nil,
+ )
+ suite.Require().NoError(err)
+}
+
+func (suite *LocalhostTestSuite) TestCheckHeaderAndUpdateState() {
+ clientState := types.NewClientState("chainID", clientHeight)
+ cs, _, err := clientState.CheckHeaderAndUpdateState(suite.ctx, nil, nil, nil)
+ suite.Require().NoError(err)
+ suite.Require().Equal(uint64(0), cs.GetLatestHeight().GetRevisionNumber())
+ suite.Require().Equal(suite.ctx.BlockHeight(), int64(cs.GetLatestHeight().GetRevisionHeight()))
+ suite.Require().Equal(suite.ctx.BlockHeader().ChainID, clientState.ChainId)
+}
+
+func (suite *LocalhostTestSuite) TestMisbehaviourAndUpdateState() {
+ clientState := types.NewClientState("chainID", clientHeight)
+ cs, err := clientState.CheckMisbehaviourAndUpdateState(suite.ctx, nil, nil, nil)
+ suite.Require().Error(err)
+ suite.Require().Nil(cs)
+}
+
+func (suite *LocalhostTestSuite) TestProposedHeaderAndUpdateState() {
+ clientState := types.NewClientState("chainID", clientHeight)
+ cs, err := clientState.CheckSubstituteAndUpdateState(suite.ctx, nil, nil, nil, nil, nil)
+ suite.Require().Error(err)
+ suite.Require().Nil(cs)
+}
+
+func (suite *LocalhostTestSuite) TestVerifyConnectionState() {
+ counterparty := connectiontypes.NewCounterparty("clientB", testConnectionID, commitmenttypes.NewMerklePrefix([]byte("ibc")))
+ conn1 := connectiontypes.NewConnectionEnd(connectiontypes.OPEN, "clientA", counterparty, []*connectiontypes.Version{connectiontypes.NewVersion("1", nil)}, 0)
+ conn2 := connectiontypes.NewConnectionEnd(connectiontypes.OPEN, "clientA", counterparty, []*connectiontypes.Version{connectiontypes.NewVersion("2", nil)}, 0)
+
+ testCases := []struct {
+ name string
+ clientState *types.ClientState
+ malleate func()
+ connection connectiontypes.ConnectionEnd
+ expPass bool
+ }{
+ {
+ name: "proof verification success",
+ clientState: types.NewClientState("chainID", clientHeight),
+ malleate: func() {
+ bz, err := suite.cdc.MarshalBinaryBare(&conn1)
+ suite.Require().NoError(err)
+ suite.store.Set(host.ConnectionKey(testConnectionID), bz)
+ },
+ connection: conn1,
+ expPass: true,
+ },
+ {
+ name: "proof verification failed: connection not stored",
+ clientState: types.NewClientState("chainID", clientHeight),
+ malleate: func() {},
+ connection: conn1,
+ expPass: false,
+ },
+ {
+ name: "proof verification failed: unmarshal error",
+ clientState: types.NewClientState("chainID", clientHeight),
+ malleate: func() {
+ suite.store.Set(host.ConnectionKey(testConnectionID), []byte("connection"))
+ },
+ connection: conn1,
+ expPass: false,
+ },
+ {
+ name: "proof verification failed: different connection stored",
+ clientState: types.NewClientState("chainID", clientHeight),
+ malleate: func() {
+ bz, err := suite.cdc.MarshalBinaryBare(&conn2)
+ suite.Require().NoError(err)
+ suite.store.Set(host.ConnectionKey(testConnectionID), bz)
+ },
+ connection: conn1,
+ expPass: false,
+ },
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+
+ suite.Run(tc.name, func() {
+ suite.SetupTest()
+ tc.malleate()
+
+ err := tc.clientState.VerifyConnectionState(
+ suite.store, suite.cdc, clientHeight, nil, []byte{}, testConnectionID, &tc.connection,
+ )
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+}
+
+func (suite *LocalhostTestSuite) TestVerifyChannelState() {
+ counterparty := channeltypes.NewCounterparty(testPortID, testChannelID)
+ ch1 := channeltypes.NewChannel(channeltypes.OPEN, channeltypes.ORDERED, counterparty, []string{testConnectionID}, "1.0.0")
+ ch2 := channeltypes.NewChannel(channeltypes.OPEN, channeltypes.ORDERED, counterparty, []string{testConnectionID}, "2.0.0")
+
+ testCases := []struct {
+ name string
+ clientState *types.ClientState
+ malleate func()
+ channel channeltypes.Channel
+ expPass bool
+ }{
+ {
+ name: "proof verification success",
+ clientState: types.NewClientState("chainID", clientHeight),
+ malleate: func() {
+ bz, err := suite.cdc.MarshalBinaryBare(&ch1)
+ suite.Require().NoError(err)
+ suite.store.Set(host.ChannelKey(testPortID, testChannelID), bz)
+ },
+ channel: ch1,
+ expPass: true,
+ },
+ {
+ name: "proof verification failed: channel not stored",
+ clientState: types.NewClientState("chainID", clientHeight),
+ malleate: func() {},
+ channel: ch1,
+ expPass: false,
+ },
+ {
+ name: "proof verification failed: unmarshal failed",
+ clientState: types.NewClientState("chainID", clientHeight),
+ malleate: func() {
+ suite.store.Set(host.ChannelKey(testPortID, testChannelID), []byte("channel"))
+
+ },
+ channel: ch1,
+ expPass: false,
+ },
+ {
+ name: "proof verification failed: different channel stored",
+ clientState: types.NewClientState("chainID", clientHeight),
+ malleate: func() {
+ bz, err := suite.cdc.MarshalBinaryBare(&ch2)
+ suite.Require().NoError(err)
+ suite.store.Set(host.ChannelKey(testPortID, testChannelID), bz)
+
+ },
+ channel: ch1,
+ expPass: false,
+ },
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+
+ suite.Run(tc.name, func() {
+ suite.SetupTest()
+ tc.malleate()
+
+ err := tc.clientState.VerifyChannelState(
+ suite.store, suite.cdc, clientHeight, nil, []byte{}, testPortID, testChannelID, &tc.channel,
+ )
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+}
+
+func (suite *LocalhostTestSuite) TestVerifyPacketCommitment() {
+ testCases := []struct {
+ name string
+ clientState *types.ClientState
+ malleate func()
+ commitment []byte
+ expPass bool
+ }{
+ {
+ name: "proof verification success",
+ clientState: types.NewClientState("chainID", clientHeight),
+ malleate: func() {
+ suite.store.Set(
+ host.PacketCommitmentKey(testPortID, testChannelID, testSequence), []byte("commitment"),
+ )
+ },
+ commitment: []byte("commitment"),
+ expPass: true,
+ },
+ {
+ name: "proof verification failed: different commitment stored",
+ clientState: types.NewClientState("chainID", clientHeight),
+ malleate: func() {
+ suite.store.Set(
+ host.PacketCommitmentKey(testPortID, testChannelID, testSequence), []byte("different"),
+ )
+ },
+ commitment: []byte("commitment"),
+ expPass: false,
+ },
+ {
+ name: "proof verification failed: no commitment stored",
+ clientState: types.NewClientState("chainID", clientHeight),
+ malleate: func() {},
+ commitment: []byte{},
+ expPass: false,
+ },
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+
+ suite.Run(tc.name, func() {
+ suite.SetupTest()
+ tc.malleate()
+
+ err := tc.clientState.VerifyPacketCommitment(
+ suite.store, suite.cdc, clientHeight, 0, 0, nil, []byte{}, testPortID, testChannelID, testSequence, tc.commitment,
+ )
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+}
+
+func (suite *LocalhostTestSuite) TestVerifyPacketAcknowledgement() {
+ testCases := []struct {
+ name string
+ clientState *types.ClientState
+ malleate func()
+ ack []byte
+ expPass bool
+ }{
+ {
+ name: "proof verification success",
+ clientState: types.NewClientState("chainID", clientHeight),
+ malleate: func() {
+ suite.store.Set(
+ host.PacketAcknowledgementKey(testPortID, testChannelID, testSequence), []byte("acknowledgement"),
+ )
+ },
+ ack: []byte("acknowledgement"),
+ expPass: true,
+ },
+ {
+ name: "proof verification failed: different ack stored",
+ clientState: types.NewClientState("chainID", clientHeight),
+ malleate: func() {
+ suite.store.Set(
+ host.PacketAcknowledgementKey(testPortID, testChannelID, testSequence), []byte("different"),
+ )
+ },
+ ack: []byte("acknowledgement"),
+ expPass: false,
+ },
+ {
+ name: "proof verification failed: no commitment stored",
+ clientState: types.NewClientState("chainID", clientHeight),
+ malleate: func() {},
+ ack: []byte{},
+ expPass: false,
+ },
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+
+ suite.Run(tc.name, func() {
+ suite.SetupTest()
+ tc.malleate()
+
+ err := tc.clientState.VerifyPacketAcknowledgement(
+ suite.store, suite.cdc, clientHeight, 0, 0, nil, []byte{}, testPortID, testChannelID, testSequence, tc.ack,
+ )
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+}
+
+func (suite *LocalhostTestSuite) TestVerifyPacketReceiptAbsence() {
+ clientState := types.NewClientState("chainID", clientHeight)
+
+ err := clientState.VerifyPacketReceiptAbsence(
+ suite.store, suite.cdc, clientHeight, 0, 0, nil, nil, testPortID, testChannelID, testSequence,
+ )
+
+ suite.Require().NoError(err, "receipt absence failed")
+
+ suite.store.Set(host.PacketReceiptKey(testPortID, testChannelID, testSequence), []byte("receipt"))
+
+ err = clientState.VerifyPacketReceiptAbsence(
+ suite.store, suite.cdc, clientHeight, 0, 0, nil, nil, testPortID, testChannelID, testSequence,
+ )
+ suite.Require().Error(err, "receipt exists in store")
+}
+
+func (suite *LocalhostTestSuite) TestVerifyNextSeqRecv() {
+ nextSeqRecv := uint64(5)
+
+ testCases := []struct {
+ name string
+ clientState *types.ClientState
+ malleate func()
+ nextSeqRecv uint64
+ expPass bool
+ }{
+ {
+ name: "proof verification success",
+ clientState: types.NewClientState("chainID", clientHeight),
+ malleate: func() {
+ suite.store.Set(
+ host.NextSequenceRecvKey(testPortID, testChannelID),
+ sdk.Uint64ToBigEndian(nextSeqRecv),
+ )
+ },
+ nextSeqRecv: nextSeqRecv,
+ expPass: true,
+ },
+ {
+ name: "proof verification failed: different nextSeqRecv stored",
+ clientState: types.NewClientState("chainID", clientHeight),
+ malleate: func() {
+ suite.store.Set(
+ host.NextSequenceRecvKey(testPortID, testChannelID),
+ sdk.Uint64ToBigEndian(3),
+ )
+ },
+ nextSeqRecv: nextSeqRecv,
+ expPass: false,
+ },
+ {
+ name: "proof verification failed: no nextSeqRecv stored",
+ clientState: types.NewClientState("chainID", clientHeight),
+ malleate: func() {},
+ nextSeqRecv: nextSeqRecv,
+ expPass: false,
+ },
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+
+ suite.Run(tc.name, func() {
+ suite.SetupTest()
+ tc.malleate()
+
+ err := tc.clientState.VerifyNextSequenceRecv(
+ suite.store, suite.cdc, clientHeight, 0, 0, nil, []byte{}, testPortID, testChannelID, nextSeqRecv,
+ )
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+}
diff --git a/light-clients/09-localhost/types/codec.go b/light-clients/09-localhost/types/codec.go
new file mode 100644
index 00000000..b338dfb6
--- /dev/null
+++ b/light-clients/09-localhost/types/codec.go
@@ -0,0 +1,15 @@
+package types
+
+import (
+ codectypes "github.com/cosmos/cosmos-sdk/codec/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+)
+
+// RegisterInterfaces register the ibc interfaces submodule implementations to protobuf
+// Any.
+func RegisterInterfaces(registry codectypes.InterfaceRegistry) {
+ registry.RegisterImplementations(
+ (*exported.ClientState)(nil),
+ &ClientState{},
+ )
+}
diff --git a/light-clients/09-localhost/types/errors.go b/light-clients/09-localhost/types/errors.go
new file mode 100644
index 00000000..57ad7c1f
--- /dev/null
+++ b/light-clients/09-localhost/types/errors.go
@@ -0,0 +1,10 @@
+package types
+
+import (
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+)
+
+// Localhost sentinel errors
+var (
+ ErrConsensusStatesNotStored = sdkerrors.Register(SubModuleName, 2, "localhost does not store consensus states")
+)
diff --git a/light-clients/09-localhost/types/keys.go b/light-clients/09-localhost/types/keys.go
new file mode 100644
index 00000000..2fe7c7e4
--- /dev/null
+++ b/light-clients/09-localhost/types/keys.go
@@ -0,0 +1,6 @@
+package types
+
+const (
+ // SubModuleName for the localhost (loopback) client
+ SubModuleName = "localhost"
+)
diff --git a/light-clients/09-localhost/types/localhost.pb.go b/light-clients/09-localhost/types/localhost.pb.go
new file mode 100644
index 00000000..bf2ec3a5
--- /dev/null
+++ b/light-clients/09-localhost/types/localhost.pb.go
@@ -0,0 +1,369 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: ibcgo/lightclients/localhost/v1/localhost.proto
+
+package types
+
+import (
+ fmt "fmt"
+ types "github.com/cosmos/ibc-go/core/02-client/types"
+ _ "github.com/gogo/protobuf/gogoproto"
+ proto "github.com/gogo/protobuf/proto"
+ io "io"
+ math "math"
+ math_bits "math/bits"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+// ClientState defines a loopback (localhost) client. It requires (read-only)
+// access to keys outside the client prefix.
+type ClientState struct {
+ // self chain ID
+ ChainId string `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty" yaml:"chain_id"`
+ // self latest block height
+ Height types.Height `protobuf:"bytes,2,opt,name=height,proto3" json:"height"`
+}
+
+func (m *ClientState) Reset() { *m = ClientState{} }
+func (m *ClientState) String() string { return proto.CompactTextString(m) }
+func (*ClientState) ProtoMessage() {}
+func (*ClientState) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1a6dbd867337bf2e, []int{0}
+}
+func (m *ClientState) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ClientState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_ClientState.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *ClientState) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ClientState.Merge(m, src)
+}
+func (m *ClientState) XXX_Size() int {
+ return m.Size()
+}
+func (m *ClientState) XXX_DiscardUnknown() {
+ xxx_messageInfo_ClientState.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ClientState proto.InternalMessageInfo
+
+func init() {
+ proto.RegisterType((*ClientState)(nil), "ibcgo.lightclients.localhost.v1.ClientState")
+}
+
+func init() {
+ proto.RegisterFile("ibcgo/lightclients/localhost/v1/localhost.proto", fileDescriptor_1a6dbd867337bf2e)
+}
+
+var fileDescriptor_1a6dbd867337bf2e = []byte{
+ // 275 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0xcf, 0x4c, 0x4a, 0x4e,
+ 0xcf, 0xd7, 0xcf, 0xc9, 0x4c, 0xcf, 0x28, 0x49, 0xce, 0xc9, 0x4c, 0xcd, 0x2b, 0x29, 0xd6, 0xcf,
+ 0xc9, 0x4f, 0x4e, 0xcc, 0xc9, 0xc8, 0x2f, 0x2e, 0xd1, 0x2f, 0x33, 0x44, 0x70, 0xf4, 0x0a, 0x8a,
+ 0xf2, 0x4b, 0xf2, 0x85, 0xe4, 0xc1, 0x1a, 0xf4, 0x90, 0x35, 0xe8, 0x21, 0xd4, 0x94, 0x19, 0x4a,
+ 0x89, 0xa4, 0xe7, 0xa7, 0xe7, 0x83, 0xd5, 0xea, 0x83, 0x58, 0x10, 0x6d, 0x52, 0x8a, 0x10, 0x7b,
+ 0x92, 0xf3, 0x8b, 0x52, 0xf5, 0x21, 0xda, 0x40, 0x86, 0x43, 0x58, 0x10, 0x25, 0x4a, 0xf5, 0x5c,
+ 0xdc, 0xce, 0x60, 0x7e, 0x70, 0x49, 0x62, 0x49, 0xaa, 0x90, 0x1e, 0x17, 0x47, 0x72, 0x46, 0x62,
+ 0x66, 0x5e, 0x7c, 0x66, 0x8a, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0xa7, 0x93, 0xf0, 0xa7, 0x7b, 0xf2,
+ 0xfc, 0x95, 0x89, 0xb9, 0x39, 0x56, 0x4a, 0x30, 0x19, 0xa5, 0x20, 0x76, 0x30, 0xd3, 0x33, 0x45,
+ 0xc8, 0x8a, 0x8b, 0x2d, 0x23, 0x15, 0xe4, 0x2a, 0x09, 0x26, 0x05, 0x46, 0x0d, 0x6e, 0x23, 0x19,
+ 0x3d, 0x88, 0x4b, 0x41, 0x56, 0xea, 0x41, 0x2d, 0x2a, 0x33, 0xd4, 0xf3, 0x00, 0xab, 0x71, 0x62,
+ 0x39, 0x71, 0x4f, 0x9e, 0x21, 0x08, 0xaa, 0xc3, 0x8a, 0xa5, 0x63, 0x81, 0x3c, 0x83, 0x53, 0xf0,
+ 0x89, 0x47, 0x72, 0x8c, 0x17, 0x1e, 0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, 0x38, 0xe1, 0xb1, 0x1c,
+ 0xc3, 0x85, 0xc7, 0x72, 0x0c, 0x37, 0x1e, 0xcb, 0x31, 0x44, 0x59, 0xa6, 0x67, 0x96, 0x64, 0x94,
+ 0x26, 0xe9, 0x25, 0xe7, 0xe7, 0xea, 0x27, 0xe7, 0x17, 0xe7, 0xe6, 0x17, 0x83, 0xc2, 0x4d, 0x17,
+ 0x16, 0x70, 0xba, 0xb0, 0x90, 0x33, 0xb0, 0xd4, 0x45, 0x04, 0x5e, 0x49, 0x65, 0x41, 0x6a, 0x71,
+ 0x12, 0x1b, 0xd8, 0x73, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x65, 0xa2, 0xe9, 0xaa, 0x69,
+ 0x01, 0x00, 0x00,
+}
+
+func (m *ClientState) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ClientState) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ClientState) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Height.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintLocalhost(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ if len(m.ChainId) > 0 {
+ i -= len(m.ChainId)
+ copy(dAtA[i:], m.ChainId)
+ i = encodeVarintLocalhost(dAtA, i, uint64(len(m.ChainId)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintLocalhost(dAtA []byte, offset int, v uint64) int {
+ offset -= sovLocalhost(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *ClientState) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.ChainId)
+ if l > 0 {
+ n += 1 + l + sovLocalhost(uint64(l))
+ }
+ l = m.Height.Size()
+ n += 1 + l + sovLocalhost(uint64(l))
+ return n
+}
+
+func sovLocalhost(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozLocalhost(x uint64) (n int) {
+ return sovLocalhost(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *ClientState) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLocalhost
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ClientState: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ClientState: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ChainId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLocalhost
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthLocalhost
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthLocalhost
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ChainId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLocalhost
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthLocalhost
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthLocalhost
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Height.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipLocalhost(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthLocalhost
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipLocalhost(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowLocalhost
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowLocalhost
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowLocalhost
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthLocalhost
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupLocalhost
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthLocalhost
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthLocalhost = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowLocalhost = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupLocalhost = fmt.Errorf("proto: unexpected end of group")
+)
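
As a quick sanity sketch of the generated codec above (illustrative, test-style snippet; not part of the patch), marshalling and unmarshalling a ClientState round-trips both fields:

cs := ClientState{ChainId: "ibc-chain", Height: types.NewHeight(0, 10)}
bz, err := cs.Marshal()
if err != nil {
	panic(err)
}
var decoded ClientState
if err := decoded.Unmarshal(bz); err != nil {
	panic(err)
}
// decoded.ChainId == "ibc-chain" and decoded.Height equals the original height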
diff --git a/light-clients/09-localhost/types/localhost_test.go b/light-clients/09-localhost/types/localhost_test.go
new file mode 100644
index 00000000..8ebaef84
--- /dev/null
+++ b/light-clients/09-localhost/types/localhost_test.go
@@ -0,0 +1,43 @@
+package types_test
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/suite"
+ tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
+
+ "github.com/cosmos/cosmos-sdk/codec"
+ "github.com/cosmos/cosmos-sdk/simapp"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+)
+
+const (
+ height = 4
+)
+
+var (
+ clientHeight = clienttypes.NewHeight(0, 10)
+)
+
+type LocalhostTestSuite struct {
+ suite.Suite
+
+ cdc codec.Marshaler
+ ctx sdk.Context
+ store sdk.KVStore
+}
+
+func (suite *LocalhostTestSuite) SetupTest() {
+ isCheckTx := false
+ app := simapp.Setup(isCheckTx)
+
+ suite.cdc = app.AppCodec()
+ suite.ctx = app.BaseApp.NewContext(isCheckTx, tmproto.Header{Height: 1, ChainID: "ibc-chain"})
+ suite.store = app.IBCKeeper.ClientKeeper.ClientStore(suite.ctx, exported.Localhost)
+}
+
+func TestLocalhostTestSuite(t *testing.T) {
+ suite.Run(t, new(LocalhostTestSuite))
+}
diff --git a/proto/ibcgo/apps/transfer/v1/genesis.proto b/proto/ibcgo/apps/transfer/v1/genesis.proto
new file mode 100644
index 00000000..50a68179
--- /dev/null
+++ b/proto/ibcgo/apps/transfer/v1/genesis.proto
@@ -0,0 +1,19 @@
+syntax = "proto3";
+
+package ibcgo.apps.transfer.v1;
+
+option go_package = "github.com/cosmos/ibc-go/apps/transfer/types";
+
+import "ibcgo/apps/transfer/v1/transfer.proto";
+import "gogoproto/gogo.proto";
+
+// GenesisState defines the ibc-transfer genesis state
+message GenesisState {
+ string port_id = 1 [ (gogoproto.moretags) = "yaml:\"port_id\"" ];
+ repeated DenomTrace denom_traces = 2 [
+ (gogoproto.castrepeated) = "Traces",
+ (gogoproto.nullable) = false,
+ (gogoproto.moretags) = "yaml:\"denom_traces\""
+ ];
+ Params params = 3 [ (gogoproto.nullable) = false ];
+}
diff --git a/proto/ibcgo/apps/transfer/v1/query.proto b/proto/ibcgo/apps/transfer/v1/query.proto
new file mode 100644
index 00000000..f7dcb5f8
--- /dev/null
+++ b/proto/ibcgo/apps/transfer/v1/query.proto
@@ -0,0 +1,68 @@
+syntax = "proto3";
+
+package ibcgo.apps.transfer.v1;
+
+import "gogoproto/gogo.proto";
+import "cosmos/base/query/v1beta1/pagination.proto";
+import "ibcgo/apps/transfer/v1/transfer.proto";
+import "google/api/annotations.proto";
+
+option go_package = "github.com/cosmos/ibc-go/apps/transfer/types";
+
+// Query defines the gRPC querier service.
+service Query {
+ // DenomTrace queries a denomination trace information.
+ rpc DenomTrace(QueryDenomTraceRequest) returns (QueryDenomTraceResponse) {
+ option (google.api.http).get = "/ibc/apps/transfer/v1/denom_traces/{hash}";
+ }
+
+ // DenomTraces queries all denomination traces.
+ rpc DenomTraces(QueryDenomTracesRequest) returns (QueryDenomTracesResponse) {
+ option (google.api.http).get = "/ibc/apps/transfer/v1/denom_traces";
+ }
+
+ // Params queries all parameters of the ibc-transfer module.
+ rpc Params(QueryParamsRequest) returns (QueryParamsResponse) {
+ option (google.api.http).get = "/ibc/apps/transfer/v1/params";
+ }
+}
+
+// QueryDenomTraceRequest is the request type for the Query/DenomTrace RPC
+// method
+message QueryDenomTraceRequest {
+ // hash (in hex format) of the denomination trace information.
+ string hash = 1;
+}
+
+// QueryDenomTraceResponse is the response type for the Query/DenomTrace RPC
+// method.
+message QueryDenomTraceResponse {
+ // denom_trace returns the requested denomination trace information.
+ DenomTrace denom_trace = 1;
+}
+
+// QueryDenomTracesRequest is the request type for the Query/DenomTraces RPC
+// method
+message QueryDenomTracesRequest {
+ // pagination defines an optional pagination for the request.
+ cosmos.base.query.v1beta1.PageRequest pagination = 1;
+}
+
+// QueryDenomTracesResponse is the response type for the Query/DenomTraces RPC
+// method.
+message QueryDenomTracesResponse {
+ // denom_traces returns all denominations trace information.
+ repeated DenomTrace denom_traces = 1
+ [ (gogoproto.castrepeated) = "Traces", (gogoproto.nullable) = false ];
+ // pagination defines the pagination in the response.
+ cosmos.base.query.v1beta1.PageResponse pagination = 2;
+}
+
+// QueryParamsRequest is the request type for the Query/Params RPC method.
+message QueryParamsRequest {}
+
+// QueryParamsResponse is the response type for the Query/Params RPC method.
+message QueryParamsResponse {
+ // params defines the parameters of the module.
+ Params params = 1;
+}
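
For illustration, the gRPC client generated from this service could be used roughly as follows (a sketch; conn is an assumed *grpc.ClientConn, the hash value is a placeholder, and context/fmt imports are assumed):

queryClient := types.NewQueryClient(conn)
res, err := queryClient.DenomTrace(context.Background(), &types.QueryDenomTraceRequest{
	Hash: "ABCDEF0123456789", // placeholder hex hash of a denom trace
})
if err != nil {
	panic(err)
}
fmt.Println(res.DenomTrace.Path, res.DenomTrace.BaseDenom)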
diff --git a/proto/ibcgo/apps/transfer/v1/transfer.proto b/proto/ibcgo/apps/transfer/v1/transfer.proto
new file mode 100644
index 00000000..78c9ed91
--- /dev/null
+++ b/proto/ibcgo/apps/transfer/v1/transfer.proto
@@ -0,0 +1,45 @@
+syntax = "proto3";
+
+package ibcgo.apps.transfer.v1;
+
+option go_package = "github.com/cosmos/ibc-go/apps/transfer/types";
+
+import "gogoproto/gogo.proto";
+
+// FungibleTokenPacketData defines a struct for the packet payload
+// See FungibleTokenPacketData spec:
+// https://github.com/cosmos/ics/tree/master/spec/ics-020-fungible-token-transfer#data-structures
+message FungibleTokenPacketData {
+ // the token denomination to be transferred
+ string denom = 1;
+ // the token amount to be transferred
+ uint64 amount = 2;
+ // the sender address
+ string sender = 3;
+ // the recipient address on the destination chain
+ string receiver = 4;
+}
+
+// DenomTrace contains the base denomination for ICS20 fungible tokens and the
+// source tracing information path.
+message DenomTrace {
+ // path defines the chain of port/channel identifiers used for tracing the
+ // source of the fungible token.
+ string path = 1;
+ // base denomination of the relayed fungible token.
+ string base_denom = 2;
+}
+
+// Params defines the set of IBC transfer parameters.
+// NOTE: To prevent a single token from being transferred, keep send_enabled
+// set to true and instead set the bank module's SendEnabled parameter for that
+// denomination to false.
+message Params {
+ // send_enabled enables or disables all cross-chain token transfers from this
+ // chain.
+ bool send_enabled = 1 [ (gogoproto.moretags) = "yaml:\"send_enabled\"" ];
+ // receive_enabled enables or disables all cross-chain token transfers to this
+ // chain.
+ bool receive_enabled = 2
+ [ (gogoproto.moretags) = "yaml:\"receive_enabled\"" ];
+}
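
To make the DenomTrace fields concrete, here is an illustrative sketch (example values and assumed standard-library imports) of how a trace decomposes and how the on-chain ibc/... voucher denomination is conventionally derived from it:

trace := DenomTrace{
	Path:      "transfer/channel-0", // port/channel hops the token travelled through
	BaseDenom: "uatom",              // original denomination on the source chain
}
fullPath := trace.Path + "/" + trace.BaseDenom // "transfer/channel-0/uatom"

// the voucher denom on the destination chain is "ibc/" followed by the
// uppercase hex SHA-256 of the full trace path
sum := sha256.Sum256([]byte(fullPath))
ibcDenom := "ibc/" + strings.ToUpper(hex.EncodeToString(sum[:]))
_ = ibcDenom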
diff --git a/proto/ibcgo/apps/transfer/v1/tx.proto b/proto/ibcgo/apps/transfer/v1/tx.proto
new file mode 100644
index 00000000..a6b6a5d6
--- /dev/null
+++ b/proto/ibcgo/apps/transfer/v1/tx.proto
@@ -0,0 +1,48 @@
+syntax = "proto3";
+
+package ibcgo.apps.transfer.v1;
+
+option go_package = "github.com/cosmos/ibc-go/apps/transfer/types";
+
+import "gogoproto/gogo.proto";
+import "cosmos/base/v1beta1/coin.proto";
+import "ibcgo/core/client/v1/client.proto";
+
+// Msg defines the ibc/transfer Msg service.
+service Msg {
+ // Transfer defines a rpc handler method for MsgTransfer.
+ rpc Transfer(MsgTransfer) returns (MsgTransferResponse);
+}
+
+// MsgTransfer defines a msg to transfer fungible tokens (i.e. Coins) between
+// ICS20 enabled chains. See ICS Spec here:
+// https://github.com/cosmos/ics/tree/master/spec/ics-020-fungible-token-transfer#data-structures
+message MsgTransfer {
+ option (gogoproto.equal) = false;
+ option (gogoproto.goproto_getters) = false;
+
+ // the port on which the packet will be sent
+ string source_port = 1 [ (gogoproto.moretags) = "yaml:\"source_port\"" ];
+ // the channel by which the packet will be sent
+ string source_channel = 2
+ [ (gogoproto.moretags) = "yaml:\"source_channel\"" ];
+ // the tokens to be transferred
+ cosmos.base.v1beta1.Coin token = 3 [ (gogoproto.nullable) = false ];
+ // the sender address
+ string sender = 4;
+ // the recipient address on the destination chain
+ string receiver = 5;
+ // Timeout height relative to the current block height.
+ // The timeout is disabled when set to 0.
+ ibcgo.core.client.v1.Height timeout_height = 6 [
+ (gogoproto.moretags) = "yaml:\"timeout_height\"",
+ (gogoproto.nullable) = false
+ ];
+ // Timeout timestamp (in nanoseconds) relative to the current block timestamp.
+ // The timeout is disabled when set to 0.
+ uint64 timeout_timestamp = 7
+ [ (gogoproto.moretags) = "yaml:\"timeout_timestamp\"" ];
+}
+
+// MsgTransferResponse defines the Msg/Transfer response type.
+message MsgTransferResponse {}
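
A construction sketch for the message defined above (field values are illustrative; senderAddr and receiverAddr are assumed to exist, and sdk/clienttypes refer to the cosmos-sdk coin and 02-client height types):

msg := &MsgTransfer{
	SourcePort:    "transfer",
	SourceChannel: "channel-0",
	Token:         sdk.NewCoin("uatom", sdk.NewInt(100)),
	Sender:        senderAddr.String(),
	Receiver:      receiverAddr,
	// a zero height and a zero timestamp each disable that timeout
	TimeoutHeight:    clienttypes.NewHeight(1, 1000),
	TimeoutTimestamp: 0,
}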
diff --git a/proto/ibcgo/core/channel/v1/channel.proto b/proto/ibcgo/core/channel/v1/channel.proto
new file mode 100644
index 00000000..459e852d
--- /dev/null
+++ b/proto/ibcgo/core/channel/v1/channel.proto
@@ -0,0 +1,157 @@
+syntax = "proto3";
+
+package ibcgo.core.channel.v1;
+
+option go_package = "github.com/cosmos/ibc-go/core/04-channel/types";
+
+import "gogoproto/gogo.proto";
+import "ibcgo/core/client/v1/client.proto";
+
+// Channel defines a pipeline for exactly-once packet delivery between specific
+// modules on separate blockchains, which has at least one end capable of
+// sending packets and one end capable of receiving packets.
+message Channel {
+ option (gogoproto.goproto_getters) = false;
+
+ // current state of the channel end
+ State state = 1;
+ // whether the channel is ordered or unordered
+ Order ordering = 2;
+ // counterparty channel end
+ Counterparty counterparty = 3 [ (gogoproto.nullable) = false ];
+ // list of connection identifiers, in order, along which packets sent on
+ // this channel will travel
+ repeated string connection_hops = 4
+ [ (gogoproto.moretags) = "yaml:\"connection_hops\"" ];
+ // opaque channel version, which is agreed upon during the handshake
+ string version = 5;
+}
+
+// IdentifiedChannel defines a channel with additional port and channel
+// identifier fields.
+message IdentifiedChannel {
+ option (gogoproto.goproto_getters) = false;
+
+ // current state of the channel end
+ State state = 1;
+ // whether the channel is ordered or unordered
+ Order ordering = 2;
+ // counterparty channel end
+ Counterparty counterparty = 3 [ (gogoproto.nullable) = false ];
+ // list of connection identifiers, in order, along which packets sent on
+ // this channel will travel
+ repeated string connection_hops = 4
+ [ (gogoproto.moretags) = "yaml:\"connection_hops\"" ];
+ // opaque channel version, which is agreed upon during the handshake
+ string version = 5;
+ // port identifier
+ string port_id = 6;
+ // channel identifier
+ string channel_id = 7;
+}
+
+// State defines if a channel is in one of the following states:
+// CLOSED, INIT, TRYOPEN, OPEN or UNINITIALIZED.
+enum State {
+ option (gogoproto.goproto_enum_prefix) = false;
+
+ // Default State
+ STATE_UNINITIALIZED_UNSPECIFIED = 0
+ [ (gogoproto.enumvalue_customname) = "UNINITIALIZED" ];
+ // A channel has just started the opening handshake.
+ STATE_INIT = 1 [ (gogoproto.enumvalue_customname) = "INIT" ];
+ // A channel has acknowledged the handshake step on the counterparty chain.
+ STATE_TRYOPEN = 2 [ (gogoproto.enumvalue_customname) = "TRYOPEN" ];
+ // A channel has completed the handshake. Open channels are
+ // ready to send and receive packets.
+ STATE_OPEN = 3 [ (gogoproto.enumvalue_customname) = "OPEN" ];
+ // A channel has been closed and can no longer be used to send or receive
+ // packets.
+ STATE_CLOSED = 4 [ (gogoproto.enumvalue_customname) = "CLOSED" ];
+}
+
+// Order defines if a channel is ORDERED or UNORDERED
+enum Order {
+ option (gogoproto.goproto_enum_prefix) = false;
+
+ // zero-value for channel ordering
+ ORDER_NONE_UNSPECIFIED = 0 [ (gogoproto.enumvalue_customname) = "NONE" ];
+ // packets can be delivered in any order, which may differ from the order in
+ // which they were sent.
+ ORDER_UNORDERED = 1 [ (gogoproto.enumvalue_customname) = "UNORDERED" ];
+  // packets are delivered exactly in the order in which they were sent
+ ORDER_ORDERED = 2 [ (gogoproto.enumvalue_customname) = "ORDERED" ];
+}
+
+// Counterparty defines a channel end counterparty
+message Counterparty {
+ option (gogoproto.goproto_getters) = false;
+
+ // port on the counterparty chain which owns the other end of the channel.
+ string port_id = 1 [ (gogoproto.moretags) = "yaml:\"port_id\"" ];
+ // channel end on the counterparty chain
+ string channel_id = 2 [ (gogoproto.moretags) = "yaml:\"channel_id\"" ];
+}
+
+// Packet defines a type that carries data across different chains through IBC
+message Packet {
+ option (gogoproto.goproto_getters) = false;
+
+ // number corresponds to the order of sends and receives, where a Packet
+ // with an earlier sequence number must be sent and received before a Packet
+ // with a later sequence number.
+ uint64 sequence = 1;
+ // identifies the port on the sending chain.
+ string source_port = 2 [ (gogoproto.moretags) = "yaml:\"source_port\"" ];
+ // identifies the channel end on the sending chain.
+ string source_channel = 3
+ [ (gogoproto.moretags) = "yaml:\"source_channel\"" ];
+ // identifies the port on the receiving chain.
+ string destination_port = 4
+ [ (gogoproto.moretags) = "yaml:\"destination_port\"" ];
+ // identifies the channel end on the receiving chain.
+ string destination_channel = 5
+ [ (gogoproto.moretags) = "yaml:\"destination_channel\"" ];
+ // actual opaque bytes transferred directly to the application module
+ bytes data = 6;
+ // block height after which the packet times out
+ ibcgo.core.client.v1.Height timeout_height = 7 [
+ (gogoproto.moretags) = "yaml:\"timeout_height\"",
+ (gogoproto.nullable) = false
+ ];
+ // block timestamp (in nanoseconds) after which the packet times out
+ uint64 timeout_timestamp = 8
+ [ (gogoproto.moretags) = "yaml:\"timeout_timestamp\"" ];
+}
+
+// PacketState defines the generic type necessary to retrieve and store
+// packet commitments, acknowledgements, and receipts.
+// Caller is responsible for knowing the context necessary to interpret this
+// state as a commitment, acknowledgement, or a receipt.
+message PacketState {
+ option (gogoproto.goproto_getters) = false;
+
+ // channel port identifier.
+ string port_id = 1 [ (gogoproto.moretags) = "yaml:\"port_id\"" ];
+ // channel unique identifier.
+ string channel_id = 2 [ (gogoproto.moretags) = "yaml:\"channel_id\"" ];
+ // packet sequence.
+ uint64 sequence = 3;
+ // embedded data that represents packet state.
+ bytes data = 4;
+}
+
+// Acknowledgement is the recommended acknowledgement format to be used by
+// app-specific protocols.
+// NOTE: The field numbers 21 and 22 were explicitly chosen to avoid accidental
+// conflicts with other protobuf message formats used for acknowledgements.
+// The first byte of any message with this format will be the non-ASCII values
+// `0xaa` (result) or `0xb2` (error). Implemented as defined by ICS:
+// https://github.com/cosmos/ics/tree/master/spec/ics-004-channel-and-packet-semantics#acknowledgement-envelope
+message Acknowledgement {
+ // response contains either a result or an error and must be non-empty
+ oneof response {
+ bytes result = 21;
+ string error = 22;
+ }
+}
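
A worked check of the field-number note above: the first byte of a serialized protobuf field is (field_number << 3) | wire_type, and both oneof variants use wire type 2 (length-delimited), which is why fields 21 and 22 produce the stated marker bytes. The sketch below assumes the standard gogo-generated oneof wrapper names:

resultTag := byte(21<<3 | 2) // 0xaa, first byte of a result acknowledgement
errorTag := byte(22<<3 | 2)  // 0xb2, first byte of an error acknowledgement
_, _ = resultTag, errorTag

ackOK := Acknowledgement{Response: &Acknowledgement_Result{Result: []byte{0x01}}}
ackErr := Acknowledgement{Response: &Acknowledgement_Error{Error: "packet handling failed"}}
_, _ = ackOK, ackErr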
diff --git a/proto/ibcgo/core/channel/v1/genesis.proto b/proto/ibcgo/core/channel/v1/genesis.proto
new file mode 100644
index 00000000..12f67486
--- /dev/null
+++ b/proto/ibcgo/core/channel/v1/genesis.proto
@@ -0,0 +1,42 @@
+syntax = "proto3";
+
+package ibcgo.core.channel.v1;
+
+option go_package = "github.com/cosmos/ibc-go/core/04-channel/types";
+
+import "gogoproto/gogo.proto";
+import "ibcgo/core/channel/v1/channel.proto";
+
+// GenesisState defines the ibc channel submodule's genesis state.
+message GenesisState {
+ repeated IdentifiedChannel channels = 1 [
+ (gogoproto.casttype) = "IdentifiedChannel",
+ (gogoproto.nullable) = false
+ ];
+ repeated PacketState acknowledgements = 2 [ (gogoproto.nullable) = false ];
+ repeated PacketState commitments = 3 [ (gogoproto.nullable) = false ];
+ repeated PacketState receipts = 4 [ (gogoproto.nullable) = false ];
+ repeated PacketSequence send_sequences = 5 [
+ (gogoproto.nullable) = false,
+ (gogoproto.moretags) = "yaml:\"send_sequences\""
+ ];
+ repeated PacketSequence recv_sequences = 6 [
+ (gogoproto.nullable) = false,
+ (gogoproto.moretags) = "yaml:\"recv_sequences\""
+ ];
+ repeated PacketSequence ack_sequences = 7 [
+ (gogoproto.nullable) = false,
+ (gogoproto.moretags) = "yaml:\"ack_sequences\""
+ ];
+ // the sequence for the next generated channel identifier
+ uint64 next_channel_sequence = 8
+ [ (gogoproto.moretags) = "yaml:\"next_channel_sequence\"" ];
+}
+
+// PacketSequence defines the genesis type necessary to retrieve and store
+// next send and receive sequences.
+message PacketSequence {
+ string port_id = 1 [ (gogoproto.moretags) = "yaml:\"port_id\"" ];
+ string channel_id = 2 [ (gogoproto.moretags) = "yaml:\"channel_id\"" ];
+ uint64 sequence = 3;
+}
diff --git a/proto/ibcgo/core/channel/v1/query.proto b/proto/ibcgo/core/channel/v1/query.proto
new file mode 100644
index 00000000..a989b2ad
--- /dev/null
+++ b/proto/ibcgo/core/channel/v1/query.proto
@@ -0,0 +1,389 @@
+syntax = "proto3";
+
+package ibcgo.core.channel.v1;
+
+option go_package = "github.com/cosmos/ibc-go/core/04-channel/types";
+
+import "ibcgo/core/client/v1/client.proto";
+import "cosmos/base/query/v1beta1/pagination.proto";
+import "ibcgo/core/channel/v1/channel.proto";
+import "google/api/annotations.proto";
+import "google/protobuf/any.proto";
+import "gogoproto/gogo.proto";
+
+// Query defines the gRPC querier service
+service Query {
+ // Channel queries an IBC Channel.
+ rpc Channel(QueryChannelRequest) returns (QueryChannelResponse) {
+ option (google.api.http).get =
+ "/ibc/core/channel/v1/channels/{channel_id}/ports/{port_id}";
+ }
+
+ // Channels queries all the IBC channels of a chain.
+ rpc Channels(QueryChannelsRequest) returns (QueryChannelsResponse) {
+ option (google.api.http).get = "/ibc/core/channel/v1/channels";
+ }
+
+ // ConnectionChannels queries all the channels associated with a connection
+ // end.
+ rpc ConnectionChannels(QueryConnectionChannelsRequest)
+ returns (QueryConnectionChannelsResponse) {
+ option (google.api.http).get =
+ "/ibc/core/channel/v1/connections/{connection}/channels";
+ }
+
+ // ChannelClientState queries for the client state for the channel associated
+ // with the provided channel identifiers.
+ rpc ChannelClientState(QueryChannelClientStateRequest)
+ returns (QueryChannelClientStateResponse) {
+ option (google.api.http).get = "/ibc/core/channel/v1/channels/{channel_id}/"
+ "ports/{port_id}/client_state";
+ }
+
+ // ChannelConsensusState queries for the consensus state for the channel
+ // associated with the provided channel identifiers.
+ rpc ChannelConsensusState(QueryChannelConsensusStateRequest)
+ returns (QueryChannelConsensusStateResponse) {
+ option (google.api.http).get = "/ibc/core/channel/v1/channels/{channel_id}/"
+ "ports/{port_id}/consensus_state/revision/"
+ "{revision_number}/height/{revision_height}";
+ }
+
+ // PacketCommitment queries a stored packet commitment hash.
+ rpc PacketCommitment(QueryPacketCommitmentRequest)
+ returns (QueryPacketCommitmentResponse) {
+ option (google.api.http).get =
+ "/ibc/core/channel/v1/channels/{channel_id}/ports/{port_id}/"
+ "packet_commitments/{sequence}";
+ }
+
+ // PacketCommitments returns all the packet commitments hashes associated
+ // with a channel.
+ rpc PacketCommitments(QueryPacketCommitmentsRequest)
+ returns (QueryPacketCommitmentsResponse) {
+ option (google.api.http).get = "/ibc/core/channel/v1/channels/{channel_id}/"
+ "ports/{port_id}/packet_commitments";
+ }
+
+ // PacketReceipt queries if a given packet sequence has been received on the
+ // queried chain
+ rpc PacketReceipt(QueryPacketReceiptRequest)
+ returns (QueryPacketReceiptResponse) {
+ option (google.api.http).get = "/ibc/core/channel/v1/channels/{channel_id}/"
+ "ports/{port_id}/packet_receipts/{sequence}";
+ }
+
+ // PacketAcknowledgement queries a stored packet acknowledgement hash.
+ rpc PacketAcknowledgement(QueryPacketAcknowledgementRequest)
+ returns (QueryPacketAcknowledgementResponse) {
+ option (google.api.http).get = "/ibc/core/channel/v1/channels/{channel_id}/"
+ "ports/{port_id}/packet_acks/{sequence}";
+ }
+
+ // PacketAcknowledgements returns all the packet acknowledgements associated
+ // with a channel.
+ rpc PacketAcknowledgements(QueryPacketAcknowledgementsRequest)
+ returns (QueryPacketAcknowledgementsResponse) {
+ option (google.api.http).get = "/ibc/core/channel/v1/channels/{channel_id}/"
+ "ports/{port_id}/packet_acknowledgements";
+ }
+
+ // UnreceivedPackets returns all the unreceived IBC packets associated with a
+ // channel and sequences.
+ rpc UnreceivedPackets(QueryUnreceivedPacketsRequest)
+ returns (QueryUnreceivedPacketsResponse) {
+ option (google.api.http).get =
+ "/ibc/core/channel/v1/channels/{channel_id}/ports/{port_id}/"
+ "packet_commitments/"
+ "{packet_commitment_sequences}/unreceived_packets";
+ }
+
+ // UnreceivedAcks returns all the unreceived IBC acknowledgements associated
+ // with a channel and sequences.
+ rpc UnreceivedAcks(QueryUnreceivedAcksRequest)
+ returns (QueryUnreceivedAcksResponse) {
+ option (google.api.http).get = "/ibc/core/channel/v1/channels/{channel_id}/"
+ "ports/{port_id}/packet_commitments/"
+ "{packet_ack_sequences}/unreceived_acks";
+ }
+
+ // NextSequenceReceive returns the next receive sequence for a given channel.
+ rpc NextSequenceReceive(QueryNextSequenceReceiveRequest)
+ returns (QueryNextSequenceReceiveResponse) {
+ option (google.api.http).get = "/ibc/core/channel/v1/channels/{channel_id}/"
+ "ports/{port_id}/next_sequence";
+ }
+}
+
+// QueryChannelRequest is the request type for the Query/Channel RPC method
+message QueryChannelRequest {
+ // port unique identifier
+ string port_id = 1;
+ // channel unique identifier
+ string channel_id = 2;
+}
+
+// QueryChannelResponse is the response type for the Query/Channel RPC method.
+// Besides the Channel end, it includes a proof and the height from which the
+// proof was retrieved.
+message QueryChannelResponse {
+ // channel associated with the request identifiers
+ ibcgo.core.channel.v1.Channel channel = 1;
+ // merkle proof of existence
+ bytes proof = 2;
+ // height at which the proof was retrieved
+ ibcgo.core.client.v1.Height proof_height = 3 [ (gogoproto.nullable) = false ];
+}
+
+// QueryChannelsRequest is the request type for the Query/Channels RPC method
+message QueryChannelsRequest {
+ // pagination request
+ cosmos.base.query.v1beta1.PageRequest pagination = 1;
+}
+
+// QueryChannelsResponse is the response type for the Query/Channels RPC method.
+message QueryChannelsResponse {
+ // list of stored channels of the chain.
+ repeated ibcgo.core.channel.v1.IdentifiedChannel channels = 1;
+ // pagination response
+ cosmos.base.query.v1beta1.PageResponse pagination = 2;
+ // query block height
+ ibcgo.core.client.v1.Height height = 3 [ (gogoproto.nullable) = false ];
+}
+
+// QueryConnectionChannelsRequest is the request type for the
+// Query/QueryConnectionChannels RPC method
+message QueryConnectionChannelsRequest {
+ // connection unique identifier
+ string connection = 1;
+ // pagination request
+ cosmos.base.query.v1beta1.PageRequest pagination = 2;
+}
+
+// QueryConnectionChannelsResponse is the Response type for the
+// Query/QueryConnectionChannels RPC method
+message QueryConnectionChannelsResponse {
+ // list of channels associated with a connection.
+ repeated ibcgo.core.channel.v1.IdentifiedChannel channels = 1;
+ // pagination response
+ cosmos.base.query.v1beta1.PageResponse pagination = 2;
+ // query block height
+ ibcgo.core.client.v1.Height height = 3 [ (gogoproto.nullable) = false ];
+}
+
+// QueryChannelClientStateRequest is the request type for the Query/ClientState
+// RPC method
+message QueryChannelClientStateRequest {
+ // port unique identifier
+ string port_id = 1;
+ // channel unique identifier
+ string channel_id = 2;
+}
+
+// QueryChannelClientStateResponse is the Response type for the
+// Query/QueryChannelClientState RPC method
+message QueryChannelClientStateResponse {
+ // client state associated with the channel
+ ibcgo.core.client.v1.IdentifiedClientState identified_client_state = 1;
+ // merkle proof of existence
+ bytes proof = 2;
+ // height at which the proof was retrieved
+ ibcgo.core.client.v1.Height proof_height = 3 [ (gogoproto.nullable) = false ];
+}
+
+// QueryChannelConsensusStateRequest is the request type for the
+// Query/ConsensusState RPC method
+message QueryChannelConsensusStateRequest {
+ // port unique identifier
+ string port_id = 1;
+ // channel unique identifier
+ string channel_id = 2;
+ // revision number of the consensus state
+ uint64 revision_number = 3;
+ // revision height of the consensus state
+ uint64 revision_height = 4;
+}
+
+// QueryChannelConsensusStateResponse is the response type for the
+// Query/ChannelConsensusState RPC method
+message QueryChannelConsensusStateResponse {
+ // consensus state associated with the channel
+ google.protobuf.Any consensus_state = 1;
+ // client ID associated with the consensus state
+ string client_id = 2;
+ // merkle proof of existence
+ bytes proof = 3;
+ // height at which the proof was retrieved
+ ibcgo.core.client.v1.Height proof_height = 4 [ (gogoproto.nullable) = false ];
+}
+
+// QueryPacketCommitmentRequest is the request type for the
+// Query/PacketCommitment RPC method
+message QueryPacketCommitmentRequest {
+ // port unique identifier
+ string port_id = 1;
+ // channel unique identifier
+ string channel_id = 2;
+ // packet sequence
+ uint64 sequence = 3;
+}
+
+// QueryPacketCommitmentResponse defines the client query response for a packet
+// which also includes a proof and the height from which the proof was
+// retrieved
+message QueryPacketCommitmentResponse {
+ // packet associated with the request fields
+ bytes commitment = 1;
+ // merkle proof of existence
+ bytes proof = 2;
+ // height at which the proof was retrieved
+ ibcgo.core.client.v1.Height proof_height = 3 [ (gogoproto.nullable) = false ];
+}
+
+// QueryPacketCommitmentsRequest is the request type for the
+// Query/QueryPacketCommitments RPC method
+message QueryPacketCommitmentsRequest {
+ // port unique identifier
+ string port_id = 1;
+ // channel unique identifier
+ string channel_id = 2;
+ // pagination request
+ cosmos.base.query.v1beta1.PageRequest pagination = 3;
+}
+
+// QueryPacketCommitmentsResponse is the response type for the
+// Query/QueryPacketCommitments RPC method
+message QueryPacketCommitmentsResponse {
+ repeated ibcgo.core.channel.v1.PacketState commitments = 1;
+ // pagination response
+ cosmos.base.query.v1beta1.PageResponse pagination = 2;
+ // query block height
+ ibcgo.core.client.v1.Height height = 3 [ (gogoproto.nullable) = false ];
+}
+
+// QueryPacketReceiptRequest is the request type for the
+// Query/PacketReceipt RPC method
+message QueryPacketReceiptRequest {
+ // port unique identifier
+ string port_id = 1;
+ // channel unique identifier
+ string channel_id = 2;
+ // packet sequence
+ uint64 sequence = 3;
+}
+
+// QueryPacketReceiptResponse defines the client query response for a packet
+// receipt which also includes a proof, and the height from which the proof was
+// retrieved
+message QueryPacketReceiptResponse {
+  // flag indicating whether the packet receipt exists
+ bool received = 2;
+ // merkle proof of existence
+ bytes proof = 3;
+ // height at which the proof was retrieved
+ ibcgo.core.client.v1.Height proof_height = 4 [ (gogoproto.nullable) = false ];
+}
+
+// QueryPacketAcknowledgementRequest is the request type for the
+// Query/PacketAcknowledgement RPC method
+message QueryPacketAcknowledgementRequest {
+ // port unique identifier
+ string port_id = 1;
+ // channel unique identifier
+ string channel_id = 2;
+ // packet sequence
+ uint64 sequence = 3;
+}
+
+// QueryPacketAcknowledgementResponse defines the client query response for a
+// packet which also includes a proof and the height from which the
+// proof was retrieved
+message QueryPacketAcknowledgementResponse {
+ // packet associated with the request fields
+ bytes acknowledgement = 1;
+ // merkle proof of existence
+ bytes proof = 2;
+ // height at which the proof was retrieved
+ ibcgo.core.client.v1.Height proof_height = 3 [ (gogoproto.nullable) = false ];
+}
+
+// QueryPacketAcknowledgementsRequest is the request type for the
+// Query/QueryPacketCommitments RPC method
+message QueryPacketAcknowledgementsRequest {
+ // port unique identifier
+ string port_id = 1;
+ // channel unique identifier
+ string channel_id = 2;
+ // pagination request
+ cosmos.base.query.v1beta1.PageRequest pagination = 3;
+}
+
+// QueryPacketAcknowledgementsResponse is the response type for the
+// Query/QueryPacketAcknowledgements RPC method
+message QueryPacketAcknowledgementsResponse {
+ repeated ibcgo.core.channel.v1.PacketState acknowledgements = 1;
+ // pagination response
+ cosmos.base.query.v1beta1.PageResponse pagination = 2;
+ // query block height
+ ibcgo.core.client.v1.Height height = 3 [ (gogoproto.nullable) = false ];
+}
+
+// QueryUnreceivedPacketsRequest is the request type for the
+// Query/UnreceivedPackets RPC method
+message QueryUnreceivedPacketsRequest {
+ // port unique identifier
+ string port_id = 1;
+ // channel unique identifier
+ string channel_id = 2;
+ // list of packet sequences
+ repeated uint64 packet_commitment_sequences = 3;
+}
+
+// QueryUnreceivedPacketsResponse is the response type for the
+// Query/UnreceivedPackets RPC method
+message QueryUnreceivedPacketsResponse {
+ // list of unreceived packet sequences
+ repeated uint64 sequences = 1;
+ // query block height
+ ibcgo.core.client.v1.Height height = 2 [ (gogoproto.nullable) = false ];
+}
+
+// QueryUnreceivedAcksRequest is the request type for the
+// Query/UnreceivedAcks RPC method
+message QueryUnreceivedAcksRequest {
+ // port unique identifier
+ string port_id = 1;
+ // channel unique identifier
+ string channel_id = 2;
+ // list of acknowledgement sequences
+ repeated uint64 packet_ack_sequences = 3;
+}
+
+// QueryUnreceivedAcksResponse is the response type for the
+// Query/UnreceivedAcks RPC method
+message QueryUnreceivedAcksResponse {
+ // list of unreceived acknowledgement sequences
+ repeated uint64 sequences = 1;
+ // query block height
+ ibcgo.core.client.v1.Height height = 2 [ (gogoproto.nullable) = false ];
+}
+
+// QueryNextSequenceReceiveRequest is the request type for the
+// Query/NextSequenceReceive RPC method
+message QueryNextSequenceReceiveRequest {
+ // port unique identifier
+ string port_id = 1;
+ // channel unique identifier
+ string channel_id = 2;
+}
+
+// QueryNextSequenceReceiveResponse is the response type for the
+// Query/NextSequenceReceive RPC method
+message QueryNextSequenceReceiveResponse {
+ // next sequence receive number
+ uint64 next_sequence_receive = 1;
+ // merkle proof of existence
+ bytes proof = 2;
+ // height at which the proof was retrieved
+ ibcgo.core.client.v1.Height proof_height = 3 [ (gogoproto.nullable) = false ];
+}
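
A usage sketch for one of the queries above (illustrative; conn is an assumed gRPC connection and the generated client and request types follow the standard codegen for this file):

queryClient := types.NewQueryClient(conn)
res, err := queryClient.NextSequenceReceive(context.Background(), &types.QueryNextSequenceReceiveRequest{
	PortId:    "transfer",
	ChannelId: "channel-0",
})
if err != nil {
	panic(err)
}
fmt.Println("next sequence receive:", res.NextSequenceReceive)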
diff --git a/proto/ibcgo/core/channel/v1/tx.proto b/proto/ibcgo/core/channel/v1/tx.proto
new file mode 100644
index 00000000..290c3a94
--- /dev/null
+++ b/proto/ibcgo/core/channel/v1/tx.proto
@@ -0,0 +1,239 @@
+syntax = "proto3";
+
+package ibcgo.core.channel.v1;
+
+option go_package = "github.com/cosmos/ibc-go/core/04-channel/types";
+
+import "gogoproto/gogo.proto";
+import "ibcgo/core/client/v1/client.proto";
+import "ibcgo/core/channel/v1/channel.proto";
+
+// Msg defines the ibc/channel Msg service.
+service Msg {
+ // ChannelOpenInit defines a rpc handler method for MsgChannelOpenInit.
+ rpc ChannelOpenInit(MsgChannelOpenInit) returns (MsgChannelOpenInitResponse);
+
+ // ChannelOpenTry defines a rpc handler method for MsgChannelOpenTry.
+ rpc ChannelOpenTry(MsgChannelOpenTry) returns (MsgChannelOpenTryResponse);
+
+ // ChannelOpenAck defines a rpc handler method for MsgChannelOpenAck.
+ rpc ChannelOpenAck(MsgChannelOpenAck) returns (MsgChannelOpenAckResponse);
+
+ // ChannelOpenConfirm defines a rpc handler method for MsgChannelOpenConfirm.
+ rpc ChannelOpenConfirm(MsgChannelOpenConfirm)
+ returns (MsgChannelOpenConfirmResponse);
+
+ // ChannelCloseInit defines a rpc handler method for MsgChannelCloseInit.
+ rpc ChannelCloseInit(MsgChannelCloseInit)
+ returns (MsgChannelCloseInitResponse);
+
+ // ChannelCloseConfirm defines a rpc handler method for
+ // MsgChannelCloseConfirm.
+ rpc ChannelCloseConfirm(MsgChannelCloseConfirm)
+ returns (MsgChannelCloseConfirmResponse);
+
+ // RecvPacket defines a rpc handler method for MsgRecvPacket.
+ rpc RecvPacket(MsgRecvPacket) returns (MsgRecvPacketResponse);
+
+ // Timeout defines a rpc handler method for MsgTimeout.
+ rpc Timeout(MsgTimeout) returns (MsgTimeoutResponse);
+
+ // TimeoutOnClose defines a rpc handler method for MsgTimeoutOnClose.
+ rpc TimeoutOnClose(MsgTimeoutOnClose) returns (MsgTimeoutOnCloseResponse);
+
+ // Acknowledgement defines a rpc handler method for MsgAcknowledgement.
+ rpc Acknowledgement(MsgAcknowledgement) returns (MsgAcknowledgementResponse);
+}
+
+// MsgChannelOpenInit defines an sdk.Msg to initialize a channel handshake. It
+// is called by a relayer on Chain A.
+message MsgChannelOpenInit {
+ option (gogoproto.equal) = false;
+ option (gogoproto.goproto_getters) = false;
+
+ string port_id = 1 [ (gogoproto.moretags) = "yaml:\"port_id\"" ];
+ Channel channel = 2 [ (gogoproto.nullable) = false ];
+ string signer = 3;
+}
+
+// MsgChannelOpenInitResponse defines the Msg/ChannelOpenInit response type.
+message MsgChannelOpenInitResponse {}
+
+// MsgChannelOpenTry defines a msg sent by a Relayer to try to open a channel
+// on Chain B.
+message MsgChannelOpenTry {
+ option (gogoproto.equal) = false;
+ option (gogoproto.goproto_getters) = false;
+
+ string port_id = 1 [ (gogoproto.moretags) = "yaml:\"port_id\"" ];
+  // in the case of crossing hellos, when both chains call OpenInit, we need
+ // the channel identifier of the previous channel in state INIT
+ string previous_channel_id = 2
+ [ (gogoproto.moretags) = "yaml:\"previous_channel_id\"" ];
+ Channel channel = 3 [ (gogoproto.nullable) = false ];
+ string counterparty_version = 4
+ [ (gogoproto.moretags) = "yaml:\"counterparty_version\"" ];
+ bytes proof_init = 5 [ (gogoproto.moretags) = "yaml:\"proof_init\"" ];
+ ibcgo.core.client.v1.Height proof_height = 6 [
+ (gogoproto.moretags) = "yaml:\"proof_height\"",
+ (gogoproto.nullable) = false
+ ];
+ string signer = 7;
+}
+
+// MsgChannelOpenTryResponse defines the Msg/ChannelOpenTry response type.
+message MsgChannelOpenTryResponse {}
+
+// MsgChannelOpenAck defines a msg sent by a Relayer to Chain A to acknowledge
+// the change of channel state to TRYOPEN on Chain B.
+message MsgChannelOpenAck {
+ option (gogoproto.equal) = false;
+ option (gogoproto.goproto_getters) = false;
+
+ string port_id = 1 [ (gogoproto.moretags) = "yaml:\"port_id\"" ];
+ string channel_id = 2 [ (gogoproto.moretags) = "yaml:\"channel_id\"" ];
+ string counterparty_channel_id = 3
+ [ (gogoproto.moretags) = "yaml:\"counterparty_channel_id\"" ];
+ string counterparty_version = 4
+ [ (gogoproto.moretags) = "yaml:\"counterparty_version\"" ];
+ bytes proof_try = 5 [ (gogoproto.moretags) = "yaml:\"proof_try\"" ];
+ ibcgo.core.client.v1.Height proof_height = 6 [
+ (gogoproto.moretags) = "yaml:\"proof_height\"",
+ (gogoproto.nullable) = false
+ ];
+ string signer = 7;
+}
+
+// MsgChannelOpenAckResponse defines the Msg/ChannelOpenAck response type.
+message MsgChannelOpenAckResponse {}
+
+// MsgChannelOpenConfirm defines a msg sent by a Relayer to Chain B to
+// acknowledge the change of channel state to OPEN on Chain A.
+message MsgChannelOpenConfirm {
+ option (gogoproto.equal) = false;
+ option (gogoproto.goproto_getters) = false;
+
+ string port_id = 1 [ (gogoproto.moretags) = "yaml:\"port_id\"" ];
+ string channel_id = 2 [ (gogoproto.moretags) = "yaml:\"channel_id\"" ];
+ bytes proof_ack = 3 [ (gogoproto.moretags) = "yaml:\"proof_ack\"" ];
+ ibcgo.core.client.v1.Height proof_height = 4 [
+ (gogoproto.moretags) = "yaml:\"proof_height\"",
+ (gogoproto.nullable) = false
+ ];
+ string signer = 5;
+}
+
+// MsgChannelOpenConfirmResponse defines the Msg/ChannelOpenConfirm response
+// type.
+message MsgChannelOpenConfirmResponse {}
+
+// MsgChannelCloseInit defines a msg sent by a Relayer to Chain A
+// to close a channel with Chain B.
+message MsgChannelCloseInit {
+ option (gogoproto.equal) = false;
+ option (gogoproto.goproto_getters) = false;
+
+ string port_id = 1 [ (gogoproto.moretags) = "yaml:\"port_id\"" ];
+ string channel_id = 2 [ (gogoproto.moretags) = "yaml:\"channel_id\"" ];
+ string signer = 3;
+}
+
+// MsgChannelCloseInitResponse defines the Msg/ChannelCloseInit response type.
+message MsgChannelCloseInitResponse {}
+
+// MsgChannelCloseConfirm defines a msg sent by a Relayer to Chain B
+// to acknowledge the change of channel state to CLOSED on Chain A.
+message MsgChannelCloseConfirm {
+ option (gogoproto.equal) = false;
+ option (gogoproto.goproto_getters) = false;
+
+ string port_id = 1 [ (gogoproto.moretags) = "yaml:\"port_id\"" ];
+ string channel_id = 2 [ (gogoproto.moretags) = "yaml:\"channel_id\"" ];
+ bytes proof_init = 3 [ (gogoproto.moretags) = "yaml:\"proof_init\"" ];
+ ibcgo.core.client.v1.Height proof_height = 4 [
+ (gogoproto.moretags) = "yaml:\"proof_height\"",
+ (gogoproto.nullable) = false
+ ];
+ string signer = 5;
+}
+
+// MsgChannelCloseConfirmResponse defines the Msg/ChannelCloseConfirm response
+// type.
+message MsgChannelCloseConfirmResponse {}
+
+// MsgRecvPacket receives an incoming IBC packet
+message MsgRecvPacket {
+ option (gogoproto.equal) = false;
+ option (gogoproto.goproto_getters) = false;
+
+ Packet packet = 1 [ (gogoproto.nullable) = false ];
+ bytes proof_commitment = 2
+ [ (gogoproto.moretags) = "yaml:\"proof_commitment\"" ];
+ ibcgo.core.client.v1.Height proof_height = 3 [
+ (gogoproto.moretags) = "yaml:\"proof_height\"",
+ (gogoproto.nullable) = false
+ ];
+ string signer = 4;
+}
+
+// MsgRecvPacketResponse defines the Msg/RecvPacket response type.
+message MsgRecvPacketResponse {}
+
+// MsgTimeout receives a timed-out packet
+message MsgTimeout {
+ option (gogoproto.equal) = false;
+ option (gogoproto.goproto_getters) = false;
+
+ Packet packet = 1 [ (gogoproto.nullable) = false ];
+ bytes proof_unreceived = 2
+ [ (gogoproto.moretags) = "yaml:\"proof_unreceived\"" ];
+ ibcgo.core.client.v1.Height proof_height = 3 [
+ (gogoproto.moretags) = "yaml:\"proof_height\"",
+ (gogoproto.nullable) = false
+ ];
+ uint64 next_sequence_recv = 4
+ [ (gogoproto.moretags) = "yaml:\"next_sequence_recv\"" ];
+ string signer = 5;
+}
+
+// MsgTimeoutResponse defines the Msg/Timeout response type.
+message MsgTimeoutResponse {}
+
+// MsgTimeoutOnClose times out a packet upon counterparty channel closure.
+message MsgTimeoutOnClose {
+ option (gogoproto.equal) = false;
+ option (gogoproto.goproto_getters) = false;
+
+ Packet packet = 1 [ (gogoproto.nullable) = false ];
+ bytes proof_unreceived = 2
+ [ (gogoproto.moretags) = "yaml:\"proof_unreceived\"" ];
+ bytes proof_close = 3 [ (gogoproto.moretags) = "yaml:\"proof_close\"" ];
+ ibcgo.core.client.v1.Height proof_height = 4 [
+ (gogoproto.moretags) = "yaml:\"proof_height\"",
+ (gogoproto.nullable) = false
+ ];
+ uint64 next_sequence_recv = 5
+ [ (gogoproto.moretags) = "yaml:\"next_sequence_recv\"" ];
+ string signer = 6;
+}
+
+// MsgTimeoutOnCloseResponse defines the Msg/TimeoutOnClose response type.
+message MsgTimeoutOnCloseResponse {}
+
+// MsgAcknowledgement receives an incoming IBC acknowledgement
+message MsgAcknowledgement {
+ option (gogoproto.equal) = false;
+ option (gogoproto.goproto_getters) = false;
+
+ Packet packet = 1 [ (gogoproto.nullable) = false ];
+ bytes acknowledgement = 2;
+ bytes proof_acked = 3 [ (gogoproto.moretags) = "yaml:\"proof_acked\"" ];
+ ibcgo.core.client.v1.Height proof_height = 4 [
+ (gogoproto.moretags) = "yaml:\"proof_height\"",
+ (gogoproto.nullable) = false
+ ];
+ string signer = 5;
+}
+
+// MsgAcknowledgementResponse defines the Msg/Acknowledgement response type.
+message MsgAcknowledgementResponse {}
diff --git a/proto/ibcgo/core/client/v1/client.proto b/proto/ibcgo/core/client/v1/client.proto
new file mode 100644
index 00000000..6e036bed
--- /dev/null
+++ b/proto/ibcgo/core/client/v1/client.proto
@@ -0,0 +1,96 @@
+syntax = "proto3";
+
+package ibcgo.core.client.v1;
+
+option go_package = "github.com/cosmos/ibc-go/core/02-client/types";
+
+import "gogoproto/gogo.proto";
+import "google/protobuf/any.proto";
+
+// IdentifiedClientState defines a client state with an additional client
+// identifier field.
+message IdentifiedClientState {
+ // client identifier
+ string client_id = 1 [ (gogoproto.moretags) = "yaml:\"client_id\"" ];
+ // client state
+ google.protobuf.Any client_state = 2
+ [ (gogoproto.moretags) = "yaml:\"client_state\"" ];
+}
+
+// ConsensusStateWithHeight defines a consensus state with an additional height
+// field.
+message ConsensusStateWithHeight {
+ // consensus state height
+ Height height = 1 [ (gogoproto.nullable) = false ];
+ // consensus state
+ google.protobuf.Any consensus_state = 2
+      [ (gogoproto.moretags) = "yaml:\"consensus_state\"" ];
+}
+
+// ClientConsensusStates defines all the stored consensus states for a given
+// client.
+message ClientConsensusStates {
+ // client identifier
+ string client_id = 1 [ (gogoproto.moretags) = "yaml:\"client_id\"" ];
+ // consensus states and their heights associated with the client
+ repeated ConsensusStateWithHeight consensus_states = 2 [
+ (gogoproto.moretags) = "yaml:\"consensus_states\"",
+ (gogoproto.nullable) = false
+ ];
+}
+
+// ClientUpdateProposal is a governance proposal. If it passes, the substitute
+// client's consensus states starting from the 'initial height' are copied over
+// to the subject client's state. The proposal handler may fail if the subject
+// and the substitute do not match in client and chain parameters (with the
+// exception of latest height, frozen height, and chain-id). The updated client
+// must also be valid (cannot be expired).
+message ClientUpdateProposal {
+ option (gogoproto.goproto_getters) = false;
+ // the title of the update proposal
+ string title = 1;
+ // the description of the proposal
+ string description = 2;
+ // the client identifier for the client to be updated if the proposal passes
+ string subject_client_id = 3
+ [ (gogoproto.moretags) = "yaml:\"subject_client_id\"" ];
+ // the substitute client identifier for the client standing in for the subject
+ // client
+ string substitute_client_id = 4
+      [ (gogoproto.moretags) = "yaml:\"substitute_client_id\"" ];
+  // the initial height to copy consensus states from the substitute to the
+ // subject
+ Height initial_height = 5 [
+ (gogoproto.moretags) = "yaml:\"initial_height\"",
+ (gogoproto.nullable) = false
+ ];
+}
+
+// Height is a monotonically increasing data type
+// that can be compared against another Height for the purposes of updating and
+// freezing clients
+//
+// Normally the RevisionHeight is incremented at each height while keeping
+// RevisionNumber the same. However, some consensus algorithms may choose to
+// reset the height in certain conditions, e.g. hard forks or state-machine
+// breaking changes. In these cases, the RevisionNumber is incremented so that
+// height continues to be monotonically increasing even as the RevisionHeight
+// gets reset.
+message Height {
+ option (gogoproto.goproto_getters) = false;
+ option (gogoproto.goproto_stringer) = false;
+
+ // the revision that the client is currently on
+ uint64 revision_number = 1
+ [ (gogoproto.moretags) = "yaml:\"revision_number\"" ];
+ // the height within the given revision
+ uint64 revision_height = 2
+ [ (gogoproto.moretags) = "yaml:\"revision_height\"" ];
+}
+
+// Params defines the set of IBC light client parameters.
+message Params {
+ // allowed_clients defines the list of allowed client state types.
+ repeated string allowed_clients = 1
+ [ (gogoproto.moretags) = "yaml:\"allowed_clients\"" ];
+}
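
The ordering described in the Height comment is lexicographic on (revision_number, revision_height); a minimal comparison sketch (not the actual 02-client implementation):

// heightGT reports whether a > b under IBC height ordering: compare the
// revision number first, and only fall back to the height within a revision.
func heightGT(a, b Height) bool {
	if a.RevisionNumber != b.RevisionNumber {
		return a.RevisionNumber > b.RevisionNumber
	}
	return a.RevisionHeight > b.RevisionHeight
}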
diff --git a/proto/ibcgo/core/client/v1/genesis.proto b/proto/ibcgo/core/client/v1/genesis.proto
new file mode 100644
index 00000000..fc1c5d2d
--- /dev/null
+++ b/proto/ibcgo/core/client/v1/genesis.proto
@@ -0,0 +1,56 @@
+syntax = "proto3";
+
+package ibcgo.core.client.v1;
+
+option go_package = "github.com/cosmos/ibc-go/core/02-client/types";
+
+import "ibcgo/core/client/v1/client.proto";
+import "gogoproto/gogo.proto";
+
+// GenesisState defines the ibc client submodule's genesis state.
+message GenesisState {
+ // client states with their corresponding identifiers
+ repeated IdentifiedClientState clients = 1 [
+ (gogoproto.nullable) = false,
+ (gogoproto.castrepeated) = "IdentifiedClientStates"
+ ];
+ // consensus states from each client
+ repeated ClientConsensusStates clients_consensus = 2 [
+ (gogoproto.nullable) = false,
+ (gogoproto.castrepeated) = "ClientsConsensusStates",
+ (gogoproto.moretags) = "yaml:\"clients_consensus\""
+ ];
+ // metadata from each client
+ repeated IdentifiedGenesisMetadata clients_metadata = 3 [
+ (gogoproto.nullable) = false,
+ (gogoproto.moretags) = "yaml:\"clients_metadata\""
+ ];
+ Params params = 4 [ (gogoproto.nullable) = false ];
+ // create localhost on initialization
+ bool create_localhost = 5
+ [ (gogoproto.moretags) = "yaml:\"create_localhost\"" ];
+ // the sequence for the next generated client identifier
+ uint64 next_client_sequence = 6
+ [ (gogoproto.moretags) = "yaml:\"next_client_sequence\"" ];
+}
+
+// GenesisMetadata defines the genesis type for metadata that clients may return
+// with ExportMetadata
+message GenesisMetadata {
+ option (gogoproto.goproto_getters) = false;
+
+ // store key of metadata without clientID-prefix
+ bytes key = 1;
+ // metadata value
+ bytes value = 2;
+}
+
+// IdentifiedGenesisMetadata has the client metadata with the corresponding
+// client id.
+message IdentifiedGenesisMetadata {
+ string client_id = 1 [ (gogoproto.moretags) = "yaml:\"client_id\"" ];
+ repeated GenesisMetadata client_metadata = 2 [
+ (gogoproto.nullable) = false,
+ (gogoproto.moretags) = "yaml:\"client_metadata\""
+ ];
+}
diff --git a/proto/ibcgo/core/client/v1/query.proto b/proto/ibcgo/core/client/v1/query.proto
new file mode 100644
index 00000000..fe218af4
--- /dev/null
+++ b/proto/ibcgo/core/client/v1/query.proto
@@ -0,0 +1,143 @@
+syntax = "proto3";
+
+package ibcgo.core.client.v1;
+
+option go_package = "github.com/cosmos/ibc-go/core/02-client/types";
+
+import "cosmos/base/query/v1beta1/pagination.proto";
+import "ibcgo/core/client/v1/client.proto";
+import "google/protobuf/any.proto";
+import "google/api/annotations.proto";
+import "gogoproto/gogo.proto";
+
+// Query defines the gRPC querier service
+service Query {
+ // ClientState queries an IBC light client.
+ rpc ClientState(QueryClientStateRequest) returns (QueryClientStateResponse) {
+ option (google.api.http).get =
+ "/ibc/core/client/v1/client_states/{client_id}";
+ }
+
+ // ClientStates queries all the IBC light clients of a chain.
+ rpc ClientStates(QueryClientStatesRequest)
+ returns (QueryClientStatesResponse) {
+ option (google.api.http).get = "/ibc/core/client/v1/client_states";
+ }
+
+ // ConsensusState queries a consensus state associated with a client state at
+ // a given height.
+ rpc ConsensusState(QueryConsensusStateRequest)
+ returns (QueryConsensusStateResponse) {
+ option (google.api.http).get = "/ibc/core/client/v1/consensus_states/"
+ "{client_id}/revision/{revision_number}/"
+ "height/{revision_height}";
+ }
+
+ // ConsensusStates queries all the consensus state associated with a given
+ // client.
+ rpc ConsensusStates(QueryConsensusStatesRequest)
+ returns (QueryConsensusStatesResponse) {
+ option (google.api.http).get =
+ "/ibc/core/client/v1/consensus_states/{client_id}";
+ }
+
+ // ClientParams queries all parameters of the ibc client.
+ rpc ClientParams(QueryClientParamsRequest)
+ returns (QueryClientParamsResponse) {
+ option (google.api.http).get = "/ibc/client/v1/params";
+ }
+}
+
+// QueryClientStateRequest is the request type for the Query/ClientState RPC
+// method
+message QueryClientStateRequest {
+ // client state unique identifier
+ string client_id = 1;
+}
+
+// QueryClientStateResponse is the response type for the Query/ClientState RPC
+// method. Besides the client state, it includes a proof and the height from
+// which the proof was retrieved.
+message QueryClientStateResponse {
+ // client state associated with the request identifier
+ google.protobuf.Any client_state = 1;
+ // merkle proof of existence
+ bytes proof = 2;
+ // height at which the proof was retrieved
+ ibcgo.core.client.v1.Height proof_height = 3 [ (gogoproto.nullable) = false ];
+}
+
+// QueryClientStatesRequest is the request type for the Query/ClientStates RPC
+// method
+message QueryClientStatesRequest {
+ // pagination request
+ cosmos.base.query.v1beta1.PageRequest pagination = 1;
+}
+
+// QueryClientStatesResponse is the response type for the Query/ClientStates RPC
+// method.
+message QueryClientStatesResponse {
+ // list of stored ClientStates of the chain.
+ repeated IdentifiedClientState client_states = 1 [
+ (gogoproto.nullable) = false,
+ (gogoproto.castrepeated) = "IdentifiedClientStates"
+ ];
+ // pagination response
+ cosmos.base.query.v1beta1.PageResponse pagination = 2;
+}
+
+// QueryConsensusStateRequest is the request type for the Query/ConsensusState
+// RPC method. Besides the consensus state, it includes a proof and the height
+// from which the proof was retrieved.
+message QueryConsensusStateRequest {
+ // client identifier
+ string client_id = 1;
+ // consensus state revision number
+ uint64 revision_number = 2;
+ // consensus state revision height
+ uint64 revision_height = 3;
+  // latest_height overrides the height field and queries the latest stored
+ // ConsensusState
+ bool latest_height = 4;
+}
+
+// QueryConsensusStateResponse is the response type for the Query/ConsensusState
+// RPC method
+message QueryConsensusStateResponse {
+ // consensus state associated with the client identifier at the given height
+ google.protobuf.Any consensus_state = 1;
+ // merkle proof of existence
+ bytes proof = 2;
+ // height at which the proof was retrieved
+ ibcgo.core.client.v1.Height proof_height = 3 [ (gogoproto.nullable) = false ];
+}
+
+// QueryConsensusStatesRequest is the request type for the Query/ConsensusStates
+// RPC method.
+message QueryConsensusStatesRequest {
+ // client identifier
+ string client_id = 1;
+ // pagination request
+ cosmos.base.query.v1beta1.PageRequest pagination = 2;
+}
+
+// QueryConsensusStatesResponse is the response type for the
+// Query/ConsensusStates RPC method
+message QueryConsensusStatesResponse {
+ // consensus states associated with the identifier
+ repeated ConsensusStateWithHeight consensus_states = 1
+ [ (gogoproto.nullable) = false ];
+ // pagination response
+ cosmos.base.query.v1beta1.PageResponse pagination = 2;
+}
+
+// QueryClientParamsRequest is the request type for the Query/ClientParams RPC
+// method.
+message QueryClientParamsRequest {}
+
+// QueryClientParamsResponse is the response type for the Query/ClientParams RPC
+// method.
+message QueryClientParamsResponse {
+ // params defines the parameters of the module.
+ Params params = 1;
+}
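The Query service above is exposed both over the HTTP paths declared in the google.api.http options and over gRPC. A minimal sketch of fetching a client state over gRPC, assuming the standard gogo-generated NewQueryClient bindings in the 02-client types package; the endpoint address and client identifier are illustrative:

    package main

    import (
        "context"
        "fmt"

        "google.golang.org/grpc"

        clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
    )

    func main() {
        // connect to a node's gRPC endpoint (insecure, local-node setup assumed)
        conn, err := grpc.Dial("localhost:9090", grpc.WithInsecure())
        if err != nil {
            panic(err)
        }
        defer conn.Close()

        queryClient := clienttypes.NewQueryClient(conn)

        // ClientState maps to GET /ibc/core/client/v1/client_states/{client_id}
        res, err := queryClient.ClientState(context.Background(), &clienttypes.QueryClientStateRequest{
            ClientId: "07-tendermint-0",
        })
        if err != nil {
            panic(err)
        }

        // the client state comes back as a google.protobuf.Any and must be unpacked
        fmt.Println(res.ClientState.TypeUrl, res.ProofHeight)
    }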
diff --git a/proto/ibcgo/core/client/v1/tx.proto b/proto/ibcgo/core/client/v1/tx.proto
new file mode 100644
index 00000000..722f6b49
--- /dev/null
+++ b/proto/ibcgo/core/client/v1/tx.proto
@@ -0,0 +1,107 @@
+syntax = "proto3";
+
+package ibcgo.core.client.v1;
+
+option go_package = "github.com/cosmos/ibc-go/core/02-client/types";
+
+import "gogoproto/gogo.proto";
+import "google/protobuf/any.proto";
+import "ibcgo/core/client/v1/client.proto";
+
+// Msg defines the ibc/client Msg service.
+service Msg {
+ // CreateClient defines a rpc handler method for MsgCreateClient.
+ rpc CreateClient(MsgCreateClient) returns (MsgCreateClientResponse);
+
+ // UpdateClient defines a rpc handler method for MsgUpdateClient.
+ rpc UpdateClient(MsgUpdateClient) returns (MsgUpdateClientResponse);
+
+ // UpgradeClient defines a rpc handler method for MsgUpgradeClient.
+ rpc UpgradeClient(MsgUpgradeClient) returns (MsgUpgradeClientResponse);
+
+ // SubmitMisbehaviour defines a rpc handler method for MsgSubmitMisbehaviour.
+ rpc SubmitMisbehaviour(MsgSubmitMisbehaviour)
+ returns (MsgSubmitMisbehaviourResponse);
+}
+
+// MsgCreateClient defines a message to create an IBC client
+message MsgCreateClient {
+ option (gogoproto.equal) = false;
+ option (gogoproto.goproto_getters) = false;
+
+ // light client state
+ google.protobuf.Any client_state = 1
+ [ (gogoproto.moretags) = "yaml:\"client_state\"" ];
+ // consensus state associated with the client that corresponds to a given
+ // height.
+ google.protobuf.Any consensus_state = 2
+ [ (gogoproto.moretags) = "yaml:\"consensus_state\"" ];
+ // signer address
+ string signer = 3;
+}
+
+// MsgCreateClientResponse defines the Msg/CreateClient response type.
+message MsgCreateClientResponse {}
+
+// MsgUpdateClient defines an sdk.Msg to update an IBC client state using
+// the given header.
+message MsgUpdateClient {
+ option (gogoproto.equal) = false;
+ option (gogoproto.goproto_getters) = false;
+
+ // client unique identifier
+ string client_id = 1 [ (gogoproto.moretags) = "yaml:\"client_id\"" ];
+ // header to update the light client
+ google.protobuf.Any header = 2;
+ // signer address
+ string signer = 3;
+}
+
+// MsgUpdateClientResponse defines the Msg/UpdateClient response type.
+message MsgUpdateClientResponse {}
+
+// MsgUpgradeClient defines an sdk.Msg to upgrade an IBC client to a new client
+// state
+message MsgUpgradeClient {
+ option (gogoproto.equal) = false;
+ option (gogoproto.goproto_getters) = false;
+
+ // client unique identifier
+ string client_id = 1 [ (gogoproto.moretags) = "yaml:\"client_id\"" ];
+ // upgraded client state
+ google.protobuf.Any client_state = 2
+ [ (gogoproto.moretags) = "yaml:\"client_state\"" ];
+ // upgraded consensus state, only contains enough information to serve as a
+ // basis of trust in update logic
+ google.protobuf.Any consensus_state = 3
+ [ (gogoproto.moretags) = "yaml:\"consensus_state\"" ];
+ // proof that old chain committed to new client
+ bytes proof_upgrade_client = 4
+ [ (gogoproto.moretags) = "yaml:\"proof_upgrade_client\"" ];
+ // proof that old chain committed to new consensus state
+ bytes proof_upgrade_consensus_state = 5
+ [ (gogoproto.moretags) = "yaml:\"proof_upgrade_consensus_state\"" ];
+ // signer address
+ string signer = 6;
+}
+
+// MsgUpgradeClientResponse defines the Msg/UpgradeClient response type.
+message MsgUpgradeClientResponse {}
+
+// MsgSubmitMisbehaviour defines an sdk.Msg type that submits Evidence for
+// light client misbehaviour.
+message MsgSubmitMisbehaviour {
+ option (gogoproto.equal) = false;
+ option (gogoproto.goproto_getters) = false;
+
+ // client unique identifier
+ string client_id = 1 [ (gogoproto.moretags) = "yaml:\"client_id\"" ];
+ // misbehaviour used for freezing the light client
+ google.protobuf.Any misbehaviour = 2;
+ // signer address
+ string signer = 3;
+}
+
+// MsgSubmitMisbehaviourResponse defines the Msg/SubmitMisbehaviour response
+// type.
+message MsgSubmitMisbehaviourResponse {}
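Because the client and consensus states in these messages are google.protobuf.Any fields, a concrete light client state has to be packed before it can be placed into MsgCreateClient. A minimal sketch, assuming the generated 02-client types and the Cosmos SDK's codectypes.NewAnyWithValue helper; buildMsgCreateClient is a hypothetical helper used only for illustration:

    package example

    import (
        codectypes "github.com/cosmos/cosmos-sdk/codec/types"
        "github.com/gogo/protobuf/proto"

        clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
    )

    // buildMsgCreateClient packs the concrete client and consensus states into Any
    // values, mirroring the two Any fields defined on MsgCreateClient above.
    func buildMsgCreateClient(clientState, consensusState proto.Message, signer string) (*clienttypes.MsgCreateClient, error) {
        anyClient, err := codectypes.NewAnyWithValue(clientState)
        if err != nil {
            return nil, err
        }
        anyConsensus, err := codectypes.NewAnyWithValue(consensusState)
        if err != nil {
            return nil, err
        }
        return &clienttypes.MsgCreateClient{
            ClientState:    anyClient,
            ConsensusState: anyConsensus,
            Signer:         signer,
        }, nil
    }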
diff --git a/proto/ibcgo/core/commitment/v1/commitment.proto b/proto/ibcgo/core/commitment/v1/commitment.proto
new file mode 100644
index 00000000..373a77ff
--- /dev/null
+++ b/proto/ibcgo/core/commitment/v1/commitment.proto
@@ -0,0 +1,39 @@
+syntax = "proto3";
+
+package ibcgo.core.commitment.v1;
+
+option go_package = "github.com/cosmos/ibc-go/core/23-commitment/types";
+
+import "gogoproto/gogo.proto";
+import "confio/proofs.proto";
+
+// MerkleRoot defines a merkle root hash.
+// In the Cosmos SDK, the AppHash of a block header becomes the root.
+message MerkleRoot {
+ option (gogoproto.goproto_getters) = false;
+
+ bytes hash = 1;
+}
+
+// MerklePrefix is a merkle path prefixed to the key.
+// The constructed key from the Path and the key will be append(Path.KeyPath,
+// append(Path.KeyPrefix, key...))
+message MerklePrefix {
+ bytes key_prefix = 1 [ (gogoproto.moretags) = "yaml:\"key_prefix\"" ];
+}
+
+// MerklePath is the path used to verify commitment proofs, which can be an
+// arbitrary structured object (defined by a commitment type).
+// MerklePath is represented from root-to-leaf
+message MerklePath {
+ option (gogoproto.goproto_stringer) = false;
+
+ repeated string key_path = 1 [ (gogoproto.moretags) = "yaml:\"key_path\"" ];
+}
+
+// MerkleProof is a wrapper type over a chain of CommitmentProofs.
+// It demonstrates membership or non-membership for an element or set of
+// elements, verifiable in conjunction with a known commitment root. Proofs
+// should be succinct.
+// MerkleProofs are ordered from leaf-to-root
+message MerkleProof { repeated ics23.CommitmentProof proofs = 1; }
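A short sketch of how these commitment types compose: the counterparty's MerklePrefix is placed in front of the key being proven to form the root-to-leaf MerklePath that a MerkleProof is verified against. Field names follow the default gogo generation into the 23-commitment types package; the client-state key shown is illustrative:

    package example

    import (
        commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
    )

    // clientStatePath builds the full path for proving a client state stored on the
    // counterparty, given its store prefix (e.g. "ibc" on SDK chains).
    func clientStatePath(prefix commitmenttypes.MerklePrefix, clientID string) commitmenttypes.MerklePath {
        return commitmenttypes.MerklePath{
            KeyPath: []string{
                string(prefix.KeyPrefix),               // store prefix first (root side)
                "clients/" + clientID + "/clientState", // then the key being proven (leaf side)
            },
        }
    }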
diff --git a/proto/ibcgo/core/connection/v1/connection.proto b/proto/ibcgo/core/connection/v1/connection.proto
new file mode 100644
index 00000000..39f3925c
--- /dev/null
+++ b/proto/ibcgo/core/connection/v1/connection.proto
@@ -0,0 +1,108 @@
+syntax = "proto3";
+
+package ibcgo.core.connection.v1;
+
+option go_package = "github.com/cosmos/ibc-go/core/03-connection/types";
+
+import "gogoproto/gogo.proto";
+import "ibcgo/core/commitment/v1/commitment.proto";
+
+// ICS03 - Connection Data Structures as defined in
+// https://github.com/cosmos/ics/tree/master/spec/ics-003-connection-semantics#data-structures
+
+// ConnectionEnd defines a stateful object on a chain connected to another
+// separate one.
+// NOTE: there must only be 2 defined ConnectionEnds to establish
+// a connection between two chains.
+message ConnectionEnd {
+ option (gogoproto.goproto_getters) = false;
+ // client associated with this connection.
+ string client_id = 1 [ (gogoproto.moretags) = "yaml:\"client_id\"" ];
+ // IBC version which can be utilised to determine encodings or protocols for
+ // channels or packets utilising this connection.
+ repeated Version versions = 2;
+ // current state of the connection end.
+ State state = 3;
+ // counterparty chain associated with this connection.
+ Counterparty counterparty = 4 [ (gogoproto.nullable) = false ];
+ // delay period that must pass before a consensus state can be used for
+  // packet verification. NOTE: delay period logic is only implemented by some
+ // clients.
+ uint64 delay_period = 5 [ (gogoproto.moretags) = "yaml:\"delay_period\"" ];
+}
+
+// IdentifiedConnection defines a connection with additional connection
+// identifier field.
+message IdentifiedConnection {
+ option (gogoproto.goproto_getters) = false;
+ // connection identifier.
+ string id = 1 [ (gogoproto.moretags) = "yaml:\"id\"" ];
+ // client associated with this connection.
+ string client_id = 2 [ (gogoproto.moretags) = "yaml:\"client_id\"" ];
+ // IBC version which can be utilised to determine encodings or protocols for
+ // channels or packets utilising this connection
+ repeated Version versions = 3;
+ // current state of the connection end.
+ State state = 4;
+ // counterparty chain associated with this connection.
+ Counterparty counterparty = 5 [ (gogoproto.nullable) = false ];
+ // delay period associated with this connection.
+ uint64 delay_period = 6 [ (gogoproto.moretags) = "yaml:\"delay_period\"" ];
+}
+
+// State defines if a connection is in one of the following states:
+// INIT, TRYOPEN, OPEN or UNINITIALIZED.
+enum State {
+ option (gogoproto.goproto_enum_prefix) = false;
+
+ // Default State
+ STATE_UNINITIALIZED_UNSPECIFIED = 0
+ [ (gogoproto.enumvalue_customname) = "UNINITIALIZED" ];
+ // A connection end has just started the opening handshake.
+ STATE_INIT = 1 [ (gogoproto.enumvalue_customname) = "INIT" ];
+ // A connection end has acknowledged the handshake step on the counterparty
+ // chain.
+ STATE_TRYOPEN = 2 [ (gogoproto.enumvalue_customname) = "TRYOPEN" ];
+ // A connection end has completed the handshake.
+ STATE_OPEN = 3 [ (gogoproto.enumvalue_customname) = "OPEN" ];
+}
+
+// Counterparty defines the counterparty chain associated with a connection end.
+message Counterparty {
+ option (gogoproto.goproto_getters) = false;
+
+ // identifies the client on the counterparty chain associated with a given
+ // connection.
+ string client_id = 1 [ (gogoproto.moretags) = "yaml:\"client_id\"" ];
+ // identifies the connection end on the counterparty chain associated with a
+ // given connection.
+ string connection_id = 2 [ (gogoproto.moretags) = "yaml:\"connection_id\"" ];
+ // commitment merkle prefix of the counterparty chain.
+ ibcgo.core.commitment.v1.MerklePrefix prefix = 3
+ [ (gogoproto.nullable) = false ];
+}
+
+// ClientPaths defines all the connection paths for a client state.
+message ClientPaths {
+ // list of connection paths
+ repeated string paths = 1;
+}
+
+// ConnectionPaths defines all the connection paths for a given client state.
+message ConnectionPaths {
+ // client state unique identifier
+ string client_id = 1 [ (gogoproto.moretags) = "yaml:\"client_id\"" ];
+ // list of connection paths
+ repeated string paths = 2;
+}
+
+// Version defines the versioning scheme used to negotiate the IBC version in
+// the connection handshake.
+message Version {
+ option (gogoproto.goproto_getters) = false;
+
+ // unique version identifier
+ string identifier = 1;
+ // list of features compatible with the specified identifier
+ repeated string features = 2;
+}
diff --git a/proto/ibcgo/core/connection/v1/genesis.proto b/proto/ibcgo/core/connection/v1/genesis.proto
new file mode 100644
index 00000000..3e693c84
--- /dev/null
+++ b/proto/ibcgo/core/connection/v1/genesis.proto
@@ -0,0 +1,21 @@
+syntax = "proto3";
+
+package ibcgo.core.connection.v1;
+
+option go_package = "github.com/cosmos/ibc-go/core/03-connection/types";
+
+import "gogoproto/gogo.proto";
+import "ibcgo/core/connection/v1/connection.proto";
+
+// GenesisState defines the ibc connection submodule's genesis state.
+message GenesisState {
+ repeated IdentifiedConnection connections = 1
+ [ (gogoproto.nullable) = false ];
+ repeated ConnectionPaths client_connection_paths = 2 [
+ (gogoproto.nullable) = false,
+ (gogoproto.moretags) = "yaml:\"client_connection_paths\""
+ ];
+ // the sequence for the next generated connection identifier
+ uint64 next_connection_sequence = 3
+ [ (gogoproto.moretags) = "yaml:\"next_connection_sequence\"" ];
+}
diff --git a/proto/ibcgo/core/connection/v1/query.proto b/proto/ibcgo/core/connection/v1/query.proto
new file mode 100644
index 00000000..c4ff165a
--- /dev/null
+++ b/proto/ibcgo/core/connection/v1/query.proto
@@ -0,0 +1,145 @@
+syntax = "proto3";
+
+package ibcgo.core.connection.v1;
+
+option go_package = "github.com/cosmos/ibc-go/core/03-connection/types";
+
+import "gogoproto/gogo.proto";
+import "cosmos/base/query/v1beta1/pagination.proto";
+import "ibcgo/core/client/v1/client.proto";
+import "ibcgo/core/connection/v1/connection.proto";
+import "google/api/annotations.proto";
+import "google/protobuf/any.proto";
+
+// Query defines the gRPC querier service
+service Query {
+ // Connection queries an IBC connection end.
+ rpc Connection(QueryConnectionRequest) returns (QueryConnectionResponse) {
+ option (google.api.http).get =
+ "/ibc/core/connection/v1/connections/{connection_id}";
+ }
+
+ // Connections queries all the IBC connections of a chain.
+ rpc Connections(QueryConnectionsRequest) returns (QueryConnectionsResponse) {
+ option (google.api.http).get = "/ibc/core/connection/v1/connections";
+ }
+
+ // ClientConnections queries the connection paths associated with a client
+ // state.
+ rpc ClientConnections(QueryClientConnectionsRequest)
+ returns (QueryClientConnectionsResponse) {
+ option (google.api.http).get =
+ "/ibc/core/connection/v1/client_connections/{client_id}";
+ }
+
+ // ConnectionClientState queries the client state associated with the
+ // connection.
+ rpc ConnectionClientState(QueryConnectionClientStateRequest)
+ returns (QueryConnectionClientStateResponse) {
+ option (google.api.http).get =
+ "/ibc/core/connection/v1/connections/{connection_id}/client_state";
+ }
+
+ // ConnectionConsensusState queries the consensus state associated with the
+ // connection.
+ rpc ConnectionConsensusState(QueryConnectionConsensusStateRequest)
+ returns (QueryConnectionConsensusStateResponse) {
+ option (google.api.http).get =
+ "/ibc/core/connection/v1/connections/{connection_id}/consensus_state/"
+ "revision/{revision_number}/height/{revision_height}";
+ }
+}
+
+// QueryConnectionRequest is the request type for the Query/Connection RPC
+// method
+message QueryConnectionRequest {
+ // connection unique identifier
+ string connection_id = 1;
+}
+
+// QueryConnectionResponse is the response type for the Query/Connection RPC
+// method. Besides the connection end, it includes a proof and the height from
+// which the proof was retrieved.
+message QueryConnectionResponse {
+ // connection associated with the request identifier
+ ibcgo.core.connection.v1.ConnectionEnd connection = 1;
+ // merkle proof of existence
+ bytes proof = 2;
+ // height at which the proof was retrieved
+ ibcgo.core.client.v1.Height proof_height = 3 [ (gogoproto.nullable) = false ];
+}
+
+// QueryConnectionsRequest is the request type for the Query/Connections RPC
+// method
+message QueryConnectionsRequest {
+ cosmos.base.query.v1beta1.PageRequest pagination = 1;
+}
+
+// QueryConnectionsResponse is the response type for the Query/Connections RPC
+// method.
+message QueryConnectionsResponse {
+ // list of stored connections of the chain.
+ repeated ibcgo.core.connection.v1.IdentifiedConnection connections = 1;
+ // pagination response
+ cosmos.base.query.v1beta1.PageResponse pagination = 2;
+ // query block height
+ ibcgo.core.client.v1.Height height = 3 [ (gogoproto.nullable) = false ];
+}
+
+// QueryClientConnectionsRequest is the request type for the
+// Query/ClientConnections RPC method
+message QueryClientConnectionsRequest {
+ // client identifier associated with a connection
+ string client_id = 1;
+}
+
+// QueryClientConnectionsResponse is the response type for the
+// Query/ClientConnections RPC method
+message QueryClientConnectionsResponse {
+ // slice of all the connection paths associated with a client.
+ repeated string connection_paths = 1;
+ // merkle proof of existence
+ bytes proof = 2;
+ // height at which the proof was generated
+ ibcgo.core.client.v1.Height proof_height = 3 [ (gogoproto.nullable) = false ];
+}
+
+// QueryConnectionClientStateRequest is the request type for the
+// Query/ConnectionClientState RPC method
+message QueryConnectionClientStateRequest {
+ // connection identifier
+ string connection_id = 1 [ (gogoproto.moretags) = "yaml:\"connection_id\"" ];
+}
+
+// QueryConnectionClientStateResponse is the response type for the
+// Query/ConnectionClientState RPC method
+message QueryConnectionClientStateResponse {
+  // client state associated with the connection
+ ibcgo.core.client.v1.IdentifiedClientState identified_client_state = 1;
+ // merkle proof of existence
+ bytes proof = 2;
+ // height at which the proof was retrieved
+ ibcgo.core.client.v1.Height proof_height = 3 [ (gogoproto.nullable) = false ];
+}
+
+// QueryConnectionConsensusStateRequest is the request type for the
+// Query/ConnectionConsensusState RPC method
+message QueryConnectionConsensusStateRequest {
+ // connection identifier
+ string connection_id = 1 [ (gogoproto.moretags) = "yaml:\"connection_id\"" ];
+ uint64 revision_number = 2;
+ uint64 revision_height = 3;
+}
+
+// QueryConnectionConsensusStateResponse is the response type for the
+// Query/ConnectionConsensusState RPC method
+message QueryConnectionConsensusStateResponse {
+  // consensus state associated with the connection
+ google.protobuf.Any consensus_state = 1;
+ // client ID associated with the consensus state
+ string client_id = 2;
+ // merkle proof of existence
+ bytes proof = 3;
+ // height at which the proof was retrieved
+ ibcgo.core.client.v1.Height proof_height = 4 [ (gogoproto.nullable) = false ];
+}
diff --git a/proto/ibcgo/core/connection/v1/tx.proto b/proto/ibcgo/core/connection/v1/tx.proto
new file mode 100644
index 00000000..a371633c
--- /dev/null
+++ b/proto/ibcgo/core/connection/v1/tx.proto
@@ -0,0 +1,140 @@
+syntax = "proto3";
+
+package ibcgo.core.connection.v1;
+
+option go_package = "github.com/cosmos/ibc-go/core/03-connection/types";
+
+import "gogoproto/gogo.proto";
+import "google/protobuf/any.proto";
+import "ibcgo/core/client/v1/client.proto";
+import "ibcgo/core/connection/v1/connection.proto";
+
+// Msg defines the ibc/connection Msg service.
+service Msg {
+ // ConnectionOpenInit defines a rpc handler method for MsgConnectionOpenInit.
+ rpc ConnectionOpenInit(MsgConnectionOpenInit)
+ returns (MsgConnectionOpenInitResponse);
+
+ // ConnectionOpenTry defines a rpc handler method for MsgConnectionOpenTry.
+ rpc ConnectionOpenTry(MsgConnectionOpenTry)
+ returns (MsgConnectionOpenTryResponse);
+
+ // ConnectionOpenAck defines a rpc handler method for MsgConnectionOpenAck.
+ rpc ConnectionOpenAck(MsgConnectionOpenAck)
+ returns (MsgConnectionOpenAckResponse);
+
+ // ConnectionOpenConfirm defines a rpc handler method for
+ // MsgConnectionOpenConfirm.
+ rpc ConnectionOpenConfirm(MsgConnectionOpenConfirm)
+ returns (MsgConnectionOpenConfirmResponse);
+}
+
+// MsgConnectionOpenInit defines the msg sent by an account on Chain A to
+// initialize a connection with Chain B.
+message MsgConnectionOpenInit {
+ option (gogoproto.equal) = false;
+ option (gogoproto.goproto_getters) = false;
+
+ string client_id = 1 [ (gogoproto.moretags) = "yaml:\"client_id\"" ];
+ Counterparty counterparty = 2 [ (gogoproto.nullable) = false ];
+ Version version = 3;
+ uint64 delay_period = 4 [ (gogoproto.moretags) = "yaml:\"delay_period\"" ];
+ string signer = 5;
+}
+
+// MsgConnectionOpenInitResponse defines the Msg/ConnectionOpenInit response
+// type.
+message MsgConnectionOpenInitResponse {}
+
+// MsgConnectionOpenTry defines a msg sent by a Relayer to try to open a
+// connection on Chain B.
+message MsgConnectionOpenTry {
+ option (gogoproto.equal) = false;
+ option (gogoproto.goproto_getters) = false;
+
+ string client_id = 1 [ (gogoproto.moretags) = "yaml:\"client_id\"" ];
+  // in the case of crossing hellos, when both chains call OpenInit, we need
+ // the connection identifier of the previous connection in state INIT
+ string previous_connection_id = 2
+ [ (gogoproto.moretags) = "yaml:\"previous_connection_id\"" ];
+ google.protobuf.Any client_state = 3
+ [ (gogoproto.moretags) = "yaml:\"client_state\"" ];
+ Counterparty counterparty = 4 [ (gogoproto.nullable) = false ];
+ uint64 delay_period = 5 [ (gogoproto.moretags) = "yaml:\"delay_period\"" ];
+ repeated Version counterparty_versions = 6
+ [ (gogoproto.moretags) = "yaml:\"counterparty_versions\"" ];
+ ibcgo.core.client.v1.Height proof_height = 7 [
+ (gogoproto.moretags) = "yaml:\"proof_height\"",
+ (gogoproto.nullable) = false
+ ];
+  // proof of the initialization of the connection on Chain A: `UNINITIALIZED ->
+  // INIT`
+ bytes proof_init = 8 [ (gogoproto.moretags) = "yaml:\"proof_init\"" ];
+ // proof of client state included in message
+ bytes proof_client = 9 [ (gogoproto.moretags) = "yaml:\"proof_client\"" ];
+ // proof of client consensus state
+ bytes proof_consensus = 10
+ [ (gogoproto.moretags) = "yaml:\"proof_consensus\"" ];
+ ibcgo.core.client.v1.Height consensus_height = 11 [
+ (gogoproto.moretags) = "yaml:\"consensus_height\"",
+ (gogoproto.nullable) = false
+ ];
+ string signer = 12;
+}
+
+// MsgConnectionOpenTryResponse defines the Msg/ConnectionOpenTry response type.
+message MsgConnectionOpenTryResponse {}
+
+// MsgConnectionOpenAck defines a msg sent by a Relayer to Chain A to
+// acknowledge the change of connection state to TRYOPEN on Chain B.
+message MsgConnectionOpenAck {
+ option (gogoproto.equal) = false;
+ option (gogoproto.goproto_getters) = false;
+
+ string connection_id = 1 [ (gogoproto.moretags) = "yaml:\"connection_id\"" ];
+ string counterparty_connection_id = 2
+ [ (gogoproto.moretags) = "yaml:\"counterparty_connection_id\"" ];
+ Version version = 3;
+ google.protobuf.Any client_state = 4
+ [ (gogoproto.moretags) = "yaml:\"client_state\"" ];
+ ibcgo.core.client.v1.Height proof_height = 5 [
+ (gogoproto.moretags) = "yaml:\"proof_height\"",
+ (gogoproto.nullable) = false
+ ];
+  // proof of the initialization of the connection on Chain B: `UNINITIALIZED ->
+  // TRYOPEN`
+ bytes proof_try = 6 [ (gogoproto.moretags) = "yaml:\"proof_try\"" ];
+ // proof of client state included in message
+ bytes proof_client = 7 [ (gogoproto.moretags) = "yaml:\"proof_client\"" ];
+ // proof of client consensus state
+ bytes proof_consensus = 8
+ [ (gogoproto.moretags) = "yaml:\"proof_consensus\"" ];
+ ibcgo.core.client.v1.Height consensus_height = 9 [
+ (gogoproto.moretags) = "yaml:\"consensus_height\"",
+ (gogoproto.nullable) = false
+ ];
+ string signer = 10;
+}
+
+// MsgConnectionOpenAckResponse defines the Msg/ConnectionOpenAck response type.
+message MsgConnectionOpenAckResponse {}
+
+// MsgConnectionOpenConfirm defines a msg sent by a Relayer to Chain B to
+// acknowledge the change of connection state to OPEN on Chain A.
+message MsgConnectionOpenConfirm {
+ option (gogoproto.equal) = false;
+ option (gogoproto.goproto_getters) = false;
+
+ string connection_id = 1 [ (gogoproto.moretags) = "yaml:\"connection_id\"" ];
+ // proof for the change of the connection state on Chain A: `INIT -> OPEN`
+ bytes proof_ack = 2 [ (gogoproto.moretags) = "yaml:\"proof_ack\"" ];
+ ibcgo.core.client.v1.Height proof_height = 3 [
+ (gogoproto.moretags) = "yaml:\"proof_height\"",
+ (gogoproto.nullable) = false
+ ];
+ string signer = 4;
+}
+
+// MsgConnectionOpenConfirmResponse defines the Msg/ConnectionOpenConfirm
+// response type.
+message MsgConnectionOpenConfirmResponse {}
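These four messages map onto the connection handshake: OpenInit on Chain A, OpenTry on Chain B, OpenAck back on Chain A, and OpenConfirm on Chain B, after which both ConnectionEnds are OPEN. A minimal sketch of the first step only, assuming the generated 03-connection and 23-commitment types; the identifiers and signer are illustrative:

    package example

    import (
        connectiontypes "github.com/cosmos/ibc-go/core/03-connection/types"
        commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
    )

    // newOpenInit assembles the MsgConnectionOpenInit broadcast on Chain A. The
    // counterparty connection ID is left empty because Chain B has not generated
    // one yet, and a nil Version lets the handshake negotiate a compatible version.
    func newOpenInit(clientIDOnA, clientIDOnB, signer string) connectiontypes.MsgConnectionOpenInit {
        counterparty := connectiontypes.Counterparty{
            ClientId: clientIDOnB,
            Prefix:   commitmenttypes.MerklePrefix{KeyPrefix: []byte("ibc")},
        }
        return connectiontypes.MsgConnectionOpenInit{
            ClientId:     clientIDOnA,
            Counterparty: counterparty,
            Version:      nil,
            DelayPeriod:  0,
            Signer:       signer,
        }
    }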
diff --git a/proto/ibcgo/core/types/v1/genesis.proto b/proto/ibcgo/core/types/v1/genesis.proto
new file mode 100644
index 00000000..fd73a2b0
--- /dev/null
+++ b/proto/ibcgo/core/types/v1/genesis.proto
@@ -0,0 +1,29 @@
+syntax = "proto3";
+
+package ibcgo.core.types.v1;
+
+option go_package = "github.com/cosmos/ibc-go/core/types";
+
+import "gogoproto/gogo.proto";
+import "ibcgo/core/client/v1/genesis.proto";
+import "ibcgo/core/connection/v1/genesis.proto";
+import "ibcgo/core/channel/v1/genesis.proto";
+
+// GenesisState defines the ibc module's genesis state.
+message GenesisState {
+ // ICS002 - Clients genesis state
+ ibcgo.core.client.v1.GenesisState client_genesis = 1 [
+ (gogoproto.nullable) = false,
+ (gogoproto.moretags) = "yaml:\"client_genesis\""
+ ];
+ // ICS003 - Connections genesis state
+ ibcgo.core.connection.v1.GenesisState connection_genesis = 2 [
+ (gogoproto.nullable) = false,
+ (gogoproto.moretags) = "yaml:\"connection_genesis\""
+ ];
+ // ICS004 - Channel genesis state
+ ibcgo.core.channel.v1.GenesisState channel_genesis = 3 [
+ (gogoproto.nullable) = false,
+ (gogoproto.moretags) = "yaml:\"channel_genesis\""
+ ];
+}
diff --git a/proto/ibcgo/lightclients/localhost/v1/localhost.proto b/proto/ibcgo/lightclients/localhost/v1/localhost.proto
new file mode 100644
index 00000000..110a81b7
--- /dev/null
+++ b/proto/ibcgo/lightclients/localhost/v1/localhost.proto
@@ -0,0 +1,18 @@
+syntax = "proto3";
+
+package ibcgo.lightclients.localhost.v1;
+
+option go_package = "github.com/cosmos/ibc-go/light-clients/09-localhost/types";
+
+import "gogoproto/gogo.proto";
+import "ibcgo/core/client/v1/client.proto";
+
+// ClientState defines a loopback (localhost) client. It requires (read-only)
+// access to keys outside the client prefix.
+message ClientState {
+ option (gogoproto.goproto_getters) = false;
+ // self chain ID
+ string chain_id = 1 [ (gogoproto.moretags) = "yaml:\"chain_id\"" ];
+ // self latest block height
+ ibcgo.core.client.v1.Height height = 2 [ (gogoproto.nullable) = false ];
+}
diff --git a/proto/ibcgo/lightclients/solomachine/v1/solomachine.proto b/proto/ibcgo/lightclients/solomachine/v1/solomachine.proto
new file mode 100644
index 00000000..d4d22848
--- /dev/null
+++ b/proto/ibcgo/lightclients/solomachine/v1/solomachine.proto
@@ -0,0 +1,206 @@
+syntax = "proto3";
+
+package ibcgo.lightclients.solomachine.v1;
+
+option go_package = "github.com/cosmos/ibc-go/light-clients/06-solomachine/types";
+
+import "ibcgo/core/connection/v1/connection.proto";
+import "ibcgo/core/channel/v1/channel.proto";
+import "gogoproto/gogo.proto";
+import "google/protobuf/any.proto";
+
+// ClientState defines a solo machine client that tracks the current consensus
+// state and if the client is frozen.
+message ClientState {
+ option (gogoproto.goproto_getters) = false;
+ // latest sequence of the client state
+ uint64 sequence = 1;
+ // frozen sequence of the solo machine
+ uint64 frozen_sequence = 2
+ [ (gogoproto.moretags) = "yaml:\"frozen_sequence\"" ];
+ ConsensusState consensus_state = 3
+ [ (gogoproto.moretags) = "yaml:\"consensus_state\"" ];
+ // when set to true, will allow governance to update a solo machine client.
+ // The client will be unfrozen if it is frozen.
+ bool allow_update_after_proposal = 4
+ [ (gogoproto.moretags) = "yaml:\"allow_update_after_proposal\"" ];
+}
+
+// ConsensusState defines a solo machine consensus state. The sequence of a
+// consensus state is contained in the "height" key used in storing the
+// consensus state.
+message ConsensusState {
+ option (gogoproto.goproto_getters) = false;
+ // public key of the solo machine
+ google.protobuf.Any public_key = 1
+ [ (gogoproto.moretags) = "yaml:\"public_key\"" ];
+ // diversifier allows the same public key to be re-used across different solo
+ // machine clients (potentially on different chains) without being considered
+ // misbehaviour.
+ string diversifier = 2;
+ uint64 timestamp = 3;
+}
+
+// Header defines a solo machine consensus header
+message Header {
+ option (gogoproto.goproto_getters) = false;
+ // sequence to update solo machine public key at
+ uint64 sequence = 1;
+ uint64 timestamp = 2;
+ bytes signature = 3;
+ google.protobuf.Any new_public_key = 4
+ [ (gogoproto.moretags) = "yaml:\"new_public_key\"" ];
+ string new_diversifier = 5
+ [ (gogoproto.moretags) = "yaml:\"new_diversifier\"" ];
+}
+
+// Misbehaviour defines misbehaviour for a solo machine which consists
+// of a sequence and two signatures over different messages at that sequence.
+message Misbehaviour {
+ option (gogoproto.goproto_getters) = false;
+ string client_id = 1 [ (gogoproto.moretags) = "yaml:\"client_id\"" ];
+ uint64 sequence = 2;
+ SignatureAndData signature_one = 3
+ [ (gogoproto.moretags) = "yaml:\"signature_one\"" ];
+ SignatureAndData signature_two = 4
+ [ (gogoproto.moretags) = "yaml:\"signature_two\"" ];
+}
+
+// SignatureAndData contains a signature and the data signed over to create that
+// signature.
+message SignatureAndData {
+ option (gogoproto.goproto_getters) = false;
+ bytes signature = 1;
+ DataType data_type = 2 [ (gogoproto.moretags) = "yaml:\"data_type\"" ];
+ bytes data = 3;
+ uint64 timestamp = 4;
+}
+
+// TimestampedSignatureData contains the signature data and the timestamp of the
+// signature.
+message TimestampedSignatureData {
+ option (gogoproto.goproto_getters) = false;
+ bytes signature_data = 1 [ (gogoproto.moretags) = "yaml:\"signature_data\"" ];
+ uint64 timestamp = 2;
+}
+
+// SignBytes defines the signed bytes used for signature verification.
+message SignBytes {
+ option (gogoproto.goproto_getters) = false;
+
+ uint64 sequence = 1;
+ uint64 timestamp = 2;
+ string diversifier = 3;
+ // type of the data used
+ DataType data_type = 4 [ (gogoproto.moretags) = "yaml:\"data_type\"" ];
+ // marshaled data
+ bytes data = 5;
+}
+
+// DataType defines the type of solo machine proof being created. This is done
+// to preserve uniqueness of different data sign byte encodings.
+enum DataType {
+ option (gogoproto.goproto_enum_prefix) = false;
+
+ // Default State
+ DATA_TYPE_UNINITIALIZED_UNSPECIFIED = 0
+ [ (gogoproto.enumvalue_customname) = "UNSPECIFIED" ];
+ // Data type for client state verification
+ DATA_TYPE_CLIENT_STATE = 1 [ (gogoproto.enumvalue_customname) = "CLIENT" ];
+ // Data type for consensus state verification
+ DATA_TYPE_CONSENSUS_STATE = 2
+ [ (gogoproto.enumvalue_customname) = "CONSENSUS" ];
+ // Data type for connection state verification
+ DATA_TYPE_CONNECTION_STATE = 3
+ [ (gogoproto.enumvalue_customname) = "CONNECTION" ];
+ // Data type for channel state verification
+ DATA_TYPE_CHANNEL_STATE = 4 [ (gogoproto.enumvalue_customname) = "CHANNEL" ];
+ // Data type for packet commitment verification
+ DATA_TYPE_PACKET_COMMITMENT = 5
+ [ (gogoproto.enumvalue_customname) = "PACKETCOMMITMENT" ];
+ // Data type for packet acknowledgement verification
+ DATA_TYPE_PACKET_ACKNOWLEDGEMENT = 6
+ [ (gogoproto.enumvalue_customname) = "PACKETACKNOWLEDGEMENT" ];
+ // Data type for packet receipt absence verification
+ DATA_TYPE_PACKET_RECEIPT_ABSENCE = 7
+ [ (gogoproto.enumvalue_customname) = "PACKETRECEIPTABSENCE" ];
+ // Data type for next sequence recv verification
+ DATA_TYPE_NEXT_SEQUENCE_RECV = 8
+ [ (gogoproto.enumvalue_customname) = "NEXTSEQUENCERECV" ];
+ // Data type for header verification
+ DATA_TYPE_HEADER = 9 [ (gogoproto.enumvalue_customname) = "HEADER" ];
+}
+
+// HeaderData returns the SignBytes data for update verification.
+message HeaderData {
+ option (gogoproto.goproto_getters) = false;
+
+ // header public key
+ google.protobuf.Any new_pub_key = 1
+ [ (gogoproto.moretags) = "yaml:\"new_pub_key\"" ];
+ // header diversifier
+ string new_diversifier = 2
+ [ (gogoproto.moretags) = "yaml:\"new_diversifier\"" ];
+}
+
+// ClientStateData returns the SignBytes data for client state verification.
+message ClientStateData {
+ option (gogoproto.goproto_getters) = false;
+
+ bytes path = 1;
+ google.protobuf.Any client_state = 2
+ [ (gogoproto.moretags) = "yaml:\"client_state\"" ];
+}
+
+// ConsensusStateData returns the SignBytes data for consensus state
+// verification.
+message ConsensusStateData {
+ option (gogoproto.goproto_getters) = false;
+
+ bytes path = 1;
+ google.protobuf.Any consensus_state = 2
+ [ (gogoproto.moretags) = "yaml:\"consensus_state\"" ];
+}
+
+// ConnectionStateData returns the SignBytes data for connection state
+// verification.
+message ConnectionStateData {
+ option (gogoproto.goproto_getters) = false;
+
+ bytes path = 1;
+ ibcgo.core.connection.v1.ConnectionEnd connection = 2;
+}
+
+// ChannelStateData returns the SignBytes data for channel state
+// verification.
+message ChannelStateData {
+ option (gogoproto.goproto_getters) = false;
+
+ bytes path = 1;
+ ibcgo.core.channel.v1.Channel channel = 2;
+}
+
+// PacketCommitmentData returns the SignBytes data for packet commitment
+// verification.
+message PacketCommitmentData {
+ bytes path = 1;
+ bytes commitment = 2;
+}
+
+// PacketAcknowledgementData returns the SignBytes data for acknowledgement
+// verification.
+message PacketAcknowledgementData {
+ bytes path = 1;
+ bytes acknowledgement = 2;
+}
+
+// PacketReceiptAbsenceData returns the SignBytes data for
+// packet receipt absence verification.
+message PacketReceiptAbsenceData { bytes path = 1; }
+
+// NextSequenceRecvData returns the SignBytes data for verification of the next
+// sequence to be received.
+message NextSequenceRecvData {
+ bytes path = 1;
+ uint64 next_seq_recv = 2 [ (gogoproto.moretags) = "yaml:\"next_seq_recv\"" ];
+}
diff --git a/proto/ibcgo/lightclients/tendermint/v1/tendermint.proto b/proto/ibcgo/lightclients/tendermint/v1/tendermint.proto
new file mode 100644
index 00000000..d6a408b6
--- /dev/null
+++ b/proto/ibcgo/lightclients/tendermint/v1/tendermint.proto
@@ -0,0 +1,146 @@
+syntax = "proto3";
+
+package ibcgo.lightclients.tendermint.v1;
+
+option go_package = "github.com/cosmos/ibc-go/light-clients/07-tendermint/types";
+
+import "tendermint/types/validator.proto";
+import "tendermint/types/types.proto";
+import "confio/proofs.proto";
+import "google/protobuf/duration.proto";
+import "google/protobuf/timestamp.proto";
+import "ibcgo/core/client/v1/client.proto";
+import "ibcgo/core/commitment/v1/commitment.proto";
+import "gogoproto/gogo.proto";
+
+// ClientState from Tendermint tracks the current validator set, latest height,
+// and a possible frozen height.
+message ClientState {
+ option (gogoproto.goproto_getters) = false;
+
+ string chain_id = 1;
+ Fraction trust_level = 2 [
+ (gogoproto.nullable) = false,
+ (gogoproto.moretags) = "yaml:\"trust_level\""
+ ];
+  // duration of the period since the LatestTimestamp during which the
+ // submitted headers are valid for upgrade
+ google.protobuf.Duration trusting_period = 3 [
+ (gogoproto.nullable) = false,
+ (gogoproto.stdduration) = true,
+ (gogoproto.moretags) = "yaml:\"trusting_period\""
+ ];
+ // duration of the staking unbonding period
+ google.protobuf.Duration unbonding_period = 4 [
+ (gogoproto.nullable) = false,
+ (gogoproto.stdduration) = true,
+ (gogoproto.moretags) = "yaml:\"unbonding_period\""
+ ];
+  // defines how much a new (untrusted) header's Time can drift into the future.
+ google.protobuf.Duration max_clock_drift = 5 [
+ (gogoproto.nullable) = false,
+ (gogoproto.stdduration) = true,
+ (gogoproto.moretags) = "yaml:\"max_clock_drift\""
+ ];
+ // Block height when the client was frozen due to a misbehaviour
+ ibcgo.core.client.v1.Height frozen_height = 6 [
+ (gogoproto.nullable) = false,
+ (gogoproto.moretags) = "yaml:\"frozen_height\""
+ ];
+ // Latest height the client was updated to
+ ibcgo.core.client.v1.Height latest_height = 7 [
+ (gogoproto.nullable) = false,
+ (gogoproto.moretags) = "yaml:\"latest_height\""
+ ];
+
+ // Proof specifications used in verifying counterparty state
+ repeated ics23.ProofSpec proof_specs = 8
+ [ (gogoproto.moretags) = "yaml:\"proof_specs\"" ];
+
+  // Path at which the next upgraded client will be committed.
+  // Each element corresponds to the key for a single CommitmentProof in the
+  // chained proof. NOTE: ClientState must be stored under
+  // `{upgradePath}/{upgradeHeight}/clientState`, and ConsensusState must be
+  // stored under `{upgradePath}/{upgradeHeight}/consensusState`. For SDK chains
+  // using the default upgrade module, upgrade_path should be
+  // []string{"upgrade", "upgradedIBCState"}
+ repeated string upgrade_path = 9
+ [ (gogoproto.moretags) = "yaml:\"upgrade_path\"" ];
+
+ // This flag, when set to true, will allow governance to recover a client
+ // which has expired
+ bool allow_update_after_expiry = 10
+ [ (gogoproto.moretags) = "yaml:\"allow_update_after_expiry\"" ];
+ // This flag, when set to true, will allow governance to unfreeze a client
+ // whose chain has experienced a misbehaviour event
+ bool allow_update_after_misbehaviour = 11
+ [ (gogoproto.moretags) = "yaml:\"allow_update_after_misbehaviour\"" ];
+}
+
+// ConsensusState defines the consensus state from Tendermint.
+message ConsensusState {
+ option (gogoproto.goproto_getters) = false;
+
+ // timestamp that corresponds to the block height in which the ConsensusState
+ // was stored.
+ google.protobuf.Timestamp timestamp = 1
+ [ (gogoproto.nullable) = false, (gogoproto.stdtime) = true ];
+  // commitment root (i.e. app hash)
+ ibcgo.core.commitment.v1.MerkleRoot root = 2 [ (gogoproto.nullable) = false ];
+ bytes next_validators_hash = 3 [
+ (gogoproto.casttype) =
+ "github.com/tendermint/tendermint/libs/bytes.HexBytes",
+ (gogoproto.moretags) = "yaml:\"next_validators_hash\""
+ ];
+}
+
+// Misbehaviour is a wrapper over two conflicting Headers
+// that implements the Misbehaviour interface expected by ICS-02
+message Misbehaviour {
+ option (gogoproto.goproto_getters) = false;
+
+ string client_id = 1 [ (gogoproto.moretags) = "yaml:\"client_id\"" ];
+ Header header_1 = 2 [
+ (gogoproto.customname) = "Header1",
+ (gogoproto.moretags) = "yaml:\"header_1\""
+ ];
+ Header header_2 = 3 [
+ (gogoproto.customname) = "Header2",
+ (gogoproto.moretags) = "yaml:\"header_2\""
+ ];
+}
+
+// Header defines the Tendermint client consensus Header.
+// It encapsulates all the information necessary to update from a trusted
+// Tendermint ConsensusState. The inclusion of TrustedHeight and
+// TrustedValidators allows this update to process correctly, so long as the
+// ConsensusState for the TrustedHeight exists; this removes race conditions
+// among relayers. The SignedHeader and ValidatorSet are the new untrusted update
+// fields for the client. The TrustedHeight is the height of a stored
+// ConsensusState on the client that will be used to verify the new untrusted
+// header. The Trusted ConsensusState must be within the unbonding period of the
+// current time in order to correctly verify, and the TrustedValidators must
+// hash to TrustedConsensusState.NextValidatorsHash since that is the last
+// trusted validator set at the TrustedHeight.
+message Header {
+ .tendermint.types.SignedHeader signed_header = 1 [
+ (gogoproto.embed) = true,
+ (gogoproto.moretags) = "yaml:\"signed_header\""
+ ];
+
+ .tendermint.types.ValidatorSet validator_set = 2
+ [ (gogoproto.moretags) = "yaml:\"validator_set\"" ];
+ ibcgo.core.client.v1.Height trusted_height = 3 [
+ (gogoproto.nullable) = false,
+ (gogoproto.moretags) = "yaml:\"trusted_height\""
+ ];
+ .tendermint.types.ValidatorSet trusted_validators = 4
+ [ (gogoproto.moretags) = "yaml:\"trusted_validators\"" ];
+}
+
+// Fraction defines the protobuf message type for tmmath.Fraction that only
+// supports positive values.
+message Fraction {
+ uint64 numerator = 1;
+ uint64 denominator = 2;
+}
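Putting the Tendermint client fields together: a new ClientState is normally filled with the same defaults the testing helpers later in this patch use (two-week trusting period, three-week unbonding period, ten-second clock drift, and the {"upgrade", "upgradedIBCState"} upgrade path). A minimal sketch, assuming the generated 07-tendermint types plus the ZeroHeight and GetSDKSpecs helpers from the client and commitment packages:

    package example

    import (
        "time"

        clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
        commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
        ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
    )

    // newTendermintClientState mirrors the ClientState message above using the
    // default durations from testing/chain.go; chainID and latestHeight are
    // whatever the counterparty chain currently reports.
    func newTendermintClientState(chainID string, latestHeight clienttypes.Height) *ibctmtypes.ClientState {
        return &ibctmtypes.ClientState{
            ChainId:         chainID,
            TrustLevel:      ibctmtypes.Fraction{Numerator: 1, Denominator: 3},
            TrustingPeriod:  time.Hour * 24 * 7 * 2,
            UnbondingPeriod: time.Hour * 24 * 7 * 3,
            MaxClockDrift:   time.Second * 10,
            FrozenHeight:    clienttypes.ZeroHeight(), // zero height: the client is not frozen
            LatestHeight:    latestHeight,
            ProofSpecs:      commitmenttypes.GetSDKSpecs(),
            UpgradePath:     []string{"upgrade", "upgradedIBCState"},
        }
    }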
diff --git a/scripts/README.md b/scripts/README.md
new file mode 100644
index 00000000..f213124c
--- /dev/null
+++ b/scripts/README.md
@@ -0,0 +1,3 @@
+Generally we should avoid shell scripting and write tests purely in Golang.
+However, some libraries are not Goroutine-safe (e.g. app simulations cannot be run safely in parallel),
+and OS-native threading may be more efficient for many parallel simulations, so we use shell scripts here.
diff --git a/scripts/linkify_changelog.py b/scripts/linkify_changelog.py
new file mode 100644
index 00000000..2492b9a8
--- /dev/null
+++ b/scripts/linkify_changelog.py
@@ -0,0 +1,15 @@
+import fileinput
+import re
+
+# This script goes through the provided file, and replaces any " \#",
+# with a valid markdown-formatted link to it, e.g.
+# " [\#number](https://github.com/cosmos/ibc-go/issues/number)"
+# Note that if the number is for a PR, github will auto-redirect you when you click the link.
+# It is safe to run the script multiple times in succession.
+#
+# Example:
+#
+# $ python ./scripts/linkify_changelog.py CHANGELOG.md
+for line in fileinput.input(inplace=1):
+ line = re.sub(r"\s\\#([0-9]+)", r" [\\#\1](https://github.com/cosmos/ibc-go/issues/\1)", line.rstrip())
+ print(line)
diff --git a/scripts/protoc-swagger-gen.sh b/scripts/protoc-swagger-gen.sh
new file mode 100755
index 00000000..30cf44bc
--- /dev/null
+++ b/scripts/protoc-swagger-gen.sh
@@ -0,0 +1,27 @@
+#!/usr/bin/env bash
+
+set -eo pipefail
+
+mkdir -p ./tmp-swagger-gen
+proto_dirs=$(find ./proto -path -prune -o -name '*.proto' -print0 | xargs -0 -n1 dirname | sort | uniq)
+for dir in $proto_dirs; do
+
+ # generate swagger files (filter query files)
+ query_file=$(find "${dir}" -maxdepth 1 \( -name 'query.proto' -o -name 'service.proto' \))
+ if [[ ! -z "$query_file" ]]; then
+ buf protoc \
+ -I "proto" \
+ -I "third_party/proto" \
+ "$query_file" \
+ --swagger_out=./tmp-swagger-gen \
+ --swagger_opt=logtostderr=true --swagger_opt=fqn_for_swagger_name=true --swagger_opt=simple_operation_ids=true
+ fi
+done
+
+# combine swagger files
+# uses nodejs package `swagger-combine`.
+# all the individual swagger files need to be configured in `config.json` for merging
+# swagger-combine ./client/docs/config.json -o ./client/docs/swagger-ui/swagger.yaml -f yaml --continueOnConflictingPaths true --includeDefinitions true
+
+# clean swagger files
+rm -rf ./tmp-swagger-gen
diff --git a/scripts/protocgen.sh b/scripts/protocgen.sh
new file mode 100755
index 00000000..656cff26
--- /dev/null
+++ b/scripts/protocgen.sh
@@ -0,0 +1,40 @@
+#!/usr/bin/env bash
+
+set -eo pipefail
+
+protoc_gen_gocosmos() {
+ if ! grep "github.com/gogo/protobuf => github.com/regen-network/protobuf" go.mod &>/dev/null ; then
+ echo -e "\tPlease run this command from somewhere inside the ibc-go folder."
+ return 1
+ fi
+
+ go get github.com/regen-network/cosmos-proto/protoc-gen-gocosmos@latest 2>/dev/null
+}
+
+protoc_gen_gocosmos
+
+proto_dirs=$(find ./proto -path -prune -o -name '*.proto' -print0 | xargs -0 -n1 dirname | sort | uniq)
+for dir in $proto_dirs; do
+ buf protoc \
+ -I "proto" \
+ -I "third_party/proto" \
+ --gocosmos_out=plugins=interfacetype+grpc,\
+Mgoogle/protobuf/any.proto=github.com/cosmos/cosmos-sdk/codec/types:. \
+ --grpc-gateway_out=logtostderr=true:. \
+ $(find "${dir}" -maxdepth 1 -name '*.proto')
+
+done
+
+# command to generate docs using protoc-gen-doc
+buf protoc \
+-I "proto" \
+-I "third_party/proto" \
+--doc_out=./docs/ibc \
+--doc_opt=./docs/protodoc-markdown.tmpl,proto-docs.md \
+$(find "$(pwd)/proto" -maxdepth 5 -name '*.proto')
+go mod tidy
+
+
+# move proto files to the right places
+cp -r github.com/cosmos/ibc-go/* ./
+rm -rf github.com
diff --git a/testing/chain.go b/testing/chain.go
new file mode 100644
index 00000000..0534066d
--- /dev/null
+++ b/testing/chain.go
@@ -0,0 +1,910 @@
+package ibctesting
+
+import (
+ "bytes"
+ "fmt"
+ "strconv"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+ abci "github.com/tendermint/tendermint/abci/types"
+ "github.com/tendermint/tendermint/crypto/tmhash"
+ tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
+ tmprotoversion "github.com/tendermint/tendermint/proto/tendermint/version"
+ tmtypes "github.com/tendermint/tendermint/types"
+ tmversion "github.com/tendermint/tendermint/version"
+
+ "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/codec"
+ "github.com/cosmos/cosmos-sdk/crypto/keys/secp256k1"
+ cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types"
+ "github.com/cosmos/cosmos-sdk/simapp"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
+ banktypes "github.com/cosmos/cosmos-sdk/x/bank/types"
+ capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"
+ ibctransfertypes "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types"
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
+ channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+ commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
+ host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/types"
+ ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
+ "github.com/cosmos/cosmos-sdk/x/ibc/testing/mock"
+ "github.com/cosmos/cosmos-sdk/x/staking/teststaking"
+ stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types"
+)
+
+const (
+ // Default params constants used to create a TM client
+ TrustingPeriod time.Duration = time.Hour * 24 * 7 * 2
+ UnbondingPeriod time.Duration = time.Hour * 24 * 7 * 3
+ MaxClockDrift time.Duration = time.Second * 10
+ DefaultDelayPeriod uint64 = 0
+
+ DefaultChannelVersion = ibctransfertypes.Version
+ InvalidID = "IDisInvalid"
+
+ ConnectionIDPrefix = "conn"
+ ChannelIDPrefix = "chan"
+
+ TransferPort = ibctransfertypes.ModuleName
+ MockPort = mock.ModuleName
+
+ // used for testing UpdateClientProposal
+ Title = "title"
+ Description = "description"
+)
+
+var (
+ DefaultOpenInitVersion *connectiontypes.Version
+
+ // Default params variables used to create a TM client
+ DefaultTrustLevel ibctmtypes.Fraction = ibctmtypes.DefaultTrustLevel
+ TestHash = tmhash.Sum([]byte("TESTING HASH"))
+ TestCoin = sdk.NewCoin(sdk.DefaultBondDenom, sdk.NewInt(100))
+
+ UpgradePath = []string{"upgrade", "upgradedIBCState"}
+
+ ConnectionVersion = connectiontypes.ExportedVersionsToProto(connectiontypes.GetCompatibleVersions())[0]
+
+ MockAcknowledgement = mock.MockAcknowledgement
+ MockCommitment = mock.MockCommitment
+)
+
+// TestChain is a testing struct that wraps a simapp with the last TM Header, the current ABCI
+// header and the validators of the TestChain. It also contains a field called ChainID. This
+// is the clientID that *other* chains use to refer to this TestChain. The SenderAccount
+// is used for delivering transactions through the application state.
+// NOTE: the actual application uses an empty chain-id for ease of testing.
+type TestChain struct {
+ t *testing.T
+
+ App *simapp.SimApp
+ ChainID string
+ LastHeader *ibctmtypes.Header // header for last block height committed
+ CurrentHeader tmproto.Header // header for current block height
+ QueryServer types.QueryServer
+ TxConfig client.TxConfig
+ Codec codec.BinaryMarshaler
+
+ Vals *tmtypes.ValidatorSet
+ Signers []tmtypes.PrivValidator
+
+ senderPrivKey cryptotypes.PrivKey
+ SenderAccount authtypes.AccountI
+
+ // IBC specific helpers
+ ClientIDs []string // ClientID's used on this chain
+ Connections []*TestConnection // track connectionID's created for this chain
+}
+
+// NewTestChain initializes a new TestChain instance with a single validator set using a
+// generated private key. It also creates a sender account to be used for delivering transactions.
+//
+// The first block height is committed to state in order to allow for client creations on
+// counterparty chains. The TestChain will return with a block height starting at 2.
+//
+// Time management is handled by the Coordinator in order to ensure synchrony between chains.
+// Each update of any chain increments the block header time for all chains by 5 seconds.
+func NewTestChain(t *testing.T, chainID string) *TestChain {
+ // generate validator private/public key
+ privVal := mock.NewPV()
+ pubKey, err := privVal.GetPubKey()
+ require.NoError(t, err)
+
+ // create validator set with single validator
+ validator := tmtypes.NewValidator(pubKey, 1)
+ valSet := tmtypes.NewValidatorSet([]*tmtypes.Validator{validator})
+ signers := []tmtypes.PrivValidator{privVal}
+
+ // generate genesis account
+ senderPrivKey := secp256k1.GenPrivKey()
+ acc := authtypes.NewBaseAccount(senderPrivKey.PubKey().Address().Bytes(), senderPrivKey.PubKey(), 0, 0)
+ balance := banktypes.Balance{
+ Address: acc.GetAddress().String(),
+ Coins: sdk.NewCoins(sdk.NewCoin(sdk.DefaultBondDenom, sdk.NewInt(100000000000000))),
+ }
+
+ app := simapp.SetupWithGenesisValSet(t, valSet, []authtypes.GenesisAccount{acc}, balance)
+
+ // create current header and call begin block
+ header := tmproto.Header{
+ ChainID: chainID,
+ Height: 1,
+ Time: globalStartTime,
+ }
+
+ txConfig := simapp.MakeTestEncodingConfig().TxConfig
+
+ // create an account to send transactions from
+ chain := &TestChain{
+ t: t,
+ ChainID: chainID,
+ App: app,
+ CurrentHeader: header,
+ QueryServer: app.IBCKeeper,
+ TxConfig: txConfig,
+ Codec: app.AppCodec(),
+ Vals: valSet,
+ Signers: signers,
+ senderPrivKey: senderPrivKey,
+ SenderAccount: acc,
+ ClientIDs: make([]string, 0),
+ Connections: make([]*TestConnection, 0),
+ }
+
+ cap := chain.App.IBCKeeper.PortKeeper.BindPort(chain.GetContext(), MockPort)
+ err = chain.App.ScopedIBCMockKeeper.ClaimCapability(chain.GetContext(), cap, host.PortPath(MockPort))
+ require.NoError(t, err)
+
+ chain.NextBlock()
+
+ return chain
+}
+
+// GetContext returns the current context for the application.
+func (chain *TestChain) GetContext() sdk.Context {
+ return chain.App.BaseApp.NewContext(false, chain.CurrentHeader)
+}
+
+// QueryProof performs an abci query with the given key and returns the proto encoded merkle proof
+// for the query and the height at which the proof will succeed on a tendermint verifier.
+func (chain *TestChain) QueryProof(key []byte) ([]byte, clienttypes.Height) {
+ res := chain.App.Query(abci.RequestQuery{
+ Path: fmt.Sprintf("store/%s/key", host.StoreKey),
+ Height: chain.App.LastBlockHeight() - 1,
+ Data: key,
+ Prove: true,
+ })
+
+ merkleProof, err := commitmenttypes.ConvertProofs(res.ProofOps)
+ require.NoError(chain.t, err)
+
+ proof, err := chain.App.AppCodec().MarshalBinaryBare(&merkleProof)
+ require.NoError(chain.t, err)
+
+ revision := clienttypes.ParseChainID(chain.ChainID)
+
+ // proof height + 1 is returned because the proof corresponds to the height at which it
+ // was created in the IAVL tree. Tendermint, and subsequently the clients that rely on it,
+ // have heights 1 above the IAVL tree. Thus we return proof height + 1
+ return proof, clienttypes.NewHeight(revision, uint64(res.Height)+1)
+}
+
+// QueryUpgradeProof performs an abci query with the given key and returns the proto encoded merkle proof
+// for the query and the height at which the proof will succeed on a tendermint verifier.
+func (chain *TestChain) QueryUpgradeProof(key []byte, height uint64) ([]byte, clienttypes.Height) {
+ res := chain.App.Query(abci.RequestQuery{
+ Path: "store/upgrade/key",
+ Height: int64(height - 1),
+ Data: key,
+ Prove: true,
+ })
+
+ merkleProof, err := commitmenttypes.ConvertProofs(res.ProofOps)
+ require.NoError(chain.t, err)
+
+ proof, err := chain.App.AppCodec().MarshalBinaryBare(&merkleProof)
+ require.NoError(chain.t, err)
+
+ revision := clienttypes.ParseChainID(chain.ChainID)
+
+ // proof height + 1 is returned because the proof corresponds to the height at which it
+ // was created in the IAVL tree. Tendermint, and subsequently the clients that rely on it,
+ // have heights 1 above the IAVL tree. Thus we return proof height + 1
+ return proof, clienttypes.NewHeight(revision, uint64(res.Height+1))
+}
+
+// QueryClientStateProof performs an abci query for a client state
+// stored with a given clientID and returns the ClientState along with the proof
+func (chain *TestChain) QueryClientStateProof(clientID string) (exported.ClientState, []byte) {
+ // retrieve client state to provide proof for
+ clientState, found := chain.App.IBCKeeper.ClientKeeper.GetClientState(chain.GetContext(), clientID)
+ require.True(chain.t, found)
+
+ clientKey := host.FullClientStateKey(clientID)
+ proofClient, _ := chain.QueryProof(clientKey)
+
+ return clientState, proofClient
+}
+
+// QueryConsensusStateProof performs an abci query for a consensus state
+// stored on the given clientID. The proof and consensusHeight are returned.
+func (chain *TestChain) QueryConsensusStateProof(clientID string) ([]byte, clienttypes.Height) {
+ clientState := chain.GetClientState(clientID)
+
+ consensusHeight := clientState.GetLatestHeight().(clienttypes.Height)
+ consensusKey := host.FullConsensusStateKey(clientID, consensusHeight)
+ proofConsensus, _ := chain.QueryProof(consensusKey)
+
+ return proofConsensus, consensusHeight
+}
+
+// NextBlock sets the last header to the current header and increments the current header to be
+// at the next block height. It does not update the time as that is handled by the Coordinator.
+//
+// CONTRACT: this function must only be called after app.Commit() occurs
+func (chain *TestChain) NextBlock() {
+ // set the last header to the current header
+ // use nil trusted fields
+ chain.LastHeader = chain.CurrentTMClientHeader()
+
+ // increment the current header
+ chain.CurrentHeader = tmproto.Header{
+ ChainID: chain.ChainID,
+ Height: chain.App.LastBlockHeight() + 1,
+ AppHash: chain.App.LastCommitID().Hash,
+ // NOTE: the time is increased by the coordinator to maintain time synchrony amongst
+ // chains.
+ Time: chain.CurrentHeader.Time,
+ ValidatorsHash: chain.Vals.Hash(),
+ NextValidatorsHash: chain.Vals.Hash(),
+ }
+
+ chain.App.BeginBlock(abci.RequestBeginBlock{Header: chain.CurrentHeader})
+
+}
+
+// sendMsgs delivers a transaction through the application without returning the result.
+func (chain *TestChain) sendMsgs(msgs ...sdk.Msg) error {
+ _, err := chain.SendMsgs(msgs...)
+ return err
+}
+
+// SendMsgs delivers a transaction through the application. It updates the sender's sequence
+// number, updates the TestChain's headers, and returns the result along with any error that
+// occurred.
+func (chain *TestChain) SendMsgs(msgs ...sdk.Msg) (*sdk.Result, error) {
+ _, r, err := simapp.SignCheckDeliver(
+ chain.t,
+ chain.TxConfig,
+ chain.App.BaseApp,
+ chain.GetContext().BlockHeader(),
+ msgs,
+ chain.ChainID,
+ []uint64{chain.SenderAccount.GetAccountNumber()},
+ []uint64{chain.SenderAccount.GetSequence()},
+ true, true, chain.senderPrivKey,
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ // SignCheckDeliver calls app.Commit()
+ chain.NextBlock()
+
+ // increment sequence for successful transaction execution
+ chain.SenderAccount.SetSequence(chain.SenderAccount.GetSequence() + 1)
+
+ return r, nil
+}
+
+// GetClientState retrieves the client state for the provided clientID. The client is
+// expected to exist otherwise testing will fail.
+func (chain *TestChain) GetClientState(clientID string) exported.ClientState {
+ clientState, found := chain.App.IBCKeeper.ClientKeeper.GetClientState(chain.GetContext(), clientID)
+ require.True(chain.t, found)
+
+ return clientState
+}
+
+// GetConsensusState retrieves the consensus state for the provided clientID and height.
+// It returns a success boolean indicating whether the consensus state exists.
+func (chain *TestChain) GetConsensusState(clientID string, height exported.Height) (exported.ConsensusState, bool) {
+ return chain.App.IBCKeeper.ClientKeeper.GetClientConsensusState(chain.GetContext(), clientID, height)
+}
+
+// GetValsAtHeight returns the validator set of the chain at a given height. It returns
+// a success boolean indicating whether the validator set exists at that height.
+func (chain *TestChain) GetValsAtHeight(height int64) (*tmtypes.ValidatorSet, bool) {
+ histInfo, ok := chain.App.StakingKeeper.GetHistoricalInfo(chain.GetContext(), height)
+ if !ok {
+ return nil, false
+ }
+
+ valSet := stakingtypes.Validators(histInfo.Valset)
+
+ tmValidators, err := teststaking.ToTmValidators(valSet)
+ if err != nil {
+ panic(err)
+ }
+ return tmtypes.NewValidatorSet(tmValidators), true
+}
+
+// GetConnection retrieves an IBC Connection for the provided TestConnection. The
+// connection is expected to exist otherwise testing will fail.
+func (chain *TestChain) GetConnection(testConnection *TestConnection) connectiontypes.ConnectionEnd {
+ connection, found := chain.App.IBCKeeper.ConnectionKeeper.GetConnection(chain.GetContext(), testConnection.ID)
+ require.True(chain.t, found)
+
+ return connection
+}
+
+// GetChannel retrieves an IBC Channel for the provided TestChannel. The channel
+// is expected to exist otherwise testing will fail.
+func (chain *TestChain) GetChannel(testChannel TestChannel) channeltypes.Channel {
+ channel, found := chain.App.IBCKeeper.ChannelKeeper.GetChannel(chain.GetContext(), testChannel.PortID, testChannel.ID)
+ require.True(chain.t, found)
+
+ return channel
+}
+
+// GetAcknowledgement retrieves an acknowledgement for the provided packet. If the
+// acknowledgement does not exist then testing will fail.
+func (chain *TestChain) GetAcknowledgement(packet exported.PacketI) []byte {
+ ack, found := chain.App.IBCKeeper.ChannelKeeper.GetPacketAcknowledgement(chain.GetContext(), packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence())
+ require.True(chain.t, found)
+
+ return ack
+}
+
+// GetPrefix returns the prefix used by a chain in connection creation.
+func (chain *TestChain) GetPrefix() commitmenttypes.MerklePrefix {
+ return commitmenttypes.NewMerklePrefix(chain.App.IBCKeeper.ConnectionKeeper.GetCommitmentPrefix().Bytes())
+}
+
+// NewClientID appends a new clientID string in the format:
+// <clientType>-<index>
+func (chain *TestChain) NewClientID(clientType string) string {
+ clientID := fmt.Sprintf("%s-%s", clientType, strconv.Itoa(len(chain.ClientIDs)))
+ chain.ClientIDs = append(chain.ClientIDs, clientID)
+ return clientID
+}
+
+// AddTestConnection appends a new TestConnection which contains references
+// to the connection id, client id and counterparty client id.
+func (chain *TestChain) AddTestConnection(clientID, counterpartyClientID string) *TestConnection {
+ conn := chain.ConstructNextTestConnection(clientID, counterpartyClientID)
+
+ chain.Connections = append(chain.Connections, conn)
+ return conn
+}
+
+// ConstructNextTestConnection constructs the next test connection to be
+// created given a clientID and counterparty clientID. The connection id
+// format: connection-<index>
+func (chain *TestChain) ConstructNextTestConnection(clientID, counterpartyClientID string) *TestConnection {
+ connectionID := connectiontypes.FormatConnectionIdentifier(uint64(len(chain.Connections)))
+ return &TestConnection{
+ ID: connectionID,
+ ClientID: clientID,
+ NextChannelVersion: DefaultChannelVersion,
+ CounterpartyClientID: counterpartyClientID,
+ }
+}
+
+// GetFirstTestConnection returns the first test connection for a given clientID.
+// The connection may or may not exist in the chain state.
+func (chain *TestChain) GetFirstTestConnection(clientID, counterpartyClientID string) *TestConnection {
+ if len(chain.Connections) > 0 {
+ return chain.Connections[0]
+ }
+
+ return chain.ConstructNextTestConnection(clientID, counterpartyClientID)
+}
+
+// AddTestChannel appends a new TestChannel which contains references to the port and channel ID
+// used for channel creation and interaction. See 'NextTestChannel' for channel ID naming format.
+func (chain *TestChain) AddTestChannel(conn *TestConnection, portID string) TestChannel {
+ channel := chain.NextTestChannel(conn, portID)
+ conn.Channels = append(conn.Channels, channel)
+ return channel
+}
+
+// NextTestChannel returns the next test channel to be created on this connection, but does not
+// add it to the list of created channels. This function is expected to be used when the caller
+// has not created the associated channel in app state, but would still like to refer to the
+// non-existent channel usually to test for its non-existence.
+//
+// channel ID format: channel-<sequence>
+//
+// The port is passed in by the caller.
+func (chain *TestChain) NextTestChannel(conn *TestConnection, portID string) TestChannel {
+ nextChanSeq := chain.App.IBCKeeper.ChannelKeeper.GetNextChannelSequence(chain.GetContext())
+ channelID := channeltypes.FormatChannelIdentifier(nextChanSeq)
+ return TestChannel{
+ PortID: portID,
+ ID: channelID,
+ ClientID: conn.ClientID,
+ CounterpartyClientID: conn.CounterpartyClientID,
+ Version: conn.NextChannelVersion,
+ }
+}
+
+// ConstructMsgCreateClient constructs a message to create a new client state (tendermint or solomachine).
+// NOTE: a solo machine client will be created with an empty diversifier.
+func (chain *TestChain) ConstructMsgCreateClient(counterparty *TestChain, clientID string, clientType string) *clienttypes.MsgCreateClient {
+ var (
+ clientState exported.ClientState
+ consensusState exported.ConsensusState
+ )
+
+ switch clientType {
+ case exported.Tendermint:
+ height := counterparty.LastHeader.GetHeight().(clienttypes.Height)
+ clientState = ibctmtypes.NewClientState(
+ counterparty.ChainID, DefaultTrustLevel, TrustingPeriod, UnbondingPeriod, MaxClockDrift,
+ height, commitmenttypes.GetSDKSpecs(), UpgradePath, false, false,
+ )
+ consensusState = counterparty.LastHeader.ConsensusState()
+ case exported.Solomachine:
+ solo := NewSolomachine(chain.t, chain.Codec, clientID, "", 1)
+ clientState = solo.ClientState()
+ consensusState = solo.ConsensusState()
+ default:
+ chain.t.Fatalf("unsupported client state type %s", clientType)
+ }
+
+ msg, err := clienttypes.NewMsgCreateClient(
+ clientState, consensusState, chain.SenderAccount.GetAddress(),
+ )
+ require.NoError(chain.t, err)
+ return msg
+}
+
+// CreateTMClient will construct and execute a 07-tendermint MsgCreateClient. A counterparty
+// client will be created on the (target) chain.
+func (chain *TestChain) CreateTMClient(counterparty *TestChain, clientID string) error {
+ // construct MsgCreateClient using counterparty
+ msg := chain.ConstructMsgCreateClient(counterparty, clientID, exported.Tendermint)
+ return chain.sendMsgs(msg)
+}
+
+// UpdateTMClient will construct and execute a 07-tendermint MsgUpdateClient. The counterparty
+// client will be updated on the (target) chain. UpdateTMClient mocks the relayer flow
+// necessary for updating a Tendermint client.
+func (chain *TestChain) UpdateTMClient(counterparty *TestChain, clientID string) error {
+ header, err := chain.ConstructUpdateTMClientHeader(counterparty, clientID)
+ require.NoError(chain.t, err)
+
+ msg, err := clienttypes.NewMsgUpdateClient(
+ clientID, header,
+ chain.SenderAccount.GetAddress(),
+ )
+ require.NoError(chain.t, err)
+
+ return chain.sendMsgs(msg)
+}
+
+// ConstructUpdateTMClientHeader will construct a valid 07-tendermint Header to update the
+// light client on the source chain.
+func (chain *TestChain) ConstructUpdateTMClientHeader(counterparty *TestChain, clientID string) (*ibctmtypes.Header, error) {
+ header := counterparty.LastHeader
+ // Relayer must query for LatestHeight on client to get TrustedHeight
+ trustedHeight := chain.GetClientState(clientID).GetLatestHeight().(clienttypes.Height)
+ var (
+ tmTrustedVals *tmtypes.ValidatorSet
+ ok bool
+ )
+ // Once we get TrustedHeight from client, we must query the validators from the counterparty chain
+ // If the LatestHeight == LastHeader.Height, then TrustedValidators are current validators
+ // If LatestHeight < LastHeader.Height, we can query the historical validator set from HistoricalInfo
+ if trustedHeight == counterparty.LastHeader.GetHeight() {
+ tmTrustedVals = counterparty.Vals
+ } else {
+ // NOTE: We need to get validators from counterparty at height: trustedHeight+1
+ // since the last trusted validators for a header at height h
+ // is the NextValidators at h+1 committed to in header h by
+ // NextValidatorsHash
+ tmTrustedVals, ok = counterparty.GetValsAtHeight(int64(trustedHeight.RevisionHeight + 1))
+ if !ok {
+ return nil, sdkerrors.Wrapf(ibctmtypes.ErrInvalidHeaderHeight, "could not retrieve trusted validators at trustedHeight: %d", trustedHeight)
+ }
+ }
+ // inject trusted fields into last header
+ // for now assume revision number is 0
+ header.TrustedHeight = trustedHeight
+
+ trustedVals, err := tmTrustedVals.ToProto()
+ if err != nil {
+ return nil, err
+ }
+ header.TrustedValidators = trustedVals
+
+ return header, nil
+
+}
+
+// ExpireClient fast forwards the chain's block time by the provided amount of time which will
+// expire any clients with a trusting period less than or equal to this amount of time.
+func (chain *TestChain) ExpireClient(amount time.Duration) {
+ chain.CurrentHeader.Time = chain.CurrentHeader.Time.Add(amount)
+}
+
+// CurrentTMClientHeader creates a TM header using the current header parameters
+// on the chain. The trusted fields in the header are set to nil.
+func (chain *TestChain) CurrentTMClientHeader() *ibctmtypes.Header {
+ return chain.CreateTMClientHeader(chain.ChainID, chain.CurrentHeader.Height, clienttypes.Height{}, chain.CurrentHeader.Time, chain.Vals, nil, chain.Signers)
+}
+
+// CreateTMClientHeader creates a TM header to update the TM client. Args are passed in to allow
+// caller flexibility to use params that differ from the chain.
+func (chain *TestChain) CreateTMClientHeader(chainID string, blockHeight int64, trustedHeight clienttypes.Height, timestamp time.Time, tmValSet, tmTrustedVals *tmtypes.ValidatorSet, signers []tmtypes.PrivValidator) *ibctmtypes.Header {
+ var (
+ valSet *tmproto.ValidatorSet
+ trustedVals *tmproto.ValidatorSet
+ )
+ require.NotNil(chain.t, tmValSet)
+
+ vsetHash := tmValSet.Hash()
+
+ tmHeader := tmtypes.Header{
+ Version: tmprotoversion.Consensus{Block: tmversion.BlockProtocol, App: 2},
+ ChainID: chainID,
+ Height: blockHeight,
+ Time: timestamp,
+ LastBlockID: MakeBlockID(make([]byte, tmhash.Size), 10_000, make([]byte, tmhash.Size)),
+ LastCommitHash: chain.App.LastCommitID().Hash,
+ DataHash: tmhash.Sum([]byte("data_hash")),
+ ValidatorsHash: vsetHash,
+ NextValidatorsHash: vsetHash,
+ ConsensusHash: tmhash.Sum([]byte("consensus_hash")),
+ AppHash: chain.CurrentHeader.AppHash,
+ LastResultsHash: tmhash.Sum([]byte("last_results_hash")),
+ EvidenceHash: tmhash.Sum([]byte("evidence_hash")),
+ ProposerAddress: tmValSet.Proposer.Address, //nolint:staticcheck
+ }
+ hhash := tmHeader.Hash()
+ blockID := MakeBlockID(hhash, 3, tmhash.Sum([]byte("part_set")))
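+ // have every signer vote for the constructed block ID so that the resulting commit
+ // (and therefore the signed header) verifies against the provided validator set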
+ voteSet := tmtypes.NewVoteSet(chainID, blockHeight, 1, tmproto.PrecommitType, tmValSet)
+
+ commit, err := tmtypes.MakeCommit(blockID, blockHeight, 1, voteSet, signers, timestamp)
+ require.NoError(chain.t, err)
+
+ signedHeader := &tmproto.SignedHeader{
+ Header: tmHeader.ToProto(),
+ Commit: commit.ToProto(),
+ }
+
+ if tmValSet != nil {
+ valSet, err = tmValSet.ToProto()
+ if err != nil {
+ panic(err)
+ }
+ }
+
+ if tmTrustedVals != nil {
+ trustedVals, err = tmTrustedVals.ToProto()
+ if err != nil {
+ panic(err)
+ }
+ }
+
+ // The trusted fields may be nil. They may be filled before relaying messages to a client.
+ // The relayer is responsible for querying client and injecting appropriate trusted fields.
+ return &ibctmtypes.Header{
+ SignedHeader: signedHeader,
+ ValidatorSet: valSet,
+ TrustedHeight: trustedHeight,
+ TrustedValidators: trustedVals,
+ }
+}
+
+// MakeBlockID is copied from unexported test helpers in tmtypes so it can be used here.
+func MakeBlockID(hash []byte, partSetSize uint32, partSetHash []byte) tmtypes.BlockID {
+ return tmtypes.BlockID{
+ Hash: hash,
+ PartSetHeader: tmtypes.PartSetHeader{
+ Total: partSetSize,
+ Hash: partSetHash,
+ },
+ }
+}
+
+// CreateSortedSignerArray takes two PrivValidators, and the corresponding Validator structs
+// (including voting power). It returns a signer array of PrivValidators that matches the
+// sorting of ValidatorSet.
+// The sorting is first by .VotingPower (descending), with secondary index of .Address (ascending).
+func CreateSortedSignerArray(altPrivVal, suitePrivVal tmtypes.PrivValidator,
+ altVal, suiteVal *tmtypes.Validator) []tmtypes.PrivValidator {
+
+ switch {
+ case altVal.VotingPower > suiteVal.VotingPower:
+ return []tmtypes.PrivValidator{altPrivVal, suitePrivVal}
+ case altVal.VotingPower < suiteVal.VotingPower:
+ return []tmtypes.PrivValidator{suitePrivVal, altPrivVal}
+ default:
+ if bytes.Compare(altVal.Address, suiteVal.Address) == -1 {
+ return []tmtypes.PrivValidator{altPrivVal, suitePrivVal}
+ }
+ return []tmtypes.PrivValidator{suitePrivVal, altPrivVal}
+ }
+}
+
+// ConnectionOpenInit will construct and execute a MsgConnectionOpenInit.
+func (chain *TestChain) ConnectionOpenInit(
+ counterparty *TestChain,
+ connection, counterpartyConnection *TestConnection,
+) error {
+ msg := connectiontypes.NewMsgConnectionOpenInit(
+ connection.ClientID,
+ connection.CounterpartyClientID,
+ counterparty.GetPrefix(), DefaultOpenInitVersion, DefaultDelayPeriod,
+ chain.SenderAccount.GetAddress(),
+ )
+ return chain.sendMsgs(msg)
+}
+
+// ConnectionOpenTry will construct and execute a MsgConnectionOpenTry.
+func (chain *TestChain) ConnectionOpenTry(
+ counterparty *TestChain,
+ connection, counterpartyConnection *TestConnection,
+) error {
+ counterpartyClient, proofClient := counterparty.QueryClientStateProof(counterpartyConnection.ClientID)
+
+ connectionKey := host.ConnectionKey(counterpartyConnection.ID)
+ proofInit, proofHeight := counterparty.QueryProof(connectionKey)
+
+ proofConsensus, consensusHeight := counterparty.QueryConsensusStateProof(counterpartyConnection.ClientID)
+
+ msg := connectiontypes.NewMsgConnectionOpenTry(
+ "", connection.ClientID, // does not support handshake continuation
+ counterpartyConnection.ID, counterpartyConnection.ClientID,
+ counterpartyClient, counterparty.GetPrefix(), []*connectiontypes.Version{ConnectionVersion}, DefaultDelayPeriod,
+ proofInit, proofClient, proofConsensus,
+ proofHeight, consensusHeight,
+ chain.SenderAccount.GetAddress(),
+ )
+ return chain.sendMsgs(msg)
+}
+
+// ConnectionOpenAck will construct and execute a MsgConnectionOpenAck.
+func (chain *TestChain) ConnectionOpenAck(
+ counterparty *TestChain,
+ connection, counterpartyConnection *TestConnection,
+) error {
+ counterpartyClient, proofClient := counterparty.QueryClientStateProof(counterpartyConnection.ClientID)
+
+ connectionKey := host.ConnectionKey(counterpartyConnection.ID)
+ proofTry, proofHeight := counterparty.QueryProof(connectionKey)
+
+ proofConsensus, consensusHeight := counterparty.QueryConsensusStateProof(counterpartyConnection.ClientID)
+
+ msg := connectiontypes.NewMsgConnectionOpenAck(
+ connection.ID, counterpartyConnection.ID, counterpartyClient, // testing doesn't use flexible selection
+ proofTry, proofClient, proofConsensus,
+ proofHeight, consensusHeight,
+ ConnectionVersion,
+ chain.SenderAccount.GetAddress(),
+ )
+ return chain.sendMsgs(msg)
+}
+
+// ConnectionOpenConfirm will construct and execute a MsgConnectionOpenConfirm.
+func (chain *TestChain) ConnectionOpenConfirm(
+ counterparty *TestChain,
+ connection, counterpartyConnection *TestConnection,
+) error {
+ connectionKey := host.ConnectionKey(counterpartyConnection.ID)
+ proof, height := counterparty.QueryProof(connectionKey)
+
+ msg := connectiontypes.NewMsgConnectionOpenConfirm(
+ connection.ID,
+ proof, height,
+ chain.SenderAccount.GetAddress(),
+ )
+ return chain.sendMsgs(msg)
+}
+
+// CreatePortCapability binds and claims a capability for the given portID if it does not
+// already exist. This function will fail testing on any resulting error.
+// NOTE: only creation of a capability for a transfer or mock port is supported.
+// Other applications must bind to the port in InitGenesis or modify this code.
+func (chain *TestChain) CreatePortCapability(portID string) {
+ // check if the portID is already bound; if not, bind it
+ _, ok := chain.App.ScopedIBCKeeper.GetCapability(chain.GetContext(), host.PortPath(portID))
+ if !ok {
+ // create capability using the IBC capability keeper
+ cap, err := chain.App.ScopedIBCKeeper.NewCapability(chain.GetContext(), host.PortPath(portID))
+ require.NoError(chain.t, err)
+
+ switch portID {
+ case MockPort:
+ // claim capability using the mock capability keeper
+ err = chain.App.ScopedIBCMockKeeper.ClaimCapability(chain.GetContext(), cap, host.PortPath(portID))
+ require.NoError(chain.t, err)
+ case TransferPort:
+ // claim capability using the transfer capability keeper
+ err = chain.App.ScopedTransferKeeper.ClaimCapability(chain.GetContext(), cap, host.PortPath(portID))
+ require.NoError(chain.t, err)
+ default:
+ panic(fmt.Sprintf("unsupported ibc testing package port ID %s", portID))
+ }
+ }
+
+ chain.App.Commit()
+
+ chain.NextBlock()
+}
+
+// GetPortCapability returns the port capability for the given portID. The capability must
+// exist, otherwise testing will fail.
+func (chain *TestChain) GetPortCapability(portID string) *capabilitytypes.Capability {
+ cap, ok := chain.App.ScopedIBCKeeper.GetCapability(chain.GetContext(), host.PortPath(portID))
+ require.True(chain.t, ok)
+
+ return cap
+}
+
+// CreateChannelCapability binds and claims a capability for the given portID and channelID
+// if it does not already exist. This function will fail testing on any resulting error.
+func (chain *TestChain) CreateChannelCapability(portID, channelID string) {
+ capName := host.ChannelCapabilityPath(portID, channelID)
+ // check if the channel capability already exists; if not, create and claim it
+ _, ok := chain.App.ScopedIBCKeeper.GetCapability(chain.GetContext(), capName)
+ if !ok {
+ cap, err := chain.App.ScopedIBCKeeper.NewCapability(chain.GetContext(), capName)
+ require.NoError(chain.t, err)
+ err = chain.App.ScopedTransferKeeper.ClaimCapability(chain.GetContext(), cap, capName)
+ require.NoError(chain.t, err)
+ }
+
+ chain.App.Commit()
+
+ chain.NextBlock()
+}
+
+// GetChannelCapability returns the channel capability for the given portID and channelID.
+// The capability must exist, otherwise testing will fail.
+func (chain *TestChain) GetChannelCapability(portID, channelID string) *capabilitytypes.Capability {
+ cap, ok := chain.App.ScopedIBCKeeper.GetCapability(chain.GetContext(), host.ChannelCapabilityPath(portID, channelID))
+ require.True(chain.t, ok)
+
+ return cap
+}
+
+// ChanOpenInit will construct and execute a MsgChannelOpenInit.
+func (chain *TestChain) ChanOpenInit(
+ ch, counterparty TestChannel,
+ order channeltypes.Order,
+ connectionID string,
+) error {
+ msg := channeltypes.NewMsgChannelOpenInit(
+ ch.PortID,
+ ch.Version, order, []string{connectionID},
+ counterparty.PortID,
+ chain.SenderAccount.GetAddress(),
+ )
+ return chain.sendMsgs(msg)
+}
+
+// ChanOpenTry will construct and execute a MsgChannelOpenTry.
+func (chain *TestChain) ChanOpenTry(
+ counterparty *TestChain,
+ ch, counterpartyCh TestChannel,
+ order channeltypes.Order,
+ connectionID string,
+) error {
+ proof, height := counterparty.QueryProof(host.ChannelKey(counterpartyCh.PortID, counterpartyCh.ID))
+
+ msg := channeltypes.NewMsgChannelOpenTry(
+ ch.PortID, "", // does not support handshake continuation
+ ch.Version, order, []string{connectionID},
+ counterpartyCh.PortID, counterpartyCh.ID, counterpartyCh.Version,
+ proof, height,
+ chain.SenderAccount.GetAddress(),
+ )
+ return chain.sendMsgs(msg)
+}
+
+// ChanOpenAck will construct and execute a MsgChannelOpenAck.
+func (chain *TestChain) ChanOpenAck(
+ counterparty *TestChain,
+ ch, counterpartyCh TestChannel,
+) error {
+ proof, height := counterparty.QueryProof(host.ChannelKey(counterpartyCh.PortID, counterpartyCh.ID))
+
+ msg := channeltypes.NewMsgChannelOpenAck(
+ ch.PortID, ch.ID,
+ counterpartyCh.ID, counterpartyCh.Version, // testing doesn't use flexible selection
+ proof, height,
+ chain.SenderAccount.GetAddress(),
+ )
+ return chain.sendMsgs(msg)
+}
+
+// ChanOpenConfirm will construct and execute a MsgChannelOpenConfirm.
+func (chain *TestChain) ChanOpenConfirm(
+ counterparty *TestChain,
+ ch, counterpartyCh TestChannel,
+) error {
+ proof, height := counterparty.QueryProof(host.ChannelKey(counterpartyCh.PortID, counterpartyCh.ID))
+
+ msg := channeltypes.NewMsgChannelOpenConfirm(
+ ch.PortID, ch.ID,
+ proof, height,
+ chain.SenderAccount.GetAddress(),
+ )
+ return chain.sendMsgs(msg)
+}
+
+// ChanCloseInit will construct and execute a MsgChannelCloseInit.
+//
+// NOTE: does not work with ibc-transfer module
+func (chain *TestChain) ChanCloseInit(
+ counterparty *TestChain,
+ channel TestChannel,
+) error {
+ msg := channeltypes.NewMsgChannelCloseInit(
+ channel.PortID, channel.ID,
+ chain.SenderAccount.GetAddress(),
+ )
+ return chain.sendMsgs(msg)
+}
+
+// GetPacketData returns marshalled ibc-transfer packet data to be used for
+// callback testing.
+func (chain *TestChain) GetPacketData(counterparty *TestChain) []byte {
+ packet := ibctransfertypes.FungibleTokenPacketData{
+ Denom: TestCoin.Denom,
+ Amount: TestCoin.Amount.Uint64(),
+ Sender: chain.SenderAccount.GetAddress().String(),
+ Receiver: counterparty.SenderAccount.GetAddress().String(),
+ }
+
+ return packet.GetBytes()
+}
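+
+// Illustrative sketch (not part of this file's API): the bytes returned by GetPacketData
+// can be wrapped in a channel packet and sent through the coordinator. The sequence,
+// timeout height, channel identifiers and variable names below are placeholder values.
+//
+//   data := chainA.GetPacketData(chainB)
+//   packet := channeltypes.NewPacket(data, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.NewHeight(0, 100), 0)
+//   err := coord.SendPacket(chainA, chainB, packet, clientB)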
+
+// SendPacket simulates sending a packet through the channel keeper. No message needs to be
+// passed since this call is made from a module.
+func (chain *TestChain) SendPacket(
+ packet exported.PacketI,
+) error {
+ channelCap := chain.GetChannelCapability(packet.GetSourcePort(), packet.GetSourceChannel())
+
+ // no need to send message, acting as a module
+ err := chain.App.IBCKeeper.ChannelKeeper.SendPacket(chain.GetContext(), channelCap, packet)
+ if err != nil {
+ return err
+ }
+
+ // commit changes
+ chain.App.Commit()
+ chain.NextBlock()
+
+ return nil
+}
+
+// WriteAcknowledgement simulates writing an acknowledgement to the chain.
+func (chain *TestChain) WriteAcknowledgement(
+ packet exported.PacketI,
+) error {
+ channelCap := chain.GetChannelCapability(packet.GetDestPort(), packet.GetDestChannel())
+
+ // no need to send message, acting as a handler
+ err := chain.App.IBCKeeper.ChannelKeeper.WriteAcknowledgement(chain.GetContext(), channelCap, packet, TestHash)
+ if err != nil {
+ return err
+ }
+
+ // commit changes
+ chain.App.Commit()
+ chain.NextBlock()
+
+ return nil
+}
diff --git a/testing/chain_test.go b/testing/chain_test.go
new file mode 100644
index 00000000..361a9c4c
--- /dev/null
+++ b/testing/chain_test.go
@@ -0,0 +1,47 @@
+package ibctesting_test
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+ tmtypes "github.com/tendermint/tendermint/types"
+
+ ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+ "github.com/cosmos/cosmos-sdk/x/ibc/testing/mock"
+)
+
+func TestCreateSortedSignerArray(t *testing.T) {
+ privVal1 := mock.NewPV()
+ pubKey1, err := privVal1.GetPubKey()
+ require.NoError(t, err)
+
+ privVal2 := mock.NewPV()
+ pubKey2, err := privVal2.GetPubKey()
+ require.NoError(t, err)
+
+ validator1 := tmtypes.NewValidator(pubKey1, 1)
+ validator2 := tmtypes.NewValidator(pubKey2, 2)
+
+ expected := []tmtypes.PrivValidator{privVal2, privVal1}
+
+ actual := ibctesting.CreateSortedSignerArray(privVal1, privVal2, validator1, validator2)
+ require.Equal(t, expected, actual)
+
+ // swap order
+ actual = ibctesting.CreateSortedSignerArray(privVal2, privVal1, validator2, validator1)
+ require.Equal(t, expected, actual)
+
+ // smaller address
+ validator1.Address = []byte{1}
+ validator2.Address = []byte{2}
+ validator2.VotingPower = 1
+
+ expected = []tmtypes.PrivValidator{privVal1, privVal2}
+
+ actual = ibctesting.CreateSortedSignerArray(privVal1, privVal2, validator1, validator2)
+ require.Equal(t, expected, actual)
+
+ // swap order
+ actual = ibctesting.CreateSortedSignerArray(privVal2, privVal1, validator2, validator1)
+ require.Equal(t, expected, actual)
+}
diff --git a/testing/coordinator.go b/testing/coordinator.go
new file mode 100644
index 00000000..ade28b4d
--- /dev/null
+++ b/testing/coordinator.go
@@ -0,0 +1,700 @@
+package ibctesting
+
+import (
+ "fmt"
+ "strconv"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+ abci "github.com/tendermint/tendermint/abci/types"
+
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+ host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+)
+
+var (
+ ChainIDPrefix = "testchain"
+ globalStartTime = time.Date(2020, 1, 2, 0, 0, 0, 0, time.UTC)
+ TimeIncrement = time.Second * 5
+)
+
+// Coordinator is a testing struct which contains N TestChains. It keeps all chains
+// in sync with regard to time.
+type Coordinator struct {
+ t *testing.T
+
+ Chains map[string]*TestChain
+}
+
+// NewCoordinator initializes a Coordinator with N TestChains.
+func NewCoordinator(t *testing.T, n int) *Coordinator {
+ chains := make(map[string]*TestChain)
+
+ for i := 0; i < n; i++ {
+ chainID := GetChainID(i)
+ chains[chainID] = NewTestChain(t, chainID)
+ }
+ return &Coordinator{
+ t: t,
+ Chains: chains,
+ }
+}
+
+// Setup constructs a TM client, connection, and channel on both chains provided. It will
+// fail if any error occurs. The clientIDs, TestConnections, and TestChannels are returned
+// for both chains. The channels created are connected to the mock application module.
+func (coord *Coordinator) Setup(
+ chainA, chainB *TestChain, order channeltypes.Order,
+) (string, string, *TestConnection, *TestConnection, TestChannel, TestChannel) {
+ clientA, clientB, connA, connB := coord.SetupClientConnections(chainA, chainB, exported.Tendermint)
+
+ // channels can also be referenced through the returned connections
+ channelA, channelB := coord.CreateMockChannels(chainA, chainB, connA, connB, order)
+
+ return clientA, clientB, connA, connB, channelA, channelB
+}
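+
+// A minimal usage sketch (illustrative; variable names are placeholders): a test typically
+// creates a coordinator with two chains and then opens clients, a connection and a mock
+// channel between them in a single call:
+//
+//   coord := ibctesting.NewCoordinator(t, 2)
+//   chainA := coord.GetChain(ibctesting.GetChainID(0))
+//   chainB := coord.GetChain(ibctesting.GetChainID(1))
+//   clientA, clientB, connA, connB, channelA, channelB := coord.Setup(chainA, chainB, channeltypes.UNORDERED)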
+
+// SetupClients is a helper function to create clients on both chains. It assumes the
+// caller does not anticipate any errors.
+func (coord *Coordinator) SetupClients(
+ chainA, chainB *TestChain,
+ clientType string,
+) (string, string) {
+
+ clientA, err := coord.CreateClient(chainA, chainB, clientType)
+ require.NoError(coord.t, err)
+
+ clientB, err := coord.CreateClient(chainB, chainA, clientType)
+ require.NoError(coord.t, err)
+
+ return clientA, clientB
+}
+
+// SetupClientConnections is a helper function to create clients and the appropriate
+// connections on both the source and counterparty chain. It assumes the caller does not
+// anticipate any errors.
+func (coord *Coordinator) SetupClientConnections(
+ chainA, chainB *TestChain,
+ clientType string,
+) (string, string, *TestConnection, *TestConnection) {
+
+ clientA, clientB := coord.SetupClients(chainA, chainB, clientType)
+
+ connA, connB := coord.CreateConnection(chainA, chainB, clientA, clientB)
+
+ return clientA, clientB, connA, connB
+}
+
+// CreateClient creates a counterparty client on the source chain and returns the clientID.
+func (coord *Coordinator) CreateClient(
+ source, counterparty *TestChain,
+ clientType string,
+) (clientID string, err error) {
+ coord.CommitBlock(source, counterparty)
+
+ clientID = source.NewClientID(clientType)
+
+ switch clientType {
+ case exported.Tendermint:
+ err = source.CreateTMClient(counterparty, clientID)
+
+ default:
+ err = fmt.Errorf("client type %s is not supported", clientType)
+ }
+
+ if err != nil {
+ return "", err
+ }
+
+ coord.IncrementTime()
+
+ return clientID, nil
+}
+
+// UpdateClient updates a counterparty client on the source chain.
+func (coord *Coordinator) UpdateClient(
+ source, counterparty *TestChain,
+ clientID string,
+ clientType string,
+) (err error) {
+ coord.CommitBlock(source, counterparty)
+
+ switch clientType {
+ case exported.Tendermint:
+ err = source.UpdateTMClient(counterparty, clientID)
+
+ default:
+ err = fmt.Errorf("client type %s is not supported", clientType)
+ }
+
+ if err != nil {
+ return err
+ }
+
+ coord.IncrementTime()
+
+ return nil
+}
+
+// CreateConnection constructs and executes connection handshake messages in order to create
+// OPEN connections on chainA and chainB. The connection information for chainA and chainB
+// is returned within a TestConnection struct. The function expects the connections to be
+// successfully opened otherwise testing will fail.
+func (coord *Coordinator) CreateConnection(
+ chainA, chainB *TestChain,
+ clientA, clientB string,
+) (*TestConnection, *TestConnection) {
+
+ connA, connB, err := coord.ConnOpenInit(chainA, chainB, clientA, clientB)
+ require.NoError(coord.t, err)
+
+ err = coord.ConnOpenTry(chainB, chainA, connB, connA)
+ require.NoError(coord.t, err)
+
+ err = coord.ConnOpenAck(chainA, chainB, connA, connB)
+ require.NoError(coord.t, err)
+
+ err = coord.ConnOpenConfirm(chainB, chainA, connB, connA)
+ require.NoError(coord.t, err)
+
+ return connA, connB
+}
+
+// CreateMockChannels constructs and executes channel handshake messages to create OPEN
+// channels that use a mock application module that returns nil on all callbacks. This
+// function expects the channels to be successfully opened, otherwise testing will
+// fail.
+func (coord *Coordinator) CreateMockChannels(
+ chainA, chainB *TestChain,
+ connA, connB *TestConnection,
+ order channeltypes.Order,
+) (TestChannel, TestChannel) {
+ return coord.CreateChannel(chainA, chainB, connA, connB, MockPort, MockPort, order)
+}
+
+// CreateTransferChannels constructs and executes channel handshake messages to create OPEN
+// ibc-transfer channels on chainA and chainB. The function expects the channels to be
+// successfully opened otherwise testing will fail.
+func (coord *Coordinator) CreateTransferChannels(
+ chainA, chainB *TestChain,
+ connA, connB *TestConnection,
+ order channeltypes.Order,
+) (TestChannel, TestChannel) {
+ return coord.CreateChannel(chainA, chainB, connA, connB, TransferPort, TransferPort, order)
+}
+
+// CreateChannel constructs and executes channel handshake messages in order to create
+// OPEN channels on chainA and chainB. The function expects the channels to be successfully
+// opened otherwise testing will fail.
+func (coord *Coordinator) CreateChannel(
+ chainA, chainB *TestChain,
+ connA, connB *TestConnection,
+ sourcePortID, counterpartyPortID string,
+ order channeltypes.Order,
+) (TestChannel, TestChannel) {
+
+ channelA, channelB, err := coord.ChanOpenInit(chainA, chainB, connA, connB, sourcePortID, counterpartyPortID, order)
+ require.NoError(coord.t, err)
+
+ err = coord.ChanOpenTry(chainB, chainA, channelB, channelA, connB, order)
+ require.NoError(coord.t, err)
+
+ err = coord.ChanOpenAck(chainA, chainB, channelA, channelB)
+ require.NoError(coord.t, err)
+
+ err = coord.ChanOpenConfirm(chainB, chainA, channelB, channelA)
+ require.NoError(coord.t, err)
+
+ return channelA, channelB
+}
+
+// SendPacket sends a packet through the channel keeper on the source chain and updates the
+// counterparty client for the source chain.
+func (coord *Coordinator) SendPacket(
+ source, counterparty *TestChain,
+ packet exported.PacketI,
+ counterpartyClientID string,
+) error {
+ if err := source.SendPacket(packet); err != nil {
+ return err
+ }
+ coord.IncrementTime()
+
+ // update source client on counterparty connection
+ return coord.UpdateClient(
+ counterparty, source,
+ counterpartyClientID, exported.Tendermint,
+ )
+}
+
+// RecvPacket receives a channel packet on the counterparty chain and updates
+// the client on the source chain representing the counterparty.
+func (coord *Coordinator) RecvPacket(
+ source, counterparty *TestChain,
+ sourceClient string,
+ packet channeltypes.Packet,
+) error {
+ // get proof of packet commitment on source
+ packetKey := host.PacketCommitmentKey(packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence())
+ proof, proofHeight := source.QueryProof(packetKey)
+
+ // Increment time and commit block so that 5 second delay period passes between send and receive
+ coord.IncrementTime()
+ coord.CommitBlock(source, counterparty)
+
+ recvMsg := channeltypes.NewMsgRecvPacket(packet, proof, proofHeight, counterparty.SenderAccount.GetAddress())
+
+ // receive on counterparty and update source client
+ return coord.SendMsgs(counterparty, source, sourceClient, []sdk.Msg{recvMsg})
+}
+
+// WriteAcknowledgement writes an acknowledgement to the channel keeper on the source chain and updates the
+// counterparty client for the source chain.
+func (coord *Coordinator) WriteAcknowledgement(
+ source, counterparty *TestChain,
+ packet exported.PacketI,
+ counterpartyClientID string,
+) error {
+ if err := source.WriteAcknowledgement(packet); err != nil {
+ return err
+ }
+ coord.IncrementTime()
+
+ // update source client on counterparty connection
+ return coord.UpdateClient(
+ counterparty, source,
+ counterpartyClientID, exported.Tendermint,
+ )
+}
+
+// AcknowledgePacket acknowledges on the source chain the packet received on
+// the counterparty chain and updates the client on the counterparty representing
+// the source chain.
+// TODO: add a query for the acknowledgement by events
+// - https://github.com/cosmos/cosmos-sdk/issues/6509
+func (coord *Coordinator) AcknowledgePacket(
+ source, counterparty *TestChain,
+ counterpartyClient string,
+ packet channeltypes.Packet, ack []byte,
+) error {
+ // get proof of acknowledgement on counterparty
+ packetKey := host.PacketAcknowledgementKey(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence())
+ proof, proofHeight := counterparty.QueryProof(packetKey)
+
+ // Increment time and commit block so that 5 second delay period passes between send and receive
+ coord.IncrementTime()
+ coord.CommitBlock(source, counterparty)
+
+ ackMsg := channeltypes.NewMsgAcknowledgement(packet, ack, proof, proofHeight, source.SenderAccount.GetAddress())
+ return coord.SendMsgs(source, counterparty, counterpartyClient, []sdk.Msg{ackMsg})
+}
+
+// RelayPacket receives a channel packet on counterparty, queries the ack
+// and acknowledges the packet on source. The clients are updated as needed.
+func (coord *Coordinator) RelayPacket(
+ source, counterparty *TestChain,
+ sourceClient, counterpartyClient string,
+ packet channeltypes.Packet, ack []byte,
+) error {
+ // Increment time and commit block so that 5 second delay period passes between send and receive
+ coord.IncrementTime()
+ coord.CommitBlock(counterparty)
+
+ if err := coord.RecvPacket(source, counterparty, sourceClient, packet); err != nil {
+ return err
+ }
+
+ // Increment time and commit block so that 5 second delay period passes between send and receive
+ coord.IncrementTime()
+ coord.CommitBlock(source)
+
+ return coord.AcknowledgePacket(source, counterparty, counterpartyClient, packet, ack)
+}
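+
+// Illustrative flow (a sketch, not prescriptive): once a packet has been sent from chainA,
+// a test can receive it on chainB and acknowledge it on chainA with a single call. The
+// variable names are placeholders; for a packet handled by the mock module the written
+// acknowledgement is expected to be mock.MockAcknowledgement.
+//
+//   err := coord.RelayPacket(chainA, chainB, clientA, clientB, packet, ack)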
+
+// IncrementTime iterates through all the TestChains and increments their current header time
+// by 5 seconds.
+//
+// CONTRACT: this function must be called after every commit on any TestChain.
+func (coord *Coordinator) IncrementTime() {
+ for _, chain := range coord.Chains {
+ chain.CurrentHeader.Time = chain.CurrentHeader.Time.Add(TimeIncrement)
+ chain.App.BeginBlock(abci.RequestBeginBlock{Header: chain.CurrentHeader})
+ }
+}
+
+// IncrementTimeBy iterates through all the TestChains and increments their current header time
+// by the specified duration.
+func (coord *Coordinator) IncrementTimeBy(increment time.Duration) {
+ for _, chain := range coord.Chains {
+ chain.CurrentHeader.Time = chain.CurrentHeader.Time.Add(increment)
+ chain.App.BeginBlock(abci.RequestBeginBlock{Header: chain.CurrentHeader})
+ }
+}
+
+// SendMsg delivers a single provided message to the chain. The counterparty
+// client is updated with the new source consensus state.
+func (coord *Coordinator) SendMsg(source, counterparty *TestChain, counterpartyClientID string, msg sdk.Msg) error {
+ return coord.SendMsgs(source, counterparty, counterpartyClientID, []sdk.Msg{msg})
+}
+
+// SendMsgs delivers the provided messages to the chain. The counterparty
+// client is updated with the new source consensus state.
+func (coord *Coordinator) SendMsgs(source, counterparty *TestChain, counterpartyClientID string, msgs []sdk.Msg) error {
+ if err := source.sendMsgs(msgs...); err != nil {
+ return err
+ }
+
+ coord.IncrementTime()
+
+ // update source client on counterparty connection
+ return coord.UpdateClient(
+ counterparty, source,
+ counterpartyClientID, exported.Tendermint,
+ )
+}
+
+// GetChain returns the TestChain using the given chainID and fails testing if it does
+// not exist.
+func (coord *Coordinator) GetChain(chainID string) *TestChain {
+ chain, found := coord.Chains[chainID]
+ require.True(coord.t, found, fmt.Sprintf("%s chain does not exist", chainID))
+ return chain
+}
+
+// GetChainID returns the chainID used for the provided index.
+func GetChainID(index int) string {
+ return ChainIDPrefix + strconv.Itoa(index)
+}
+
+// CommitBlock commits a block on the provided chains and then increments the global time.
+//
+// CONTRACT: the passed in list of chains must not contain duplicates
+func (coord *Coordinator) CommitBlock(chains ...*TestChain) {
+ for _, chain := range chains {
+ chain.App.Commit()
+ chain.NextBlock()
+ }
+ coord.IncrementTime()
+}
+
+// CommitNBlocks commits n blocks to state and updates the block height by 1 for each commit.
+func (coord *Coordinator) CommitNBlocks(chain *TestChain, n uint64) {
+ for i := uint64(0); i < n; i++ {
+ chain.App.BeginBlock(abci.RequestBeginBlock{Header: chain.CurrentHeader})
+ chain.App.Commit()
+ chain.NextBlock()
+ coord.IncrementTime()
+ }
+}
+
+// ConnOpenInit initializes a connection on the source chain with the state INIT
+// using the OpenInit handshake call.
+//
+// NOTE: The counterparty testing connection will be created even if it is not created in the
+// application state.
+func (coord *Coordinator) ConnOpenInit(
+ source, counterparty *TestChain,
+ clientID, counterpartyClientID string,
+) (*TestConnection, *TestConnection, error) {
+ sourceConnection := source.AddTestConnection(clientID, counterpartyClientID)
+ counterpartyConnection := counterparty.AddTestConnection(counterpartyClientID, clientID)
+
+ // initialize connection on source
+ if err := source.ConnectionOpenInit(counterparty, sourceConnection, counterpartyConnection); err != nil {
+ return sourceConnection, counterpartyConnection, err
+ }
+ coord.IncrementTime()
+
+ // update source client on counterparty connection
+ if err := coord.UpdateClient(
+ counterparty, source,
+ counterpartyClientID, exported.Tendermint,
+ ); err != nil {
+ return sourceConnection, counterpartyConnection, err
+ }
+
+ return sourceConnection, counterpartyConnection, nil
+}
+
+// ConnOpenInitOnBothChains initializes a connection on both the source and counterparty chains
+// with the state INIT using the OpenInit handshake call.
+func (coord *Coordinator) ConnOpenInitOnBothChains(
+ source, counterparty *TestChain,
+ clientID, counterpartyClientID string,
+) (*TestConnection, *TestConnection, error) {
+ sourceConnection := source.AddTestConnection(clientID, counterpartyClientID)
+ counterpartyConnection := counterparty.AddTestConnection(counterpartyClientID, clientID)
+
+ // initialize connection on source
+ if err := source.ConnectionOpenInit(counterparty, sourceConnection, counterpartyConnection); err != nil {
+ return sourceConnection, counterpartyConnection, err
+ }
+ coord.IncrementTime()
+
+ // initialize connection on counterparty
+ if err := counterparty.ConnectionOpenInit(source, counterpartyConnection, sourceConnection); err != nil {
+ return sourceConnection, counterpartyConnection, err
+ }
+ coord.IncrementTime()
+
+ // update counterparty client on source connection
+ if err := coord.UpdateClient(
+ source, counterparty,
+ clientID, exported.Tendermint,
+ ); err != nil {
+ return sourceConnection, counterpartyConnection, err
+ }
+
+ // update source client on counterparty connection
+ if err := coord.UpdateClient(
+ counterparty, source,
+ counterpartyClientID, exported.Tendermint,
+ ); err != nil {
+ return sourceConnection, counterpartyConnection, err
+ }
+
+ return sourceConnection, counterpartyConnection, nil
+}
+
+// ConnOpenTry initializes a connection on the source chain with the state TRYOPEN
+// using the OpenTry handshake call.
+func (coord *Coordinator) ConnOpenTry(
+ source, counterparty *TestChain,
+ sourceConnection, counterpartyConnection *TestConnection,
+) error {
+ // initialize TRYOPEN connection on source
+ if err := source.ConnectionOpenTry(counterparty, sourceConnection, counterpartyConnection); err != nil {
+ return err
+ }
+ coord.IncrementTime()
+
+ // update source client on counterparty connection
+ return coord.UpdateClient(
+ counterparty, source,
+ counterpartyConnection.ClientID, exported.Tendermint,
+ )
+}
+
+// ConnOpenAck initializes a connection on the source chain with the state OPEN
+// using the OpenAck handshake call.
+func (coord *Coordinator) ConnOpenAck(
+ source, counterparty *TestChain,
+ sourceConnection, counterpartyConnection *TestConnection,
+) error {
+ // set OPEN connection on source using OpenAck
+ if err := source.ConnectionOpenAck(counterparty, sourceConnection, counterpartyConnection); err != nil {
+ return err
+ }
+ coord.IncrementTime()
+
+ // update source client on counterparty connection
+ return coord.UpdateClient(
+ counterparty, source,
+ counterpartyConnection.ClientID, exported.Tendermint,
+ )
+}
+
+// ConnOpenConfirm initializes a connection on the source chain with the state OPEN
+// using the OpenConfirm handshake call.
+func (coord *Coordinator) ConnOpenConfirm(
+ source, counterparty *TestChain,
+ sourceConnection, counterpartyConnection *TestConnection,
+) error {
+ if err := source.ConnectionOpenConfirm(counterparty, sourceConnection, counterpartyConnection); err != nil {
+ return err
+ }
+ coord.IncrementTime()
+
+ // update source client on counterparty connection
+ return coord.UpdateClient(
+ counterparty, source,
+ counterpartyConnection.ClientID, exported.Tendermint,
+ )
+}
+
+// ChanOpenInit initializes a channel on the source chain with the state INIT
+// using the OpenInit handshake call.
+//
+// NOTE: The counterparty testing channel will be created even if it is not created in the
+// application state.
+func (coord *Coordinator) ChanOpenInit(
+ source, counterparty *TestChain,
+ connection, counterpartyConnection *TestConnection,
+ sourcePortID, counterpartyPortID string,
+ order channeltypes.Order,
+) (TestChannel, TestChannel, error) {
+ sourceChannel := source.AddTestChannel(connection, sourcePortID)
+ counterpartyChannel := counterparty.AddTestChannel(counterpartyConnection, counterpartyPortID)
+
+ // NOTE: only creation of a capability for a transfer or mock port is supported
+ // Other applications must bind to the port in InitGenesis or modify this code.
+ source.CreatePortCapability(sourceChannel.PortID)
+ coord.IncrementTime()
+
+ // initialize channel on source
+ if err := source.ChanOpenInit(sourceChannel, counterpartyChannel, order, connection.ID); err != nil {
+ return sourceChannel, counterpartyChannel, err
+ }
+ coord.IncrementTime()
+
+ // update source client on counterparty connection
+ if err := coord.UpdateClient(
+ counterparty, source,
+ counterpartyConnection.ClientID, exported.Tendermint,
+ ); err != nil {
+ return sourceChannel, counterpartyChannel, err
+ }
+
+ return sourceChannel, counterpartyChannel, nil
+}
+
+// ChanOpenInitOnBothChains initializes a channel on the source chain and counterparty chain
+// with the state INIT using the OpenInit handshake call.
+func (coord *Coordinator) ChanOpenInitOnBothChains(
+ source, counterparty *TestChain,
+ connection, counterpartyConnection *TestConnection,
+ sourcePortID, counterpartyPortID string,
+ order channeltypes.Order,
+) (TestChannel, TestChannel, error) {
+ sourceChannel := source.AddTestChannel(connection, sourcePortID)
+ counterpartyChannel := counterparty.AddTestChannel(counterpartyConnection, counterpartyPortID)
+
+ // NOTE: only creation of a capability for a transfer or mock port is supported
+ // Other applications must bind to the port in InitGenesis or modify this code.
+ source.CreatePortCapability(sourceChannel.PortID)
+ counterparty.CreatePortCapability(counterpartyChannel.PortID)
+ coord.IncrementTime()
+
+ // initialize channel on source
+ if err := source.ChanOpenInit(sourceChannel, counterpartyChannel, order, connection.ID); err != nil {
+ return sourceChannel, counterpartyChannel, err
+ }
+ coord.IncrementTime()
+
+ // initialize channel on counterparty
+ if err := counterparty.ChanOpenInit(counterpartyChannel, sourceChannel, order, counterpartyConnection.ID); err != nil {
+ return sourceChannel, counterpartyChannel, err
+ }
+ coord.IncrementTime()
+
+ // update counterparty client on source connection
+ if err := coord.UpdateClient(
+ source, counterparty,
+ connection.ClientID, exported.Tendermint,
+ ); err != nil {
+ return sourceChannel, counterpartyChannel, err
+ }
+
+ // update source client on counterparty connection
+ if err := coord.UpdateClient(
+ counterparty, source,
+ counterpartyConnection.ClientID, exported.Tendermint,
+ ); err != nil {
+ return sourceChannel, counterpartyChannel, err
+ }
+
+ return sourceChannel, counterpartyChannel, nil
+}
+
+// ChanOpenTry initializes a channel on the source chain with the state TRYOPEN
+// using the OpenTry handshake call.
+func (coord *Coordinator) ChanOpenTry(
+ source, counterparty *TestChain,
+ sourceChannel, counterpartyChannel TestChannel,
+ connection *TestConnection,
+ order channeltypes.Order,
+) error {
+
+ // initialize channel on source
+ if err := source.ChanOpenTry(counterparty, sourceChannel, counterpartyChannel, order, connection.ID); err != nil {
+ return err
+ }
+ coord.IncrementTime()
+
+ // update source client on counterparty connection
+ return coord.UpdateClient(
+ counterparty, source,
+ connection.CounterpartyClientID, exported.Tendermint,
+ )
+}
+
+// ChanOpenAck initializes a channel on the source chain with the state OPEN
+// using the OpenAck handshake call.
+func (coord *Coordinator) ChanOpenAck(
+ source, counterparty *TestChain,
+ sourceChannel, counterpartyChannel TestChannel,
+) error {
+
+ if err := source.ChanOpenAck(counterparty, sourceChannel, counterpartyChannel); err != nil {
+ return err
+ }
+ coord.IncrementTime()
+
+ // update source client on counterparty connection
+ return coord.UpdateClient(
+ counterparty, source,
+ sourceChannel.CounterpartyClientID, exported.Tendermint,
+ )
+}
+
+// ChanOpenConfirm initializes a channel on the source chain with the state OPEN
+// using the OpenConfirm handshake call.
+func (coord *Coordinator) ChanOpenConfirm(
+ source, counterparty *TestChain,
+ sourceChannel, counterpartyChannel TestChannel,
+) error {
+
+ if err := source.ChanOpenConfirm(counterparty, sourceChannel, counterpartyChannel); err != nil {
+ return err
+ }
+ coord.IncrementTime()
+
+ // update source client on counterparty connection
+ return coord.UpdateClient(
+ counterparty, source,
+ sourceChannel.CounterpartyClientID, exported.Tendermint,
+ )
+}
+
+// ChanCloseInit closes a channel on the source chain resulting in the channel's state
+// being set to CLOSED.
+//
+// NOTE: does not work with ibc-transfer module
+func (coord *Coordinator) ChanCloseInit(
+ source, counterparty *TestChain,
+ channel TestChannel,
+) error {
+
+ if err := source.ChanCloseInit(counterparty, channel); err != nil {
+ return err
+ }
+ coord.IncrementTime()
+
+ // update source client on counterparty connection
+ return coord.UpdateClient(
+ counterparty, source,
+ channel.CounterpartyClientID, exported.Tendermint,
+ )
+}
+
+// SetChannelClosed sets a channel state to CLOSED.
+func (coord *Coordinator) SetChannelClosed(
+ source, counterparty *TestChain,
+ testChannel TestChannel,
+) error {
+ channel := source.GetChannel(testChannel)
+
+ channel.State = channeltypes.CLOSED
+ source.App.IBCKeeper.ChannelKeeper.SetChannel(source.GetContext(), testChannel.PortID, testChannel.ID, channel)
+
+ coord.CommitBlock(source)
+
+ // update source client on counterparty connection
+ return coord.UpdateClient(
+ counterparty, source,
+ testChannel.CounterpartyClientID, exported.Tendermint,
+ )
+}
diff --git a/testing/mock/README.md b/testing/mock/README.md
new file mode 100644
index 00000000..5da403f9
--- /dev/null
+++ b/testing/mock/README.md
@@ -0,0 +1,6 @@
+This package is only intended to be used for testing core IBC. In order to maintain secure
+testing, we need to do message passing and execution, which requires connecting an IBC application
+module that fulfills all the callbacks. We cannot connect to ibc-transfer, which does not support
+all channel types, so instead we create a mock application module which does nothing. It simply
+returns nil in all cases so no error ever occurs. It is intended to be as minimal and lightweight
+as possible and should never import simapp.
diff --git a/testing/mock/doc.go b/testing/mock/doc.go
new file mode 100644
index 00000000..eaaa42b2
--- /dev/null
+++ b/testing/mock/doc.go
@@ -0,0 +1,9 @@
+/*
+This package is only intended to be used for testing core IBC. In order to maintain secure
+testing, we need to do message passing and execution, which requires connecting an IBC application
+module that fulfills all the callbacks. We cannot connect to ibc-transfer, which does not support
+all channel types, so instead we create a mock application module which does nothing. It simply
+returns nil in all cases so no error ever occurs. It is intended to be as minimal and lightweight
+as possible and should never import simapp.
+*/
+package mock
diff --git a/testing/mock/mock.go b/testing/mock/mock.go
new file mode 100644
index 00000000..663497aa
--- /dev/null
+++ b/testing/mock/mock.go
@@ -0,0 +1,188 @@
+package mock
+
+import (
+ "encoding/json"
+
+ "github.com/cosmos/cosmos-sdk/types/module"
+
+ "github.com/grpc-ecosystem/grpc-gateway/runtime"
+
+ "github.com/gorilla/mux"
+ "github.com/spf13/cobra"
+
+ abci "github.com/tendermint/tendermint/abci/types"
+
+ "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/codec"
+ codectypes "github.com/cosmos/cosmos-sdk/codec/types"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ capabilitykeeper "github.com/cosmos/cosmos-sdk/x/capability/keeper"
+ capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"
+ channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+ host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+)
+
+const (
+ ModuleName = "mock"
+)
+
+var (
+ MockAcknowledgement = []byte("mock acknowledgement")
+ MockCommitment = []byte("mock packet commitment")
+)
+
+// AppModuleBasic is the mock AppModuleBasic.
+type AppModuleBasic struct{}
+
+// Name implements AppModuleBasic interface.
+func (AppModuleBasic) Name() string {
+ return ModuleName
+}
+
+// RegisterLegacyAminoCodec implements AppModuleBasic interface.
+func (AppModuleBasic) RegisterLegacyAminoCodec(*codec.LegacyAmino) {}
+
+// RegisterInterfaces implements AppModuleBasic interface.
+func (AppModuleBasic) RegisterInterfaces(registry codectypes.InterfaceRegistry) {}
+
+// DefaultGenesis implements AppModuleBasic interface.
+func (AppModuleBasic) DefaultGenesis(cdc codec.JSONMarshaler) json.RawMessage {
+ return nil
+}
+
+// ValidateGenesis implements the AppModuleBasic interface.
+func (AppModuleBasic) ValidateGenesis(codec.JSONMarshaler, client.TxEncodingConfig, json.RawMessage) error {
+ return nil
+}
+
+// RegisterRESTRoutes implements AppModuleBasic interface.
+func (AppModuleBasic) RegisterRESTRoutes(clientCtx client.Context, rtr *mux.Router) {}
+
+// RegisterGRPCGatewayRoutes implements AppModuleBasic interface.
+func (a AppModuleBasic) RegisterGRPCGatewayRoutes(_ client.Context, _ *runtime.ServeMux) {}
+
+// GetTxCmd implements AppModuleBasic interface.
+func (AppModuleBasic) GetTxCmd() *cobra.Command {
+ return nil
+}
+
+// GetQueryCmd implements AppModuleBasic interface.
+func (AppModuleBasic) GetQueryCmd() *cobra.Command {
+ return nil
+}
+
+// AppModule represents the AppModule for the mock module.
+type AppModule struct {
+ AppModuleBasic
+ scopedKeeper capabilitykeeper.ScopedKeeper
+}
+
+// NewAppModule returns a mock AppModule instance.
+func NewAppModule(sk capabilitykeeper.ScopedKeeper) AppModule {
+ return AppModule{
+ scopedKeeper: sk,
+ }
+}
+
+// RegisterInvariants implements the AppModule interface.
+func (AppModule) RegisterInvariants(ir sdk.InvariantRegistry) {}
+
+// Route implements the AppModule interface.
+func (am AppModule) Route() sdk.Route {
+ return sdk.NewRoute(ModuleName, nil)
+}
+
+// QuerierRoute implements the AppModule interface.
+func (AppModule) QuerierRoute() string {
+ return ""
+}
+
+// LegacyQuerierHandler implements the AppModule interface.
+func (am AppModule) LegacyQuerierHandler(*codec.LegacyAmino) sdk.Querier {
+ return nil
+}
+
+// RegisterServices implements the AppModule interface.
+func (am AppModule) RegisterServices(module.Configurator) {}
+
+// InitGenesis implements the AppModule interface.
+func (am AppModule) InitGenesis(ctx sdk.Context, cdc codec.JSONMarshaler, data json.RawMessage) []abci.ValidatorUpdate {
+ return []abci.ValidatorUpdate{}
+}
+
+// ExportGenesis implements the AppModule interface.
+func (am AppModule) ExportGenesis(ctx sdk.Context, cdc codec.JSONMarshaler) json.RawMessage {
+ return nil
+}
+
+// BeginBlock implements the AppModule interface
+func (am AppModule) BeginBlock(ctx sdk.Context, req abci.RequestBeginBlock) {
+}
+
+// EndBlock implements the AppModule interface
+func (am AppModule) EndBlock(ctx sdk.Context, req abci.RequestEndBlock) []abci.ValidatorUpdate {
+ return []abci.ValidatorUpdate{}
+}
+
+//____________________________________________________________________________
+
+// OnChanOpenInit implements the IBCModule interface.
+func (am AppModule) OnChanOpenInit(
+ ctx sdk.Context, _ channeltypes.Order, _ []string, portID string,
+ channelID string, chanCap *capabilitytypes.Capability, _ channeltypes.Counterparty, _ string,
+) error {
+ // Claim channel capability passed back by IBC module
+ if err := am.scopedKeeper.ClaimCapability(ctx, chanCap, host.ChannelCapabilityPath(portID, channelID)); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// OnChanOpenTry implements the IBCModule interface.
+func (am AppModule) OnChanOpenTry(
+ ctx sdk.Context, _ channeltypes.Order, _ []string, portID string,
+ channelID string, chanCap *capabilitytypes.Capability, _ channeltypes.Counterparty, _, _ string,
+) error {
+ // Claim channel capability passed back by IBC module
+ if err := am.scopedKeeper.ClaimCapability(ctx, chanCap, host.ChannelCapabilityPath(portID, channelID)); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// OnChanOpenAck implements the IBCModule interface.
+func (am AppModule) OnChanOpenAck(sdk.Context, string, string, string) error {
+ return nil
+}
+
+// OnChanOpenConfirm implements the IBCModule interface.
+func (am AppModule) OnChanOpenConfirm(sdk.Context, string, string) error {
+ return nil
+}
+
+// OnChanCloseInit implements the IBCModule interface.
+func (am AppModule) OnChanCloseInit(sdk.Context, string, string) error {
+ return nil
+}
+
+// OnChanCloseConfirm implements the IBCModule interface.
+func (am AppModule) OnChanCloseConfirm(sdk.Context, string, string) error {
+ return nil
+}
+
+// OnRecvPacket implements the IBCModule interface.
+func (am AppModule) OnRecvPacket(sdk.Context, channeltypes.Packet) (*sdk.Result, []byte, error) {
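+ // return the package-level MockAcknowledgement so tests can assert against a known value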
+ return nil, MockAcknowledgement, nil
+}
+
+// OnAcknowledgementPacket implements the IBCModule interface.
+func (am AppModule) OnAcknowledgementPacket(sdk.Context, channeltypes.Packet, []byte) (*sdk.Result, error) {
+ return nil, nil
+}
+
+// OnTimeoutPacket implements the IBCModule interface.
+func (am AppModule) OnTimeoutPacket(sdk.Context, channeltypes.Packet) (*sdk.Result, error) {
+ return nil, nil
+}
diff --git a/testing/mock/privval.go b/testing/mock/privval.go
new file mode 100644
index 00000000..fe46659b
--- /dev/null
+++ b/testing/mock/privval.go
@@ -0,0 +1,50 @@
+package mock
+
+import (
+ "github.com/tendermint/tendermint/crypto"
+ tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
+ tmtypes "github.com/tendermint/tendermint/types"
+
+ cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec"
+ "github.com/cosmos/cosmos-sdk/crypto/keys/ed25519"
+ cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types"
+)
+
+var _ tmtypes.PrivValidator = PV{}
+
+// PV implements PrivValidator without any safety or persistence.
+// Only use it for testing.
+type PV struct {
+ PrivKey cryptotypes.PrivKey
+}
+
+func NewPV() PV {
+ return PV{ed25519.GenPrivKey()}
+}
+
+// GetPubKey implements PrivValidator interface
+func (pv PV) GetPubKey() (crypto.PubKey, error) {
+ return cryptocodec.ToTmPubKeyInterface(pv.PrivKey.PubKey())
+}
+
+// SignVote implements PrivValidator interface
+func (pv PV) SignVote(chainID string, vote *tmproto.Vote) error {
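+ // sign the canonical vote sign bytes and attach the signature to the vote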
+ signBytes := tmtypes.VoteSignBytes(chainID, vote)
+ sig, err := pv.PrivKey.Sign(signBytes)
+ if err != nil {
+ return err
+ }
+ vote.Signature = sig
+ return nil
+}
+
+// SignProposal implements PrivValidator interface
+func (pv PV) SignProposal(chainID string, proposal *tmproto.Proposal) error {
+ signBytes := tmtypes.ProposalSignBytes(chainID, proposal)
+ sig, err := pv.PrivKey.Sign(signBytes)
+ if err != nil {
+ return err
+ }
+ proposal.Signature = sig
+ return nil
+}
diff --git a/testing/mock/privval_test.go b/testing/mock/privval_test.go
new file mode 100644
index 00000000..b9f0487a
--- /dev/null
+++ b/testing/mock/privval_test.go
@@ -0,0 +1,44 @@
+package mock_test
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+ tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
+ tmtypes "github.com/tendermint/tendermint/types"
+
+ "github.com/cosmos/cosmos-sdk/x/ibc/testing/mock"
+)
+
+const chainID = "testChain"
+
+func TestGetPubKey(t *testing.T) {
+ pv := mock.NewPV()
+ pk, err := pv.GetPubKey()
+ require.NoError(t, err)
+ require.Equal(t, "ed25519", pk.Type())
+}
+
+func TestSignVote(t *testing.T) {
+ pv := mock.NewPV()
+ pk, _ := pv.GetPubKey()
+
+ vote := &tmproto.Vote{Height: 2}
+ require.NoError(t, pv.SignVote(chainID, vote))
+
+ msg := tmtypes.VoteSignBytes(chainID, vote)
+ ok := pk.VerifySignature(msg, vote.Signature)
+ require.True(t, ok)
+}
+
+func TestSignProposal(t *testing.T) {
+ pv := mock.NewPV()
+ pk, _ := pv.GetPubKey()
+
+ proposal := &tmproto.Proposal{Round: 2}
+ require.NoError(t, pv.SignProposal(chainID, proposal))
+
+ msg := tmtypes.ProposalSignBytes(chainID, proposal)
+ ok := pk.VerifySignature(msg, proposal.Signature)
+ require.True(t, ok)
+}
diff --git a/testing/solomachine.go b/testing/solomachine.go
new file mode 100644
index 00000000..bee63785
--- /dev/null
+++ b/testing/solomachine.go
@@ -0,0 +1,321 @@
+package ibctesting
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/cosmos/cosmos-sdk/codec"
+ codectypes "github.com/cosmos/cosmos-sdk/codec/types"
+ kmultisig "github.com/cosmos/cosmos-sdk/crypto/keys/multisig"
+ "github.com/cosmos/cosmos-sdk/crypto/keys/secp256k1"
+ cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types"
+ "github.com/cosmos/cosmos-sdk/crypto/types/multisig"
+ "github.com/cosmos/cosmos-sdk/types/tx/signing"
+ clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
+ host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+ "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ solomachinetypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/06-solomachine/types"
+)
+
+var prefix = commitmenttypes.NewMerklePrefix([]byte("ibc"))
+
+// Solomachine is a testing helper used to simulate a counterparty
+// solo machine client.
+type Solomachine struct {
+ t *testing.T
+
+ cdc codec.BinaryMarshaler
+ ClientID string
+ PrivateKeys []cryptotypes.PrivKey // keys used for signing
+ PublicKeys []cryptotypes.PubKey // keys used for generating solo machine pub key
+ PublicKey cryptotypes.PubKey // key used for verification
+ Sequence uint64
+ Time uint64
+ Diversifier string
+}
+
+// NewSolomachine returns a new solomachine instance with `nKeys` generated
+// private/public key pairs and a sequence starting at 1. If nKeys is greater
+// than 1, a multisig public key is used.
+func NewSolomachine(t *testing.T, cdc codec.BinaryMarshaler, clientID, diversifier string, nKeys uint64) *Solomachine {
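+ // GenerateKeys returns a plain secp256k1 public key for a single key, or an amino multisig public key when nKeys > 1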
+ privKeys, pubKeys, pk := GenerateKeys(t, nKeys)
+
+ return &Solomachine{
+ t: t,
+ cdc: cdc,
+ ClientID: clientID,
+ PrivateKeys: privKeys,
+ PublicKeys: pubKeys,
+ PublicKey: pk,
+ Sequence: 1,
+ Time: 10,
+ Diversifier: diversifier,
+ }
+}
+
+// GenerateKeys generates a new set of secp256k1 private keys and public keys.
+// If the number of keys is greater than one, the returned public key is a
+// multisig public key. The private keys are used for signing, the individual
+// public keys are used to construct the returned public key, and the returned
+// public key is used for solo machine verification. The usage of secp256k1 is
+// entirely arbitrary. The key type can be swapped for any key type supported
+// by the PublicKey interface, if needed. The same is true for the amino based
+// Multisignature public key.
+func GenerateKeys(t *testing.T, n uint64) ([]cryptotypes.PrivKey, []cryptotypes.PubKey, cryptotypes.PubKey) {
+ require.NotEqual(t, uint64(0), n, "generation of zero keys is not allowed")
+
+ privKeys := make([]cryptotypes.PrivKey, n)
+ pubKeys := make([]cryptotypes.PubKey, n)
+ for i := uint64(0); i < n; i++ {
+ privKeys[i] = secp256k1.GenPrivKey()
+ pubKeys[i] = privKeys[i].PubKey()
+ }
+
+ var pk cryptotypes.PubKey
+ if len(privKeys) > 1 {
+ // generate multi sig pk
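+ // the multisig threshold is n, so every generated key must sign for verification to succeed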
+ pk = kmultisig.NewLegacyAminoPubKey(int(n), pubKeys)
+ } else {
+ pk = privKeys[0].PubKey()
+ }
+
+ return privKeys, pubKeys, pk
+}
+
+// ClientState returns a new solo machine ClientState instance. By default,
+// updates after a governance proposal are not allowed.
+func (solo *Solomachine) ClientState() *solomachinetypes.ClientState {
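+ // the final false argument disallows client updates after a governance proposal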
+ return solomachinetypes.NewClientState(solo.Sequence, solo.ConsensusState(), false)
+}
+
+// ConsensusState returns a new solo machine ConsensusState instance
+func (solo *Solomachine) ConsensusState() *solomachinetypes.ConsensusState {
+ publicKey, err := codectypes.NewAnyWithValue(solo.PublicKey)
+ require.NoError(solo.t, err)
+
+ return &solomachinetypes.ConsensusState{
+ PublicKey: publicKey,
+ Diversifier: solo.Diversifier,
+ Timestamp: solo.Time,
+ }
+}
+
+// GetHeight returns an exported.Height with Sequence as RevisionHeight
+func (solo *Solomachine) GetHeight() exported.Height {
+ return clienttypes.NewHeight(0, solo.Sequence)
+}
+
+// CreateHeader generates a new private/public key pair and creates the
+// necessary signature to construct a valid solo machine header.
+func (solo *Solomachine) CreateHeader() *solomachinetypes.Header {
+ // generate new private keys and signature for header
+ newPrivKeys, newPubKeys, newPubKey := GenerateKeys(solo.t, uint64(len(solo.PrivateKeys)))
+
+ publicKey, err := codectypes.NewAnyWithValue(newPubKey)
+ require.NoError(solo.t, err)
+
+ data := &solomachinetypes.HeaderData{
+ NewPubKey: publicKey,
+ NewDiversifier: solo.Diversifier,
+ }
+
+ dataBz, err := solo.cdc.MarshalBinaryBare(data)
+ require.NoError(solo.t, err)
+
+ signBytes := &solomachinetypes.SignBytes{
+ Sequence: solo.Sequence,
+ Timestamp: solo.Time,
+ Diversifier: solo.Diversifier,
+ DataType: solomachinetypes.HEADER,
+ Data: dataBz,
+ }
+
+ bz, err := solo.cdc.MarshalBinaryBare(signBytes)
+ require.NoError(solo.t, err)
+
+ sig := solo.GenerateSignature(bz)
+
+ header := &solomachinetypes.Header{
+ Sequence: solo.Sequence,
+ Timestamp: solo.Time,
+ Signature: sig,
+ NewPublicKey: publicKey,
+ NewDiversifier: solo.Diversifier,
+ }
+
+ // assumes successful header update
+ solo.Sequence++
+ solo.PrivateKeys = newPrivKeys
+ solo.PublicKeys = newPubKeys
+ solo.PublicKey = newPubKey
+
+ return header
+}
+
+// CreateMisbehaviour constructs testing misbehaviour for the solo machine client
+// by signing over two different data bytes at the same sequence.
+func (solo *Solomachine) CreateMisbehaviour() *solomachinetypes.Misbehaviour {
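+ // sign over client state data and consensus state data at the same sequence to produce conflicting signatures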
+ path := solo.GetClientStatePath("counterparty")
+ dataOne, err := solomachinetypes.ClientStateDataBytes(solo.cdc, path, solo.ClientState())
+ require.NoError(solo.t, err)
+
+ path = solo.GetConsensusStatePath("counterparty", clienttypes.NewHeight(0, 1))
+ dataTwo, err := solomachinetypes.ConsensusStateDataBytes(solo.cdc, path, solo.ConsensusState())
+ require.NoError(solo.t, err)
+
+ signBytes := &solomachinetypes.SignBytes{
+ Sequence: solo.Sequence,
+ Timestamp: solo.Time,
+ Diversifier: solo.Diversifier,
+ DataType: solomachinetypes.CLIENT,
+ Data: dataOne,
+ }
+
+ bz, err := solo.cdc.MarshalBinaryBare(signBytes)
+ require.NoError(solo.t, err)
+
+ sig := solo.GenerateSignature(bz)
+ signatureOne := solomachinetypes.SignatureAndData{
+ Signature: sig,
+ DataType: solomachinetypes.CLIENT,
+ Data: dataOne,
+ Timestamp: solo.Time,
+ }
+
+ // misbehaviour signatures can have different timestamps
+ solo.Time++
+
+ signBytes = &solomachinetypes.SignBytes{
+ Sequence: solo.Sequence,
+ Timestamp: solo.Time,
+ Diversifier: solo.Diversifier,
+ DataType: solomachinetypes.CONSENSUS,
+ Data: dataTwo,
+ }
+
+ bz, err = solo.cdc.MarshalBinaryBare(signBytes)
+ require.NoError(solo.t, err)
+
+ sig = solo.GenerateSignature(bz)
+ signatureTwo := solomachinetypes.SignatureAndData{
+ Signature: sig,
+ DataType: solomachinetypes.CONSENSUS,
+ Data: dataTwo,
+ Timestamp: solo.Time,
+ }
+
+ return &solomachinetypes.Misbehaviour{
+ ClientId: solo.ClientID,
+ Sequence: solo.Sequence,
+ SignatureOne: &signatureOne,
+ SignatureTwo: &signatureTwo,
+ }
+}
+
+// GenerateSignature uses the stored private keys to generate a signature
+// over the sign bytes with each key. If the amount of keys is greater than
+// 1 then a multisig data type is returned.
+func (solo *Solomachine) GenerateSignature(signBytes []byte) []byte {
+ sigs := make([]signing.SignatureData, len(solo.PrivateKeys))
+ for i, key := range solo.PrivateKeys {
+ sig, err := key.Sign(signBytes)
+ require.NoError(solo.t, err)
+
+ sigs[i] = &signing.SingleSignatureData{
+ Signature: sig,
+ }
+ }
+
+ var sigData signing.SignatureData
+ if len(sigs) == 1 {
+ // single public key
+ sigData = sigs[0]
+ } else {
+ // generate multi signature data
+ multiSigData := multisig.NewMultisig(len(sigs))
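+ // add each signature at its corresponding key index so it lines up with the multisig public key ordering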
+ for i, sig := range sigs {
+ multisig.AddSignature(multiSigData, sig, i)
+ }
+
+ sigData = multiSigData
+ }
+
+ protoSigData := signing.SignatureDataToProto(sigData)
+ bz, err := solo.cdc.MarshalBinaryBare(protoSigData)
+ require.NoError(solo.t, err)
+
+ return bz
+}
+
+// GetClientStatePath returns the commitment path for the client state.
+func (solo *Solomachine) GetClientStatePath(counterpartyClientIdentifier string) commitmenttypes.MerklePath {
+ path, err := commitmenttypes.ApplyPrefix(prefix, commitmenttypes.NewMerklePath(host.FullClientStatePath(counterpartyClientIdentifier)))
+ require.NoError(solo.t, err)
+
+ return path
+}
+
+// GetConsensusStatePath returns the commitment path for the consensus state.
+func (solo *Solomachine) GetConsensusStatePath(counterpartyClientIdentifier string, consensusHeight exported.Height) commitmenttypes.MerklePath {
+ path, err := commitmenttypes.ApplyPrefix(prefix, commitmenttypes.NewMerklePath(host.FullConsensusStatePath(counterpartyClientIdentifier, consensusHeight)))
+ require.NoError(solo.t, err)
+
+ return path
+}
+
+// GetConnectionStatePath returns the commitment path for the connection state.
+func (solo *Solomachine) GetConnectionStatePath(connID string) commitmenttypes.MerklePath {
+ connectionPath := commitmenttypes.NewMerklePath(host.ConnectionPath(connID))
+ path, err := commitmenttypes.ApplyPrefix(prefix, connectionPath)
+ require.NoError(solo.t, err)
+
+ return path
+}
+
+// GetChannelStatePath returns the commitment path for the channel state.
+func (solo *Solomachine) GetChannelStatePath(portID, channelID string) commitmenttypes.MerklePath {
+ channelPath := commitmenttypes.NewMerklePath(host.ChannelPath(portID, channelID))
+ path, err := commitmenttypes.ApplyPrefix(prefix, channelPath)
+ require.NoError(solo.t, err)
+
+ return path
+}
+
+// GetPacketCommitmentPath returns the commitment path for a packet commitment.
+func (solo *Solomachine) GetPacketCommitmentPath(portID, channelID string) commitmenttypes.MerklePath {
+ commitmentPath := commitmenttypes.NewMerklePath(host.PacketCommitmentPath(portID, channelID, solo.Sequence))
+ path, err := commitmenttypes.ApplyPrefix(prefix, commitmentPath)
+ require.NoError(solo.t, err)
+
+ return path
+}
+
+// GetPacketAcknowledgementPath returns the commitment path for a packet acknowledgement.
+func (solo *Solomachine) GetPacketAcknowledgementPath(portID, channelID string) commitmenttypes.MerklePath {
+ ackPath := commitmenttypes.NewMerklePath(host.PacketAcknowledgementPath(portID, channelID, solo.Sequence))
+ path, err := commitmenttypes.ApplyPrefix(prefix, ackPath)
+ require.NoError(solo.t, err)
+
+ return path
+}
+
+// GetPacketReceiptPath returns the commitment path for a packet receipt
+// and for an absent receipt.
+func (solo *Solomachine) GetPacketReceiptPath(portID, channelID string) commitmenttypes.MerklePath {
+ receiptPath := commitmenttypes.NewMerklePath(host.PacketReceiptPath(portID, channelID, solo.Sequence))
+ path, err := commitmenttypes.ApplyPrefix(prefix, receiptPath)
+ require.NoError(solo.t, err)
+
+ return path
+}
+
+// GetNextSequenceRecvPath returns the commitment path for the next sequence recv counter.
+func (solo *Solomachine) GetNextSequenceRecvPath(portID, channelID string) commitmenttypes.MerklePath {
+ nextSequenceRecvPath := commitmenttypes.NewMerklePath(host.NextSequenceRecvPath(portID, channelID))
+ path, err := commitmenttypes.ApplyPrefix(prefix, nextSequenceRecvPath)
+ require.NoError(solo.t, err)
+
+ return path
+}
diff --git a/testing/types.go b/testing/types.go
new file mode 100644
index 00000000..16cda621
--- /dev/null
+++ b/testing/types.go
@@ -0,0 +1,44 @@
+package ibctesting
+
+import (
+ channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+)
+
+// TestConnection is a testing helper struct to keep track of the connectionID, source clientID,
+// counterparty clientID, and the next channel version used in creating and interacting with a
+// connection.
+type TestConnection struct {
+ ID string
+ ClientID string
+ CounterpartyClientID string
+ NextChannelVersion string
+ Channels []TestChannel
+}
+
+// FirstOrNextTestChannel returns the first test channel if it exists, otherwise it
+// returns the next test channel to be created. This function is expected to be used
+// when the caller does not know if the channel has or has not been created in app
+// state, but would still like to refer to it to test existence or non-existence.
+func (conn *TestConnection) FirstOrNextTestChannel(portID string) TestChannel {
+ if len(conn.Channels) > 0 {
+ return conn.Channels[0]
+ }
+ return TestChannel{
+ PortID: portID,
+ ID: channeltypes.FormatChannelIdentifier(0),
+ ClientID: conn.ClientID,
+ CounterpartyClientID: conn.CounterpartyClientID,
+ Version: conn.NextChannelVersion,
+ }
+}
+
+// TestChannel is a testing helper struct to keep track of the portID and channelID
+// used in creating and interacting with a channel. The clientID and counterparty
+// client ID are also tracked to cut down on querying and argument passing.
+type TestChannel struct {
+ PortID string
+ ID string
+ ClientID string
+ CounterpartyClientID string
+ Version string
+}
diff --git a/third_party/proto/confio/proofs.proto b/third_party/proto/confio/proofs.proto
new file mode 100644
index 00000000..da43503e
--- /dev/null
+++ b/third_party/proto/confio/proofs.proto
@@ -0,0 +1,234 @@
+syntax = "proto3";
+
+package ics23;
+option go_package = "github.com/confio/ics23/go";
+
+enum HashOp {
+ // NO_HASH is the default if no data passed. Note this is an illegal argument in some places.
+ NO_HASH = 0;
+ SHA256 = 1;
+ SHA512 = 2;
+ KECCAK = 3;
+ RIPEMD160 = 4;
+ BITCOIN = 5; // ripemd160(sha256(x))
+}
+
+/**
+LengthOp defines how to process the key and value of the LeafOp
+to include length information. After encoding the length with the given
+algorithm, the length will be prepended to the key and value bytes.
+(Each one with its own encoded length)
+*/
+enum LengthOp {
+ // NO_PREFIX don't include any length info
+ NO_PREFIX = 0;
+ // VAR_PROTO uses protobuf (and go-amino) varint encoding of the length
+ VAR_PROTO = 1;
+ // VAR_RLP uses rlp int encoding of the length
+ VAR_RLP = 2;
+ // FIXED32_BIG uses big-endian encoding of the length as a 32 bit integer
+ FIXED32_BIG = 3;
+ // FIXED32_LITTLE uses little-endian encoding of the length as a 32 bit integer
+ FIXED32_LITTLE = 4;
+ // FIXED64_BIG uses big-endian encoding of the length as a 64 bit integer
+ FIXED64_BIG = 5;
+ // FIXED64_LITTLE uses little-endian encoding of the length as a 64 bit integer
+ FIXED64_LITTLE = 6;
+ // REQUIRE_32_BYTES is like NONE, but will fail if the input is not exactly 32 bytes (sha256 output)
+ REQUIRE_32_BYTES = 7;
+ // REQUIRE_64_BYTES is like NONE, but will fail if the input is not exactly 64 bytes (sha512 output)
+ REQUIRE_64_BYTES = 8;
+}
+
+/**
+ExistenceProof takes a key and a value and a set of steps to perform on it.
+The result of performing all these steps will provide a "root hash", which can
+be compared to the value in a header.
+
+Since it is computationally infeasible to produce a hash collision for any of the used
+cryptographic hash functions, if someone can provide a series of operations to transform
+a given key and value into a root hash that matches some trusted root, this key and value
+must be in the referenced merkle tree.
+
+The only possible issue is malleability in LeafOp, such as providing extra prefix data,
+which should be controlled by a spec. Eg. with lengthOp as NONE,
+ prefix = FOO, key = BAR, value = CHOICE
+and
+ prefix = F, key = OOBAR, value = CHOICE
+would produce the same value.
+
+With LengthOp this is trickier but not impossible. Which is why the "leafPrefixEqual" field
+in the ProofSpec is valuable to prevent this mutability. And why all trees should
+length-prefix the data before hashing it.
+*/
+message ExistenceProof {
+ bytes key = 1;
+ bytes value = 2;
+ LeafOp leaf = 3;
+ repeated InnerOp path = 4;
+}
+
+/*
+NonExistenceProof takes a proof of two neighbors, one left of the desired key,
+one right of the desired key. If both proofs are valid AND they are neighbors,
+then there is no valid proof for the given key.
+*/
+message NonExistenceProof {
+ bytes key = 1; // TODO: remove this as unnecessary??? we prove a range
+ ExistenceProof left = 2;
+ ExistenceProof right = 3;
+}
+
+/*
+CommitmentProof is either an ExistenceProof or a NonExistenceProof, or a Batch of such messages
+*/
+message CommitmentProof {
+ oneof proof {
+ ExistenceProof exist = 1;
+ NonExistenceProof nonexist = 2;
+ BatchProof batch = 3;
+ CompressedBatchProof compressed = 4;
+ }
+}
+
+/**
+LeafOp represents the raw key-value data we wish to prove, and
+must be flexible to represent the internal transformation from
+the original key-value pairs into the basis hash, for many existing
+merkle trees.
+
+key and value are passed in. So that the signature of this operation is:
+ leafOp(key, value) -> output
+
+To process this, first prehash the keys and values if needed (ANY means no hash in this case):
+ hkey = prehashKey(key)
+ hvalue = prehashValue(value)
+
+Then combine the bytes, and hash it
+ output = hash(prefix || length(hkey) || hkey || length(hvalue) || hvalue)
+*/
+message LeafOp {
+ HashOp hash = 1;
+ HashOp prehash_key = 2;
+ HashOp prehash_value = 3;
+ LengthOp length = 4;
+ // prefix is a fixed bytes that may optionally be included at the beginning to differentiate
+ // a leaf node from an inner node.
+ bytes prefix = 5;
+}
+
+/**
+InnerOp represents a merkle-proof step that is not a leaf.
+It represents concatenating two children and hashing them to provide the next result.
+
+The result of the previous step is passed in, so the signature of this op is:
+ innerOp(child) -> output
+
+The result of applying InnerOp should be:
+ output = op.hash(op.prefix || child || op.suffix)
+
+ where the || operator is concatenation of binary data,
+and child is the result of hashing all the tree below this step.
+
+Any special data, like prepending child with the length, or prepending the entire operation with
+some value to differentiate from leaf nodes, should be included in prefix and suffix.
+If either of prefix or suffix is empty, we just treat it as an empty string
+*/
+message InnerOp {
+ HashOp hash = 1;
+ bytes prefix = 2;
+ bytes suffix = 3;
+}
+
+
+/**
+ProofSpec defines what the expected parameters are for a given proof type.
+This can be stored in the client and used to validate any incoming proofs.
+
+ verify(ProofSpec, Proof) -> Proof | Error
+
+As demonstrated in tests, if we don't fix the algorithm used to calculate the
+LeafHash for a given tree, there are many possible key-value pairs that can
+generate a given hash (by interpreting the preimage differently).
+We need this for proper security: the client must know a priori what
+tree format the server uses, not in code, but as a configuration object.
+*/
+message ProofSpec {
+ // any field in the ExistenceProof must be the same as in this spec.
+ // except Prefix, which is just the first bytes of prefix (spec can be longer)
+ LeafOp leaf_spec = 1;
+ InnerSpec inner_spec = 2;
+ // max_depth (if > 0) is the maximum number of InnerOps allowed (mainly for fixed-depth tries)
+ int32 max_depth = 3;
+ // min_depth (if > 0) is the minimum number of InnerOps allowed (mainly for fixed-depth tries)
+ int32 min_depth = 4;
+}
+
+/*
+InnerSpec contains all store-specific structure info to determine if two proofs from a
+given store are neighbors.
+
+This enables:
+
+ isLeftMost(spec: InnerSpec, op: InnerOp)
+ isRightMost(spec: InnerSpec, op: InnerOp)
+ isLeftNeighbor(spec: InnerSpec, left: InnerOp, right: InnerOp)
+*/
+message InnerSpec {
+ // Child order is the ordering of the child nodes, must count from 0
+ // iavl tree is [0, 1] (left then right)
+ // merk is [0, 2, 1] (left, right, here)
+ repeated int32 child_order = 1;
+ int32 child_size = 2;
+ int32 min_prefix_length = 3;
+ int32 max_prefix_length = 4;
+ // empty child is the prehash image that is used when one child is nil (eg. 20 bytes of 0)
+ bytes empty_child = 5;
+ // hash is the algorithm that must be used for each InnerOp
+ HashOp hash = 6;
+}
+
+/*
+BatchProof is a group of multiple proof types that can be compressed
+*/
+message BatchProof {
+ repeated BatchEntry entries = 1;
+}
+
+// Use BatchEntry not CommitmentProof, to avoid recursion
+message BatchEntry {
+ oneof proof {
+ ExistenceProof exist = 1;
+ NonExistenceProof nonexist = 2;
+ }
+}
+
+
+/****** all items here are compressed forms *******/
+
+message CompressedBatchProof {
+ repeated CompressedBatchEntry entries = 1;
+ repeated InnerOp lookup_inners = 2;
+}
+
+// Use BatchEntry not CommitmentProof, to avoid recursion
+message CompressedBatchEntry {
+ oneof proof {
+ CompressedExistenceProof exist = 1;
+ CompressedNonExistenceProof nonexist = 2;
+ }
+}
+
+message CompressedExistenceProof {
+ bytes key = 1;
+ bytes value = 2;
+ LeafOp leaf = 3;
+ // these are indexes into the lookup_inners table in CompressedBatchProof
+ repeated int32 path = 4;
+}
+
+message CompressedNonExistenceProof {
+ bytes key = 1; // TODO: remove this as unnecessary??? we prove a range
+ CompressedExistenceProof left = 2;
+ CompressedExistenceProof right = 3;
+}
diff --git a/third_party/proto/cosmos/base/query/v1beta1/pagination.proto b/third_party/proto/cosmos/base/query/v1beta1/pagination.proto
new file mode 100644
index 00000000..2a8cbcce
--- /dev/null
+++ b/third_party/proto/cosmos/base/query/v1beta1/pagination.proto
@@ -0,0 +1,50 @@
+syntax = "proto3";
+package cosmos.base.query.v1beta1;
+
+option go_package = "github.com/cosmos/cosmos-sdk/types/query";
+
+// PageRequest is to be embedded in gRPC request messages for efficient
+// pagination. Ex:
+//
+// message SomeRequest {
+// Foo some_parameter = 1;
+// PageRequest pagination = 2;
+// }
+message PageRequest {
+ // key is a value returned in PageResponse.next_key to begin
+ // querying the next page most efficiently. Only one of offset or key
+ // should be set.
+ bytes key = 1;
+
+ // offset is a numeric offset that can be used when key is unavailable.
+ // It is less efficient than using key. Only one of offset or key should
+ // be set.
+ uint64 offset = 2;
+
+ // limit is the total number of results to be returned in the result page.
+ // If left empty it will default to a value to be set by each app.
+ uint64 limit = 3;
+
+ // count_total is set to true to indicate that the result set should include
+ // a count of the total number of items available for pagination in UIs.
+ // count_total is only respected when offset is used. It is ignored when key
+ // is set.
+ bool count_total = 4;
+}
+
+// PageResponse is to be embedded in gRPC response messages where the
+// corresponding request message has used PageRequest.
+//
+// message SomeResponse {
+// repeated Bar results = 1;
+// PageResponse page = 2;
+// }
+message PageResponse {
+ // next_key is the key to be passed to PageRequest.key to
+ // query the next page most efficiently
+ bytes next_key = 1;
+
+ // total is total number of results available if PageRequest.count_total
+ // was set, its value is undefined otherwise
+ uint64 total = 2;
+}
diff --git a/third_party/proto/cosmos/base/v1beta1/coin.proto b/third_party/proto/cosmos/base/v1beta1/coin.proto
new file mode 100644
index 00000000..fab75284
--- /dev/null
+++ b/third_party/proto/cosmos/base/v1beta1/coin.proto
@@ -0,0 +1,40 @@
+syntax = "proto3";
+package cosmos.base.v1beta1;
+
+import "gogoproto/gogo.proto";
+
+option go_package = "github.com/cosmos/cosmos-sdk/types";
+option (gogoproto.goproto_stringer_all) = false;
+option (gogoproto.stringer_all) = false;
+
+// Coin defines a token with a denomination and an amount.
+//
+// NOTE: The amount field is an Int which implements the custom method
+// signatures required by gogoproto.
+message Coin {
+ option (gogoproto.equal) = true;
+
+ string denom = 1;
+ string amount = 2 [(gogoproto.customtype) = "Int", (gogoproto.nullable) = false];
+}
+
+// DecCoin defines a token with a denomination and a decimal amount.
+//
+// NOTE: The amount field is a Dec which implements the custom method
+// signatures required by gogoproto.
+message DecCoin {
+ option (gogoproto.equal) = true;
+
+ string denom = 1;
+ string amount = 2 [(gogoproto.customtype) = "Dec", (gogoproto.nullable) = false];
+}
+
+// IntProto defines a Protobuf wrapper around an Int object.
+message IntProto {
+ string int = 1 [(gogoproto.customtype) = "Int", (gogoproto.nullable) = false];
+}
+
+// DecProto defines a Protobuf wrapper around a Dec object.
+message DecProto {
+ string dec = 1 [(gogoproto.customtype) = "Dec", (gogoproto.nullable) = false];
+}
diff --git a/third_party/proto/gogoproto/gogo.proto b/third_party/proto/gogoproto/gogo.proto
new file mode 100644
index 00000000..49e78f99
--- /dev/null
+++ b/third_party/proto/gogoproto/gogo.proto
@@ -0,0 +1,145 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto2";
+package gogoproto;
+
+import "google/protobuf/descriptor.proto";
+
+option java_package = "com.google.protobuf";
+option java_outer_classname = "GoGoProtos";
+option go_package = "github.com/gogo/protobuf/gogoproto";
+
+extend google.protobuf.EnumOptions {
+ optional bool goproto_enum_prefix = 62001;
+ optional bool goproto_enum_stringer = 62021;
+ optional bool enum_stringer = 62022;
+ optional string enum_customname = 62023;
+ optional bool enumdecl = 62024;
+}
+
+extend google.protobuf.EnumValueOptions {
+ optional string enumvalue_customname = 66001;
+}
+
+extend google.protobuf.FileOptions {
+ optional bool goproto_getters_all = 63001;
+ optional bool goproto_enum_prefix_all = 63002;
+ optional bool goproto_stringer_all = 63003;
+ optional bool verbose_equal_all = 63004;
+ optional bool face_all = 63005;
+ optional bool gostring_all = 63006;
+ optional bool populate_all = 63007;
+ optional bool stringer_all = 63008;
+ optional bool onlyone_all = 63009;
+
+ optional bool equal_all = 63013;
+ optional bool description_all = 63014;
+ optional bool testgen_all = 63015;
+ optional bool benchgen_all = 63016;
+ optional bool marshaler_all = 63017;
+ optional bool unmarshaler_all = 63018;
+ optional bool stable_marshaler_all = 63019;
+
+ optional bool sizer_all = 63020;
+
+ optional bool goproto_enum_stringer_all = 63021;
+ optional bool enum_stringer_all = 63022;
+
+ optional bool unsafe_marshaler_all = 63023;
+ optional bool unsafe_unmarshaler_all = 63024;
+
+ optional bool goproto_extensions_map_all = 63025;
+ optional bool goproto_unrecognized_all = 63026;
+ optional bool gogoproto_import = 63027;
+ optional bool protosizer_all = 63028;
+ optional bool compare_all = 63029;
+ optional bool typedecl_all = 63030;
+ optional bool enumdecl_all = 63031;
+
+ optional bool goproto_registration = 63032;
+ optional bool messagename_all = 63033;
+
+ optional bool goproto_sizecache_all = 63034;
+ optional bool goproto_unkeyed_all = 63035;
+}
+
+extend google.protobuf.MessageOptions {
+ optional bool goproto_getters = 64001;
+ optional bool goproto_stringer = 64003;
+ optional bool verbose_equal = 64004;
+ optional bool face = 64005;
+ optional bool gostring = 64006;
+ optional bool populate = 64007;
+ optional bool stringer = 67008;
+ optional bool onlyone = 64009;
+
+ optional bool equal = 64013;
+ optional bool description = 64014;
+ optional bool testgen = 64015;
+ optional bool benchgen = 64016;
+ optional bool marshaler = 64017;
+ optional bool unmarshaler = 64018;
+ optional bool stable_marshaler = 64019;
+
+ optional bool sizer = 64020;
+
+ optional bool unsafe_marshaler = 64023;
+ optional bool unsafe_unmarshaler = 64024;
+
+ optional bool goproto_extensions_map = 64025;
+ optional bool goproto_unrecognized = 64026;
+
+ optional bool protosizer = 64028;
+ optional bool compare = 64029;
+
+ optional bool typedecl = 64030;
+
+ optional bool messagename = 64033;
+
+ optional bool goproto_sizecache = 64034;
+ optional bool goproto_unkeyed = 64035;
+}
+
+extend google.protobuf.FieldOptions {
+ optional bool nullable = 65001;
+ optional bool embed = 65002;
+ optional string customtype = 65003;
+ optional string customname = 65004;
+ optional string jsontag = 65005;
+ optional string moretags = 65006;
+ optional string casttype = 65007;
+ optional string castkey = 65008;
+ optional string castvalue = 65009;
+
+ optional bool stdtime = 65010;
+ optional bool stdduration = 65011;
+ optional bool wktpointer = 65012;
+
+ optional string castrepeated = 65013;
+}
diff --git a/third_party/proto/google/api/annotations.proto b/third_party/proto/google/api/annotations.proto
new file mode 100644
index 00000000..85c361b4
--- /dev/null
+++ b/third_party/proto/google/api/annotations.proto
@@ -0,0 +1,31 @@
+// Copyright (c) 2015, Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.api;
+
+import "google/api/http.proto";
+import "google/protobuf/descriptor.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations";
+option java_multiple_files = true;
+option java_outer_classname = "AnnotationsProto";
+option java_package = "com.google.api";
+option objc_class_prefix = "GAPI";
+
+extend google.protobuf.MethodOptions {
+ // See `HttpRule`.
+ HttpRule http = 72295728;
+}
diff --git a/third_party/proto/google/api/http.proto b/third_party/proto/google/api/http.proto
new file mode 100644
index 00000000..2bd3a19b
--- /dev/null
+++ b/third_party/proto/google/api/http.proto
@@ -0,0 +1,318 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.api;
+
+option cc_enable_arenas = true;
+option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations";
+option java_multiple_files = true;
+option java_outer_classname = "HttpProto";
+option java_package = "com.google.api";
+option objc_class_prefix = "GAPI";
+
+
+// Defines the HTTP configuration for an API service. It contains a list of
+// [HttpRule][google.api.HttpRule], each specifying the mapping of an RPC method
+// to one or more HTTP REST API methods.
+message Http {
+ // A list of HTTP configuration rules that apply to individual API methods.
+ //
+ // **NOTE:** All service configuration rules follow "last one wins" order.
+ repeated HttpRule rules = 1;
+
+ // When set to true, URL path parameters will be fully URI-decoded except in
+ // cases of single segment matches in reserved expansion, where "%2F" will be
+ // left encoded.
+ //
+ // The default behavior is to not decode RFC 6570 reserved characters in multi
+ // segment matches.
+ bool fully_decode_reserved_expansion = 2;
+}
+
+// `HttpRule` defines the mapping of an RPC method to one or more HTTP
+// REST API methods. The mapping specifies how different portions of the RPC
+// request message are mapped to URL path, URL query parameters, and
+// HTTP request body. The mapping is typically specified as an
+// `google.api.http` annotation on the RPC method,
+// see "google/api/annotations.proto" for details.
+//
+// The mapping consists of a field specifying the path template and
+// method kind. The path template can refer to fields in the request
+// message, as in the example below which describes a REST GET
+// operation on a resource collection of messages:
+//
+//
+// service Messaging {
+// rpc GetMessage(GetMessageRequest) returns (Message) {
+// option (google.api.http).get = "/v1/messages/{message_id}/{sub.subfield}";
+// }
+// }
+// message GetMessageRequest {
+// message SubMessage {
+// string subfield = 1;
+// }
+// string message_id = 1; // mapped to the URL
+// SubMessage sub = 2; // `sub.subfield` is url-mapped
+// }
+// message Message {
+// string text = 1; // content of the resource
+// }
+//
+// The same http annotation can alternatively be expressed inside the
+// `GRPC API Configuration` YAML file.
+//
+// http:
+// rules:
+// - selector: .Messaging.GetMessage
+// get: /v1/messages/{message_id}/{sub.subfield}
+//
+// This definition enables an automatic, bidirectional mapping of HTTP
+// JSON to RPC. Example:
+//
+// HTTP | RPC
+// -----|-----
+// `GET /v1/messages/123456/foo` | `GetMessage(message_id: "123456" sub: SubMessage(subfield: "foo"))`
+//
+// In general, not only fields but also field paths can be referenced
+// from a path pattern. Fields mapped to the path pattern cannot be
+// repeated and must have a primitive (non-message) type.
+//
+// Any fields in the request message which are not bound by the path
+// pattern automatically become (optional) HTTP query
+// parameters. Assume the following definition of the request message:
+//
+//
+// service Messaging {
+// rpc GetMessage(GetMessageRequest) returns (Message) {
+// option (google.api.http).get = "/v1/messages/{message_id}";
+// }
+// }
+// message GetMessageRequest {
+// message SubMessage {
+// string subfield = 1;
+// }
+// string message_id = 1; // mapped to the URL
+// int64 revision = 2; // becomes a parameter
+// SubMessage sub = 3; // `sub.subfield` becomes a parameter
+// }
+//
+//
+// This enables a HTTP JSON to RPC mapping as below:
+//
+// HTTP | RPC
+// -----|-----
+// `GET /v1/messages/123456?revision=2&sub.subfield=foo` | `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield: "foo"))`
+//
+// Note that fields which are mapped to HTTP parameters must have a
+// primitive type or a repeated primitive type. Message types are not
+// allowed. In the case of a repeated type, the parameter can be
+// repeated in the URL, as in `...?param=A&param=B`.
+//
+// For HTTP method kinds which allow a request body, the `body` field
+// specifies the mapping. Consider a REST update method on the
+// message resource collection:
+//
+//
+// service Messaging {
+// rpc UpdateMessage(UpdateMessageRequest) returns (Message) {
+// option (google.api.http) = {
+// put: "/v1/messages/{message_id}"
+// body: "message"
+// };
+// }
+// }
+// message UpdateMessageRequest {
+// string message_id = 1; // mapped to the URL
+// Message message = 2; // mapped to the body
+// }
+//
+//
+// The following HTTP JSON to RPC mapping is enabled, where the
+// representation of the JSON in the request body is determined by
+// protos JSON encoding:
+//
+// HTTP | RPC
+// -----|-----
+// `PUT /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: "123456" message { text: "Hi!" })`
+//
+// The special name `*` can be used in the body mapping to define that
+// every field not bound by the path template should be mapped to the
+// request body. This enables the following alternative definition of
+// the update method:
+//
+// service Messaging {
+// rpc UpdateMessage(Message) returns (Message) {
+// option (google.api.http) = {
+// put: "/v1/messages/{message_id}"
+// body: "*"
+// };
+// }
+// }
+// message Message {
+// string message_id = 1;
+// string text = 2;
+// }
+//
+//
+// The following HTTP JSON to RPC mapping is enabled:
+//
+// HTTP | RPC
+// -----|-----
+// `PUT /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: "123456" text: "Hi!")`
+//
+// Note that when using `*` in the body mapping, it is not possible to
+// have HTTP parameters, as all fields not bound by the path end in
+// the body. This makes this option more rarely used in practice when
+// defining REST APIs. The common usage of `*` is in custom methods
+// which don't use the URL at all for transferring data.
+//
+// It is possible to define multiple HTTP methods for one RPC by using
+// the `additional_bindings` option. Example:
+//
+// service Messaging {
+// rpc GetMessage(GetMessageRequest) returns (Message) {
+// option (google.api.http) = {
+// get: "/v1/messages/{message_id}"
+// additional_bindings {
+// get: "/v1/users/{user_id}/messages/{message_id}"
+// }
+// };
+// }
+// }
+// message GetMessageRequest {
+// string message_id = 1;
+// string user_id = 2;
+// }
+//
+//
+// This enables the following two alternative HTTP JSON to RPC
+// mappings:
+//
+// HTTP | RPC
+// -----|-----
+// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")`
+// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id: "123456")`
+//
+// # Rules for HTTP mapping
+//
+// The rules for mapping HTTP path, query parameters, and body fields
+// to the request message are as follows:
+//
+// 1. The `body` field specifies either `*` or a field path, or is
+// omitted. If omitted, it indicates there is no HTTP request body.
+// 2. Leaf fields (recursive expansion of nested messages in the
+// request) can be classified into three types:
+// (a) Matched in the URL template.
+// (b) Covered by body (if body is `*`, everything except (a) fields;
+// else everything under the body field)
+// (c) All other fields.
+// 3. URL query parameters found in the HTTP request are mapped to (c) fields.
+// 4. Any body sent with an HTTP request can contain only (b) fields.
+//
+// The syntax of the path template is as follows:
+//
+// Template = "/" Segments [ Verb ] ;
+// Segments = Segment { "/" Segment } ;
+// Segment = "*" | "**" | LITERAL | Variable ;
+// Variable = "{" FieldPath [ "=" Segments ] "}" ;
+// FieldPath = IDENT { "." IDENT } ;
+// Verb = ":" LITERAL ;
+//
+// The syntax `*` matches a single path segment. The syntax `**` matches zero
+// or more path segments, which must be the last part of the path except the
+// `Verb`. The syntax `LITERAL` matches literal text in the path.
+//
+// The syntax `Variable` matches part of the URL path as specified by its
+// template. A variable template must not contain other variables. If a variable
+// matches a single path segment, its template may be omitted, e.g. `{var}`
+// is equivalent to `{var=*}`.
+//
+// If a variable contains exactly one path segment, such as `"{var}"` or
+// `"{var=*}"`, when such a variable is expanded into a URL path, all characters
+// except `[-_.~0-9a-zA-Z]` are percent-encoded. Such variables show up in the
+// Discovery Document as `{var}`.
+//
+// If a variable contains one or more path segments, such as `"{var=foo/*}"`
+// or `"{var=**}"`, when such a variable is expanded into a URL path, all
+// characters except `[-_.~/0-9a-zA-Z]` are percent-encoded. Such variables
+// show up in the Discovery Document as `{+var}`.
+//
+// NOTE: While the single segment variable matches the semantics of
+// [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2
+// Simple String Expansion, the multi segment variable **does not** match
+// RFC 6570 Reserved Expansion. The reason is that the Reserved Expansion
+// does not expand special characters like `?` and `#`, which would lead
+// to invalid URLs.
+//
+// NOTE: the field paths in variables and in the `body` must not refer to
+// repeated fields or map fields.
+message HttpRule {
+ // Selects methods to which this rule applies.
+ //
+ // Refer to [selector][google.api.DocumentationRule.selector] for syntax details.
+ string selector = 1;
+
+ // Determines the URL pattern matched by this rule. This pattern can be
+ // used with any of the {get|put|post|delete|patch} methods. A custom method
+ // can be defined using the 'custom' field.
+ oneof pattern {
+ // Used for listing and getting information about resources.
+ string get = 2;
+
+ // Used for updating a resource.
+ string put = 3;
+
+ // Used for creating a resource.
+ string post = 4;
+
+ // Used for deleting a resource.
+ string delete = 5;
+
+ // Used for updating a resource.
+ string patch = 6;
+
+ // The custom pattern is used for specifying an HTTP method that is not
+ // included in the `pattern` field, such as HEAD, or "*" to leave the
+ // HTTP method unspecified for this rule. The wild-card rule is useful
+ // for services that provide content to Web (HTML) clients.
+ CustomHttpPattern custom = 8;
+ }
+
+ // The name of the request field whose value is mapped to the HTTP body, or
+ // `*` for mapping all fields not captured by the path pattern to the HTTP
+ // body. NOTE: the referred field must not be a repeated field and must be
+ // present at the top-level of request message type.
+ string body = 7;
+
+ // Optional. The name of the response field whose value is mapped to the HTTP
+ // body of response. Other response fields are ignored. When
+ // not set, the response message will be used as HTTP body of response.
+ string response_body = 12;
+
+ // Additional HTTP bindings for the selector. Nested bindings must
+ // not contain an `additional_bindings` field themselves (that is,
+ // the nesting may only be one level deep).
+ repeated HttpRule additional_bindings = 11;
+}
+
+// A custom pattern is used for defining custom HTTP verb.
+message CustomHttpPattern {
+ // The name of this custom HTTP verb.
+ string kind = 1;
+
+ // The path matched by this custom verb.
+ string path = 2;
+}
diff --git a/third_party/proto/google/protobuf/any.proto b/third_party/proto/google/protobuf/any.proto
new file mode 100644
index 00000000..1431810e
--- /dev/null
+++ b/third_party/proto/google/protobuf/any.proto
@@ -0,0 +1,161 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+import "gogoproto/gogo.proto";
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option go_package = "types";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "AnyProto";
+option java_multiple_files = true;
+option objc_class_prefix = "GPB";
+
+// `Any` contains an arbitrary serialized protocol buffer message along with a
+// URL that describes the type of the serialized message.
+//
+// Protobuf library provides support to pack/unpack Any values in the form
+// of utility functions or additional generated methods of the Any type.
+//
+// Example 1: Pack and unpack a message in C++.
+//
+// Foo foo = ...;
+// Any any;
+// any.PackFrom(foo);
+// ...
+// if (any.UnpackTo(&foo)) {
+// ...
+// }
+//
+// Example 2: Pack and unpack a message in Java.
+//
+// Foo foo = ...;
+// Any any = Any.pack(foo);
+// ...
+// if (any.is(Foo.class)) {
+// foo = any.unpack(Foo.class);
+// }
+//
+// Example 3: Pack and unpack a message in Python.
+//
+// foo = Foo(...)
+// any = Any()
+// any.Pack(foo)
+// ...
+// if any.Is(Foo.DESCRIPTOR):
+// any.Unpack(foo)
+// ...
+//
+// Example 4: Pack and unpack a message in Go
+//
+// foo := &pb.Foo{...}
+// any, err := ptypes.MarshalAny(foo)
+// ...
+// foo := &pb.Foo{}
+// if err := ptypes.UnmarshalAny(any, foo); err != nil {
+// ...
+// }
+//
+// The pack methods provided by protobuf library will by default use
+// 'type.googleapis.com/full.type.name' as the type URL and the unpack
+// methods only use the fully qualified type name after the last '/'
+// in the type URL, for example "foo.bar.com/x/y.z" will yield type
+// name "y.z".
+//
+//
+// JSON
+// ====
+// The JSON representation of an `Any` value uses the regular
+// representation of the deserialized, embedded message, with an
+// additional field `@type` which contains the type URL. Example:
+//
+// package google.profile;
+// message Person {
+// string first_name = 1;
+// string last_name = 2;
+// }
+//
+// {
+// "@type": "type.googleapis.com/google.profile.Person",
+// "firstName": ,
+// "lastName":
+// }
+//
+// If the embedded message type is well-known and has a custom JSON
+// representation, that representation will be embedded adding a field
+// `value` which holds the custom JSON in addition to the `@type`
+// field. Example (for message [google.protobuf.Duration][]):
+//
+// {
+// "@type": "type.googleapis.com/google.protobuf.Duration",
+// "value": "1.212s"
+// }
+//
+message Any {
+ // A URL/resource name that uniquely identifies the type of the serialized
+ // protocol buffer message. This string must contain at least
+ // one "/" character. The last segment of the URL's path must represent
+ // the fully qualified name of the type (as in
+ // `path/google.protobuf.Duration`). The name should be in a canonical form
+ // (e.g., leading "." is not accepted).
+ //
+ // In practice, teams usually precompile into the binary all types that they
+ // expect it to use in the context of Any. However, for URLs which use the
+ // scheme `http`, `https`, or no scheme, one can optionally set up a type
+ // server that maps type URLs to message definitions as follows:
+ //
+ // * If no scheme is provided, `https` is assumed.
+ // * An HTTP GET on the URL must yield a [google.protobuf.Type][]
+ // value in binary format, or produce an error.
+ // * Applications are allowed to cache lookup results based on the
+ // URL, or have them precompiled into a binary to avoid any
+ // lookup. Therefore, binary compatibility needs to be preserved
+ // on changes to types. (Use versioned type names to manage
+ // breaking changes.)
+ //
+ // Note: this functionality is not currently available in the official
+ // protobuf release, and it is not used for type URLs beginning with
+ // type.googleapis.com.
+ //
+ // Schemes other than `http`, `https` (or the empty scheme) might be
+ // used with implementation specific semantics.
+ //
+ string type_url = 1;
+
+ // Must be a valid serialized protocol buffer of the above specified type.
+ bytes value = 2;
+
+ option (gogoproto.typedecl) = false;
+}
+
+option (gogoproto.goproto_registration) = false;
diff --git a/third_party/proto/tendermint/crypto/keys.proto b/third_party/proto/tendermint/crypto/keys.proto
new file mode 100644
index 00000000..16fd7adf
--- /dev/null
+++ b/third_party/proto/tendermint/crypto/keys.proto
@@ -0,0 +1,17 @@
+syntax = "proto3";
+package tendermint.crypto;
+
+option go_package = "github.com/tendermint/tendermint/proto/tendermint/crypto";
+
+import "gogoproto/gogo.proto";
+
+// PublicKey defines the keys available for use with Tendermint Validators
+message PublicKey {
+ option (gogoproto.compare) = true;
+ option (gogoproto.equal) = true;
+
+ oneof sum {
+ bytes ed25519 = 1;
+ bytes secp256k1 = 2;
+ }
+}
diff --git a/third_party/proto/tendermint/crypto/proof.proto b/third_party/proto/tendermint/crypto/proof.proto
new file mode 100644
index 00000000..975df768
--- /dev/null
+++ b/third_party/proto/tendermint/crypto/proof.proto
@@ -0,0 +1,41 @@
+syntax = "proto3";
+package tendermint.crypto;
+
+option go_package = "github.com/tendermint/tendermint/proto/tendermint/crypto";
+
+import "gogoproto/gogo.proto";
+
+message Proof {
+ int64 total = 1;
+ int64 index = 2;
+ bytes leaf_hash = 3;
+ repeated bytes aunts = 4;
+}
+
+message ValueOp {
+ // Encoded in ProofOp.Key.
+ bytes key = 1;
+
+ // To encode in ProofOp.Data
+ Proof proof = 2;
+}
+
+message DominoOp {
+ string key = 1;
+ string input = 2;
+ string output = 3;
+}
+
+// ProofOp defines an operation used for calculating Merkle root
+// The data could be in arbitrary format, providing necessary data,
+// for example a neighbouring node hash
+message ProofOp {
+ string type = 1;
+ bytes key = 2;
+ bytes data = 3;
+}
+
+// ProofOps is Merkle proof defined by the list of ProofOps
+message ProofOps {
+ repeated ProofOp ops = 1 [(gogoproto.nullable) = false];
+}
diff --git a/third_party/proto/tendermint/libs/bits/types.proto b/third_party/proto/tendermint/libs/bits/types.proto
new file mode 100644
index 00000000..3111d113
--- /dev/null
+++ b/third_party/proto/tendermint/libs/bits/types.proto
@@ -0,0 +1,9 @@
+syntax = "proto3";
+package tendermint.libs.bits;
+
+option go_package = "github.com/tendermint/tendermint/proto/tendermint/libs/bits";
+
+message BitArray {
+ int64 bits = 1;
+ repeated uint64 elems = 2;
+}
diff --git a/third_party/proto/tendermint/types/types.proto b/third_party/proto/tendermint/types/types.proto
new file mode 100644
index 00000000..7f7ea74c
--- /dev/null
+++ b/third_party/proto/tendermint/types/types.proto
@@ -0,0 +1,157 @@
+syntax = "proto3";
+package tendermint.types;
+
+option go_package = "github.com/tendermint/tendermint/proto/tendermint/types";
+
+import "gogoproto/gogo.proto";
+import "google/protobuf/timestamp.proto";
+import "tendermint/crypto/proof.proto";
+import "tendermint/version/types.proto";
+import "tendermint/types/validator.proto";
+
+// BlockIDFlag indicates which BlockID the signature is for
+enum BlockIDFlag {
+ option (gogoproto.goproto_enum_stringer) = true;
+ option (gogoproto.goproto_enum_prefix) = false;
+
+ BLOCK_ID_FLAG_UNKNOWN = 0 [(gogoproto.enumvalue_customname) = "BlockIDFlagUnknown"];
+ BLOCK_ID_FLAG_ABSENT = 1 [(gogoproto.enumvalue_customname) = "BlockIDFlagAbsent"];
+ BLOCK_ID_FLAG_COMMIT = 2 [(gogoproto.enumvalue_customname) = "BlockIDFlagCommit"];
+ BLOCK_ID_FLAG_NIL = 3 [(gogoproto.enumvalue_customname) = "BlockIDFlagNil"];
+}
+
+// SignedMsgType is a type of signed message in the consensus.
+enum SignedMsgType {
+ option (gogoproto.goproto_enum_stringer) = true;
+ option (gogoproto.goproto_enum_prefix) = false;
+
+ SIGNED_MSG_TYPE_UNKNOWN = 0 [(gogoproto.enumvalue_customname) = "UnknownType"];
+ // Votes
+ SIGNED_MSG_TYPE_PREVOTE = 1 [(gogoproto.enumvalue_customname) = "PrevoteType"];
+ SIGNED_MSG_TYPE_PRECOMMIT = 2 [(gogoproto.enumvalue_customname) = "PrecommitType"];
+
+ // Proposals
+ SIGNED_MSG_TYPE_PROPOSAL = 32 [(gogoproto.enumvalue_customname) = "ProposalType"];
+}
+
+// PartSetHeader
+message PartSetHeader {
+ uint32 total = 1;
+ bytes hash = 2;
+}
+
+message Part {
+ uint32 index = 1;
+ bytes bytes = 2;
+ tendermint.crypto.Proof proof = 3 [(gogoproto.nullable) = false];
+}
+
+// BlockID
+message BlockID {
+ bytes hash = 1;
+ PartSetHeader part_set_header = 2 [(gogoproto.nullable) = false];
+}
+
+// --------------------------------
+
+// Header defines the structure of a Tendermint block header.
+message Header {
+ // basic block info
+ tendermint.version.Consensus version = 1 [(gogoproto.nullable) = false];
+ string chain_id = 2 [(gogoproto.customname) = "ChainID"];
+ int64 height = 3;
+ google.protobuf.Timestamp time = 4 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true];
+
+ // prev block info
+ BlockID last_block_id = 5 [(gogoproto.nullable) = false];
+
+ // hashes of block data
+ bytes last_commit_hash = 6; // commit from validators from the last block
+ bytes data_hash = 7; // transactions
+
+ // hashes from the app output from the prev block
+ bytes validators_hash = 8; // validators for the current block
+ bytes next_validators_hash = 9; // validators for the next block
+ bytes consensus_hash = 10; // consensus params for current block
+ bytes app_hash = 11; // state after txs from the previous block
+ bytes last_results_hash = 12; // root hash of all results from the txs from the previous block
+
+ // consensus info
+ bytes evidence_hash = 13; // evidence included in the block
+ bytes proposer_address = 14; // original proposer of the block
+}
+
+// Data contains the set of transactions included in the block
+message Data {
+ // Txs that will be applied by state @ block.Height+1.
+ // NOTE: not all txs here are valid. We're just agreeing on the order first.
+ // This means that block.AppHash does not include these txs.
+ repeated bytes txs = 1;
+}
+
+// Vote represents a prevote, precommit, or commit vote from validators for
+// consensus.
+message Vote {
+ SignedMsgType type = 1;
+ int64 height = 2;
+ int32 round = 3;
+ BlockID block_id = 4
+ [(gogoproto.nullable) = false, (gogoproto.customname) = "BlockID"]; // zero if vote is nil.
+ google.protobuf.Timestamp timestamp = 5
+ [(gogoproto.nullable) = false, (gogoproto.stdtime) = true];
+ bytes validator_address = 6;
+ int32 validator_index = 7;
+ bytes signature = 8;
+}
+
+// Commit contains the evidence that a block was committed by a set of validators.
+message Commit {
+ int64 height = 1;
+ int32 round = 2;
+ BlockID block_id = 3 [(gogoproto.nullable) = false, (gogoproto.customname) = "BlockID"];
+ repeated CommitSig signatures = 4 [(gogoproto.nullable) = false];
+}
+
+// CommitSig is a part of the Vote included in a Commit.
+message CommitSig {
+ BlockIDFlag block_id_flag = 1;
+ bytes validator_address = 2;
+ google.protobuf.Timestamp timestamp = 3
+ [(gogoproto.nullable) = false, (gogoproto.stdtime) = true];
+ bytes signature = 4;
+}
+
+message Proposal {
+ SignedMsgType type = 1;
+ int64 height = 2;
+ int32 round = 3;
+ int32 pol_round = 4;
+ BlockID block_id = 5 [(gogoproto.customname) = "BlockID", (gogoproto.nullable) = false];
+ google.protobuf.Timestamp timestamp = 6
+ [(gogoproto.nullable) = false, (gogoproto.stdtime) = true];
+ bytes signature = 7;
+}
+
+message SignedHeader {
+ Header header = 1;
+ Commit commit = 2;
+}
+
+message LightBlock {
+ SignedHeader signed_header = 1;
+ tendermint.types.ValidatorSet validator_set = 2;
+}
+
+message BlockMeta {
+ BlockID block_id = 1 [(gogoproto.customname) = "BlockID", (gogoproto.nullable) = false];
+ int64 block_size = 2;
+ Header header = 3 [(gogoproto.nullable) = false];
+ int64 num_txs = 4;
+}
+
+// TxProof represents a Merkle proof of the presence of a transaction in the Merkle tree.
+message TxProof {
+ bytes root_hash = 1;
+ bytes data = 2;
+ tendermint.crypto.Proof proof = 3;
+}
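
To make the relationships above concrete, the sketch below builds a BlockID and a single-signature Commit for it; all byte values are placeholders, and the generated package is assumed to be imported as tmproto (per the go_package option above):

    package example

    import (
        "time"

        tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
    )

    // exampleCommit ties together BlockID, CommitSig and Commit.
    func exampleCommit(height int64, blockHash, partSetHash, valAddr, sig []byte) tmproto.Commit {
        blockID := tmproto.BlockID{
            Hash:          blockHash,
            PartSetHeader: tmproto.PartSetHeader{Total: 1, Hash: partSetHash},
        }
        return tmproto.Commit{
            Height:  height,
            Round:   0,
            BlockID: blockID,
            Signatures: []tmproto.CommitSig{{
                BlockIdFlag:      tmproto.BlockIDFlagCommit, // this validator signed the BlockID above
                ValidatorAddress: valAddr,
                Timestamp:        time.Now().UTC(),
                Signature:        sig,
            }},
        }
    }
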
diff --git a/third_party/proto/tendermint/types/validator.proto b/third_party/proto/tendermint/types/validator.proto
new file mode 100644
index 00000000..49860b96
--- /dev/null
+++ b/third_party/proto/tendermint/types/validator.proto
@@ -0,0 +1,25 @@
+syntax = "proto3";
+package tendermint.types;
+
+option go_package = "github.com/tendermint/tendermint/proto/tendermint/types";
+
+import "gogoproto/gogo.proto";
+import "tendermint/crypto/keys.proto";
+
+message ValidatorSet {
+ repeated Validator validators = 1;
+ Validator proposer = 2;
+ int64 total_voting_power = 3;
+}
+
+message Validator {
+ bytes address = 1;
+ tendermint.crypto.PublicKey pub_key = 2 [(gogoproto.nullable) = false];
+ int64 voting_power = 3;
+ int64 proposer_priority = 4;
+}
+
+message SimpleValidator {
+ tendermint.crypto.PublicKey pub_key = 1;
+ int64 voting_power = 2;
+}
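
A short sketch of assembling a ValidatorSet from the messages above, again assuming the generated package is imported as tmproto; the proposer choice here is a placeholder (real code selects it by proposer priority):

    // newValidatorSet fills TotalVotingPower from the individual validators.
    func newValidatorSet(vals []*tmproto.Validator) tmproto.ValidatorSet {
        if len(vals) == 0 {
            return tmproto.ValidatorSet{}
        }
        var total int64
        for _, v := range vals {
            total += v.VotingPower
        }
        return tmproto.ValidatorSet{
            Validators:       vals,
            Proposer:         vals[0], // placeholder selection
            TotalVotingPower: total,
        }
    }
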
diff --git a/third_party/proto/tendermint/version/types.proto b/third_party/proto/tendermint/version/types.proto
new file mode 100644
index 00000000..6061868b
--- /dev/null
+++ b/third_party/proto/tendermint/version/types.proto
@@ -0,0 +1,24 @@
+syntax = "proto3";
+package tendermint.version;
+
+option go_package = "github.com/tendermint/tendermint/proto/tendermint/version";
+
+import "gogoproto/gogo.proto";
+
+// App includes the protocol and software version for the application.
+// This information is included in ResponseInfo. The App.Protocol can be
+// updated in ResponseEndBlock.
+message App {
+ uint64 protocol = 1;
+ string software = 2;
+}
+
+// Consensus captures the consensus rules for processing a block in the blockchain,
+// including all blockchain data structures and the rules of the application's
+// state transition machine.
+message Consensus {
+ option (gogoproto.equal) = true;
+
+ uint64 block = 1;
+ uint64 app = 2;
+}
From c725dc2cf4551a0eb5ca98a1e66761f4088807d9 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?colin=20axn=C3=A9r?=
<25233464+colin-axner@users.noreply.github.com>
Date: Mon, 22 Feb 2021 15:40:37 +0100
Subject: [PATCH 003/393] Fix code import names (#4)
* import name changes
* make proto-all
---
applications/transfer/types/genesis.pb.go | 443 -----
applications/transfer/types/query.pb.go | 1418 -----------------
applications/transfer/types/query.pb.gw.go | 326 ----
applications/transfer/types/transfer.pb.go | 909 -----------
applications/transfer/types/tx.pb.go | 804 ----------
.../transfer/client/cli/cli.go | 0
.../transfer/client/cli/query.go | 2 +-
.../transfer/client/cli/tx.go | 6 +-
{applications => apps}/transfer/handler.go | 2 +-
.../transfer/handler_test.go | 10 +-
.../transfer/keeper/MBT_README.md | 0
.../transfer/keeper/encoding.go | 2 +-
.../transfer/keeper/genesis.go | 2 +-
.../transfer/keeper/genesis_test.go | 2 +-
.../transfer/keeper/grpc_query.go | 2 +-
.../transfer/keeper/grpc_query_test.go | 2 +-
.../transfer/keeper/keeper.go | 6 +-
.../transfer/keeper/keeper_test.go | 4 +-
.../transfer/keeper/mbt_relay_test.go | 10 +-
.../model_based_tests/Test5Packets.json | 0
.../keeper/model_based_tests/Test5Packets.tla | 0
.../Test5PacketsAllDifferentPass.json | 0
.../Test5PacketsAllDifferentPass.tla | 0
.../TestOnRecvAcknowledgementErrorFail.json | 0
.../TestOnRecvAcknowledgementErrorFail.tla | 0
.../TestOnRecvAcknowledgementErrorPass.json | 0
.../TestOnRecvAcknowledgementErrorPass.tla | 0
.../TestOnRecvAcknowledgementResultFail.json | 0
.../TestOnRecvAcknowledgementResultFail.tla | 0
.../TestOnRecvAcknowledgementResultPass.json | 0
.../TestOnRecvAcknowledgementResultPass.tla | 0
.../TestOnRecvPacketFail.json | 0
.../TestOnRecvPacketFail.tla | 0
.../TestOnRecvPacketPass.json | 0
.../TestOnRecvPacketPass.tla | 0
.../model_based_tests/TestOnTimeoutFail.json | 0
.../model_based_tests/TestOnTimeoutFail.tla | 0
.../model_based_tests/TestOnTimeoutPass.json | 0
.../model_based_tests/TestOnTimeoutPass.tla | 0
.../TestSendTransferFail.json | 0
.../TestSendTransferFail.tla | 0
.../TestSendTransferPass.json | 0
.../TestSendTransferPass.tla | 0
.../model_based_tests/TestUnescrowTokens.json | 0
.../model_based_tests/TestUnescrowTokens.tla | 0
.../transfer/keeper/msg_server.go | 2 +-
.../transfer/keeper/params.go | 2 +-
.../transfer/keeper/params_test.go | 2 +-
.../transfer/keeper/relay.go | 8 +-
.../transfer/keeper/relay_model/account.tla | 0
.../keeper/relay_model/account_record.tla | 0
.../relay_model/apalache-to-relay-test.json | 0
.../relay_model/apalache-to-relay-test2.json | 0
.../transfer/keeper/relay_model/denom.tla | 0
.../keeper/relay_model/denom_record.tla | 0
.../keeper/relay_model/denom_record2.tla | 0
.../keeper/relay_model/denom_sequence.tla | 0
.../keeper/relay_model/identifiers.tla | 0
.../transfer/keeper/relay_model/relay.tla | 0
.../keeper/relay_model/relay_tests.tla | 0
.../transfer/keeper/relay_test.go | 12 +-
{applications => apps}/transfer/module.go | 14 +-
.../transfer/module_test.go | 10 +-
.../transfer/simulation/decoder.go | 2 +-
.../transfer/simulation/decoder_test.go | 4 +-
.../transfer/simulation/genesis.go | 2 +-
.../transfer/simulation/genesis_test.go | 4 +-
.../transfer/simulation/params.go | 2 +-
.../transfer/simulation/params_test.go | 2 +-
.../transfer/spec/01_concepts.md | 0
.../transfer/spec/02_state.md | 0
.../transfer/spec/03_state_transitions.md | 0
.../transfer/spec/04_messages.md | 0
.../transfer/spec/05_events.md | 0
.../transfer/spec/06_metrics.md | 0
.../transfer/spec/07_params.md | 0
.../transfer/spec/README.md | 0
.../transfer/types/codec.go | 0
{applications => apps}/transfer/types/coin.go | 0
.../transfer/types/errors.go | 0
.../transfer/types/events.go | 0
.../transfer/types/expected_keepers.go | 6 +-
.../transfer/types/genesis.go | 2 +-
.../transfer/types/genesis_test.go | 2 +-
{applications => apps}/transfer/types/keys.go | 0
.../transfer/types/keys_test.go | 2 +-
{applications => apps}/transfer/types/msgs.go | 4 +-
.../transfer/types/msgs_test.go | 2 +-
.../transfer/types/packet.go | 0
.../transfer/types/packet_test.go | 0
.../transfer/types/params.go | 0
.../transfer/types/params_test.go | 0
.../transfer/types/trace.go | 2 +-
.../transfer/types/trace_test.go | 0
core/02-client/abci.go | 4 +-
core/02-client/abci_test.go | 10 +-
core/02-client/client/cli/cli.go | 2 +-
core/02-client/client/cli/query.go | 6 +-
core/02-client/client/cli/tx.go | 4 +-
core/02-client/client/proposal_handler.go | 2 +-
core/02-client/client/utils/utils.go | 12 +-
core/02-client/genesis.go | 6 +-
core/02-client/keeper/client.go | 4 +-
core/02-client/keeper/client_test.go | 16 +-
core/02-client/keeper/encoding.go | 4 +-
core/02-client/keeper/grpc_query.go | 6 +-
core/02-client/keeper/grpc_query_test.go | 10 +-
core/02-client/keeper/keeper.go | 10 +-
core/02-client/keeper/keeper_test.go | 16 +-
core/02-client/keeper/params.go | 2 +-
core/02-client/keeper/params_test.go | 2 +-
core/02-client/keeper/proposal.go | 4 +-
core/02-client/keeper/proposal_test.go | 10 +-
core/02-client/module.go | 4 +-
core/02-client/proposal_handler.go | 4 +-
core/02-client/proposal_handler_test.go | 10 +-
core/02-client/simulation/decoder.go | 6 +-
core/02-client/simulation/decoder_test.go | 8 +-
core/02-client/simulation/genesis.go | 2 +-
core/02-client/types/client.go | 4 +-
core/02-client/types/client_test.go | 6 +-
core/02-client/types/codec.go | 2 +-
core/02-client/types/codec_test.go | 12 +-
core/02-client/types/encoding.go | 2 +-
core/02-client/types/events.go | 2 +-
core/02-client/types/genesis.go | 4 +-
core/02-client/types/genesis_test.go | 18 +-
core/02-client/types/height.go | 2 +-
core/02-client/types/height_test.go | 2 +-
core/02-client/types/keys.go | 2 +-
core/02-client/types/keys_test.go | 2 +-
core/02-client/types/msgs.go | 4 +-
core/02-client/types/msgs_test.go | 12 +-
core/02-client/types/params.go | 2 +-
core/02-client/types/params_test.go | 2 +-
core/02-client/types/proposal_test.go | 6 +-
core/02-client/types/query.go | 2 +-
core/03-connection/client/cli/cli.go | 2 +-
core/03-connection/client/cli/query.go | 6 +-
core/03-connection/client/cli/tx.go | 8 +-
core/03-connection/client/utils/utils.go | 14 +-
core/03-connection/genesis.go | 4 +-
core/03-connection/keeper/grpc_query.go | 6 +-
core/03-connection/keeper/grpc_query_test.go | 10 +-
core/03-connection/keeper/handshake.go | 8 +-
core/03-connection/keeper/handshake_test.go | 10 +-
core/03-connection/keeper/keeper.go | 10 +-
core/03-connection/keeper/keeper_test.go | 6 +-
core/03-connection/keeper/verify.go | 4 +-
core/03-connection/keeper/verify_test.go | 16 +-
core/03-connection/module.go | 4 +-
core/03-connection/simulation/decoder.go | 4 +-
core/03-connection/simulation/decoder_test.go | 6 +-
core/03-connection/simulation/genesis.go | 2 +-
core/03-connection/types/codec.go | 2 +-
core/03-connection/types/connection.go | 6 +-
core/03-connection/types/connection_test.go | 8 +-
core/03-connection/types/events.go | 2 +-
core/03-connection/types/expected_keepers.go | 2 +-
core/03-connection/types/genesis.go | 2 +-
core/03-connection/types/genesis_test.go | 6 +-
core/03-connection/types/keys.go | 2 +-
core/03-connection/types/keys_test.go | 2 +-
core/03-connection/types/msgs.go | 8 +-
core/03-connection/types/msgs_test.go | 10 +-
core/03-connection/types/query.go | 4 +-
core/03-connection/types/version.go | 2 +-
core/03-connection/types/version_test.go | 6 +-
core/04-channel/client/cli/cli.go | 2 +-
core/04-channel/client/cli/query.go | 6 +-
core/04-channel/client/cli/tx.go | 8 +-
core/04-channel/client/utils/utils.go | 12 +-
core/04-channel/genesis.go | 4 +-
core/04-channel/handler.go | 4 +-
core/04-channel/keeper/grpc_query.go | 8 +-
core/04-channel/keeper/grpc_query_test.go | 10 +-
core/04-channel/keeper/handshake.go | 10 +-
core/04-channel/keeper/handshake_test.go | 12 +-
core/04-channel/keeper/keeper.go | 12 +-
core/04-channel/keeper/keeper_test.go | 6 +-
core/04-channel/keeper/packet.go | 10 +-
core/04-channel/keeper/packet_test.go | 14 +-
core/04-channel/keeper/timeout.go | 8 +-
core/04-channel/keeper/timeout_test.go | 10 +-
core/04-channel/module.go | 4 +-
core/04-channel/simulation/decoder.go | 4 +-
core/04-channel/simulation/decoder_test.go | 6 +-
core/04-channel/simulation/genesis.go | 2 +-
core/04-channel/types/channel.go | 4 +-
core/04-channel/types/channel_test.go | 2 +-
core/04-channel/types/codec.go | 2 +-
core/04-channel/types/events.go | 2 +-
core/04-channel/types/expected_keepers.go | 4 +-
core/04-channel/types/genesis.go | 2 +-
core/04-channel/types/genesis_test.go | 2 +-
core/04-channel/types/keys.go | 2 +-
core/04-channel/types/keys_test.go | 2 +-
core/04-channel/types/msgs.go | 6 +-
core/04-channel/types/msgs_test.go | 8 +-
core/04-channel/types/packet.go | 6 +-
core/04-channel/types/packet_test.go | 4 +-
core/04-channel/types/query.go | 4 +-
core/05-port/keeper/keeper.go | 4 +-
core/05-port/keeper/keeper_test.go | 2 +-
core/05-port/types/module.go | 2 +-
core/23-commitment/types/codec.go | 2 +-
core/23-commitment/types/merkle.go | 2 +-
core/23-commitment/types/merkle_test.go | 2 +-
core/23-commitment/types/utils_test.go | 2 +-
core/24-host/keys.go | 2 +-
core/24-host/parse_test.go | 4 +-
core/client/cli/cli.go | 8 +-
core/client/query.go | 6 +-
core/genesis.go | 10 +-
core/genesis_test.go | 20 +-
core/handler.go | 8 +-
core/keeper/grpc_query.go | 6 +-
core/keeper/keeper.go | 14 +-
core/keeper/msg_server.go | 10 +-
core/keeper/msg_server_test.go | 18 +-
core/module.go | 18 +-
core/simulation/decoder.go | 10 +-
core/simulation/decoder_test.go | 12 +-
core/simulation/genesis.go | 16 +-
core/simulation/genesis_test.go | 6 +-
core/types/codec.go | 14 +-
core/types/genesis.go | 6 +-
core/types/query.go | 12 +-
light-clients/06-solomachine/module.go | 2 +-
.../06-solomachine/types/client_state.go | 8 +-
.../06-solomachine/types/client_state_test.go | 16 +-
light-clients/06-solomachine/types/codec.go | 4 +-
.../06-solomachine/types/codec_test.go | 10 +-
.../06-solomachine/types/consensus_state.go | 4 +-
.../types/consensus_state_test.go | 6 +-
light-clients/06-solomachine/types/header.go | 4 +-
.../06-solomachine/types/header_test.go | 6 +-
.../06-solomachine/types/misbehaviour.go | 6 +-
.../types/misbehaviour_handle.go | 4 +-
.../types/misbehaviour_handle_test.go | 8 +-
.../06-solomachine/types/misbehaviour_test.go | 6 +-
light-clients/06-solomachine/types/proof.go | 10 +-
.../06-solomachine/types/proof_test.go | 6 +-
.../06-solomachine/types/proposal_handle.go | 4 +-
.../types/proposal_handle_test.go | 8 +-
.../06-solomachine/types/solomachine.go | 2 +-
.../06-solomachine/types/solomachine_test.go | 8 +-
light-clients/06-solomachine/types/update.go | 4 +-
.../06-solomachine/types/update_test.go | 8 +-
light-clients/07-tendermint/module.go | 2 +-
.../07-tendermint/types/client_state.go | 12 +-
.../07-tendermint/types/client_state_test.go | 16 +-
light-clients/07-tendermint/types/codec.go | 2 +-
.../07-tendermint/types/consensus_state.go | 6 +-
.../types/consensus_state_test.go | 6 +-
light-clients/07-tendermint/types/genesis.go | 4 +-
.../07-tendermint/types/genesis_test.go | 6 +-
light-clients/07-tendermint/types/header.go | 6 +-
.../07-tendermint/types/header_test.go | 6 +-
.../07-tendermint/types/misbehaviour.go | 6 +-
.../types/misbehaviour_handle.go | 4 +-
.../types/misbehaviour_handle_test.go | 12 +-
.../07-tendermint/types/misbehaviour_test.go | 10 +-
.../07-tendermint/types/proposal_handle.go | 4 +-
.../types/proposal_handle_test.go | 8 +-
light-clients/07-tendermint/types/store.go | 6 +-
.../07-tendermint/types/store_test.go | 14 +-
.../07-tendermint/types/tendermint_test.go | 8 +-
light-clients/07-tendermint/types/update.go | 6 +-
.../07-tendermint/types/update_test.go | 10 +-
light-clients/07-tendermint/types/upgrade.go | 6 +-
.../07-tendermint/types/upgrade_test.go | 8 +-
light-clients/09-localhost/module.go | 2 +-
.../09-localhost/types/client_state.go | 10 +-
.../09-localhost/types/client_state_test.go | 16 +-
light-clients/09-localhost/types/codec.go | 2 +-
.../09-localhost/types/localhost_test.go | 4 +-
testing/chain.go | 20 +-
testing/chain_test.go | 4 +-
testing/coordinator.go | 6 +-
testing/mock/mock.go | 4 +-
testing/mock/privval_test.go | 2 +-
testing/solomachine.go | 10 +-
testing/types.go | 2 +-
284 files changed, 685 insertions(+), 4585 deletions(-)
delete mode 100644 applications/transfer/types/genesis.pb.go
delete mode 100644 applications/transfer/types/query.pb.go
delete mode 100644 applications/transfer/types/query.pb.gw.go
delete mode 100644 applications/transfer/types/transfer.pb.go
delete mode 100644 applications/transfer/types/tx.pb.go
rename {applications => apps}/transfer/client/cli/cli.go (100%)
rename {applications => apps}/transfer/client/cli/query.go (97%)
rename {applications => apps}/transfer/client/cli/tx.go (94%)
rename {applications => apps}/transfer/handler.go (90%)
rename {applications => apps}/transfer/handler_test.go (95%)
rename {applications => apps}/transfer/keeper/MBT_README.md (100%)
rename {applications => apps}/transfer/keeper/encoding.go (94%)
rename {applications => apps}/transfer/keeper/genesis.go (94%)
rename {applications => apps}/transfer/keeper/genesis_test.go (92%)
rename {applications => apps}/transfer/keeper/grpc_query.go (96%)
rename {applications => apps}/transfer/keeper/grpc_query_test.go (97%)
rename {applications => apps}/transfer/keeper/keeper.go (96%)
rename {applications => apps}/transfer/keeper/keeper_test.go (92%)
rename {applications => apps}/transfer/keeper/mbt_relay_test.go (97%)
rename {applications => apps}/transfer/keeper/model_based_tests/Test5Packets.json (100%)
rename {applications => apps}/transfer/keeper/model_based_tests/Test5Packets.tla (100%)
rename {applications => apps}/transfer/keeper/model_based_tests/Test5PacketsAllDifferentPass.json (100%)
rename {applications => apps}/transfer/keeper/model_based_tests/Test5PacketsAllDifferentPass.tla (100%)
rename {applications => apps}/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementErrorFail.json (100%)
rename {applications => apps}/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementErrorFail.tla (100%)
rename {applications => apps}/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementErrorPass.json (100%)
rename {applications => apps}/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementErrorPass.tla (100%)
rename {applications => apps}/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementResultFail.json (100%)
rename {applications => apps}/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementResultFail.tla (100%)
rename {applications => apps}/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementResultPass.json (100%)
rename {applications => apps}/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementResultPass.tla (100%)
rename {applications => apps}/transfer/keeper/model_based_tests/TestOnRecvPacketFail.json (100%)
rename {applications => apps}/transfer/keeper/model_based_tests/TestOnRecvPacketFail.tla (100%)
rename {applications => apps}/transfer/keeper/model_based_tests/TestOnRecvPacketPass.json (100%)
rename {applications => apps}/transfer/keeper/model_based_tests/TestOnRecvPacketPass.tla (100%)
rename {applications => apps}/transfer/keeper/model_based_tests/TestOnTimeoutFail.json (100%)
rename {applications => apps}/transfer/keeper/model_based_tests/TestOnTimeoutFail.tla (100%)
rename {applications => apps}/transfer/keeper/model_based_tests/TestOnTimeoutPass.json (100%)
rename {applications => apps}/transfer/keeper/model_based_tests/TestOnTimeoutPass.tla (100%)
rename {applications => apps}/transfer/keeper/model_based_tests/TestSendTransferFail.json (100%)
rename {applications => apps}/transfer/keeper/model_based_tests/TestSendTransferFail.tla (100%)
rename {applications => apps}/transfer/keeper/model_based_tests/TestSendTransferPass.json (100%)
rename {applications => apps}/transfer/keeper/model_based_tests/TestSendTransferPass.tla (100%)
rename {applications => apps}/transfer/keeper/model_based_tests/TestUnescrowTokens.json (100%)
rename {applications => apps}/transfer/keeper/model_based_tests/TestUnescrowTokens.tla (100%)
rename {applications => apps}/transfer/keeper/msg_server.go (95%)
rename {applications => apps}/transfer/keeper/params.go (92%)
rename {applications => apps}/transfer/keeper/params_test.go (86%)
rename {applications => apps}/transfer/keeper/relay.go (98%)
rename {applications => apps}/transfer/keeper/relay_model/account.tla (100%)
rename {applications => apps}/transfer/keeper/relay_model/account_record.tla (100%)
rename {applications => apps}/transfer/keeper/relay_model/apalache-to-relay-test.json (100%)
rename {applications => apps}/transfer/keeper/relay_model/apalache-to-relay-test2.json (100%)
rename {applications => apps}/transfer/keeper/relay_model/denom.tla (100%)
rename {applications => apps}/transfer/keeper/relay_model/denom_record.tla (100%)
rename {applications => apps}/transfer/keeper/relay_model/denom_record2.tla (100%)
rename {applications => apps}/transfer/keeper/relay_model/denom_sequence.tla (100%)
rename {applications => apps}/transfer/keeper/relay_model/identifiers.tla (100%)
rename {applications => apps}/transfer/keeper/relay_model/relay.tla (100%)
rename {applications => apps}/transfer/keeper/relay_model/relay_tests.tla (100%)
rename {applications => apps}/transfer/keeper/relay_test.go (97%)
rename {applications => apps}/transfer/module.go (96%)
rename {applications => apps}/transfer/module_test.go (95%)
rename {applications => apps}/transfer/simulation/decoder.go (93%)
rename {applications => apps}/transfer/simulation/decoder_test.go (90%)
rename {applications => apps}/transfer/simulation/genesis.go (95%)
rename {applications => apps}/transfer/simulation/genesis_test.go (94%)
rename {applications => apps}/transfer/simulation/params.go (93%)
rename {applications => apps}/transfer/simulation/params_test.go (91%)
rename {applications => apps}/transfer/spec/01_concepts.md (100%)
rename {applications => apps}/transfer/spec/02_state.md (100%)
rename {applications => apps}/transfer/spec/03_state_transitions.md (100%)
rename {applications => apps}/transfer/spec/04_messages.md (100%)
rename {applications => apps}/transfer/spec/05_events.md (100%)
rename {applications => apps}/transfer/spec/06_metrics.md (100%)
rename {applications => apps}/transfer/spec/07_params.md (100%)
rename {applications => apps}/transfer/spec/README.md (100%)
rename {applications => apps}/transfer/types/codec.go (100%)
rename {applications => apps}/transfer/types/coin.go (100%)
rename {applications => apps}/transfer/types/errors.go (100%)
rename {applications => apps}/transfer/types/events.go (100%)
rename {applications => apps}/transfer/types/expected_keepers.go (90%)
rename {applications => apps}/transfer/types/genesis.go (93%)
rename {applications => apps}/transfer/types/genesis_test.go (91%)
rename {applications => apps}/transfer/types/keys.go (100%)
rename {applications => apps}/transfer/types/keys_test.go (88%)
rename {applications => apps}/transfer/types/msgs.go (95%)
rename {applications => apps}/transfer/types/msgs_test.go (98%)
rename {applications => apps}/transfer/types/packet.go (100%)
rename {applications => apps}/transfer/types/packet_test.go (100%)
rename {applications => apps}/transfer/types/params.go (100%)
rename {applications => apps}/transfer/types/params_test.go (100%)
rename {applications => apps}/transfer/types/trace.go (99%)
rename {applications => apps}/transfer/types/trace_test.go (100%)
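
The rename block above means every Go import of the transfer module swaps the applications path segment for apps; a before/after sketch (the module prefix shown here is illustrative and not taken from this patch):

    package example

    // Illustrative only: assumes the repository's module path is
    // github.com/cosmos/ibc-go, which is not shown in this patch.
    // Before: import transfertypes "github.com/cosmos/ibc-go/applications/transfer/types"
    // After the rename, only the path segment changes:
    import transfertypes "github.com/cosmos/ibc-go/apps/transfer/types"

    // The package name and identifiers are unchanged, so existing call sites
    // such as transfertypes.NewMsgTransfer(...) only need the import path updated.
    var _ = transfertypes.NewMsgTransfer
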
diff --git a/applications/transfer/types/genesis.pb.go b/applications/transfer/types/genesis.pb.go
deleted file mode 100644
index 3ae0442f..00000000
--- a/applications/transfer/types/genesis.pb.go
+++ /dev/null
@@ -1,443 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: ibc/applications/transfer/v1/genesis.proto
-
-package types
-
-import (
- fmt "fmt"
- _ "github.com/gogo/protobuf/gogoproto"
- proto "github.com/gogo/protobuf/proto"
- io "io"
- math "math"
- math_bits "math/bits"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
-
-// GenesisState defines the ibc-transfer genesis state
-type GenesisState struct {
- PortId string `protobuf:"bytes,1,opt,name=port_id,json=portId,proto3" json:"port_id,omitempty" yaml:"port_id"`
- DenomTraces Traces `protobuf:"bytes,2,rep,name=denom_traces,json=denomTraces,proto3,castrepeated=Traces" json:"denom_traces" yaml:"denom_traces"`
- Params Params `protobuf:"bytes,3,opt,name=params,proto3" json:"params"`
-}
-
-func (m *GenesisState) Reset() { *m = GenesisState{} }
-func (m *GenesisState) String() string { return proto.CompactTextString(m) }
-func (*GenesisState) ProtoMessage() {}
-func (*GenesisState) Descriptor() ([]byte, []int) {
- return fileDescriptor_a4f788affd5bea89, []int{0}
-}
-func (m *GenesisState) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *GenesisState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_GenesisState.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *GenesisState) XXX_Merge(src proto.Message) {
- xxx_messageInfo_GenesisState.Merge(m, src)
-}
-func (m *GenesisState) XXX_Size() int {
- return m.Size()
-}
-func (m *GenesisState) XXX_DiscardUnknown() {
- xxx_messageInfo_GenesisState.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_GenesisState proto.InternalMessageInfo
-
-func (m *GenesisState) GetPortId() string {
- if m != nil {
- return m.PortId
- }
- return ""
-}
-
-func (m *GenesisState) GetDenomTraces() Traces {
- if m != nil {
- return m.DenomTraces
- }
- return nil
-}
-
-func (m *GenesisState) GetParams() Params {
- if m != nil {
- return m.Params
- }
- return Params{}
-}
-
-func init() {
- proto.RegisterType((*GenesisState)(nil), "ibc.applications.transfer.v1.GenesisState")
-}
-
-func init() {
- proto.RegisterFile("ibc/applications/transfer/v1/genesis.proto", fileDescriptor_a4f788affd5bea89)
-}
-
-var fileDescriptor_a4f788affd5bea89 = []byte{
- // 317 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0xca, 0x4c, 0x4a, 0xd6,
- 0x4f, 0x2c, 0x28, 0xc8, 0xc9, 0x4c, 0x4e, 0x2c, 0xc9, 0xcc, 0xcf, 0x2b, 0xd6, 0x2f, 0x29, 0x4a,
- 0xcc, 0x2b, 0x4e, 0x4b, 0x2d, 0xd2, 0x2f, 0x33, 0xd4, 0x4f, 0x4f, 0xcd, 0x4b, 0x2d, 0xce, 0x2c,
- 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x92, 0xc9, 0x4c, 0x4a, 0xd6, 0x43, 0x56, 0xab, 0x07,
- 0x53, 0xab, 0x57, 0x66, 0x28, 0x25, 0x92, 0x9e, 0x9f, 0x9e, 0x0f, 0x56, 0xa8, 0x0f, 0x62, 0x41,
- 0xf4, 0x48, 0x69, 0xe3, 0x35, 0x1f, 0xae, 0x1f, 0xac, 0x58, 0xe9, 0x33, 0x23, 0x17, 0x8f, 0x3b,
- 0xc4, 0xca, 0xe0, 0x92, 0xc4, 0x92, 0x54, 0x21, 0x6d, 0x2e, 0xf6, 0x82, 0xfc, 0xa2, 0x92, 0xf8,
- 0xcc, 0x14, 0x09, 0x46, 0x05, 0x46, 0x0d, 0x4e, 0x27, 0xa1, 0x4f, 0xf7, 0xe4, 0xf9, 0x2a, 0x13,
- 0x73, 0x73, 0xac, 0x94, 0xa0, 0x12, 0x4a, 0x41, 0x6c, 0x20, 0x96, 0x67, 0x8a, 0x50, 0x11, 0x17,
- 0x4f, 0x4a, 0x6a, 0x5e, 0x7e, 0x6e, 0x7c, 0x49, 0x51, 0x62, 0x72, 0x6a, 0xb1, 0x04, 0x93, 0x02,
- 0xb3, 0x06, 0xb7, 0x91, 0x86, 0x1e, 0x3e, 0x57, 0xeb, 0xb9, 0x80, 0x74, 0x84, 0x80, 0x34, 0x38,
- 0xa9, 0x9e, 0xb8, 0x27, 0xcf, 0xf0, 0xe9, 0x9e, 0xbc, 0x30, 0xc4, 0x7c, 0x64, 0xb3, 0x94, 0x56,
- 0xdd, 0x97, 0x67, 0x03, 0xab, 0x2a, 0x0e, 0xe2, 0x4e, 0x81, 0x6b, 0x29, 0x16, 0x72, 0xe2, 0x62,
- 0x2b, 0x48, 0x2c, 0x4a, 0xcc, 0x2d, 0x96, 0x60, 0x56, 0x60, 0xd4, 0xe0, 0x36, 0x52, 0xc1, 0x6f,
- 0x5b, 0x00, 0x58, 0xad, 0x13, 0x0b, 0xc8, 0xa6, 0x20, 0xa8, 0x4e, 0xa7, 0x88, 0x13, 0x8f, 0xe4,
- 0x18, 0x2f, 0x3c, 0x92, 0x63, 0x7c, 0xf0, 0x48, 0x8e, 0x71, 0xc2, 0x63, 0x39, 0x86, 0x0b, 0x8f,
- 0xe5, 0x18, 0x6e, 0x3c, 0x96, 0x63, 0x88, 0xb2, 0x4b, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b,
- 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0x2f, 0xce, 0xcd, 0x2f, 0x86, 0x52, 0xba, 0xc5, 0x29, 0xd9, 0xfa,
- 0x15, 0xfa, 0xb8, 0xc3, 0xb6, 0xa4, 0xb2, 0x20, 0xb5, 0x38, 0x89, 0x0d, 0x1c, 0xac, 0xc6, 0x80,
- 0x00, 0x00, 0x00, 0xff, 0xff, 0xda, 0xbb, 0x81, 0x1e, 0xe5, 0x01, 0x00, 0x00,
-}
-
-func (m *GenesisState) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *GenesisState) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- {
- size, err := m.Params.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenesis(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x1a
- if len(m.DenomTraces) > 0 {
- for iNdEx := len(m.DenomTraces) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.DenomTraces[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenesis(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- }
- if len(m.PortId) > 0 {
- i -= len(m.PortId)
- copy(dAtA[i:], m.PortId)
- i = encodeVarintGenesis(dAtA, i, uint64(len(m.PortId)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func encodeVarintGenesis(dAtA []byte, offset int, v uint64) int {
- offset -= sovGenesis(v)
- base := offset
- for v >= 1<<7 {
- dAtA[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
- }
- dAtA[offset] = uint8(v)
- return base
-}
-func (m *GenesisState) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.PortId)
- if l > 0 {
- n += 1 + l + sovGenesis(uint64(l))
- }
- if len(m.DenomTraces) > 0 {
- for _, e := range m.DenomTraces {
- l = e.Size()
- n += 1 + l + sovGenesis(uint64(l))
- }
- }
- l = m.Params.Size()
- n += 1 + l + sovGenesis(uint64(l))
- return n
-}
-
-func sovGenesis(x uint64) (n int) {
- return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozGenesis(x uint64) (n int) {
- return sovGenesis(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (m *GenesisState) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenesis
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: GenesisState: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: GenesisState: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field PortId", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenesis
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenesis
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenesis
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.PortId = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field DenomTraces", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenesis
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenesis
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenesis
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.DenomTraces = append(m.DenomTraces, DenomTrace{})
- if err := m.DenomTraces[len(m.DenomTraces)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenesis
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenesis
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenesis
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipGenesis(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenesis
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func skipGenesis(dAtA []byte) (n int, err error) {
- l := len(dAtA)
- iNdEx := 0
- depth := 0
- for iNdEx < l {
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowGenesis
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- wireType := int(wire & 0x7)
- switch wireType {
- case 0:
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowGenesis
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- iNdEx++
- if dAtA[iNdEx-1] < 0x80 {
- break
- }
- }
- case 1:
- iNdEx += 8
- case 2:
- var length int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowGenesis
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- length |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if length < 0 {
- return 0, ErrInvalidLengthGenesis
- }
- iNdEx += length
- case 3:
- depth++
- case 4:
- if depth == 0 {
- return 0, ErrUnexpectedEndOfGroupGenesis
- }
- depth--
- case 5:
- iNdEx += 4
- default:
- return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
- }
- if iNdEx < 0 {
- return 0, ErrInvalidLengthGenesis
- }
- if depth == 0 {
- return iNdEx, nil
- }
- }
- return 0, io.ErrUnexpectedEOF
-}
-
-var (
- ErrInvalidLengthGenesis = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowGenesis = fmt.Errorf("proto: integer overflow")
- ErrUnexpectedEndOfGroupGenesis = fmt.Errorf("proto: unexpected end of group")
-)
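
The sovGenesis and encodeVarintGenesis helpers deleted above are the standard protobuf base-128 varint routines; a small standalone sketch of the same encoding, for reference (the names here are not taken from the generated file):

    package example

    import "math/bits"

    // appendUvarint appends v in protobuf varint form: 7 data bits per byte,
    // with the high bit set on every byte except the last.
    func appendUvarint(buf []byte, v uint64) []byte {
        for v >= 1<<7 {
            buf = append(buf, byte(v&0x7f|0x80))
            v >>= 7
        }
        return append(buf, byte(v))
    }

    // uvarintLen mirrors sovGenesis: the number of bytes appendUvarint emits.
    func uvarintLen(v uint64) int {
        return (bits.Len64(v|1) + 6) / 7
    }
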
diff --git a/applications/transfer/types/query.pb.go b/applications/transfer/types/query.pb.go
deleted file mode 100644
index 1c1d6929..00000000
--- a/applications/transfer/types/query.pb.go
+++ /dev/null
@@ -1,1418 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: ibc/applications/transfer/v1/query.proto
-
-package types
-
-import (
- context "context"
- fmt "fmt"
- query "github.com/cosmos/cosmos-sdk/types/query"
- _ "github.com/gogo/protobuf/gogoproto"
- grpc1 "github.com/gogo/protobuf/grpc"
- proto "github.com/gogo/protobuf/proto"
- _ "google.golang.org/genproto/googleapis/api/annotations"
- grpc "google.golang.org/grpc"
- codes "google.golang.org/grpc/codes"
- status "google.golang.org/grpc/status"
- io "io"
- math "math"
- math_bits "math/bits"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
-
-// QueryDenomTraceRequest is the request type for the Query/DenomTrace RPC
-// method
-type QueryDenomTraceRequest struct {
- // hash (in hex format) of the denomination trace information.
- Hash string `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"`
-}
-
-func (m *QueryDenomTraceRequest) Reset() { *m = QueryDenomTraceRequest{} }
-func (m *QueryDenomTraceRequest) String() string { return proto.CompactTextString(m) }
-func (*QueryDenomTraceRequest) ProtoMessage() {}
-func (*QueryDenomTraceRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_a638e2800a01538c, []int{0}
-}
-func (m *QueryDenomTraceRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *QueryDenomTraceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_QueryDenomTraceRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *QueryDenomTraceRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_QueryDenomTraceRequest.Merge(m, src)
-}
-func (m *QueryDenomTraceRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *QueryDenomTraceRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_QueryDenomTraceRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_QueryDenomTraceRequest proto.InternalMessageInfo
-
-func (m *QueryDenomTraceRequest) GetHash() string {
- if m != nil {
- return m.Hash
- }
- return ""
-}
-
-// QueryDenomTraceResponse is the response type for the Query/DenomTrace RPC
-// method.
-type QueryDenomTraceResponse struct {
- // denom_trace returns the requested denomination trace information.
- DenomTrace *DenomTrace `protobuf:"bytes,1,opt,name=denom_trace,json=denomTrace,proto3" json:"denom_trace,omitempty"`
-}
-
-func (m *QueryDenomTraceResponse) Reset() { *m = QueryDenomTraceResponse{} }
-func (m *QueryDenomTraceResponse) String() string { return proto.CompactTextString(m) }
-func (*QueryDenomTraceResponse) ProtoMessage() {}
-func (*QueryDenomTraceResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_a638e2800a01538c, []int{1}
-}
-func (m *QueryDenomTraceResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *QueryDenomTraceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_QueryDenomTraceResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *QueryDenomTraceResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_QueryDenomTraceResponse.Merge(m, src)
-}
-func (m *QueryDenomTraceResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *QueryDenomTraceResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_QueryDenomTraceResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_QueryDenomTraceResponse proto.InternalMessageInfo
-
-func (m *QueryDenomTraceResponse) GetDenomTrace() *DenomTrace {
- if m != nil {
- return m.DenomTrace
- }
- return nil
-}
-
-// QueryConnectionsRequest is the request type for the Query/DenomTraces RPC
-// method
-type QueryDenomTracesRequest struct {
- // pagination defines an optional pagination for the request.
- Pagination *query.PageRequest `protobuf:"bytes,1,opt,name=pagination,proto3" json:"pagination,omitempty"`
-}
-
-func (m *QueryDenomTracesRequest) Reset() { *m = QueryDenomTracesRequest{} }
-func (m *QueryDenomTracesRequest) String() string { return proto.CompactTextString(m) }
-func (*QueryDenomTracesRequest) ProtoMessage() {}
-func (*QueryDenomTracesRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_a638e2800a01538c, []int{2}
-}
-func (m *QueryDenomTracesRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *QueryDenomTracesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_QueryDenomTracesRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *QueryDenomTracesRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_QueryDenomTracesRequest.Merge(m, src)
-}
-func (m *QueryDenomTracesRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *QueryDenomTracesRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_QueryDenomTracesRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_QueryDenomTracesRequest proto.InternalMessageInfo
-
-func (m *QueryDenomTracesRequest) GetPagination() *query.PageRequest {
- if m != nil {
- return m.Pagination
- }
- return nil
-}
-
-// QueryConnectionsResponse is the response type for the Query/DenomTraces RPC
-// method.
-type QueryDenomTracesResponse struct {
- // denom_traces returns all denominations trace information.
- DenomTraces Traces `protobuf:"bytes,1,rep,name=denom_traces,json=denomTraces,proto3,castrepeated=Traces" json:"denom_traces"`
- // pagination defines the pagination in the response.
- Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"`
-}
-
-func (m *QueryDenomTracesResponse) Reset() { *m = QueryDenomTracesResponse{} }
-func (m *QueryDenomTracesResponse) String() string { return proto.CompactTextString(m) }
-func (*QueryDenomTracesResponse) ProtoMessage() {}
-func (*QueryDenomTracesResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_a638e2800a01538c, []int{3}
-}
-func (m *QueryDenomTracesResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *QueryDenomTracesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_QueryDenomTracesResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *QueryDenomTracesResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_QueryDenomTracesResponse.Merge(m, src)
-}
-func (m *QueryDenomTracesResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *QueryDenomTracesResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_QueryDenomTracesResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_QueryDenomTracesResponse proto.InternalMessageInfo
-
-func (m *QueryDenomTracesResponse) GetDenomTraces() Traces {
- if m != nil {
- return m.DenomTraces
- }
- return nil
-}
-
-func (m *QueryDenomTracesResponse) GetPagination() *query.PageResponse {
- if m != nil {
- return m.Pagination
- }
- return nil
-}
-
-// QueryParamsRequest is the request type for the Query/Params RPC method.
-type QueryParamsRequest struct {
-}
-
-func (m *QueryParamsRequest) Reset() { *m = QueryParamsRequest{} }
-func (m *QueryParamsRequest) String() string { return proto.CompactTextString(m) }
-func (*QueryParamsRequest) ProtoMessage() {}
-func (*QueryParamsRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_a638e2800a01538c, []int{4}
-}
-func (m *QueryParamsRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *QueryParamsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_QueryParamsRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *QueryParamsRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_QueryParamsRequest.Merge(m, src)
-}
-func (m *QueryParamsRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *QueryParamsRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_QueryParamsRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_QueryParamsRequest proto.InternalMessageInfo
-
-// QueryParamsResponse is the response type for the Query/Params RPC method.
-type QueryParamsResponse struct {
- // params defines the parameters of the module.
- Params *Params `protobuf:"bytes,1,opt,name=params,proto3" json:"params,omitempty"`
-}
-
-func (m *QueryParamsResponse) Reset() { *m = QueryParamsResponse{} }
-func (m *QueryParamsResponse) String() string { return proto.CompactTextString(m) }
-func (*QueryParamsResponse) ProtoMessage() {}
-func (*QueryParamsResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_a638e2800a01538c, []int{5}
-}
-func (m *QueryParamsResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *QueryParamsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_QueryParamsResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *QueryParamsResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_QueryParamsResponse.Merge(m, src)
-}
-func (m *QueryParamsResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *QueryParamsResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_QueryParamsResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_QueryParamsResponse proto.InternalMessageInfo
-
-func (m *QueryParamsResponse) GetParams() *Params {
- if m != nil {
- return m.Params
- }
- return nil
-}
-
-func init() {
- proto.RegisterType((*QueryDenomTraceRequest)(nil), "ibc.applications.transfer.v1.QueryDenomTraceRequest")
- proto.RegisterType((*QueryDenomTraceResponse)(nil), "ibc.applications.transfer.v1.QueryDenomTraceResponse")
- proto.RegisterType((*QueryDenomTracesRequest)(nil), "ibc.applications.transfer.v1.QueryDenomTracesRequest")
- proto.RegisterType((*QueryDenomTracesResponse)(nil), "ibc.applications.transfer.v1.QueryDenomTracesResponse")
- proto.RegisterType((*QueryParamsRequest)(nil), "ibc.applications.transfer.v1.QueryParamsRequest")
- proto.RegisterType((*QueryParamsResponse)(nil), "ibc.applications.transfer.v1.QueryParamsResponse")
-}
-
-func init() {
- proto.RegisterFile("ibc/applications/transfer/v1/query.proto", fileDescriptor_a638e2800a01538c)
-}
-
-var fileDescriptor_a638e2800a01538c = []byte{
- // 528 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0x3f, 0x6f, 0xd3, 0x40,
- 0x14, 0xcf, 0x95, 0x12, 0x89, 0x17, 0xc4, 0x70, 0x54, 0x10, 0x59, 0x95, 0x5b, 0x59, 0x08, 0x02,
- 0x85, 0x3b, 0x5c, 0xa0, 0x30, 0xa0, 0x0e, 0x15, 0x02, 0xb1, 0x95, 0xc0, 0x80, 0x60, 0x40, 0x67,
- 0xe7, 0x70, 0x2c, 0x1a, 0x9f, 0xeb, 0xbb, 0x44, 0x54, 0x88, 0x85, 0x4f, 0x80, 0xc4, 0x8e, 0x98,
- 0xd9, 0x19, 0xd8, 0x18, 0x3b, 0x56, 0x62, 0x61, 0x02, 0x94, 0xf0, 0x41, 0x90, 0xef, 0xce, 0x8d,
- 0xa3, 0x20, 0x13, 0x4f, 0x39, 0x5d, 0xde, 0xef, 0xfd, 0xfe, 0xbc, 0xe7, 0x83, 0x4e, 0x1c, 0x84,
- 0x94, 0xa5, 0xe9, 0x5e, 0x1c, 0x32, 0x15, 0x8b, 0x44, 0x52, 0x95, 0xb1, 0x44, 0xbe, 0xe4, 0x19,
- 0x1d, 0xf9, 0x74, 0x7f, 0xc8, 0xb3, 0x03, 0x92, 0x66, 0x42, 0x09, 0xbc, 0x1a, 0x07, 0x21, 0x29,
- 0x57, 0x92, 0xa2, 0x92, 0x8c, 0x7c, 0x67, 0x25, 0x12, 0x91, 0xd0, 0x85, 0x34, 0x3f, 0x19, 0x8c,
- 0x73, 0x25, 0x14, 0x72, 0x20, 0x24, 0x0d, 0x98, 0xe4, 0xa6, 0x19, 0x1d, 0xf9, 0x01, 0x57, 0xcc,
- 0xa7, 0x29, 0x8b, 0xe2, 0x44, 0x37, 0xb2, 0xb5, 0x1b, 0x95, 0x4a, 0x8e, 0xb9, 0x4c, 0xf1, 0x6a,
- 0x24, 0x44, 0xb4, 0xc7, 0x29, 0x4b, 0x63, 0xca, 0x92, 0x44, 0x28, 0x2b, 0x49, 0xff, 0xeb, 0x5d,
- 0x85, 0x73, 0x8f, 0x72, 0xb2, 0x7b, 0x3c, 0x11, 0x83, 0x27, 0x19, 0x0b, 0x79, 0x97, 0xef, 0x0f,
- 0xb9, 0x54, 0x18, 0xc3, 0x72, 0x9f, 0xc9, 0x7e, 0x1b, 0xad, 0xa3, 0xce, 0xa9, 0xae, 0x3e, 0x7b,
- 0x3d, 0x38, 0x3f, 0x57, 0x2d, 0x53, 0x91, 0x48, 0x8e, 0x1f, 0x42, 0xab, 0x97, 0xdf, 0xbe, 0x50,
- 0xf9, 0xb5, 0x46, 0xb5, 0x36, 0x3b, 0xa4, 0x2a, 0x09, 0x52, 0x6a, 0x03, 0xbd, 0xe3, 0xb3, 0xc7,
- 0xe6, 0x58, 0x64, 0x21, 0xea, 0x3e, 0xc0, 0x34, 0x0d, 0x4b, 0x72, 0x91, 0x98, 0xe8, 0x48, 0x1e,
- 0x1d, 0x31, 0x73, 0xb0, 0xd1, 0x91, 0x5d, 0x16, 0x15, 0x86, 0xba, 0x25, 0xa4, 0xf7, 0x0d, 0x41,
- 0x7b, 0x9e, 0xc3, 0x5a, 0x79, 0x0e, 0xa7, 0x4b, 0x56, 0x64, 0x1b, 0xad, 0x9f, 0xa8, 0xe3, 0x65,
- 0xe7, 0xcc, 0xe1, 0xcf, 0xb5, 0xc6, 0xe7, 0x5f, 0x6b, 0x4d, 0xdb, 0xb7, 0x35, 0xf5, 0x26, 0xf1,
- 0x83, 0x19, 0x07, 0x4b, 0xda, 0xc1, 0xa5, 0xff, 0x3a, 0x30, 0xca, 0x66, 0x2c, 0xac, 0x00, 0xd6,
- 0x0e, 0x76, 0x59, 0xc6, 0x06, 0x45, 0x40, 0xde, 0x63, 0x38, 0x3b, 0x73, 0x6b, 0x2d, 0xdd, 0x85,
- 0x66, 0xaa, 0x6f, 0x6c, 0x66, 0x17, 0xaa, 0xcd, 0x58, 0xb4, 0xc5, 0x6c, 0x7e, 0x5c, 0x86, 0x93,
- 0xba, 0x2b, 0xfe, 0x8a, 0x00, 0xa6, 0x4e, 0xf1, 0xcd, 0xea, 0x36, 0xff, 0xde, 0x2c, 0xe7, 0x56,
- 0x4d, 0x94, 0xf1, 0xe0, 0x6d, 0xbf, 0xfb, 0xfe, 0xe7, 0xc3, 0xd2, 0x1d, 0xbc, 0x45, 0xab, 0xd6,
- 0xdf, 0x7c, 0x32, 0xe5, 0xf9, 0xd1, 0x37, 0xf9, 0xee, 0xbe, 0xc5, 0x5f, 0x10, 0xb4, 0x4a, 0xe3,
- 0xc6, 0xf5, 0x64, 0x14, 0x09, 0x3b, 0x5b, 0x75, 0x61, 0x56, 0xfe, 0x6d, 0x2d, 0xdf, 0xc7, 0xb4,
- 0xa6, 0x7c, 0xfc, 0x09, 0x41, 0xd3, 0x0c, 0x04, 0x5f, 0x5f, 0x80, 0x7b, 0x66, 0x1f, 0x1c, 0xbf,
- 0x06, 0xc2, 0x0a, 0xf5, 0xb5, 0xd0, 0x0d, 0x7c, 0x79, 0x01, 0xa1, 0x66, 0x41, 0x76, 0x9e, 0x1e,
- 0x8e, 0x5d, 0x74, 0x34, 0x76, 0xd1, 0xef, 0xb1, 0x8b, 0xde, 0x4f, 0xdc, 0xc6, 0xd1, 0xc4, 0x6d,
- 0xfc, 0x98, 0xb8, 0x8d, 0x67, 0xdb, 0x51, 0xac, 0xfa, 0xc3, 0x80, 0x84, 0x62, 0x40, 0xed, 0x0b,
- 0x67, 0x7e, 0xae, 0xc9, 0xde, 0x2b, 0xfa, 0xba, 0x82, 0x42, 0x1d, 0xa4, 0x5c, 0x06, 0x4d, 0xfd,
- 0x4c, 0xdd, 0xf8, 0x1b, 0x00, 0x00, 0xff, 0xff, 0xb2, 0x7f, 0xfe, 0xbd, 0x7d, 0x05, 0x00, 0x00,
-}
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ context.Context
-var _ grpc.ClientConn
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the grpc package it is being compiled against.
-const _ = grpc.SupportPackageIsVersion4
-
-// QueryClient is the client API for Query service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
-type QueryClient interface {
- // DenomTrace queries a denomination trace information.
- DenomTrace(ctx context.Context, in *QueryDenomTraceRequest, opts ...grpc.CallOption) (*QueryDenomTraceResponse, error)
- // DenomTraces queries all denomination traces.
- DenomTraces(ctx context.Context, in *QueryDenomTracesRequest, opts ...grpc.CallOption) (*QueryDenomTracesResponse, error)
- // Params queries all parameters of the ibc-transfer module.
- Params(ctx context.Context, in *QueryParamsRequest, opts ...grpc.CallOption) (*QueryParamsResponse, error)
-}
-
-type queryClient struct {
- cc grpc1.ClientConn
-}
-
-func NewQueryClient(cc grpc1.ClientConn) QueryClient {
- return &queryClient{cc}
-}
-
-func (c *queryClient) DenomTrace(ctx context.Context, in *QueryDenomTraceRequest, opts ...grpc.CallOption) (*QueryDenomTraceResponse, error) {
- out := new(QueryDenomTraceResponse)
- err := c.cc.Invoke(ctx, "/ibc.applications.transfer.v1.Query/DenomTrace", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *queryClient) DenomTraces(ctx context.Context, in *QueryDenomTracesRequest, opts ...grpc.CallOption) (*QueryDenomTracesResponse, error) {
- out := new(QueryDenomTracesResponse)
- err := c.cc.Invoke(ctx, "/ibc.applications.transfer.v1.Query/DenomTraces", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *queryClient) Params(ctx context.Context, in *QueryParamsRequest, opts ...grpc.CallOption) (*QueryParamsResponse, error) {
- out := new(QueryParamsResponse)
- err := c.cc.Invoke(ctx, "/ibc.applications.transfer.v1.Query/Params", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-// QueryServer is the server API for Query service.
-type QueryServer interface {
- // DenomTrace queries a denomination trace information.
- DenomTrace(context.Context, *QueryDenomTraceRequest) (*QueryDenomTraceResponse, error)
- // DenomTraces queries all denomination traces.
- DenomTraces(context.Context, *QueryDenomTracesRequest) (*QueryDenomTracesResponse, error)
- // Params queries all parameters of the ibc-transfer module.
- Params(context.Context, *QueryParamsRequest) (*QueryParamsResponse, error)
-}
-
-// UnimplementedQueryServer can be embedded to have forward compatible implementations.
-type UnimplementedQueryServer struct {
-}
-
-func (*UnimplementedQueryServer) DenomTrace(ctx context.Context, req *QueryDenomTraceRequest) (*QueryDenomTraceResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method DenomTrace not implemented")
-}
-func (*UnimplementedQueryServer) DenomTraces(ctx context.Context, req *QueryDenomTracesRequest) (*QueryDenomTracesResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method DenomTraces not implemented")
-}
-func (*UnimplementedQueryServer) Params(ctx context.Context, req *QueryParamsRequest) (*QueryParamsResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method Params not implemented")
-}
-
-func RegisterQueryServer(s grpc1.Server, srv QueryServer) {
- s.RegisterService(&_Query_serviceDesc, srv)
-}
-
-func _Query_DenomTrace_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(QueryDenomTraceRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(QueryServer).DenomTrace(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/ibc.applications.transfer.v1.Query/DenomTrace",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(QueryServer).DenomTrace(ctx, req.(*QueryDenomTraceRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Query_DenomTraces_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(QueryDenomTracesRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(QueryServer).DenomTraces(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/ibc.applications.transfer.v1.Query/DenomTraces",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(QueryServer).DenomTraces(ctx, req.(*QueryDenomTracesRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Query_Params_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(QueryParamsRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(QueryServer).Params(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/ibc.applications.transfer.v1.Query/Params",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(QueryServer).Params(ctx, req.(*QueryParamsRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-var _Query_serviceDesc = grpc.ServiceDesc{
- ServiceName: "ibc.applications.transfer.v1.Query",
- HandlerType: (*QueryServer)(nil),
- Methods: []grpc.MethodDesc{
- {
- MethodName: "DenomTrace",
- Handler: _Query_DenomTrace_Handler,
- },
- {
- MethodName: "DenomTraces",
- Handler: _Query_DenomTraces_Handler,
- },
- {
- MethodName: "Params",
- Handler: _Query_Params_Handler,
- },
- },
- Streams: []grpc.StreamDesc{},
- Metadata: "ibc/applications/transfer/v1/query.proto",
-}
-
-func (m *QueryDenomTraceRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *QueryDenomTraceRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *QueryDenomTraceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.Hash) > 0 {
- i -= len(m.Hash)
- copy(dAtA[i:], m.Hash)
- i = encodeVarintQuery(dAtA, i, uint64(len(m.Hash)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *QueryDenomTraceResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *QueryDenomTraceResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *QueryDenomTraceResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.DenomTrace != nil {
- {
- size, err := m.DenomTrace.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintQuery(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *QueryDenomTracesRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *QueryDenomTracesRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *QueryDenomTracesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.Pagination != nil {
- {
- size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintQuery(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *QueryDenomTracesResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *QueryDenomTracesResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *QueryDenomTracesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.Pagination != nil {
- {
- size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintQuery(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- if len(m.DenomTraces) > 0 {
- for iNdEx := len(m.DenomTraces) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.DenomTraces[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintQuery(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- }
- return len(dAtA) - i, nil
-}
-
-func (m *QueryParamsRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *QueryParamsRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *QueryParamsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- return len(dAtA) - i, nil
-}
-
-func (m *QueryParamsResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *QueryParamsResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *QueryParamsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.Params != nil {
- {
- size, err := m.Params.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintQuery(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func encodeVarintQuery(dAtA []byte, offset int, v uint64) int {
- offset -= sovQuery(v)
- base := offset
- for v >= 1<<7 {
- dAtA[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
- }
- dAtA[offset] = uint8(v)
- return base
-}
-func (m *QueryDenomTraceRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Hash)
- if l > 0 {
- n += 1 + l + sovQuery(uint64(l))
- }
- return n
-}
-
-func (m *QueryDenomTraceResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.DenomTrace != nil {
- l = m.DenomTrace.Size()
- n += 1 + l + sovQuery(uint64(l))
- }
- return n
-}
-
-func (m *QueryDenomTracesRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Pagination != nil {
- l = m.Pagination.Size()
- n += 1 + l + sovQuery(uint64(l))
- }
- return n
-}
-
-func (m *QueryDenomTracesResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if len(m.DenomTraces) > 0 {
- for _, e := range m.DenomTraces {
- l = e.Size()
- n += 1 + l + sovQuery(uint64(l))
- }
- }
- if m.Pagination != nil {
- l = m.Pagination.Size()
- n += 1 + l + sovQuery(uint64(l))
- }
- return n
-}
-
-func (m *QueryParamsRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- return n
-}
-
-func (m *QueryParamsResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Params != nil {
- l = m.Params.Size()
- n += 1 + l + sovQuery(uint64(l))
- }
- return n
-}
-
-func sovQuery(x uint64) (n int) {
- return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozQuery(x uint64) (n int) {
- return sovQuery(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (m *QueryDenomTraceRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowQuery
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: QueryDenomTraceRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: QueryDenomTraceRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowQuery
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthQuery
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthQuery
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Hash = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipQuery(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthQuery
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *QueryDenomTraceResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowQuery
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: QueryDenomTraceResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: QueryDenomTraceResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field DenomTrace", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowQuery
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthQuery
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthQuery
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.DenomTrace == nil {
- m.DenomTrace = &DenomTrace{}
- }
- if err := m.DenomTrace.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipQuery(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthQuery
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *QueryDenomTracesRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowQuery
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: QueryDenomTracesRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: QueryDenomTracesRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowQuery
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthQuery
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthQuery
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Pagination == nil {
- m.Pagination = &query.PageRequest{}
- }
- if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipQuery(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthQuery
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *QueryDenomTracesResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowQuery
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: QueryDenomTracesResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: QueryDenomTracesResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field DenomTraces", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowQuery
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthQuery
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthQuery
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.DenomTraces = append(m.DenomTraces, DenomTrace{})
- if err := m.DenomTraces[len(m.DenomTraces)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowQuery
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthQuery
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthQuery
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Pagination == nil {
- m.Pagination = &query.PageResponse{}
- }
- if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipQuery(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthQuery
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *QueryParamsRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowQuery
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: QueryParamsRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: QueryParamsRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- default:
- iNdEx = preIndex
- skippy, err := skipQuery(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthQuery
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *QueryParamsResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowQuery
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: QueryParamsResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: QueryParamsResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowQuery
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthQuery
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthQuery
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Params == nil {
- m.Params = &Params{}
- }
- if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipQuery(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthQuery
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func skipQuery(dAtA []byte) (n int, err error) {
- l := len(dAtA)
- iNdEx := 0
- depth := 0
- for iNdEx < l {
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowQuery
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- wireType := int(wire & 0x7)
- switch wireType {
- case 0:
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowQuery
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- iNdEx++
- if dAtA[iNdEx-1] < 0x80 {
- break
- }
- }
- case 1:
- iNdEx += 8
- case 2:
- var length int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowQuery
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- length |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if length < 0 {
- return 0, ErrInvalidLengthQuery
- }
- iNdEx += length
- case 3:
- depth++
- case 4:
- if depth == 0 {
- return 0, ErrUnexpectedEndOfGroupQuery
- }
- depth--
- case 5:
- iNdEx += 4
- default:
- return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
- }
- if iNdEx < 0 {
- return 0, ErrInvalidLengthQuery
- }
- if depth == 0 {
- return iNdEx, nil
- }
- }
- return 0, io.ErrUnexpectedEOF
-}
-
-var (
- ErrInvalidLengthQuery = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowQuery = fmt.Errorf("proto: integer overflow")
- ErrUnexpectedEndOfGroupQuery = fmt.Errorf("proto: unexpected end of group")
-)
diff --git a/applications/transfer/types/query.pb.gw.go b/applications/transfer/types/query.pb.gw.go
deleted file mode 100644
index 007ed668..00000000
--- a/applications/transfer/types/query.pb.gw.go
+++ /dev/null
@@ -1,326 +0,0 @@
-// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
-// source: ibc/applications/transfer/v1/query.proto
-
-/*
-Package types is a reverse proxy.
-
-It translates gRPC into RESTful JSON APIs.
-*/
-package types
-
-import (
- "context"
- "io"
- "net/http"
-
- "github.com/golang/protobuf/descriptor"
- "github.com/golang/protobuf/proto"
- "github.com/grpc-ecosystem/grpc-gateway/runtime"
- "github.com/grpc-ecosystem/grpc-gateway/utilities"
- "google.golang.org/grpc"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/status"
-)
-
-// Suppress "imported and not used" errors
-var _ codes.Code
-var _ io.Reader
-var _ status.Status
-var _ = runtime.String
-var _ = utilities.NewDoubleArray
-var _ = descriptor.ForMessage
-
-func request_Query_DenomTrace_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq QueryDenomTraceRequest
- var metadata runtime.ServerMetadata
-
- var (
- val string
- ok bool
- err error
- _ = err
- )
-
- val, ok = pathParams["hash"]
- if !ok {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "hash")
- }
-
- protoReq.Hash, err = runtime.String(val)
-
- if err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "hash", err)
- }
-
- msg, err := client.DenomTrace(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func local_request_Query_DenomTrace_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq QueryDenomTraceRequest
- var metadata runtime.ServerMetadata
-
- var (
- val string
- ok bool
- err error
- _ = err
- )
-
- val, ok = pathParams["hash"]
- if !ok {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "hash")
- }
-
- protoReq.Hash, err = runtime.String(val)
-
- if err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "hash", err)
- }
-
- msg, err := server.DenomTrace(ctx, &protoReq)
- return msg, metadata, err
-
-}
-
-var (
- filter_Query_DenomTraces_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)}
-)
-
-func request_Query_DenomTraces_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq QueryDenomTracesRequest
- var metadata runtime.ServerMetadata
-
- if err := req.ParseForm(); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
- if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_DenomTraces_0); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.DenomTraces(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func local_request_Query_DenomTraces_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq QueryDenomTracesRequest
- var metadata runtime.ServerMetadata
-
- if err := req.ParseForm(); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
- if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_DenomTraces_0); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := server.DenomTraces(ctx, &protoReq)
- return msg, metadata, err
-
-}
-
-func request_Query_Params_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq QueryParamsRequest
- var metadata runtime.ServerMetadata
-
- msg, err := client.Params(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func local_request_Query_Params_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq QueryParamsRequest
- var metadata runtime.ServerMetadata
-
- msg, err := server.Params(ctx, &protoReq)
- return msg, metadata, err
-
-}
-
-// RegisterQueryHandlerServer registers the http handlers for service Query to "mux".
-// UnaryRPC :call QueryServer directly.
-// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
-// Note that using this registration option will cause many gRPC library features (such as grpc.SendHeader, etc) to stop working. Consider using RegisterQueryHandlerFromEndpoint instead.
-func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryServer) error {
-
- mux.Handle("GET", pattern_Query_DenomTrace_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := local_request_Query_DenomTrace_0(rctx, inboundMarshaler, server, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Query_DenomTrace_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- mux.Handle("GET", pattern_Query_DenomTraces_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := local_request_Query_DenomTraces_0(rctx, inboundMarshaler, server, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Query_DenomTraces_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- mux.Handle("GET", pattern_Query_Params_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := local_request_Query_Params_0(rctx, inboundMarshaler, server, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Query_Params_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- return nil
-}
-
-// RegisterQueryHandlerFromEndpoint is same as RegisterQueryHandler but
-// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
-func RegisterQueryHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
- conn, err := grpc.Dial(endpoint, opts...)
- if err != nil {
- return err
- }
- defer func() {
- if err != nil {
- if cerr := conn.Close(); cerr != nil {
- grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
- }
- return
- }
- go func() {
- <-ctx.Done()
- if cerr := conn.Close(); cerr != nil {
- grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
- }
- }()
- }()
-
- return RegisterQueryHandler(ctx, mux, conn)
-}
-
-// RegisterQueryHandler registers the http handlers for service Query to "mux".
-// The handlers forward requests to the grpc endpoint over "conn".
-func RegisterQueryHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
- return RegisterQueryHandlerClient(ctx, mux, NewQueryClient(conn))
-}
-
-// RegisterQueryHandlerClient registers the http handlers for service Query
-// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "QueryClient".
-// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "QueryClient"
-// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
-// "QueryClient" to call the correct interceptors.
-func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, client QueryClient) error {
-
- mux.Handle("GET", pattern_Query_DenomTrace_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Query_DenomTrace_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Query_DenomTrace_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- mux.Handle("GET", pattern_Query_DenomTraces_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Query_DenomTraces_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Query_DenomTraces_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- mux.Handle("GET", pattern_Query_Params_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Query_Params_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Query_Params_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- return nil
-}
-
-var (
- pattern_Query_DenomTrace_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 1, 0, 4, 1, 5, 5}, []string{"ibc", "applications", "transfer", "v1beta1", "denom_traces", "hash"}, "", runtime.AssumeColonVerbOpt(false)))
-
- pattern_Query_DenomTraces_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"ibc", "applications", "transfer", "v1beta1", "denom_traces"}, "", runtime.AssumeColonVerbOpt(false)))
-
- pattern_Query_Params_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"ibc", "applications", "transfer", "v1beta1", "params"}, "", runtime.AssumeColonVerbOpt(false)))
-)
-
-var (
- forward_Query_DenomTrace_0 = runtime.ForwardResponseMessage
-
- forward_Query_DenomTraces_0 = runtime.ForwardResponseMessage
-
- forward_Query_Params_0 = runtime.ForwardResponseMessage
-)
diff --git a/applications/transfer/types/transfer.pb.go b/applications/transfer/types/transfer.pb.go
deleted file mode 100644
index 62734b85..00000000
--- a/applications/transfer/types/transfer.pb.go
+++ /dev/null
@@ -1,909 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: ibc/applications/transfer/v1/transfer.proto
-
-package types
-
-import (
- fmt "fmt"
- _ "github.com/gogo/protobuf/gogoproto"
- proto "github.com/gogo/protobuf/proto"
- io "io"
- math "math"
- math_bits "math/bits"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
-
-// FungibleTokenPacketData defines a struct for the packet payload
-// See FungibleTokenPacketData spec:
-// https://github.com/cosmos/ics/tree/master/spec/ics-020-fungible-token-transfer#data-structures
-type FungibleTokenPacketData struct {
- // the token denomination to be transferred
- Denom string `protobuf:"bytes,1,opt,name=denom,proto3" json:"denom,omitempty"`
- // the token amount to be transferred
- Amount uint64 `protobuf:"varint,2,opt,name=amount,proto3" json:"amount,omitempty"`
- // the sender address
- Sender string `protobuf:"bytes,3,opt,name=sender,proto3" json:"sender,omitempty"`
- // the recipient address on the destination chain
- Receiver string `protobuf:"bytes,4,opt,name=receiver,proto3" json:"receiver,omitempty"`
-}
-
-func (m *FungibleTokenPacketData) Reset() { *m = FungibleTokenPacketData{} }
-func (m *FungibleTokenPacketData) String() string { return proto.CompactTextString(m) }
-func (*FungibleTokenPacketData) ProtoMessage() {}
-func (*FungibleTokenPacketData) Descriptor() ([]byte, []int) {
- return fileDescriptor_5041673e96e97901, []int{0}
-}
-func (m *FungibleTokenPacketData) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *FungibleTokenPacketData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_FungibleTokenPacketData.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *FungibleTokenPacketData) XXX_Merge(src proto.Message) {
- xxx_messageInfo_FungibleTokenPacketData.Merge(m, src)
-}
-func (m *FungibleTokenPacketData) XXX_Size() int {
- return m.Size()
-}
-func (m *FungibleTokenPacketData) XXX_DiscardUnknown() {
- xxx_messageInfo_FungibleTokenPacketData.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_FungibleTokenPacketData proto.InternalMessageInfo
-
-func (m *FungibleTokenPacketData) GetDenom() string {
- if m != nil {
- return m.Denom
- }
- return ""
-}
-
-func (m *FungibleTokenPacketData) GetAmount() uint64 {
- if m != nil {
- return m.Amount
- }
- return 0
-}
-
-func (m *FungibleTokenPacketData) GetSender() string {
- if m != nil {
- return m.Sender
- }
- return ""
-}
-
-func (m *FungibleTokenPacketData) GetReceiver() string {
- if m != nil {
- return m.Receiver
- }
- return ""
-}
-
-// DenomTrace contains the base denomination for ICS20 fungible tokens and the
-// source tracing information path.
-type DenomTrace struct {
- // path defines the chain of port/channel identifiers used for tracing the
- // source of the fungible token.
- Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
- // base denomination of the relayed fungible token.
- BaseDenom string `protobuf:"bytes,2,opt,name=base_denom,json=baseDenom,proto3" json:"base_denom,omitempty"`
-}
-
-func (m *DenomTrace) Reset() { *m = DenomTrace{} }
-func (m *DenomTrace) String() string { return proto.CompactTextString(m) }
-func (*DenomTrace) ProtoMessage() {}
-func (*DenomTrace) Descriptor() ([]byte, []int) {
- return fileDescriptor_5041673e96e97901, []int{1}
-}
-func (m *DenomTrace) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DenomTrace) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_DenomTrace.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *DenomTrace) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DenomTrace.Merge(m, src)
-}
-func (m *DenomTrace) XXX_Size() int {
- return m.Size()
-}
-func (m *DenomTrace) XXX_DiscardUnknown() {
- xxx_messageInfo_DenomTrace.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DenomTrace proto.InternalMessageInfo
-
-func (m *DenomTrace) GetPath() string {
- if m != nil {
- return m.Path
- }
- return ""
-}
-
-func (m *DenomTrace) GetBaseDenom() string {
- if m != nil {
- return m.BaseDenom
- }
- return ""
-}
-
-// Params defines the set of IBC transfer parameters.
-// NOTE: To prevent a single token from being transferred, set the
-// TransfersEnabled parameter to true and then set the bank module's SendEnabled
-// parameter for the denomination to false.
-type Params struct {
- // send_enabled enables or disables all cross-chain token transfers from this
- // chain.
- SendEnabled bool `protobuf:"varint,1,opt,name=send_enabled,json=sendEnabled,proto3" json:"send_enabled,omitempty" yaml:"send_enabled"`
- // receive_enabled enables or disables all cross-chain token transfers to this
- // chain.
- ReceiveEnabled bool `protobuf:"varint,2,opt,name=receive_enabled,json=receiveEnabled,proto3" json:"receive_enabled,omitempty" yaml:"receive_enabled"`
-}
-
-func (m *Params) Reset() { *m = Params{} }
-func (m *Params) String() string { return proto.CompactTextString(m) }
-func (*Params) ProtoMessage() {}
-func (*Params) Descriptor() ([]byte, []int) {
- return fileDescriptor_5041673e96e97901, []int{2}
-}
-func (m *Params) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Params) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Params.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Params) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Params.Merge(m, src)
-}
-func (m *Params) XXX_Size() int {
- return m.Size()
-}
-func (m *Params) XXX_DiscardUnknown() {
- xxx_messageInfo_Params.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Params proto.InternalMessageInfo
-
-func (m *Params) GetSendEnabled() bool {
- if m != nil {
- return m.SendEnabled
- }
- return false
-}
-
-func (m *Params) GetReceiveEnabled() bool {
- if m != nil {
- return m.ReceiveEnabled
- }
- return false
-}
-
-func init() {
- proto.RegisterType((*FungibleTokenPacketData)(nil), "ibc.applications.transfer.v1.FungibleTokenPacketData")
- proto.RegisterType((*DenomTrace)(nil), "ibc.applications.transfer.v1.DenomTrace")
- proto.RegisterType((*Params)(nil), "ibc.applications.transfer.v1.Params")
-}
-
-func init() {
- proto.RegisterFile("ibc/applications/transfer/v1/transfer.proto", fileDescriptor_5041673e96e97901)
-}
-
-var fileDescriptor_5041673e96e97901 = []byte{
- // 362 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0x41, 0x6b, 0xe2, 0x40,
- 0x14, 0xc7, 0x8d, 0xeb, 0x8a, 0xce, 0x2e, 0xbb, 0x30, 0x2b, 0x1a, 0x64, 0x1b, 0x25, 0x27, 0xa1,
- 0x34, 0x41, 0x7a, 0xf3, 0xd0, 0x82, 0xb5, 0x3d, 0x4b, 0xf0, 0x50, 0x7a, 0x91, 0xc9, 0xe4, 0x35,
- 0x06, 0x93, 0x99, 0x30, 0x33, 0x4a, 0xa5, 0x9f, 0xa0, 0xb7, 0x7e, 0xac, 0x1e, 0x3d, 0xf6, 0x24,
- 0x45, 0xbf, 0x81, 0x9f, 0xa0, 0x64, 0x12, 0x82, 0x14, 0x7a, 0x9a, 0xf7, 0x7b, 0xef, 0xff, 0xff,
- 0xcf, 0x83, 0x87, 0xce, 0x23, 0x9f, 0xba, 0x24, 0x4d, 0xe3, 0x88, 0x12, 0x15, 0x71, 0x26, 0x5d,
- 0x25, 0x08, 0x93, 0x8f, 0x20, 0xdc, 0xf5, 0xb0, 0xac, 0x9d, 0x54, 0x70, 0xc5, 0xf1, 0xff, 0xc8,
- 0xa7, 0xce, 0xa9, 0xd8, 0x29, 0x05, 0xeb, 0x61, 0xb7, 0x15, 0xf2, 0x90, 0x6b, 0xa1, 0x9b, 0x55,
- 0xb9, 0xc7, 0x7e, 0x46, 0x9d, 0xbb, 0x15, 0x0b, 0x23, 0x3f, 0x86, 0x19, 0x5f, 0x02, 0x9b, 0x12,
- 0xba, 0x04, 0x35, 0x21, 0x8a, 0xe0, 0x16, 0xfa, 0x19, 0x00, 0xe3, 0x89, 0x69, 0xf4, 0x8d, 0x41,
- 0xd3, 0xcb, 0x01, 0xb7, 0x51, 0x9d, 0x24, 0x7c, 0xc5, 0x94, 0x59, 0xed, 0x1b, 0x83, 0x9a, 0x57,
- 0x50, 0xd6, 0x97, 0xc0, 0x02, 0x10, 0xe6, 0x0f, 0x2d, 0x2f, 0x08, 0x77, 0x51, 0x43, 0x00, 0x85,
- 0x68, 0x0d, 0xc2, 0xac, 0xe9, 0x49, 0xc9, 0xf6, 0x35, 0x42, 0x93, 0x2c, 0x74, 0x26, 0x08, 0x05,
- 0x8c, 0x51, 0x2d, 0x25, 0x6a, 0x51, 0x7c, 0xa7, 0x6b, 0x7c, 0x86, 0x90, 0x4f, 0x24, 0xcc, 0xf3,
- 0x45, 0xaa, 0x7a, 0xd2, 0xcc, 0x3a, 0xda, 0x67, 0xbf, 0x18, 0xa8, 0x3e, 0x25, 0x82, 0x24, 0x12,
- 0x8f, 0xd0, 0xef, 0xec, 0xc7, 0x39, 0x30, 0xe2, 0xc7, 0x10, 0xe8, 0x94, 0xc6, 0xb8, 0x73, 0xdc,
- 0xf5, 0xfe, 0x6d, 0x48, 0x12, 0x8f, 0xec, 0xd3, 0xa9, 0xed, 0xfd, 0xca, 0xf0, 0x36, 0x27, 0x7c,
- 0x83, 0xfe, 0x16, 0x3b, 0x95, 0xf6, 0xaa, 0xb6, 0x77, 0x8f, 0xbb, 0x5e, 0x3b, 0xb7, 0x7f, 0x11,
- 0xd8, 0xde, 0x9f, 0xa2, 0x53, 0x84, 0x8c, 0xef, 0xdf, 0xf6, 0x96, 0xb1, 0xdd, 0x5b, 0xc6, 0xc7,
- 0xde, 0x32, 0x5e, 0x0f, 0x56, 0x65, 0x7b, 0xb0, 0x2a, 0xef, 0x07, 0xab, 0xf2, 0x70, 0x15, 0x46,
- 0x6a, 0xb1, 0xf2, 0x1d, 0xca, 0x13, 0x97, 0x72, 0x99, 0x70, 0x59, 0x3c, 0x17, 0x32, 0x58, 0xba,
- 0x4f, 0xee, 0xf7, 0x37, 0x56, 0x9b, 0x14, 0xa4, 0x5f, 0xd7, 0xa7, 0xba, 0xfc, 0x0c, 0x00, 0x00,
- 0xff, 0xff, 0x46, 0x73, 0x85, 0x0b, 0x0d, 0x02, 0x00, 0x00,
-}
-
-func (m *FungibleTokenPacketData) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *FungibleTokenPacketData) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *FungibleTokenPacketData) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.Receiver) > 0 {
- i -= len(m.Receiver)
- copy(dAtA[i:], m.Receiver)
- i = encodeVarintTransfer(dAtA, i, uint64(len(m.Receiver)))
- i--
- dAtA[i] = 0x22
- }
- if len(m.Sender) > 0 {
- i -= len(m.Sender)
- copy(dAtA[i:], m.Sender)
- i = encodeVarintTransfer(dAtA, i, uint64(len(m.Sender)))
- i--
- dAtA[i] = 0x1a
- }
- if m.Amount != 0 {
- i = encodeVarintTransfer(dAtA, i, uint64(m.Amount))
- i--
- dAtA[i] = 0x10
- }
- if len(m.Denom) > 0 {
- i -= len(m.Denom)
- copy(dAtA[i:], m.Denom)
- i = encodeVarintTransfer(dAtA, i, uint64(len(m.Denom)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *DenomTrace) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *DenomTrace) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *DenomTrace) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.BaseDenom) > 0 {
- i -= len(m.BaseDenom)
- copy(dAtA[i:], m.BaseDenom)
- i = encodeVarintTransfer(dAtA, i, uint64(len(m.BaseDenom)))
- i--
- dAtA[i] = 0x12
- }
- if len(m.Path) > 0 {
- i -= len(m.Path)
- copy(dAtA[i:], m.Path)
- i = encodeVarintTransfer(dAtA, i, uint64(len(m.Path)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *Params) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Params) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Params) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.ReceiveEnabled {
- i--
- if m.ReceiveEnabled {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x10
- }
- if m.SendEnabled {
- i--
- if m.SendEnabled {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func encodeVarintTransfer(dAtA []byte, offset int, v uint64) int {
- offset -= sovTransfer(v)
- base := offset
- for v >= 1<<7 {
- dAtA[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
- }
- dAtA[offset] = uint8(v)
- return base
-}
-func (m *FungibleTokenPacketData) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Denom)
- if l > 0 {
- n += 1 + l + sovTransfer(uint64(l))
- }
- if m.Amount != 0 {
- n += 1 + sovTransfer(uint64(m.Amount))
- }
- l = len(m.Sender)
- if l > 0 {
- n += 1 + l + sovTransfer(uint64(l))
- }
- l = len(m.Receiver)
- if l > 0 {
- n += 1 + l + sovTransfer(uint64(l))
- }
- return n
-}
-
-func (m *DenomTrace) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Path)
- if l > 0 {
- n += 1 + l + sovTransfer(uint64(l))
- }
- l = len(m.BaseDenom)
- if l > 0 {
- n += 1 + l + sovTransfer(uint64(l))
- }
- return n
-}
-
-func (m *Params) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.SendEnabled {
- n += 2
- }
- if m.ReceiveEnabled {
- n += 2
- }
- return n
-}
-
-func sovTransfer(x uint64) (n int) {
- return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozTransfer(x uint64) (n int) {
- return sovTransfer(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (m *FungibleTokenPacketData) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTransfer
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: FungibleTokenPacketData: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: FungibleTokenPacketData: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Denom", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTransfer
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthTransfer
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthTransfer
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Denom = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Amount", wireType)
- }
- m.Amount = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTransfer
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Amount |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Sender", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTransfer
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthTransfer
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthTransfer
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Sender = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 4:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Receiver", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTransfer
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthTransfer
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthTransfer
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Receiver = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipTransfer(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthTransfer
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *DenomTrace) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTransfer
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: DenomTrace: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: DenomTrace: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTransfer
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthTransfer
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthTransfer
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Path = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field BaseDenom", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTransfer
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthTransfer
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthTransfer
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.BaseDenom = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipTransfer(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthTransfer
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Params) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTransfer
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Params: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Params: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field SendEnabled", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTransfer
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.SendEnabled = bool(v != 0)
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field ReceiveEnabled", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTransfer
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.ReceiveEnabled = bool(v != 0)
- default:
- iNdEx = preIndex
- skippy, err := skipTransfer(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthTransfer
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func skipTransfer(dAtA []byte) (n int, err error) {
- l := len(dAtA)
- iNdEx := 0
- depth := 0
- for iNdEx < l {
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowTransfer
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- wireType := int(wire & 0x7)
- switch wireType {
- case 0:
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowTransfer
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- iNdEx++
- if dAtA[iNdEx-1] < 0x80 {
- break
- }
- }
- case 1:
- iNdEx += 8
- case 2:
- var length int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowTransfer
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- length |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if length < 0 {
- return 0, ErrInvalidLengthTransfer
- }
- iNdEx += length
- case 3:
- depth++
- case 4:
- if depth == 0 {
- return 0, ErrUnexpectedEndOfGroupTransfer
- }
- depth--
- case 5:
- iNdEx += 4
- default:
- return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
- }
- if iNdEx < 0 {
- return 0, ErrInvalidLengthTransfer
- }
- if depth == 0 {
- return iNdEx, nil
- }
- }
- return 0, io.ErrUnexpectedEOF
-}
-
-var (
- ErrInvalidLengthTransfer = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowTransfer = fmt.Errorf("proto: integer overflow")
- ErrUnexpectedEndOfGroupTransfer = fmt.Errorf("proto: unexpected end of group")
-)
diff --git a/applications/transfer/types/tx.pb.go b/applications/transfer/types/tx.pb.go
deleted file mode 100644
index e3a630b4..00000000
--- a/applications/transfer/types/tx.pb.go
+++ /dev/null
@@ -1,804 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: ibc/applications/transfer/v1/tx.proto
-
-package types
-
-import (
- context "context"
- fmt "fmt"
- types "github.com/cosmos/cosmos-sdk/types"
- types1 "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- _ "github.com/gogo/protobuf/gogoproto"
- grpc1 "github.com/gogo/protobuf/grpc"
- proto "github.com/gogo/protobuf/proto"
- grpc "google.golang.org/grpc"
- codes "google.golang.org/grpc/codes"
- status "google.golang.org/grpc/status"
- io "io"
- math "math"
- math_bits "math/bits"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
-
-// MsgTransfer defines a msg to transfer fungible tokens (i.e Coins) between
-// ICS20 enabled chains. See ICS Spec here:
-// https://github.com/cosmos/ics/tree/master/spec/ics-020-fungible-token-transfer#data-structures
-type MsgTransfer struct {
- // the port on which the packet will be sent
- SourcePort string `protobuf:"bytes,1,opt,name=source_port,json=sourcePort,proto3" json:"source_port,omitempty" yaml:"source_port"`
- // the channel by which the packet will be sent
- SourceChannel string `protobuf:"bytes,2,opt,name=source_channel,json=sourceChannel,proto3" json:"source_channel,omitempty" yaml:"source_channel"`
- // the tokens to be transferred
- Token types.Coin `protobuf:"bytes,3,opt,name=token,proto3" json:"token"`
- // the sender address
- Sender string `protobuf:"bytes,4,opt,name=sender,proto3" json:"sender,omitempty"`
- // the recipient address on the destination chain
- Receiver string `protobuf:"bytes,5,opt,name=receiver,proto3" json:"receiver,omitempty"`
- // Timeout height relative to the current block height.
- // The timeout is disabled when set to 0.
- TimeoutHeight types1.Height `protobuf:"bytes,6,opt,name=timeout_height,json=timeoutHeight,proto3" json:"timeout_height" yaml:"timeout_height"`
- // Timeout timestamp (in nanoseconds) relative to the current block timestamp.
- // The timeout is disabled when set to 0.
- TimeoutTimestamp uint64 `protobuf:"varint,7,opt,name=timeout_timestamp,json=timeoutTimestamp,proto3" json:"timeout_timestamp,omitempty" yaml:"timeout_timestamp"`
-}
-
-func (m *MsgTransfer) Reset() { *m = MsgTransfer{} }
-func (m *MsgTransfer) String() string { return proto.CompactTextString(m) }
-func (*MsgTransfer) ProtoMessage() {}
-func (*MsgTransfer) Descriptor() ([]byte, []int) {
- return fileDescriptor_7401ed9bed2f8e09, []int{0}
-}
-func (m *MsgTransfer) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *MsgTransfer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_MsgTransfer.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *MsgTransfer) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MsgTransfer.Merge(m, src)
-}
-func (m *MsgTransfer) XXX_Size() int {
- return m.Size()
-}
-func (m *MsgTransfer) XXX_DiscardUnknown() {
- xxx_messageInfo_MsgTransfer.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MsgTransfer proto.InternalMessageInfo
-
-// MsgTransferResponse defines the Msg/Transfer response type.
-type MsgTransferResponse struct {
-}
-
-func (m *MsgTransferResponse) Reset() { *m = MsgTransferResponse{} }
-func (m *MsgTransferResponse) String() string { return proto.CompactTextString(m) }
-func (*MsgTransferResponse) ProtoMessage() {}
-func (*MsgTransferResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_7401ed9bed2f8e09, []int{1}
-}
-func (m *MsgTransferResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *MsgTransferResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_MsgTransferResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *MsgTransferResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MsgTransferResponse.Merge(m, src)
-}
-func (m *MsgTransferResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *MsgTransferResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_MsgTransferResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MsgTransferResponse proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*MsgTransfer)(nil), "ibc.applications.transfer.v1.MsgTransfer")
- proto.RegisterType((*MsgTransferResponse)(nil), "ibc.applications.transfer.v1.MsgTransferResponse")
-}
-
-func init() {
- proto.RegisterFile("ibc/applications/transfer/v1/tx.proto", fileDescriptor_7401ed9bed2f8e09)
-}
-
-var fileDescriptor_7401ed9bed2f8e09 = []byte{
- // 488 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0x41, 0x6f, 0xd3, 0x30,
- 0x14, 0xc7, 0x13, 0xd6, 0x95, 0xe2, 0x6a, 0x13, 0x18, 0x36, 0x65, 0xd5, 0x48, 0xaa, 0x48, 0x48,
- 0xe5, 0x80, 0xad, 0x0c, 0x21, 0xa4, 0x1d, 0x10, 0xca, 0x2e, 0x70, 0x98, 0x84, 0xa2, 0x1d, 0x10,
- 0x97, 0x91, 0x78, 0x26, 0xb1, 0xd6, 0xd8, 0x91, 0xed, 0x46, 0xdb, 0x37, 0xe0, 0xc8, 0x47, 0xd8,
- 0x99, 0x4f, 0xb2, 0xe3, 0x8e, 0x9c, 0x2a, 0xd4, 0x5e, 0x38, 0xf7, 0x13, 0xa0, 0xc4, 0x6e, 0x69,
- 0x0f, 0x20, 0x4e, 0xf1, 0x7b, 0xff, 0xdf, 0xf3, 0x5f, 0xcf, 0xef, 0x05, 0x3c, 0x63, 0x19, 0xc1,
- 0x69, 0x55, 0x8d, 0x19, 0x49, 0x35, 0x13, 0x5c, 0x61, 0x2d, 0x53, 0xae, 0xbe, 0x50, 0x89, 0xeb,
- 0x08, 0xeb, 0x2b, 0x54, 0x49, 0xa1, 0x05, 0x3c, 0x64, 0x19, 0x41, 0xeb, 0x18, 0x5a, 0x62, 0xa8,
- 0x8e, 0x06, 0x4f, 0x72, 0x91, 0x8b, 0x16, 0xc4, 0xcd, 0xc9, 0xd4, 0x0c, 0x7c, 0x22, 0x54, 0x29,
- 0x14, 0xce, 0x52, 0x45, 0x71, 0x1d, 0x65, 0x54, 0xa7, 0x11, 0x26, 0x82, 0x71, 0xab, 0x07, 0x8d,
- 0x35, 0x11, 0x92, 0x62, 0x32, 0x66, 0x94, 0xeb, 0xc6, 0xd0, 0x9c, 0x0c, 0x10, 0x7e, 0xdf, 0x02,
- 0xfd, 0x53, 0x95, 0x9f, 0x59, 0x27, 0xf8, 0x1a, 0xf4, 0x95, 0x98, 0x48, 0x42, 0xcf, 0x2b, 0x21,
- 0xb5, 0xe7, 0x0e, 0xdd, 0xd1, 0x83, 0x78, 0x7f, 0x31, 0x0d, 0xe0, 0x75, 0x5a, 0x8e, 0x8f, 0xc3,
- 0x35, 0x31, 0x4c, 0x80, 0x89, 0x3e, 0x08, 0xa9, 0xe1, 0x5b, 0xb0, 0x6b, 0x35, 0x52, 0xa4, 0x9c,
- 0xd3, 0xb1, 0x77, 0xaf, 0xad, 0x3d, 0x58, 0x4c, 0x83, 0xbd, 0x8d, 0x5a, 0xab, 0x87, 0xc9, 0x8e,
- 0x49, 0x9c, 0x98, 0x18, 0xbe, 0x02, 0xdb, 0x5a, 0x5c, 0x52, 0xee, 0x6d, 0x0d, 0xdd, 0x51, 0xff,
- 0xe8, 0x00, 0x99, 0xde, 0x50, 0xd3, 0x1b, 0xb2, 0xbd, 0xa1, 0x13, 0xc1, 0x78, 0xdc, 0xb9, 0x9d,
- 0x06, 0x4e, 0x62, 0x68, 0xb8, 0x0f, 0xba, 0x8a, 0xf2, 0x0b, 0x2a, 0xbd, 0x4e, 0x63, 0x98, 0xd8,
- 0x08, 0x0e, 0x40, 0x4f, 0x52, 0x42, 0x59, 0x4d, 0xa5, 0xb7, 0xdd, 0x2a, 0xab, 0x18, 0x7e, 0x06,
- 0xbb, 0x9a, 0x95, 0x54, 0x4c, 0xf4, 0x79, 0x41, 0x59, 0x5e, 0x68, 0xaf, 0xdb, 0x7a, 0x0e, 0x50,
- 0x33, 0x83, 0xe6, 0xbd, 0x90, 0x7d, 0xa5, 0x3a, 0x42, 0xef, 0x5a, 0x22, 0x7e, 0xda, 0x98, 0xfe,
- 0x69, 0x66, 0xb3, 0x3e, 0x4c, 0x76, 0x6c, 0xc2, 0xd0, 0xf0, 0x3d, 0x78, 0xb4, 0x24, 0x9a, 0xaf,
- 0xd2, 0x69, 0x59, 0x79, 0xf7, 0x87, 0xee, 0xa8, 0x13, 0x1f, 0x2e, 0xa6, 0x81, 0xb7, 0x79, 0xc9,
- 0x0a, 0x09, 0x93, 0x87, 0x36, 0x77, 0xb6, 0x4c, 0x1d, 0xf7, 0xbe, 0xde, 0x04, 0xce, 0xaf, 0x9b,
- 0xc0, 0x09, 0xf7, 0xc0, 0xe3, 0xb5, 0x59, 0x25, 0x54, 0x55, 0x82, 0x2b, 0x7a, 0x24, 0xc0, 0xd6,
- 0xa9, 0xca, 0x61, 0x01, 0x7a, 0xab, 0x31, 0x3e, 0x47, 0xff, 0x5a, 0x26, 0xb4, 0x76, 0xcb, 0x20,
- 0xfa, 0x6f, 0x74, 0x69, 0x18, 0x7f, 0xbc, 0x9d, 0xf9, 0xee, 0xdd, 0xcc, 0x77, 0x7f, 0xce, 0x7c,
- 0xf7, 0xdb, 0xdc, 0x77, 0xee, 0xe6, 0xbe, 0xf3, 0x63, 0xee, 0x3b, 0x9f, 0xde, 0xe4, 0x4c, 0x17,
- 0x93, 0x0c, 0x11, 0x51, 0x62, 0xbb, 0x9a, 0xe6, 0xf3, 0x42, 0x5d, 0x5c, 0xe2, 0x2b, 0xfc, 0xf7,
- 0x3f, 0x41, 0x5f, 0x57, 0x54, 0x65, 0xdd, 0x76, 0x2b, 0x5f, 0xfe, 0x0e, 0x00, 0x00, 0xff, 0xff,
- 0x26, 0x76, 0x5b, 0xfa, 0x33, 0x03, 0x00, 0x00,
-}
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ context.Context
-var _ grpc.ClientConn
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the grpc package it is being compiled against.
-const _ = grpc.SupportPackageIsVersion4
-
-// MsgClient is the client API for Msg service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
-type MsgClient interface {
- // Transfer defines a rpc handler method for MsgTransfer.
- Transfer(ctx context.Context, in *MsgTransfer, opts ...grpc.CallOption) (*MsgTransferResponse, error)
-}
-
-type msgClient struct {
- cc grpc1.ClientConn
-}
-
-func NewMsgClient(cc grpc1.ClientConn) MsgClient {
- return &msgClient{cc}
-}
-
-func (c *msgClient) Transfer(ctx context.Context, in *MsgTransfer, opts ...grpc.CallOption) (*MsgTransferResponse, error) {
- out := new(MsgTransferResponse)
- err := c.cc.Invoke(ctx, "/ibc.applications.transfer.v1.Msg/Transfer", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-// MsgServer is the server API for Msg service.
-type MsgServer interface {
- // Transfer defines a rpc handler method for MsgTransfer.
- Transfer(context.Context, *MsgTransfer) (*MsgTransferResponse, error)
-}
-
-// UnimplementedMsgServer can be embedded to have forward compatible implementations.
-type UnimplementedMsgServer struct {
-}
-
-func (*UnimplementedMsgServer) Transfer(ctx context.Context, req *MsgTransfer) (*MsgTransferResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method Transfer not implemented")
-}
-
-func RegisterMsgServer(s grpc1.Server, srv MsgServer) {
- s.RegisterService(&_Msg_serviceDesc, srv)
-}
-
-func _Msg_Transfer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(MsgTransfer)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(MsgServer).Transfer(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/ibc.applications.transfer.v1.Msg/Transfer",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(MsgServer).Transfer(ctx, req.(*MsgTransfer))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-var _Msg_serviceDesc = grpc.ServiceDesc{
- ServiceName: "ibc.applications.transfer.v1.Msg",
- HandlerType: (*MsgServer)(nil),
- Methods: []grpc.MethodDesc{
- {
- MethodName: "Transfer",
- Handler: _Msg_Transfer_Handler,
- },
- },
- Streams: []grpc.StreamDesc{},
- Metadata: "ibc/applications/transfer/v1/tx.proto",
-}
-
-func (m *MsgTransfer) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *MsgTransfer) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *MsgTransfer) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.TimeoutTimestamp != 0 {
- i = encodeVarintTx(dAtA, i, uint64(m.TimeoutTimestamp))
- i--
- dAtA[i] = 0x38
- }
- {
- size, err := m.TimeoutHeight.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintTx(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x32
- if len(m.Receiver) > 0 {
- i -= len(m.Receiver)
- copy(dAtA[i:], m.Receiver)
- i = encodeVarintTx(dAtA, i, uint64(len(m.Receiver)))
- i--
- dAtA[i] = 0x2a
- }
- if len(m.Sender) > 0 {
- i -= len(m.Sender)
- copy(dAtA[i:], m.Sender)
- i = encodeVarintTx(dAtA, i, uint64(len(m.Sender)))
- i--
- dAtA[i] = 0x22
- }
- {
- size, err := m.Token.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintTx(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x1a
- if len(m.SourceChannel) > 0 {
- i -= len(m.SourceChannel)
- copy(dAtA[i:], m.SourceChannel)
- i = encodeVarintTx(dAtA, i, uint64(len(m.SourceChannel)))
- i--
- dAtA[i] = 0x12
- }
- if len(m.SourcePort) > 0 {
- i -= len(m.SourcePort)
- copy(dAtA[i:], m.SourcePort)
- i = encodeVarintTx(dAtA, i, uint64(len(m.SourcePort)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *MsgTransferResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *MsgTransferResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *MsgTransferResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- return len(dAtA) - i, nil
-}
-
-func encodeVarintTx(dAtA []byte, offset int, v uint64) int {
- offset -= sovTx(v)
- base := offset
- for v >= 1<<7 {
- dAtA[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
- }
- dAtA[offset] = uint8(v)
- return base
-}
-func (m *MsgTransfer) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.SourcePort)
- if l > 0 {
- n += 1 + l + sovTx(uint64(l))
- }
- l = len(m.SourceChannel)
- if l > 0 {
- n += 1 + l + sovTx(uint64(l))
- }
- l = m.Token.Size()
- n += 1 + l + sovTx(uint64(l))
- l = len(m.Sender)
- if l > 0 {
- n += 1 + l + sovTx(uint64(l))
- }
- l = len(m.Receiver)
- if l > 0 {
- n += 1 + l + sovTx(uint64(l))
- }
- l = m.TimeoutHeight.Size()
- n += 1 + l + sovTx(uint64(l))
- if m.TimeoutTimestamp != 0 {
- n += 1 + sovTx(uint64(m.TimeoutTimestamp))
- }
- return n
-}
-
-func (m *MsgTransferResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- return n
-}
-
-func sovTx(x uint64) (n int) {
- return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozTx(x uint64) (n int) {
- return sovTx(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (m *MsgTransfer) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTx
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: MsgTransfer: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: MsgTransfer: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field SourcePort", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTx
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthTx
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthTx
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.SourcePort = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field SourceChannel", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTx
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthTx
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthTx
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.SourceChannel = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Token", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTx
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthTx
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthTx
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.Token.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 4:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Sender", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTx
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthTx
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthTx
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Sender = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 5:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Receiver", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTx
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthTx
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthTx
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Receiver = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 6:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field TimeoutHeight", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTx
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthTx
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthTx
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.TimeoutHeight.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 7:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field TimeoutTimestamp", wireType)
- }
- m.TimeoutTimestamp = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTx
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.TimeoutTimestamp |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipTx(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthTx
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *MsgTransferResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowTx
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: MsgTransferResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: MsgTransferResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- default:
- iNdEx = preIndex
- skippy, err := skipTx(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthTx
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func skipTx(dAtA []byte) (n int, err error) {
- l := len(dAtA)
- iNdEx := 0
- depth := 0
- for iNdEx < l {
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowTx
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- wireType := int(wire & 0x7)
- switch wireType {
- case 0:
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowTx
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- iNdEx++
- if dAtA[iNdEx-1] < 0x80 {
- break
- }
- }
- case 1:
- iNdEx += 8
- case 2:
- var length int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowTx
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- length |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if length < 0 {
- return 0, ErrInvalidLengthTx
- }
- iNdEx += length
- case 3:
- depth++
- case 4:
- if depth == 0 {
- return 0, ErrUnexpectedEndOfGroupTx
- }
- depth--
- case 5:
- iNdEx += 4
- default:
- return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
- }
- if iNdEx < 0 {
- return 0, ErrInvalidLengthTx
- }
- if depth == 0 {
- return iNdEx, nil
- }
- }
- return 0, io.ErrUnexpectedEOF
-}
-
-var (
- ErrInvalidLengthTx = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowTx = fmt.Errorf("proto: integer overflow")
- ErrUnexpectedEndOfGroupTx = fmt.Errorf("proto: unexpected end of group")
-)
diff --git a/applications/transfer/client/cli/cli.go b/apps/transfer/client/cli/cli.go
similarity index 100%
rename from applications/transfer/client/cli/cli.go
rename to apps/transfer/client/cli/cli.go
diff --git a/applications/transfer/client/cli/query.go b/apps/transfer/client/cli/query.go
similarity index 97%
rename from applications/transfer/client/cli/query.go
rename to apps/transfer/client/cli/query.go
index b9658e05..d6123e42 100644
--- a/applications/transfer/client/cli/query.go
+++ b/apps/transfer/client/cli/query.go
@@ -8,7 +8,7 @@ import (
"github.com/cosmos/cosmos-sdk/client"
"github.com/cosmos/cosmos-sdk/client/flags"
"github.com/cosmos/cosmos-sdk/version"
- "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types"
+ "github.com/cosmos/ibc-go/apps/transfer/types"
)
// GetCmdQueryDenomTrace defines the command to query a denomination trace from a given hash.
diff --git a/applications/transfer/client/cli/tx.go b/apps/transfer/client/cli/tx.go
similarity index 94%
rename from applications/transfer/client/cli/tx.go
rename to apps/transfer/client/cli/tx.go
index 1f9e92f6..9eafea9a 100644
--- a/applications/transfer/client/cli/tx.go
+++ b/apps/transfer/client/cli/tx.go
@@ -12,9 +12,9 @@ import (
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/cosmos/cosmos-sdk/types/msgservice"
"github.com/cosmos/cosmos-sdk/version"
- "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- channelutils "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/client/utils"
+ "github.com/cosmos/ibc-go/apps/transfer/types"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ channelutils "github.com/cosmos/ibc-go/core/04-channel/client/utils"
)
const (
diff --git a/applications/transfer/handler.go b/apps/transfer/handler.go
similarity index 90%
rename from applications/transfer/handler.go
rename to apps/transfer/handler.go
index 7c992c92..58ad69fd 100644
--- a/applications/transfer/handler.go
+++ b/apps/transfer/handler.go
@@ -3,7 +3,7 @@ package transfer
import (
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types"
+ "github.com/cosmos/ibc-go/apps/transfer/types"
)
// NewHandler returns sdk.Handler for IBC token transfer module messages
diff --git a/applications/transfer/handler_test.go b/apps/transfer/handler_test.go
similarity index 95%
rename from applications/transfer/handler_test.go
rename to apps/transfer/handler_test.go
index 92a04210..584b4a32 100644
--- a/applications/transfer/handler_test.go
+++ b/apps/transfer/handler_test.go
@@ -6,11 +6,11 @@ import (
"github.com/stretchr/testify/suite"
sdk "github.com/cosmos/cosmos-sdk/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
- ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+ "github.com/cosmos/ibc-go/apps/transfer/types"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
+ "github.com/cosmos/ibc-go/core/exported"
+ ibctesting "github.com/cosmos/ibc-go/testing"
)
type TransferTestSuite struct {
diff --git a/applications/transfer/keeper/MBT_README.md b/apps/transfer/keeper/MBT_README.md
similarity index 100%
rename from applications/transfer/keeper/MBT_README.md
rename to apps/transfer/keeper/MBT_README.md
diff --git a/applications/transfer/keeper/encoding.go b/apps/transfer/keeper/encoding.go
similarity index 94%
rename from applications/transfer/keeper/encoding.go
rename to apps/transfer/keeper/encoding.go
index ddb1bc4b..1f6e7e63 100644
--- a/applications/transfer/keeper/encoding.go
+++ b/apps/transfer/keeper/encoding.go
@@ -1,7 +1,7 @@
package keeper
import (
- "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types"
+ "github.com/cosmos/ibc-go/apps/transfer/types"
)
// UnmarshalDenomTrace attempts to decode and return a DenomTrace object from
diff --git a/applications/transfer/keeper/genesis.go b/apps/transfer/keeper/genesis.go
similarity index 94%
rename from applications/transfer/keeper/genesis.go
rename to apps/transfer/keeper/genesis.go
index 58a0c081..1c7aaef8 100644
--- a/applications/transfer/keeper/genesis.go
+++ b/apps/transfer/keeper/genesis.go
@@ -4,7 +4,7 @@ import (
"fmt"
sdk "github.com/cosmos/cosmos-sdk/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types"
+ "github.com/cosmos/ibc-go/apps/transfer/types"
)
// InitGenesis initializes the ibc-transfer state and binds to PortID.
diff --git a/applications/transfer/keeper/genesis_test.go b/apps/transfer/keeper/genesis_test.go
similarity index 92%
rename from applications/transfer/keeper/genesis_test.go
rename to apps/transfer/keeper/genesis_test.go
index a8543491..ad708004 100644
--- a/applications/transfer/keeper/genesis_test.go
+++ b/apps/transfer/keeper/genesis_test.go
@@ -3,7 +3,7 @@ package keeper_test
import (
"fmt"
- "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types"
+ "github.com/cosmos/ibc-go/apps/transfer/types"
)
func (suite *KeeperTestSuite) TestGenesis() {
diff --git a/applications/transfer/keeper/grpc_query.go b/apps/transfer/keeper/grpc_query.go
similarity index 96%
rename from applications/transfer/keeper/grpc_query.go
rename to apps/transfer/keeper/grpc_query.go
index b6347895..08656587 100644
--- a/applications/transfer/keeper/grpc_query.go
+++ b/apps/transfer/keeper/grpc_query.go
@@ -11,7 +11,7 @@ import (
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
"github.com/cosmos/cosmos-sdk/types/query"
- "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types"
+ "github.com/cosmos/ibc-go/apps/transfer/types"
)
var _ types.QueryServer = Keeper{}
diff --git a/applications/transfer/keeper/grpc_query_test.go b/apps/transfer/keeper/grpc_query_test.go
similarity index 97%
rename from applications/transfer/keeper/grpc_query_test.go
rename to apps/transfer/keeper/grpc_query_test.go
index 0b16e072..c297ea9d 100644
--- a/applications/transfer/keeper/grpc_query_test.go
+++ b/apps/transfer/keeper/grpc_query_test.go
@@ -5,7 +5,7 @@ import (
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/cosmos/cosmos-sdk/types/query"
- "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types"
+ "github.com/cosmos/ibc-go/apps/transfer/types"
)
func (suite *KeeperTestSuite) TestQueryDenomTrace() {
diff --git a/applications/transfer/keeper/keeper.go b/apps/transfer/keeper/keeper.go
similarity index 96%
rename from applications/transfer/keeper/keeper.go
rename to apps/transfer/keeper/keeper.go
index a2eebb55..fbc4a167 100644
--- a/applications/transfer/keeper/keeper.go
+++ b/apps/transfer/keeper/keeper.go
@@ -11,9 +11,9 @@ import (
authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
capabilitykeeper "github.com/cosmos/cosmos-sdk/x/capability/keeper"
capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types"
- channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
- host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+ "github.com/cosmos/ibc-go/apps/transfer/types"
+ channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
+ host "github.com/cosmos/ibc-go/core/24-host"
paramtypes "github.com/cosmos/cosmos-sdk/x/params/types"
)
diff --git a/applications/transfer/keeper/keeper_test.go b/apps/transfer/keeper/keeper_test.go
similarity index 92%
rename from applications/transfer/keeper/keeper_test.go
rename to apps/transfer/keeper/keeper_test.go
index cce9cbcc..f7f01038 100644
--- a/applications/transfer/keeper/keeper_test.go
+++ b/apps/transfer/keeper/keeper_test.go
@@ -8,8 +8,8 @@ import (
"github.com/cosmos/cosmos-sdk/baseapp"
sdk "github.com/cosmos/cosmos-sdk/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types"
- ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+ "github.com/cosmos/ibc-go/apps/transfer/types"
+ ibctesting "github.com/cosmos/ibc-go/testing"
)
type KeeperTestSuite struct {
diff --git a/applications/transfer/keeper/mbt_relay_test.go b/apps/transfer/keeper/mbt_relay_test.go
similarity index 97%
rename from applications/transfer/keeper/mbt_relay_test.go
rename to apps/transfer/keeper/mbt_relay_test.go
index cd64fbab..52205088 100644
--- a/applications/transfer/keeper/mbt_relay_test.go
+++ b/apps/transfer/keeper/mbt_relay_test.go
@@ -15,11 +15,11 @@ import (
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
- ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+ "github.com/cosmos/ibc-go/apps/transfer/types"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
+ "github.com/cosmos/ibc-go/core/exported"
+ ibctesting "github.com/cosmos/ibc-go/testing"
)
type TlaBalance struct {
diff --git a/applications/transfer/keeper/model_based_tests/Test5Packets.json b/apps/transfer/keeper/model_based_tests/Test5Packets.json
similarity index 100%
rename from applications/transfer/keeper/model_based_tests/Test5Packets.json
rename to apps/transfer/keeper/model_based_tests/Test5Packets.json
diff --git a/applications/transfer/keeper/model_based_tests/Test5Packets.tla b/apps/transfer/keeper/model_based_tests/Test5Packets.tla
similarity index 100%
rename from applications/transfer/keeper/model_based_tests/Test5Packets.tla
rename to apps/transfer/keeper/model_based_tests/Test5Packets.tla
diff --git a/applications/transfer/keeper/model_based_tests/Test5PacketsAllDifferentPass.json b/apps/transfer/keeper/model_based_tests/Test5PacketsAllDifferentPass.json
similarity index 100%
rename from applications/transfer/keeper/model_based_tests/Test5PacketsAllDifferentPass.json
rename to apps/transfer/keeper/model_based_tests/Test5PacketsAllDifferentPass.json
diff --git a/applications/transfer/keeper/model_based_tests/Test5PacketsAllDifferentPass.tla b/apps/transfer/keeper/model_based_tests/Test5PacketsAllDifferentPass.tla
similarity index 100%
rename from applications/transfer/keeper/model_based_tests/Test5PacketsAllDifferentPass.tla
rename to apps/transfer/keeper/model_based_tests/Test5PacketsAllDifferentPass.tla
diff --git a/applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementErrorFail.json b/apps/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementErrorFail.json
similarity index 100%
rename from applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementErrorFail.json
rename to apps/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementErrorFail.json
diff --git a/applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementErrorFail.tla b/apps/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementErrorFail.tla
similarity index 100%
rename from applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementErrorFail.tla
rename to apps/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementErrorFail.tla
diff --git a/applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementErrorPass.json b/apps/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementErrorPass.json
similarity index 100%
rename from applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementErrorPass.json
rename to apps/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementErrorPass.json
diff --git a/applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementErrorPass.tla b/apps/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementErrorPass.tla
similarity index 100%
rename from applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementErrorPass.tla
rename to apps/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementErrorPass.tla
diff --git a/applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementResultFail.json b/apps/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementResultFail.json
similarity index 100%
rename from applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementResultFail.json
rename to apps/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementResultFail.json
diff --git a/applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementResultFail.tla b/apps/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementResultFail.tla
similarity index 100%
rename from applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementResultFail.tla
rename to apps/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementResultFail.tla
diff --git a/applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementResultPass.json b/apps/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementResultPass.json
similarity index 100%
rename from applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementResultPass.json
rename to apps/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementResultPass.json
diff --git a/applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementResultPass.tla b/apps/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementResultPass.tla
similarity index 100%
rename from applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementResultPass.tla
rename to apps/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementResultPass.tla
diff --git a/applications/transfer/keeper/model_based_tests/TestOnRecvPacketFail.json b/apps/transfer/keeper/model_based_tests/TestOnRecvPacketFail.json
similarity index 100%
rename from applications/transfer/keeper/model_based_tests/TestOnRecvPacketFail.json
rename to apps/transfer/keeper/model_based_tests/TestOnRecvPacketFail.json
diff --git a/applications/transfer/keeper/model_based_tests/TestOnRecvPacketFail.tla b/apps/transfer/keeper/model_based_tests/TestOnRecvPacketFail.tla
similarity index 100%
rename from applications/transfer/keeper/model_based_tests/TestOnRecvPacketFail.tla
rename to apps/transfer/keeper/model_based_tests/TestOnRecvPacketFail.tla
diff --git a/applications/transfer/keeper/model_based_tests/TestOnRecvPacketPass.json b/apps/transfer/keeper/model_based_tests/TestOnRecvPacketPass.json
similarity index 100%
rename from applications/transfer/keeper/model_based_tests/TestOnRecvPacketPass.json
rename to apps/transfer/keeper/model_based_tests/TestOnRecvPacketPass.json
diff --git a/applications/transfer/keeper/model_based_tests/TestOnRecvPacketPass.tla b/apps/transfer/keeper/model_based_tests/TestOnRecvPacketPass.tla
similarity index 100%
rename from applications/transfer/keeper/model_based_tests/TestOnRecvPacketPass.tla
rename to apps/transfer/keeper/model_based_tests/TestOnRecvPacketPass.tla
diff --git a/applications/transfer/keeper/model_based_tests/TestOnTimeoutFail.json b/apps/transfer/keeper/model_based_tests/TestOnTimeoutFail.json
similarity index 100%
rename from applications/transfer/keeper/model_based_tests/TestOnTimeoutFail.json
rename to apps/transfer/keeper/model_based_tests/TestOnTimeoutFail.json
diff --git a/applications/transfer/keeper/model_based_tests/TestOnTimeoutFail.tla b/apps/transfer/keeper/model_based_tests/TestOnTimeoutFail.tla
similarity index 100%
rename from applications/transfer/keeper/model_based_tests/TestOnTimeoutFail.tla
rename to apps/transfer/keeper/model_based_tests/TestOnTimeoutFail.tla
diff --git a/applications/transfer/keeper/model_based_tests/TestOnTimeoutPass.json b/apps/transfer/keeper/model_based_tests/TestOnTimeoutPass.json
similarity index 100%
rename from applications/transfer/keeper/model_based_tests/TestOnTimeoutPass.json
rename to apps/transfer/keeper/model_based_tests/TestOnTimeoutPass.json
diff --git a/applications/transfer/keeper/model_based_tests/TestOnTimeoutPass.tla b/apps/transfer/keeper/model_based_tests/TestOnTimeoutPass.tla
similarity index 100%
rename from applications/transfer/keeper/model_based_tests/TestOnTimeoutPass.tla
rename to apps/transfer/keeper/model_based_tests/TestOnTimeoutPass.tla
diff --git a/applications/transfer/keeper/model_based_tests/TestSendTransferFail.json b/apps/transfer/keeper/model_based_tests/TestSendTransferFail.json
similarity index 100%
rename from applications/transfer/keeper/model_based_tests/TestSendTransferFail.json
rename to apps/transfer/keeper/model_based_tests/TestSendTransferFail.json
diff --git a/applications/transfer/keeper/model_based_tests/TestSendTransferFail.tla b/apps/transfer/keeper/model_based_tests/TestSendTransferFail.tla
similarity index 100%
rename from applications/transfer/keeper/model_based_tests/TestSendTransferFail.tla
rename to apps/transfer/keeper/model_based_tests/TestSendTransferFail.tla
diff --git a/applications/transfer/keeper/model_based_tests/TestSendTransferPass.json b/apps/transfer/keeper/model_based_tests/TestSendTransferPass.json
similarity index 100%
rename from applications/transfer/keeper/model_based_tests/TestSendTransferPass.json
rename to apps/transfer/keeper/model_based_tests/TestSendTransferPass.json
diff --git a/applications/transfer/keeper/model_based_tests/TestSendTransferPass.tla b/apps/transfer/keeper/model_based_tests/TestSendTransferPass.tla
similarity index 100%
rename from applications/transfer/keeper/model_based_tests/TestSendTransferPass.tla
rename to apps/transfer/keeper/model_based_tests/TestSendTransferPass.tla
diff --git a/applications/transfer/keeper/model_based_tests/TestUnescrowTokens.json b/apps/transfer/keeper/model_based_tests/TestUnescrowTokens.json
similarity index 100%
rename from applications/transfer/keeper/model_based_tests/TestUnescrowTokens.json
rename to apps/transfer/keeper/model_based_tests/TestUnescrowTokens.json
diff --git a/applications/transfer/keeper/model_based_tests/TestUnescrowTokens.tla b/apps/transfer/keeper/model_based_tests/TestUnescrowTokens.tla
similarity index 100%
rename from applications/transfer/keeper/model_based_tests/TestUnescrowTokens.tla
rename to apps/transfer/keeper/model_based_tests/TestUnescrowTokens.tla
diff --git a/applications/transfer/keeper/msg_server.go b/apps/transfer/keeper/msg_server.go
similarity index 95%
rename from applications/transfer/keeper/msg_server.go
rename to apps/transfer/keeper/msg_server.go
index dd2999af..4c658434 100644
--- a/applications/transfer/keeper/msg_server.go
+++ b/apps/transfer/keeper/msg_server.go
@@ -4,7 +4,7 @@ import (
"context"
sdk "github.com/cosmos/cosmos-sdk/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types"
+ "github.com/cosmos/ibc-go/apps/transfer/types"
)
var _ types.MsgServer = Keeper{}
diff --git a/applications/transfer/keeper/params.go b/apps/transfer/keeper/params.go
similarity index 92%
rename from applications/transfer/keeper/params.go
rename to apps/transfer/keeper/params.go
index 39a6c5d5..1d5a9d0c 100644
--- a/applications/transfer/keeper/params.go
+++ b/apps/transfer/keeper/params.go
@@ -2,7 +2,7 @@ package keeper
import (
sdk "github.com/cosmos/cosmos-sdk/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types"
+ "github.com/cosmos/ibc-go/apps/transfer/types"
)
// GetSendEnabled retrieves the send enabled boolean from the paramstore
diff --git a/applications/transfer/keeper/params_test.go b/apps/transfer/keeper/params_test.go
similarity index 86%
rename from applications/transfer/keeper/params_test.go
rename to apps/transfer/keeper/params_test.go
index 96f17ff7..464ce300 100644
--- a/applications/transfer/keeper/params_test.go
+++ b/apps/transfer/keeper/params_test.go
@@ -1,6 +1,6 @@
package keeper_test
-import "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types"
+import "github.com/cosmos/ibc-go/apps/transfer/types"
func (suite *KeeperTestSuite) TestParams() {
expParams := types.DefaultParams()
diff --git a/applications/transfer/keeper/relay.go b/apps/transfer/keeper/relay.go
similarity index 98%
rename from applications/transfer/keeper/relay.go
rename to apps/transfer/keeper/relay.go
index 4889014a..56b0489e 100644
--- a/applications/transfer/keeper/relay.go
+++ b/apps/transfer/keeper/relay.go
@@ -9,10 +9,10 @@ import (
"github.com/cosmos/cosmos-sdk/telemetry"
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
- host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+ "github.com/cosmos/ibc-go/apps/transfer/types"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
+ host "github.com/cosmos/ibc-go/core/24-host"
)
// SendTransfer handles transfer sending logic. There are 2 possible cases:
diff --git a/applications/transfer/keeper/relay_model/account.tla b/apps/transfer/keeper/relay_model/account.tla
similarity index 100%
rename from applications/transfer/keeper/relay_model/account.tla
rename to apps/transfer/keeper/relay_model/account.tla
diff --git a/applications/transfer/keeper/relay_model/account_record.tla b/apps/transfer/keeper/relay_model/account_record.tla
similarity index 100%
rename from applications/transfer/keeper/relay_model/account_record.tla
rename to apps/transfer/keeper/relay_model/account_record.tla
diff --git a/applications/transfer/keeper/relay_model/apalache-to-relay-test.json b/apps/transfer/keeper/relay_model/apalache-to-relay-test.json
similarity index 100%
rename from applications/transfer/keeper/relay_model/apalache-to-relay-test.json
rename to apps/transfer/keeper/relay_model/apalache-to-relay-test.json
diff --git a/applications/transfer/keeper/relay_model/apalache-to-relay-test2.json b/apps/transfer/keeper/relay_model/apalache-to-relay-test2.json
similarity index 100%
rename from applications/transfer/keeper/relay_model/apalache-to-relay-test2.json
rename to apps/transfer/keeper/relay_model/apalache-to-relay-test2.json
diff --git a/applications/transfer/keeper/relay_model/denom.tla b/apps/transfer/keeper/relay_model/denom.tla
similarity index 100%
rename from applications/transfer/keeper/relay_model/denom.tla
rename to apps/transfer/keeper/relay_model/denom.tla
diff --git a/applications/transfer/keeper/relay_model/denom_record.tla b/apps/transfer/keeper/relay_model/denom_record.tla
similarity index 100%
rename from applications/transfer/keeper/relay_model/denom_record.tla
rename to apps/transfer/keeper/relay_model/denom_record.tla
diff --git a/applications/transfer/keeper/relay_model/denom_record2.tla b/apps/transfer/keeper/relay_model/denom_record2.tla
similarity index 100%
rename from applications/transfer/keeper/relay_model/denom_record2.tla
rename to apps/transfer/keeper/relay_model/denom_record2.tla
diff --git a/applications/transfer/keeper/relay_model/denom_sequence.tla b/apps/transfer/keeper/relay_model/denom_sequence.tla
similarity index 100%
rename from applications/transfer/keeper/relay_model/denom_sequence.tla
rename to apps/transfer/keeper/relay_model/denom_sequence.tla
diff --git a/applications/transfer/keeper/relay_model/identifiers.tla b/apps/transfer/keeper/relay_model/identifiers.tla
similarity index 100%
rename from applications/transfer/keeper/relay_model/identifiers.tla
rename to apps/transfer/keeper/relay_model/identifiers.tla
diff --git a/applications/transfer/keeper/relay_model/relay.tla b/apps/transfer/keeper/relay_model/relay.tla
similarity index 100%
rename from applications/transfer/keeper/relay_model/relay.tla
rename to apps/transfer/keeper/relay_model/relay.tla
diff --git a/applications/transfer/keeper/relay_model/relay_tests.tla b/apps/transfer/keeper/relay_model/relay_tests.tla
similarity index 100%
rename from applications/transfer/keeper/relay_model/relay_tests.tla
rename to apps/transfer/keeper/relay_model/relay_tests.tla
diff --git a/applications/transfer/keeper/relay_test.go b/apps/transfer/keeper/relay_test.go
similarity index 97%
rename from applications/transfer/keeper/relay_test.go
rename to apps/transfer/keeper/relay_test.go
index 89058ac2..32cacd73 100644
--- a/applications/transfer/keeper/relay_test.go
+++ b/apps/transfer/keeper/relay_test.go
@@ -6,12 +6,12 @@ import (
"github.com/cosmos/cosmos-sdk/simapp"
sdk "github.com/cosmos/cosmos-sdk/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
- host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
- ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+ "github.com/cosmos/ibc-go/apps/transfer/types"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
+ host "github.com/cosmos/ibc-go/core/24-host"
+ "github.com/cosmos/ibc-go/core/exported"
+ ibctesting "github.com/cosmos/ibc-go/testing"
)
// test sending from chainA to chainB using both coin that originate on
diff --git a/applications/transfer/module.go b/apps/transfer/module.go
similarity index 96%
rename from applications/transfer/module.go
rename to apps/transfer/module.go
index 25290d69..450e2fca 100644
--- a/applications/transfer/module.go
+++ b/apps/transfer/module.go
@@ -22,13 +22,13 @@ import (
"github.com/cosmos/cosmos-sdk/types/module"
simtypes "github.com/cosmos/cosmos-sdk/types/simulation"
capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/client/cli"
- "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/keeper"
- "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/simulation"
- "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types"
- channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
- porttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/05-port/types"
- host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+ "github.com/cosmos/ibc-go/apps/transfer/client/cli"
+ "github.com/cosmos/ibc-go/apps/transfer/keeper"
+ "github.com/cosmos/ibc-go/apps/transfer/simulation"
+ "github.com/cosmos/ibc-go/apps/transfer/types"
+ channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
+ porttypes "github.com/cosmos/ibc-go/core/05-port/types"
+ host "github.com/cosmos/ibc-go/core/24-host"
)
var (
diff --git a/applications/transfer/module_test.go b/apps/transfer/module_test.go
similarity index 95%
rename from applications/transfer/module_test.go
rename to apps/transfer/module_test.go
index d2acfb40..c316341e 100644
--- a/applications/transfer/module_test.go
+++ b/apps/transfer/module_test.go
@@ -4,11 +4,11 @@ import (
"math"
capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types"
- channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
- host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
- ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+ "github.com/cosmos/ibc-go/apps/transfer/types"
+ channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
+ host "github.com/cosmos/ibc-go/core/24-host"
+ "github.com/cosmos/ibc-go/core/exported"
+ ibctesting "github.com/cosmos/ibc-go/testing"
)
func (suite *TransferTestSuite) TestOnChanOpenInit() {
diff --git a/applications/transfer/simulation/decoder.go b/apps/transfer/simulation/decoder.go
similarity index 93%
rename from applications/transfer/simulation/decoder.go
rename to apps/transfer/simulation/decoder.go
index df783450..70191c6a 100644
--- a/applications/transfer/simulation/decoder.go
+++ b/apps/transfer/simulation/decoder.go
@@ -5,7 +5,7 @@ import (
"fmt"
"github.com/cosmos/cosmos-sdk/types/kv"
- "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types"
+ "github.com/cosmos/ibc-go/apps/transfer/types"
)
// TransferUnmarshaler defines the expected encoding store functions.
diff --git a/applications/transfer/simulation/decoder_test.go b/apps/transfer/simulation/decoder_test.go
similarity index 90%
rename from applications/transfer/simulation/decoder_test.go
rename to apps/transfer/simulation/decoder_test.go
index 729a067e..a71f3997 100644
--- a/applications/transfer/simulation/decoder_test.go
+++ b/apps/transfer/simulation/decoder_test.go
@@ -8,8 +8,8 @@ import (
"github.com/cosmos/cosmos-sdk/simapp"
"github.com/cosmos/cosmos-sdk/types/kv"
- "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/simulation"
- "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types"
+ "github.com/cosmos/ibc-go/apps/transfer/simulation"
+ "github.com/cosmos/ibc-go/apps/transfer/types"
)
func TestDecodeStore(t *testing.T) {
diff --git a/applications/transfer/simulation/genesis.go b/apps/transfer/simulation/genesis.go
similarity index 95%
rename from applications/transfer/simulation/genesis.go
rename to apps/transfer/simulation/genesis.go
index a51bce9f..647f2321 100644
--- a/applications/transfer/simulation/genesis.go
+++ b/apps/transfer/simulation/genesis.go
@@ -8,7 +8,7 @@ import (
"github.com/cosmos/cosmos-sdk/types/module"
simtypes "github.com/cosmos/cosmos-sdk/types/simulation"
- "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types"
+ "github.com/cosmos/ibc-go/apps/transfer/types"
)
// Simulation parameter constants
diff --git a/applications/transfer/simulation/genesis_test.go b/apps/transfer/simulation/genesis_test.go
similarity index 94%
rename from applications/transfer/simulation/genesis_test.go
rename to apps/transfer/simulation/genesis_test.go
index 12791d74..9cac5ab3 100644
--- a/applications/transfer/simulation/genesis_test.go
+++ b/apps/transfer/simulation/genesis_test.go
@@ -11,8 +11,8 @@ import (
codectypes "github.com/cosmos/cosmos-sdk/codec/types"
"github.com/cosmos/cosmos-sdk/types/module"
simtypes "github.com/cosmos/cosmos-sdk/types/simulation"
- "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/simulation"
- "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types"
+ "github.com/cosmos/ibc-go/apps/transfer/simulation"
+ "github.com/cosmos/ibc-go/apps/transfer/types"
)
// TestRandomizedGenState tests the normal scenario of applying RandomizedGenState.
diff --git a/applications/transfer/simulation/params.go b/apps/transfer/simulation/params.go
similarity index 93%
rename from applications/transfer/simulation/params.go
rename to apps/transfer/simulation/params.go
index 67c61f51..29f84d6c 100644
--- a/applications/transfer/simulation/params.go
+++ b/apps/transfer/simulation/params.go
@@ -9,7 +9,7 @@ import (
"github.com/cosmos/cosmos-sdk/x/simulation"
simtypes "github.com/cosmos/cosmos-sdk/types/simulation"
- "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types"
+ "github.com/cosmos/ibc-go/apps/transfer/types"
)
// ParamChanges defines the parameters that can be modified by param change proposals
diff --git a/applications/transfer/simulation/params_test.go b/apps/transfer/simulation/params_test.go
similarity index 91%
rename from applications/transfer/simulation/params_test.go
rename to apps/transfer/simulation/params_test.go
index a692d432..71e4a815 100644
--- a/applications/transfer/simulation/params_test.go
+++ b/apps/transfer/simulation/params_test.go
@@ -6,7 +6,7 @@ import (
"github.com/stretchr/testify/require"
- "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/simulation"
+ "github.com/cosmos/ibc-go/apps/transfer/simulation"
)
func TestParamChanges(t *testing.T) {
diff --git a/applications/transfer/spec/01_concepts.md b/apps/transfer/spec/01_concepts.md
similarity index 100%
rename from applications/transfer/spec/01_concepts.md
rename to apps/transfer/spec/01_concepts.md
diff --git a/applications/transfer/spec/02_state.md b/apps/transfer/spec/02_state.md
similarity index 100%
rename from applications/transfer/spec/02_state.md
rename to apps/transfer/spec/02_state.md
diff --git a/applications/transfer/spec/03_state_transitions.md b/apps/transfer/spec/03_state_transitions.md
similarity index 100%
rename from applications/transfer/spec/03_state_transitions.md
rename to apps/transfer/spec/03_state_transitions.md
diff --git a/applications/transfer/spec/04_messages.md b/apps/transfer/spec/04_messages.md
similarity index 100%
rename from applications/transfer/spec/04_messages.md
rename to apps/transfer/spec/04_messages.md
diff --git a/applications/transfer/spec/05_events.md b/apps/transfer/spec/05_events.md
similarity index 100%
rename from applications/transfer/spec/05_events.md
rename to apps/transfer/spec/05_events.md
diff --git a/applications/transfer/spec/06_metrics.md b/apps/transfer/spec/06_metrics.md
similarity index 100%
rename from applications/transfer/spec/06_metrics.md
rename to apps/transfer/spec/06_metrics.md
diff --git a/applications/transfer/spec/07_params.md b/apps/transfer/spec/07_params.md
similarity index 100%
rename from applications/transfer/spec/07_params.md
rename to apps/transfer/spec/07_params.md
diff --git a/applications/transfer/spec/README.md b/apps/transfer/spec/README.md
similarity index 100%
rename from applications/transfer/spec/README.md
rename to apps/transfer/spec/README.md
diff --git a/applications/transfer/types/codec.go b/apps/transfer/types/codec.go
similarity index 100%
rename from applications/transfer/types/codec.go
rename to apps/transfer/types/codec.go
diff --git a/applications/transfer/types/coin.go b/apps/transfer/types/coin.go
similarity index 100%
rename from applications/transfer/types/coin.go
rename to apps/transfer/types/coin.go
diff --git a/applications/transfer/types/errors.go b/apps/transfer/types/errors.go
similarity index 100%
rename from applications/transfer/types/errors.go
rename to apps/transfer/types/errors.go
diff --git a/applications/transfer/types/events.go b/apps/transfer/types/events.go
similarity index 100%
rename from applications/transfer/types/events.go
rename to apps/transfer/types/events.go
diff --git a/applications/transfer/types/expected_keepers.go b/apps/transfer/types/expected_keepers.go
similarity index 90%
rename from applications/transfer/types/expected_keepers.go
rename to apps/transfer/types/expected_keepers.go
index 28446335..6087855c 100644
--- a/applications/transfer/types/expected_keepers.go
+++ b/apps/transfer/types/expected_keepers.go
@@ -4,9 +4,9 @@ import (
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/cosmos/cosmos-sdk/x/auth/types"
capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"
- connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
- channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
- ibcexported "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ connectiontypes "github.com/cosmos/ibc-go/core/03-connection/types"
+ channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
+ ibcexported "github.com/cosmos/ibc-go/core/exported"
)
// AccountKeeper defines the contract required for account APIs.
diff --git a/applications/transfer/types/genesis.go b/apps/transfer/types/genesis.go
similarity index 93%
rename from applications/transfer/types/genesis.go
rename to apps/transfer/types/genesis.go
index 682b04c4..6432f3a7 100644
--- a/applications/transfer/types/genesis.go
+++ b/apps/transfer/types/genesis.go
@@ -1,7 +1,7 @@
package types
import (
- host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+ host "github.com/cosmos/ibc-go/core/24-host"
)
// NewGenesisState creates a new ibc-transfer GenesisState instance.
diff --git a/applications/transfer/types/genesis_test.go b/apps/transfer/types/genesis_test.go
similarity index 91%
rename from applications/transfer/types/genesis_test.go
rename to apps/transfer/types/genesis_test.go
index a2aba58c..bac4c35d 100644
--- a/applications/transfer/types/genesis_test.go
+++ b/apps/transfer/types/genesis_test.go
@@ -5,7 +5,7 @@ import (
"github.com/stretchr/testify/require"
- "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types"
+ "github.com/cosmos/ibc-go/apps/transfer/types"
)
func TestValidateGenesis(t *testing.T) {
diff --git a/applications/transfer/types/keys.go b/apps/transfer/types/keys.go
similarity index 100%
rename from applications/transfer/types/keys.go
rename to apps/transfer/types/keys.go
diff --git a/applications/transfer/types/keys_test.go b/apps/transfer/types/keys_test.go
similarity index 88%
rename from applications/transfer/types/keys_test.go
rename to apps/transfer/types/keys_test.go
index 9ab3314c..3096fec7 100644
--- a/applications/transfer/types/keys_test.go
+++ b/apps/transfer/types/keys_test.go
@@ -5,7 +5,7 @@ import (
"github.com/stretchr/testify/require"
- "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types"
+ "github.com/cosmos/ibc-go/apps/transfer/types"
)
// Test that there is domain separation between the port id and the channel id otherwise an
diff --git a/applications/transfer/types/msgs.go b/apps/transfer/types/msgs.go
similarity index 95%
rename from applications/transfer/types/msgs.go
rename to apps/transfer/types/msgs.go
index cf229321..568c3d8d 100644
--- a/applications/transfer/types/msgs.go
+++ b/apps/transfer/types/msgs.go
@@ -5,8 +5,8 @@ import (
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ host "github.com/cosmos/ibc-go/core/24-host"
)
// msg types
diff --git a/applications/transfer/types/msgs_test.go b/apps/transfer/types/msgs_test.go
similarity index 98%
rename from applications/transfer/types/msgs_test.go
rename to apps/transfer/types/msgs_test.go
index 1fc70c54..e0598869 100644
--- a/applications/transfer/types/msgs_test.go
+++ b/apps/transfer/types/msgs_test.go
@@ -8,7 +8,7 @@ import (
"github.com/cosmos/cosmos-sdk/crypto/keys/secp256k1"
sdk "github.com/cosmos/cosmos-sdk/types"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
)
// define constants used for testing
diff --git a/applications/transfer/types/packet.go b/apps/transfer/types/packet.go
similarity index 100%
rename from applications/transfer/types/packet.go
rename to apps/transfer/types/packet.go
diff --git a/applications/transfer/types/packet_test.go b/apps/transfer/types/packet_test.go
similarity index 100%
rename from applications/transfer/types/packet_test.go
rename to apps/transfer/types/packet_test.go
diff --git a/applications/transfer/types/params.go b/apps/transfer/types/params.go
similarity index 100%
rename from applications/transfer/types/params.go
rename to apps/transfer/types/params.go
diff --git a/applications/transfer/types/params_test.go b/apps/transfer/types/params_test.go
similarity index 100%
rename from applications/transfer/types/params_test.go
rename to apps/transfer/types/params_test.go
diff --git a/applications/transfer/types/trace.go b/apps/transfer/types/trace.go
similarity index 99%
rename from applications/transfer/types/trace.go
rename to apps/transfer/types/trace.go
index f45113ef..cc19a4c4 100644
--- a/applications/transfer/types/trace.go
+++ b/apps/transfer/types/trace.go
@@ -12,7 +12,7 @@ import (
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+ host "github.com/cosmos/ibc-go/core/24-host"
)
// ParseDenomTrace parses a string with the ibc prefix (denom trace) and the base denomination
diff --git a/applications/transfer/types/trace_test.go b/apps/transfer/types/trace_test.go
similarity index 100%
rename from applications/transfer/types/trace_test.go
rename to apps/transfer/types/trace_test.go
diff --git a/core/02-client/abci.go b/core/02-client/abci.go
index 3c56d90a..b65fb3c6 100644
--- a/core/02-client/abci.go
+++ b/core/02-client/abci.go
@@ -2,8 +2,8 @@ package client
import (
sdk "github.com/cosmos/cosmos-sdk/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/keeper"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ "github.com/cosmos/ibc-go/core/02-client/keeper"
+ "github.com/cosmos/ibc-go/core/exported"
)
// BeginBlocker updates an existing localhost client with the latest block height.
diff --git a/core/02-client/abci_test.go b/core/02-client/abci_test.go
index 3a296618..6d4a8d60 100644
--- a/core/02-client/abci_test.go
+++ b/core/02-client/abci_test.go
@@ -5,11 +5,11 @@ import (
"github.com/stretchr/testify/suite"
- client "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
- localhosttypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/09-localhost/types"
- ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+ client "github.com/cosmos/ibc-go/core/02-client"
+ "github.com/cosmos/ibc-go/core/02-client/types"
+ "github.com/cosmos/ibc-go/core/exported"
+ localhosttypes "github.com/cosmos/ibc-go/light-clients/09-localhost/types"
+ ibctesting "github.com/cosmos/ibc-go/testing"
)
type ClientTestSuite struct {
diff --git a/core/02-client/client/cli/cli.go b/core/02-client/client/cli/cli.go
index 33c99152..74bb72be 100644
--- a/core/02-client/client/cli/cli.go
+++ b/core/02-client/client/cli/cli.go
@@ -4,7 +4,7 @@ import (
"github.com/spf13/cobra"
"github.com/cosmos/cosmos-sdk/client"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ "github.com/cosmos/ibc-go/core/02-client/types"
)
// GetQueryCmd returns the query commands for IBC clients
diff --git a/core/02-client/client/cli/query.go b/core/02-client/client/cli/query.go
index c1b5e51a..2a5ea8e7 100644
--- a/core/02-client/client/cli/query.go
+++ b/core/02-client/client/cli/query.go
@@ -9,9 +9,9 @@ import (
"github.com/cosmos/cosmos-sdk/client"
"github.com/cosmos/cosmos-sdk/client/flags"
"github.com/cosmos/cosmos-sdk/version"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/client/utils"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+ "github.com/cosmos/ibc-go/core/02-client/client/utils"
+ "github.com/cosmos/ibc-go/core/02-client/types"
+ host "github.com/cosmos/ibc-go/core/24-host"
)
const (
diff --git a/core/02-client/client/cli/tx.go b/core/02-client/client/cli/tx.go
index bdaa53a8..9e62835f 100644
--- a/core/02-client/client/cli/tx.go
+++ b/core/02-client/client/cli/tx.go
@@ -16,8 +16,8 @@ import (
"github.com/cosmos/cosmos-sdk/version"
govcli "github.com/cosmos/cosmos-sdk/x/gov/client/cli"
govtypes "github.com/cosmos/cosmos-sdk/x/gov/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ "github.com/cosmos/ibc-go/core/02-client/types"
+ "github.com/cosmos/ibc-go/core/exported"
)
// NewCreateClientCmd defines the command to create a new IBC light client.
diff --git a/core/02-client/client/proposal_handler.go b/core/02-client/client/proposal_handler.go
index 63585cbe..265a189a 100644
--- a/core/02-client/client/proposal_handler.go
+++ b/core/02-client/client/proposal_handler.go
@@ -2,7 +2,7 @@ package client
import (
govclient "github.com/cosmos/cosmos-sdk/x/gov/client"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/client/cli"
+ "github.com/cosmos/ibc-go/core/02-client/client/cli"
)
var ProposalHandler = govclient.NewProposalHandler(cli.NewCmdSubmitUpdateClientProposal, nil)
diff --git a/core/02-client/client/utils/utils.go b/core/02-client/client/utils/utils.go
index 1a7bc003..d6080c91 100644
--- a/core/02-client/client/utils/utils.go
+++ b/core/02-client/client/utils/utils.go
@@ -9,12 +9,12 @@ import (
"github.com/cosmos/cosmos-sdk/codec"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
- host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
- ibcclient "github.com/cosmos/cosmos-sdk/x/ibc/core/client"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
- ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
+ "github.com/cosmos/ibc-go/core/02-client/types"
+ commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
+ host "github.com/cosmos/ibc-go/core/24-host"
+ ibcclient "github.com/cosmos/ibc-go/core/client"
+ "github.com/cosmos/ibc-go/core/exported"
+ ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
)
// QueryClientState returns a client state. If prove is true, it performs an ABCI store query
diff --git a/core/02-client/genesis.go b/core/02-client/genesis.go
index 26635f07..4516cfb1 100644
--- a/core/02-client/genesis.go
+++ b/core/02-client/genesis.go
@@ -4,9 +4,9 @@ import (
"fmt"
sdk "github.com/cosmos/cosmos-sdk/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/keeper"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ "github.com/cosmos/ibc-go/core/02-client/keeper"
+ "github.com/cosmos/ibc-go/core/02-client/types"
+ "github.com/cosmos/ibc-go/core/exported"
)
// InitGenesis initializes the ibc client submodule's state from a provided genesis
diff --git a/core/02-client/keeper/client.go b/core/02-client/keeper/client.go
index 672dcf5d..45a2af42 100644
--- a/core/02-client/keeper/client.go
+++ b/core/02-client/keeper/client.go
@@ -6,8 +6,8 @@ import (
"github.com/cosmos/cosmos-sdk/telemetry"
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ "github.com/cosmos/ibc-go/core/02-client/types"
+ "github.com/cosmos/ibc-go/core/exported"
)
// CreateClient creates a new client state and populates it with a given consensus
diff --git a/core/02-client/keeper/client_test.go b/core/02-client/keeper/client_test.go
index 0cf5c1fe..231486a2 100644
--- a/core/02-client/keeper/client_test.go
+++ b/core/02-client/keeper/client_test.go
@@ -6,14 +6,14 @@ import (
tmtypes "github.com/tendermint/tendermint/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
- ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
- localhosttypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/09-localhost/types"
- ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
- ibctestingmock "github.com/cosmos/cosmos-sdk/x/ibc/testing/mock"
+ "github.com/cosmos/ibc-go/core/02-client/types"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
+ "github.com/cosmos/ibc-go/core/exported"
+ ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
+ localhosttypes "github.com/cosmos/ibc-go/light-clients/09-localhost/types"
+ ibctesting "github.com/cosmos/ibc-go/testing"
+ ibctestingmock "github.com/cosmos/ibc-go/testing/mock"
upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
)
diff --git a/core/02-client/keeper/encoding.go b/core/02-client/keeper/encoding.go
index f2a07b86..1e4750b5 100644
--- a/core/02-client/keeper/encoding.go
+++ b/core/02-client/keeper/encoding.go
@@ -1,8 +1,8 @@
package keeper
import (
- "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ "github.com/cosmos/ibc-go/core/02-client/types"
+ "github.com/cosmos/ibc-go/core/exported"
)
// UnmarshalClientState attempts to decode and return an ClientState object from
diff --git a/core/02-client/keeper/grpc_query.go b/core/02-client/keeper/grpc_query.go
index 21344277..9d4d6ae6 100644
--- a/core/02-client/keeper/grpc_query.go
+++ b/core/02-client/keeper/grpc_query.go
@@ -14,9 +14,9 @@ import (
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
"github.com/cosmos/cosmos-sdk/types/query"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ "github.com/cosmos/ibc-go/core/02-client/types"
+ host "github.com/cosmos/ibc-go/core/24-host"
+ "github.com/cosmos/ibc-go/core/exported"
)
var _ types.QueryServer = Keeper{}
diff --git a/core/02-client/keeper/grpc_query_test.go b/core/02-client/keeper/grpc_query_test.go
index 5e361a76..e0542ce4 100644
--- a/core/02-client/keeper/grpc_query_test.go
+++ b/core/02-client/keeper/grpc_query_test.go
@@ -8,11 +8,11 @@ import (
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/cosmos/cosmos-sdk/types/query"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
- ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
- ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+ "github.com/cosmos/ibc-go/core/02-client/types"
+ commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
+ "github.com/cosmos/ibc-go/core/exported"
+ ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
+ ibctesting "github.com/cosmos/ibc-go/testing"
)
func (suite *KeeperTestSuite) TestQueryClientState() {
diff --git a/core/02-client/keeper/keeper.go b/core/02-client/keeper/keeper.go
index 67c5c065..1278a76b 100644
--- a/core/02-client/keeper/keeper.go
+++ b/core/02-client/keeper/keeper.go
@@ -12,11 +12,11 @@ import (
"github.com/cosmos/cosmos-sdk/store/prefix"
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
- host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
- ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
+ "github.com/cosmos/ibc-go/core/02-client/types"
+ commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
+ host "github.com/cosmos/ibc-go/core/24-host"
+ "github.com/cosmos/ibc-go/core/exported"
+ ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
paramtypes "github.com/cosmos/cosmos-sdk/x/params/types"
upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
)
diff --git a/core/02-client/keeper/keeper_test.go b/core/02-client/keeper/keeper_test.go
index c22e80cc..4badc9f4 100644
--- a/core/02-client/keeper/keeper_test.go
+++ b/core/02-client/keeper/keeper_test.go
@@ -15,14 +15,14 @@ import (
cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec"
"github.com/cosmos/cosmos-sdk/simapp"
sdk "github.com/cosmos/cosmos-sdk/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/keeper"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
- ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
- localhosttypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/09-localhost/types"
- ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
- ibctestingmock "github.com/cosmos/cosmos-sdk/x/ibc/testing/mock"
+ "github.com/cosmos/ibc-go/core/02-client/keeper"
+ "github.com/cosmos/ibc-go/core/02-client/types"
+ commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
+ "github.com/cosmos/ibc-go/core/exported"
+ ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
+ localhosttypes "github.com/cosmos/ibc-go/light-clients/09-localhost/types"
+ ibctesting "github.com/cosmos/ibc-go/testing"
+ ibctestingmock "github.com/cosmos/ibc-go/testing/mock"
stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types"
)
diff --git a/core/02-client/keeper/params.go b/core/02-client/keeper/params.go
index 04f4a256..882372d1 100644
--- a/core/02-client/keeper/params.go
+++ b/core/02-client/keeper/params.go
@@ -2,7 +2,7 @@ package keeper
import (
sdk "github.com/cosmos/cosmos-sdk/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ "github.com/cosmos/ibc-go/core/02-client/types"
)
// GetAllowedClients retrieves the receive enabled boolean from the paramstore
diff --git a/core/02-client/keeper/params_test.go b/core/02-client/keeper/params_test.go
index 9df08597..36cbea10 100644
--- a/core/02-client/keeper/params_test.go
+++ b/core/02-client/keeper/params_test.go
@@ -1,7 +1,7 @@
package keeper_test
import (
- "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ "github.com/cosmos/ibc-go/core/02-client/types"
)
func (suite *KeeperTestSuite) TestParams() {
diff --git a/core/02-client/keeper/proposal.go b/core/02-client/keeper/proposal.go
index 6d4ff350..78cb652d 100644
--- a/core/02-client/keeper/proposal.go
+++ b/core/02-client/keeper/proposal.go
@@ -6,8 +6,8 @@ import (
"github.com/cosmos/cosmos-sdk/telemetry"
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ "github.com/cosmos/ibc-go/core/02-client/types"
+ "github.com/cosmos/ibc-go/core/exported"
)
// ClientUpdateProposal will retrieve the subject and substitute client.
diff --git a/core/02-client/keeper/proposal_test.go b/core/02-client/keeper/proposal_test.go
index 8dbe43f7..cb0816af 100644
--- a/core/02-client/keeper/proposal_test.go
+++ b/core/02-client/keeper/proposal_test.go
@@ -1,11 +1,11 @@
package keeper_test
import (
- "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
- ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
- ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+ "github.com/cosmos/ibc-go/core/02-client/types"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ "github.com/cosmos/ibc-go/core/exported"
+ ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
+ ibctesting "github.com/cosmos/ibc-go/testing"
)
func (suite *KeeperTestSuite) TestClientUpdateProposal() {
diff --git a/core/02-client/module.go b/core/02-client/module.go
index 08efee8b..78749db6 100644
--- a/core/02-client/module.go
+++ b/core/02-client/module.go
@@ -4,8 +4,8 @@ import (
"github.com/gogo/protobuf/grpc"
"github.com/spf13/cobra"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/client/cli"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ "github.com/cosmos/ibc-go/core/02-client/client/cli"
+ "github.com/cosmos/ibc-go/core/02-client/types"
)
// Name returns the IBC client name
diff --git a/core/02-client/proposal_handler.go b/core/02-client/proposal_handler.go
index befa95df..8a76f16d 100644
--- a/core/02-client/proposal_handler.go
+++ b/core/02-client/proposal_handler.go
@@ -4,8 +4,8 @@ import (
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
govtypes "github.com/cosmos/cosmos-sdk/x/gov/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/keeper"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ "github.com/cosmos/ibc-go/core/02-client/keeper"
+ "github.com/cosmos/ibc-go/core/02-client/types"
)
// NewClientUpdateProposalHandler defines the client update proposal handler
diff --git a/core/02-client/proposal_handler_test.go b/core/02-client/proposal_handler_test.go
index 41b89318..047371ec 100644
--- a/core/02-client/proposal_handler_test.go
+++ b/core/02-client/proposal_handler_test.go
@@ -4,11 +4,11 @@ import (
sdk "github.com/cosmos/cosmos-sdk/types"
distributiontypes "github.com/cosmos/cosmos-sdk/x/distribution/types"
govtypes "github.com/cosmos/cosmos-sdk/x/gov/types"
- client "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
- ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
- ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+ client "github.com/cosmos/ibc-go/core/02-client"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ "github.com/cosmos/ibc-go/core/exported"
+ ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
+ ibctesting "github.com/cosmos/ibc-go/testing"
)
func (suite *ClientTestSuite) TestNewClientUpdateProposalHandler() {
diff --git a/core/02-client/simulation/decoder.go b/core/02-client/simulation/decoder.go
index 03a803b1..70736e3b 100644
--- a/core/02-client/simulation/decoder.go
+++ b/core/02-client/simulation/decoder.go
@@ -5,9 +5,9 @@ import (
"fmt"
"github.com/cosmos/cosmos-sdk/types/kv"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/keeper"
- host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ "github.com/cosmos/ibc-go/core/02-client/keeper"
+ host "github.com/cosmos/ibc-go/core/24-host"
+ "github.com/cosmos/ibc-go/core/exported"
)
var _ ClientUnmarshaler = (*keeper.Keeper)(nil)
diff --git a/core/02-client/simulation/decoder_test.go b/core/02-client/simulation/decoder_test.go
index 095834ba..1259409a 100644
--- a/core/02-client/simulation/decoder_test.go
+++ b/core/02-client/simulation/decoder_test.go
@@ -9,10 +9,10 @@ import (
"github.com/cosmos/cosmos-sdk/simapp"
"github.com/cosmos/cosmos-sdk/types/kv"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/simulation"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
- ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
+ "github.com/cosmos/ibc-go/core/02-client/simulation"
+ "github.com/cosmos/ibc-go/core/02-client/types"
+ host "github.com/cosmos/ibc-go/core/24-host"
+ ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
)
func TestDecodeStore(t *testing.T) {
diff --git a/core/02-client/simulation/genesis.go b/core/02-client/simulation/genesis.go
index 2f231970..cc9c1601 100644
--- a/core/02-client/simulation/genesis.go
+++ b/core/02-client/simulation/genesis.go
@@ -4,7 +4,7 @@ import (
"math/rand"
simtypes "github.com/cosmos/cosmos-sdk/types/simulation"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ "github.com/cosmos/ibc-go/core/02-client/types"
)
// GenClientGenesis returns the default client genesis state.
diff --git a/core/02-client/types/client.go b/core/02-client/types/client.go
index 6d51828a..40d25ced 100644
--- a/core/02-client/types/client.go
+++ b/core/02-client/types/client.go
@@ -10,8 +10,8 @@ import (
codectypes "github.com/cosmos/cosmos-sdk/codec/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ host "github.com/cosmos/ibc-go/core/24-host"
+ "github.com/cosmos/ibc-go/core/exported"
)
var (
diff --git a/core/02-client/types/client_test.go b/core/02-client/types/client_test.go
index 2dfd3967..8854f189 100644
--- a/core/02-client/types/client_test.go
+++ b/core/02-client/types/client_test.go
@@ -5,9 +5,9 @@ import (
"github.com/stretchr/testify/require"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
- ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+ "github.com/cosmos/ibc-go/core/02-client/types"
+ "github.com/cosmos/ibc-go/core/exported"
+ ibctesting "github.com/cosmos/ibc-go/testing"
)
func (suite *TypesTestSuite) TestMarshalConsensusStateWithHeight() {
diff --git a/core/02-client/types/codec.go b/core/02-client/types/codec.go
index 59a15832..441846b0 100644
--- a/core/02-client/types/codec.go
+++ b/core/02-client/types/codec.go
@@ -8,7 +8,7 @@ import (
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
"github.com/cosmos/cosmos-sdk/types/msgservice"
govtypes "github.com/cosmos/cosmos-sdk/x/gov/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ "github.com/cosmos/ibc-go/core/exported"
)
// RegisterInterfaces registers the client interfaces to protobuf Any.
diff --git a/core/02-client/types/codec_test.go b/core/02-client/types/codec_test.go
index 75cfc97e..35913352 100644
--- a/core/02-client/types/codec_test.go
+++ b/core/02-client/types/codec_test.go
@@ -2,12 +2,12 @@ package types_test
import (
codectypes "github.com/cosmos/cosmos-sdk/codec/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
- ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
- localhosttypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/09-localhost/types"
- ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+ "github.com/cosmos/ibc-go/core/02-client/types"
+ commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
+ "github.com/cosmos/ibc-go/core/exported"
+ ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
+ localhosttypes "github.com/cosmos/ibc-go/light-clients/09-localhost/types"
+ ibctesting "github.com/cosmos/ibc-go/testing"
)
type caseAny struct {
diff --git a/core/02-client/types/encoding.go b/core/02-client/types/encoding.go
index a912b13a..6e9cb07b 100644
--- a/core/02-client/types/encoding.go
+++ b/core/02-client/types/encoding.go
@@ -4,7 +4,7 @@ import (
"fmt"
"github.com/cosmos/cosmos-sdk/codec"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ "github.com/cosmos/ibc-go/core/exported"
)
// MustUnmarshalClientState attempts to decode and return an ClientState object from
diff --git a/core/02-client/types/events.go b/core/02-client/types/events.go
index d0760ba8..47aeda7a 100644
--- a/core/02-client/types/events.go
+++ b/core/02-client/types/events.go
@@ -3,7 +3,7 @@ package types
import (
"fmt"
- host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+ host "github.com/cosmos/ibc-go/core/24-host"
)
// IBC client events
diff --git a/core/02-client/types/genesis.go b/core/02-client/types/genesis.go
index 3f197208..e18059b1 100644
--- a/core/02-client/types/genesis.go
+++ b/core/02-client/types/genesis.go
@@ -5,8 +5,8 @@ import (
"sort"
codectypes "github.com/cosmos/cosmos-sdk/codec/types"
- host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ host "github.com/cosmos/ibc-go/core/24-host"
+ "github.com/cosmos/ibc-go/core/exported"
)
var (
diff --git a/core/02-client/types/genesis_test.go b/core/02-client/types/genesis_test.go
index d57b8d1b..c50f5a30 100644
--- a/core/02-client/types/genesis_test.go
+++ b/core/02-client/types/genesis_test.go
@@ -5,15 +5,15 @@ import (
tmtypes "github.com/tendermint/tendermint/types"
- client "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
- commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
- ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
- localhosttypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/09-localhost/types"
- ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
- ibctestingmock "github.com/cosmos/cosmos-sdk/x/ibc/testing/mock"
+ client "github.com/cosmos/ibc-go/core/02-client"
+ "github.com/cosmos/ibc-go/core/02-client/types"
+ channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
+ commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
+ "github.com/cosmos/ibc-go/core/exported"
+ ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
+ localhosttypes "github.com/cosmos/ibc-go/light-clients/09-localhost/types"
+ ibctesting "github.com/cosmos/ibc-go/testing"
+ ibctestingmock "github.com/cosmos/ibc-go/testing/mock"
)
const (
diff --git a/core/02-client/types/height.go b/core/02-client/types/height.go
index 4216d54e..9e19b92c 100644
--- a/core/02-client/types/height.go
+++ b/core/02-client/types/height.go
@@ -9,7 +9,7 @@ import (
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ "github.com/cosmos/ibc-go/core/exported"
)
var _ exported.Height = (*Height)(nil)
diff --git a/core/02-client/types/height_test.go b/core/02-client/types/height_test.go
index a455b7f5..ca8c0092 100644
--- a/core/02-client/types/height_test.go
+++ b/core/02-client/types/height_test.go
@@ -6,7 +6,7 @@ import (
"github.com/stretchr/testify/require"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ "github.com/cosmos/ibc-go/core/02-client/types"
)
func TestZeroHeight(t *testing.T) {
diff --git a/core/02-client/types/keys.go b/core/02-client/types/keys.go
index 321f5e3f..58e01c88 100644
--- a/core/02-client/types/keys.go
+++ b/core/02-client/types/keys.go
@@ -7,7 +7,7 @@ import (
"strings"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+ host "github.com/cosmos/ibc-go/core/24-host"
)
const (
diff --git a/core/02-client/types/keys_test.go b/core/02-client/types/keys_test.go
index 49381452..b9188d0e 100644
--- a/core/02-client/types/keys_test.go
+++ b/core/02-client/types/keys_test.go
@@ -6,7 +6,7 @@ import (
"github.com/stretchr/testify/require"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+ "github.com/cosmos/ibc-go/core/02-client/types"
)
// tests ParseClientIdentifier and IsValidClientID
diff --git a/core/02-client/types/msgs.go b/core/02-client/types/msgs.go
index 1e884123..1092668c 100644
--- a/core/02-client/types/msgs.go
+++ b/core/02-client/types/msgs.go
@@ -4,8 +4,8 @@ import (
codectypes "github.com/cosmos/cosmos-sdk/codec/types"
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ host "github.com/cosmos/ibc-go/core/24-host"
+ "github.com/cosmos/ibc-go/core/exported"
)
// message types for the IBC client
diff --git a/core/02-client/types/msgs_test.go b/core/02-client/types/msgs_test.go
index e42725ba..7a5aa8db 100644
--- a/core/02-client/types/msgs_test.go
+++ b/core/02-client/types/msgs_test.go
@@ -7,12 +7,12 @@ import (
"github.com/golang/protobuf/proto"
"github.com/stretchr/testify/suite"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
- solomachinetypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/06-solomachine/types"
- ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
- ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+ "github.com/cosmos/ibc-go/core/02-client/types"
+ commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
+ "github.com/cosmos/ibc-go/core/exported"
+ solomachinetypes "github.com/cosmos/ibc-go/light-clients/06-solomachine/types"
+ ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
+ ibctesting "github.com/cosmos/ibc-go/testing"
)
type TypesTestSuite struct {
diff --git a/core/02-client/types/params.go b/core/02-client/types/params.go
index 6477e3f6..a652aa1a 100644
--- a/core/02-client/types/params.go
+++ b/core/02-client/types/params.go
@@ -4,7 +4,7 @@ import (
"fmt"
"strings"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ "github.com/cosmos/ibc-go/core/exported"
paramtypes "github.com/cosmos/cosmos-sdk/x/params/types"
)
diff --git a/core/02-client/types/params_test.go b/core/02-client/types/params_test.go
index dac80a4b..d29a864b 100644
--- a/core/02-client/types/params_test.go
+++ b/core/02-client/types/params_test.go
@@ -5,7 +5,7 @@ import (
"github.com/stretchr/testify/require"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ "github.com/cosmos/ibc-go/core/exported"
)
func TestValidateParams(t *testing.T) {
diff --git a/core/02-client/types/proposal_test.go b/core/02-client/types/proposal_test.go
index 597e5cf8..f53d891b 100644
--- a/core/02-client/types/proposal_test.go
+++ b/core/02-client/types/proposal_test.go
@@ -4,9 +4,9 @@ import (
"github.com/cosmos/cosmos-sdk/codec"
codectypes "github.com/cosmos/cosmos-sdk/codec/types"
govtypes "github.com/cosmos/cosmos-sdk/x/gov/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
- ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+ "github.com/cosmos/ibc-go/core/02-client/types"
+ "github.com/cosmos/ibc-go/core/exported"
+ ibctesting "github.com/cosmos/ibc-go/testing"
)
func (suite *TypesTestSuite) TestValidateBasic() {
diff --git a/core/02-client/types/query.go b/core/02-client/types/query.go
index c46bbfcf..2794d8aa 100644
--- a/core/02-client/types/query.go
+++ b/core/02-client/types/query.go
@@ -2,7 +2,7 @@ package types
import (
codectypes "github.com/cosmos/cosmos-sdk/codec/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ "github.com/cosmos/ibc-go/core/exported"
)
var (
diff --git a/core/03-connection/client/cli/cli.go b/core/03-connection/client/cli/cli.go
index 01bb6f9b..05c3770c 100644
--- a/core/03-connection/client/cli/cli.go
+++ b/core/03-connection/client/cli/cli.go
@@ -4,7 +4,7 @@ import (
"github.com/spf13/cobra"
"github.com/cosmos/cosmos-sdk/client"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
+ "github.com/cosmos/ibc-go/core/03-connection/types"
)
// GetQueryCmd returns the query commands for IBC connections
diff --git a/core/03-connection/client/cli/query.go b/core/03-connection/client/cli/query.go
index 21c4bd8f..5771063c 100644
--- a/core/03-connection/client/cli/query.go
+++ b/core/03-connection/client/cli/query.go
@@ -8,9 +8,9 @@ import (
"github.com/cosmos/cosmos-sdk/client"
"github.com/cosmos/cosmos-sdk/client/flags"
"github.com/cosmos/cosmos-sdk/version"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/client/utils"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
- host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+ "github.com/cosmos/ibc-go/core/03-connection/client/utils"
+ "github.com/cosmos/ibc-go/core/03-connection/types"
+ host "github.com/cosmos/ibc-go/core/24-host"
)
// GetCmdQueryConnections defines the command to query all the connection ends
diff --git a/core/03-connection/client/cli/tx.go b/core/03-connection/client/cli/tx.go
index 68b1a620..68115ec0 100644
--- a/core/03-connection/client/cli/tx.go
+++ b/core/03-connection/client/cli/tx.go
@@ -14,10 +14,10 @@ import (
"github.com/cosmos/cosmos-sdk/codec"
"github.com/cosmos/cosmos-sdk/types/msgservice"
"github.com/cosmos/cosmos-sdk/version"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/client/utils"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
- host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ "github.com/cosmos/ibc-go/core/03-connection/client/utils"
+ "github.com/cosmos/ibc-go/core/03-connection/types"
+ host "github.com/cosmos/ibc-go/core/24-host"
)
const (
diff --git a/core/03-connection/client/utils/utils.go b/core/03-connection/client/utils/utils.go
index e1eb1ce0..035fb508 100644
--- a/core/03-connection/client/utils/utils.go
+++ b/core/03-connection/client/utils/utils.go
@@ -10,13 +10,13 @@ import (
"github.com/cosmos/cosmos-sdk/client"
"github.com/cosmos/cosmos-sdk/codec"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- clientutils "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/client/utils"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
- commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
- host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
- ibcclient "github.com/cosmos/cosmos-sdk/x/ibc/core/client"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ clientutils "github.com/cosmos/ibc-go/core/02-client/client/utils"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ "github.com/cosmos/ibc-go/core/03-connection/types"
+ commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
+ host "github.com/cosmos/ibc-go/core/24-host"
+ ibcclient "github.com/cosmos/ibc-go/core/client"
+ "github.com/cosmos/ibc-go/core/exported"
)
// QueryConnection returns a connection end.
diff --git a/core/03-connection/genesis.go b/core/03-connection/genesis.go
index a1bb30f1..c97dcc40 100644
--- a/core/03-connection/genesis.go
+++ b/core/03-connection/genesis.go
@@ -2,8 +2,8 @@ package connection
import (
sdk "github.com/cosmos/cosmos-sdk/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/keeper"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
+ "github.com/cosmos/ibc-go/core/03-connection/keeper"
+ "github.com/cosmos/ibc-go/core/03-connection/types"
)
// InitGenesis initializes the ibc connection submodule's state from a provided genesis
diff --git a/core/03-connection/keeper/grpc_query.go b/core/03-connection/keeper/grpc_query.go
index 62b1c00a..e8399f4e 100644
--- a/core/03-connection/keeper/grpc_query.go
+++ b/core/03-connection/keeper/grpc_query.go
@@ -10,9 +10,9 @@ import (
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
"github.com/cosmos/cosmos-sdk/types/query"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
- host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ "github.com/cosmos/ibc-go/core/03-connection/types"
+ host "github.com/cosmos/ibc-go/core/24-host"
)
var _ types.QueryServer = Keeper{}
diff --git a/core/03-connection/keeper/grpc_query_test.go b/core/03-connection/keeper/grpc_query_test.go
index 14fdb425..d0f45308 100644
--- a/core/03-connection/keeper/grpc_query_test.go
+++ b/core/03-connection/keeper/grpc_query_test.go
@@ -5,11 +5,11 @@ import (
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/cosmos/cosmos-sdk/types/query"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
- channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
- ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ "github.com/cosmos/ibc-go/core/03-connection/types"
+ channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
+ "github.com/cosmos/ibc-go/core/exported"
+ ibctesting "github.com/cosmos/ibc-go/testing"
)
func (suite *KeeperTestSuite) TestQueryConnection() {
diff --git a/core/03-connection/keeper/handshake.go b/core/03-connection/keeper/handshake.go
index b8f7466f..fe2715c6 100644
--- a/core/03-connection/keeper/handshake.go
+++ b/core/03-connection/keeper/handshake.go
@@ -8,10 +8,10 @@ import (
"github.com/cosmos/cosmos-sdk/telemetry"
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
- commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ "github.com/cosmos/ibc-go/core/03-connection/types"
+ commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
+ "github.com/cosmos/ibc-go/core/exported"
)
// ConnOpenInit initialises a connection attempt on chain A. The generated connection identifier
diff --git a/core/03-connection/keeper/handshake_test.go b/core/03-connection/keeper/handshake_test.go
index 101c061a..9cad93d6 100644
--- a/core/03-connection/keeper/handshake_test.go
+++ b/core/03-connection/keeper/handshake_test.go
@@ -3,11 +3,11 @@ package keeper_test
import (
"time"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
- host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
- ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ "github.com/cosmos/ibc-go/core/03-connection/types"
+ host "github.com/cosmos/ibc-go/core/24-host"
+ "github.com/cosmos/ibc-go/core/exported"
+ ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
)
// TestConnOpenInit - chainA initializes (INIT state) a connection with
diff --git a/core/03-connection/keeper/keeper.go b/core/03-connection/keeper/keeper.go
index 66372686..1dfea3d5 100644
--- a/core/03-connection/keeper/keeper.go
+++ b/core/03-connection/keeper/keeper.go
@@ -6,11 +6,11 @@ import (
"github.com/cosmos/cosmos-sdk/codec"
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
- commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
- host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ "github.com/cosmos/ibc-go/core/03-connection/types"
+ commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
+ host "github.com/cosmos/ibc-go/core/24-host"
+ "github.com/cosmos/ibc-go/core/exported"
)
// Keeper defines the IBC connection keeper
diff --git a/core/03-connection/keeper/keeper_test.go b/core/03-connection/keeper/keeper_test.go
index f2a1124b..a2c30e44 100644
--- a/core/03-connection/keeper/keeper_test.go
+++ b/core/03-connection/keeper/keeper_test.go
@@ -6,9 +6,9 @@ import (
"github.com/stretchr/testify/suite"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
- ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+ "github.com/cosmos/ibc-go/core/03-connection/types"
+ "github.com/cosmos/ibc-go/core/exported"
+ ibctesting "github.com/cosmos/ibc-go/testing"
)
type KeeperTestSuite struct {
diff --git a/core/03-connection/keeper/verify.go b/core/03-connection/keeper/verify.go
index ddb1ea6b..c8b57ea6 100644
--- a/core/03-connection/keeper/verify.go
+++ b/core/03-connection/keeper/verify.go
@@ -3,8 +3,8 @@ package keeper
import (
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ "github.com/cosmos/ibc-go/core/exported"
)
// VerifyClientState verifies a proof of a client state of the running machine
diff --git a/core/03-connection/keeper/verify_test.go b/core/03-connection/keeper/verify_test.go
index 2d94955d..f9a71e29 100644
--- a/core/03-connection/keeper/verify_test.go
+++ b/core/03-connection/keeper/verify_test.go
@@ -4,14 +4,14 @@ import (
"fmt"
"time"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
- channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
- host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
- ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
- ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
- ibcmock "github.com/cosmos/cosmos-sdk/x/ibc/testing/mock"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ "github.com/cosmos/ibc-go/core/03-connection/types"
+ channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
+ host "github.com/cosmos/ibc-go/core/24-host"
+ "github.com/cosmos/ibc-go/core/exported"
+ ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
+ ibctesting "github.com/cosmos/ibc-go/testing"
+ ibcmock "github.com/cosmos/ibc-go/testing/mock"
)
var defaultTimeoutHeight = clienttypes.NewHeight(0, 100000)
diff --git a/core/03-connection/module.go b/core/03-connection/module.go
index 6100caa4..dc3432cb 100644
--- a/core/03-connection/module.go
+++ b/core/03-connection/module.go
@@ -4,8 +4,8 @@ import (
"github.com/gogo/protobuf/grpc"
"github.com/spf13/cobra"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/client/cli"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
+ "github.com/cosmos/ibc-go/core/03-connection/client/cli"
+ "github.com/cosmos/ibc-go/core/03-connection/types"
)
// Name returns the IBC connection ICS name.
diff --git a/core/03-connection/simulation/decoder.go b/core/03-connection/simulation/decoder.go
index ef988a10..95766356 100644
--- a/core/03-connection/simulation/decoder.go
+++ b/core/03-connection/simulation/decoder.go
@@ -6,8 +6,8 @@ import (
"github.com/cosmos/cosmos-sdk/codec"
"github.com/cosmos/cosmos-sdk/types/kv"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
- host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+ "github.com/cosmos/ibc-go/core/03-connection/types"
+ host "github.com/cosmos/ibc-go/core/24-host"
)
// NewDecodeStore returns a decoder function closure that unmarshals the KVPair's
diff --git a/core/03-connection/simulation/decoder_test.go b/core/03-connection/simulation/decoder_test.go
index 673bf640..1d670299 100644
--- a/core/03-connection/simulation/decoder_test.go
+++ b/core/03-connection/simulation/decoder_test.go
@@ -8,9 +8,9 @@ import (
"github.com/cosmos/cosmos-sdk/simapp"
"github.com/cosmos/cosmos-sdk/types/kv"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/simulation"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
- host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+ "github.com/cosmos/ibc-go/core/03-connection/simulation"
+ "github.com/cosmos/ibc-go/core/03-connection/types"
+ host "github.com/cosmos/ibc-go/core/24-host"
)
func TestDecodeStore(t *testing.T) {
diff --git a/core/03-connection/simulation/genesis.go b/core/03-connection/simulation/genesis.go
index 43b08237..4f20cb73 100644
--- a/core/03-connection/simulation/genesis.go
+++ b/core/03-connection/simulation/genesis.go
@@ -4,7 +4,7 @@ import (
"math/rand"
simtypes "github.com/cosmos/cosmos-sdk/types/simulation"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
+ "github.com/cosmos/ibc-go/core/03-connection/types"
)
// GenConnectionGenesis returns the default connection genesis state.
diff --git a/core/03-connection/types/codec.go b/core/03-connection/types/codec.go
index 6105fa9e..960f259a 100644
--- a/core/03-connection/types/codec.go
+++ b/core/03-connection/types/codec.go
@@ -5,7 +5,7 @@ import (
codectypes "github.com/cosmos/cosmos-sdk/codec/types"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/cosmos/cosmos-sdk/types/msgservice"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ "github.com/cosmos/ibc-go/core/exported"
)
// RegisterInterfaces register the ibc interfaces submodule implementations to protobuf
diff --git a/core/03-connection/types/connection.go b/core/03-connection/types/connection.go
index 197af83c..5eed1958 100644
--- a/core/03-connection/types/connection.go
+++ b/core/03-connection/types/connection.go
@@ -2,9 +2,9 @@ package types
import (
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
- host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
+ host "github.com/cosmos/ibc-go/core/24-host"
+ "github.com/cosmos/ibc-go/core/exported"
)
var _ exported.ConnectionI = (*ConnectionEnd)(nil)
diff --git a/core/03-connection/types/connection_test.go b/core/03-connection/types/connection_test.go
index e7e91538..78390bf0 100644
--- a/core/03-connection/types/connection_test.go
+++ b/core/03-connection/types/connection_test.go
@@ -5,10 +5,10 @@ import (
"github.com/stretchr/testify/require"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
- commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
- ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ "github.com/cosmos/ibc-go/core/03-connection/types"
+ commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
+ ibctesting "github.com/cosmos/ibc-go/testing"
)
var (
diff --git a/core/03-connection/types/events.go b/core/03-connection/types/events.go
index 3cb5997b..dbbb69e0 100644
--- a/core/03-connection/types/events.go
+++ b/core/03-connection/types/events.go
@@ -3,7 +3,7 @@ package types
import (
"fmt"
- host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+ host "github.com/cosmos/ibc-go/core/24-host"
)
// IBC connection events
diff --git a/core/03-connection/types/expected_keepers.go b/core/03-connection/types/expected_keepers.go
index 9fc99586..a3e5446a 100644
--- a/core/03-connection/types/expected_keepers.go
+++ b/core/03-connection/types/expected_keepers.go
@@ -2,7 +2,7 @@ package types
import (
sdk "github.com/cosmos/cosmos-sdk/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ "github.com/cosmos/ibc-go/core/exported"
)
// ClientKeeper expected account IBC client keeper
diff --git a/core/03-connection/types/genesis.go b/core/03-connection/types/genesis.go
index b10c300a..677f9a94 100644
--- a/core/03-connection/types/genesis.go
+++ b/core/03-connection/types/genesis.go
@@ -3,7 +3,7 @@ package types
import (
"fmt"
- host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+ host "github.com/cosmos/ibc-go/core/24-host"
)
// NewConnectionPaths creates a ConnectionPaths instance.
diff --git a/core/03-connection/types/genesis_test.go b/core/03-connection/types/genesis_test.go
index 846837f9..104147be 100644
--- a/core/03-connection/types/genesis_test.go
+++ b/core/03-connection/types/genesis_test.go
@@ -5,9 +5,9 @@ import (
"github.com/stretchr/testify/require"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
- commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
- ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+ "github.com/cosmos/ibc-go/core/03-connection/types"
+ commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
+ ibctesting "github.com/cosmos/ibc-go/testing"
)
func TestValidateGenesis(t *testing.T) {
diff --git a/core/03-connection/types/keys.go b/core/03-connection/types/keys.go
index 65af565c..a06039eb 100644
--- a/core/03-connection/types/keys.go
+++ b/core/03-connection/types/keys.go
@@ -5,7 +5,7 @@ import (
"regexp"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+ host "github.com/cosmos/ibc-go/core/24-host"
)
const (
diff --git a/core/03-connection/types/keys_test.go b/core/03-connection/types/keys_test.go
index 6adb8090..0650aed1 100644
--- a/core/03-connection/types/keys_test.go
+++ b/core/03-connection/types/keys_test.go
@@ -6,7 +6,7 @@ import (
"github.com/stretchr/testify/require"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
+ "github.com/cosmos/ibc-go/core/03-connection/types"
)
// tests ParseConnectionSequence and IsValidConnectionID
diff --git a/core/03-connection/types/msgs.go b/core/03-connection/types/msgs.go
index 3ba1aed8..797ad31e 100644
--- a/core/03-connection/types/msgs.go
+++ b/core/03-connection/types/msgs.go
@@ -4,10 +4,10 @@ import (
codectypes "github.com/cosmos/cosmos-sdk/codec/types"
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
- host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
+ host "github.com/cosmos/ibc-go/core/24-host"
+ "github.com/cosmos/ibc-go/core/exported"
)
var (
diff --git a/core/03-connection/types/msgs_test.go b/core/03-connection/types/msgs_test.go
index 6aff3b09..627cdab2 100644
--- a/core/03-connection/types/msgs_test.go
+++ b/core/03-connection/types/msgs_test.go
@@ -15,11 +15,11 @@ import (
"github.com/cosmos/cosmos-sdk/store/rootmulti"
storetypes "github.com/cosmos/cosmos-sdk/store/types"
sdk "github.com/cosmos/cosmos-sdk/types"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
- commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
- ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
- ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ "github.com/cosmos/ibc-go/core/03-connection/types"
+ commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
+ ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
+ ibctesting "github.com/cosmos/ibc-go/testing"
)
var (
diff --git a/core/03-connection/types/query.go b/core/03-connection/types/query.go
index 7661b38d..f182c2b5 100644
--- a/core/03-connection/types/query.go
+++ b/core/03-connection/types/query.go
@@ -2,8 +2,8 @@ package types
import (
codectypes "github.com/cosmos/cosmos-sdk/codec/types"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ "github.com/cosmos/ibc-go/core/exported"
)
var (
diff --git a/core/03-connection/types/version.go b/core/03-connection/types/version.go
index 10c5b33d..97f1a11a 100644
--- a/core/03-connection/types/version.go
+++ b/core/03-connection/types/version.go
@@ -4,7 +4,7 @@ import (
"strings"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ "github.com/cosmos/ibc-go/core/exported"
)
var (
diff --git a/core/03-connection/types/version_test.go b/core/03-connection/types/version_test.go
index 8f882dd3..cf0e73da 100644
--- a/core/03-connection/types/version_test.go
+++ b/core/03-connection/types/version_test.go
@@ -5,9 +5,9 @@ import (
"github.com/stretchr/testify/require"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
- ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+ "github.com/cosmos/ibc-go/core/03-connection/types"
+ "github.com/cosmos/ibc-go/core/exported"
+ ibctesting "github.com/cosmos/ibc-go/testing"
)
func TestValidateVersion(t *testing.T) {
diff --git a/core/04-channel/client/cli/cli.go b/core/04-channel/client/cli/cli.go
index baf386fe..9c0ccb42 100644
--- a/core/04-channel/client/cli/cli.go
+++ b/core/04-channel/client/cli/cli.go
@@ -4,7 +4,7 @@ import (
"github.com/spf13/cobra"
"github.com/cosmos/cosmos-sdk/client"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+ "github.com/cosmos/ibc-go/core/04-channel/types"
)
// GetQueryCmd returns the query commands for IBC channels
diff --git a/core/04-channel/client/cli/query.go b/core/04-channel/client/cli/query.go
index 03df474f..e86e20c3 100644
--- a/core/04-channel/client/cli/query.go
+++ b/core/04-channel/client/cli/query.go
@@ -9,9 +9,9 @@ import (
"github.com/cosmos/cosmos-sdk/client"
"github.com/cosmos/cosmos-sdk/client/flags"
"github.com/cosmos/cosmos-sdk/version"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/client/utils"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
- host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+ "github.com/cosmos/ibc-go/core/04-channel/client/utils"
+ "github.com/cosmos/ibc-go/core/04-channel/types"
+ host "github.com/cosmos/ibc-go/core/24-host"
)
const (
diff --git a/core/04-channel/client/cli/tx.go b/core/04-channel/client/cli/tx.go
index 20afe622..f3ebaadc 100644
--- a/core/04-channel/client/cli/tx.go
+++ b/core/04-channel/client/cli/tx.go
@@ -10,10 +10,10 @@ import (
"github.com/cosmos/cosmos-sdk/client/flags"
"github.com/cosmos/cosmos-sdk/client/tx"
"github.com/cosmos/cosmos-sdk/types/msgservice"
- ibctransfertypes "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- connectionutils "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/client/utils"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+ ibctransfertypes "github.com/cosmos/ibc-go/apps/transfer/types"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ connectionutils "github.com/cosmos/ibc-go/core/03-connection/client/utils"
+ "github.com/cosmos/ibc-go/core/04-channel/types"
)
// IBC Channel flags
diff --git a/core/04-channel/client/utils/utils.go b/core/04-channel/client/utils/utils.go
index 167e05d0..ab58bea7 100644
--- a/core/04-channel/client/utils/utils.go
+++ b/core/04-channel/client/utils/utils.go
@@ -7,12 +7,12 @@ import (
"github.com/cosmos/cosmos-sdk/client"
"github.com/cosmos/cosmos-sdk/codec"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- clientutils "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/client/utils"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
- host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
- ibcclient "github.com/cosmos/cosmos-sdk/x/ibc/core/client"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ clientutils "github.com/cosmos/ibc-go/core/02-client/client/utils"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ "github.com/cosmos/ibc-go/core/04-channel/types"
+ host "github.com/cosmos/ibc-go/core/24-host"
+ ibcclient "github.com/cosmos/ibc-go/core/client"
+ "github.com/cosmos/ibc-go/core/exported"
)
// QueryChannel returns a channel end.
diff --git a/core/04-channel/genesis.go b/core/04-channel/genesis.go
index 07fad47d..9564e581 100644
--- a/core/04-channel/genesis.go
+++ b/core/04-channel/genesis.go
@@ -2,8 +2,8 @@ package channel
import (
sdk "github.com/cosmos/cosmos-sdk/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/keeper"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+ "github.com/cosmos/ibc-go/core/04-channel/keeper"
+ "github.com/cosmos/ibc-go/core/04-channel/types"
)
// InitGenesis initializes the ibc channel submodule's state from a provided genesis
diff --git a/core/04-channel/handler.go b/core/04-channel/handler.go
index 375c3526..59d29f09 100644
--- a/core/04-channel/handler.go
+++ b/core/04-channel/handler.go
@@ -4,8 +4,8 @@ import (
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/keeper"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+ "github.com/cosmos/ibc-go/core/04-channel/keeper"
+ "github.com/cosmos/ibc-go/core/04-channel/types"
)
// HandleMsgChannelOpenInit defines the sdk.Handler for MsgChannelOpenInit
diff --git a/core/04-channel/keeper/grpc_query.go b/core/04-channel/keeper/grpc_query.go
index 30df0a33..689d4f28 100644
--- a/core/04-channel/keeper/grpc_query.go
+++ b/core/04-channel/keeper/grpc_query.go
@@ -12,10 +12,10 @@ import (
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
"github.com/cosmos/cosmos-sdk/types/query"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
- host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ connectiontypes "github.com/cosmos/ibc-go/core/03-connection/types"
+ "github.com/cosmos/ibc-go/core/04-channel/types"
+ host "github.com/cosmos/ibc-go/core/24-host"
)
var _ types.QueryServer = (*Keeper)(nil)
diff --git a/core/04-channel/keeper/grpc_query_test.go b/core/04-channel/keeper/grpc_query_test.go
index 689c241c..9e0a7696 100644
--- a/core/04-channel/keeper/grpc_query_test.go
+++ b/core/04-channel/keeper/grpc_query_test.go
@@ -5,11 +5,11 @@ import (
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/cosmos/cosmos-sdk/types/query"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
- ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ connectiontypes "github.com/cosmos/ibc-go/core/03-connection/types"
+ "github.com/cosmos/ibc-go/core/04-channel/types"
+ "github.com/cosmos/ibc-go/core/exported"
+ ibctesting "github.com/cosmos/ibc-go/testing"
)
func (suite *KeeperTestSuite) TestQueryChannel() {
diff --git a/core/04-channel/keeper/handshake.go b/core/04-channel/keeper/handshake.go
index b7cff480..a3f8a238 100644
--- a/core/04-channel/keeper/handshake.go
+++ b/core/04-channel/keeper/handshake.go
@@ -5,11 +5,11 @@ import (
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"
- connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
- porttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/05-port/types"
- host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ connectiontypes "github.com/cosmos/ibc-go/core/03-connection/types"
+ "github.com/cosmos/ibc-go/core/04-channel/types"
+ porttypes "github.com/cosmos/ibc-go/core/05-port/types"
+ host "github.com/cosmos/ibc-go/core/24-host"
+ "github.com/cosmos/ibc-go/core/exported"
)
// CounterpartyHops returns the connection hops of the counterparty channel.
diff --git a/core/04-channel/keeper/handshake_test.go b/core/04-channel/keeper/handshake_test.go
index 120e1f8f..64a49f7e 100644
--- a/core/04-channel/keeper/handshake_test.go
+++ b/core/04-channel/keeper/handshake_test.go
@@ -4,12 +4,12 @@ import (
"fmt"
capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
- host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
- ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ connectiontypes "github.com/cosmos/ibc-go/core/03-connection/types"
+ "github.com/cosmos/ibc-go/core/04-channel/types"
+ host "github.com/cosmos/ibc-go/core/24-host"
+ "github.com/cosmos/ibc-go/core/exported"
+ ibctesting "github.com/cosmos/ibc-go/testing"
)
type testCase = struct {
diff --git a/core/04-channel/keeper/keeper.go b/core/04-channel/keeper/keeper.go
index 60452f31..930a6ec4 100644
--- a/core/04-channel/keeper/keeper.go
+++ b/core/04-channel/keeper/keeper.go
@@ -12,12 +12,12 @@ import (
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
capabilitykeeper "github.com/cosmos/cosmos-sdk/x/capability/keeper"
capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
- porttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/05-port/types"
- host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ connectiontypes "github.com/cosmos/ibc-go/core/03-connection/types"
+ "github.com/cosmos/ibc-go/core/04-channel/types"
+ porttypes "github.com/cosmos/ibc-go/core/05-port/types"
+ host "github.com/cosmos/ibc-go/core/24-host"
+ "github.com/cosmos/ibc-go/core/exported"
)
// Keeper defines the IBC channel keeper
diff --git a/core/04-channel/keeper/keeper_test.go b/core/04-channel/keeper/keeper_test.go
index a9b7dd6c..7bc07190 100644
--- a/core/04-channel/keeper/keeper_test.go
+++ b/core/04-channel/keeper/keeper_test.go
@@ -5,9 +5,9 @@ import (
"github.com/stretchr/testify/suite"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
- ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+ "github.com/cosmos/ibc-go/core/04-channel/types"
+ "github.com/cosmos/ibc-go/core/exported"
+ ibctesting "github.com/cosmos/ibc-go/testing"
)
// KeeperTestSuite is a testing suite to test keeper functions.
diff --git a/core/04-channel/keeper/packet.go b/core/04-channel/keeper/packet.go
index 49b59733..3cedbfe4 100644
--- a/core/04-channel/keeper/packet.go
+++ b/core/04-channel/keeper/packet.go
@@ -8,11 +8,11 @@ import (
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
- host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ connectiontypes "github.com/cosmos/ibc-go/core/03-connection/types"
+ "github.com/cosmos/ibc-go/core/04-channel/types"
+ host "github.com/cosmos/ibc-go/core/24-host"
+ "github.com/cosmos/ibc-go/core/exported"
)
// SendPacket is called by a module in order to send an IBC packet on a channel
diff --git a/core/04-channel/keeper/packet_test.go b/core/04-channel/keeper/packet_test.go
index 232e6875..91743d61 100644
--- a/core/04-channel/keeper/packet_test.go
+++ b/core/04-channel/keeper/packet_test.go
@@ -4,13 +4,13 @@ import (
"fmt"
capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
- host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
- ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
- ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
- ibcmock "github.com/cosmos/cosmos-sdk/x/ibc/testing/mock"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ "github.com/cosmos/ibc-go/core/04-channel/types"
+ host "github.com/cosmos/ibc-go/core/24-host"
+ "github.com/cosmos/ibc-go/core/exported"
+ ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
+ ibctesting "github.com/cosmos/ibc-go/testing"
+ ibcmock "github.com/cosmos/ibc-go/testing/mock"
)
var (
diff --git a/core/04-channel/keeper/timeout.go b/core/04-channel/keeper/timeout.go
index 1f3dac91..2b6e65ea 100644
--- a/core/04-channel/keeper/timeout.go
+++ b/core/04-channel/keeper/timeout.go
@@ -7,10 +7,10 @@ import (
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"
- connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
- host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ connectiontypes "github.com/cosmos/ibc-go/core/03-connection/types"
+ "github.com/cosmos/ibc-go/core/04-channel/types"
+ host "github.com/cosmos/ibc-go/core/24-host"
+ "github.com/cosmos/ibc-go/core/exported"
)
// TimeoutPacket is called by a module which originally attempted to send a
diff --git a/core/04-channel/keeper/timeout_test.go b/core/04-channel/keeper/timeout_test.go
index 640452e8..b7a34c73 100644
--- a/core/04-channel/keeper/timeout_test.go
+++ b/core/04-channel/keeper/timeout_test.go
@@ -4,11 +4,11 @@ import (
"fmt"
capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
- host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
- ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ "github.com/cosmos/ibc-go/core/04-channel/types"
+ host "github.com/cosmos/ibc-go/core/24-host"
+ "github.com/cosmos/ibc-go/core/exported"
+ ibctesting "github.com/cosmos/ibc-go/testing"
)
// TestTimeoutPacket tests the TimeoutPacket call on chainA by ensuring the timeout has passed
diff --git a/core/04-channel/module.go b/core/04-channel/module.go
index 569120ad..6a9aceac 100644
--- a/core/04-channel/module.go
+++ b/core/04-channel/module.go
@@ -4,8 +4,8 @@ import (
"github.com/gogo/protobuf/grpc"
"github.com/spf13/cobra"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/client/cli"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+ "github.com/cosmos/ibc-go/core/04-channel/client/cli"
+ "github.com/cosmos/ibc-go/core/04-channel/types"
)
// Name returns the IBC channel ICS name.
diff --git a/core/04-channel/simulation/decoder.go b/core/04-channel/simulation/decoder.go
index 809976cc..efdcf589 100644
--- a/core/04-channel/simulation/decoder.go
+++ b/core/04-channel/simulation/decoder.go
@@ -7,8 +7,8 @@ import (
"github.com/cosmos/cosmos-sdk/codec"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/cosmos/cosmos-sdk/types/kv"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
- host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+ "github.com/cosmos/ibc-go/core/04-channel/types"
+ host "github.com/cosmos/ibc-go/core/24-host"
)
// NewDecodeStore returns a decoder function closure that unmarshals the KVPair's
diff --git a/core/04-channel/simulation/decoder_test.go b/core/04-channel/simulation/decoder_test.go
index 5f2ba2f5..10cdcb0b 100644
--- a/core/04-channel/simulation/decoder_test.go
+++ b/core/04-channel/simulation/decoder_test.go
@@ -9,9 +9,9 @@ import (
"github.com/cosmos/cosmos-sdk/simapp"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/cosmos/cosmos-sdk/types/kv"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/simulation"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
- host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+ "github.com/cosmos/ibc-go/core/04-channel/simulation"
+ "github.com/cosmos/ibc-go/core/04-channel/types"
+ host "github.com/cosmos/ibc-go/core/24-host"
)
func TestDecodeStore(t *testing.T) {
diff --git a/core/04-channel/simulation/genesis.go b/core/04-channel/simulation/genesis.go
index ed339021..10850758 100644
--- a/core/04-channel/simulation/genesis.go
+++ b/core/04-channel/simulation/genesis.go
@@ -4,7 +4,7 @@ import (
"math/rand"
simtypes "github.com/cosmos/cosmos-sdk/types/simulation"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+ "github.com/cosmos/ibc-go/core/04-channel/types"
)
// GenChannelGenesis returns the default channel genesis state.
diff --git a/core/04-channel/types/channel.go b/core/04-channel/types/channel.go
index 8513a812..6036942f 100644
--- a/core/04-channel/types/channel.go
+++ b/core/04-channel/types/channel.go
@@ -5,8 +5,8 @@ import (
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ host "github.com/cosmos/ibc-go/core/24-host"
+ "github.com/cosmos/ibc-go/core/exported"
)
var (
diff --git a/core/04-channel/types/channel_test.go b/core/04-channel/types/channel_test.go
index 30fee444..3f42c5c0 100644
--- a/core/04-channel/types/channel_test.go
+++ b/core/04-channel/types/channel_test.go
@@ -5,7 +5,7 @@ import (
"github.com/stretchr/testify/require"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+ "github.com/cosmos/ibc-go/core/04-channel/types"
)
func TestChannelValidateBasic(t *testing.T) {
diff --git a/core/04-channel/types/codec.go b/core/04-channel/types/codec.go
index a74f0a7f..fb83e09c 100644
--- a/core/04-channel/types/codec.go
+++ b/core/04-channel/types/codec.go
@@ -5,7 +5,7 @@ import (
codectypes "github.com/cosmos/cosmos-sdk/codec/types"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/cosmos/cosmos-sdk/types/msgservice"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ "github.com/cosmos/ibc-go/core/exported"
)
// RegisterInterfaces registers the ibc channel submodule interfaces to protobuf
diff --git a/core/04-channel/types/events.go b/core/04-channel/types/events.go
index b9ddb305..36af818f 100644
--- a/core/04-channel/types/events.go
+++ b/core/04-channel/types/events.go
@@ -3,7 +3,7 @@ package types
import (
"fmt"
- host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+ host "github.com/cosmos/ibc-go/core/24-host"
)
// IBC channel events
diff --git a/core/04-channel/types/expected_keepers.go b/core/04-channel/types/expected_keepers.go
index d3b74b7e..d34167b2 100644
--- a/core/04-channel/types/expected_keepers.go
+++ b/core/04-channel/types/expected_keepers.go
@@ -3,8 +3,8 @@ package types
import (
sdk "github.com/cosmos/cosmos-sdk/types"
capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"
- connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ connectiontypes "github.com/cosmos/ibc-go/core/03-connection/types"
+ "github.com/cosmos/ibc-go/core/exported"
)
// ClientKeeper expected IBC client keeper
diff --git a/core/04-channel/types/genesis.go b/core/04-channel/types/genesis.go
index 2c431e97..6b0b2d3c 100644
--- a/core/04-channel/types/genesis.go
+++ b/core/04-channel/types/genesis.go
@@ -4,7 +4,7 @@ import (
"errors"
"fmt"
- host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+ host "github.com/cosmos/ibc-go/core/24-host"
)
// NewPacketState creates a new PacketState instance.
diff --git a/core/04-channel/types/genesis_test.go b/core/04-channel/types/genesis_test.go
index a0d21007..74e53f75 100644
--- a/core/04-channel/types/genesis_test.go
+++ b/core/04-channel/types/genesis_test.go
@@ -5,7 +5,7 @@ import (
"github.com/stretchr/testify/require"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+ "github.com/cosmos/ibc-go/core/04-channel/types"
)
const (
diff --git a/core/04-channel/types/keys.go b/core/04-channel/types/keys.go
index d3a6cde2..62f81341 100644
--- a/core/04-channel/types/keys.go
+++ b/core/04-channel/types/keys.go
@@ -5,7 +5,7 @@ import (
"regexp"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+ host "github.com/cosmos/ibc-go/core/24-host"
)
const (
diff --git a/core/04-channel/types/keys_test.go b/core/04-channel/types/keys_test.go
index 9bc6500b..0c3d67b8 100644
--- a/core/04-channel/types/keys_test.go
+++ b/core/04-channel/types/keys_test.go
@@ -5,7 +5,7 @@ import (
"github.com/stretchr/testify/require"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+ "github.com/cosmos/ibc-go/core/04-channel/types"
)
// tests ParseChannelSequence and IsValidChannelID
diff --git a/core/04-channel/types/msgs.go b/core/04-channel/types/msgs.go
index da14a310..013ac9bf 100644
--- a/core/04-channel/types/msgs.go
+++ b/core/04-channel/types/msgs.go
@@ -5,9 +5,9 @@ import (
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
- host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
+ host "github.com/cosmos/ibc-go/core/24-host"
)
var _ sdk.Msg = &MsgChannelOpenInit{}
diff --git a/core/04-channel/types/msgs_test.go b/core/04-channel/types/msgs_test.go
index 9c27fd69..37bcbbbb 100644
--- a/core/04-channel/types/msgs_test.go
+++ b/core/04-channel/types/msgs_test.go
@@ -14,10 +14,10 @@ import (
"github.com/cosmos/cosmos-sdk/store/rootmulti"
storetypes "github.com/cosmos/cosmos-sdk/store/types"
sdk "github.com/cosmos/cosmos-sdk/types"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
- commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ "github.com/cosmos/ibc-go/core/04-channel/types"
+ commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
+ "github.com/cosmos/ibc-go/core/exported"
)
const (
diff --git a/core/04-channel/types/packet.go b/core/04-channel/types/packet.go
index b5c8d180..5f08223c 100644
--- a/core/04-channel/types/packet.go
+++ b/core/04-channel/types/packet.go
@@ -6,9 +6,9 @@ import (
"github.com/cosmos/cosmos-sdk/codec"
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ host "github.com/cosmos/ibc-go/core/24-host"
+ "github.com/cosmos/ibc-go/core/exported"
)
// CommitPacket returns the packet commitment bytes. The commitment consists of:
diff --git a/core/04-channel/types/packet_test.go b/core/04-channel/types/packet_test.go
index 12ed828e..d7a9878b 100644
--- a/core/04-channel/types/packet_test.go
+++ b/core/04-channel/types/packet_test.go
@@ -7,8 +7,8 @@ import (
"github.com/cosmos/cosmos-sdk/codec"
codectypes "github.com/cosmos/cosmos-sdk/codec/types"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ "github.com/cosmos/ibc-go/core/04-channel/types"
)
func TestCommitPacket(t *testing.T) {
diff --git a/core/04-channel/types/query.go b/core/04-channel/types/query.go
index d1536dfc..9a50900c 100644
--- a/core/04-channel/types/query.go
+++ b/core/04-channel/types/query.go
@@ -2,8 +2,8 @@ package types
import (
codectypes "github.com/cosmos/cosmos-sdk/codec/types"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ "github.com/cosmos/ibc-go/core/exported"
)
var (
diff --git a/core/05-port/keeper/keeper.go b/core/05-port/keeper/keeper.go
index 8a4b2300..31ba4c91 100644
--- a/core/05-port/keeper/keeper.go
+++ b/core/05-port/keeper/keeper.go
@@ -8,8 +8,8 @@ import (
sdk "github.com/cosmos/cosmos-sdk/types"
capabilitykeeper "github.com/cosmos/cosmos-sdk/x/capability/keeper"
capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/05-port/types"
- host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+ "github.com/cosmos/ibc-go/core/05-port/types"
+ host "github.com/cosmos/ibc-go/core/24-host"
)
// Keeper defines the IBC port keeper
diff --git a/core/05-port/keeper/keeper_test.go b/core/05-port/keeper/keeper_test.go
index 29c0e158..e27938a1 100644
--- a/core/05-port/keeper/keeper_test.go
+++ b/core/05-port/keeper/keeper_test.go
@@ -9,7 +9,7 @@ import (
"github.com/cosmos/cosmos-sdk/simapp"
sdk "github.com/cosmos/cosmos-sdk/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/05-port/keeper"
+ "github.com/cosmos/ibc-go/core/05-port/keeper"
)
var (
diff --git a/core/05-port/types/module.go b/core/05-port/types/module.go
index 4c686732..40a737e3 100644
--- a/core/05-port/types/module.go
+++ b/core/05-port/types/module.go
@@ -4,7 +4,7 @@ import (
sdk "github.com/cosmos/cosmos-sdk/types"
capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"
- channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+ channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
)
// IBCModule defines an interface that implements all the callbacks
diff --git a/core/23-commitment/types/codec.go b/core/23-commitment/types/codec.go
index 1195c7c2..11389f2d 100644
--- a/core/23-commitment/types/codec.go
+++ b/core/23-commitment/types/codec.go
@@ -2,7 +2,7 @@ package types
import (
codectypes "github.com/cosmos/cosmos-sdk/codec/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ "github.com/cosmos/ibc-go/core/exported"
)
// RegisterInterfaces registers the commitment interfaces to protobuf Any.
diff --git a/core/23-commitment/types/merkle.go b/core/23-commitment/types/merkle.go
index e90fccc3..706ba7df 100644
--- a/core/23-commitment/types/merkle.go
+++ b/core/23-commitment/types/merkle.go
@@ -10,7 +10,7 @@ import (
tmcrypto "github.com/tendermint/tendermint/proto/tendermint/crypto"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ "github.com/cosmos/ibc-go/core/exported"
)
// var representing the proofspecs for an SDK chain
diff --git a/core/23-commitment/types/merkle_test.go b/core/23-commitment/types/merkle_test.go
index 3c53847f..54016dd7 100644
--- a/core/23-commitment/types/merkle_test.go
+++ b/core/23-commitment/types/merkle_test.go
@@ -7,7 +7,7 @@ import (
"github.com/stretchr/testify/require"
abci "github.com/tendermint/tendermint/abci/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
+ "github.com/cosmos/ibc-go/core/23-commitment/types"
)
func (suite *MerkleTestSuite) TestVerifyMembership() {
diff --git a/core/23-commitment/types/utils_test.go b/core/23-commitment/types/utils_test.go
index f852fb6c..44513ac9 100644
--- a/core/23-commitment/types/utils_test.go
+++ b/core/23-commitment/types/utils_test.go
@@ -7,7 +7,7 @@ import (
abci "github.com/tendermint/tendermint/abci/types"
crypto "github.com/tendermint/tendermint/proto/tendermint/crypto"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
+ "github.com/cosmos/ibc-go/core/23-commitment/types"
)
func (suite *MerkleTestSuite) TestConvertProofs() {
diff --git a/core/24-host/keys.go b/core/24-host/keys.go
index 21f4bc43..81a4999b 100644
--- a/core/24-host/keys.go
+++ b/core/24-host/keys.go
@@ -3,7 +3,7 @@ package host
import (
"fmt"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ "github.com/cosmos/ibc-go/core/exported"
)
const (
diff --git a/core/24-host/parse_test.go b/core/24-host/parse_test.go
index 9f74bf5f..83c2a864 100644
--- a/core/24-host/parse_test.go
+++ b/core/24-host/parse_test.go
@@ -6,8 +6,8 @@ import (
"github.com/stretchr/testify/require"
- connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
- host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+ connectiontypes "github.com/cosmos/ibc-go/core/03-connection/types"
+ host "github.com/cosmos/ibc-go/core/24-host"
)
func TestParseIdentifier(t *testing.T) {
diff --git a/core/client/cli/cli.go b/core/client/cli/cli.go
index bda4123b..b1fced08 100644
--- a/core/client/cli/cli.go
+++ b/core/client/cli/cli.go
@@ -4,10 +4,10 @@ import (
"github.com/spf13/cobra"
"github.com/cosmos/cosmos-sdk/client"
- ibcclient "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client"
- connection "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection"
- channel "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel"
- host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+ ibcclient "github.com/cosmos/ibc-go/core/02-client"
+ connection "github.com/cosmos/ibc-go/core/03-connection"
+ channel "github.com/cosmos/ibc-go/core/04-channel"
+ host "github.com/cosmos/ibc-go/core/24-host"
)
// GetTxCmd returns the transaction commands for this module
diff --git a/core/client/query.go b/core/client/query.go
index 7055f1c7..72923d71 100644
--- a/core/client/query.go
+++ b/core/client/query.go
@@ -7,9 +7,9 @@ import (
"github.com/cosmos/cosmos-sdk/client"
"github.com/cosmos/cosmos-sdk/codec"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
- host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
+ host "github.com/cosmos/ibc-go/core/24-host"
)
// QueryTendermintProof performs an ABCI query with the given key and returns
diff --git a/core/genesis.go b/core/genesis.go
index 7d5d60b9..c7fa47cd 100644
--- a/core/genesis.go
+++ b/core/genesis.go
@@ -2,11 +2,11 @@ package ibc
import (
sdk "github.com/cosmos/cosmos-sdk/types"
- client "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client"
- connection "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection"
- channel "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/keeper"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/types"
+ client "github.com/cosmos/ibc-go/core/02-client"
+ connection "github.com/cosmos/ibc-go/core/03-connection"
+ channel "github.com/cosmos/ibc-go/core/04-channel"
+ "github.com/cosmos/ibc-go/core/keeper"
+ "github.com/cosmos/ibc-go/core/types"
)
// InitGenesis initializes the ibc state from a provided genesis
diff --git a/core/genesis_test.go b/core/genesis_test.go
index c29feef7..3e6e6003 100644
--- a/core/genesis_test.go
+++ b/core/genesis_test.go
@@ -9,16 +9,16 @@ import (
"github.com/cosmos/cosmos-sdk/codec"
"github.com/cosmos/cosmos-sdk/simapp"
- ibc "github.com/cosmos/cosmos-sdk/x/ibc/core"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
- channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
- commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/types"
- ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
- localhosttypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/09-localhost/types"
- ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+ ibc "github.com/cosmos/ibc-go/core"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ connectiontypes "github.com/cosmos/ibc-go/core/03-connection/types"
+ channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
+ commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
+ "github.com/cosmos/ibc-go/core/exported"
+ "github.com/cosmos/ibc-go/core/types"
+ ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
+ localhosttypes "github.com/cosmos/ibc-go/light-clients/09-localhost/types"
+ ibctesting "github.com/cosmos/ibc-go/testing"
)
const (
diff --git a/core/handler.go b/core/handler.go
index c8e4dfc8..040d9065 100644
--- a/core/handler.go
+++ b/core/handler.go
@@ -3,10 +3,10 @@ package ibc
import (
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
- channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/keeper"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ connectiontypes "github.com/cosmos/ibc-go/core/03-connection/types"
+ channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
+ "github.com/cosmos/ibc-go/core/keeper"
)
// NewHandler defines the IBC handler
diff --git a/core/keeper/grpc_query.go b/core/keeper/grpc_query.go
index f406d2e8..98eecb31 100644
--- a/core/keeper/grpc_query.go
+++ b/core/keeper/grpc_query.go
@@ -3,9 +3,9 @@ package keeper
import (
"context"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
- channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ connectiontypes "github.com/cosmos/ibc-go/core/03-connection/types"
+ channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
)
// ClientState implements the IBC QueryServer interface
diff --git a/core/keeper/keeper.go b/core/keeper/keeper.go
index 5f9abc38..47735993 100644
--- a/core/keeper/keeper.go
+++ b/core/keeper/keeper.go
@@ -4,13 +4,13 @@ import (
"github.com/cosmos/cosmos-sdk/codec"
sdk "github.com/cosmos/cosmos-sdk/types"
capabilitykeeper "github.com/cosmos/cosmos-sdk/x/capability/keeper"
- clientkeeper "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/keeper"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- connectionkeeper "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/keeper"
- channelkeeper "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/keeper"
- portkeeper "github.com/cosmos/cosmos-sdk/x/ibc/core/05-port/keeper"
- porttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/05-port/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/types"
+ clientkeeper "github.com/cosmos/ibc-go/core/02-client/keeper"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ connectionkeeper "github.com/cosmos/ibc-go/core/03-connection/keeper"
+ channelkeeper "github.com/cosmos/ibc-go/core/04-channel/keeper"
+ portkeeper "github.com/cosmos/ibc-go/core/05-port/keeper"
+ porttypes "github.com/cosmos/ibc-go/core/05-port/types"
+ "github.com/cosmos/ibc-go/core/types"
paramtypes "github.com/cosmos/cosmos-sdk/x/params/types"
)
diff --git a/core/keeper/msg_server.go b/core/keeper/msg_server.go
index dcddcaed..c2a8912c 100644
--- a/core/keeper/msg_server.go
+++ b/core/keeper/msg_server.go
@@ -8,11 +8,11 @@ import (
"github.com/cosmos/cosmos-sdk/telemetry"
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
- channel "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel"
- channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
- porttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/05-port/types"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ connectiontypes "github.com/cosmos/ibc-go/core/03-connection/types"
+ channel "github.com/cosmos/ibc-go/core/04-channel"
+ channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
+ porttypes "github.com/cosmos/ibc-go/core/05-port/types"
)
var _ clienttypes.MsgServer = Keeper{}
diff --git a/core/keeper/msg_server_test.go b/core/keeper/msg_server_test.go
index 1af4cdc1..1fd7d9e1 100644
--- a/core/keeper/msg_server_test.go
+++ b/core/keeper/msg_server_test.go
@@ -6,15 +6,15 @@ import (
"github.com/stretchr/testify/suite"
sdk "github.com/cosmos/cosmos-sdk/types"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
- commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
- host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/keeper"
- ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
- ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
- ibcmock "github.com/cosmos/cosmos-sdk/x/ibc/testing/mock"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
+ commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
+ host "github.com/cosmos/ibc-go/core/24-host"
+ "github.com/cosmos/ibc-go/core/exported"
+ "github.com/cosmos/ibc-go/core/keeper"
+ ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
+ ibctesting "github.com/cosmos/ibc-go/testing"
+ ibcmock "github.com/cosmos/ibc-go/testing/mock"
upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
)
diff --git a/core/module.go b/core/module.go
index 6527ab71..5907190c 100644
--- a/core/module.go
+++ b/core/module.go
@@ -18,15 +18,15 @@ import (
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/cosmos/cosmos-sdk/types/module"
simtypes "github.com/cosmos/cosmos-sdk/types/simulation"
- ibcclient "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
- channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
- host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/client/cli"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/keeper"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/simulation"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/types"
+ ibcclient "github.com/cosmos/ibc-go/core/02-client"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ connectiontypes "github.com/cosmos/ibc-go/core/03-connection/types"
+ channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
+ host "github.com/cosmos/ibc-go/core/24-host"
+ "github.com/cosmos/ibc-go/core/client/cli"
+ "github.com/cosmos/ibc-go/core/keeper"
+ "github.com/cosmos/ibc-go/core/simulation"
+ "github.com/cosmos/ibc-go/core/types"
)
var (
diff --git a/core/simulation/decoder.go b/core/simulation/decoder.go
index 459eebb8..8b4e3074 100644
--- a/core/simulation/decoder.go
+++ b/core/simulation/decoder.go
@@ -4,11 +4,11 @@ import (
"fmt"
"github.com/cosmos/cosmos-sdk/types/kv"
- clientsim "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/simulation"
- connectionsim "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/simulation"
- channelsim "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/simulation"
- host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/keeper"
+ clientsim "github.com/cosmos/ibc-go/core/02-client/simulation"
+ connectionsim "github.com/cosmos/ibc-go/core/03-connection/simulation"
+ channelsim "github.com/cosmos/ibc-go/core/04-channel/simulation"
+ host "github.com/cosmos/ibc-go/core/24-host"
+ "github.com/cosmos/ibc-go/core/keeper"
)
// NewDecodeStore returns a decoder function closure that unmarshals the KVPair's
diff --git a/core/simulation/decoder_test.go b/core/simulation/decoder_test.go
index 09515727..192dc9a8 100644
--- a/core/simulation/decoder_test.go
+++ b/core/simulation/decoder_test.go
@@ -8,12 +8,12 @@ import (
"github.com/cosmos/cosmos-sdk/simapp"
"github.com/cosmos/cosmos-sdk/types/kv"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
- channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
- host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/simulation"
- ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ connectiontypes "github.com/cosmos/ibc-go/core/03-connection/types"
+ channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
+ host "github.com/cosmos/ibc-go/core/24-host"
+ "github.com/cosmos/ibc-go/core/simulation"
+ ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
)
func TestDecodeStore(t *testing.T) {
diff --git a/core/simulation/genesis.go b/core/simulation/genesis.go
index d71f4492..7944e275 100644
--- a/core/simulation/genesis.go
+++ b/core/simulation/genesis.go
@@ -8,14 +8,14 @@ import (
"math/rand"
"github.com/cosmos/cosmos-sdk/types/module"
- clientsims "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/simulation"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- connectionsims "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/simulation"
- connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
- channelsims "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/simulation"
- channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
- host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/types"
+ clientsims "github.com/cosmos/ibc-go/core/02-client/simulation"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ connectionsims "github.com/cosmos/ibc-go/core/03-connection/simulation"
+ connectiontypes "github.com/cosmos/ibc-go/core/03-connection/types"
+ channelsims "github.com/cosmos/ibc-go/core/04-channel/simulation"
+ channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
+ host "github.com/cosmos/ibc-go/core/24-host"
+ "github.com/cosmos/ibc-go/core/types"
)
// Simulation parameter constants
diff --git a/core/simulation/genesis_test.go b/core/simulation/genesis_test.go
index 54aff75a..44b5549d 100644
--- a/core/simulation/genesis_test.go
+++ b/core/simulation/genesis_test.go
@@ -11,9 +11,9 @@ import (
codectypes "github.com/cosmos/cosmos-sdk/codec/types"
"github.com/cosmos/cosmos-sdk/types/module"
simtypes "github.com/cosmos/cosmos-sdk/types/simulation"
- host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/simulation"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/types"
+ host "github.com/cosmos/ibc-go/core/24-host"
+ "github.com/cosmos/ibc-go/core/simulation"
+ "github.com/cosmos/ibc-go/core/types"
)
// TestRandomizedGenState tests the normal scenario of applying RandomizedGenState.
diff --git a/core/types/codec.go b/core/types/codec.go
index db110ac9..16351c74 100644
--- a/core/types/codec.go
+++ b/core/types/codec.go
@@ -2,13 +2,13 @@ package types
import (
codectypes "github.com/cosmos/cosmos-sdk/codec/types"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
- channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
- commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
- solomachinetypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/06-solomachine/types"
- ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
- localhosttypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/09-localhost/types"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ connectiontypes "github.com/cosmos/ibc-go/core/03-connection/types"
+ channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
+ commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
+ solomachinetypes "github.com/cosmos/ibc-go/light-clients/06-solomachine/types"
+ ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
+ localhosttypes "github.com/cosmos/ibc-go/light-clients/09-localhost/types"
)
// RegisterInterfaces registers x/ibc interfaces into protobuf Any.
diff --git a/core/types/genesis.go b/core/types/genesis.go
index f7d78e5c..cd8051af 100644
--- a/core/types/genesis.go
+++ b/core/types/genesis.go
@@ -2,9 +2,9 @@ package types
import (
codectypes "github.com/cosmos/cosmos-sdk/codec/types"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
- channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ connectiontypes "github.com/cosmos/ibc-go/core/03-connection/types"
+ channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
)
var _ codectypes.UnpackInterfacesMessage = GenesisState{}
diff --git a/core/types/query.go b/core/types/query.go
index fba69b3a..bd7d2e83 100644
--- a/core/types/query.go
+++ b/core/types/query.go
@@ -3,12 +3,12 @@ package types
import (
"github.com/gogo/protobuf/grpc"
- client "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- connection "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection"
- connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
- channel "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel"
- channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+ client "github.com/cosmos/ibc-go/core/02-client"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ connection "github.com/cosmos/ibc-go/core/03-connection"
+ connectiontypes "github.com/cosmos/ibc-go/core/03-connection/types"
+ channel "github.com/cosmos/ibc-go/core/04-channel"
+ channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
)
// QueryServer defines the IBC interfaces that the gRPC query server must implement
diff --git a/light-clients/06-solomachine/module.go b/light-clients/06-solomachine/module.go
index bafbd015..bfc820b8 100644
--- a/light-clients/06-solomachine/module.go
+++ b/light-clients/06-solomachine/module.go
@@ -1,7 +1,7 @@
package solomachine
import (
- "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/06-solomachine/types"
+ "github.com/cosmos/ibc-go/light-clients/06-solomachine/types"
)
// Name returns the solo machine client name.
diff --git a/light-clients/06-solomachine/types/client_state.go b/light-clients/06-solomachine/types/client_state.go
index 24a6582f..5dfadd25 100644
--- a/light-clients/06-solomachine/types/client_state.go
+++ b/light-clients/06-solomachine/types/client_state.go
@@ -10,10 +10,10 @@ import (
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
"github.com/cosmos/cosmos-sdk/types/tx/signing"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
- host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
+ host "github.com/cosmos/ibc-go/core/24-host"
+ "github.com/cosmos/ibc-go/core/exported"
)
var _ exported.ClientState = (*ClientState)(nil)
diff --git a/light-clients/06-solomachine/types/client_state_test.go b/light-clients/06-solomachine/types/client_state_test.go
index 4f6c195c..88931bc5 100644
--- a/light-clients/06-solomachine/types/client_state_test.go
+++ b/light-clients/06-solomachine/types/client_state_test.go
@@ -1,14 +1,14 @@
package types_test
import (
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
- channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
- commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
- "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/06-solomachine/types"
- ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
- ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ connectiontypes "github.com/cosmos/ibc-go/core/03-connection/types"
+ channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
+ commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
+ "github.com/cosmos/ibc-go/core/exported"
+ "github.com/cosmos/ibc-go/light-clients/06-solomachine/types"
+ ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
+ ibctesting "github.com/cosmos/ibc-go/testing"
)
const (
diff --git a/light-clients/06-solomachine/types/codec.go b/light-clients/06-solomachine/types/codec.go
index 313a910c..5b82081f 100644
--- a/light-clients/06-solomachine/types/codec.go
+++ b/light-clients/06-solomachine/types/codec.go
@@ -5,8 +5,8 @@ import (
codectypes "github.com/cosmos/cosmos-sdk/codec/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
"github.com/cosmos/cosmos-sdk/types/tx/signing"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ "github.com/cosmos/ibc-go/core/exported"
)
// RegisterInterfaces register the ibc channel submodule interfaces to protobuf
diff --git a/light-clients/06-solomachine/types/codec_test.go b/light-clients/06-solomachine/types/codec_test.go
index 70be186a..68539aa8 100644
--- a/light-clients/06-solomachine/types/codec_test.go
+++ b/light-clients/06-solomachine/types/codec_test.go
@@ -1,11 +1,11 @@
package types_test
import (
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
- channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/06-solomachine/types"
- ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ connectiontypes "github.com/cosmos/ibc-go/core/03-connection/types"
+ channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
+ "github.com/cosmos/ibc-go/light-clients/06-solomachine/types"
+ ibctesting "github.com/cosmos/ibc-go/testing"
)
func (suite SoloMachineTestSuite) TestUnmarshalDataByType() {
diff --git a/light-clients/06-solomachine/types/consensus_state.go b/light-clients/06-solomachine/types/consensus_state.go
index 7d6d09cd..72efd980 100644
--- a/light-clients/06-solomachine/types/consensus_state.go
+++ b/light-clients/06-solomachine/types/consensus_state.go
@@ -5,8 +5,8 @@ import (
cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ "github.com/cosmos/ibc-go/core/exported"
)
var _ exported.ConsensusState = &ConsensusState{}
diff --git a/light-clients/06-solomachine/types/consensus_state_test.go b/light-clients/06-solomachine/types/consensus_state_test.go
index e0c22f95..d943b6ee 100644
--- a/light-clients/06-solomachine/types/consensus_state_test.go
+++ b/light-clients/06-solomachine/types/consensus_state_test.go
@@ -1,9 +1,9 @@
package types_test
import (
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
- "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/06-solomachine/types"
- ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+ "github.com/cosmos/ibc-go/core/exported"
+ "github.com/cosmos/ibc-go/light-clients/06-solomachine/types"
+ ibctesting "github.com/cosmos/ibc-go/testing"
)
func (suite *SoloMachineTestSuite) TestConsensusState() {
diff --git a/light-clients/06-solomachine/types/header.go b/light-clients/06-solomachine/types/header.go
index f9c5f176..384193cf 100644
--- a/light-clients/06-solomachine/types/header.go
+++ b/light-clients/06-solomachine/types/header.go
@@ -5,8 +5,8 @@ import (
cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ "github.com/cosmos/ibc-go/core/exported"
)
var _ exported.Header = &Header{}
diff --git a/light-clients/06-solomachine/types/header_test.go b/light-clients/06-solomachine/types/header_test.go
index a5ca45e8..65ca94ad 100644
--- a/light-clients/06-solomachine/types/header_test.go
+++ b/light-clients/06-solomachine/types/header_test.go
@@ -1,9 +1,9 @@
package types_test
import (
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
- "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/06-solomachine/types"
- ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+ "github.com/cosmos/ibc-go/core/exported"
+ "github.com/cosmos/ibc-go/light-clients/06-solomachine/types"
+ ibctesting "github.com/cosmos/ibc-go/testing"
)
func (suite *SoloMachineTestSuite) TestHeaderValidateBasic() {
diff --git a/light-clients/06-solomachine/types/misbehaviour.go b/light-clients/06-solomachine/types/misbehaviour.go
index f5b218cc..d0d9bfe3 100644
--- a/light-clients/06-solomachine/types/misbehaviour.go
+++ b/light-clients/06-solomachine/types/misbehaviour.go
@@ -4,9 +4,9 @@ import (
"bytes"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ host "github.com/cosmos/ibc-go/core/24-host"
+ "github.com/cosmos/ibc-go/core/exported"
)
var _ exported.Misbehaviour = &Misbehaviour{}
diff --git a/light-clients/06-solomachine/types/misbehaviour_handle.go b/light-clients/06-solomachine/types/misbehaviour_handle.go
index ce5d6351..2306c47f 100644
--- a/light-clients/06-solomachine/types/misbehaviour_handle.go
+++ b/light-clients/06-solomachine/types/misbehaviour_handle.go
@@ -4,8 +4,8 @@ import (
"github.com/cosmos/cosmos-sdk/codec"
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ "github.com/cosmos/ibc-go/core/exported"
)
// CheckMisbehaviourAndUpdateState determines whether or not the currently registered
diff --git a/light-clients/06-solomachine/types/misbehaviour_handle_test.go b/light-clients/06-solomachine/types/misbehaviour_handle_test.go
index 97ce22a3..50b7523a 100644
--- a/light-clients/06-solomachine/types/misbehaviour_handle_test.go
+++ b/light-clients/06-solomachine/types/misbehaviour_handle_test.go
@@ -1,10 +1,10 @@
package types_test
import (
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
- "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/06-solomachine/types"
- ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
- ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+ "github.com/cosmos/ibc-go/core/exported"
+ "github.com/cosmos/ibc-go/light-clients/06-solomachine/types"
+ ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
+ ibctesting "github.com/cosmos/ibc-go/testing"
)
func (suite *SoloMachineTestSuite) TestCheckMisbehaviourAndUpdateState() {
diff --git a/light-clients/06-solomachine/types/misbehaviour_test.go b/light-clients/06-solomachine/types/misbehaviour_test.go
index 7c1f9168..e8fc4d4f 100644
--- a/light-clients/06-solomachine/types/misbehaviour_test.go
+++ b/light-clients/06-solomachine/types/misbehaviour_test.go
@@ -1,9 +1,9 @@
package types_test
import (
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
- "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/06-solomachine/types"
- ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+ "github.com/cosmos/ibc-go/core/exported"
+ "github.com/cosmos/ibc-go/light-clients/06-solomachine/types"
+ ibctesting "github.com/cosmos/ibc-go/testing"
)
func (suite *SoloMachineTestSuite) TestMisbehaviour() {
diff --git a/light-clients/06-solomachine/types/proof.go b/light-clients/06-solomachine/types/proof.go
index 6c2e0b84..e4e1032e 100644
--- a/light-clients/06-solomachine/types/proof.go
+++ b/light-clients/06-solomachine/types/proof.go
@@ -6,11 +6,11 @@ import (
"github.com/cosmos/cosmos-sdk/crypto/types/multisig"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
"github.com/cosmos/cosmos-sdk/types/tx/signing"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
- channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
- commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ connectiontypes "github.com/cosmos/ibc-go/core/03-connection/types"
+ channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
+ commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
+ "github.com/cosmos/ibc-go/core/exported"
)
// VerifySignature verifies if the provided public key generated the signature
diff --git a/light-clients/06-solomachine/types/proof_test.go b/light-clients/06-solomachine/types/proof_test.go
index e2ba679a..43e06b15 100644
--- a/light-clients/06-solomachine/types/proof_test.go
+++ b/light-clients/06-solomachine/types/proof_test.go
@@ -3,9 +3,9 @@ package types_test
import (
cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types"
"github.com/cosmos/cosmos-sdk/types/tx/signing"
- "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/06-solomachine/types"
- solomachinetypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/06-solomachine/types"
- ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+ "github.com/cosmos/ibc-go/light-clients/06-solomachine/types"
+ solomachinetypes "github.com/cosmos/ibc-go/light-clients/06-solomachine/types"
+ ibctesting "github.com/cosmos/ibc-go/testing"
)
func (suite *SoloMachineTestSuite) TestVerifySignature() {
diff --git a/light-clients/06-solomachine/types/proposal_handle.go b/light-clients/06-solomachine/types/proposal_handle.go
index e38155b2..269a914a 100644
--- a/light-clients/06-solomachine/types/proposal_handle.go
+++ b/light-clients/06-solomachine/types/proposal_handle.go
@@ -6,8 +6,8 @@ import (
"github.com/cosmos/cosmos-sdk/codec"
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ "github.com/cosmos/ibc-go/core/exported"
)
// CheckSubstituteAndUpdateState verifies that the subject is allowed to be updated by
diff --git a/light-clients/06-solomachine/types/proposal_handle_test.go b/light-clients/06-solomachine/types/proposal_handle_test.go
index 0113da10..94f44c88 100644
--- a/light-clients/06-solomachine/types/proposal_handle_test.go
+++ b/light-clients/06-solomachine/types/proposal_handle_test.go
@@ -1,10 +1,10 @@
package types_test
import (
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
- "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/06-solomachine/types"
- ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
- ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+ "github.com/cosmos/ibc-go/core/exported"
+ "github.com/cosmos/ibc-go/light-clients/06-solomachine/types"
+ ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
+ ibctesting "github.com/cosmos/ibc-go/testing"
)
func (suite *SoloMachineTestSuite) TestCheckSubstituteAndUpdateState() {
diff --git a/light-clients/06-solomachine/types/solomachine.go b/light-clients/06-solomachine/types/solomachine.go
index d3936ef4..a49953a1 100644
--- a/light-clients/06-solomachine/types/solomachine.go
+++ b/light-clients/06-solomachine/types/solomachine.go
@@ -3,7 +3,7 @@ package types
import (
codectypes "github.com/cosmos/cosmos-sdk/codec/types"
cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ "github.com/cosmos/ibc-go/core/exported"
)
// Interface implementation checks.
diff --git a/light-clients/06-solomachine/types/solomachine_test.go b/light-clients/06-solomachine/types/solomachine_test.go
index 50555e45..deec20be 100644
--- a/light-clients/06-solomachine/types/solomachine_test.go
+++ b/light-clients/06-solomachine/types/solomachine_test.go
@@ -12,10 +12,10 @@ import (
cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types"
"github.com/cosmos/cosmos-sdk/testutil/testdata"
sdk "github.com/cosmos/cosmos-sdk/types"
- host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
- "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/06-solomachine/types"
- ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+ host "github.com/cosmos/ibc-go/core/24-host"
+ "github.com/cosmos/ibc-go/core/exported"
+ "github.com/cosmos/ibc-go/light-clients/06-solomachine/types"
+ ibctesting "github.com/cosmos/ibc-go/testing"
)
type SoloMachineTestSuite struct {
diff --git a/light-clients/06-solomachine/types/update.go b/light-clients/06-solomachine/types/update.go
index 4cf31fd9..5072d3b9 100644
--- a/light-clients/06-solomachine/types/update.go
+++ b/light-clients/06-solomachine/types/update.go
@@ -4,8 +4,8 @@ import (
"github.com/cosmos/cosmos-sdk/codec"
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ "github.com/cosmos/ibc-go/core/exported"
)
// CheckHeaderAndUpdateState checks if the provided header is valid and updates
diff --git a/light-clients/06-solomachine/types/update_test.go b/light-clients/06-solomachine/types/update_test.go
index e49992cb..e6170351 100644
--- a/light-clients/06-solomachine/types/update_test.go
+++ b/light-clients/06-solomachine/types/update_test.go
@@ -3,10 +3,10 @@ package types_test
import (
codectypes "github.com/cosmos/cosmos-sdk/codec/types"
sdk "github.com/cosmos/cosmos-sdk/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
- "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/06-solomachine/types"
- ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
- ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+ "github.com/cosmos/ibc-go/core/exported"
+ "github.com/cosmos/ibc-go/light-clients/06-solomachine/types"
+ ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
+ ibctesting "github.com/cosmos/ibc-go/testing"
)
func (suite *SoloMachineTestSuite) TestCheckHeaderAndUpdateState() {
diff --git a/light-clients/07-tendermint/module.go b/light-clients/07-tendermint/module.go
index 4c5cc2f9..0fe57fa9 100644
--- a/light-clients/07-tendermint/module.go
+++ b/light-clients/07-tendermint/module.go
@@ -1,7 +1,7 @@
package tendermint
import (
- "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
+ "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
)
// Name returns the IBC client name
diff --git a/light-clients/07-tendermint/types/client_state.go b/light-clients/07-tendermint/types/client_state.go
index c2bb5239..75503454 100644
--- a/light-clients/07-tendermint/types/client_state.go
+++ b/light-clients/07-tendermint/types/client_state.go
@@ -10,12 +10,12 @@ import (
"github.com/cosmos/cosmos-sdk/codec"
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
- channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
- commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
- host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ connectiontypes "github.com/cosmos/ibc-go/core/03-connection/types"
+ channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
+ commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
+ host "github.com/cosmos/ibc-go/core/24-host"
+ "github.com/cosmos/ibc-go/core/exported"
)
var _ exported.ClientState = (*ClientState)(nil)
diff --git a/light-clients/07-tendermint/types/client_state_test.go b/light-clients/07-tendermint/types/client_state_test.go
index 744b4729..84f98551 100644
--- a/light-clients/07-tendermint/types/client_state_test.go
+++ b/light-clients/07-tendermint/types/client_state_test.go
@@ -5,14 +5,14 @@ import (
ics23 "github.com/confio/ics23/go"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
- commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
- host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
- "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
- ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
- ibcmock "github.com/cosmos/cosmos-sdk/x/ibc/testing/mock"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
+ commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
+ host "github.com/cosmos/ibc-go/core/24-host"
+ "github.com/cosmos/ibc-go/core/exported"
+ "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
+ ibctesting "github.com/cosmos/ibc-go/testing"
+ ibcmock "github.com/cosmos/ibc-go/testing/mock"
)
const (
diff --git a/light-clients/07-tendermint/types/codec.go b/light-clients/07-tendermint/types/codec.go
index 5d876c8f..33911b81 100644
--- a/light-clients/07-tendermint/types/codec.go
+++ b/light-clients/07-tendermint/types/codec.go
@@ -2,7 +2,7 @@ package types
import (
codectypes "github.com/cosmos/cosmos-sdk/codec/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ "github.com/cosmos/ibc-go/core/exported"
)
// RegisterInterfaces registers the tendermint concrete client-related
diff --git a/light-clients/07-tendermint/types/consensus_state.go b/light-clients/07-tendermint/types/consensus_state.go
index adb469a3..775b0785 100644
--- a/light-clients/07-tendermint/types/consensus_state.go
+++ b/light-clients/07-tendermint/types/consensus_state.go
@@ -7,9 +7,9 @@ import (
tmtypes "github.com/tendermint/tendermint/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
+ "github.com/cosmos/ibc-go/core/exported"
)
// NewConsensusState creates a new ConsensusState instance.
diff --git a/light-clients/07-tendermint/types/consensus_state_test.go b/light-clients/07-tendermint/types/consensus_state_test.go
index 313815d0..5bcf8ec5 100644
--- a/light-clients/07-tendermint/types/consensus_state_test.go
+++ b/light-clients/07-tendermint/types/consensus_state_test.go
@@ -3,9 +3,9 @@ package types_test
import (
"time"
- commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
- "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
+ commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
+ "github.com/cosmos/ibc-go/core/exported"
+ "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
)
func (suite *TendermintTestSuite) TestConsensusStateValidateBasic() {
diff --git a/light-clients/07-tendermint/types/genesis.go b/light-clients/07-tendermint/types/genesis.go
index 7124643b..2c69d35d 100644
--- a/light-clients/07-tendermint/types/genesis.go
+++ b/light-clients/07-tendermint/types/genesis.go
@@ -2,8 +2,8 @@ package types
import (
sdk "github.com/cosmos/cosmos-sdk/types"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ "github.com/cosmos/ibc-go/core/exported"
)
// ExportMetadata exports all the processed times in the client store so they can be included in clients genesis
diff --git a/light-clients/07-tendermint/types/genesis_test.go b/light-clients/07-tendermint/types/genesis_test.go
index 5732151e..de7ce828 100644
--- a/light-clients/07-tendermint/types/genesis_test.go
+++ b/light-clients/07-tendermint/types/genesis_test.go
@@ -4,9 +4,9 @@ import (
"time"
sdk "github.com/cosmos/cosmos-sdk/types"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
+ "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
)
func (suite *TendermintTestSuite) TestExportMetadata() {
diff --git a/light-clients/07-tendermint/types/header.go b/light-clients/07-tendermint/types/header.go
index 0b9cfa1d..b346e6b2 100644
--- a/light-clients/07-tendermint/types/header.go
+++ b/light-clients/07-tendermint/types/header.go
@@ -7,9 +7,9 @@ import (
tmtypes "github.com/tendermint/tendermint/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
+ "github.com/cosmos/ibc-go/core/exported"
)
var _ exported.Header = &Header{}
diff --git a/light-clients/07-tendermint/types/header_test.go b/light-clients/07-tendermint/types/header_test.go
index 97647f86..a1a3222d 100644
--- a/light-clients/07-tendermint/types/header_test.go
+++ b/light-clients/07-tendermint/types/header_test.go
@@ -5,9 +5,9 @@ import (
tmprotocrypto "github.com/tendermint/tendermint/proto/tendermint/crypto"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
- "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ "github.com/cosmos/ibc-go/core/exported"
+ "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
)
func (suite *TendermintTestSuite) TestGetHeight() {
diff --git a/light-clients/07-tendermint/types/misbehaviour.go b/light-clients/07-tendermint/types/misbehaviour.go
index 340130d2..cc6c86b3 100644
--- a/light-clients/07-tendermint/types/misbehaviour.go
+++ b/light-clients/07-tendermint/types/misbehaviour.go
@@ -8,9 +8,9 @@ import (
tmtypes "github.com/tendermint/tendermint/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ host "github.com/cosmos/ibc-go/core/24-host"
+ "github.com/cosmos/ibc-go/core/exported"
)
var _ exported.Misbehaviour = &Misbehaviour{}
diff --git a/light-clients/07-tendermint/types/misbehaviour_handle.go b/light-clients/07-tendermint/types/misbehaviour_handle.go
index 4c55552d..c5380527 100644
--- a/light-clients/07-tendermint/types/misbehaviour_handle.go
+++ b/light-clients/07-tendermint/types/misbehaviour_handle.go
@@ -8,8 +8,8 @@ import (
"github.com/cosmos/cosmos-sdk/codec"
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ "github.com/cosmos/ibc-go/core/exported"
)
// CheckMisbehaviourAndUpdateState determines whether or not two conflicting
diff --git a/light-clients/07-tendermint/types/misbehaviour_handle_test.go b/light-clients/07-tendermint/types/misbehaviour_handle_test.go
index 3ca2e4dc..e5b94da3 100644
--- a/light-clients/07-tendermint/types/misbehaviour_handle_test.go
+++ b/light-clients/07-tendermint/types/misbehaviour_handle_test.go
@@ -7,12 +7,12 @@ import (
"github.com/tendermint/tendermint/crypto/tmhash"
tmtypes "github.com/tendermint/tendermint/types"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
- "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
- ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
- ibctestingmock "github.com/cosmos/cosmos-sdk/x/ibc/testing/mock"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
+ "github.com/cosmos/ibc-go/core/exported"
+ "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
+ ibctesting "github.com/cosmos/ibc-go/testing"
+ ibctestingmock "github.com/cosmos/ibc-go/testing/mock"
)
func (suite *TendermintTestSuite) TestCheckMisbehaviourAndUpdateState() {
diff --git a/light-clients/07-tendermint/types/misbehaviour_test.go b/light-clients/07-tendermint/types/misbehaviour_test.go
index dede4e60..4acb085e 100644
--- a/light-clients/07-tendermint/types/misbehaviour_test.go
+++ b/light-clients/07-tendermint/types/misbehaviour_test.go
@@ -7,11 +7,11 @@ import (
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
tmtypes "github.com/tendermint/tendermint/types"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
- "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
- ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
- ibctestingmock "github.com/cosmos/cosmos-sdk/x/ibc/testing/mock"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ "github.com/cosmos/ibc-go/core/exported"
+ "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
+ ibctesting "github.com/cosmos/ibc-go/testing"
+ ibctestingmock "github.com/cosmos/ibc-go/testing/mock"
)
func (suite *TendermintTestSuite) TestMisbehaviour() {
diff --git a/light-clients/07-tendermint/types/proposal_handle.go b/light-clients/07-tendermint/types/proposal_handle.go
index c64c52b3..080ee4c2 100644
--- a/light-clients/07-tendermint/types/proposal_handle.go
+++ b/light-clients/07-tendermint/types/proposal_handle.go
@@ -6,8 +6,8 @@ import (
"github.com/cosmos/cosmos-sdk/codec"
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ "github.com/cosmos/ibc-go/core/exported"
)
// CheckSubstituteAndUpdateState will try to update the client with the state of the
diff --git a/light-clients/07-tendermint/types/proposal_handle_test.go b/light-clients/07-tendermint/types/proposal_handle_test.go
index 66a51203..5baf621a 100644
--- a/light-clients/07-tendermint/types/proposal_handle_test.go
+++ b/light-clients/07-tendermint/types/proposal_handle_test.go
@@ -3,10 +3,10 @@ package types_test
import (
"time"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
- "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
- ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ "github.com/cosmos/ibc-go/core/exported"
+ "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
+ ibctesting "github.com/cosmos/ibc-go/testing"
)
var (
diff --git a/light-clients/07-tendermint/types/store.go b/light-clients/07-tendermint/types/store.go
index 7d6a841b..4c62eb95 100644
--- a/light-clients/07-tendermint/types/store.go
+++ b/light-clients/07-tendermint/types/store.go
@@ -6,9 +6,9 @@ import (
"github.com/cosmos/cosmos-sdk/codec"
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ host "github.com/cosmos/ibc-go/core/24-host"
+ "github.com/cosmos/ibc-go/core/exported"
)
// KeyProcessedTime is appended to consensus state key to store the processed time
diff --git a/light-clients/07-tendermint/types/store_test.go b/light-clients/07-tendermint/types/store_test.go
index b8badc09..3bf23500 100644
--- a/light-clients/07-tendermint/types/store_test.go
+++ b/light-clients/07-tendermint/types/store_test.go
@@ -1,13 +1,13 @@
package types_test
import (
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
- host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
- solomachinetypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/06-solomachine/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
- ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
+ host "github.com/cosmos/ibc-go/core/24-host"
+ "github.com/cosmos/ibc-go/core/exported"
+ solomachinetypes "github.com/cosmos/ibc-go/light-clients/06-solomachine/types"
+ "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
+ ibctesting "github.com/cosmos/ibc-go/testing"
)
func (suite *TendermintTestSuite) TestGetConsensusState() {
diff --git a/light-clients/07-tendermint/types/tendermint_test.go b/light-clients/07-tendermint/types/tendermint_test.go
index 4f9b8142..be09ddb8 100644
--- a/light-clients/07-tendermint/types/tendermint_test.go
+++ b/light-clients/07-tendermint/types/tendermint_test.go
@@ -12,10 +12,10 @@ import (
"github.com/cosmos/cosmos-sdk/codec"
"github.com/cosmos/cosmos-sdk/simapp"
sdk "github.com/cosmos/cosmos-sdk/types"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
- ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
- ibctestingmock "github.com/cosmos/cosmos-sdk/x/ibc/testing/mock"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
+ ibctesting "github.com/cosmos/ibc-go/testing"
+ ibctestingmock "github.com/cosmos/ibc-go/testing/mock"
)
const (
diff --git a/light-clients/07-tendermint/types/update.go b/light-clients/07-tendermint/types/update.go
index e692e746..da64ef87 100644
--- a/light-clients/07-tendermint/types/update.go
+++ b/light-clients/07-tendermint/types/update.go
@@ -10,9 +10,9 @@ import (
"github.com/cosmos/cosmos-sdk/codec"
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
+ "github.com/cosmos/ibc-go/core/exported"
)
// CheckHeaderAndUpdateState checks if the provided header is valid, and if valid it will:
diff --git a/light-clients/07-tendermint/types/update_test.go b/light-clients/07-tendermint/types/update_test.go
index d9e550ed..9f89a0fb 100644
--- a/light-clients/07-tendermint/types/update_test.go
+++ b/light-clients/07-tendermint/types/update_test.go
@@ -5,11 +5,11 @@ import (
tmtypes "github.com/tendermint/tendermint/types"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
- types "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
- ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
- ibctestingmock "github.com/cosmos/cosmos-sdk/x/ibc/testing/mock"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
+ types "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
+ ibctesting "github.com/cosmos/ibc-go/testing"
+ ibctestingmock "github.com/cosmos/ibc-go/testing/mock"
)
func (suite *TendermintTestSuite) TestCheckHeaderAndUpdateState() {
diff --git a/light-clients/07-tendermint/types/upgrade.go b/light-clients/07-tendermint/types/upgrade.go
index 397e9cfd..65017a2f 100644
--- a/light-clients/07-tendermint/types/upgrade.go
+++ b/light-clients/07-tendermint/types/upgrade.go
@@ -6,9 +6,9 @@ import (
"github.com/cosmos/cosmos-sdk/codec"
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
+ "github.com/cosmos/ibc-go/core/exported"
upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
)
diff --git a/light-clients/07-tendermint/types/upgrade_test.go b/light-clients/07-tendermint/types/upgrade_test.go
index 7be3a494..f7a851a0 100644
--- a/light-clients/07-tendermint/types/upgrade_test.go
+++ b/light-clients/07-tendermint/types/upgrade_test.go
@@ -1,10 +1,10 @@
package types_test
import (
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
- "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
+ "github.com/cosmos/ibc-go/core/exported"
+ "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
)
diff --git a/light-clients/09-localhost/module.go b/light-clients/09-localhost/module.go
index 57b9c5bb..68a59226 100644
--- a/light-clients/09-localhost/module.go
+++ b/light-clients/09-localhost/module.go
@@ -1,7 +1,7 @@
package localhost
import (
- "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/09-localhost/types"
+ "github.com/cosmos/ibc-go/light-clients/09-localhost/types"
)
// Name returns the IBC client name
diff --git a/light-clients/09-localhost/types/client_state.go b/light-clients/09-localhost/types/client_state.go
index 5a4a41a1..fdfc7a41 100644
--- a/light-clients/09-localhost/types/client_state.go
+++ b/light-clients/09-localhost/types/client_state.go
@@ -11,11 +11,11 @@ import (
"github.com/cosmos/cosmos-sdk/codec"
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
- channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
- host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ connectiontypes "github.com/cosmos/ibc-go/core/03-connection/types"
+ channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
+ host "github.com/cosmos/ibc-go/core/24-host"
+ "github.com/cosmos/ibc-go/core/exported"
)
var _ exported.ClientState = (*ClientState)(nil)
diff --git a/light-clients/09-localhost/types/client_state_test.go b/light-clients/09-localhost/types/client_state_test.go
index bc58f625..d46e63a8 100644
--- a/light-clients/09-localhost/types/client_state_test.go
+++ b/light-clients/09-localhost/types/client_state_test.go
@@ -2,14 +2,14 @@ package types_test
import (
sdk "github.com/cosmos/cosmos-sdk/types"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
- channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
- commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
- host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
- ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/09-localhost/types"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ connectiontypes "github.com/cosmos/ibc-go/core/03-connection/types"
+ channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
+ commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
+ host "github.com/cosmos/ibc-go/core/24-host"
+ "github.com/cosmos/ibc-go/core/exported"
+ ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
+ "github.com/cosmos/ibc-go/light-clients/09-localhost/types"
)
const (
diff --git a/light-clients/09-localhost/types/codec.go b/light-clients/09-localhost/types/codec.go
index b338dfb6..a672323a 100644
--- a/light-clients/09-localhost/types/codec.go
+++ b/light-clients/09-localhost/types/codec.go
@@ -2,7 +2,7 @@ package types
import (
codectypes "github.com/cosmos/cosmos-sdk/codec/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ "github.com/cosmos/ibc-go/core/exported"
)
// RegisterInterfaces register the ibc interfaces submodule implementations to protobuf
diff --git a/light-clients/09-localhost/types/localhost_test.go b/light-clients/09-localhost/types/localhost_test.go
index 8ebaef84..73356dcd 100644
--- a/light-clients/09-localhost/types/localhost_test.go
+++ b/light-clients/09-localhost/types/localhost_test.go
@@ -9,8 +9,8 @@ import (
"github.com/cosmos/cosmos-sdk/codec"
"github.com/cosmos/cosmos-sdk/simapp"
sdk "github.com/cosmos/cosmos-sdk/types"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ "github.com/cosmos/ibc-go/core/exported"
)
const (
diff --git a/testing/chain.go b/testing/chain.go
index 0534066d..69f0c94c 100644
--- a/testing/chain.go
+++ b/testing/chain.go
@@ -25,16 +25,16 @@ import (
authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
banktypes "github.com/cosmos/cosmos-sdk/x/bank/types"
capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"
- ibctransfertypes "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
- channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
- commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
- host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/types"
- ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/testing/mock"
+ ibctransfertypes "github.com/cosmos/ibc-go/apps/transfer/types"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ connectiontypes "github.com/cosmos/ibc-go/core/03-connection/types"
+ channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
+ commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
+ host "github.com/cosmos/ibc-go/core/24-host"
+ "github.com/cosmos/ibc-go/core/exported"
+ "github.com/cosmos/ibc-go/core/types"
+ ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
+ "github.com/cosmos/ibc-go/testing/mock"
"github.com/cosmos/cosmos-sdk/x/staking/teststaking"
stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types"
)
diff --git a/testing/chain_test.go b/testing/chain_test.go
index 361a9c4c..5a30b0c6 100644
--- a/testing/chain_test.go
+++ b/testing/chain_test.go
@@ -6,8 +6,8 @@ import (
"github.com/stretchr/testify/require"
tmtypes "github.com/tendermint/tendermint/types"
- ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
- "github.com/cosmos/cosmos-sdk/x/ibc/testing/mock"
+ ibctesting "github.com/cosmos/ibc-go/testing"
+ "github.com/cosmos/ibc-go/testing/mock"
)
func TestCreateSortedSignerArray(t *testing.T) {
diff --git a/testing/coordinator.go b/testing/coordinator.go
index ade28b4d..282416ab 100644
--- a/testing/coordinator.go
+++ b/testing/coordinator.go
@@ -10,9 +10,9 @@ import (
abci "github.com/tendermint/tendermint/abci/types"
sdk "github.com/cosmos/cosmos-sdk/types"
- channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
- host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+ channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
+ host "github.com/cosmos/ibc-go/core/24-host"
+ "github.com/cosmos/ibc-go/core/exported"
)
var (
diff --git a/testing/mock/mock.go b/testing/mock/mock.go
index 663497aa..97a03c8b 100644
--- a/testing/mock/mock.go
+++ b/testing/mock/mock.go
@@ -18,8 +18,8 @@ import (
sdk "github.com/cosmos/cosmos-sdk/types"
capabilitykeeper "github.com/cosmos/cosmos-sdk/x/capability/keeper"
capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"
- channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
- host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+ channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
+ host "github.com/cosmos/ibc-go/core/24-host"
)
const (
diff --git a/testing/mock/privval_test.go b/testing/mock/privval_test.go
index b9f0487a..b076f544 100644
--- a/testing/mock/privval_test.go
+++ b/testing/mock/privval_test.go
@@ -7,7 +7,7 @@ import (
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
tmtypes "github.com/tendermint/tendermint/types"
- "github.com/cosmos/cosmos-sdk/x/ibc/testing/mock"
+ "github.com/cosmos/ibc-go/testing/mock"
)
const chainID = "testChain"
diff --git a/testing/solomachine.go b/testing/solomachine.go
index bee63785..2d04a904 100644
--- a/testing/solomachine.go
+++ b/testing/solomachine.go
@@ -12,11 +12,11 @@ import (
cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types"
"github.com/cosmos/cosmos-sdk/crypto/types/multisig"
"github.com/cosmos/cosmos-sdk/types/tx/signing"
- clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
- commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
- host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
- "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
- solomachinetypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/06-solomachine/types"
+ clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
+ host "github.com/cosmos/ibc-go/core/24-host"
+ "github.com/cosmos/ibc-go/core/exported"
+ solomachinetypes "github.com/cosmos/ibc-go/light-clients/06-solomachine/types"
)
var prefix = commitmenttypes.NewMerklePrefix([]byte("ibc"))
diff --git a/testing/types.go b/testing/types.go
index 16cda621..78231352 100644
--- a/testing/types.go
+++ b/testing/types.go
@@ -1,7 +1,7 @@
package ibctesting
import (
- channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+ channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
)
// TestConnection is a testing helper struct to keep track of the connectionID, source clientID,
From 338db0c23bbd9c9232f3abc4e105dcba6e94efd2 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?colin=20axn=C3=A9r?=
<25233464+colin-axner@users.noreply.github.com>
Date: Mon, 1 Mar 2021 18:05:34 +0100
Subject: [PATCH 004/393] update docs and readme (#6)
* update docs and readme
* Update README.md
Co-authored-by: Federico Kunze <31522760+fedekunze@users.noreply.github.com>
Co-authored-by: Federico Kunze <31522760+fedekunze@users.noreply.github.com>
---
README.md | 54 +++-
docs/README.md | 116 +-------
docs/custom.md | 468 +++++++++++++++++++++++++++++++
docs/integration.md | 252 +++++++++++++++++
docs/overview.md | 181 ++++++++++++
docs/proposals.md | 42 +++
docs/relayer.md | 45 +++
docs/spec.md | 114 ++++++++
docs/upgrades/README.md | 14 +
docs/upgrades/developer-guide.md | 50 ++++
docs/upgrades/quick-guide.md | 54 ++++
11 files changed, 1282 insertions(+), 108 deletions(-)
create mode 100644 docs/custom.md
create mode 100644 docs/integration.md
create mode 100644 docs/overview.md
create mode 100644 docs/proposals.md
create mode 100644 docs/relayer.md
create mode 100644 docs/spec.md
create mode 100644 docs/upgrades/README.md
create mode 100644 docs/upgrades/developer-guide.md
create mode 100644 docs/upgrades/quick-guide.md
diff --git a/README.md b/README.md
index 15644729..56f3bab5 100644
--- a/README.md
+++ b/README.md
@@ -1,2 +1,54 @@
# ibc-go
-Interblockchain communication protocol (IBC) implementation in Golang.
+
+
+Interblockchain communication protocol (IBC) implementation in Golang built as an SDK module.
+
+## Components
+
+### Core
+
+The `core/` directory contains the SDK IBC module that SDK-based chains must integrate in order to utilize this implementation of IBC.
+It handles the core components of IBC, including clients, connections, channels, packets, acknowledgements, and timeouts.
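+
+For illustration only, an integrating chain imports the core packages directly from this repository; the exact set of packages a chain needs will vary, so the import block below is an assumed, minimal sketch rather than a prescribed list:
+
+```go
+import (
+	// Core IBC packages (paths as laid out in this repository).
+	clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+	channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
+	host "github.com/cosmos/ibc-go/core/24-host"
+
+	// Light client implementations shipped with this repository.
+	ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
+)
+```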
+
+### Applications
+
+Applications can be built as modules to utilize core IBC by fulfilling a set of callbacks.
+Fungible Token Transfers is currently the only supported application module.
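+
+The callback set is defined by core IBC. As a rough, hypothetical sketch (the interface name and signatures below are simplified assumptions for exposition, not the exact API), an application module implements packet lifecycle callbacks along these lines:
+
+```go
+package exampleapp
+
+import (
+	sdk "github.com/cosmos/cosmos-sdk/types"
+
+	channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
+)
+
+// PacketCallbacks is an illustrative stand-in for the callbacks core IBC
+// invokes on an application module; the real interface lives in core IBC.
+type PacketCallbacks interface {
+	// OnRecvPacket handles a packet addressed to this application's port.
+	OnRecvPacket(ctx sdk.Context, packet channeltypes.Packet) error
+	// OnAcknowledgementPacket handles the counterparty's acknowledgement of a sent packet.
+	OnAcknowledgementPacket(ctx sdk.Context, packet channeltypes.Packet, acknowledgement []byte) error
+	// OnTimeoutPacket handles a sent packet that timed out before being received.
+	OnTimeoutPacket(ctx sdk.Context, packet channeltypes.Packet) error
+}
+```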
+
+### IBC Light Clients
+
+IBC light clients are on-chain implementations of off-chain light clients.
+This repository currently supports the Tendermint and solo machine light clients.
+The localhost client is currently non-functional.
+
+## Docs
+
+Please see our [documentation](docs/README.md) for more information.
+
diff --git a/docs/README.md b/docs/README.md
index a699c10a..e84a4d1b 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -1,114 +1,16 @@
-# `ibc`
+# IBC
-## Abstract
+This repository contains reference documentation for IBC protocol integration and concepts:
-This specification defines the implementation of the IBC protocol on the Cosmos SDK, the
-changes made to the specification and where to find each specific ICS spec within
-the module.
+1. [Overview](./overview.md)
+2. [Integration](./integration.md)
+3. [Customization](./custom.md)
+4. [Relayer](./relayer.md)
+5. [Governance Proposals](./proposals.md)
-For the general specification please refer to the [Interchain Standards](https://github.com/cosmos/ics).
-
-## Contents
-
-1. **Applications**
-
- 1.1. [Transfer](./../applications/transfer/spec/README.md)
-2. **[Core](./../core/spec/README.md)**
-3. **Light Clients**
-
- 3.1 [Solo Machine Client](./../light-clients/06-solomachine/spec/README.md)
-
- 3.2 [Tendermint Client](./../light-clients/07-tendermint/spec/README.md)
-
- 3.3 [Localhost Client](./../light-clients/09-localhost/spec/README.md)
-
-## Implementation Details
-
-As stated above, the IBC implementation on the Cosmos SDK introduces some changes
-to the general specification, in order to avoid code duplication and to take
-advantage of the SDK architectural components such as the transaction routing
-through `Handlers`.
-
-### Interchain Standards reference
-
-The following list is a mapping from each Interchain Standard to their implementation
-in the SDK's `x/ibc` module:
-
-* [ICS 002 - Client Semantics](https://github.com/cosmos/ics/tree/master/spec/ics-002-client-semantics): Implemented in [`x/ibc/core/02-client`](https://github.com/cosmos/tree/master/ibc/core/02-client)
-* [ICS 003 - Connection Semantics](https://github.com/cosmos/ics/blob/master/spec/ics-003-connection-semantics): Implemented in [`x/ibc/core/03-connection`](https://github.com/cosmos/tree/master/ibc/core/03-connection)
-* [ICS 004 - Channel and Packet Semantics](https://github.com/cosmos/ics/blob/master/spec/ics-004-channel-and-packet-semantics): Implemented in [`x/ibc/core/04-channel`](https://github.com/cosmos/tree/master/ibc/core/04-channel)
-* [ICS 005 - Port Allocation](https://github.com/cosmos/ics/blob/master/spec/ics-005-port-allocation): Implemented in [`x/ibc/core/05-port`](https://github.com/cosmos/tree/master/ibc/core/05-port)
-* [ICS 006 - Solo Machine Client](https://github.com/cosmos/ics/blob/master/spec/ics-006-solo-machine-client): Implemented in [`x/ibc/light-clients/06-solomachine`](https://github.com/cosmos/tree/master/ibc/solomachine)
-* [ICS 007 - Tendermint Client](https://github.com/cosmos/ics/blob/master/spec/ics-007-tendermint-client): Implemented in [`x/ibc/light-clients/07-tendermint`](https://github.com/cosmos/tree/master/ibc/light-clients/07-tendermint)
-* [ICS 009 - Loopback Client](https://github.com/cosmos/ics/blob/master/spec/ics-009-loopback-client): Implemented in [`x/ibc/light-clients/09-localhost`](https://github.com/cosmos/tree/master/ibc/light-clients/09-localhost)
-* [ICS 018- Relayer Algorithms](https://github.com/cosmos/ics/tree/master/spec/ics-018-relayer-algorithms): Implemented in it's own [relayer repository](https://github.com/cosmos/relayer)
-* [ICS 020 - Fungible Token Transfer](https://github.com/cosmos/ics/tree/master/spec/ics-020-fungible-token-transfer): Implemented in [`x/ibc/applications/transfer`](https://github.com/cosmos/tree/master/ibc/applications/transfer)
-* [ICS 023 - Vector Commitments](https://github.com/cosmos/ics/tree/master/spec/ics-023-vector-commitments): Implemented in [`x/ibc/core/23-commitment`](https://github.com/cosmos/tree/master/ibc/core/23-commitment)
-* [ICS 024 - Host Requirements](https://github.com/cosmos/ics/tree/master/spec/ics-024-host-requirements): Implemented in [`x/ibc/core/24-host`](https://github.com/cosmos/tree/master/ibc/core/24-host)
-* [ICS 025 - Handler Interface](https://github.com/cosmos/ics/tree/master/spec/ics-025-handler-interface): `Handler` interfaces are implemented at the top level in `x/ibc/handler.go`,
-which call each ICS submodule's handlers (i.e `x/ibc/*/{XX-ICS}/handler.go`).
-* [ICS 026 - Routing Module](https://github.com/cosmos/ics/blob/master/spec/ics-026-routing-module): Replaced by [ADR 15 - IBC Packet Receiver](../../../docs/architecture/adr-015-ibc-packet-receiver.md).
-
-### Architecture Decision Records (ADR)
-
-The following ADR provide the design and architecture decision of IBC-related components.
-
-* [ADR 001 - Coin Source Tracing](../../../docs/architecture/adr-001-coin-source-tracing.md): standard to hash the ICS20's fungible token
-denomination trace path in order to support special characters and limit the maximum denomination length.
-* [ADR 17 - Historical Header Module](../../../docs/architecture/adr-017-historical-header-module.md): Introduces the ability to introspect past
-consensus states in order to verify their membership in the counterparty clients.
-* [ADR 19 - Protobuf State Encoding](../../../docs/architecture/adr-019-protobuf-state-encoding.md): Migration from Amino to Protobuf for state encoding.
-* [ADR 020 - Protocol Buffer Transaction Encoding](./../../docs/architecture/adr-020-protobuf-transaction-encoding.md): Client side migration to Protobuf.
-* [ADR 021 - Protocol Buffer Query Encoding](../../../docs/architecture/adr-020-protobuf-query-encoding.md): Queries migration to Protobuf.
-* [ADR 026 - IBC Client Recovery Mechanisms](../../../docs/architecture/adr-026-ibc-client-recovery-mechanisms.md): Allows IBC Clients to be recovered after freezing or expiry.
-
-### SDK Modules
-
-* [`x/capability`](https://github.com/cosmos/tree/master/x/capability): The capability module provides object-capability keys support through scoped keepers in order to authenticate usage of ports or channels. Check [ADR 3 - Dynamic Capability Store](../../../docs/architecture/adr-003-dynamic-capability-store.md) for more details.
-
-## IBC module architecture
-
-> **NOTE for auditors**: If you're not familiar with the overall module structure from
-the SDK modules, please check this [document](../../../docs/building-modules/structure.md) as
-prerequisite reading.
-
-For ease of auditing, every Interchain Standard has been developed in its own
-package. The development team separated the IBC TAO (Transport, Authentication, Ordering) ICS specifications from the IBC application level
-specification. The following tree describes the architecture of the directories that
-the `ibc` (TAO) and `ibc-transfer` ([ICS20](https://github.com/cosmos/ics/tree/master/spec/ics-020-fungible-token-transfer)) modules:
-
-```shell
-x/ibc
-├── applications/
-│ └──transfer/
-├── core/
-│ ├── 02-client/
-│ ├── 03-connection/
-│ ├── 04-channel/
-│ ├── 05-port/
-│ ├── 23-commitment/
-│ ├── 24-host/
-│ ├── client
-│ │ └── cli
-│ │ └── cli.go
-│ ├── keeper
-│ │ ├── keeper.go
-│ │ └── querier.go
-│ ├── types
-│ │ ├── errors.go
-│ │ └── keys.go
-│ ├── handler.go
-│ └── module.go
-├── light-clients/
-│ ├── 06-solomachine/
-│ ├── 07-tendermint/
-│ └── 09-localhost/
-└── testing/
-```
diff --git a/docs/custom.md b/docs/custom.md
new file mode 100644
index 00000000..4d4c30c0
--- /dev/null
+++ b/docs/custom.md
@@ -0,0 +1,468 @@
+
+
+# Customization
+
+Learn how to configure your application to use IBC and send data packets to other chains. {synopsis}
+
+This document serves as a guide for developers who want to write their own Inter-blockchain
+Communication Protocol (IBC) applications for custom [use-cases](https://github.com/cosmos/ics/blob/master/ibc/4_IBC_USECASES.md).
+
+Due to the modular design of the IBC protocol, IBC
+application developers do not need to concern themselves with the low-level details of clients,
+connections, and proof verification. Nevertheless a brief explanation of the lower levels of the
+stack is given so that application developers may have a high-level understanding of the IBC
+protocol. Then the document goes into detail on the abstraction layer most relevant for application
+developers (channels and ports), and describes how to define your own custom packets, and
+`IBCModule` callbacks.
+
+To have your module interact over IBC you must: bind to a port (or ports), define your own packet data and acknowledgement structs as well as how to encode/decode them, and implement the
+`IBCModule` interface. Below is a more detailed explanation of how to write an IBC application
+module correctly.
+
+## Pre-requisites Readings
+
+- [IBC Overview](./overview.md) {prereq}
+- [IBC default integration](./integration.md) {prereq}
+
+## Create a custom IBC application module
+
+### Implement `IBCModule` Interface and callbacks
+
+The Cosmos SDK expects all IBC modules to implement the [`IBCModule`
+interface](https://github.com/cosmos/cosmos-sdk/tree/master/x/ibc/core/05-port/types/module.go). This
+interface contains all of the callbacks IBC expects modules to implement. This section will describe
+the callbacks that are called during channel handshake execution.
+
+Here are the channel handshake callbacks that modules are expected to implement:
+
+```go
+// Called by IBC Handler on MsgOpenInit
+func (k Keeper) OnChanOpenInit(ctx sdk.Context,
+ order channeltypes.Order,
+ connectionHops []string,
+ portID string,
+ channelID string,
+ channelCap *capabilitytypes.Capability,
+ counterparty channeltypes.Counterparty,
+ version string,
+) error {
+ // OpenInit must claim the channelCapability that IBC passes into the callback
+ if err := k.ClaimCapability(ctx, channelCap, host.ChannelCapabilityPath(portID, channelID)); err != nil {
+ return err
+ }
+
+ // ... do custom initialization logic
+
+ // Use above arguments to determine if we want to abort handshake
+ // Examples: Abort if order == UNORDERED,
+ // Abort if version is unsupported
+ err := checkArguments(args)
+ return err
+}
+
+// Called by IBC Handler on MsgOpenTry
+OnChanOpenTry(
+ ctx sdk.Context,
+ order channeltypes.Order,
+ connectionHops []string,
+ portID,
+ channelID string,
+ channelCap *capabilitytypes.Capability,
+ counterparty channeltypes.Counterparty,
+ version,
+ counterpartyVersion string,
+) error {
+ // Module may have already claimed capability in OnChanOpenInit in the case of crossing hellos
+ // (ie chainA and chainB both call ChanOpenInit before one of them calls ChanOpenTry)
+ // If the module can already authenticate the capability then the module already owns it so we don't need to claim
+ // Otherwise, module does not have channel capability and we must claim it from IBC
+ if !k.AuthenticateCapability(ctx, channelCap, host.ChannelCapabilityPath(portID, channelID)) {
+ // Only claim channel capability passed back by IBC module if we do not already own it
+ if err := k.scopedKeeper.ClaimCapability(ctx, channelCap, host.ChannelCapabilityPath(portID, channelID)); err != nil {
+ return err
+ }
+ }
+
+ // ... do custom initialization logic
+
+ // Use above arguments to determine if we want to abort handshake
+ err := checkArguments(args)
+ return err
+}
+
+// Called by IBC Handler on MsgOpenAck
+OnChanOpenAck(
+ ctx sdk.Context,
+ portID,
+ channelID string,
+ counterpartyVersion string,
+) error {
+ // ... do custom initialization logic
+
+ // Use above arguments to determine if we want to abort handshake
+ err := checkArguments(args)
+ return err
+}
+
+// Called by IBC Handler on MsgOpenConfirm
+OnChanOpenConfirm(
+ ctx sdk.Context,
+ portID,
+ channelID string,
+) error {
+ // ... do custom initialization logic
+
+ // Use above arguments to determine if we want to abort handshake
+ err := checkArguments(args)
+ return err
+}
+```
+
+The channel closing handshake will also invoke module callbacks that can return errors to abort the
+closing handshake. Closing a channel is a 2-step handshake: the initiating chain calls
+`ChanCloseInit` and the finalizing chain calls `ChanCloseConfirm`.
+
+```go
+// Called by IBC Handler on MsgCloseInit
+OnChanCloseInit(
+ ctx sdk.Context,
+ portID,
+ channelID string,
+) error {
+ // ... do custom finalization logic
+
+ // Use above arguments to determine if we want to abort handshake
+ err := checkArguments(args)
+ return err
+}
+
+// Called by IBC Handler on MsgCloseConfirm
+OnChanCloseConfirm(
+ ctx sdk.Context,
+ portID,
+ channelID string,
+) error {
+ // ... do custom finalization logic
+
+ // Use above arguments to determine if we want to abort handshake
+ err := checkArguments(args)
+ return err
+}
+```
+
+#### Channel Handshake Version Negotiation
+
+Application modules are expected to verify versioning used during the channel handshake procedure.
+
+* `ChanOpenInit` callback should verify that the `MsgChanOpenInit.Version` is valid
+* `ChanOpenTry` callback should verify that the `MsgChanOpenTry.Version` is valid and that `MsgChanOpenTry.CounterpartyVersion` is valid.
+* `ChanOpenAck` callback should verify that the `MsgChanOpenAck.CounterpartyVersion` is valid and supported.
+
+Versions must be strings but can implement any versioning structure. If your application plans to
+have linear releases then semantic versioning is recommended. If your application plans to release
+various features in between major releases then it is advised to use the same versioning scheme
+as IBC. This versioning scheme specifies a version identifier and compatible feature set with
+that identifier. Valid version selection includes selecting a compatible version identifier with
+a subset of features supported by your application for that version. The struct used for this
+scheme can be found in `03-connection/types`.
+
+Since the version type is a string, applications have the ability to do simple version verification
+via string matching, or they can use the already implemented versioning system and pass the proto
+encoded version into each handshake call as necessary.
+
+ICS20 currently implements basic string matching with a single supported version.
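+
+As a minimal sketch of the simple string-matching approach, assuming a module-defined constant and helper (the names `Version` and `validateVersion` are illustrative, not taken from an existing module):
+
+```go
+import "fmt"
+
+// Version defines the only channel version this module supports.
+const Version = "mymodule-1"
+
+// validateVersion performs basic string-matching version verification,
+// similar to what ICS20 does for its single supported version.
+func validateVersion(version string) error {
+	if version != Version {
+		return fmt.Errorf("invalid channel version: expected %s, got %s", Version, version)
+	}
+	return nil
+}
+```
+
+Such a helper would be called from the `OnChanOpenInit` and `OnChanOpenTry` callbacks shown above, with `OnChanOpenTry` also checking `counterpartyVersion`.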
+
+### Bind Ports
+
+Currently, ports must be bound on app initialization. A module may bind to ports in `InitGenesis`
+like so:
+
+```go
+func InitGenesis(ctx sdk.Context, keeper keeper.Keeper, state types.GenesisState) {
+ // ... other initialization logic
+
+ // Only try to bind to port if it is not already bound, since we may already own
+ // port capability from capability InitGenesis
+ if !isBound(ctx, state.PortID) {
+ // module binds to desired ports on InitChain
+ // and claims returned capabilities
+ cap1 := keeper.IBCPortKeeper.BindPort(ctx, port1)
+ cap2 := keeper.IBCPortKeeper.BindPort(ctx, port2)
+ cap3 := keeper.IBCPortKeeper.BindPort(ctx, port3)
+
+ // NOTE: The module's scoped capability keeper must be private
+ keeper.scopedKeeper.ClaimCapability(ctx, cap1, host.PortPath(port1))
+ keeper.scopedKeeper.ClaimCapability(ctx, cap2, host.PortPath(port2))
+ keeper.scopedKeeper.ClaimCapability(ctx, cap3, host.PortPath(port3))
+ }
+
+ // ... more initialization logic
+}
+```
+
+### Custom Packets
+
+Modules connected by a channel must agree on what application data they are sending over the
+channel, as well as how they will encode/decode it. This process is not specified by IBC as it is up
+to each application module to determine how to implement this agreement. However, for most
+applications this will happen as a version negotiation during the channel handshake. While more
+complex version negotiation is possible to implement inside the channel opening handshake, a very
+simple version negotiation is implemented in the [ibc-transfer module](https://github.com/cosmos/cosmos-sdk/tree/master/x/ibc-transfer/module.go).
+
+Thus, a module must define its custom packet data structure, along with a well-defined way to
+encode and decode it to and from `[]byte`.
+
+```go
+// Custom packet data defined in application module
+type CustomPacketData struct {
+ // Custom fields ...
+}
+
+func EncodePacketData(packetData CustomPacketData) []byte {
+ // encode packetData to bytes
+}
+
+func DecodePacketData(encoded []byte) CustomPacketData {
+ // decode from bytes to packet data
+}
+```
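+
+For instance, if both modules agree to use JSON as the channel's encoding (an assumption made purely for illustration; production modules typically use a proto codec), the helpers above could look like this:
+
+```go
+import "encoding/json"
+
+// EncodePacketData marshals the custom packet data to bytes using JSON.
+func EncodePacketData(packetData CustomPacketData) []byte {
+	bz, err := json.Marshal(packetData)
+	if err != nil {
+		// real code should surface this error instead of panicking
+		panic(err)
+	}
+	return bz
+}
+
+// DecodePacketData unmarshals bytes received in packet.Data back into
+// the custom packet data structure.
+func DecodePacketData(encoded []byte) CustomPacketData {
+	var packetData CustomPacketData
+	if err := json.Unmarshal(encoded, &packetData); err != nil {
+		// real code should surface this error instead of panicking
+		panic(err)
+	}
+	return packetData
+}
+```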
+
+Then a module must encode its packet data before sending it through IBC.
+
+```go
+// Sending custom application packet data
+data := EncodePacketData(customPacketData)
+packet.Data = data
+IBCChannelKeeper.SendPacket(ctx, packet)
+```
+
+A module receiving a packet must decode the `PacketData` into a structure it expects so that it can
+act on it.
+
+```go
+// Receiving custom application packet data (in OnRecvPacket)
+packetData := DecodePacketData(packet.Data)
+// handle received custom packet data
+```
+
+#### Packet Flow Handling
+
+Just as IBC expects modules to implement callbacks for channel handshakes, IBC also expects modules
+to implement callbacks for handling the packet flow through a channel.
+
+Once a module A and module B are connected to each other, relayers can start relaying packets and
+acknowledgements back and forth on the channel.
+
+![IBC packet flow diagram](https://media.githubusercontent.com/media/cosmos/ics/master/spec/ics-004-channel-and-packet-semantics/packet-state-machine.png)
+
+Briefly, a successful packet flow works as follows:
+
+1. module A sends a packet through the IBC module
+2. the packet is received by module B
+3. if module B writes an acknowledgement of the packet then module A will process the
+ acknowledgement
+4. if the packet is not successfully received before the timeout, then module A processes the
+ packet's timeout.
+
+##### Sending Packets
+
+Modules do not send packets through callbacks, since the modules initiate the action of sending
+packets to the IBC module, as opposed to other parts of the packet flow where msgs sent to the IBC
+module must trigger execution on the port-bound module through the use of callbacks. Thus, to send a
+packet a module simply needs to call `SendPacket` on the `IBCChannelKeeper`.
+
+```go
+// retrieve the dynamic capability for this channel
+channelCap := scopedKeeper.GetCapability(ctx, channelCapName)
+// Sending custom application packet data
+data := EncodePacketData(customPacketData)
+packet.Data = data
+// Send packet to IBC, authenticating with channelCap
+IBCChannelKeeper.SendPacket(ctx, channelCap, packet)
+```
+
+::: warning
+In order to prevent modules from sending packets on channels they do not own, IBC expects
+modules to pass in the correct channel capability for the packet's source channel.
+:::
+
+##### Receiving Packets
+
+To handle receiving packets, the module must implement the `OnRecvPacket` callback. This gets
+invoked by the IBC module after the packet has been proved valid and correctly processed by the IBC
+keepers. Thus, the `OnRecvPacket` callback only needs to worry about making the appropriate state
+changes given the packet data without worrying about whether the packet is valid or not.
+
+Modules may construct an acknowledgement as a byte string and return it to the IBC handler.
+The IBC handler will then commit this acknowledgement of the packet so that a relayer may relay the
+acknowledgement back to the sender module.
+
+```go
+OnRecvPacket(
+ ctx sdk.Context,
+ packet channeltypes.Packet,
+) (res *sdk.Result, ack []byte, abort error) {
+ // Decode the packet data
+ packetData := DecodePacketData(packet.Data)
+
+ // do application state changes based on packet data
+ // and return result, acknowledgement and abortErr
+ // Note: abortErr is only not nil if we need to abort the entire receive packet, and allow a replay of the receive.
+ // If the application state change failed but we do not want to replay the packet,
+ // simply encode this failure with relevant information in ack and return nil error
+ res, ack, abortErr := processPacket(ctx, packet, packetData)
+
+ // if we need to abort the entire receive packet, return error
+ if abortErr != nil {
+ return nil, nil, abortErr
+ }
+
+ // Encode the ack since IBC expects acknowledgement bytes
+ ackBytes := EncodeAcknowledgement(ack)
+
+ return res, ackBytes, nil
+}
+```
+
+::: warning
+`OnRecvPacket` should **only** return an error if we want the entire receive packet execution
+(including the IBC handling) to be reverted. This will allow the packet to be replayed in the case
+that some mistake in the relaying caused the packet processing to fail.
+
+If some application-level error happened while processing the packet data, in most cases, we will
+not want the packet processing to revert. Instead, we may want to encode this failure into the
+acknowledgement and finish processing the packet. This will ensure the packet cannot be replayed,
+and will also allow the sender module to potentially remediate the situation upon receiving the
+acknowledgement. An example of this technique is in the `ibc-transfer` module's
+[`OnRecvPacket`](https://github.com/cosmos/cosmos-sdk/tree/master/x/ibc-transfer/module.go).
+:::
+
+### Acknowledgements
+
+Modules may commit an acknowledgement upon receiving and processing a packet in the case of synchronous packet processing.
+In the case where a packet is processed at some later point after the packet has been received (asynchronous execution), the acknowledgement
+will be written once the packet has been processed by the application which may be well after the packet receipt.
+
+NOTE: Most blockchain modules will want to use the synchronous execution model in which the module processes and writes the acknowledgement
+for a packet as soon as it has been received from the IBC module.
+
+This acknowledgement can then be relayed back to the original sender chain, which can take action
+depending on the contents of the acknowledgement.
+
+Just as packet data was opaque to IBC, acknowledgements are similarly opaque. Modules must pass and
+receive acknowledgements with the IBC module as byte strings.
+
+Thus, modules must agree on how to encode/decode acknowledgements. The process of creating an
+acknowledgement struct, along with encoding and decoding it, is very similar to the packet data
+example above. [ICS 04](https://github.com/cosmos/ics/tree/master/spec/ics-004-channel-and-packet-semantics#acknowledgement-envelope)
+specifies a recommended format for acknowledgements. This acknowledgement type can be imported from
+[channel types](https://github.com/cosmos/cosmos-sdk/tree/master/x/ibc/core/04-channel/types).
+
+While modules may choose arbitrary acknowledgement structs, a default acknowledgement type is provided by IBC [here](https://github.com/cosmos/cosmos-sdk/blob/master/proto/ibc/core/channel/v1/channel.proto):
+
+```proto
+// Acknowledgement is the recommended acknowledgement format to be used by
+// app-specific protocols.
+// NOTE: The field numbers 21 and 22 were explicitly chosen to avoid accidental
+// conflicts with other protobuf message formats used for acknowledgements.
+// The first byte of any message with this format will be the non-ASCII values
+// `0xaa` (result) or `0xb2` (error). Implemented as defined by ICS:
+// https://github.com/cosmos/ics/tree/master/spec/ics-004-channel-and-packet-semantics#acknowledgement-envelope
+message Acknowledgement {
+ // response contains either a result or an error and must be non-empty
+ oneof response {
+ bytes result = 21;
+ string error = 22;
+ }
+}
+```
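+
+For illustration, the sketch below wraps an application result or error into this envelope and marshals it to bytes before handing it back to the IBC handler. It assumes the gogoproto-generated `Acknowledgement` type in `04-channel/types` with its standard `Marshal` method; the helper name `buildAck` is purely illustrative:
+
+```go
+import (
+	channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+)
+
+// buildAck wraps an application result or error into the recommended
+// Acknowledgement envelope and returns its proto-encoded bytes.
+func buildAck(result []byte, err error) ([]byte, error) {
+	var ack channeltypes.Acknowledgement
+	if err != nil {
+		ack.Response = &channeltypes.Acknowledgement_Error{Error: err.Error()}
+	} else {
+		ack.Response = &channeltypes.Acknowledgement_Result{Result: result}
+	}
+	return ack.Marshal()
+}
+```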
+
+#### Acknowledging Packets
+
+After a module writes an acknowledgement, a relayer can relay back the acknowledgement to the sender module. The sender module can
+then process the acknowledgement using the `OnAcknowledgementPacket` callback. The contents of the
+acknowledgement are entirely up to the modules on the channel (just like the packet data); however, they
+may often contain information on whether the packet was successfully processed along
+with some additional data that could be useful for remediation if the packet processing failed.
+
+Since the modules are responsible for agreeing on an encoding/decoding standard for packet data and
+acknowledgements, IBC will pass in the acknowledgements as `[]byte` to this callback. The callback
+is responsible for decoding the acknowledgement and processing it.
+
+```go
+OnAcknowledgementPacket(
+ ctx sdk.Context,
+ packet channeltypes.Packet,
+ acknowledgement []byte,
+) (*sdk.Result, error) {
+ // Decode acknowledgement
+ ack := DecodeAcknowledgement(acknowledgement)
+
+ // process ack
+ res, err := processAck(ack)
+ return res, err
+}
+```
+
+#### Timeout Packets
+
+If the timeout for a packet is reached before the packet is successfully received or the
+counterparty channel end is closed before the packet is successfully received, then the receiving
+chain can no longer process it. Thus, the sending chain must process the timeout using
+`OnTimeoutPacket` to handle this situation. Again the IBC module will verify that the timeout is
+indeed valid, so our module only needs to implement the state machine logic for what to do once a
+timeout is reached and the packet can no longer be received.
+
+```go
+OnTimeoutPacket(
+ ctx sdk.Context,
+ packet channeltypes.Packet,
+) (*sdk.Result, error) {
+ // do custom timeout logic
+}
+```
+
+### Routing
+
+As mentioned above, modules must implement the IBC module interface (which contains both channel
+handshake callbacks and packet handling callbacks). The concrete implementation of this interface
+must be registered with the module name as a route on the IBC `Router`.
+
+```go
+// app.go
+func NewApp(...args) *App {
+// ...
+
+// Create static IBC router, add module routes, then set and seal it
+ibcRouter := port.NewRouter()
+
+ibcRouter.AddRoute(ibctransfertypes.ModuleName, transferModule)
+// Note: moduleCallbacks must implement IBCModule interface
+ibcRouter.AddRoute(moduleName, moduleCallbacks)
+
+// Setting Router will finalize all routes by sealing router
+// No more routes can be added
+app.IBCKeeper.SetRouter(ibcRouter)
+```
+
+## Working Example
+
+For a real working example of an IBC application, you can look through the `ibc-transfer` module
+which implements everything discussed above.
+
+Here are the useful parts of the module to look at:
+
+[Binding to transfer
+port](https://github.com/cosmos/cosmos-sdk/blob/master/x/ibc-transfer/genesis.go)
+
+[Sending transfer
+packets](https://github.com/cosmos/cosmos-sdk/blob/master/x/ibc-transfer/keeper/relay.go)
+
+[Implementing IBC
+callbacks](https://github.com/cosmos/cosmos-sdk/blob/master/x/ibc-transfer/module.go)
+
+## Next {hide}
+
+Learn about [building modules](../building-modules/intro.md) {hide}
diff --git a/docs/integration.md b/docs/integration.md
new file mode 100644
index 00000000..50bc983f
--- /dev/null
+++ b/docs/integration.md
@@ -0,0 +1,252 @@
+
+
+# Integration
+
+Learn how to integrate IBC to your application and send data packets to other chains. {synopsis}
+
+This document outlines the required steps to integrate and configure the [IBC
+module](https://github.com/cosmos/cosmos-sdk/tree/master/x/ibc) to your Cosmos SDK application and
+send fungible token transfers to other chains.
+
+## Integrating the IBC module
+
+Integrating the IBC module into your SDK-based application is straightforward. The general changes can be summarized in the following steps:
+
+- Add required modules to the `module.BasicManager`
+- Define additional `Keeper` fields for the new modules on the `App` type
+- Add the module's `StoreKeys` and initialize their `Keepers`
+- Set up corresponding routers and routes for the `ibc` and `evidence` modules
+- Add the modules to the module `Manager`
+- Add modules to `Begin/EndBlockers` and `InitGenesis`
+- Update the module `SimulationManager` to enable simulations
+
+### Module `BasicManager` and `ModuleAccount` permissions
+
+The first step is to add the following modules to the `BasicManager`: `x/capability`, `x/ibc`,
+`x/evidence` and `x/ibc-transfer`. After that, we need to grant `Minter` and `Burner` permissions to
+the `ibc-transfer` `ModuleAccount` to mint and burn relayed tokens.
+
+```go
+// app.go
+var (
+
+ ModuleBasics = module.NewBasicManager(
+ // ...
+ capability.AppModuleBasic{},
+ ibc.AppModuleBasic{},
+ evidence.AppModuleBasic{},
+ transfer.AppModuleBasic{}, // i.e. the ibc-transfer module
+ )
+
+ // module account permissions
+ maccPerms = map[string][]string{
+ // other module accounts permissions
+ // ...
+ ibctransfertypes.ModuleName: {authtypes.Minter, authtypes.Burner},
+ }
+)
+```
+
+### Application fields
+
+Then, we need to register the `Keepers` as follows:
+
+```go
+// app.go
+type App struct {
+ // baseapp, keys and subspaces definitions
+
+ // other keepers
+ // ...
+ IBCKeeper *ibckeeper.Keeper // IBC Keeper must be a pointer in the app, so we can SetRouter on it correctly
+ EvidenceKeeper evidencekeeper.Keeper // required to set up the client misbehaviour route
+ TransferKeeper ibctransferkeeper.Keeper // for cross-chain fungible token transfers
+
+ // make scoped keepers public for test purposes
+ ScopedIBCKeeper capabilitykeeper.ScopedKeeper
+ ScopedTransferKeeper capabilitykeeper.ScopedKeeper
+
+ // ...
+ // module and simulation manager definitions
+}
+```
+
+### Configure the `Keepers`
+
+During initialization, besides initializing the IBC `Keepers` (for the `x/ibc`, and
+`x/ibc-transfer` modules), we need to grant specific capabilities through the capability module
+`ScopedKeepers` so that we can authenticate the object-capability permissions for each of the IBC
+channels.
+
+```go
+func NewApp(...args) *App {
+ // define codecs and baseapp
+
+ // add capability keeper and ScopeToModule for ibc module
+ app.CapabilityKeeper = capabilitykeeper.NewKeeper(appCodec, keys[capabilitytypes.StoreKey], memKeys[capabilitytypes.MemStoreKey])
+
+ // grant capabilities for the ibc and ibc-transfer modules
+ scopedIBCKeeper := app.CapabilityKeeper.ScopeToModule(ibchost.ModuleName)
+ scopedTransferKeeper := app.CapabilityKeeper.ScopeToModule(ibctransfertypes.ModuleName)
+
+ // ... other modules keepers
+
+ // Create IBC Keeper
+ app.IBCKeeper = ibckeeper.NewKeeper(
+ appCodec, keys[ibchost.StoreKey], app.StakingKeeper, scopedIBCKeeper,
+ )
+
+ // Create Transfer Keepers
+ app.TransferKeeper = ibctransferkeeper.NewKeeper(
+ appCodec, keys[ibctransfertypes.StoreKey],
+ app.IBCKeeper.ChannelKeeper, &app.IBCKeeper.PortKeeper,
+ app.AccountKeeper, app.BankKeeper, scopedTransferKeeper,
+ )
+ transferModule := transfer.NewAppModule(app.TransferKeeper)
+
+ // Create evidence Keeper to register the IBC light client misbehaviour evidence route
+ evidenceKeeper := evidencekeeper.NewKeeper(
+ appCodec, keys[evidencetypes.StoreKey], &app.StakingKeeper, app.SlashingKeeper,
+ )
+
+ // .. continues
+}
+```
+
+### Register `Routers`
+
+IBC needs to know which module is bound to which port so that it can route packets to the
+appropriate module and call the appropriate callbacks. The port to module name mapping is handled by
+IBC's port `Keeper`. However, the mapping from module name to the relevant callbacks is accomplished
+by the port
+[`Router`](https://github.com/cosmos/cosmos-sdk/tree/master/x/ibc//core/05-port/types/router.go) on the
+IBC module.
+
+Adding the module routes allows the IBC handler to call the appropriate callback when processing a
+channel handshake or a packet.
+
+The second `Router` that is required is the evidence module router. This router handles general
+evidence submission and routes the business logic to each registered evidence handler. In the case
+of IBC, it is required to submit evidence for [light client
+misbehaviour](https://github.com/cosmos/ics/tree/master/spec/ics-002-client-semantics#misbehaviour)
+in order to freeze a client and prevent further data packets from being sent/received.
+
+Currently, a `Router` is static so it must be initialized and set correctly on app initialization.
+Once the `Router` has been set, no new routes can be added.
+
+```go
+// app.go
+func NewApp(...args) *App {
+ // .. continuation from above
+
+ // Create static IBC router, add ibc-transfer module route, then set and seal it
+ ibcRouter := port.NewRouter()
+ ibcRouter.AddRoute(ibctransfertypes.ModuleName, transferModule)
+ // Setting Router will finalize all routes by sealing router
+ // No more routes can be added
+ app.IBCKeeper.SetRouter(ibcRouter)
+
+ // create static Evidence routers
+
+ evidenceRouter := evidencetypes.NewRouter().
+ // add IBC ClientMisbehaviour evidence handler
+ AddRoute(ibcclient.RouterKey, ibcclient.HandlerClientMisbehaviour(app.IBCKeeper.ClientKeeper))
+
+ // Setting Router will finalize all routes by sealing router
+ // No more routes can be added
+ evidenceKeeper.SetRouter(evidenceRouter)
+
+ // set the evidence keeper from the section above
+ app.EvidenceKeeper = *evidenceKeeper
+
+ // .. continues
+```
+
+### Module Managers
+
+In order to use IBC, we need to add the new modules to the module `Manager` and to the `SimulationManager` in case your application supports [simulations](./../building-modules/simulator.md).
+
+```go
+// app.go
+func NewApp(...args) *App {
+ // .. continuation from above
+
+ app.mm = module.NewManager(
+ // other modules
+ // ...
+ capability.NewAppModule(appCodec, *app.CapabilityKeeper),
+ evidence.NewAppModule(app.EvidenceKeeper),
+ ibc.NewAppModule(app.IBCKeeper),
+ transferModule,
+ )
+
+ // ...
+
+ app.sm = module.NewSimulationManager(
+ // other modules
+ // ...
+ capability.NewAppModule(appCodec, *app.CapabilityKeeper),
+ evidence.NewAppModule(app.EvidenceKeeper),
+ ibc.NewAppModule(app.IBCKeeper),
+ transferModule,
+ )
+
+ // .. continues
+```
+
+### Application ABCI Ordering
+
+One addition from IBC is the concept of `HistoricalEntries` which are stored on the staking module.
+Each entry contains the historical information for the `Header` and `ValidatorSet` of this chain which is stored
+at each height during the `BeginBlock` call. The historical info is required to introspect
+past consensus states at any given height in order to verify the light client `ConsensusState` during the
+connection handshake.
+
+The IBC module also has
+[`BeginBlock`](https://github.com/cosmos/cosmos-sdk/blob/master/x/ibc/core/02-client/abci.go) logic as
+well. This is optional as it is only required if your application uses the [localhost
+client](https://github.com/cosmos/ics/blob/master/spec/ics-009-loopback-client) to connect two
+different modules from the same chain.
+
+::: tip
+Only register the ibc module to the `SetOrderBeginBlockers` if your application will use the
+localhost (_aka_ loopback) client.
+:::
+
+```go
+// app.go
+func NewApp(...args) *App {
+ // .. continuation from above
+
+ // add evidence, staking and ibc modules to BeginBlockers
+ app.mm.SetOrderBeginBlockers(
+ // other modules ...
+ evidencetypes.ModuleName, stakingtypes.ModuleName, ibchost.ModuleName,
+ )
+
+ // ...
+
+ // NOTE: Capability module must occur first so that it can initialize any capabilities
+ // so that other modules that want to create or claim capabilities afterwards in InitChain
+ // can do so safely.
+ app.mm.SetOrderInitGenesis(
+ capabilitytypes.ModuleName,
+ // other modules ...
+ ibchost.ModuleName, evidencetypes.ModuleName, ibctransfertypes.ModuleName,
+ )
+
+ // .. continues
+```
+
+::: warning
+**IMPORTANT**: The capability module **must** be declared first in `SetOrderInitGenesis`
+:::
+
+That's it! You have now wired up the IBC module and are now able to send fungible tokens across
+different chains. If you want to have a broader view of the changes take a look into the SDK's
+[`SimApp`](https://github.com/cosmos/cosmos-sdk/blob/master/simapp/app.go).
+
+## Next {hide}
+
+Learn about how to create [custom IBC modules](./custom.md) for your application {hide}
diff --git a/docs/overview.md b/docs/overview.md
new file mode 100644
index 00000000..ff915eee
--- /dev/null
+++ b/docs/overview.md
@@ -0,0 +1,181 @@
+
+
+# Overview
+
+Learn what IBC is, its components and use cases. {synopsis}
+
+## What is the Interblockchain Communication Protocol (IBC)?
+
+This document serves as a guide for developers who want to write their own Inter-blockchain
+Communication Protocol (IBC) applications for custom [use-cases](https://github.com/cosmos/ics/blob/master/ibc/4_IBC_USECASES.md).
+
+Due to the modular design of the IBC protocol, IBC
+application developers do not need to concern themselves with the low-level details of clients,
+connections, and proof verification. Nevertheless a brief explanation of the lower levels of the
+stack is given so that application developers may have a high-level understanding of the IBC
+protocol. Then the document goes into detail on the abstraction layer most relevant for application
+developers (channels and ports), and describes how to define your own custom packets, and
+`IBCModule` callbacks.
+
+To have your module interact over IBC you must: bind to a port(s), define your own packet data (and
+optionally acknowledgement) structs as well as how to encode/decode them, and implement the
+`IBCModule` interface. Below is a more detailed explanation of how to write an IBC application
+module correctly.
+
+## Components Overview
+
+### [Clients](https://github.com/cosmos/cosmos-sdk/tree/master/x/ibc/core/02-client)
+
+IBC Clients are light clients (identified by a unique client-id) that track the consensus states of
+other blockchains, along with the proof spec necessary to properly verify proofs against the
+client's consensus state. A client may be associated with any number of connections to multiple
+chains. The supported IBC clients are:
+
+* [Solo Machine light client](https://github.com/cosmos/cosmos-sdk/tree/master/x/ibc/light-clients/06-solomachine): devices such as phones, browsers, or laptops.
+* [Tendermint light client](https://github.com/cosmos/cosmos-sdk/tree/master/x/ibc/light-clients/07-tendermint): The default for SDK-based chains.
+* [Localhost (loopback) client](https://github.com/cosmos/cosmos-sdk/tree/master/x/ibc/light-clients/09-localhost): Useful for
+testing, simulation and relaying packets to modules on the same application.
+
+### [Connections](https://github.com/cosmos/cosmos-sdk/tree/master/x/ibc/core/03-connection)
+
+Connections encapsulate two `ConnectionEnd` objects on two separate blockchains. Each
+`ConnectionEnd` is associated with a client of the other blockchain (ie counterparty blockchain).
+The connection handshake is responsible for verifying that the light clients on each chain are
+correct for their respective counterparties. Connections, once established, are responsible for
+facilitating all cross-chain verification of IBC state. A connection may be associated with any
+number of channels.
+
+### [Proofs](https://github.com/cosmos/cosmos-sdk/tree/master/x/ibc/core/23-commitment) and [Paths](https://github.com/cosmos/cosmos-sdk/tree/master/x/ibc/core/24-host)
+
+In IBC, blockchains do not directly pass messages to each other over the network. Instead, to
+communicate, a blockchain will commit some state to a specifically defined path reserved for a
+specific message type and a specific counterparty (perhaps storing a specific connectionEnd as part
+of a handshake, or a packet intended to be relayed to a module on the counterparty chain). A relayer
+process monitors for updates to these paths, and will relay messages, by submitting the data stored
+under the path along with a proof to the counterparty chain. The paths that all IBC implementations
+must use for committing IBC messages are defined in
+[ICS-24](https://github.com/cosmos/ics/tree/master/spec/ics-024-host-requirements) and the proof
+format that all implementations must be able to produce and verify is defined in this [ICS-23 implementation](https://github.com/confio/ics23).
+
+### [Capabilities](./ocap.md)
+
+IBC is intended to work in execution environments where modules do not necessarily trust each
+other. Thus IBC must authenticate module actions on ports and channels so that only modules with the
+appropriate permissions can use them. This is accomplished using [dynamic
+capabilities](../architecture/adr-003-dynamic-capability-store.md). Upon binding to a port or
+creating a channel for a module, IBC will return a dynamic capability that the module must claim in
+order to use that port or channel. This prevents other modules from using that port or channel since
+they will not own the appropriate capability.
+
+While the above is useful background information, IBC modules do not need to interact at all with
+these lower-level abstractions. The relevant abstraction layer for IBC application developers is
+that of channels and ports. IBC applications should be written as self-contained **modules**. A
+module on one blockchain can thus communicate with other modules on other blockchains by sending,
+receiving and acknowledging packets through channels, which are uniquely identified by the
+`(channelID, portID)` tuple. A useful analogy is to consider IBC modules as internet applications on
+a computer. A channel can then be conceptualized as an IP connection, with the IBC portID being
+analogous to an IP port and the IBC channelID being analogous to an IP address. Thus, a single
+instance of an IBC module may communicate on the same port with any number of other modules, and
+IBC will correctly route all packets to the relevant module using the `(channelID, portID)` tuple. An
+IBC module may also communicate with another IBC module over multiple ports, with each
+`(portID<->portID)` packet stream being sent on a different unique channel.
+
+### [Ports](https://github.com/cosmos/cosmos-sdk/tree/master/x/ibc/core/05-port)
+
+An IBC module may bind to any number of ports. Each port must be identified by a unique `portID`.
+Since IBC is designed to be secure with mutually-distrusted modules operating on the same ledger,
+binding a port will return a dynamic object capability. In order to take action on a particular port
+(eg open a channel with its portID), a module must provide the dynamic object capability to the IBC
+handler. This prevents a malicious module from opening channels with ports it does not own. Thus,
+IBC modules are responsible for claiming the capability that is returned on `BindPort`.
+
+### [Channels](https://github.com/cosmos/cosmos-sdk/tree/master/x/ibc/core/04-channel)
+
+An IBC channel can be established between 2 IBC ports. Currently, a port is exclusively owned by a
+single module. IBC packets are sent over channels. Just as IP packets contain the destination IP
+address and IP port as well as the source IP address and source IP port, IBC packets will contain
+the destination portID and channelID as well as the source portID and channelID. This enables IBC to
+correctly route packets to the destination module, while also allowing modules receiving packets to
+know the sender module.
+
+A channel may be `ORDERED`, in which case, packets from a sending module must be processed by the
+receiving module in the order they were sent. Or a channel may be `UNORDERED`, in which case packets
+from a sending module are processed in the order they arrive (may not be the order they were sent).
+
+Modules may choose which channels they wish to communicate over; thus, IBC expects modules to
+implement callbacks that are called during the channel handshake. These callbacks may perform custom
+channel initialization logic. If any callback returns an error, the channel handshake will fail. Thus, by
+returning errors on callbacks, modules can programmatically reject and accept channels.
+
+The channel handshake is a 4 step handshake. Briefly, if a given chain A wants to open a channel with
+chain B using an already established connection:
+
+1. chain A sends a `ChanOpenInit` message to signal a channel initialization attempt with chain B.
+2. chain B sends a `ChanOpenTry` message to try opening the channel on chain A.
+3. chain A sends a `ChanOpenAck` message to mark its channel end status as open.
+4. chain B sends a `ChanOpenConfirm` message to mark its channel end status as open.
+
+If all this happens successfully, the channel will be open on both sides. At each step in the handshake, the module
+associated with the `ChannelEnd` will have its callback executed for that step of the handshake. So
+on `ChanOpenInit`, the module on chain A will have its callback `OnChanOpenInit` executed.
+
+Just as ports came with dynamic capabilities, channel initialization will return a dynamic capability
+that the module **must** claim so that it can pass in a capability to authenticate channel actions
+like sending packets. The channel capability is passed into the callback on the first parts of the
+handshake; either `OnChanOpenInit` on the initializing chain or `OnChanOpenTry` on the other chain.
+
+### [Packets](https://github.com/cosmos/cosmos-sdk/tree/master/x/ibc/core/04-channel)
+
+Modules communicate with each other by sending packets over IBC channels. As mentioned above, all
+IBC packets contain the destination `portID` and `channelID` along with the source `portID` and
+`channelID`; this allows modules to know the sender module of a given packet. IBC packets also
+contain a sequence to optionally enforce ordering. IBC packets also contain a `TimeoutTimestamp` and
+`TimeoutHeight`, which when non-zero, will determine the deadline before which the receiving module
+must process a packet. If the timeout passes without the packet being successfully received, the
+sending module can timeout the packet and take appropriate actions.
+
+Modules send custom application data to each other inside the `Data []byte` field of the IBC packet.
+Thus, packet data is completely opaque to IBC handlers. It is incumbent on a sender module to encode
+their application-specific packet information into the `Data` field of packets, and the receiver
+module to decode that `Data` back to the original application data.
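+
+As a rough sketch of how these fields fit together, the snippet below constructs a packet using the `NewPacket` constructor from `04-channel/types` and a height from `02-client/types` (the port and channel identifiers are placeholder values):
+
+```go
+// appData is the opaque, application-encoded payload placed in the Data field.
+// The packet is routed from (portID "transfer", channelID "channel-0") to the
+// counterparty (portID "transfer", channelID "channel-1"). It times out at
+// revision height 1000; the timestamp timeout is disabled (0).
+packet := channeltypes.NewPacket(
+	appData, sequence,
+	"transfer", "channel-0", // source portID, channelID
+	"transfer", "channel-1", // destination portID, channelID
+	clienttypes.NewHeight(1, 1000), // TimeoutHeight
+	0,                              // TimeoutTimestamp (unix nanoseconds; 0 disables it)
+)
+```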
+
+### [Receipts and Timeouts](https://github.com/cosmos/cosmos-sdk/tree/master/x/ibc/core/04-channel)
+
+Since IBC works over a distributed network and relies on potentially faulty relayers to relay messages between ledgers,
+IBC must handle the case where a packet does not get sent to its destination in a timely manner or at all. Thus, packets must
+specify a timeout height or timeout timestamp after which a packet can no longer be successfully received on the destination chain.
+
+If the timeout does get reached, then a proof of packet timeout can be submitted to the original chain which can then perform
+application-specific logic to timeout the packet, perhaps by rolling back the packet send changes (refunding senders any locked funds, etc).
+
+In ORDERED channels, a timeout of a single packet in the channel will cause the channel to close. If packet sequence `n` times out,
+then no packet at sequence `k > n` can be successfully received without violating the contract of ORDERED channels that packets are processed in the order that they are sent. Since ORDERED channels enforce this invariant, a proof that sequence `n` hasn't been received on the destination chain by packet `n`'s specified timeout is sufficient to timeout packet `n` and close the channel.
+
+In the UNORDERED case, packets may be received in any order. Thus, IBC will write a packet receipt for each sequence it has received in the UNORDERED channel. This receipt contains no information; it is simply a marker intended to signify that the UNORDERED channel has received a packet at the specified sequence. To timeout a packet on an UNORDERED channel, one must provide a proof that a packet receipt does not exist for the packet's sequence by the specified timeout. Of course, timing out a packet on an UNORDERED channel will simply trigger the application-specific timeout logic for that packet, and will not close the channel.
+
+For this reason, most modules should use UNORDERED channels as they require less liveness guarantees to function effectively for users of that channel.
+
+### [Acknowledgements](https://github.com/cosmos/cosmos-sdk/tree/master/x/ibc/core/04-channel)
+
+Modules may also choose to write application-specific acknowledgements upon processing a packet. This may be done synchronously in `OnRecvPacket` if the module processes packets as soon as they are received from the IBC module, or asynchronously if the module processes packets at some later point after receiving them.
+
+Regardless, this acknowledgement data is opaque to IBC much like the packet `Data` and will be treated by IBC as a simple byte string `[]byte`. It is incumbent on receiver modules to encode their acknowledgement in such a way that the sender module can decode it correctly. This should be decided through version negotiation during the channel handshake.
+
+The acknowledgement may encode whether the packet processing succeeded or failed, along with additional information that will allow the sender module to take appropriate action.
+
+Once the acknowledgement has been written by the receiving chain, a relayer will relay the acknowledgement back to the original sender module which will then execute application-specific acknowledgment logic using the contents of the acknowledgement. This may involve rolling back packet-send changes in the case of a failed acknowledgement (refunding senders).
+
+Once an acknowledgement is received successfully on the original sender chain, the IBC module deletes the corresponding packet commitment as it is no longer needed.
+
+## Further Readings and Specs
+
+If you want to learn more about IBC, check the following specifications:
+
+* [IBC specification overview](https://github.com/cosmos/ics/blob/master/ibc/README.md)
+* [IBC SDK specification](../../modules/ibc)
+
+## Next {hide}
+
+Learn about how to [integrate](./integration.md) IBC to your application {hide}
diff --git a/docs/proposals.md b/docs/proposals.md
new file mode 100644
index 00000000..6bdf9f70
--- /dev/null
+++ b/docs/proposals.md
@@ -0,0 +1,42 @@
+
+
+# Governance Proposals
+
+In uncommon situations, a highly valued client may become frozen due to uncontrollable
+circumstances. A highly valued client might have hundreds of channels being actively used.
+Some of those channels might have a significant amount of locked tokens used for ICS 20.
+
+If one third of the validator set of the chain the client represents decides to collude,
+they can sign off on two valid but conflicting headers each signed by the other one third
+of the honest validator set. The light client can now be updated with two valid, but conflicting
+headers at the same height. The light client cannot know which header is trustworthy and therefore
+evidence of such misbehaviour is likely to be submitted resulting in a frozen light client.
+
+Frozen light clients cannot be updated under any circumstance except via a governance proposal.
+Since a quorum of validators can sign arbitrary state roots which may not be valid executions
+of the state machine, a governance proposal has been added to ease the complexity of unfreezing
+or updating clients which have become "stuck". Without this mechanism, validator sets would need
+to construct a state root to unfreeze the client. Unfreezing clients re-enables all of the channels
+built upon that client. This may result in recovery of otherwise lost funds.
+
+Tendermint light clients may become expired if the trusting period has passed since their
+last update. This may occur if relayers stop submitting headers to update the clients.
+
+An unplanned upgrade by the counterparty chain may also result in expired clients. If the counterparty
+chain undergoes an unplanned upgrade, there may be no commitment to that upgrade signed by the validator
+set before the chain-id changes. In this situation, the validator set of the last valid update for the
+light client is never expected to produce another valid header since the chain-id has changed, which will
+ultimately lead the on-chain light client to become expired.
+
+In the case that a highly valued light client is frozen, expired, or rendered non-updateable, a
+governance proposal may be submitted to update this client, known as the subject client. The
+proposal includes the client identifier for the subject, the client identifier for a substitute
+client, and an initial height to reference the substitute client from. Light client implementations
+may implement custom updating logic, but in most cases, the subject will be updated with information
+from the substitute client, if the proposal passes. The substitute client is used as a "stand in"
+while the subject is on trial. It is best practice to create a substitute client *after* the subject
+has become frozen to prevent the substitute from also becoming frozen. An active substitute client
+allows headers to be submitted during the voting period to prevent accidental expiry once the proposal
+passes.
diff --git a/docs/relayer.md b/docs/relayer.md
new file mode 100644
index 00000000..15512125
--- /dev/null
+++ b/docs/relayer.md
@@ -0,0 +1,45 @@
+
+
+# Relayer
+
+## Pre-requisites Readings
+
+- [IBC Overview](./overview.md) {prereq}
+- [Events](../core/events.md) {prereq}
+
+## Events
+
+Events are emitted for every transaction processed by the base application to indicate the execution
+of some logic clients may want to be aware of. This is extremely useful when relaying IBC packets.
+Any message that uses IBC will emit events for the corresponding TAO logic executed as defined in
+the [IBC events spec](https://github.com/cosmos/cosmos-sdk/tree/master/x/ibc/core/spec/06_events.md).
+
+In the SDK, it can be assumed that for every message there is an event emitted with the type `message`,
+attribute key `action`, and an attribute value representing the type of message sent
+(`channel_open_init` would be the attribute value for `MsgChannelOpenInit`). If a relayer queries
+for transaction events, it can split message events using this event Type/Attribute Key pair.
+
+The Event Type `message` with the Attribute Key `module` may be emitted multiple times for a single
+message due to application callbacks. It can be assumed that any TAO logic executed will result in
+a module event emission with the attribute value `ibc_` followed by the submodule name (for example, 02-client emits `ibc_client`).
+
+### Subscribing with Tendermint
+
+Calling the Tendermint RPC method `Subscribe` via [Tendermint's Websocket](https://docs.tendermint.com/master/rpc/) will return events using
+Tendermint's internal representation of them. Instead of receiving back a list of events as they
+were emitted, Tendermint will return the type `map[string][]string` which maps a string in the
+form `<event_type>.<attribute_key>` to `attribute_value`. This causes extraction of the event
+ordering to be non-trivial, but still possible.
+
+A relayer should use the `message.action` key to extract the number of messages in the transaction
+and the type of IBC transactions sent. For every IBC transaction within the string array for
+`message.action`, the necessary information should be extracted from the other event fields. If
+`send_packet` appears at index 2 in the value for `message.action`, a relayer will need to use the
+value at index 2 of the key `send_packet.packet_sequence`. This process should be repeated for each
+piece of information needed to relay a packet.
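+
+A minimal sketch of subscribing to transaction events over the Tendermint websocket and walking the flattened event map is shown below. It uses the Tendermint RPC HTTP client; the query string and key names follow the conventions described above, and error handling is omitted for brevity:
+
+```go
+package main
+
+import (
+	"context"
+	"fmt"
+
+	tmclient "github.com/tendermint/tendermint/rpc/client/http"
+)
+
+func main() {
+	// Connect to the node RPC endpoint and start the websocket client.
+	client, _ := tmclient.New("tcp://localhost:26657", "/websocket")
+	_ = client.Start()
+	defer client.Stop()
+
+	// Subscribe to all transaction events.
+	txs, _ := client.Subscribe(context.Background(), "relayer", "tm.event='Tx'")
+
+	for ev := range txs {
+		// ev.Events is a map[string][]string keyed by "<event_type>.<attribute_key>".
+		actions := ev.Events["message.action"]
+		sequences := ev.Events["send_packet.packet_sequence"]
+
+		// Follow the index-matching convention described above: the index of
+		// "send_packet" in message.action is used to look up the packet sequence.
+		for i, action := range actions {
+			if action == "send_packet" && i < len(sequences) {
+				fmt.Println("packet to relay, sequence:", sequences[i])
+			}
+		}
+	}
+}
+```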
+
+## Example Implementations
+
+- [Golang Relayer](https://github.com/iqlusioninc/relayer)
diff --git a/docs/spec.md b/docs/spec.md
new file mode 100644
index 00000000..a699c10a
--- /dev/null
+++ b/docs/spec.md
@@ -0,0 +1,114 @@
+
+
+# `ibc`
+
+## Abstract
+
+This specification defines the implementation of the IBC protocol on the Cosmos SDK, the
+changes made to the specification and where to find each specific ICS spec within
+the module.
+
+For the general specification please refer to the [Interchain Standards](https://github.com/cosmos/ics).
+
+## Contents
+
+1. **Applications**
+
+ 1.1. [Transfer](./../applications/transfer/spec/README.md)
+2. **[Core](./../core/spec/README.md)**
+3. **Light Clients**
+
+ 3.1 [Solo Machine Client](./../light-clients/06-solomachine/spec/README.md)
+
+ 3.2 [Tendermint Client](./../light-clients/07-tendermint/spec/README.md)
+
+ 3.3 [Localhost Client](./../light-clients/09-localhost/spec/README.md)
+
+## Implementation Details
+
+As stated above, the IBC implementation on the Cosmos SDK introduces some changes
+to the general specification, in order to avoid code duplication and to take
+advantage of the SDK architectural components such as the transaction routing
+through `Handlers`.
+
+### Interchain Standards reference
+
+The following list is a mapping from each Interchain Standard to their implementation
+in the SDK's `x/ibc` module:
+
+* [ICS 002 - Client Semantics](https://github.com/cosmos/ics/tree/master/spec/ics-002-client-semantics): Implemented in [`x/ibc/core/02-client`](https://github.com/cosmos/tree/master/ibc/core/02-client)
+* [ICS 003 - Connection Semantics](https://github.com/cosmos/ics/blob/master/spec/ics-003-connection-semantics): Implemented in [`x/ibc/core/03-connection`](https://github.com/cosmos/tree/master/ibc/core/03-connection)
+* [ICS 004 - Channel and Packet Semantics](https://github.com/cosmos/ics/blob/master/spec/ics-004-channel-and-packet-semantics): Implemented in [`x/ibc/core/04-channel`](https://github.com/cosmos/tree/master/ibc/core/04-channel)
+* [ICS 005 - Port Allocation](https://github.com/cosmos/ics/blob/master/spec/ics-005-port-allocation): Implemented in [`x/ibc/core/05-port`](https://github.com/cosmos/tree/master/ibc/core/05-port)
+* [ICS 006 - Solo Machine Client](https://github.com/cosmos/ics/blob/master/spec/ics-006-solo-machine-client): Implemented in [`x/ibc/light-clients/06-solomachine`](https://github.com/cosmos/tree/master/ibc/solomachine)
+* [ICS 007 - Tendermint Client](https://github.com/cosmos/ics/blob/master/spec/ics-007-tendermint-client): Implemented in [`x/ibc/light-clients/07-tendermint`](https://github.com/cosmos/tree/master/ibc/light-clients/07-tendermint)
+* [ICS 009 - Loopback Client](https://github.com/cosmos/ics/blob/master/spec/ics-009-loopback-client): Implemented in [`x/ibc/light-clients/09-localhost`](https://github.com/cosmos/tree/master/ibc/light-clients/09-localhost)
+* [ICS 018 - Relayer Algorithms](https://github.com/cosmos/ics/tree/master/spec/ics-018-relayer-algorithms): Implemented in its own [relayer repository](https://github.com/cosmos/relayer)
+* [ICS 020 - Fungible Token Transfer](https://github.com/cosmos/ics/tree/master/spec/ics-020-fungible-token-transfer): Implemented in [`x/ibc/applications/transfer`](https://github.com/cosmos/tree/master/ibc/applications/transfer)
+* [ICS 023 - Vector Commitments](https://github.com/cosmos/ics/tree/master/spec/ics-023-vector-commitments): Implemented in [`x/ibc/core/23-commitment`](https://github.com/cosmos/tree/master/ibc/core/23-commitment)
+* [ICS 024 - Host Requirements](https://github.com/cosmos/ics/tree/master/spec/ics-024-host-requirements): Implemented in [`x/ibc/core/24-host`](https://github.com/cosmos/tree/master/ibc/core/24-host)
+* [ICS 025 - Handler Interface](https://github.com/cosmos/ics/tree/master/spec/ics-025-handler-interface): `Handler` interfaces are implemented at the top level in `x/ibc/handler.go`,
+which calls each ICS submodule's handlers (i.e. `x/ibc/*/{XX-ICS}/handler.go`).
+* [ICS 026 - Routing Module](https://github.com/cosmos/ics/blob/master/spec/ics-026-routing-module): Replaced by [ADR 15 - IBC Packet Receiver](../../../docs/architecture/adr-015-ibc-packet-receiver.md).
+
+### Architecture Decision Records (ADR)
+
+The following ADRs provide the design and architecture decisions for IBC-related components.
+
+* [ADR 001 - Coin Source Tracing](../../../docs/architecture/adr-001-coin-source-tracing.md): standard to hash the ICS20's fungible token
+denomination trace path in order to support special characters and limit the maximum denomination length.
+* [ADR 17 - Historical Header Module](../../../docs/architecture/adr-017-historical-header-module.md): Introduces the ability to introspect past
+consensus states in order to verify their membership in the counterparty clients.
+* [ADR 19 - Protobuf State Encoding](../../../docs/architecture/adr-019-protobuf-state-encoding.md): Migration from Amino to Protobuf for state encoding.
+* [ADR 020 - Protocol Buffer Transaction Encoding](./../../docs/architecture/adr-020-protobuf-transaction-encoding.md): Client side migration to Protobuf.
+* [ADR 021 - Protocol Buffer Query Encoding](../../../docs/architecture/adr-020-protobuf-query-encoding.md): Queries migration to Protobuf.
+* [ADR 026 - IBC Client Recovery Mechanisms](../../../docs/architecture/adr-026-ibc-client-recovery-mechanisms.md): Allows IBC Clients to be recovered after freezing or expiry.
+
+### SDK Modules
+
+* [`x/capability`](https://github.com/cosmos/tree/master/x/capability): The capability module provides object-capability keys support through scoped keepers in order to authenticate usage of ports or channels. Check [ADR 3 - Dynamic Capability Store](../../../docs/architecture/adr-003-dynamic-capability-store.md) for more details.
+
+## IBC module architecture
+
+> **NOTE for auditors**: If you're not familiar with the overall module structure from
+the SDK modules, please check this [document](../../../docs/building-modules/structure.md) as
+prerequisite reading.
+
+For ease of auditing, every Interchain Standard has been developed in its own
+package. The development team separated the IBC TAO (Transport, Authentication, Ordering) ICS specifications from the IBC application-level
+specifications. The following tree describes the directory layout of
+the `ibc` (TAO) and `ibc-transfer` ([ICS20](https://github.com/cosmos/ics/tree/master/spec/ics-020-fungible-token-transfer)) modules:
+
+```shell
+x/ibc
+├── applications/
+│ └──transfer/
+├── core/
+│ ├── 02-client/
+│ ├── 03-connection/
+│ ├── 04-channel/
+│ ├── 05-port/
+│ ├── 23-commitment/
+│ ├── 24-host/
+│ ├── client
+│ │ └── cli
+│ │ └── cli.go
+│ ├── keeper
+│ │ ├── keeper.go
+│ │ └── querier.go
+│ ├── types
+│ │ ├── errors.go
+│ │ └── keys.go
+│ ├── handler.go
+│ └── module.go
+├── light-clients/
+│ ├── 06-solomachine/
+│ ├── 07-tendermint/
+│ └── 09-localhost/
+└── testing/
+```
diff --git a/docs/upgrades/README.md b/docs/upgrades/README.md
new file mode 100644
index 00000000..11ccabe2
--- /dev/null
+++ b/docs/upgrades/README.md
@@ -0,0 +1,14 @@
+
+
+### Upgrading IBC Chains Overview
+
+This directory contains information on how to upgrade an IBC chain without breaking counterparty clients and connections.
+
+IBC-connected chains must be able to upgrade without breaking connections to other chains. Otherwise, the risk of disrupting high-value IBC connections would be a massive disincentive against upgrading, preventing chains in the IBC ecosystem from evolving and improving. Many chain upgrades are irrelevant to IBC, but some can break counterparty clients if not handled correctly. Thus, any IBC chain that wishes to perform an IBC-client-breaking upgrade must perform an IBC upgrade in order to allow counterparty clients to securely upgrade to the new light client.
+
+1. The [quick-guide](./quick-guide.md) describes how IBC-connected chains can perform client-breaking upgrades and how relayers can securely upgrade counterparty clients using the SDK.
+2. The [developer-guide](./developer-guide.md) is a guide for developers intending to develop IBC client implementations with upgrade functionality.
diff --git a/docs/upgrades/developer-guide.md b/docs/upgrades/developer-guide.md
new file mode 100644
index 00000000..998cb276
--- /dev/null
+++ b/docs/upgrades/developer-guide.md
@@ -0,0 +1,50 @@
+
+
+# IBC Client Developer Guide to Upgrades
+
+Learn how to implement upgrade functionality for your custom IBC client. {synopsis}
+
+As mentioned in the [README](./README.md), it is vital that high-value IBC clients can upgrade along with their underlying chains to avoid disruption to the IBC ecosystem. Thus, IBC client developers will want to implement upgrade functionality to enable clients to maintain connections and channels even across chain upgrades.
+
+The IBC protocol allows client implementations to provide a path to upgrading clients given the upgraded client state, upgraded consensus state and proofs for each.
+
+```go
+// Upgrade functions
+// NOTE: proof heights are not included as upgrade to a new revision is expected to pass only on the last
+// height committed by the current revision. Clients are responsible for ensuring that the planned last
+// height of the current revision is somehow encoded in the proof verification process.
+// This is to ensure that no premature upgrades occur, since upgrade plans committed to by the counterparty
+// may be cancelled or modified before the last planned height.
+VerifyUpgradeAndUpdateState(
+ ctx sdk.Context,
+ cdc codec.BinaryMarshaler,
+ store sdk.KVStore,
+ newClient ClientState,
+ newConsState ConsensusState,
+ proofUpgradeClient,
+ proofUpgradeConsState []byte,
+) (upgradedClient ClientState, upgradedConsensus ConsensusState, err error)
+```
+
+Note that the clients should have prior knowledge of the merkle path that the upgraded client and upgraded consensus states will use. The height at which the upgrade has occurred should also be encoded in the proof. The Tendermint client implementation accomplishes this by including an `UpgradePath` in the ClientState itself, which is used along with the upgrade height to construct the merkle path under which the client state and consensus state are committed.
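+
+As a rough, non-authoritative illustration, a client that knows its upgrade path could derive the commitment keys for the upgraded states as below. The key layout mirrors the `upgrade/UpgradedIBCState/{upgradeHeight}/...` convention described in the [quick-guide](./quick-guide.md); the helper names are hypothetical and not part of the ibc-go API:
+
+```go
+package main
+
+import (
+	"fmt"
+	"strings"
+)
+
+// upgradedClientKey joins a client's known upgrade path with the planned
+// upgrade height and a fixed suffix to form the key under which the upgraded
+// client state is expected to be committed by the upgrading chain.
+func upgradedClientKey(upgradePath []string, upgradeHeight int64) string {
+	return fmt.Sprintf("%s/%d/upgradedClient", strings.Join(upgradePath, "/"), upgradeHeight)
+}
+
+// upgradedConsStateKey does the same for the upgraded consensus state.
+func upgradedConsStateKey(upgradePath []string, upgradeHeight int64) string {
+	return fmt.Sprintf("%s/%d/upgradedConsState", strings.Join(upgradePath, "/"), upgradeHeight)
+}
+
+func main() {
+	path := []string{"upgrade", "UpgradedIBCState"} // e.g. a Tendermint client's UpgradePath
+	fmt.Println(upgradedClientKey(path, 150))    // upgrade/UpgradedIBCState/150/upgradedClient
+	fmt.Println(upgradedConsStateKey(path, 150)) // upgrade/UpgradedIBCState/150/upgradedConsState
+}
+```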
+
+Developers must ensure that the `UpgradeClientMsg` does not pass until the last height of the old chain has been committed, and after the chain upgrades, the `UpgradeClientMsg` should pass once and only once on all counterparty clients.
+
+Developers must ensure that the new client adopts all of the new Client parameters that must be uniform across every valid light client of a chain (chain-chosen parameters), while maintaining the Client parameters that are customizable by each individual client (client-chosen parameters) from the previous version of the client.
+
+Upgrades must adhere to the IBC Security Model. IBC does not rely on the assumption of honest relayers for correctness. Thus users should not have to rely on relayers to maintain client correctness and security (though honest relayers must exist to maintain relayer liveness). While relayers may choose any set of client parameters while creating a new `ClientState`, this still holds under the security model since users can always choose a relayer-created client that suits their security and correctness needs or create a Client with their desired parameters if no such client exists.
+
+However, when upgrading an existing client, one must keep in mind that there are already many users who depend on this client's particular parameters. We cannot give the upgrading relayer free choice over these parameters once they have already been chosen. This would violate the security model since users who rely on the client would have to rely on the upgrading relayer to maintain the same level of security. Thus, developers must make sure that their upgrade mechanism allows clients to upgrade the chain-specified parameters whenever a chain upgrade changes these parameters (examples in the Tendermint client include `UnbondingPeriod`, `ChainID`, `UpgradePath`, etc.), while ensuring that the relayer submitting the `UpgradeClientMsg` cannot alter the client-chosen parameters that the users are relying upon (examples in Tendermint client include `TrustingPeriod`, `TrustLevel`, `MaxClockDrift`, etc).
+
+Developers should maintain the distinction between Client parameters that are uniform across every valid light client of a chain (chain-chosen parameters), and Client parameters that are customizable by each individual client (client-chosen parameters), since this distinction is necessary to implement the `ZeroCustomFields` method in the `ClientState` interface:
+
+```go
+// Utility function that zeroes out any client customizable fields in client state
+// Ledger enforced fields are maintained while all custom fields are zero values
+// Used to verify upgrades
+ZeroCustomFields() ClientState
+```
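+
+For illustration only, a trimmed-down Tendermint-like client state (a stand-in struct, not the actual `ClientState` type) might implement `ZeroCustomFields` as follows, keeping the chain-chosen fields and zeroing the client-chosen ones:
+
+```go
+package main
+
+import (
+	"fmt"
+	"time"
+)
+
+// tmLikeClientState is an illustrative stand-in containing only the fields
+// relevant to the chain-chosen vs client-chosen distinction.
+type tmLikeClientState struct {
+	// chain-chosen (ledger enforced) parameters: preserved
+	ChainID         string
+	UnbondingPeriod time.Duration
+	UpgradePath     []string
+
+	// client-chosen (customizable) parameters: zeroed out
+	TrustingPeriod time.Duration
+	MaxClockDrift  time.Duration
+}
+
+// ZeroCustomFields returns a copy with every client-chosen field left at its
+// zero value while the chain-chosen fields are carried over unchanged.
+func (cs tmLikeClientState) ZeroCustomFields() tmLikeClientState {
+	return tmLikeClientState{
+		ChainID:         cs.ChainID,
+		UnbondingPeriod: cs.UnbondingPeriod,
+		UpgradePath:     cs.UpgradePath,
+		// TrustingPeriod and MaxClockDrift intentionally omitted (zero values)
+	}
+}
+
+func main() {
+	cs := tmLikeClientState{
+		ChainID:         "newChainId",
+		UnbondingPeriod: 21 * 24 * time.Hour,
+		UpgradePath:     []string{"upgrade", "UpgradedIBCState"},
+		TrustingPeriod:  14 * 24 * time.Hour,
+		MaxClockDrift:   10 * time.Second,
+	}
+	fmt.Printf("%+v\n", cs.ZeroCustomFields())
+}
+```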
+
+Counterparty clients can upgrade securely by using all of the chain-chosen parameters from the chain-committed `UpgradedClient` and preserving all of the old client-chosen parameters. This enables chains to securely upgrade without relying on an honest relayer; however, it can in some cases lead to an invalid final `ClientState` if the new chain-chosen parameters clash with the old client-chosen parameters. This can happen in the Tendermint client case if the upgrading chain lowers the `UnbondingPeriod` (chain-chosen) to a duration below that of a counterparty client's `TrustingPeriod` (client-chosen). Such cases should be clearly documented by developers, so that chains know which upgrades should be avoided to prevent this problem. The final upgraded client should also be validated in `VerifyUpgradeAndUpdateState` before returning to ensure that the client does not upgrade to an invalid `ClientState`.
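+
+As a sketch of that merge under the same simplifying assumptions (stand-in struct and function names, not the real Tendermint implementation), an upgrade handler could combine the two parameter sets and then validate the result:
+
+```go
+package main
+
+import (
+	"errors"
+	"fmt"
+	"time"
+)
+
+// upgradeClientState is an illustrative stand-in holding chain-chosen and
+// client-chosen parameters.
+type upgradeClientState struct {
+	ChainID         string        // chain-chosen
+	UnbondingPeriod time.Duration // chain-chosen
+	TrustingPeriod  time.Duration // client-chosen
+}
+
+// mergeUpgradedClient adopts the chain-chosen parameters from the committed
+// upgraded client, preserves the client-chosen parameters from the existing
+// client, and rejects combinations that would be invalid.
+func mergeUpgradedClient(existing, upgraded upgradeClientState) (upgradeClientState, error) {
+	merged := upgradeClientState{
+		ChainID:         upgraded.ChainID,         // adopt new chain-chosen value
+		UnbondingPeriod: upgraded.UnbondingPeriod, // adopt new chain-chosen value
+		TrustingPeriod:  existing.TrustingPeriod,  // keep old client-chosen value
+	}
+	// the clash described above: a lowered UnbondingPeriod can invalidate the
+	// preserved TrustingPeriod
+	if merged.TrustingPeriod >= merged.UnbondingPeriod {
+		return upgradeClientState{}, errors.New("trusting period must be less than unbonding period")
+	}
+	return merged, nil
+}
+
+func main() {
+	existing := upgradeClientState{ChainID: "chain-1", UnbondingPeriod: 21 * 24 * time.Hour, TrustingPeriod: 14 * 24 * time.Hour}
+	upgraded := upgradeClientState{ChainID: "chain-2", UnbondingPeriod: 7 * 24 * time.Hour} // lowered unbonding period
+	if _, err := mergeUpgradedClient(existing, upgraded); err != nil {
+		fmt.Println("upgrade would produce an invalid client:", err)
+	}
+}
+```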
diff --git a/docs/upgrades/quick-guide.md b/docs/upgrades/quick-guide.md
new file mode 100644
index 00000000..4717e52f
--- /dev/null
+++ b/docs/upgrades/quick-guide.md
@@ -0,0 +1,54 @@
+
+
+# How to Upgrade IBC Chains and their Clients
+
+Learn how to upgrade your chain and counterparty clients. {synopsis}
+
+The information in this doc for upgrading chains is relevant to SDK chains. However, the guide for counterparty clients is relevant to any Tendermint client that enables upgrades.
+
+### IBC Client Breaking Upgrades
+
+IBC-connected chains must perform an IBC upgrade if their upgrade will break counterparty IBC clients. The current IBC protocol supports upgrading Tendermint chains for a specific subset of IBC-client-breaking upgrades. Here is the exhaustive list of IBC-client-breaking upgrades and whether the IBC protocol currently supports them.
+
+IBC currently does **NOT** support unplanned upgrades. All of the following upgrades must be planned and committed to in advance by the upgrading chain, in order for counterparty clients to maintain their connections securely.
+
+Note: Since upgrades are only implemented for Tendermint clients, this doc only discusses upgrades on Tendermint chains that would break counterparty IBC Tendermint Clients.
+
+1. Changing the Chain-ID: **Supported**
+2. Changing the UnbondingPeriod: **Partially Supported**, chains may increase the unbonding period with no issues. However, decreasing the unbonding period may irreversibly break some counterparty clients. Thus, it is **not recommended** that chains reduce the unbonding period.
+3. Changing the height (resetting to 0): **Supported**, so long as chains remember to increment the revision number in their chain-id.
+4. Changing the ProofSpecs: **Supported**, this should be changed if the proof structure needed to verify IBC proofs is changed across the upgrade. Ex: Switching from an IAVL store, to a SimpleTree Store
+5. Changing the UpgradePath: **Supported**, this might involve changing the key under which upgraded clients and consensus states are stored in the upgrade store, or even migrating the upgrade store itself.
+6. Migrating the IBC store: **Unsupported**, as the IBC store location is negotiated by the connection.
+7. Upgrading to a backwards compatible version of IBC: **Supported**
+8. Upgrading to a non-backwards compatible version of IBC: **Unsupported**, as IBC version is negotiated on connection handshake.
+9. Changing the Tendermint LightClient algorithm: **Partially Supported**. Changes to the light client algorithm that do not change the ClientState or ConsensusState struct may be supported, provided that the counterparty is also upgraded to support the new light client algorithm. Changes that require updating the ClientState and ConsensusState structs themselves are theoretically possible by providing a path to translate an older ClientState struct into the new ClientState struct; however this is not currently implemented.
+
+### Step-by-Step Upgrade Process for SDK chains
+
+If the IBC-connected chain is conducting an upgrade that will break counterparty clients, it must ensure that the upgrade is first supported by IBC using the list above and then execute the upgrade process described below in order to prevent counterparty clients from breaking.
+
+1. Create a `SoftwareUpgradeProposal` with an `UpgradePlan` that includes the new IBC ClientState in the `UpgradedClientState`. Note that the `UpgradePlan` must specify an upgrade height **only** (no upgrade time), and the `ClientState` should only include the fields common to all valid clients and zero out any client-customizable fields (such as TrustingPeriod).
+2. Vote on and pass the `SoftwareUpgradeProposal`
+
+Upon the `SoftwareUpgradeProposal` passing, the upgrade module will commit the UpgradedClient under the key: `upgrade/UpgradedIBCState/{upgradeHeight}/upgradedClient`. On the block right before the upgrade height, the upgrade module will also commit an initial consensus state for the next chain under the key: `upgrade/UpgradedIBCState/{upgradeHeight}/upgradedConsState`.
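+
+As a rough sketch of that timing rule (stand-in types only, not the upgrade module's actual API), the initial consensus state for the new chain is written on the last block the old chain commits:
+
+```go
+package main
+
+import "fmt"
+
+// plan is a minimal stand-in for the upgrade module's Plan.
+type plan struct {
+	Name   string
+	Height int64
+}
+
+// shouldWriteUpgradedConsState mirrors the rule described above: the upgraded
+// consensus state is committed on the block right before the upgrade height,
+// i.e. the last block the old chain will commit.
+func shouldWriteUpgradedConsState(currentHeight int64, p plan) bool {
+	return currentHeight == p.Height-1
+}
+
+func main() {
+	p := plan{Name: "v2-upgrade", Height: 150}
+	for h := int64(148); h <= 150; h++ {
+		fmt.Printf("height %d: write upgraded consensus state? %v\n", h, shouldWriteUpgradedConsState(h, p))
+	}
+}
+```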
+
+Once the chain reaches the upgrade height and halts, a relayer can upgrade the counterparty clients to the last block of the old chain. They can then submit the proofs of the `UpgradedClient` and `UpgradedConsensusState` against this last block and upgrade the counterparty client.
+
+### Step-by-Step Upgrade Process for Relayers Upgrading Counterparty Clients
+
+Once the upgrading chain has committed to upgrading, relayers must wait until the chain halts at the upgrade height before upgrading counterparty clients. This is because chains may reschedule or cancel upgrade plans before they occur, so relayers can only be sure the upgrade will take place once the chain reaches the upgrade height and halts.
+
+Thus, the upgrade process for relayers trying to upgrade the counterparty clients is as follows (a rough Go sketch follows the list):
+
+1. Wait for the upgrading chain to reach the upgrade height and halt
+2. Query a full node for the proofs of `UpgradedClient` and `UpgradedConsensusState` at the last height of the old chain.
+3. Update the counterparty client to the last height of the old chain using the `UpdateClient` msg.
+4. Submit an `UpgradeClient` msg to the counterparty chain with the `UpgradedClient`, `UpgradedConsensusState` and their respective proofs.
+5. Submit an `UpdateClient` msg to the counterparty chain with a header from the new upgraded chain.
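+
+For orientation only, the five steps can be strung together as below; `chainClient` and its methods are hypothetical placeholders, not the API of any real relayer or SDK:
+
+```go
+package main
+
+import "fmt"
+
+// chainClient is a hypothetical, minimal view of the operations a relayer
+// needs for the steps above; it is not the API of any real relayer.
+type chainClient interface {
+	WaitForHalt(upgradeHeight int64)
+	QueryUpgradeProofs(lastHeight int64) (clientProof, consStateProof []byte)
+	UpdateCounterpartyClient(height int64)
+	SubmitUpgradeClient(clientProof, consStateProof []byte)
+	SubmitUpdateClientWithNewChainHeader()
+}
+
+// upgradeCounterpartyClient strings the five steps together in order.
+func upgradeCounterpartyClient(c chainClient, upgradeHeight int64) {
+	lastHeight := upgradeHeight - 1
+
+	c.WaitForHalt(upgradeHeight)                               // 1. wait for the upgrading chain to halt
+	clientProof, consProof := c.QueryUpgradeProofs(lastHeight) // 2. query proofs at the last old-chain height
+	c.UpdateCounterpartyClient(lastHeight)                     // 3. update the counterparty client to that height
+	c.SubmitUpgradeClient(clientProof, consProof)              // 4. submit the UpgradeClient msg with proofs
+	c.SubmitUpdateClientWithNewChainHeader()                   // 5. submit an UpdateClient msg with a new-chain header
+}
+
+func main() {
+	fmt.Println("upgradeCounterpartyClient shows the ordering of the five relayer steps")
+}
+```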
+
+The Tendermint client on the counterparty chain will verify that the upgrading chain did indeed commit to the upgraded client and upgraded consensus state at the upgrade height (since the upgrade height is included in the key). If the proofs are verified against the upgrade height, then the client will upgrade to the new client while retaining all of its client-customized fields. Thus, it will retain its old TrustingPeriod, TrustLevel, MaxClockDrift, etc; while adopting the new chain-specified fields such as UnbondingPeriod, ChainId, UpgradePath, etc. Note, this can lead to an invalid client since the old client-chosen fields may no longer be valid given the new chain-chosen fields. Upgrading chains should try to avoid these situations by not altering parameters that can break old clients. For an example, see the UnbondingPeriod example in the supported upgrades section.
+
+The upgraded consensus state will serve purely as a basis of trust for future `UpdateClientMsgs` and will not contain a consensus root to perform proof verification against. Thus, relayers must submit an `UpdateClientMsg` with a header from the new chain so that the connection can be used for proof verification again.
\ No newline at end of file
From 9d7a49f65a3b88a15c26b4714137ac1b7ccccc49 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?colin=20axn=C3=A9r?=
<25233464+colin-axner@users.noreply.github.com>
Date: Wed, 3 Mar 2021 15:41:27 +0100
Subject: [PATCH 005/393] Update code and add github action test workflows (#7)
* update docs and readme
* migrate code changes from SDK
* add workflows
* fix spacing
* remove conflicts from README
* remove docker and do lint
* remove liveness test
* update interface URL type and add sdk tests from x/auth/client/rest
---
.github/workflows/test.yml | 225 ++++++
Makefile | 6 +-
README.md | 2 +
apps/transfer/keeper/keeper.go | 2 +-
apps/transfer/keeper/relay_test.go | 2 +-
apps/transfer/module.go | 4 -
apps/transfer/simulation/decoder_test.go | 2 +-
core/02-client/abci.go | 23 +-
core/02-client/abci_test.go | 34 +
core/02-client/client/cli/tx.go | 90 +++
core/02-client/client/proposal_handler.go | 19 +-
core/02-client/keeper/client_test.go | 56 +-
core/02-client/keeper/grpc_query.go | 43 ++
core/02-client/keeper/keeper.go | 23 +-
core/02-client/keeper/keeper_test.go | 4 +-
core/02-client/keeper/proposal.go | 26 +
core/02-client/keeper/proposal_test.go | 169 ++++-
core/02-client/proposal_handler.go | 6 +-
core/02-client/proposal_handler_test.go | 2 +-
core/02-client/simulation/decoder_test.go | 2 +-
core/02-client/types/client.pb.go | 441 ++++++++++--
core/02-client/types/codec.go | 11 +-
core/02-client/types/errors.go | 1 +
core/02-client/types/expected_keepers.go | 11 +
core/02-client/types/params.go | 2 +-
core/02-client/types/proposal.go | 94 ++-
core/02-client/types/proposal_test.go | 150 +++-
core/02-client/types/query.pb.go | 541 ++++++++++++--
core/02-client/types/query.pb.gw.go | 116 +++
core/03-connection/simulation/decoder_test.go | 2 +-
core/03-connection/types/codec.go | 6 +-
core/03-connection/types/msgs_test.go | 2 +-
core/04-channel/simulation/decoder_test.go | 2 +-
core/04-channel/types/acknowledgement.go | 50 ++
core/04-channel/types/acknowledgement_test.go | 63 ++
core/04-channel/types/channel.go | 45 --
core/04-channel/types/channel_test.go | 60 --
core/04-channel/types/codec.go | 19 +-
core/04-channel/types/msgs_test.go | 2 +-
core/05-port/keeper/keeper_test.go | 2 +-
core/23-commitment/types/codec.go | 8 +-
core/genesis_test.go | 2 +-
core/keeper/grpc_query.go | 5 +
core/keeper/keeper.go | 7 +-
core/keeper/msg_server_test.go | 26 +-
core/module.go | 2 -
core/simulation/decoder_test.go | 2 +-
docs/ibc/proto-docs.md | 135 ++++
go.mod | 9 +-
go.sum | 34 +-
.../07-tendermint/types/tendermint_test.go | 2 +-
light-clients/07-tendermint/types/upgrade.go | 6 +-
.../07-tendermint/types/upgrade_test.go | 180 ++---
.../09-localhost/types/localhost_test.go | 2 +-
proto/ibcgo/core/client/v1/client.proto | 22 +
proto/ibcgo/core/client/v1/query.proto | 23 +
testing/chain.go | 6 +-
testing/mock/mock.go | 2 -
testing/sdk_test.go | 334 +++++++++
testing/simapp/README.md | 51 ++
testing/simapp/app.go | 666 ++++++++++++++++++
testing/simapp/app_test.go | 193 +++++
testing/simapp/config.go | 75 ++
testing/simapp/encoding.go | 19 +
testing/simapp/export.go | 193 +++++
testing/simapp/genesis.go | 21 +
testing/simapp/genesis_account.go | 47 ++
testing/simapp/genesis_account_test.go | 88 +++
testing/simapp/helpers/test_helpers.go | 80 +++
testing/simapp/params/amino.go | 26 +
testing/simapp/params/doc.go | 19 +
testing/simapp/params/encoding.go | 16 +
testing/simapp/params/params.go | 7 +
testing/simapp/params/proto.go | 26 +
testing/simapp/params/weights.go | 28 +
testing/simapp/sim_bench_test.go | 122 ++++
testing/simapp/sim_test.go | 339 +++++++++
testing/simapp/simd/cmd/cmd_test.go | 24 +
testing/simapp/simd/cmd/genaccounts.go | 181 +++++
testing/simapp/simd/cmd/genaccounts_test.go | 85 +++
testing/simapp/simd/cmd/root.go | 230 ++++++
testing/simapp/simd/cmd/testnet.go | 400 +++++++++++
testing/simapp/simd/main.go | 24 +
testing/simapp/state.go | 233 ++++++
testing/simapp/test_helpers.go | 448 ++++++++++++
testing/simapp/types.go | 44 ++
testing/simapp/utils.go | 131 ++++
testing/simapp/utils_test.go | 60 ++
.../cosmos/upgrade/v1beta1/upgrade.proto | 60 ++
89 files changed, 6636 insertions(+), 467 deletions(-)
create mode 100644 .github/workflows/test.yml
create mode 100644 core/04-channel/types/acknowledgement.go
create mode 100644 core/04-channel/types/acknowledgement_test.go
create mode 100644 testing/sdk_test.go
create mode 100644 testing/simapp/README.md
create mode 100644 testing/simapp/app.go
create mode 100644 testing/simapp/app_test.go
create mode 100644 testing/simapp/config.go
create mode 100644 testing/simapp/encoding.go
create mode 100644 testing/simapp/export.go
create mode 100644 testing/simapp/genesis.go
create mode 100644 testing/simapp/genesis_account.go
create mode 100644 testing/simapp/genesis_account_test.go
create mode 100644 testing/simapp/helpers/test_helpers.go
create mode 100644 testing/simapp/params/amino.go
create mode 100644 testing/simapp/params/doc.go
create mode 100644 testing/simapp/params/encoding.go
create mode 100644 testing/simapp/params/params.go
create mode 100644 testing/simapp/params/proto.go
create mode 100644 testing/simapp/params/weights.go
create mode 100644 testing/simapp/sim_bench_test.go
create mode 100644 testing/simapp/sim_test.go
create mode 100644 testing/simapp/simd/cmd/cmd_test.go
create mode 100644 testing/simapp/simd/cmd/genaccounts.go
create mode 100644 testing/simapp/simd/cmd/genaccounts_test.go
create mode 100644 testing/simapp/simd/cmd/root.go
create mode 100644 testing/simapp/simd/cmd/testnet.go
create mode 100644 testing/simapp/simd/main.go
create mode 100644 testing/simapp/state.go
create mode 100644 testing/simapp/test_helpers.go
create mode 100644 testing/simapp/types.go
create mode 100644 testing/simapp/utils.go
create mode 100644 testing/simapp/utils_test.go
create mode 100644 third_party/proto/cosmos/upgrade/v1beta1/upgrade.proto
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
new file mode 100644
index 00000000..98308a96
--- /dev/null
+++ b/.github/workflows/test.yml
@@ -0,0 +1,225 @@
+name: Tests / Code Coverage
+# Tests / Code Coverage workflow runs unit tests and uploads a code coverage report
+# This workflow runs on pushes to main and on every pull request where a .go, .mod, or .sum file has been changed
+on:
+ pull_request:
+ push:
+ branches:
+ - main
+jobs:
+ cleanup-runs:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: rokroskar/workflow-run-cleanup-action@master
+ env:
+ GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
+ if: "!startsWith(github.ref, 'refs/tags/') && github.ref != 'refs/heads/main'"
+
+ install-tparse:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/setup-go@v2.1.3
+ with:
+ go-version: 1.15
+ - name: Display go version
+ run: go version
+ - name: install tparse
+ run: |
+ export GO111MODULE="on" && go get github.com/mfridman/tparse@v0.8.3
+ - uses: actions/cache@v2.1.4
+ with:
+ path: ~/go/bin
+ key: ${{ runner.os }}-go-tparse-binary
+
+ build:
+ runs-on: ubuntu-latest
+ strategy:
+ matrix:
+ go-arch: ["amd64", "arm", "arm64"]
+ steps:
+ - uses: actions/checkout@v2
+ - uses: actions/setup-go@v2.1.3
+ with:
+ go-version: 1.15
+ - uses: technote-space/get-diff-action@v4
+ id: git_diff
+ with:
+ PATTERNS: |
+ **/**.go
+ go.mod
+ go.sum
+ - name: Build
+ run: GOARCH=${{ matrix.go-arch }} LEDGER_ENABLED=false make build
+
+ split-test-files:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v2
+ - name: Create a file with all the pkgs
+ run: go list ./... > pkgs.txt
+ - name: Split pkgs into 4 files
+ run: split -d -n l/4 pkgs.txt pkgs.txt.part.
+ # cache multiple
+ - uses: actions/upload-artifact@v2
+ with:
+ name: "${{ github.sha }}-00"
+ path: ./pkgs.txt.part.00
+ - uses: actions/upload-artifact@v2
+ with:
+ name: "${{ github.sha }}-01"
+ path: ./pkgs.txt.part.01
+ - uses: actions/upload-artifact@v2
+ with:
+ name: "${{ github.sha }}-02"
+ path: ./pkgs.txt.part.02
+ - uses: actions/upload-artifact@v2
+ with:
+ name: "${{ github.sha }}-03"
+ path: ./pkgs.txt.part.03
+
+ tests:
+ runs-on: ubuntu-latest
+ needs: split-test-files
+ strategy:
+ fail-fast: false
+ matrix:
+ part: ["00", "01", "02", "03"]
+ steps:
+ - uses: actions/checkout@v2
+ - uses: actions/setup-go@v2.1.3
+ with:
+ go-version: 1.15
+ - uses: technote-space/get-diff-action@v4
+ with:
+ PATTERNS: |
+ **/**.go
+ go.mod
+ go.sum
+ - uses: actions/download-artifact@v2
+ with:
+ name: "${{ github.sha }}-${{ matrix.part }}"
+ if: env.GIT_DIFF
+ - name: test & coverage report creation
+ run: |
+ cat pkgs.txt.part.${{ matrix.part }} | xargs go test -mod=readonly -timeout 30m -coverprofile=${{ matrix.part }}profile.out -covermode=atomic -tags='norace ledger test_ledger_mock'
+ if: env.GIT_DIFF
+ - uses: actions/upload-artifact@v2
+ with:
+ name: "${{ github.sha }}-${{ matrix.part }}-coverage"
+ path: ./${{ matrix.part }}profile.out
+
+ upload-coverage-report:
+ runs-on: ubuntu-latest
+ needs: tests
+ steps:
+ - uses: actions/checkout@v2
+ - uses: technote-space/get-diff-action@v4
+ with:
+ PATTERNS: |
+ **/**.go
+ go.mod
+ go.sum
+ - uses: actions/download-artifact@v2
+ with:
+ name: "${{ github.sha }}-00-coverage"
+ if: env.GIT_DIFF
+ - uses: actions/download-artifact@v2
+ with:
+ name: "${{ github.sha }}-01-coverage"
+ if: env.GIT_DIFF
+ - uses: actions/download-artifact@v2
+ with:
+ name: "${{ github.sha }}-02-coverage"
+ if: env.GIT_DIFF
+ - uses: actions/download-artifact@v2
+ with:
+ name: "${{ github.sha }}-03-coverage"
+ if: env.GIT_DIFF
+ - run: |
+ cat ./*profile.out | grep -v "mode: atomic" >> coverage.txt
+ if: env.GIT_DIFF
+ - name: filter out DONTCOVER
+ run: |
+ excludelist="$(find ./ -type f -name '*.go' | xargs grep -l 'DONTCOVER')"
+ excludelist+=" $(find ./ -type f -name '*.pb.go')"
+ excludelist+=" $(find ./ -type f -name '*.pb.gw.go')"
+ excludelist+=" $(find ./ -type f -path './tests/mocks/*.go')"
+ for filename in ${excludelist}; do
+ filename=$(echo $filename | sed 's/^./github.com\/cosmos\/cosmos-sdk/g')
+ echo "Excluding ${filename} from coverage report..."
+ sed -i.bak "/$(echo $filename | sed 's/\//\\\//g')/d" coverage.txt
+ done
+ if: env.GIT_DIFF
+ - uses: codecov/codecov-action@v1.2.1
+ with:
+ file: ./coverage.txt
+ if: env.GIT_DIFF
+
+ test-race:
+ runs-on: ubuntu-latest
+ needs: split-test-files
+ strategy:
+ fail-fast: false
+ matrix:
+ part: ["00", "01", "02", "03"]
+ steps:
+ - uses: actions/checkout@v2
+ - uses: actions/setup-go@v2.1.3
+ with:
+ go-version: 1.15
+ - uses: technote-space/get-diff-action@v4
+ with:
+ PATTERNS: |
+ **/**.go
+ go.mod
+ go.sum
+ - uses: actions/download-artifact@v2
+ with:
+ name: "${{ github.sha }}-${{ matrix.part }}"
+ if: env.GIT_DIFF
+ - name: test & coverage report creation
+ run: |
+ xargs --arg-file=pkgs.txt.part.${{ matrix.part }} go test -mod=readonly -json -timeout 30m -race -tags='cgo ledger test_ledger_mock' | tee ${{ matrix.part }}-race-output.txt
+ if: env.GIT_DIFF
+ - uses: actions/upload-artifact@v2
+ with:
+ name: "${{ github.sha }}-${{ matrix.part }}-race-output"
+ path: ./${{ matrix.part }}-race-output.txt
+
+ race-detector-report:
+ runs-on: ubuntu-latest
+ needs: [test-race, install-tparse]
+ timeout-minutes: 5
+ steps:
+ - uses: actions/checkout@v2
+ - uses: technote-space/get-diff-action@v4
+ id: git_diff
+ with:
+ PATTERNS: |
+ **/**.go
+ go.mod
+ go.sum
+ - uses: actions/download-artifact@v2
+ with:
+ name: "${{ github.sha }}-00-race-output"
+ if: env.GIT_DIFF
+ - uses: actions/download-artifact@v2
+ with:
+ name: "${{ github.sha }}-01-race-output"
+ if: env.GIT_DIFF
+ - uses: actions/download-artifact@v2
+ with:
+ name: "${{ github.sha }}-02-race-output"
+ if: env.GIT_DIFF
+ - uses: actions/download-artifact@v2
+ with:
+ name: "${{ github.sha }}-03-race-output"
+ if: env.GIT_DIFF
+ - uses: actions/cache@v2.1.4
+ with:
+ path: ~/go/bin
+ key: ${{ runner.os }}-go-tparse-binary
+ if: env.GIT_DIFF
+ - name: Generate test report (go test -race)
+ run: cat ./*-race-output.txt | ~/go/bin/tparse
+ if: env.GIT_DIFF
diff --git a/Makefile b/Makefile
index 7ed1d5ab..8bf73ae6 100644
--- a/Makefile
+++ b/Makefile
@@ -7,7 +7,7 @@ COMMIT := $(shell git log -1 --format='%H')
LEDGER_ENABLED ?= true
BINDIR ?= $(GOPATH)/bin
BUILDDIR ?= $(CURDIR)/build
-SIMAPP = ./simapp
+SIMAPP = ./testing/simapp
MOCKS_DIR = $(CURDIR)/tests/mocks
HTTPS_GIT := https://github.com/cosmos/ibc-go.git
DOCKER := $(shell which docker)
@@ -400,6 +400,7 @@ TM_P2P = third_party/proto/tendermint/p2p
SDK_QUERY = third_party/proto/cosmos/base/query/v1beta1
SDK_BASE = third_party/proto/cosmos/base/v1beta1
+SDK_UPGRADE = third_party/proto/cosmos/upgrade
GOGO_PROTO_TYPES = third_party/proto/gogoproto
CONFIO_TYPES = third_party/proto/confio
@@ -414,6 +415,9 @@ proto-update-deps:
@mkdir -p $(SDK_BASE)
@curl -sSL $(SDK_PROTO_URL)/base/v1beta1/coin.proto > $(SDK_BASE)/coin.proto
+ @mkdir -p $(SDK_UPGRADE)
+ @curl -sSL $(SDK_PROTO_URL)/upgrade/v1beta1/upgrade.proto > $(SDK_UPGRADE)/v1beta1/upgrade.proto
+
## Importing of tendermint protobuf definitions currently requires the
## use of `sed` in order to build properly with cosmos-sdk's proto file layout
## (which is the standard Buf.build FILE_LAYOUT)
diff --git a/README.md b/README.md
index 56f3bab5..0e6c4e01 100644
--- a/README.md
+++ b/README.md
@@ -28,6 +28,7 @@
+
Interblockchain communication protocol (IBC) implementation in Golang built as a SDK module.
## Components
@@ -52,3 +53,4 @@ The localhost client is currently non-functional.
Please see our [documentation](docs/README.md) for more information.
+
diff --git a/apps/transfer/keeper/keeper.go b/apps/transfer/keeper/keeper.go
index fbc4a167..27db6db2 100644
--- a/apps/transfer/keeper/keeper.go
+++ b/apps/transfer/keeper/keeper.go
@@ -11,10 +11,10 @@ import (
authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
capabilitykeeper "github.com/cosmos/cosmos-sdk/x/capability/keeper"
capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"
+ paramtypes "github.com/cosmos/cosmos-sdk/x/params/types"
"github.com/cosmos/ibc-go/apps/transfer/types"
channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
host "github.com/cosmos/ibc-go/core/24-host"
- paramtypes "github.com/cosmos/cosmos-sdk/x/params/types"
)
// Keeper defines the IBC fungible transfer keeper
diff --git a/apps/transfer/keeper/relay_test.go b/apps/transfer/keeper/relay_test.go
index 32cacd73..97f4f96a 100644
--- a/apps/transfer/keeper/relay_test.go
+++ b/apps/transfer/keeper/relay_test.go
@@ -3,7 +3,7 @@ package keeper_test
import (
"fmt"
- "github.com/cosmos/cosmos-sdk/simapp"
+ "github.com/cosmos/ibc-go/testing/simapp"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/cosmos/ibc-go/apps/transfer/types"
diff --git a/apps/transfer/module.go b/apps/transfer/module.go
index 450e2fca..20dd3919 100644
--- a/apps/transfer/module.go
+++ b/apps/transfer/module.go
@@ -157,8 +157,6 @@ func (am AppModule) EndBlock(ctx sdk.Context, req abci.RequestEndBlock) []abci.V
return []abci.ValidatorUpdate{}
}
-//____________________________________________________________________________
-
// AppModuleSimulation functions
// GenerateGenesisState creates a randomized GenState of the transfer module.
@@ -186,8 +184,6 @@ func (am AppModule) WeightedOperations(_ module.SimulationState) []simtypes.Weig
return nil
}
-//____________________________________________________________________________
-
// ValidateTransferChannelParams does validation of a newly created transfer channel. A transfer
// channel must be UNORDERED, use the correct port (by default 'transfer'), and use the current
// supported version. Only 2^32 channels are allowed to be created.
diff --git a/apps/transfer/simulation/decoder_test.go b/apps/transfer/simulation/decoder_test.go
index a71f3997..69206b93 100644
--- a/apps/transfer/simulation/decoder_test.go
+++ b/apps/transfer/simulation/decoder_test.go
@@ -6,10 +6,10 @@ import (
"github.com/stretchr/testify/require"
- "github.com/cosmos/cosmos-sdk/simapp"
"github.com/cosmos/cosmos-sdk/types/kv"
"github.com/cosmos/ibc-go/apps/transfer/simulation"
"github.com/cosmos/ibc-go/apps/transfer/types"
+ "github.com/cosmos/ibc-go/testing/simapp"
)
func TestDecodeStore(t *testing.T) {
diff --git a/core/02-client/abci.go b/core/02-client/abci.go
index b65fb3c6..b5ddef8c 100644
--- a/core/02-client/abci.go
+++ b/core/02-client/abci.go
@@ -4,11 +4,32 @@ import (
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/cosmos/ibc-go/core/02-client/keeper"
"github.com/cosmos/ibc-go/core/exported"
+ ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
)
// BeginBlocker updates an existing localhost client with the latest block height.
func BeginBlocker(ctx sdk.Context, k keeper.Keeper) {
- _, found := k.GetClientState(ctx, exported.Localhost)
+ plan, found := k.GetUpgradePlan(ctx)
+ if found {
+ // Once we are at the last block this chain will commit, set the upgraded consensus state
+ // so that IBC clients can use the last NextValidatorsHash as a trusted kernel for verifying
+ // headers on the next version of the chain.
+ // Set the time to the last block time of the current chain.
+ // In order for a client to upgrade successfully, the first block of the new chain must be committed
+ // within the trusting period of the last block time on this chain.
+ _, exists := k.GetUpgradedClient(ctx, plan.Height)
+ if exists && ctx.BlockHeight() == plan.Height-1 {
+ upgradedConsState := &ibctmtypes.ConsensusState{
+ Timestamp: ctx.BlockTime(),
+ NextValidatorsHash: ctx.BlockHeader().NextValidatorsHash,
+ }
+ bz := k.MustMarshalConsensusState(upgradedConsState)
+
+ k.SetUpgradedConsensusState(ctx, plan.Height, bz)
+ }
+ }
+
+ _, found = k.GetClientState(ctx, exported.Localhost)
if !found {
return
}
diff --git a/core/02-client/abci_test.go b/core/02-client/abci_test.go
index 6d4a8d60..a36bed8b 100644
--- a/core/02-client/abci_test.go
+++ b/core/02-client/abci_test.go
@@ -4,10 +4,14 @@ import (
"testing"
"github.com/stretchr/testify/suite"
+ abci "github.com/tendermint/tendermint/abci/types"
+ tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
+ upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
client "github.com/cosmos/ibc-go/core/02-client"
"github.com/cosmos/ibc-go/core/02-client/types"
"github.com/cosmos/ibc-go/core/exported"
+ ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
localhosttypes "github.com/cosmos/ibc-go/light-clients/09-localhost/types"
ibctesting "github.com/cosmos/ibc-go/testing"
)
@@ -58,3 +62,33 @@ func (suite *ClientTestSuite) TestBeginBlocker() {
prevHeight = localHostClient.GetLatestHeight().(types.Height)
}
}
+
+func (suite *ClientTestSuite) TestBeginBlockerConsensusState() {
+ plan := &upgradetypes.Plan{
+ Name: "test",
+ Height: suite.chainA.GetContext().BlockHeight() + 1,
+ }
+ // set upgrade plan in the upgrade store
+ store := suite.chainA.GetContext().KVStore(suite.chainA.App.GetKey(upgradetypes.StoreKey))
+ bz := suite.chainA.App.AppCodec().MustMarshalBinaryBare(plan)
+ store.Set(upgradetypes.PlanKey(), bz)
+
+ nextValsHash := []byte("nextValsHash")
+ newCtx := suite.chainA.GetContext().WithBlockHeader(tmproto.Header{
+ Height: suite.chainA.GetContext().BlockHeight(),
+ NextValidatorsHash: nextValsHash,
+ })
+
+ err := suite.chainA.App.UpgradeKeeper.SetUpgradedClient(newCtx, plan.Height, []byte("client state"))
+ suite.Require().NoError(err)
+
+ req := abci.RequestBeginBlock{Header: newCtx.BlockHeader()}
+ suite.chainA.App.BeginBlock(req)
+
+ // plan Height is at ctx.BlockHeight+1
+ consState, found := suite.chainA.App.UpgradeKeeper.GetUpgradedConsensusState(newCtx, plan.Height)
+ suite.Require().True(found)
+ bz, err = types.MarshalConsensusState(suite.chainA.App.AppCodec(), &ibctmtypes.ConsensusState{Timestamp: newCtx.BlockTime(), NextValidatorsHash: nextValsHash})
+ suite.Require().NoError(err)
+ suite.Require().Equal(bz, consState)
+}
diff --git a/core/02-client/client/cli/tx.go b/core/02-client/client/cli/tx.go
index 9e62835f..e2ca4d9c 100644
--- a/core/02-client/client/cli/tx.go
+++ b/core/02-client/client/cli/tx.go
@@ -16,6 +16,7 @@ import (
"github.com/cosmos/cosmos-sdk/version"
govcli "github.com/cosmos/cosmos-sdk/x/gov/client/cli"
govtypes "github.com/cosmos/cosmos-sdk/x/gov/types"
+ upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
"github.com/cosmos/ibc-go/core/02-client/types"
"github.com/cosmos/ibc-go/core/exported"
)
@@ -326,3 +327,92 @@ func NewCmdSubmitUpdateClientProposal() *cobra.Command {
return cmd
}
+
+// NewCmdSubmitUpgradeProposal implements a command handler for submitting an upgrade IBC client proposal transaction.
+func NewCmdSubmitUpgradeProposal() *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "ibc-upgrade [name] [height] [path/to/upgraded_client_state.json] [flags]",
+ Args: cobra.ExactArgs(3),
+ Short: "Submit an IBC upgrade proposal",
+ Long: "Submit an IBC client breaking upgrade proposal along with an initial deposit.\n" +
+ "The client state specified is the upgraded client state representing the upgraded chain",
+ RunE: func(cmd *cobra.Command, args []string) error {
+ clientCtx, err := client.GetClientTxContext(cmd)
+ if err != nil {
+ return err
+ }
+ cdc := codec.NewProtoCodec(clientCtx.InterfaceRegistry)
+
+ title, err := cmd.Flags().GetString(govcli.FlagTitle)
+ if err != nil {
+ return err
+ }
+
+ description, err := cmd.Flags().GetString(govcli.FlagDescription)
+ if err != nil {
+ return err
+ }
+
+ name := args[0]
+
+ height, err := strconv.ParseInt(args[1], 10, 64)
+ if err != nil {
+ return err
+ }
+
+ plan := upgradetypes.Plan{
+ Name: name,
+ Height: height,
+ }
+
+ // attempt to unmarshal client state argument
+ var clientState exported.ClientState
+ clientContentOrFileName := args[2]
+ if err := cdc.UnmarshalInterfaceJSON([]byte(clientContentOrFileName), &clientState); err != nil {
+
+ // check for file path if JSON input is not provided
+ contents, err := ioutil.ReadFile(clientContentOrFileName)
+ if err != nil {
+ return errors.Wrap(err, "neither JSON input nor path to .json file for client state were provided")
+ }
+
+ if err := cdc.UnmarshalInterfaceJSON(contents, &clientState); err != nil {
+ return errors.Wrap(err, "error unmarshalling client state file")
+ }
+ }
+
+ content, err := types.NewUpgradeProposal(title, description, plan, clientState)
+ if err != nil {
+ return err
+ }
+
+ from := clientCtx.GetFromAddress()
+
+ depositStr, err := cmd.Flags().GetString(govcli.FlagDeposit)
+ if err != nil {
+ return err
+ }
+ deposit, err := sdk.ParseCoinsNormalized(depositStr)
+ if err != nil {
+ return err
+ }
+
+ msg, err := govtypes.NewMsgSubmitProposal(content, deposit, from)
+ if err != nil {
+ return err
+ }
+
+ if err = msg.ValidateBasic(); err != nil {
+ return err
+ }
+
+ return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), msg)
+ },
+ }
+
+ cmd.Flags().String(govcli.FlagTitle, "", "title of proposal")
+ cmd.Flags().String(govcli.FlagDescription, "", "description of proposal")
+ cmd.Flags().String(govcli.FlagDeposit, "", "deposit of proposal")
+
+ return cmd
+}
diff --git a/core/02-client/client/proposal_handler.go b/core/02-client/client/proposal_handler.go
index 265a189a..f4f2fa7b 100644
--- a/core/02-client/client/proposal_handler.go
+++ b/core/02-client/client/proposal_handler.go
@@ -1,8 +1,25 @@
package client
import (
+ "net/http"
+
+ "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/types/rest"
govclient "github.com/cosmos/cosmos-sdk/x/gov/client"
+ govrest "github.com/cosmos/cosmos-sdk/x/gov/client/rest"
"github.com/cosmos/ibc-go/core/02-client/client/cli"
)
-var ProposalHandler = govclient.NewProposalHandler(cli.NewCmdSubmitUpdateClientProposal, nil)
+var (
+ UpdateClientProposalHandler = govclient.NewProposalHandler(cli.NewCmdSubmitUpdateClientProposal, emptyRestHandler)
+ UpgradeProposalHandler = govclient.NewProposalHandler(cli.NewCmdSubmitUpgradeProposal, emptyRestHandler)
+)
+
+func emptyRestHandler(client.Context) govrest.ProposalRESTHandler {
+ return govrest.ProposalRESTHandler{
+ SubRoute: "unsupported-ibc-client",
+ Handler: func(w http.ResponseWriter, r *http.Request) {
+ rest.WriteErrorResponse(w, http.StatusBadRequest, "Legacy REST Routes are not supported for IBC proposals")
+ },
+ }
+}
diff --git a/core/02-client/keeper/client_test.go b/core/02-client/keeper/client_test.go
index 231486a2..b5f259fc 100644
--- a/core/02-client/keeper/client_test.go
+++ b/core/02-client/keeper/client_test.go
@@ -6,6 +6,7 @@ import (
tmtypes "github.com/tendermint/tendermint/types"
+ upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
"github.com/cosmos/ibc-go/core/02-client/types"
clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
@@ -14,7 +15,6 @@ import (
localhosttypes "github.com/cosmos/ibc-go/light-clients/09-localhost/types"
ibctesting "github.com/cosmos/ibc-go/testing"
ibctestingmock "github.com/cosmos/ibc-go/testing/mock"
- upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
)
func (suite *KeeperTestSuite) TestCreateClient() {
@@ -230,6 +230,8 @@ func (suite *KeeperTestSuite) TestUpgradeClient() {
lastHeight exported.Height
clientA string
proofUpgradedClient, proofUpgradedConsState []byte
+ upgradedClientBz, upgradedConsStateBz []byte
+ err error
)
testCases := []struct {
@@ -240,18 +242,12 @@ func (suite *KeeperTestSuite) TestUpgradeClient() {
{
name: "successful upgrade",
setup: func() {
-
- upgradedClient = ibctmtypes.NewClientState("newChainId", ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod+trustingPeriod, maxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false)
- upgradedConsState = &ibctmtypes.ConsensusState{
- NextValidatorsHash: []byte("nextValsHash"),
- }
-
// last Height is at next block
lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1))
// zero custom fields and store in upgrade store
- suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClient)
- suite.chainB.App.UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsState)
+ suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz)
+ suite.chainB.App.UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz)
// commit upgrade store changes and update clients
@@ -270,18 +266,12 @@ func (suite *KeeperTestSuite) TestUpgradeClient() {
{
name: "client state not found",
setup: func() {
-
- upgradedClient = ibctmtypes.NewClientState("newChainId", ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod+trustingPeriod, maxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false)
- upgradedConsState = &ibctmtypes.ConsensusState{
- NextValidatorsHash: []byte("nextValsHash"),
- }
-
// last Height is at next block
lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1))
// zero custom fields and store in upgrade store
- suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClient)
- suite.chainB.App.UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsState)
+ suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz)
+ suite.chainB.App.UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz)
// commit upgrade store changes and update clients
@@ -302,18 +292,12 @@ func (suite *KeeperTestSuite) TestUpgradeClient() {
{
name: "client state frozen",
setup: func() {
-
- upgradedClient = ibctmtypes.NewClientState("newChainId", ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod+trustingPeriod, maxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false)
- upgradedConsState = &ibctmtypes.ConsensusState{
- NextValidatorsHash: []byte("nextValsHash"),
- }
-
// last Height is at next block
lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1))
// zero custom fields and store in upgrade store
- suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClient)
- suite.chainB.App.UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsState)
+ suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz)
+ suite.chainB.App.UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz)
// commit upgrade store changes and update clients
@@ -338,18 +322,12 @@ func (suite *KeeperTestSuite) TestUpgradeClient() {
{
name: "tendermint client VerifyUpgrade fails",
setup: func() {
-
- upgradedClient = ibctmtypes.NewClientState("newChainId", ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod+trustingPeriod, maxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false)
- upgradedConsState = &ibctmtypes.ConsensusState{
- NextValidatorsHash: []byte("nextValsHash"),
- }
-
// last Height is at next block
lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1))
// zero custom fields and store in upgrade store
- suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClient)
- suite.chainB.App.UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsState)
+ suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz)
+ suite.chainB.App.UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz)
// change upgradedClient client-specified parameters
upgradedClient = ibctmtypes.NewClientState("wrongchainID", ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, true, true)
@@ -371,13 +349,23 @@ func (suite *KeeperTestSuite) TestUpgradeClient() {
for _, tc := range testCases {
tc := tc
clientA, _ = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+ upgradedClient = ibctmtypes.NewClientState("newChainId", ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod+trustingPeriod, maxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false)
+ upgradedClient = upgradedClient.ZeroCustomFields()
+ upgradedClientBz, err = types.MarshalClientState(suite.chainA.App.AppCodec(), upgradedClient)
+ suite.Require().NoError(err)
+
+ upgradedConsState = &ibctmtypes.ConsensusState{
+ NextValidatorsHash: []byte("nextValsHash"),
+ }
+ upgradedConsStateBz, err = types.MarshalConsensusState(suite.chainA.App.AppCodec(), upgradedConsState)
+ suite.Require().NoError(err)
tc.setup()
// Call ZeroCustomFields on upgraded clients to clear any client-chosen parameters in test-case upgradedClient
upgradedClient = upgradedClient.ZeroCustomFields()
- err := suite.chainA.App.IBCKeeper.ClientKeeper.UpgradeClient(suite.chainA.GetContext(), clientA, upgradedClient, upgradedConsState, proofUpgradedClient, proofUpgradedConsState)
+ err = suite.chainA.App.IBCKeeper.ClientKeeper.UpgradeClient(suite.chainA.GetContext(), clientA, upgradedClient, upgradedConsState, proofUpgradedClient, proofUpgradedConsState)
if tc.expPass {
suite.Require().NoError(err, "verify upgrade failed on valid case: %s", tc.name)
diff --git a/core/02-client/keeper/grpc_query.go b/core/02-client/keeper/grpc_query.go
index 9d4d6ae6..5d98ab16 100644
--- a/core/02-client/keeper/grpc_query.go
+++ b/core/02-client/keeper/grpc_query.go
@@ -197,3 +197,46 @@ func (q Keeper) ClientParams(c context.Context, _ *types.QueryClientParamsReques
Params: ¶ms,
}, nil
}
+
+// UpgradedClientState implements the Query/UpgradedClientState gRPC method
+func (q Keeper) UpgradedClientState(c context.Context, req *types.QueryUpgradedClientStateRequest) (*types.QueryUpgradedClientStateResponse, error) {
+ if req == nil {
+ return nil, status.Error(codes.InvalidArgument, "empty request")
+ }
+
+ if err := host.ClientIdentifierValidator(req.ClientId); err != nil {
+ return nil, status.Error(codes.InvalidArgument, err.Error())
+ }
+
+ ctx := sdk.UnwrapSDKContext(c)
+ plan, found := q.GetUpgradePlan(ctx)
+ if !found {
+ return nil, status.Error(
+ codes.NotFound, "upgrade plan not found",
+ )
+ }
+
+ bz, found := q.GetUpgradedClient(ctx, plan.Height)
+ if !found {
+ return nil, status.Error(
+ codes.NotFound,
+ sdkerrors.Wrap(types.ErrClientNotFound, req.ClientId).Error(),
+ )
+ }
+
+ clientState, err := types.UnmarshalClientState(q.cdc, bz)
+ if err != nil {
+ return nil, status.Error(
+ codes.Internal, err.Error(),
+ )
+ }
+
+ any, err := types.PackClientState(clientState)
+ if err != nil {
+ return nil, status.Error(codes.Internal, err.Error())
+ }
+
+ return &types.QueryUpgradedClientStateResponse{
+ UpgradedClientState: any,
+ }, nil
+}
diff --git a/core/02-client/keeper/keeper.go b/core/02-client/keeper/keeper.go
index 1278a76b..a41eaf16 100644
--- a/core/02-client/keeper/keeper.go
+++ b/core/02-client/keeper/keeper.go
@@ -12,13 +12,13 @@ import (
"github.com/cosmos/cosmos-sdk/store/prefix"
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ paramtypes "github.com/cosmos/cosmos-sdk/x/params/types"
+ upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
"github.com/cosmos/ibc-go/core/02-client/types"
commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
host "github.com/cosmos/ibc-go/core/24-host"
"github.com/cosmos/ibc-go/core/exported"
ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
- paramtypes "github.com/cosmos/cosmos-sdk/x/params/types"
- upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
)
// Keeper represents a type that grants read and write permissions to any client
@@ -28,10 +28,11 @@ type Keeper struct {
cdc codec.BinaryMarshaler
paramSpace paramtypes.Subspace
stakingKeeper types.StakingKeeper
+ upgradeKeeper types.UpgradeKeeper
}
// NewKeeper creates a new NewKeeper instance
-func NewKeeper(cdc codec.BinaryMarshaler, key sdk.StoreKey, paramSpace paramtypes.Subspace, sk types.StakingKeeper) Keeper {
+func NewKeeper(cdc codec.BinaryMarshaler, key sdk.StoreKey, paramSpace paramtypes.Subspace, sk types.StakingKeeper, uk types.UpgradeKeeper) Keeper {
// set KeyTable if it has not already been set
if !paramSpace.HasKeyTable() {
paramSpace = paramSpace.WithKeyTable(types.ParamKeyTable())
@@ -42,6 +43,7 @@ func NewKeeper(cdc codec.BinaryMarshaler, key sdk.StoreKey, paramSpace paramtype
cdc: cdc,
paramSpace: paramSpace,
stakingKeeper: sk,
+ upgradeKeeper: uk,
}
}
@@ -327,6 +329,21 @@ func (k Keeper) ValidateSelfClient(ctx sdk.Context, clientState exported.ClientS
return nil
}
+// GetUpgradePlan executes the upgrade keeper GetUpgradePlan function.
+func (k Keeper) GetUpgradePlan(ctx sdk.Context) (plan upgradetypes.Plan, havePlan bool) {
+ return k.upgradeKeeper.GetUpgradePlan(ctx)
+}
+
+// GetUpgradedClient executes the upgrade keeper GetUpgradeClient function.
+func (k Keeper) GetUpgradedClient(ctx sdk.Context, planHeight int64) ([]byte, bool) {
+ return k.upgradeKeeper.GetUpgradedClient(ctx, planHeight)
+}
+
+// SetUpgradedConsensusState executes the upgrade keeper SetUpgradedConsensusState function.
+func (k Keeper) SetUpgradedConsensusState(ctx sdk.Context, planHeight int64, bz []byte) error {
+ return k.upgradeKeeper.SetUpgradedConsensusState(ctx, planHeight, bz)
+}
+
// IterateClients provides an iterator over all stored light client State
// objects. For each State object, cb will be called. If the cb returns true,
// the iterator will close and stop.
diff --git a/core/02-client/keeper/keeper_test.go b/core/02-client/keeper/keeper_test.go
index 4badc9f4..806aaee2 100644
--- a/core/02-client/keeper/keeper_test.go
+++ b/core/02-client/keeper/keeper_test.go
@@ -13,8 +13,8 @@ import (
"github.com/cosmos/cosmos-sdk/baseapp"
"github.com/cosmos/cosmos-sdk/codec"
cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec"
- "github.com/cosmos/cosmos-sdk/simapp"
sdk "github.com/cosmos/cosmos-sdk/types"
+ stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types"
"github.com/cosmos/ibc-go/core/02-client/keeper"
"github.com/cosmos/ibc-go/core/02-client/types"
commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
@@ -23,7 +23,7 @@ import (
localhosttypes "github.com/cosmos/ibc-go/light-clients/09-localhost/types"
ibctesting "github.com/cosmos/ibc-go/testing"
ibctestingmock "github.com/cosmos/ibc-go/testing/mock"
- stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types"
+ "github.com/cosmos/ibc-go/testing/simapp"
)
const (
diff --git a/core/02-client/keeper/proposal.go b/core/02-client/keeper/proposal.go
index 78cb652d..c8beb7cd 100644
--- a/core/02-client/keeper/proposal.go
+++ b/core/02-client/keeper/proposal.go
@@ -70,3 +70,29 @@ func (k Keeper) ClientUpdateProposal(ctx sdk.Context, p *types.ClientUpdatePropo
return nil
}
+
+// HandleUpgradeProposal sets the upgraded client state in the upgrade store. It clears
+// an IBC client state and consensus state if a previous plan was set. Then it
+// will schedule an upgrade and finally set the upgraded client state in upgrade
+// store.
+func (k Keeper) HandleUpgradeProposal(ctx sdk.Context, p *types.UpgradeProposal) error {
+ clientState, err := types.UnpackClientState(p.UpgradedClientState)
+ if err != nil {
+ return sdkerrors.Wrap(err, "could not unpack UpgradedClientState")
+ }
+
+ // zero out any custom fields before setting
+ cs := clientState.ZeroCustomFields()
+ bz, err := types.MarshalClientState(k.cdc, cs)
+ if err != nil {
+ return sdkerrors.Wrap(err, "could not marshal UpgradedClientState")
+ }
+
+ if err := k.upgradeKeeper.ScheduleUpgrade(ctx, p.Plan); err != nil {
+ return err
+ }
+
+ // set the new upgraded client under plan.Height; the last height committed by this chain
+ // will be plan.Height - 1, since the chain will panic at plan.Height and the new chain will resume at plan.Height
+ return k.upgradeKeeper.SetUpgradedClient(ctx, p.Plan.Height, bz)
+}
diff --git a/core/02-client/keeper/proposal_test.go b/core/02-client/keeper/proposal_test.go
index cb0816af..1d2580dc 100644
--- a/core/02-client/keeper/proposal_test.go
+++ b/core/02-client/keeper/proposal_test.go
@@ -1,8 +1,9 @@
package keeper_test
import (
+ govtypes "github.com/cosmos/cosmos-sdk/x/gov/types"
+ upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
"github.com/cosmos/ibc-go/core/02-client/types"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
"github.com/cosmos/ibc-go/core/exported"
ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
ibctesting "github.com/cosmos/ibc-go/testing"
@@ -12,8 +13,8 @@ func (suite *KeeperTestSuite) TestClientUpdateProposal() {
var (
subject, substitute string
subjectClientState, substituteClientState exported.ClientState
- initialHeight clienttypes.Height
- content *types.ClientUpdateProposal
+ initialHeight types.Height
+ content govtypes.Content
err error
)
@@ -24,7 +25,7 @@ func (suite *KeeperTestSuite) TestClientUpdateProposal() {
}{
{
"valid update client proposal", func() {
- content = clienttypes.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, subject, substitute, initialHeight)
+ content = types.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, subject, substitute, initialHeight)
}, true,
},
{
@@ -35,52 +36,52 @@ func (suite *KeeperTestSuite) TestClientUpdateProposal() {
suite.Require().True(found)
newRevisionNumber := tmClientState.GetLatestHeight().GetRevisionNumber() + 1
- tmClientState.LatestHeight = clienttypes.NewHeight(newRevisionNumber, tmClientState.GetLatestHeight().GetRevisionHeight())
- initialHeight = clienttypes.NewHeight(newRevisionNumber, initialHeight.GetRevisionHeight())
+ tmClientState.LatestHeight = types.NewHeight(newRevisionNumber, tmClientState.GetLatestHeight().GetRevisionHeight())
+ initialHeight = types.NewHeight(newRevisionNumber, initialHeight.GetRevisionHeight())
suite.chainA.App.IBCKeeper.ClientKeeper.SetClientConsensusState(suite.chainA.GetContext(), substitute, tmClientState.LatestHeight, consState)
suite.chainA.App.IBCKeeper.ClientKeeper.SetClientState(suite.chainA.GetContext(), substitute, tmClientState)
- content = clienttypes.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, subject, substitute, initialHeight)
+ content = types.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, subject, substitute, initialHeight)
}, true,
},
{
"cannot use localhost as subject", func() {
- content = clienttypes.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, exported.Localhost, substitute, initialHeight)
+ content = types.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, exported.Localhost, substitute, initialHeight)
}, false,
},
{
"cannot use localhost as substitute", func() {
- content = clienttypes.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, subject, exported.Localhost, initialHeight)
+ content = types.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, subject, exported.Localhost, initialHeight)
}, false,
},
{
"subject client does not exist", func() {
- content = clienttypes.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, ibctesting.InvalidID, substitute, initialHeight)
+ content = types.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, ibctesting.InvalidID, substitute, initialHeight)
}, false,
},
{
"substitute client does not exist", func() {
- content = clienttypes.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, subject, ibctesting.InvalidID, initialHeight)
+ content = types.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, subject, ibctesting.InvalidID, initialHeight)
}, false,
},
{
"subject and substitute have equal latest height", func() {
tmClientState, ok := subjectClientState.(*ibctmtypes.ClientState)
suite.Require().True(ok)
- tmClientState.LatestHeight = substituteClientState.GetLatestHeight().(clienttypes.Height)
+ tmClientState.LatestHeight = substituteClientState.GetLatestHeight().(types.Height)
suite.chainA.App.IBCKeeper.ClientKeeper.SetClientState(suite.chainA.GetContext(), subject, tmClientState)
- content = clienttypes.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, subject, substitute, initialHeight)
+ content = types.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, subject, substitute, initialHeight)
}, false,
},
{
"update fails, client is not frozen or expired", func() {
tmClientState, ok := subjectClientState.(*ibctmtypes.ClientState)
suite.Require().True(ok)
- tmClientState.FrozenHeight = clienttypes.ZeroHeight()
+ tmClientState.FrozenHeight = types.ZeroHeight()
suite.chainA.App.IBCKeeper.ClientKeeper.SetClientState(suite.chainA.GetContext(), subject, tmClientState)
- content = clienttypes.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, subject, substitute, initialHeight)
+ content = types.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, subject, substitute, initialHeight)
}, false,
},
}
@@ -94,7 +95,7 @@ func (suite *KeeperTestSuite) TestClientUpdateProposal() {
subject, _ = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
subjectClientState = suite.chainA.GetClientState(subject)
substitute, _ = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
- initialHeight = clienttypes.NewHeight(subjectClientState.GetLatestHeight().GetRevisionNumber(), subjectClientState.GetLatestHeight().GetRevisionHeight()+1)
+ initialHeight = types.NewHeight(subjectClientState.GetLatestHeight().GetRevisionNumber(), subjectClientState.GetLatestHeight().GetRevisionHeight()+1)
// update substitute twice
suite.coordinator.UpdateClient(suite.chainA, suite.chainB, substitute, exported.Tendermint)
@@ -117,7 +118,9 @@ func (suite *KeeperTestSuite) TestClientUpdateProposal() {
tc.malleate()
- err = suite.chainA.App.IBCKeeper.ClientKeeper.ClientUpdateProposal(suite.chainA.GetContext(), content)
+ updateProp, ok := content.(*types.ClientUpdateProposal)
+ suite.Require().True(ok)
+ err = suite.chainA.App.IBCKeeper.ClientKeeper.ClientUpdateProposal(suite.chainA.GetContext(), updateProp)
if tc.expPass {
suite.Require().NoError(err)
@@ -128,3 +131,135 @@ func (suite *KeeperTestSuite) TestClientUpdateProposal() {
}
}
+
+func (suite *KeeperTestSuite) TestHandleUpgradeProposal() {
+ var (
+ upgradedClientState *ibctmtypes.ClientState
+ oldPlan, plan upgradetypes.Plan
+ content govtypes.Content
+ err error
+ )
+
+ testCases := []struct {
+ name string
+ malleate func()
+ expPass bool
+ }{
+ {
+ "valid upgrade proposal", func() {
+ content, err = types.NewUpgradeProposal(ibctesting.Title, ibctesting.Description, plan, upgradedClientState)
+ suite.Require().NoError(err)
+ }, true,
+ },
+ {
+ "valid upgrade proposal with previous IBC state", func() {
+ oldPlan = upgradetypes.Plan{
+ Name: "upgrade IBC clients",
+ Height: 100,
+ }
+
+ content, err = types.NewUpgradeProposal(ibctesting.Title, ibctesting.Description, plan, upgradedClientState)
+ suite.Require().NoError(err)
+ }, true,
+ },
+ {
+ "cannot unpack client state", func() {
+ any, err := types.PackConsensusState(&ibctmtypes.ConsensusState{})
+ suite.Require().NoError(err)
+ content = &types.UpgradeProposal{
+ Title: ibctesting.Title,
+ Description: ibctesting.Description,
+ Plan: plan,
+ UpgradedClientState: any,
+ }
+ }, false,
+ },
+ {
+ "schedule upgrade fails - plan sets time and height", func() {
+ plan = upgradetypes.Plan{
+ Name: "invalid plan",
+ Height: 1000,
+ Time: suite.chainA.GetContext().BlockTime(),
+ }
+ content, err = types.NewUpgradeProposal(ibctesting.Title, ibctesting.Description, plan, upgradedClientState)
+ suite.Require().NoError(err)
+ }, false,
+ },
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+
+ suite.Run(tc.name, func() {
+ suite.SetupTest() // reset
+ oldPlan.Height = 0 // reset
+
+ clientID, _ := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+ upgradedClientState = suite.chainA.GetClientState(clientID).ZeroCustomFields().(*ibctmtypes.ClientState)
+
+ // use height 1000 to distinguish from old plan
+ plan = upgradetypes.Plan{
+ Name: "upgrade IBC clients",
+ Height: 1000,
+ }
+
+ tc.malleate()
+
+ // set the old plan if it is not empty
+ if oldPlan.Height != 0 {
+ // set upgrade plan in the upgrade store
+ store := suite.chainA.GetContext().KVStore(suite.chainA.App.GetKey(upgradetypes.StoreKey))
+ bz := suite.chainA.App.AppCodec().MustMarshalBinaryBare(&oldPlan)
+ store.Set(upgradetypes.PlanKey(), bz)
+
+ bz, err := types.MarshalClientState(suite.chainA.App.AppCodec(), upgradedClientState)
+ suite.Require().NoError(err)
+ suite.chainA.App.UpgradeKeeper.SetUpgradedClient(suite.chainA.GetContext(), oldPlan.Height, bz)
+ }
+
+ upgradeProp, ok := content.(*types.UpgradeProposal)
+ suite.Require().True(ok)
+ err = suite.chainA.App.IBCKeeper.ClientKeeper.HandleUpgradeProposal(suite.chainA.GetContext(), upgradeProp)
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+
+ // check that the correct plan is returned
+ storedPlan, found := suite.chainA.App.UpgradeKeeper.GetUpgradePlan(suite.chainA.GetContext())
+ suite.Require().True(found)
+ suite.Require().Equal(plan, storedPlan)
+
+ // check that old upgraded client state is cleared
+ _, found = suite.chainA.App.UpgradeKeeper.GetUpgradedClient(suite.chainA.GetContext(), oldPlan.Height)
+ suite.Require().False(found)
+
+ // check that client state was set
+ storedClientState, found := suite.chainA.App.UpgradeKeeper.GetUpgradedClient(suite.chainA.GetContext(), plan.Height)
+ suite.Require().True(found)
+ clientState, err := types.UnmarshalClientState(suite.chainA.App.AppCodec(), storedClientState)
+ suite.Require().NoError(err)
+ suite.Require().Equal(upgradedClientState, clientState)
+ } else {
+ suite.Require().Error(err)
+
+ // check that the new plan wasn't stored
+ storedPlan, found := suite.chainA.App.UpgradeKeeper.GetUpgradePlan(suite.chainA.GetContext())
+ if oldPlan.Height != 0 {
+ // NOTE: this is only true if the ScheduleUpgrade function
+ // returns an error before clearing the old plan
+ suite.Require().True(found)
+ suite.Require().Equal(oldPlan, storedPlan)
+ } else {
+ suite.Require().False(found)
+ suite.Require().Empty(storedPlan)
+ }
+
+ // check that client state was not set
+ _, found = suite.chainA.App.UpgradeKeeper.GetUpgradedClient(suite.chainA.GetContext(), plan.Height)
+ suite.Require().False(found)
+
+ }
+ })
+ }
+
+}
diff --git a/core/02-client/proposal_handler.go b/core/02-client/proposal_handler.go
index 8a76f16d..cb1426b3 100644
--- a/core/02-client/proposal_handler.go
+++ b/core/02-client/proposal_handler.go
@@ -8,12 +8,14 @@ import (
"github.com/cosmos/ibc-go/core/02-client/types"
)
-// NewClientUpdateProposalHandler defines the client update proposal handler
-func NewClientUpdateProposalHandler(k keeper.Keeper) govtypes.Handler {
+// NewClientProposalHandler defines the 02-client proposal handler
+func NewClientProposalHandler(k keeper.Keeper) govtypes.Handler {
return func(ctx sdk.Context, content govtypes.Content) error {
switch c := content.(type) {
case *types.ClientUpdateProposal:
return k.ClientUpdateProposal(ctx, c)
+ case *types.UpgradeProposal:
+ return k.HandleUpgradeProposal(ctx, c)
default:
return sdkerrors.Wrapf(sdkerrors.ErrUnknownRequest, "unrecognized ibc proposal content type: %T", c)
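
For either proposal type to reach this handler, the application has to register it on the governance router under the 02-client router key. A minimal app-wiring sketch, assuming a simapp-style setup; the route keys and wiring shown here are assumptions about the surrounding application, not part of this file:

package example

import (
	govtypes "github.com/cosmos/cosmos-sdk/x/gov/types"

	ibcclient "github.com/cosmos/ibc-go/core/02-client"
	"github.com/cosmos/ibc-go/core/02-client/keeper"
	ibcclienttypes "github.com/cosmos/ibc-go/core/02-client/types"
)

// newGovRouter shows how the 02-client proposal handler would be added to the
// governance router so that ClientUpdateProposal and UpgradeProposal content
// is routed to NewClientProposalHandler.
func newGovRouter(clientKeeper keeper.Keeper) govtypes.Router {
	return govtypes.NewRouter().
		AddRoute(govtypes.RouterKey, govtypes.ProposalHandler).
		AddRoute(ibcclienttypes.RouterKey, ibcclient.NewClientProposalHandler(clientKeeper))
}
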
diff --git a/core/02-client/proposal_handler_test.go b/core/02-client/proposal_handler_test.go
index 047371ec..98480ee2 100644
--- a/core/02-client/proposal_handler_test.go
+++ b/core/02-client/proposal_handler_test.go
@@ -69,7 +69,7 @@ func (suite *ClientTestSuite) TestNewClientUpdateProposalHandler() {
tc.malleate()
- proposalHandler := client.NewClientUpdateProposalHandler(suite.chainA.App.IBCKeeper.ClientKeeper)
+ proposalHandler := client.NewClientProposalHandler(suite.chainA.App.IBCKeeper.ClientKeeper)
err = proposalHandler(suite.chainA.GetContext(), content)
diff --git a/core/02-client/simulation/decoder_test.go b/core/02-client/simulation/decoder_test.go
index 1259409a..56483c7f 100644
--- a/core/02-client/simulation/decoder_test.go
+++ b/core/02-client/simulation/decoder_test.go
@@ -7,12 +7,12 @@ import (
"github.com/stretchr/testify/require"
- "github.com/cosmos/cosmos-sdk/simapp"
"github.com/cosmos/cosmos-sdk/types/kv"
"github.com/cosmos/ibc-go/core/02-client/simulation"
"github.com/cosmos/ibc-go/core/02-client/types"
host "github.com/cosmos/ibc-go/core/24-host"
ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
+ "github.com/cosmos/ibc-go/testing/simapp"
)
func TestDecodeStore(t *testing.T) {
diff --git a/core/02-client/types/client.pb.go b/core/02-client/types/client.pb.go
index b63fce16..f19ea3ae 100644
--- a/core/02-client/types/client.pb.go
+++ b/core/02-client/types/client.pb.go
@@ -6,6 +6,7 @@ package types
import (
fmt "fmt"
types "github.com/cosmos/cosmos-sdk/codec/types"
+ types1 "github.com/cosmos/cosmos-sdk/x/upgrade/types"
_ "github.com/gogo/protobuf/gogoproto"
proto "github.com/gogo/protobuf/proto"
io "io"
@@ -246,6 +247,53 @@ func (m *ClientUpdateProposal) XXX_DiscardUnknown() {
var xxx_messageInfo_ClientUpdateProposal proto.InternalMessageInfo
+// UpgradeProposal is a gov Content type for initiating an IBC breaking
+// upgrade.
+type UpgradeProposal struct {
+ Title string `protobuf:"bytes,1,opt,name=title,proto3" json:"title,omitempty"`
+ Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+ Plan types1.Plan `protobuf:"bytes,3,opt,name=plan,proto3" json:"plan"`
+ // An UpgradedClientState must be provided to perform an IBC breaking upgrade.
+ // This will make the chain commit to the correct upgraded (self) client state
+ // before the upgrade occurs, so that connecting chains can verify that the
+ // new upgraded client is valid by verifying a proof on the previous version
+ // of the chain. This will allow IBC connections to persist smoothly across
+ // planned chain upgrades
+ UpgradedClientState *types.Any `protobuf:"bytes,4,opt,name=upgraded_client_state,json=upgradedClientState,proto3" json:"upgraded_client_state,omitempty" yaml:"upgraded_client_state"`
+}
+
+func (m *UpgradeProposal) Reset() { *m = UpgradeProposal{} }
+func (*UpgradeProposal) ProtoMessage() {}
+func (*UpgradeProposal) Descriptor() ([]byte, []int) {
+ return fileDescriptor_3cc2cf764ecc47af, []int{4}
+}
+func (m *UpgradeProposal) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *UpgradeProposal) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_UpgradeProposal.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *UpgradeProposal) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_UpgradeProposal.Merge(m, src)
+}
+func (m *UpgradeProposal) XXX_Size() int {
+ return m.Size()
+}
+func (m *UpgradeProposal) XXX_DiscardUnknown() {
+ xxx_messageInfo_UpgradeProposal.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UpgradeProposal proto.InternalMessageInfo
+
// Height is a monotonically increasing data type
// that can be compared against another Height for the purposes of updating and
// freezing clients
@@ -266,7 +314,7 @@ type Height struct {
func (m *Height) Reset() { *m = Height{} }
func (*Height) ProtoMessage() {}
func (*Height) Descriptor() ([]byte, []int) {
- return fileDescriptor_3cc2cf764ecc47af, []int{4}
+ return fileDescriptor_3cc2cf764ecc47af, []int{5}
}
func (m *Height) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -305,7 +353,7 @@ func (m *Params) Reset() { *m = Params{} }
func (m *Params) String() string { return proto.CompactTextString(m) }
func (*Params) ProtoMessage() {}
func (*Params) Descriptor() ([]byte, []int) {
- return fileDescriptor_3cc2cf764ecc47af, []int{5}
+ return fileDescriptor_3cc2cf764ecc47af, []int{6}
}
func (m *Params) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -346,6 +394,7 @@ func init() {
proto.RegisterType((*ConsensusStateWithHeight)(nil), "ibcgo.core.client.v1.ConsensusStateWithHeight")
proto.RegisterType((*ClientConsensusStates)(nil), "ibcgo.core.client.v1.ClientConsensusStates")
proto.RegisterType((*ClientUpdateProposal)(nil), "ibcgo.core.client.v1.ClientUpdateProposal")
+ proto.RegisterType((*UpgradeProposal)(nil), "ibcgo.core.client.v1.UpgradeProposal")
proto.RegisterType((*Height)(nil), "ibcgo.core.client.v1.Height")
proto.RegisterType((*Params)(nil), "ibcgo.core.client.v1.Params")
}
@@ -353,49 +402,88 @@ func init() {
func init() { proto.RegisterFile("ibcgo/core/client/v1/client.proto", fileDescriptor_3cc2cf764ecc47af) }
var fileDescriptor_3cc2cf764ecc47af = []byte{
- // 636 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0x4f, 0x6f, 0xd3, 0x30,
- 0x18, 0xc6, 0x9b, 0xae, 0xab, 0x56, 0x17, 0xda, 0x11, 0x52, 0xd6, 0x8d, 0xd1, 0x14, 0x9f, 0x7a,
- 0x59, 0xc2, 0xca, 0x6d, 0x37, 0xda, 0x03, 0xdb, 0x01, 0x34, 0x8c, 0x10, 0x88, 0x4b, 0x95, 0x3f,
- 0x5e, 0x6a, 0x94, 0xc6, 0x55, 0xec, 0x0c, 0x95, 0x4f, 0xc0, 0x91, 0x23, 0x07, 0x0e, 0x7c, 0x04,
- 0x3e, 0x05, 0xda, 0x71, 0x17, 0x24, 0x4e, 0x11, 0xda, 0xbe, 0x41, 0x3e, 0x01, 0x8a, 0xed, 0x6c,
- 0x6b, 0xd8, 0xa4, 0x89, 0x9b, 0xf3, 0xfa, 0xf1, 0xef, 0x7d, 0xde, 0x47, 0x8e, 0xc1, 0x63, 0xe2,
- 0x7a, 0x01, 0xb5, 0x3d, 0x1a, 0x63, 0xdb, 0x0b, 0x09, 0x8e, 0xb8, 0x7d, 0xbc, 0xab, 0x56, 0xd6,
- 0x3c, 0xa6, 0x9c, 0xea, 0x86, 0x90, 0x58, 0xb9, 0xc4, 0x52, 0x1b, 0xc7, 0xbb, 0x5b, 0x46, 0x40,
- 0x03, 0x2a, 0x04, 0x76, 0xbe, 0x92, 0xda, 0xad, 0xcd, 0x80, 0xd2, 0x20, 0xc4, 0xb6, 0xf8, 0x72,
- 0x93, 0x23, 0xdb, 0x89, 0x16, 0x72, 0x0b, 0x7e, 0xd3, 0x40, 0xe7, 0xc0, 0xc7, 0x11, 0x27, 0x47,
- 0x04, 0xfb, 0x63, 0x01, 0x7a, 0xcd, 0x1d, 0x8e, 0xf5, 0x5d, 0xd0, 0x90, 0xdc, 0x09, 0xf1, 0xbb,
- 0x5a, 0x5f, 0x1b, 0x34, 0x46, 0x46, 0x96, 0x9a, 0xeb, 0x0b, 0x67, 0x16, 0xee, 0xc1, 0x8b, 0x2d,
- 0x88, 0xd6, 0xe4, 0xfa, 0xc0, 0xd7, 0x0f, 0xc1, 0x1d, 0x55, 0x67, 0x39, 0xa2, 0x5b, 0xed, 0x6b,
- 0x83, 0xe6, 0xd0, 0xb0, 0x64, 0x7b, 0xab, 0x68, 0x6f, 0x3d, 0x8b, 0x16, 0xa3, 0x8d, 0x2c, 0x35,
- 0xef, 0x2f, 0xb1, 0xc4, 0x19, 0x88, 0x9a, 0xde, 0xa5, 0x09, 0xf8, 0x43, 0x03, 0xdd, 0x31, 0x8d,
- 0x18, 0x8e, 0x58, 0xc2, 0x44, 0xe9, 0x2d, 0xe1, 0xd3, 0x7d, 0x4c, 0x82, 0x29, 0xd7, 0xf7, 0x40,
- 0x7d, 0x2a, 0x56, 0xc2, 0x5e, 0x73, 0xb8, 0x6d, 0x5d, 0x97, 0x89, 0x25, 0xd5, 0xa3, 0xda, 0x49,
- 0x6a, 0x56, 0x90, 0x3a, 0xa1, 0xbf, 0x03, 0x6d, 0xaf, 0xe0, 0xde, 0xc2, 0xed, 0x66, 0x96, 0x9a,
- 0x9d, 0xdc, 0x2d, 0x2c, 0x9d, 0x82, 0xa8, 0xe5, 0x2d, 0xf9, 0x83, 0x3f, 0x35, 0xd0, 0x91, 0x39,
- 0x2e, 0x1b, 0x67, 0xff, 0x93, 0xe8, 0x27, 0xb0, 0x5e, 0x6a, 0xc8, 0xba, 0xd5, 0xfe, 0xca, 0xa0,
- 0x39, 0xb4, 0xae, 0x1f, 0xf6, 0xa6, 0xb0, 0x46, 0x66, 0x3e, 0x7e, 0x96, 0x9a, 0x1b, 0xaa, 0x5b,
- 0x89, 0x0a, 0x51, 0x7b, 0x79, 0x0e, 0x06, 0x7f, 0x55, 0x81, 0x21, 0x07, 0x79, 0x33, 0xf7, 0x1d,
- 0x8e, 0x0f, 0x63, 0x3a, 0xa7, 0xcc, 0x09, 0x75, 0x03, 0xac, 0x72, 0xc2, 0x43, 0x2c, 0x67, 0x40,
- 0xf2, 0x43, 0xef, 0x83, 0xa6, 0x8f, 0x99, 0x17, 0x93, 0x39, 0x27, 0x34, 0x12, 0x69, 0x36, 0xd0,
- 0xd5, 0x92, 0xbe, 0x0f, 0xee, 0xb1, 0xc4, 0xfd, 0x80, 0x3d, 0x3e, 0xb9, 0xcc, 0x61, 0x45, 0xe4,
- 0xb0, 0x9d, 0xa5, 0x66, 0x57, 0x3a, 0xfb, 0x47, 0x02, 0x51, 0x5b, 0xd5, 0xc6, 0x45, 0x2c, 0xaf,
- 0x80, 0xc1, 0x12, 0x97, 0x71, 0xc2, 0x13, 0x8e, 0xaf, 0xc0, 0x6a, 0x02, 0x66, 0x66, 0xa9, 0xf9,
- 0xb0, 0x80, 0x31, 0xb7, 0xac, 0x82, 0x48, 0xbf, 0x3c, 0x7c, 0x81, 0x74, 0x41, 0x8b, 0x44, 0x84,
- 0x13, 0x27, 0x9c, 0xa8, 0x4b, 0xb5, 0x7a, 0x8b, 0x4b, 0xf5, 0x48, 0xa5, 0xda, 0x91, 0xed, 0x96,
- 0x09, 0x10, 0xdd, 0x55, 0x05, 0xa9, 0xde, 0xab, 0x7d, 0xfe, 0x6e, 0x56, 0xf2, 0x5f, 0xae, 0xae,
- 0x6e, 0xf0, 0x18, 0xb4, 0x63, 0x7c, 0x4c, 0x18, 0xa1, 0xd1, 0x24, 0x4a, 0x66, 0x2e, 0x8e, 0x45,
- 0xa6, 0xb5, 0xd1, 0x56, 0x96, 0x9a, 0x0f, 0x24, 0xb3, 0x24, 0x80, 0xa8, 0x55, 0x54, 0x5e, 0x8a,
- 0xc2, 0x12, 0x44, 0x59, 0xaf, 0xde, 0x08, 0x29, 0x9c, 0x5d, 0x40, 0x94, 0xb5, 0xb5, 0xdc, 0xda,
- 0xd7, 0xdc, 0xde, 0x0b, 0x50, 0x3f, 0x74, 0x62, 0x67, 0xc6, 0x72, 0xb0, 0x13, 0x86, 0xf4, 0x23,
- 0xf6, 0x55, 0x78, 0xac, 0xab, 0xf5, 0x57, 0x06, 0x8d, 0xab, 0xe0, 0x92, 0x00, 0xa2, 0x96, 0xaa,
- 0xc8, 0x60, 0xd9, 0xe8, 0xf9, 0xc9, 0x59, 0x4f, 0x3b, 0x3d, 0xeb, 0x69, 0x7f, 0xce, 0x7a, 0xda,
- 0x97, 0xf3, 0x5e, 0xe5, 0xf4, 0xbc, 0x57, 0xf9, 0x7d, 0xde, 0xab, 0xbc, 0xdf, 0x09, 0x08, 0x9f,
- 0x26, 0xae, 0xe5, 0xd1, 0x99, 0xed, 0x51, 0x36, 0xa3, 0xcc, 0x26, 0xae, 0xb7, 0x53, 0xbc, 0x7b,
- 0x4f, 0x86, 0x3b, 0xea, 0xe9, 0xe3, 0x8b, 0x39, 0x66, 0x6e, 0x5d, 0xfc, 0x90, 0x4f, 0xff, 0x06,
- 0x00, 0x00, 0xff, 0xff, 0x47, 0x7f, 0x5c, 0x7c, 0x1c, 0x05, 0x00, 0x00,
+ // 736 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0xbf, 0x6f, 0xdb, 0x38,
+ 0x14, 0xb6, 0x1c, 0xc7, 0x88, 0xe9, 0x9c, 0x9d, 0x53, 0xec, 0x8b, 0x93, 0xcb, 0x59, 0x3e, 0xe2,
+ 0x06, 0x2f, 0x91, 0xce, 0x3e, 0xe0, 0x06, 0x6f, 0x67, 0x0f, 0x97, 0x0c, 0x77, 0x70, 0x55, 0x04,
+ 0x2d, 0xba, 0x18, 0xfa, 0xc1, 0xc8, 0x0c, 0x64, 0xd1, 0x10, 0x29, 0x17, 0xee, 0x5f, 0xd0, 0xb1,
+ 0x63, 0x87, 0x0e, 0xf9, 0x13, 0xfa, 0x57, 0x14, 0x19, 0xb3, 0x14, 0xe8, 0x24, 0x14, 0xc9, 0xd2,
+ 0x59, 0x6b, 0x97, 0x42, 0x24, 0xe5, 0xd8, 0x6e, 0x52, 0x04, 0xed, 0x46, 0x3e, 0x7e, 0xef, 0x7b,
+ 0xdf, 0xfb, 0xa8, 0x47, 0x81, 0xdf, 0xb1, 0xed, 0x78, 0xc4, 0x70, 0x48, 0x88, 0x0c, 0xc7, 0xc7,
+ 0x28, 0x60, 0xc6, 0xac, 0x23, 0x57, 0xfa, 0x34, 0x24, 0x8c, 0xa8, 0x35, 0x0e, 0xd1, 0x53, 0x88,
+ 0x2e, 0x0f, 0x66, 0x9d, 0x83, 0x9a, 0x47, 0x3c, 0xc2, 0x01, 0x46, 0xba, 0x12, 0xd8, 0x83, 0x7d,
+ 0x8f, 0x10, 0xcf, 0x47, 0x06, 0xdf, 0xd9, 0xd1, 0x99, 0x61, 0x05, 0x73, 0x79, 0xf4, 0x87, 0x43,
+ 0xe8, 0x84, 0x50, 0x23, 0x9a, 0x7a, 0xa1, 0xe5, 0x22, 0x63, 0xd6, 0xb1, 0x11, 0xb3, 0x3a, 0xd9,
+ 0x5e, 0xa0, 0xe0, 0x1b, 0x05, 0xd4, 0x4f, 0x5c, 0x14, 0x30, 0x7c, 0x86, 0x91, 0x3b, 0xe0, 0xe5,
+ 0x1e, 0x33, 0x8b, 0x21, 0xb5, 0x03, 0x4a, 0xa2, 0xfa, 0x08, 0xbb, 0x0d, 0xa5, 0xa5, 0xb4, 0x4b,
+ 0xfd, 0x5a, 0x12, 0x6b, 0x3b, 0x73, 0x6b, 0xe2, 0xf7, 0xe0, 0xe2, 0x08, 0x9a, 0x5b, 0x62, 0x7d,
+ 0xe2, 0xaa, 0x43, 0xb0, 0x2d, 0xe3, 0x34, 0xa5, 0x68, 0xe4, 0x5b, 0x4a, 0xbb, 0xdc, 0xad, 0xe9,
+ 0x42, 0xa4, 0x9e, 0x89, 0xd4, 0xff, 0x09, 0xe6, 0xfd, 0xbd, 0x24, 0xd6, 0x76, 0x57, 0xb8, 0x78,
+ 0x0e, 0x34, 0xcb, 0xce, 0xad, 0x08, 0xf8, 0x56, 0x01, 0x8d, 0x01, 0x09, 0x28, 0x0a, 0x68, 0x44,
+ 0x79, 0xe8, 0x09, 0x66, 0xe3, 0x63, 0x84, 0xbd, 0x31, 0x53, 0x7b, 0xa0, 0x38, 0xe6, 0x2b, 0x2e,
+ 0xaf, 0xdc, 0x3d, 0xd4, 0xef, 0x72, 0x4e, 0x17, 0xe8, 0x7e, 0xe1, 0x32, 0xd6, 0x72, 0xa6, 0xcc,
+ 0x50, 0x9f, 0x82, 0xaa, 0x93, 0xf1, 0x3e, 0x40, 0xed, 0x7e, 0x12, 0x6b, 0xf5, 0x54, 0x2d, 0x5c,
+ 0xcb, 0x82, 0x66, 0xc5, 0x59, 0xd1, 0x07, 0xdf, 0x29, 0xa0, 0x2e, 0x7c, 0x5c, 0x15, 0x4e, 0xbf,
+ 0xc7, 0xd1, 0x17, 0x60, 0x67, 0xad, 0x20, 0x6d, 0xe4, 0x5b, 0x1b, 0xed, 0x72, 0x57, 0xbf, 0xbb,
+ 0xd9, 0xfb, 0xcc, 0xea, 0x6b, 0x69, 0xfb, 0x49, 0xac, 0xed, 0xc9, 0x6a, 0x6b, 0xac, 0xd0, 0xac,
+ 0xae, 0xf6, 0x41, 0xe1, 0xfb, 0x3c, 0xa8, 0x89, 0x46, 0x4e, 0xa7, 0xae, 0xc5, 0xd0, 0x30, 0x24,
+ 0x53, 0x42, 0x2d, 0x5f, 0xad, 0x81, 0x4d, 0x86, 0x99, 0x8f, 0x44, 0x0f, 0xa6, 0xd8, 0xa8, 0x2d,
+ 0x50, 0x76, 0x11, 0x75, 0x42, 0x3c, 0x65, 0x98, 0x04, 0xdc, 0xcd, 0x92, 0xb9, 0x1c, 0x52, 0x8f,
+ 0xc1, 0xcf, 0x34, 0xb2, 0xcf, 0x91, 0xc3, 0x46, 0xb7, 0x3e, 0x6c, 0x70, 0x1f, 0x0e, 0x93, 0x58,
+ 0x6b, 0x08, 0x65, 0x5f, 0x41, 0xa0, 0x59, 0x95, 0xb1, 0x41, 0x66, 0xcb, 0x23, 0x50, 0xa3, 0x91,
+ 0x4d, 0x19, 0x66, 0x11, 0x43, 0x4b, 0x64, 0x05, 0x4e, 0xa6, 0x25, 0xb1, 0xf6, 0x6b, 0x46, 0x46,
+ 0xed, 0x75, 0x14, 0x34, 0xd5, 0xdb, 0xe4, 0x05, 0xa5, 0x0d, 0x2a, 0x38, 0xc0, 0x0c, 0x5b, 0xfe,
+ 0x48, 0x7e, 0x54, 0x9b, 0x0f, 0xf8, 0xa8, 0x7e, 0x93, 0xae, 0xd6, 0x45, 0xb9, 0x55, 0x06, 0x68,
+ 0xfe, 0x24, 0x03, 0x02, 0xdd, 0x2b, 0xbc, 0xbc, 0xd0, 0x72, 0xf0, 0xb3, 0x02, 0xaa, 0xa7, 0x62,
+ 0x08, 0x7f, 0xd8, 0xd2, 0xbf, 0x41, 0x61, 0xea, 0x5b, 0x01, 0x77, 0x31, 0xd5, 0x2a, 0x66, 0x5e,
+ 0xcf, 0x66, 0x5c, 0xce, 0xbc, 0x3e, 0xf4, 0xad, 0x40, 0x0e, 0x00, 0xc7, 0xab, 0xe7, 0xa0, 0x2e,
+ 0x31, 0xee, 0x68, 0x65, 0x64, 0x0b, 0xdf, 0x18, 0x82, 0x56, 0x12, 0x6b, 0x87, 0xa2, 0xd1, 0x3b,
+ 0x93, 0xa1, 0xb9, 0x9b, 0xc5, 0x97, 0x1e, 0x92, 0xde, 0x76, 0xda, 0xf5, 0xeb, 0x0b, 0x2d, 0xf7,
+ 0xe9, 0x42, 0x53, 0xd2, 0x07, 0xa7, 0x28, 0xe7, 0x77, 0x00, 0xaa, 0x21, 0x9a, 0x61, 0x8a, 0x49,
+ 0x30, 0x0a, 0xa2, 0x89, 0x8d, 0x42, 0xde, 0x7e, 0xa1, 0x7f, 0x90, 0xc4, 0xda, 0x2f, 0xa2, 0xd0,
+ 0x1a, 0x00, 0x9a, 0x95, 0x2c, 0xf2, 0x3f, 0x0f, 0xac, 0x90, 0xc8, 0x8b, 0xcb, 0xdf, 0x4b, 0x92,
+ 0xdd, 0xcb, 0x82, 0x44, 0x5e, 0xcc, 0x56, 0x26, 0x11, 0xfe, 0x07, 0x8a, 0x43, 0x2b, 0xb4, 0x26,
+ 0x34, 0x25, 0xb6, 0x7c, 0x9f, 0x3c, 0x5f, 0x34, 0x49, 0x1b, 0x4a, 0x6b, 0xa3, 0x5d, 0x5a, 0x26,
+ 0x5e, 0x03, 0x40, 0xb3, 0x22, 0x23, 0xa2, 0x7f, 0xda, 0xff, 0xf7, 0xf2, 0xba, 0xa9, 0x5c, 0x5d,
+ 0x37, 0x95, 0x8f, 0xd7, 0x4d, 0xe5, 0xd5, 0x4d, 0x33, 0x77, 0x75, 0xd3, 0xcc, 0x7d, 0xb8, 0x69,
+ 0xe6, 0x9e, 0x1d, 0x79, 0x98, 0x8d, 0x23, 0x5b, 0x77, 0xc8, 0xc4, 0x90, 0x2f, 0x35, 0xb6, 0x9d,
+ 0xa3, 0xec, 0xdf, 0xf0, 0x67, 0xf7, 0x48, 0xfe, 0x1e, 0xd8, 0x7c, 0x8a, 0xa8, 0x5d, 0xe4, 0x37,
+ 0xf1, 0xd7, 0x97, 0x00, 0x00, 0x00, 0xff, 0xff, 0x74, 0x31, 0x08, 0x26, 0x40, 0x06, 0x00, 0x00,
+}
+
+func (this *UpgradeProposal) Equal(that interface{}) bool {
+ if that == nil {
+ return this == nil
+ }
+
+ that1, ok := that.(*UpgradeProposal)
+ if !ok {
+ that2, ok := that.(UpgradeProposal)
+ if ok {
+ that1 = &that2
+ } else {
+ return false
+ }
+ }
+ if that1 == nil {
+ return this == nil
+ } else if this == nil {
+ return false
+ }
+ if this.Title != that1.Title {
+ return false
+ }
+ if this.Description != that1.Description {
+ return false
+ }
+ if !this.Plan.Equal(&that1.Plan) {
+ return false
+ }
+ if !this.UpgradedClientState.Equal(that1.UpgradedClientState) {
+ return false
+ }
+ return true
}
-
func (m *IdentifiedClientState) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@@ -588,6 +676,65 @@ func (m *ClientUpdateProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
+func (m *UpgradeProposal) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *UpgradeProposal) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *UpgradeProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.UpgradedClientState != nil {
+ {
+ size, err := m.UpgradedClientState.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintClient(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ {
+ size, err := m.Plan.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintClient(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ if len(m.Description) > 0 {
+ i -= len(m.Description)
+ copy(dAtA[i:], m.Description)
+ i = encodeVarintClient(dAtA, i, uint64(len(m.Description)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Title) > 0 {
+ i -= len(m.Title)
+ copy(dAtA[i:], m.Title)
+ i = encodeVarintClient(dAtA, i, uint64(len(m.Title)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
func (m *Height) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@@ -742,6 +889,29 @@ func (m *ClientUpdateProposal) Size() (n int) {
return n
}
+func (m *UpgradeProposal) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Title)
+ if l > 0 {
+ n += 1 + l + sovClient(uint64(l))
+ }
+ l = len(m.Description)
+ if l > 0 {
+ n += 1 + l + sovClient(uint64(l))
+ }
+ l = m.Plan.Size()
+ n += 1 + l + sovClient(uint64(l))
+ if m.UpgradedClientState != nil {
+ l = m.UpgradedClientState.Size()
+ n += 1 + l + sovClient(uint64(l))
+ }
+ return n
+}
+
func (m *Height) Size() (n int) {
if m == nil {
return 0
@@ -1342,6 +1512,189 @@ func (m *ClientUpdateProposal) Unmarshal(dAtA []byte) error {
}
return nil
}
+func (m *UpgradeProposal) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowClient
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: UpgradeProposal: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: UpgradeProposal: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Title", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowClient
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthClient
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthClient
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Title = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowClient
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthClient
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthClient
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Description = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Plan", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowClient
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthClient
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthClient
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Plan.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UpgradedClientState", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowClient
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthClient
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthClient
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.UpgradedClientState == nil {
+ m.UpgradedClientState = &types.Any{}
+ }
+ if err := m.UpgradedClientState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipClient(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthClient
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
func (m *Height) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
diff --git a/core/02-client/types/codec.go b/core/02-client/types/codec.go
index 441846b0..ab378bfd 100644
--- a/core/02-client/types/codec.go
+++ b/core/02-client/types/codec.go
@@ -14,29 +14,30 @@ import (
// RegisterInterfaces registers the client interfaces to protobuf Any.
func RegisterInterfaces(registry codectypes.InterfaceRegistry) {
registry.RegisterInterface(
- "ibc.core.client.v1.ClientState",
+ "ibcgo.core.client.v1.ClientState",
(*exported.ClientState)(nil),
)
registry.RegisterInterface(
- "ibc.core.client.v1.ConsensusState",
+ "ibcgo.core.client.v1.ConsensusState",
(*exported.ConsensusState)(nil),
)
registry.RegisterInterface(
- "ibc.core.client.v1.Header",
+ "ibcgo.core.client.v1.Header",
(*exported.Header)(nil),
)
registry.RegisterInterface(
- "ibc.core.client.v1.Height",
+ "ibcgo.core.client.v1.Height",
(*exported.Height)(nil),
&Height{},
)
registry.RegisterInterface(
- "ibc.core.client.v1.Misbehaviour",
+ "ibcgo.core.client.v1.Misbehaviour",
(*exported.Misbehaviour)(nil),
)
registry.RegisterImplementations(
(*govtypes.Content)(nil),
&ClientUpdateProposal{},
+ &UpgradeProposal{},
)
registry.RegisterImplementations(
(*sdk.Msg)(nil),
diff --git a/core/02-client/types/errors.go b/core/02-client/types/errors.go
index 5b44cd52..8a956f86 100644
--- a/core/02-client/types/errors.go
+++ b/core/02-client/types/errors.go
@@ -32,4 +32,5 @@ var (
ErrInvalidUpgradeClient = sdkerrors.Register(SubModuleName, 25, "invalid client upgrade")
ErrInvalidHeight = sdkerrors.Register(SubModuleName, 26, "invalid height")
ErrInvalidSubstitute = sdkerrors.Register(SubModuleName, 27, "invalid client state substitute")
+ ErrInvalidUpgradeProposal = sdkerrors.Register(SubModuleName, 28, "invalid upgrade proposal")
)
diff --git a/core/02-client/types/expected_keepers.go b/core/02-client/types/expected_keepers.go
index defc8150..ad007fb8 100644
--- a/core/02-client/types/expected_keepers.go
+++ b/core/02-client/types/expected_keepers.go
@@ -5,6 +5,7 @@ import (
sdk "github.com/cosmos/cosmos-sdk/types"
stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types"
+ upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
)
// StakingKeeper expected staking keeper
@@ -12,3 +13,13 @@ type StakingKeeper interface {
GetHistoricalInfo(ctx sdk.Context, height int64) (stakingtypes.HistoricalInfo, bool)
UnbondingTime(ctx sdk.Context) time.Duration
}
+
+// UpgradeKeeper expected upgrade keeper
+type UpgradeKeeper interface {
+ ClearIBCState(ctx sdk.Context, lastHeight int64)
+ GetUpgradePlan(ctx sdk.Context) (plan upgradetypes.Plan, havePlan bool)
+ GetUpgradedClient(ctx sdk.Context, height int64) ([]byte, bool)
+ SetUpgradedConsensusState(ctx sdk.Context, planHeight int64, bz []byte) error
+ SetUpgradedClient(ctx sdk.Context, planHeight int64, bz []byte) error
+ ScheduleUpgrade(ctx sdk.Context, plan upgradetypes.Plan) error
+}
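
The expected-keeper interface above is intended to be satisfied by the SDK's x/upgrade keeper. A compile-time assertion makes that dependency explicit; this is a sketch and assumes the targeted SDK version exposes all of these methods on a value receiver:

package example

import (
	upgradekeeper "github.com/cosmos/cosmos-sdk/x/upgrade/keeper"

	clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
)

// The build breaks here if the SDK upgrade keeper stops matching the
// UpgradeKeeper interface expected by 02-client.
var _ clienttypes.UpgradeKeeper = upgradekeeper.Keeper{}
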
diff --git a/core/02-client/types/params.go b/core/02-client/types/params.go
index a652aa1a..7a21ad75 100644
--- a/core/02-client/types/params.go
+++ b/core/02-client/types/params.go
@@ -4,8 +4,8 @@ import (
"fmt"
"strings"
- "github.com/cosmos/ibc-go/core/exported"
paramtypes "github.com/cosmos/cosmos-sdk/x/params/types"
+ "github.com/cosmos/ibc-go/core/exported"
)
var (
diff --git a/core/02-client/types/proposal.go b/core/02-client/types/proposal.go
index 95b10aaf..3141402b 100644
--- a/core/02-client/types/proposal.go
+++ b/core/02-client/types/proposal.go
@@ -1,23 +1,34 @@
package types
import (
+ "fmt"
+
+ codectypes "github.com/cosmos/cosmos-sdk/codec/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
govtypes "github.com/cosmos/cosmos-sdk/x/gov/types"
+ upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
+ "github.com/cosmos/ibc-go/core/exported"
)
const (
// ProposalTypeClientUpdate defines the type for a ClientUpdateProposal
ProposalTypeClientUpdate = "ClientUpdate"
+ ProposalTypeUpgrade = "IBCUpgrade"
)
-var _ govtypes.Content = &ClientUpdateProposal{}
+var (
+ _ govtypes.Content = &ClientUpdateProposal{}
+ _ govtypes.Content = &UpgradeProposal{}
+ _ codectypes.UnpackInterfacesMessage = &UpgradeProposal{}
+)
func init() {
govtypes.RegisterProposalType(ProposalTypeClientUpdate)
+ govtypes.RegisterProposalType(ProposalTypeUpgrade)
}
// NewClientUpdateProposal creates a new client update proposal.
-func NewClientUpdateProposal(title, description, subjectClientID, substituteClientID string, initialHeight Height) *ClientUpdateProposal {
+func NewClientUpdateProposal(title, description, subjectClientID, substituteClientID string, initialHeight Height) govtypes.Content {
return &ClientUpdateProposal{
Title: title,
Description: description,
@@ -62,3 +73,82 @@ func (cup *ClientUpdateProposal) ValidateBasic() error {
return nil
}
+
+// NewUpgradeProposal creates a new IBC breaking upgrade proposal.
+func NewUpgradeProposal(title, description string, plan upgradetypes.Plan, upgradedClientState exported.ClientState) (govtypes.Content, error) {
+ any, err := PackClientState(upgradedClientState)
+ if err != nil {
+ return nil, err
+ }
+
+ return &UpgradeProposal{
+ Title: title,
+ Description: description,
+ Plan: plan,
+ UpgradedClientState: any,
+ }, nil
+}
+
+// GetTitle returns the title of an upgrade proposal.
+func (up *UpgradeProposal) GetTitle() string { return up.Title }
+
+// GetDescription returns the description of an upgrade proposal.
+func (up *UpgradeProposal) GetDescription() string { return up.Description }
+
+// ProposalRoute returns the routing key of an upgrade proposal.
+func (up *UpgradeProposal) ProposalRoute() string { return RouterKey }
+
+// ProposalType returns the upgrade proposal type.
+func (up *UpgradeProposal) ProposalType() string { return ProposalTypeUpgrade }
+
+// ValidateBasic runs basic stateless validity checks
+func (up *UpgradeProposal) ValidateBasic() error {
+ if err := govtypes.ValidateAbstract(up); err != nil {
+ return err
+ }
+
+ if err := up.Plan.ValidateBasic(); err != nil {
+ return err
+ }
+
+ if up.Plan.Time.Unix() > 0 {
+ return sdkerrors.Wrap(ErrInvalidUpgradeProposal, "IBC chain upgrades must only set height")
+ }
+
+ if up.Plan.Height <= 0 {
+ return sdkerrors.Wrap(ErrInvalidUpgradeProposal, "IBC chain upgrades must set a positive height")
+ }
+
+ if up.UpgradedClientState == nil {
+ return sdkerrors.Wrap(ErrInvalidUpgradeProposal, "upgraded client state cannot be nil")
+ }
+
+ _, err := UnpackClientState(up.UpgradedClientState)
+ if err != nil {
+ return sdkerrors.Wrap(err, "failed to unpack upgraded client state")
+ }
+
+ return nil
+}
+
+// String returns the string representation of the UpgradeProposal.
+func (up UpgradeProposal) String() string {
+ var upgradedClientStr string
+ upgradedClient, err := UnpackClientState(up.UpgradedClientState)
+ if err != nil {
+ upgradedClientStr = "invalid IBC Client State"
+ } else {
+ upgradedClientStr = upgradedClient.String()
+ }
+
+ return fmt.Sprintf(`IBC Upgrade Proposal
+ Title: %s
+ Description: %s
+ %s
+ Upgraded IBC Client: %s`, up.Title, up.Description, up.Plan, upgradedClientStr)
+}
+
+// UnpackInterfaces implements UnpackInterfacesMessage.UnpackInterfaces
+func (up UpgradeProposal) UnpackInterfaces(unpacker codectypes.AnyUnpacker) error {
+ return unpacker.UnpackAny(up.UpgradedClientState, new(exported.ClientState))
+}
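
An UpgradeProposal is submitted through x/gov like any other content. A minimal construction sketch, assuming simapp-style wiring; the plan name, height, deposit, and helper name are illustrative only:

package example

import (
	sdk "github.com/cosmos/cosmos-sdk/types"
	govtypes "github.com/cosmos/cosmos-sdk/x/gov/types"
	upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"

	clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
	"github.com/cosmos/ibc-go/core/exported"
)

// newUpgradeProposalMsg wraps an UpgradeProposal in a gov MsgSubmitProposal.
// The upgraded client is expected to be the chain's own client state with
// custom fields zeroed out; ValidateBasic requires a height-only plan.
func newUpgradeProposalMsg(upgradedClient exported.ClientState, proposer sdk.AccAddress, deposit sdk.Coins) (*govtypes.MsgSubmitProposal, error) {
	plan := upgradetypes.Plan{
		Name:   "ibc upgrade", // illustrative
		Height: 1000,          // illustrative; must be > 0 and Time must be unset
	}

	content, err := clienttypes.NewUpgradeProposal("title", "description", plan, upgradedClient)
	if err != nil {
		return nil, err
	}

	return govtypes.NewMsgSubmitProposal(content, deposit, proposer)
}
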
diff --git a/core/02-client/types/proposal_test.go b/core/02-client/types/proposal_test.go
index f53d891b..5c6bfff8 100644
--- a/core/02-client/types/proposal_test.go
+++ b/core/02-client/types/proposal_test.go
@@ -1,11 +1,16 @@
package types_test
import (
+ "fmt"
+ "time"
+
"github.com/cosmos/cosmos-sdk/codec"
codectypes "github.com/cosmos/cosmos-sdk/codec/types"
govtypes "github.com/cosmos/cosmos-sdk/x/gov/types"
+ upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
"github.com/cosmos/ibc-go/core/02-client/types"
"github.com/cosmos/ibc-go/core/exported"
+ ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
ibctesting "github.com/cosmos/ibc-go/testing"
)
@@ -76,7 +81,8 @@ func (suite *TypesTestSuite) TestMarshalClientUpdateProposalProposal() {
cdc := codec.NewProtoCodec(ir)
// marshal message
- bz, err := cdc.MarshalJSON(proposal)
+ content := proposal.(*types.ClientUpdateProposal)
+ bz, err := cdc.MarshalJSON(content)
suite.Require().NoError(err)
// unmarshal proposal
@@ -84,3 +90,145 @@ func (suite *TypesTestSuite) TestMarshalClientUpdateProposalProposal() {
err = cdc.UnmarshalJSON(bz, newProposal)
suite.Require().NoError(err)
}
+
+func (suite *TypesTestSuite) TestUpgradeProposalValidateBasic() {
+ var (
+ proposal govtypes.Content
+ err error
+ )
+
+ client, _ := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+ cs := suite.chainA.GetClientState(client)
+ plan := upgradetypes.Plan{
+ Name: "ibc upgrade",
+ Height: 1000,
+ }
+
+ testCases := []struct {
+ name string
+ malleate func()
+ expPass bool
+ }{
+ {
+ "success", func() {
+ proposal, err = types.NewUpgradeProposal(ibctesting.Title, ibctesting.Description, plan, cs)
+ suite.Require().NoError(err)
+ }, true,
+ },
+ {
+ "fails validate abstract - empty title", func() {
+ proposal, err = types.NewUpgradeProposal("", ibctesting.Description, plan, cs)
+ suite.Require().NoError(err)
+
+ }, false,
+ },
+ {
+ "fails plan validate basic, height and time is 0", func() {
+ invalidPlan := upgradetypes.Plan{Name: "ibc upgrade"}
+ proposal, err = types.NewUpgradeProposal(ibctesting.Title, ibctesting.Description, invalidPlan, cs)
+ suite.Require().NoError(err)
+ }, false,
+ },
+ {
+ "plan height is zero", func() {
+ invalidPlan := upgradetypes.Plan{Name: "ibc upgrade", Height: 0}
+ proposal, err = types.NewUpgradeProposal(ibctesting.Title, ibctesting.Description, invalidPlan, cs)
+ suite.Require().NoError(err)
+ }, false,
+ },
+ {
+ "plan time is not set to 0", func() {
+ invalidPlan := upgradetypes.Plan{Name: "ibc upgrade", Time: time.Now()}
+ proposal, err = types.NewUpgradeProposal(ibctesting.Title, ibctesting.Description, invalidPlan, cs)
+ suite.Require().NoError(err)
+ }, false,
+ },
+ {
+ "client state is nil", func() {
+ proposal = &types.UpgradeProposal{
+ Title: ibctesting.Title,
+ Description: ibctesting.Description,
+ Plan: plan,
+ UpgradedClientState: nil,
+ }
+ }, false,
+ },
+ {
+ "failed to unpack client state", func() {
+ any, err := types.PackConsensusState(&ibctmtypes.ConsensusState{})
+ suite.Require().NoError(err)
+
+ proposal = &types.UpgradeProposal{
+ Title: ibctesting.Title,
+ Description: ibctesting.Description,
+ Plan: plan,
+ UpgradedClientState: any,
+ }
+ }, false,
+ },
+ }
+
+ for _, tc := range testCases {
+
+ tc.malleate()
+
+ err := proposal.ValidateBasic()
+
+ if tc.expPass {
+ suite.Require().NoError(err, tc.name)
+ } else {
+ suite.Require().Error(err, tc.name)
+ }
+ }
+}
+
+// tests that an upgrade proposal can be marshaled and unmarshaled, and that
+// the client state can be unpacked
+func (suite *TypesTestSuite) TestMarshalUpgradeProposal() {
+ // create proposal
+ plan := upgradetypes.Plan{
+ Name: "upgrade ibc",
+ Height: 1000,
+ }
+ content, err := types.NewUpgradeProposal("title", "description", plan, &ibctmtypes.ClientState{})
+ suite.Require().NoError(err)
+
+ up, ok := content.(*types.UpgradeProposal)
+ suite.Require().True(ok)
+
+ // create codec
+ ir := codectypes.NewInterfaceRegistry()
+ types.RegisterInterfaces(ir)
+ govtypes.RegisterInterfaces(ir)
+ ibctmtypes.RegisterInterfaces(ir)
+ cdc := codec.NewProtoCodec(ir)
+
+ // marshal message
+ bz, err := cdc.MarshalJSON(up)
+ suite.Require().NoError(err)
+
+ // unmarshal proposal
+ newUp := &types.UpgradeProposal{}
+ err = cdc.UnmarshalJSON(bz, newUp)
+ suite.Require().NoError(err)
+
+ // unpack client state
+ _, err = types.UnpackClientState(newUp.UpgradedClientState)
+ suite.Require().NoError(err)
+
+}
+
+func (suite *TypesTestSuite) TestUpgradeString() {
+ plan := upgradetypes.Plan{
+ Name: "ibc upgrade",
+ Info: "https://foo.bar/baz",
+ Height: 1000,
+ }
+
+ proposal, err := types.NewUpgradeProposal(ibctesting.Title, ibctesting.Description, plan, &ibctmtypes.ClientState{})
+ suite.Require().NoError(err)
+
+ expect := fmt.Sprintf("IBC Upgrade Proposal\n Title: title\n Description: description\n Upgrade Plan\n Name: ibc upgrade\n Height: 1000\n Info: https://foo.bar/baz.\n Upgraded IBC Client: %s", &ibctmtypes.ClientState{})
+
+ suite.Require().Equal(expect, proposal.String())
+}
diff --git a/core/02-client/types/query.pb.go b/core/02-client/types/query.pb.go
index bf74f2eb..f586604e 100644
--- a/core/02-client/types/query.pb.go
+++ b/core/02-client/types/query.pb.go
@@ -585,6 +585,110 @@ func (m *QueryClientParamsResponse) GetParams() *Params {
return nil
}
+// QueryUpgradedClientStateRequest is the request type for the
+// Query/UpgradedClientState RPC method
+type QueryUpgradedClientStateRequest struct {
+ // client state unique identifier
+ ClientId string `protobuf:"bytes,1,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty"`
+ // plan height of the current chain must be sent in the request,
+ // as this is the height under which the upgraded client state is stored
+ PlanHeight int64 `protobuf:"varint,2,opt,name=plan_height,json=planHeight,proto3" json:"plan_height,omitempty"`
+}
+
+func (m *QueryUpgradedClientStateRequest) Reset() { *m = QueryUpgradedClientStateRequest{} }
+func (m *QueryUpgradedClientStateRequest) String() string { return proto.CompactTextString(m) }
+func (*QueryUpgradedClientStateRequest) ProtoMessage() {}
+func (*QueryUpgradedClientStateRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_833c7bc6da1addd1, []int{10}
+}
+func (m *QueryUpgradedClientStateRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *QueryUpgradedClientStateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_QueryUpgradedClientStateRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *QueryUpgradedClientStateRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QueryUpgradedClientStateRequest.Merge(m, src)
+}
+func (m *QueryUpgradedClientStateRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *QueryUpgradedClientStateRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_QueryUpgradedClientStateRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QueryUpgradedClientStateRequest proto.InternalMessageInfo
+
+func (m *QueryUpgradedClientStateRequest) GetClientId() string {
+ if m != nil {
+ return m.ClientId
+ }
+ return ""
+}
+
+func (m *QueryUpgradedClientStateRequest) GetPlanHeight() int64 {
+ if m != nil {
+ return m.PlanHeight
+ }
+ return 0
+}
+
+// QueryUpgradedClientStateResponse is the response type for the
+// Query/UpgradedClientState RPC method.
+type QueryUpgradedClientStateResponse struct {
+ // client state associated with the request identifier
+ UpgradedClientState *types.Any `protobuf:"bytes,1,opt,name=upgraded_client_state,json=upgradedClientState,proto3" json:"upgraded_client_state,omitempty"`
+}
+
+func (m *QueryUpgradedClientStateResponse) Reset() { *m = QueryUpgradedClientStateResponse{} }
+func (m *QueryUpgradedClientStateResponse) String() string { return proto.CompactTextString(m) }
+func (*QueryUpgradedClientStateResponse) ProtoMessage() {}
+func (*QueryUpgradedClientStateResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_833c7bc6da1addd1, []int{11}
+}
+func (m *QueryUpgradedClientStateResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *QueryUpgradedClientStateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_QueryUpgradedClientStateResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *QueryUpgradedClientStateResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QueryUpgradedClientStateResponse.Merge(m, src)
+}
+func (m *QueryUpgradedClientStateResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *QueryUpgradedClientStateResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_QueryUpgradedClientStateResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QueryUpgradedClientStateResponse proto.InternalMessageInfo
+
+func (m *QueryUpgradedClientStateResponse) GetUpgradedClientState() *types.Any {
+ if m != nil {
+ return m.UpgradedClientState
+ }
+ return nil
+}
+
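
The new request/response pair is wired into the Query service further below; the plan height in the request must match the height the upgraded client was stored under. A minimal client-side sketch, assuming an open gRPC connection to a node serving this Query service; the helper name is illustrative:

package example

import (
	"context"

	"google.golang.org/grpc"

	clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
)

// queryUpgradedClientState calls the UpgradedClientState RPC and returns the
// packed upgraded client state stored under the given plan height.
func queryUpgradedClientState(conn *grpc.ClientConn, clientID string, planHeight int64) (*clienttypes.QueryUpgradedClientStateResponse, error) {
	queryClient := clienttypes.NewQueryClient(conn)

	return queryClient.UpgradedClientState(context.Background(), &clienttypes.QueryUpgradedClientStateRequest{
		ClientId:   clientID,
		PlanHeight: planHeight,
	})
}
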
func init() {
proto.RegisterType((*QueryClientStateRequest)(nil), "ibcgo.core.client.v1.QueryClientStateRequest")
proto.RegisterType((*QueryClientStateResponse)(nil), "ibcgo.core.client.v1.QueryClientStateResponse")
@@ -596,64 +700,71 @@ func init() {
proto.RegisterType((*QueryConsensusStatesResponse)(nil), "ibcgo.core.client.v1.QueryConsensusStatesResponse")
proto.RegisterType((*QueryClientParamsRequest)(nil), "ibcgo.core.client.v1.QueryClientParamsRequest")
proto.RegisterType((*QueryClientParamsResponse)(nil), "ibcgo.core.client.v1.QueryClientParamsResponse")
+ proto.RegisterType((*QueryUpgradedClientStateRequest)(nil), "ibcgo.core.client.v1.QueryUpgradedClientStateRequest")
+ proto.RegisterType((*QueryUpgradedClientStateResponse)(nil), "ibcgo.core.client.v1.QueryUpgradedClientStateResponse")
}
func init() { proto.RegisterFile("ibcgo/core/client/v1/query.proto", fileDescriptor_833c7bc6da1addd1) }
var fileDescriptor_833c7bc6da1addd1 = []byte{
- // 817 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0x4b, 0x4f, 0x1b, 0x47,
- 0x1c, 0xf7, 0xf0, 0x12, 0x8c, 0x0d, 0xae, 0x46, 0x6e, 0x31, 0x0b, 0x35, 0xc6, 0x48, 0xc5, 0x6d,
- 0xe5, 0x19, 0xec, 0x3e, 0x2f, 0x3d, 0x94, 0xaa, 0x50, 0x2e, 0x15, 0x6c, 0x0f, 0x95, 0x7a, 0xb1,
- 0x76, 0xd7, 0xe3, 0xf5, 0x4a, 0xf6, 0xce, 0xe2, 0x59, 0x5b, 0x42, 0x88, 0x0b, 0x1f, 0xa0, 0x8a,
- 0x94, 0x5b, 0xae, 0xb9, 0xe5, 0x84, 0x72, 0xcb, 0x37, 0xe0, 0x88, 0x14, 0x29, 0xca, 0x29, 0x89,
- 0x70, 0x3e, 0x43, 0xce, 0xd1, 0xce, 0xcc, 0x9a, 0x5d, 0xb3, 0x84, 0x25, 0x4a, 0x6e, 0xe3, 0xff,
- 0xf3, 0xf7, 0xfb, 0xbf, 0xbc, 0xb0, 0xec, 0x98, 0x96, 0xcd, 0x88, 0xc5, 0xfa, 0x94, 0x58, 0x5d,
- 0x87, 0xba, 0x3e, 0x19, 0xd6, 0xc9, 0xd1, 0x80, 0xf6, 0x8f, 0xb1, 0xd7, 0x67, 0x3e, 0x43, 0x05,
- 0x61, 0x81, 0x03, 0x0b, 0x2c, 0x2d, 0xf0, 0xb0, 0xae, 0x7d, 0x67, 0x31, 0xde, 0x63, 0x9c, 0x98,
- 0x06, 0xa7, 0xd2, 0x9c, 0x0c, 0xeb, 0x26, 0xf5, 0x8d, 0x3a, 0xf1, 0x0c, 0xdb, 0x71, 0x0d, 0xdf,
- 0x61, 0xae, 0x8c, 0xa0, 0x6d, 0x24, 0xe6, 0x50, 0xb1, 0xa4, 0xc9, 0x8a, 0xcd, 0x98, 0xdd, 0xa5,
- 0x44, 0xfc, 0x32, 0x07, 0x6d, 0x62, 0xb8, 0x2a, 0xbf, 0xb6, 0xa6, 0x54, 0x86, 0xe7, 0x10, 0xc3,
- 0x75, 0x99, 0x2f, 0x42, 0x73, 0xa5, 0x2d, 0xd8, 0xcc, 0x66, 0xe2, 0x49, 0x82, 0x97, 0x94, 0x56,
- 0x7e, 0x86, 0xcb, 0x87, 0x01, 0xa6, 0x3f, 0x44, 0x8e, 0x7f, 0x7c, 0xc3, 0xa7, 0x3a, 0x3d, 0x1a,
- 0x50, 0xee, 0xa3, 0x55, 0xb8, 0x20, 0x33, 0x37, 0x9d, 0x56, 0x11, 0x94, 0x41, 0x75, 0x41, 0x9f,
- 0x97, 0x82, 0xfd, 0x56, 0xe5, 0x1c, 0xc0, 0xe2, 0x4d, 0x47, 0xee, 0x31, 0x97, 0x53, 0xf4, 0x0b,
- 0xcc, 0x29, 0x4f, 0x1e, 0xc8, 0x85, 0x73, 0xb6, 0x51, 0xc0, 0x12, 0x1f, 0x0e, 0xa1, 0xe3, 0xdf,
- 0xdd, 0x63, 0x3d, 0x6b, 0x5d, 0x07, 0x40, 0x05, 0x38, 0xeb, 0xf5, 0x19, 0x6b, 0x17, 0xa7, 0xca,
- 0xa0, 0x9a, 0xd3, 0xe5, 0x0f, 0xf4, 0x27, 0xcc, 0x89, 0x47, 0xb3, 0x43, 0x1d, 0xbb, 0xe3, 0x17,
- 0xa7, 0x45, 0xb8, 0x35, 0x9c, 0x54, 0x6e, 0xfc, 0x97, 0xb0, 0xd9, 0x99, 0xb9, 0x78, 0xb5, 0x9e,
- 0xd1, 0xb3, 0xc2, 0x4f, 0x8a, 0x2a, 0xe6, 0x4d, 0xc4, 0x3c, 0xe4, 0xba, 0x0b, 0xe1, 0x75, 0x33,
- 0x14, 0xde, 0x6f, 0xb0, 0xec, 0x1c, 0x0e, 0x3a, 0x87, 0x65, 0xa3, 0x55, 0xe7, 0xf0, 0x81, 0x61,
- 0x87, 0x75, 0xd2, 0x23, 0x9e, 0x95, 0x17, 0x00, 0xae, 0x24, 0x24, 0x51, 0x75, 0xf1, 0xe0, 0x62,
- 0xb4, 0x2e, 0xbc, 0x08, 0xca, 0xd3, 0xd5, 0x6c, 0xe3, 0xfb, 0x64, 0x26, 0xfb, 0x2d, 0xea, 0xfa,
- 0x4e, 0xdb, 0xa1, 0xad, 0x48, 0xb0, 0x9d, 0x52, 0x40, 0xec, 0xc9, 0xeb, 0xf5, 0xaf, 0x12, 0xd5,
- 0x5c, 0xcf, 0x45, 0xea, 0xc9, 0xd1, 0x5e, 0x8c, 0xd7, 0x94, 0xe0, 0xb5, 0x75, 0x27, 0x2f, 0x09,
- 0x37, 0x46, 0xec, 0x1c, 0x40, 0x4d, 0x12, 0x0b, 0x54, 0x2e, 0x1f, 0xf0, 0xd4, 0xb3, 0x82, 0xb6,
- 0x60, 0xbe, 0x4f, 0x87, 0x0e, 0x77, 0x98, 0xdb, 0x74, 0x07, 0x3d, 0x93, 0xf6, 0x05, 0x92, 0x19,
- 0x7d, 0x29, 0x14, 0xff, 0x2d, 0xa4, 0x31, 0xc3, 0x48, 0xaf, 0x23, 0x86, 0xb2, 0x95, 0x68, 0x13,
- 0x2e, 0x76, 0x03, 0x7e, 0x7e, 0x68, 0x36, 0x53, 0x06, 0xd5, 0x79, 0x3d, 0x27, 0x85, 0xaa, 0xdf,
- 0xcf, 0x00, 0x5c, 0x4d, 0x84, 0xac, 0xba, 0xf1, 0x1b, 0xcc, 0x5b, 0xa1, 0x26, 0xc5, 0xa0, 0x2e,
- 0x59, 0xb1, 0x30, 0x9f, 0x77, 0x56, 0xcf, 0x92, 0xb1, 0xf3, 0x54, 0xf5, 0xde, 0x4d, 0x68, 0xfa,
- 0xc7, 0x0c, 0xf3, 0x05, 0x80, 0x6b, 0xc9, 0x20, 0x54, 0x05, 0x9b, 0xf0, 0x8b, 0x89, 0x0a, 0x86,
- 0x23, 0x8d, 0x93, 0x09, 0xc7, 0x03, 0xfd, 0xeb, 0xf8, 0x9d, 0x58, 0x09, 0xf2, 0xf1, 0x12, 0x7f,
- 0xc2, 0xf1, 0xd5, 0x62, 0xbb, 0x7f, 0x60, 0xf4, 0x8d, 0x5e, 0x58, 0xcb, 0xca, 0x61, 0x6c, 0x65,
- 0x43, 0x9d, 0xa2, 0xf8, 0x23, 0x9c, 0xf3, 0x84, 0x44, 0xcd, 0xc6, 0x2d, 0x9d, 0x54, 0x5e, 0xca,
- 0xb6, 0xf1, 0x6e, 0x0e, 0xce, 0x8a, 0x98, 0xe8, 0x31, 0x80, 0xd9, 0xc8, 0x7e, 0xa2, 0x5a, 0xb2,
- 0xff, 0x2d, 0x37, 0x58, 0xc3, 0x69, 0xcd, 0x25, 0xdc, 0xca, 0x4f, 0x67, 0xcf, 0xdf, 0x3e, 0x9c,
- 0x22, 0xa8, 0x46, 0x1c, 0xd3, 0x4a, 0xfe, 0x1f, 0x51, 0x8d, 0x22, 0x27, 0xe3, 0x01, 0x3a, 0x45,
- 0x8f, 0x00, 0xcc, 0x45, 0xaf, 0x08, 0x4a, 0x99, 0x37, 0xac, 0xa1, 0x46, 0x52, 0xdb, 0x2b, 0xa0,
- 0xdf, 0x0a, 0xa0, 0x9b, 0x68, 0xe3, 0x4e, 0xa0, 0x68, 0x04, 0xe0, 0x52, 0x7c, 0x70, 0xd0, 0xf6,
- 0x87, 0xd2, 0x25, 0x1d, 0x28, 0xad, 0x7e, 0x0f, 0x0f, 0x05, 0xb1, 0x2b, 0x20, 0xb6, 0x51, 0x2b,
- 0x11, 0xe2, 0xc4, 0xdc, 0x47, 0xcb, 0x49, 0xc2, 0x6b, 0x45, 0x4e, 0x26, 0xee, 0xde, 0x29, 0x91,
- 0x67, 0x21, 0xa2, 0x90, 0x82, 0x53, 0xf4, 0x14, 0xc0, 0xfc, 0xc4, 0x9e, 0xa1, 0xf4, 0xa0, 0xc7,
- 0x8d, 0x68, 0xdc, 0xc7, 0x45, 0x11, 0xfd, 0x55, 0x10, 0x6d, 0xa0, 0xed, 0xfb, 0x12, 0x45, 0xff,
- 0x8f, 0xe7, 0x46, 0x2e, 0x40, 0x8a, 0xb9, 0x89, 0xed, 0x5e, 0x8a, 0xb9, 0x89, 0xef, 0x63, 0xe5,
- 0x6b, 0x81, 0x75, 0x19, 0x7d, 0x29, 0xb1, 0x8e, 0x61, 0xca, 0xc5, 0xdb, 0xd9, 0xbb, 0xb8, 0x2a,
- 0x81, 0xcb, 0xab, 0x12, 0x78, 0x73, 0x55, 0x02, 0x0f, 0x46, 0xa5, 0xcc, 0xe5, 0xa8, 0x94, 0x79,
- 0x39, 0x2a, 0x65, 0xfe, 0xab, 0xd9, 0x8e, 0xdf, 0x19, 0x98, 0xd8, 0x62, 0x3d, 0xa2, 0xbe, 0xc8,
- 0x1c, 0xd3, 0xaa, 0x85, 0x5f, 0x5b, 0xdb, 0x8d, 0x9a, 0x0a, 0xe6, 0x1f, 0x7b, 0x94, 0x9b, 0x73,
- 0xe2, 0xf6, 0xff, 0xf0, 0x3e, 0x00, 0x00, 0xff, 0xff, 0x3e, 0x73, 0xec, 0x7c, 0xf6, 0x09, 0x00,
- 0x00,
+ // 904 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0x4d, 0x8f, 0xdb, 0x44,
+ 0x18, 0xde, 0xd9, 0xdd, 0x56, 0xdb, 0x49, 0xba, 0x41, 0xd3, 0x94, 0xa6, 0xee, 0x92, 0xa4, 0xae,
+ 0x44, 0x03, 0x28, 0x33, 0x9b, 0x40, 0x97, 0x5e, 0x40, 0x62, 0x11, 0xfd, 0xb8, 0xa0, 0xd6, 0x08,
+ 0x21, 0x71, 0x89, 0x6c, 0x67, 0xe2, 0x58, 0x4a, 0x3c, 0xae, 0xc7, 0x8e, 0xb4, 0xaa, 0xf6, 0xd2,
+ 0x1f, 0x80, 0x90, 0xb8, 0x71, 0xe5, 0xc6, 0xa9, 0xe2, 0xc6, 0x81, 0x1b, 0x87, 0x3d, 0x56, 0x42,
+ 0x42, 0x9c, 0x00, 0x6d, 0xf8, 0x21, 0xc8, 0x33, 0xe3, 0xd4, 0xce, 0x4e, 0xba, 0x0e, 0xa2, 0x37,
+ 0xfb, 0xfd, 0x7c, 0xde, 0xe7, 0xfd, 0xb0, 0x61, 0xdb, 0x77, 0x5c, 0x8f, 0x11, 0x97, 0x45, 0x94,
+ 0xb8, 0x13, 0x9f, 0x06, 0x31, 0x99, 0xf5, 0xc8, 0x93, 0x84, 0x46, 0x47, 0x38, 0x8c, 0x58, 0xcc,
+ 0x50, 0x5d, 0x58, 0xe0, 0xd4, 0x02, 0x4b, 0x0b, 0x3c, 0xeb, 0x19, 0xef, 0xba, 0x8c, 0x4f, 0x19,
+ 0x27, 0x8e, 0xcd, 0xa9, 0x34, 0x27, 0xb3, 0x9e, 0x43, 0x63, 0xbb, 0x47, 0x42, 0xdb, 0xf3, 0x03,
+ 0x3b, 0xf6, 0x59, 0x20, 0x23, 0x18, 0x37, 0xb5, 0x39, 0x54, 0x2c, 0x69, 0x72, 0xdd, 0x63, 0xcc,
+ 0x9b, 0x50, 0x22, 0xde, 0x9c, 0x64, 0x44, 0xec, 0x40, 0xe5, 0x37, 0xf6, 0x94, 0xca, 0x0e, 0x7d,
+ 0x62, 0x07, 0x01, 0x8b, 0x45, 0x68, 0xae, 0xb4, 0x75, 0x8f, 0x79, 0x4c, 0x3c, 0x92, 0xf4, 0x49,
+ 0x4a, 0xcd, 0x03, 0x78, 0xed, 0x71, 0x8a, 0xe9, 0x53, 0x91, 0xe3, 0x8b, 0xd8, 0x8e, 0xa9, 0x45,
+ 0x9f, 0x24, 0x94, 0xc7, 0xe8, 0x06, 0xbc, 0x24, 0x33, 0x0f, 0xfc, 0x61, 0x03, 0xb4, 0x41, 0xe7,
+ 0x92, 0xb5, 0x23, 0x05, 0x0f, 0x87, 0xe6, 0x73, 0x00, 0x1b, 0x67, 0x1d, 0x79, 0xc8, 0x02, 0x4e,
+ 0xd1, 0x87, 0xb0, 0xaa, 0x3c, 0x79, 0x2a, 0x17, 0xce, 0x95, 0x7e, 0x1d, 0x4b, 0x7c, 0x38, 0x83,
+ 0x8e, 0x3f, 0x09, 0x8e, 0xac, 0x8a, 0xfb, 0x32, 0x00, 0xaa, 0xc3, 0x0b, 0x61, 0xc4, 0xd8, 0xa8,
+ 0xb1, 0xd9, 0x06, 0x9d, 0xaa, 0x25, 0x5f, 0xd0, 0x67, 0xb0, 0x2a, 0x1e, 0x06, 0x63, 0xea, 0x7b,
+ 0xe3, 0xb8, 0xb1, 0x25, 0xc2, 0xed, 0x61, 0x1d, 0xdd, 0xf8, 0x81, 0xb0, 0x39, 0xdc, 0x3e, 0xf9,
+ 0xb3, 0xb5, 0x61, 0x55, 0x84, 0x9f, 0x14, 0x99, 0xce, 0x59, 0xc4, 0x3c, 0xab, 0xf5, 0x1e, 0x84,
+ 0x2f, 0x9b, 0xa1, 0xf0, 0xbe, 0x8d, 0x65, 0xe7, 0x70, 0xda, 0x39, 0x2c, 0x1b, 0xad, 0x3a, 0x87,
+ 0x1f, 0xd9, 0x5e, 0xc6, 0x93, 0x95, 0xf3, 0x34, 0x7f, 0x07, 0xf0, 0xba, 0x26, 0x89, 0xe2, 0x25,
+ 0x84, 0x97, 0xf3, 0xbc, 0xf0, 0x06, 0x68, 0x6f, 0x75, 0x2a, 0xfd, 0xf7, 0xf4, 0x95, 0x3c, 0x1c,
+ 0xd2, 0x20, 0xf6, 0x47, 0x3e, 0x1d, 0xe6, 0x82, 0x1d, 0x36, 0xd3, 0xc2, 0x7e, 0xfc, 0xab, 0xf5,
+ 0xa6, 0x56, 0xcd, 0xad, 0x6a, 0x8e, 0x4f, 0x8e, 0xee, 0x17, 0xea, 0xda, 0x14, 0x75, 0xdd, 0x3e,
+ 0xb7, 0x2e, 0x09, 0xb7, 0x50, 0xd8, 0x73, 0x00, 0x0d, 0x59, 0x58, 0xaa, 0x0a, 0x78, 0xc2, 0x4b,
+ 0xcf, 0x0a, 0xba, 0x0d, 0x6b, 0x11, 0x9d, 0xf9, 0xdc, 0x67, 0xc1, 0x20, 0x48, 0xa6, 0x0e, 0x8d,
+ 0x04, 0x92, 0x6d, 0x6b, 0x37, 0x13, 0x7f, 0x2e, 0xa4, 0x05, 0xc3, 0x5c, 0xaf, 0x73, 0x86, 0xb2,
+ 0x95, 0xe8, 0x16, 0xbc, 0x3c, 0x49, 0xeb, 0x8b, 0x33, 0xb3, 0xed, 0x36, 0xe8, 0xec, 0x58, 0x55,
+ 0x29, 0x54, 0xfd, 0xfe, 0x19, 0xc0, 0x1b, 0x5a, 0xc8, 0xaa, 0x1b, 0x1f, 0xc1, 0x9a, 0x9b, 0x69,
+ 0x4a, 0x0c, 0xea, 0xae, 0x5b, 0x08, 0xf3, 0x7a, 0x67, 0xf5, 0x99, 0x1e, 0x3b, 0x2f, 0xc5, 0xf7,
+ 0x3d, 0x4d, 0xd3, 0xff, 0xcb, 0x30, 0x9f, 0x00, 0xb8, 0xa7, 0x07, 0xa1, 0x18, 0x1c, 0xc0, 0x37,
+ 0x96, 0x18, 0xcc, 0x46, 0x1a, 0xeb, 0x0b, 0x2e, 0x06, 0xfa, 0xca, 0x8f, 0xc7, 0x05, 0x0a, 0x6a,
+ 0x45, 0x8a, 0xff, 0xc7, 0xf1, 0x35, 0x0a, 0xbb, 0xff, 0xc8, 0x8e, 0xec, 0x69, 0xc6, 0xa5, 0xf9,
+ 0xb8, 0xb0, 0xb2, 0x99, 0x4e, 0x95, 0xf8, 0x01, 0xbc, 0x18, 0x0a, 0x89, 0x9a, 0x8d, 0x15, 0x9d,
+ 0x54, 0x5e, 0xca, 0xd6, 0x1c, 0xc0, 0x96, 0x08, 0xf9, 0x65, 0xe8, 0x45, 0xf6, 0xb0, 0xb0, 0xa1,
+ 0xa5, 0x3a, 0xd8, 0x82, 0x95, 0x70, 0x62, 0x2f, 0x96, 0x20, 0x2d, 0x7c, 0xcb, 0x82, 0xa9, 0x48,
+ 0xcd, 0xc7, 0x04, 0xb6, 0x57, 0x27, 0x50, 0xd0, 0x1f, 0xc0, 0xab, 0x89, 0x52, 0x0f, 0x4a, 0x9f,
+ 0xe3, 0x2b, 0xc9, 0xd9, 0x88, 0xfd, 0x5f, 0x76, 0xe0, 0x05, 0x91, 0x0e, 0xfd, 0x00, 0x60, 0x25,
+ 0xa7, 0x41, 0x5d, 0x3d, 0x1d, 0x2b, 0x3e, 0x29, 0x06, 0x2e, 0x6b, 0x2e, 0x4b, 0x30, 0xef, 0x3c,
+ 0xfb, 0xed, 0x9f, 0xef, 0x36, 0x09, 0xea, 0x12, 0xdf, 0x71, 0xf5, 0x9f, 0x45, 0x35, 0x77, 0xe4,
+ 0xe9, 0x82, 0xcd, 0x63, 0xf4, 0x3d, 0x80, 0xd5, 0xfc, 0x51, 0x44, 0x25, 0xf3, 0x66, 0x23, 0x61,
+ 0x90, 0xd2, 0xf6, 0x0a, 0xe8, 0x3b, 0x02, 0xe8, 0x2d, 0x74, 0xf3, 0x5c, 0xa0, 0x68, 0x0e, 0xe0,
+ 0x6e, 0x71, 0x0f, 0xd0, 0xfe, 0xab, 0xd2, 0xe9, 0xee, 0xad, 0xd1, 0x5b, 0xc3, 0x43, 0x41, 0x9c,
+ 0x08, 0x88, 0x23, 0x34, 0xd4, 0x42, 0x5c, 0x5a, 0xe3, 0x3c, 0x9d, 0x24, 0x3b, 0xbe, 0xe4, 0xe9,
+ 0xd2, 0x19, 0x3f, 0x26, 0x72, 0x40, 0x73, 0x0a, 0x29, 0x38, 0x46, 0x3f, 0x01, 0x58, 0x5b, 0x3a,
+ 0x1b, 0xa8, 0x3c, 0xe8, 0x45, 0x23, 0xfa, 0xeb, 0xb8, 0xa8, 0x42, 0xef, 0x8a, 0x42, 0xfb, 0x68,
+ 0x7f, 0xdd, 0x42, 0xd1, 0x37, 0x8b, 0xb9, 0x91, 0xfb, 0x5c, 0x62, 0x6e, 0x0a, 0xa7, 0xa4, 0xc4,
+ 0xdc, 0x14, 0xcf, 0x8b, 0xf9, 0x96, 0xc0, 0x7a, 0x0d, 0x5d, 0x95, 0x58, 0x17, 0x30, 0xe5, 0x1d,
+ 0x41, 0xbf, 0x02, 0x78, 0x45, 0xb3, 0xe2, 0xe8, 0xce, 0x2b, 0xf2, 0xac, 0xbe, 0x39, 0xc6, 0xc1,
+ 0xba, 0x6e, 0x0a, 0xe5, 0xc7, 0x02, 0xe5, 0x5d, 0x74, 0xa0, 0x63, 0x54, 0x7b, 0x63, 0x0a, 0xbc,
+ 0x1e, 0xde, 0x3f, 0x39, 0x6d, 0x82, 0x17, 0xa7, 0x4d, 0xf0, 0xf7, 0x69, 0x13, 0x7c, 0x3b, 0x6f,
+ 0x6e, 0xbc, 0x98, 0x37, 0x37, 0xfe, 0x98, 0x37, 0x37, 0xbe, 0xee, 0x7a, 0x7e, 0x3c, 0x4e, 0x1c,
+ 0xec, 0xb2, 0x29, 0x51, 0xff, 0xc9, 0xbe, 0xe3, 0x76, 0xb3, 0x7f, 0xe0, 0xfd, 0x7e, 0x57, 0x25,
+ 0x8a, 0x8f, 0x42, 0xca, 0x9d, 0x8b, 0xe2, 0x56, 0xbd, 0xff, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff,
+ 0xa1, 0x2d, 0x40, 0x65, 0x8c, 0x0b, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
@@ -680,6 +791,8 @@ type QueryClient interface {
ConsensusStates(ctx context.Context, in *QueryConsensusStatesRequest, opts ...grpc.CallOption) (*QueryConsensusStatesResponse, error)
// ClientParams queries all parameters of the ibc client.
ClientParams(ctx context.Context, in *QueryClientParamsRequest, opts ...grpc.CallOption) (*QueryClientParamsResponse, error)
+ // UpgradedClientState queries an Upgraded IBC light client.
+ UpgradedClientState(ctx context.Context, in *QueryUpgradedClientStateRequest, opts ...grpc.CallOption) (*QueryUpgradedClientStateResponse, error)
}
type queryClient struct {
@@ -735,6 +848,15 @@ func (c *queryClient) ClientParams(ctx context.Context, in *QueryClientParamsReq
return out, nil
}
+func (c *queryClient) UpgradedClientState(ctx context.Context, in *QueryUpgradedClientStateRequest, opts ...grpc.CallOption) (*QueryUpgradedClientStateResponse, error) {
+ out := new(QueryUpgradedClientStateResponse)
+ err := c.cc.Invoke(ctx, "/ibcgo.core.client.v1.Query/UpgradedClientState", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
// QueryServer is the server API for Query service.
type QueryServer interface {
// ClientState queries an IBC light client.
@@ -749,6 +871,8 @@ type QueryServer interface {
ConsensusStates(context.Context, *QueryConsensusStatesRequest) (*QueryConsensusStatesResponse, error)
// ClientParams queries all parameters of the ibc client.
ClientParams(context.Context, *QueryClientParamsRequest) (*QueryClientParamsResponse, error)
+ // UpgradedClientState queries an Upgraded IBC light client.
+ UpgradedClientState(context.Context, *QueryUpgradedClientStateRequest) (*QueryUpgradedClientStateResponse, error)
}
// UnimplementedQueryServer can be embedded to have forward compatible implementations.
@@ -770,6 +894,9 @@ func (*UnimplementedQueryServer) ConsensusStates(ctx context.Context, req *Query
func (*UnimplementedQueryServer) ClientParams(ctx context.Context, req *QueryClientParamsRequest) (*QueryClientParamsResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method ClientParams not implemented")
}
+func (*UnimplementedQueryServer) UpgradedClientState(ctx context.Context, req *QueryUpgradedClientStateRequest) (*QueryUpgradedClientStateResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method UpgradedClientState not implemented")
+}
func RegisterQueryServer(s grpc1.Server, srv QueryServer) {
s.RegisterService(&_Query_serviceDesc, srv)
@@ -865,6 +992,24 @@ func _Query_ClientParams_Handler(srv interface{}, ctx context.Context, dec func(
return interceptor(ctx, in, info, handler)
}
+func _Query_UpgradedClientState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(QueryUpgradedClientStateRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(QueryServer).UpgradedClientState(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/ibcgo.core.client.v1.Query/UpgradedClientState",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(QueryServer).UpgradedClientState(ctx, req.(*QueryUpgradedClientStateRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
var _Query_serviceDesc = grpc.ServiceDesc{
ServiceName: "ibcgo.core.client.v1.Query",
HandlerType: (*QueryServer)(nil),
@@ -889,6 +1034,10 @@ var _Query_serviceDesc = grpc.ServiceDesc{
MethodName: "ClientParams",
Handler: _Query_ClientParams_Handler,
},
+ {
+ MethodName: "UpgradedClientState",
+ Handler: _Query_UpgradedClientState_Handler,
+ },
},
Streams: []grpc.StreamDesc{},
Metadata: "ibcgo/core/client/v1/query.proto",
@@ -1311,6 +1460,76 @@ func (m *QueryClientParamsResponse) MarshalToSizedBuffer(dAtA []byte) (int, erro
return len(dAtA) - i, nil
}
+func (m *QueryUpgradedClientStateRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *QueryUpgradedClientStateRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *QueryUpgradedClientStateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.PlanHeight != 0 {
+ i = encodeVarintQuery(dAtA, i, uint64(m.PlanHeight))
+ i--
+ dAtA[i] = 0x10
+ }
+ if len(m.ClientId) > 0 {
+ i -= len(m.ClientId)
+ copy(dAtA[i:], m.ClientId)
+ i = encodeVarintQuery(dAtA, i, uint64(len(m.ClientId)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *QueryUpgradedClientStateResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *QueryUpgradedClientStateResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *QueryUpgradedClientStateResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.UpgradedClientState != nil {
+ {
+ size, err := m.UpgradedClientState.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintQuery(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
func encodeVarintQuery(dAtA []byte, offset int, v uint64) int {
offset -= sovQuery(v)
base := offset
@@ -1485,6 +1704,35 @@ func (m *QueryClientParamsResponse) Size() (n int) {
return n
}
+func (m *QueryUpgradedClientStateRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.ClientId)
+ if l > 0 {
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ if m.PlanHeight != 0 {
+ n += 1 + sovQuery(uint64(m.PlanHeight))
+ }
+ return n
+}
+
+func (m *QueryUpgradedClientStateResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.UpgradedClientState != nil {
+ l = m.UpgradedClientState.Size()
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ return n
+}
+
func sovQuery(x uint64) (n int) {
return (math_bits.Len64(x|1) + 6) / 7
}
@@ -2599,6 +2847,193 @@ func (m *QueryClientParamsResponse) Unmarshal(dAtA []byte) error {
}
return nil
}
+func (m *QueryUpgradedClientStateRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: QueryUpgradedClientStateRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: QueryUpgradedClientStateRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClientId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ClientId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PlanHeight", wireType)
+ }
+ m.PlanHeight = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.PlanHeight |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipQuery(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *QueryUpgradedClientStateResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: QueryUpgradedClientStateResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: QueryUpgradedClientStateResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UpgradedClientState", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.UpgradedClientState == nil {
+ m.UpgradedClientState = &types.Any{}
+ }
+ if err := m.UpgradedClientState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipQuery(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
func skipQuery(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
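The hand-rolled Marshal/Unmarshal methods above use the standard protobuf wire format: field 1 (client_id) is length-delimited (tag byte 0xa) and field 2 (plan_height) is a varint (tag byte 0x10). A small round-trip sketch, with illustrative values only:

```go
package main

import (
	"fmt"

	clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
)

func main() {
	req := clienttypes.QueryUpgradedClientStateRequest{
		ClientId:   "07-tendermint-0", // assumed identifier, illustration only
		PlanHeight: 100,
	}

	// Encode using the generated MarshalToSizedBuffer-backed Marshal.
	bz, err := req.Marshal()
	if err != nil {
		panic(err)
	}

	// Decode back with the generated Unmarshal.
	var decoded clienttypes.QueryUpgradedClientStateRequest
	if err := decoded.Unmarshal(bz); err != nil {
		panic(err)
	}
	fmt.Println(decoded.ClientId, decoded.PlanHeight) // 07-tendermint-0 100
}
```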
diff --git a/core/02-client/types/query.pb.gw.go b/core/02-client/types/query.pb.gw.go
index ceef5c32..24c1e508 100644
--- a/core/02-client/types/query.pb.gw.go
+++ b/core/02-client/types/query.pb.gw.go
@@ -327,6 +327,78 @@ func local_request_Query_ClientParams_0(ctx context.Context, marshaler runtime.M
}
+var (
+ filter_Query_UpgradedClientState_0 = &utilities.DoubleArray{Encoding: map[string]int{"client_id": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}}
+)
+
+func request_Query_UpgradedClientState_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq QueryUpgradedClientStateRequest
+ var metadata runtime.ServerMetadata
+
+ var (
+ val string
+ ok bool
+ err error
+ _ = err
+ )
+
+ val, ok = pathParams["client_id"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "client_id")
+ }
+
+ protoReq.ClientId, err = runtime.String(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "client_id", err)
+ }
+
+ if err := req.ParseForm(); err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_UpgradedClientState_0); err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := client.UpgradedClientState(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func local_request_Query_UpgradedClientState_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq QueryUpgradedClientStateRequest
+ var metadata runtime.ServerMetadata
+
+ var (
+ val string
+ ok bool
+ err error
+ _ = err
+ )
+
+ val, ok = pathParams["client_id"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "client_id")
+ }
+
+ protoReq.ClientId, err = runtime.String(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "client_id", err)
+ }
+
+ if err := req.ParseForm(); err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_UpgradedClientState_0); err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := server.UpgradedClientState(ctx, &protoReq)
+ return msg, metadata, err
+
+}
+
// RegisterQueryHandlerServer registers the http handlers for service Query to "mux".
// UnaryRPC :call QueryServer directly.
// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
@@ -433,6 +505,26 @@ func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, serv
})
+ mux.Handle("GET", pattern_Query_UpgradedClientState_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Query_UpgradedClientState_0(rctx, inboundMarshaler, server, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Query_UpgradedClientState_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
return nil
}
@@ -574,6 +666,26 @@ func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, clie
})
+ mux.Handle("GET", pattern_Query_UpgradedClientState_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Query_UpgradedClientState_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Query_UpgradedClientState_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
return nil
}
@@ -587,6 +699,8 @@ var (
pattern_Query_ConsensusStates_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 1, 0, 4, 1, 5, 5}, []string{"ibc", "core", "client", "v1", "consensus_states", "client_id"}, "", runtime.AssumeColonVerbOpt(true)))
pattern_Query_ClientParams_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"ibc", "client", "v1", "params"}, "", runtime.AssumeColonVerbOpt(true)))
+
+ pattern_Query_UpgradedClientState_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 1, 0, 4, 1, 5, 5}, []string{"ibc", "core", "client", "v1", "upgraded_client_states", "client_id"}, "", runtime.AssumeColonVerbOpt(true)))
)
var (
@@ -599,4 +713,6 @@ var (
forward_Query_ConsensusStates_0 = runtime.ForwardResponseMessage
forward_Query_ClientParams_0 = runtime.ForwardResponseMessage
+
+ forward_Query_UpgradedClientState_0 = runtime.ForwardResponseMessage
)
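The gateway handlers registered above expose the query over the node's REST endpoint as well. A sketch of calling the route is shown below; the host/port, client identifier, and plan height are assumptions for illustration.

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
)

func main() {
	// Assumed REST endpoint and path parameters.
	url := "http://localhost:1317/ibc/core/client/v1/upgraded_client_states/07-tendermint-0?plan_height=100"

	resp, err := http.Get(url)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	// JSON-encoded QueryUpgradedClientStateResponse.
	fmt.Println(string(body))
}
```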
diff --git a/core/03-connection/simulation/decoder_test.go b/core/03-connection/simulation/decoder_test.go
index 1d670299..ff567c19 100644
--- a/core/03-connection/simulation/decoder_test.go
+++ b/core/03-connection/simulation/decoder_test.go
@@ -6,11 +6,11 @@ import (
"github.com/stretchr/testify/require"
- "github.com/cosmos/cosmos-sdk/simapp"
"github.com/cosmos/cosmos-sdk/types/kv"
"github.com/cosmos/ibc-go/core/03-connection/simulation"
"github.com/cosmos/ibc-go/core/03-connection/types"
host "github.com/cosmos/ibc-go/core/24-host"
+ "github.com/cosmos/ibc-go/testing/simapp"
)
func TestDecodeStore(t *testing.T) {
diff --git a/core/03-connection/types/codec.go b/core/03-connection/types/codec.go
index 960f259a..bd87d414 100644
--- a/core/03-connection/types/codec.go
+++ b/core/03-connection/types/codec.go
@@ -12,17 +12,17 @@ import (
// Any.
func RegisterInterfaces(registry codectypes.InterfaceRegistry) {
registry.RegisterInterface(
- "ibc.core.connection.v1.ConnectionI",
+ "ibcgo.core.connection.v1.ConnectionI",
(*exported.ConnectionI)(nil),
&ConnectionEnd{},
)
registry.RegisterInterface(
- "ibc.core.connection.v1.CounterpartyConnectionI",
+ "ibcgo.core.connection.v1.CounterpartyConnectionI",
(*exported.CounterpartyConnectionI)(nil),
&Counterparty{},
)
registry.RegisterInterface(
- "ibc.core.connection.v1.Version",
+ "ibcgo.core.connection.v1.Version",
(*exported.Version)(nil),
&Version{},
)
diff --git a/core/03-connection/types/msgs_test.go b/core/03-connection/types/msgs_test.go
index 627cdab2..d0b14966 100644
--- a/core/03-connection/types/msgs_test.go
+++ b/core/03-connection/types/msgs_test.go
@@ -10,7 +10,6 @@ import (
abci "github.com/tendermint/tendermint/abci/types"
dbm "github.com/tendermint/tm-db"
- "github.com/cosmos/cosmos-sdk/simapp"
"github.com/cosmos/cosmos-sdk/store/iavl"
"github.com/cosmos/cosmos-sdk/store/rootmulti"
storetypes "github.com/cosmos/cosmos-sdk/store/types"
@@ -20,6 +19,7 @@ import (
commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
ibctesting "github.com/cosmos/ibc-go/testing"
+ "github.com/cosmos/ibc-go/testing/simapp"
)
var (
diff --git a/core/04-channel/simulation/decoder_test.go b/core/04-channel/simulation/decoder_test.go
index 10cdcb0b..9212acd6 100644
--- a/core/04-channel/simulation/decoder_test.go
+++ b/core/04-channel/simulation/decoder_test.go
@@ -6,12 +6,12 @@ import (
"github.com/stretchr/testify/require"
- "github.com/cosmos/cosmos-sdk/simapp"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/cosmos/cosmos-sdk/types/kv"
"github.com/cosmos/ibc-go/core/04-channel/simulation"
"github.com/cosmos/ibc-go/core/04-channel/types"
host "github.com/cosmos/ibc-go/core/24-host"
+ "github.com/cosmos/ibc-go/testing/simapp"
)
func TestDecodeStore(t *testing.T) {
diff --git a/core/04-channel/types/acknowledgement.go b/core/04-channel/types/acknowledgement.go
new file mode 100644
index 00000000..a3f677ab
--- /dev/null
+++ b/core/04-channel/types/acknowledgement.go
@@ -0,0 +1,50 @@
+package types
+
+import (
+ "strings"
+
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+)
+
+// NewResultAcknowledgement returns a new instance of Acknowledgement using an Acknowledgement_Result
+// type in the Response field.
+func NewResultAcknowledgement(result []byte) Acknowledgement {
+ return Acknowledgement{
+ Response: &Acknowledgement_Result{
+ Result: result,
+ },
+ }
+}
+
+// NewErrorAcknowledgement returns a new instance of Acknowledgement using an Acknowledgement_Error
+// type in the Response field.
+func NewErrorAcknowledgement(err string) Acknowledgement {
+ return Acknowledgement{
+ Response: &Acknowledgement_Error{
+ Error: err,
+ },
+ }
+}
+
+// GetBytes is a helper for serialising acknowledgements
+func (ack Acknowledgement) GetBytes() []byte {
+ return sdk.MustSortJSON(SubModuleCdc.MustMarshalJSON(&ack))
+}
+
+// ValidateBasic performs a basic validation of the acknowledgement
+func (ack Acknowledgement) ValidateBasic() error {
+ switch resp := ack.Response.(type) {
+ case *Acknowledgement_Result:
+ if len(resp.Result) == 0 {
+ return sdkerrors.Wrap(ErrInvalidAcknowledgement, "acknowledgement result cannot be empty")
+ }
+ case *Acknowledgement_Error:
+ if strings.TrimSpace(resp.Error) == "" {
+ return sdkerrors.Wrap(ErrInvalidAcknowledgement, "acknowledgement error cannot be empty")
+ }
+ default:
+ return sdkerrors.Wrapf(ErrInvalidAcknowledgement, "unsupported acknowledgement response field type %T", resp)
+ }
+ return nil
+}
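A short usage sketch of the helpers in the new acknowledgement.go above, constructing both acknowledgement variants and validating them before serialisation; the values are illustrative.

```go
package main

import (
	"fmt"

	channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
)

func main() {
	successAck := channeltypes.NewResultAcknowledgement([]byte{byte(1)})
	errorAck := channeltypes.NewErrorAcknowledgement("cannot unmarshal packet data")

	for _, ack := range []channeltypes.Acknowledgement{successAck, errorAck} {
		// ValidateBasic rejects empty results, blank error strings and nil responses.
		if err := ack.ValidateBasic(); err != nil {
			fmt.Println("invalid acknowledgement:", err)
			continue
		}
		// GetBytes returns the sorted-JSON encoding of the acknowledgement.
		fmt.Println(string(ack.GetBytes()))
	}
}
```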
diff --git a/core/04-channel/types/acknowledgement_test.go b/core/04-channel/types/acknowledgement_test.go
new file mode 100644
index 00000000..6906540b
--- /dev/null
+++ b/core/04-channel/types/acknowledgement_test.go
@@ -0,0 +1,63 @@
+package types_test
+
+import "github.com/cosmos/ibc-go/core/04-channel/types"
+
+// tests acknowledgement.ValidateBasic and acknowledgement.GetBytes
+func (suite TypesTestSuite) TestAcknowledgement() {
+ testCases := []struct {
+ name string
+ ack types.Acknowledgement
+ expPass bool
+ }{
+ {
+ "valid successful ack",
+ types.NewResultAcknowledgement([]byte("success")),
+ true,
+ },
+ {
+ "valid failed ack",
+ types.NewErrorAcknowledgement("error"),
+ true,
+ },
+ {
+ "empty successful ack",
+ types.NewResultAcknowledgement([]byte{}),
+ false,
+ },
+ {
+ "empty faied ack",
+ types.NewErrorAcknowledgement(" "),
+ false,
+ },
+ {
+ "nil response",
+ types.Acknowledgement{
+ Response: nil,
+ },
+ false,
+ },
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+
+ suite.Run(tc.name, func() {
+ suite.SetupTest()
+
+ err := tc.ack.ValidateBasic()
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ } else {
+ suite.Require().Error(err)
+ }
+
+ // expect all acks to be able to be marshaled
+ suite.NotPanics(func() {
+ bz := tc.ack.GetBytes()
+ suite.Require().NotNil(bz)
+ })
+ })
+ }
+
+}
diff --git a/core/04-channel/types/channel.go b/core/04-channel/types/channel.go
index 6036942f..a902d46e 100644
--- a/core/04-channel/types/channel.go
+++ b/core/04-channel/types/channel.go
@@ -1,9 +1,6 @@
package types
import (
- "strings"
-
- sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
host "github.com/cosmos/ibc-go/core/24-host"
"github.com/cosmos/ibc-go/core/exported"
@@ -128,45 +125,3 @@ func (ic IdentifiedChannel) ValidateBasic() error {
channel := NewChannel(ic.State, ic.Ordering, ic.Counterparty, ic.ConnectionHops, ic.Version)
return channel.ValidateBasic()
}
-
-// NewResultAcknowledgement returns a new instance of Acknowledgement using an Acknowledgement_Result
-// type in the Response field.
-func NewResultAcknowledgement(result []byte) Acknowledgement {
- return Acknowledgement{
- Response: &Acknowledgement_Result{
- Result: result,
- },
- }
-}
-
-// NewErrorAcknowledgement returns a new instance of Acknowledgement using an Acknowledgement_Error
-// type in the Response field.
-func NewErrorAcknowledgement(err string) Acknowledgement {
- return Acknowledgement{
- Response: &Acknowledgement_Error{
- Error: err,
- },
- }
-}
-
-// GetBytes is a helper for serialising acknowledgements
-func (ack Acknowledgement) GetBytes() []byte {
- return sdk.MustSortJSON(SubModuleCdc.MustMarshalJSON(&ack))
-}
-
-// ValidateBasic performs a basic validation of the acknowledgement
-func (ack Acknowledgement) ValidateBasic() error {
- switch resp := ack.Response.(type) {
- case *Acknowledgement_Result:
- if len(resp.Result) == 0 {
- return sdkerrors.Wrap(ErrInvalidAcknowledgement, "acknowledgement result cannot be empty")
- }
- case *Acknowledgement_Error:
- if strings.TrimSpace(resp.Error) == "" {
- return sdkerrors.Wrap(ErrInvalidAcknowledgement, "acknowledgement error cannot be empty")
- }
- default:
- return sdkerrors.Wrapf(ErrInvalidAcknowledgement, "unsupported acknowledgement response field type %T", resp)
- }
- return nil
-}
diff --git a/core/04-channel/types/channel_test.go b/core/04-channel/types/channel_test.go
index 3f42c5c0..c392a0ba 100644
--- a/core/04-channel/types/channel_test.go
+++ b/core/04-channel/types/channel_test.go
@@ -57,63 +57,3 @@ func TestCounterpartyValidateBasic(t *testing.T) {
}
}
}
-
-// tests acknowledgement.ValidateBasic and acknowledgement.GetBytes
-func (suite TypesTestSuite) TestAcknowledgement() {
- testCases := []struct {
- name string
- ack types.Acknowledgement
- expPass bool
- }{
- {
- "valid successful ack",
- types.NewResultAcknowledgement([]byte("success")),
- true,
- },
- {
- "valid failed ack",
- types.NewErrorAcknowledgement("error"),
- true,
- },
- {
- "empty successful ack",
- types.NewResultAcknowledgement([]byte{}),
- false,
- },
- {
- "empty faied ack",
- types.NewErrorAcknowledgement(" "),
- false,
- },
- {
- "nil response",
- types.Acknowledgement{
- Response: nil,
- },
- false,
- },
- }
-
- for _, tc := range testCases {
- tc := tc
-
- suite.Run(tc.name, func() {
- suite.SetupTest()
-
- err := tc.ack.ValidateBasic()
-
- if tc.expPass {
- suite.Require().NoError(err)
- } else {
- suite.Require().Error(err)
- }
-
- // expect all acks to be able to be marshaled
- suite.NotPanics(func() {
- bz := tc.ack.GetBytes()
- suite.Require().NotNil(bz)
- })
- })
- }
-
-}
diff --git a/core/04-channel/types/codec.go b/core/04-channel/types/codec.go
index fb83e09c..477e4620 100644
--- a/core/04-channel/types/codec.go
+++ b/core/04-channel/types/codec.go
@@ -12,26 +12,17 @@ import (
// Any.
func RegisterInterfaces(registry codectypes.InterfaceRegistry) {
registry.RegisterInterface(
- "ibc.core.channel.v1.ChannelI",
- (*exported.ChannelI)(nil),
- )
- registry.RegisterInterface(
- "ibc.core.channel.v1.CounterpartyChannelI",
- (*exported.CounterpartyChannelI)(nil),
- )
- registry.RegisterInterface(
- "ibc.core.channel.v1.PacketI",
- (*exported.PacketI)(nil),
- )
- registry.RegisterImplementations(
+ "ibcgo.core.channel.v1.ChannelI",
(*exported.ChannelI)(nil),
&Channel{},
)
- registry.RegisterImplementations(
+ registry.RegisterInterface(
+ "ibcgo.core.channel.v1.CounterpartyChannelI",
(*exported.CounterpartyChannelI)(nil),
&Counterparty{},
)
- registry.RegisterImplementations(
+ registry.RegisterInterface(
+ "ibcgo.core.channel.v1.PacketI",
(*exported.PacketI)(nil),
&Packet{},
)
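The change above registers Channel, Counterparty, and Packet directly as implementations of their `ibcgo.*` interface names. A minimal sketch of what this enables, packing a concrete Channel into a protobuf Any and resolving it back through the interface registry; it assumes a fresh registry is sufficient for the demonstration.

```go
package main

import (
	"fmt"

	codectypes "github.com/cosmos/cosmos-sdk/codec/types"

	channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
	"github.com/cosmos/ibc-go/core/exported"
)

func main() {
	registry := codectypes.NewInterfaceRegistry()
	channeltypes.RegisterInterfaces(registry)

	channel := channeltypes.NewChannel(
		channeltypes.OPEN, channeltypes.ORDERED,
		channeltypes.NewCounterparty("transfer", "channel-0"),
		[]string{"connection-0"}, "ics20-1",
	)

	// Pack the concrete type into an Any.
	anyChannel, err := codectypes.NewAnyWithValue(&channel)
	if err != nil {
		panic(err)
	}

	// Resolve it back through the registered ChannelI interface.
	var unpacked exported.ChannelI
	if err := registry.UnpackAny(anyChannel, &unpacked); err != nil {
		panic(err)
	}
	fmt.Println(unpacked.GetState(), unpacked.GetVersion())
}
```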
diff --git a/core/04-channel/types/msgs_test.go b/core/04-channel/types/msgs_test.go
index 37bcbbbb..d0b5e2d4 100644
--- a/core/04-channel/types/msgs_test.go
+++ b/core/04-channel/types/msgs_test.go
@@ -9,7 +9,6 @@ import (
abci "github.com/tendermint/tendermint/abci/types"
dbm "github.com/tendermint/tm-db"
- "github.com/cosmos/cosmos-sdk/simapp"
"github.com/cosmos/cosmos-sdk/store/iavl"
"github.com/cosmos/cosmos-sdk/store/rootmulti"
storetypes "github.com/cosmos/cosmos-sdk/store/types"
@@ -18,6 +17,7 @@ import (
"github.com/cosmos/ibc-go/core/04-channel/types"
commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
"github.com/cosmos/ibc-go/core/exported"
+ "github.com/cosmos/ibc-go/testing/simapp"
)
const (
diff --git a/core/05-port/keeper/keeper_test.go b/core/05-port/keeper/keeper_test.go
index e27938a1..2d519aba 100644
--- a/core/05-port/keeper/keeper_test.go
+++ b/core/05-port/keeper/keeper_test.go
@@ -7,9 +7,9 @@ import (
"github.com/stretchr/testify/suite"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
- "github.com/cosmos/cosmos-sdk/simapp"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/cosmos/ibc-go/core/05-port/keeper"
+ "github.com/cosmos/ibc-go/testing/simapp"
)
var (
diff --git a/core/23-commitment/types/codec.go b/core/23-commitment/types/codec.go
index 11389f2d..ed96411d 100644
--- a/core/23-commitment/types/codec.go
+++ b/core/23-commitment/types/codec.go
@@ -8,19 +8,19 @@ import (
// RegisterInterfaces registers the commitment interfaces to protobuf Any.
func RegisterInterfaces(registry codectypes.InterfaceRegistry) {
registry.RegisterInterface(
- "ibc.core.commitment.v1.Root",
+ "ibcgo.core.commitment.v1.Root",
(*exported.Root)(nil),
)
registry.RegisterInterface(
- "ibc.core.commitment.v1.Prefix",
+ "ibcgo.core.commitment.v1.Prefix",
(*exported.Prefix)(nil),
)
registry.RegisterInterface(
- "ibc.core.commitment.v1.Path",
+ "ibcgo.core.commitment.v1.Path",
(*exported.Path)(nil),
)
registry.RegisterInterface(
- "ibc.core.commitment.v1.Proof",
+ "ibcgo.core.commitment.v1.Proof",
(*exported.Proof)(nil),
)
diff --git a/core/genesis_test.go b/core/genesis_test.go
index 3e6e6003..0b94f6da 100644
--- a/core/genesis_test.go
+++ b/core/genesis_test.go
@@ -8,7 +8,6 @@ import (
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
"github.com/cosmos/cosmos-sdk/codec"
- "github.com/cosmos/cosmos-sdk/simapp"
ibc "github.com/cosmos/ibc-go/core"
clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
connectiontypes "github.com/cosmos/ibc-go/core/03-connection/types"
@@ -19,6 +18,7 @@ import (
ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
localhosttypes "github.com/cosmos/ibc-go/light-clients/09-localhost/types"
ibctesting "github.com/cosmos/ibc-go/testing"
+ "github.com/cosmos/ibc-go/testing/simapp"
)
const (
diff --git a/core/keeper/grpc_query.go b/core/keeper/grpc_query.go
index 98eecb31..21361b3b 100644
--- a/core/keeper/grpc_query.go
+++ b/core/keeper/grpc_query.go
@@ -33,6 +33,11 @@ func (q Keeper) ClientParams(c context.Context, req *clienttypes.QueryClientPara
return q.ClientKeeper.ClientParams(c, req)
}
+// UpgradedClientState implements the IBC QueryServer interface
+func (q Keeper) UpgradedClientState(c context.Context, req *clienttypes.QueryUpgradedClientStateRequest) (*clienttypes.QueryUpgradedClientStateResponse, error) {
+ return q.ClientKeeper.UpgradedClientState(c, req)
+}
+
// Connection implements the IBC QueryServer interface
func (q Keeper) Connection(c context.Context, req *connectiontypes.QueryConnectionRequest) (*connectiontypes.QueryConnectionResponse, error) {
return q.ConnectionKeeper.Connection(c, req)
diff --git a/core/keeper/keeper.go b/core/keeper/keeper.go
index 47735993..109cb4ab 100644
--- a/core/keeper/keeper.go
+++ b/core/keeper/keeper.go
@@ -4,6 +4,7 @@ import (
"github.com/cosmos/cosmos-sdk/codec"
sdk "github.com/cosmos/cosmos-sdk/types"
capabilitykeeper "github.com/cosmos/cosmos-sdk/x/capability/keeper"
+ paramtypes "github.com/cosmos/cosmos-sdk/x/params/types"
clientkeeper "github.com/cosmos/ibc-go/core/02-client/keeper"
clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
connectionkeeper "github.com/cosmos/ibc-go/core/03-connection/keeper"
@@ -11,7 +12,6 @@ import (
portkeeper "github.com/cosmos/ibc-go/core/05-port/keeper"
porttypes "github.com/cosmos/ibc-go/core/05-port/types"
"github.com/cosmos/ibc-go/core/types"
- paramtypes "github.com/cosmos/cosmos-sdk/x/params/types"
)
var _ types.QueryServer = (*Keeper)(nil)
@@ -33,9 +33,10 @@ type Keeper struct {
// NewKeeper creates a new ibc Keeper
func NewKeeper(
cdc codec.BinaryMarshaler, key sdk.StoreKey, paramSpace paramtypes.Subspace,
- stakingKeeper clienttypes.StakingKeeper, scopedKeeper capabilitykeeper.ScopedKeeper,
+ stakingKeeper clienttypes.StakingKeeper, upgradeKeeper clienttypes.UpgradeKeeper,
+ scopedKeeper capabilitykeeper.ScopedKeeper,
) *Keeper {
- clientKeeper := clientkeeper.NewKeeper(cdc, key, paramSpace, stakingKeeper)
+ clientKeeper := clientkeeper.NewKeeper(cdc, key, paramSpace, stakingKeeper, upgradeKeeper)
connectionKeeper := connectionkeeper.NewKeeper(cdc, key, clientKeeper)
portKeeper := portkeeper.NewKeeper(scopedKeeper)
channelKeeper := channelkeeper.NewKeeper(cdc, key, clientKeeper, connectionKeeper, portKeeper, scopedKeeper)
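Callers of NewKeeper now pass the upgrade keeper between the staking keeper and the scoped keeper. A hedged sketch of the updated call site in an application's wiring; the `ibckeeper`/`ibchost` aliases, the `app` fields, and the store key/subspace setup are assumed to exist as in a standard SDK app and are not part of this patch.

```go
// app.go (sketch): wiring the IBC keeper with the new UpgradeKeeper argument,
// which must satisfy the clienttypes.UpgradeKeeper interface.
app.IBCKeeper = ibckeeper.NewKeeper(
	appCodec,
	keys[ibchost.StoreKey],
	app.GetSubspace(ibchost.ModuleName),
	app.StakingKeeper,
	app.UpgradeKeeper, // new argument
	scopedIBCKeeper,
)
```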
diff --git a/core/keeper/msg_server_test.go b/core/keeper/msg_server_test.go
index 1fd7d9e1..1f41abb4 100644
--- a/core/keeper/msg_server_test.go
+++ b/core/keeper/msg_server_test.go
@@ -6,6 +6,7 @@ import (
"github.com/stretchr/testify/suite"
sdk "github.com/cosmos/cosmos-sdk/types"
+ upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
@@ -15,7 +16,6 @@ import (
ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
ibctesting "github.com/cosmos/ibc-go/testing"
ibcmock "github.com/cosmos/ibc-go/testing/mock"
- upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
)
const height = 10
@@ -639,14 +639,18 @@ func (suite *KeeperTestSuite) TestUpgradeClient() {
// last Height is at next block
lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1))
+ upgradedClientBz, err := clienttypes.MarshalClientState(suite.chainA.App.AppCodec(), upgradedClient)
+ suite.Require().NoError(err)
+ upgradedConsStateBz, err := clienttypes.MarshalConsensusState(suite.chainA.App.AppCodec(), upgradedConsState)
+ suite.Require().NoError(err)
+
// zero custom fields and store in upgrade store
- suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClient)
- suite.chainB.App.UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsState)
+ suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz)
+ suite.chainB.App.UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz)
// commit upgrade store changes and update clients
-
suite.coordinator.CommitBlock(suite.chainB)
- err := suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ err = suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
suite.Require().NoError(err)
cs, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), clientA)
@@ -676,14 +680,18 @@ func (suite *KeeperTestSuite) TestUpgradeClient() {
// last Height is at next block
lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1))
+ upgradedClientBz, err := clienttypes.MarshalClientState(suite.chainA.App.AppCodec(), upgradedClient)
+ suite.Require().NoError(err)
+ upgradedConsStateBz, err := clienttypes.MarshalConsensusState(suite.chainA.App.AppCodec(), upgradedConsState)
+ suite.Require().NoError(err)
+
// zero custom fields and store in upgrade store
- suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClient)
- suite.chainB.App.UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsState)
+ suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz)
+ suite.chainB.App.UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz)
// commit upgrade store changes and update clients
-
suite.coordinator.CommitBlock(suite.chainB)
- err := suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ err = suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
suite.Require().NoError(err)
msg, err = clienttypes.NewMsgUpgradeClient(clientA, upgradedClient, upgradedConsState, nil, nil, suite.chainA.SenderAccount.GetAddress())
diff --git a/core/module.go b/core/module.go
index 5907190c..097f7791 100644
--- a/core/module.go
+++ b/core/module.go
@@ -170,8 +170,6 @@ func (am AppModule) EndBlock(ctx sdk.Context, req abci.RequestEndBlock) []abci.V
return []abci.ValidatorUpdate{}
}
-//____________________________________________________________________________
-
// AppModuleSimulation functions
// GenerateGenesisState creates a randomized GenState of the ibc module.
diff --git a/core/simulation/decoder_test.go b/core/simulation/decoder_test.go
index 192dc9a8..b397cc29 100644
--- a/core/simulation/decoder_test.go
+++ b/core/simulation/decoder_test.go
@@ -6,7 +6,6 @@ import (
"github.com/stretchr/testify/require"
- "github.com/cosmos/cosmos-sdk/simapp"
"github.com/cosmos/cosmos-sdk/types/kv"
clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
connectiontypes "github.com/cosmos/ibc-go/core/03-connection/types"
@@ -14,6 +13,7 @@ import (
host "github.com/cosmos/ibc-go/core/24-host"
"github.com/cosmos/ibc-go/core/simulation"
ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
+ "github.com/cosmos/ibc-go/testing/simapp"
)
func TestDecodeStore(t *testing.T) {
diff --git a/docs/ibc/proto-docs.md b/docs/ibc/proto-docs.md
index 1c9f5c29..504b4d1f 100644
--- a/docs/ibc/proto-docs.md
+++ b/docs/ibc/proto-docs.md
@@ -279,6 +279,10 @@
M Params
+ M UpgradeProposal
@@ -652,6 +656,14 @@
M QueryConsensusStatesResponse
+ M QueryUpgradedClientStateRequest
+ M QueryUpgradedClientStateResponse
@@ -1651,6 +1663,56 @@ subject
+### UpgradeProposal
+UpgradeProposal is a gov Content type for initiating an IBC breaking upgrade.
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| title | string | | |
+| description | string | | |
+| plan | cosmos.upgrade.v1beta1.Plan | | |
+| upgraded_client_state | google.protobuf.Any | | An UpgradedClientState must be provided to perform an IBC breaking upgrade. This will make the chain commit to the correct upgraded (self) client state before the upgrade occurs, so that connecting chains can verify that the new upgraded client is valid by verifying a proof on the previous version of the chain. This will allow IBC connections to persist smoothly across planned chain upgrades. |
@@ -4623,6 +4685,62 @@ ConsensusState
+### QueryUpgradedClientStateRequest
+QueryUpgradedClientStateRequest is the request type for the Query/UpgradedClientState RPC method.
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| client_id | string | | client state unique identifier |
+| plan_height | int64 | | plan height of the current chain must be sent in the request, as this is the height under which the upgraded client state is stored |
+
+### QueryUpgradedClientStateResponse
+QueryUpgradedClientStateResponse is the response type for the Query/UpgradedClientState RPC method.
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| upgraded_client_state | google.protobuf.Any | | client state associated with the request identifier |
@@ -4674,6 +4792,13 @@ client.
ClientParams queries all parameters of the ibc client.
+
+| Method Name | Request Type | Response Type | Description |
+| ----------- | ------------ | ------------- | ----------- |
+| UpgradedClientState | QueryUpgradedClientStateRequest | QueryUpgradedClientStateResponse | UpgradedClientState queries an Upgraded IBC light client. |
@@ -4742,6 +4867,16 @@ client.
+
+| Method Name | HTTP Verb | HTTP Path |
+| ----------- | --------- | --------- |
+| UpgradedClientState | GET | /ibc/core/client/v1/upgraded_client_states/{client_id} |
diff --git a/go.mod b/go.mod
index 34d93acc..3b9b5de9 100644
--- a/go.mod
+++ b/go.mod
@@ -7,17 +7,20 @@ replace github.com/gogo/protobuf => github.com/regen-network/protobuf v1.3.3-alp
require (
github.com/armon/go-metrics v0.3.6
github.com/confio/ics23/go v0.6.3
- github.com/cosmos/cosmos-sdk v0.40.0-rc7.0.20210222142146-c2be53a44733
+ github.com/cosmos/cosmos-sdk v0.42.0-alpha1.0.20210301172302-05ce78935a9b
github.com/gogo/protobuf v1.3.3
github.com/golang/protobuf v1.4.3
github.com/gorilla/mux v1.8.0
github.com/grpc-ecosystem/grpc-gateway v1.16.0
github.com/pkg/errors v0.9.1
+ github.com/rakyll/statik v0.1.7
+ github.com/spf13/cast v1.3.1
github.com/spf13/cobra v1.1.3
github.com/spf13/pflag v1.0.5
+ github.com/spf13/viper v1.7.1
github.com/stretchr/testify v1.7.0
- github.com/tendermint/tendermint v0.34.7
+ github.com/tendermint/tendermint v0.34.8
github.com/tendermint/tm-db v0.6.4
google.golang.org/genproto v0.0.0-20210114201628-6edceaf6022f
- google.golang.org/grpc v1.35.0
+ google.golang.org/grpc v1.36.0
)
diff --git a/go.sum b/go.sum
index 667a3c40..8461dc8a 100644
--- a/go.sum
+++ b/go.sum
@@ -11,6 +11,8 @@ cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqCl
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
+filippo.io/edwards25519 v1.0.0-beta.2 h1:/BZRNzm8N4K4eWfK28dL4yescorxtO7YG1yun8fy+pI=
+filippo.io/edwards25519 v1.0.0-beta.2/go.mod h1:X+pm78QAUPtFLi1z9PYIlS/bdDnvbCOGKtZ+ACWEf7o=
github.com/99designs/keyring v1.1.6 h1:kVDC2uCgVwecxCk+9zoCt2uEL6dt+dfVzMvGgnVcIuM=
github.com/99designs/keyring v1.1.6/go.mod h1:16e0ds7LGQQcT59QqkTg72Hh5ShM51Byv5PEmW6uoRU=
github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4=
@@ -26,7 +28,6 @@ github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxB
github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM=
github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
-github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/ChainSafe/go-schnorrkel v0.0.0-20200405005733-88cbf1b4c40d h1:nalkkPQcITbvhmL4+C4cKA87NW0tfm3Kl9VXRoPywFg=
@@ -126,8 +127,8 @@ github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-github.com/cosmos/cosmos-sdk v0.40.0-rc7.0.20210222142146-c2be53a44733 h1:C6B8uY33CgpL3yJt1vxOUFRPDtAjEIjbaFLL0av/8Y0=
-github.com/cosmos/cosmos-sdk v0.40.0-rc7.0.20210222142146-c2be53a44733/go.mod h1:J7BQ+xrmuiF5xG+F/Ep+d30XUQmlpIjcPX4Lp0u4oks=
+github.com/cosmos/cosmos-sdk v0.42.0-alpha1.0.20210301172302-05ce78935a9b h1:zKLvd77wFDC+1mcSOW0sZ3TYYWLcj+GmMwneI+SLkOA=
+github.com/cosmos/cosmos-sdk v0.42.0-alpha1.0.20210301172302-05ce78935a9b/go.mod h1:7mfToqDfAuY5qgVxJaB5DKksOIewhoFMPDEjV/4cu8A=
github.com/cosmos/go-bip39 v0.0.0-20180819234021-555e2067c45d/go.mod h1:tSxLoYXyBmiFeKpvmq4dzayMdCjCnu8uqmCysIGBT2Y=
github.com/cosmos/go-bip39 v1.0.0 h1:pcomnQdrdH22njcAatO0yWojsUnCO3y2tNoV1cb6hHY=
github.com/cosmos/go-bip39 v1.0.0/go.mod h1:RNJv0H/pOIVgxw6KS7QeX2a0Uo0aKUlfhZ4xuwvCdJw=
@@ -260,7 +261,6 @@ github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.3.5 h1:F768QJ1E9tib+q5Sc8MkdJi1RxLTbRcTf8LJV56aRls=
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
@@ -279,17 +279,14 @@ github.com/golang/snappy v0.0.2/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEW
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
-github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.1.1-0.20200604201612-c04b05f3adfa h1:Q75Upo5UN4JbPFURXZ8nLKYUvF85dyFRop/vQ0Rv+64=
github.com/google/gofuzz v1.1.1-0.20200604201612-c04b05f3adfa/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -366,6 +363,8 @@ github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO
github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
+github.com/hdevalence/ed25519consensus v0.0.0-20210204194344-59a8610d2b87 h1:uUjLpLt6bVvZ72SQc/B4dXcPBw4Vgd7soowdRl52qEM=
+github.com/hdevalence/ed25519consensus v0.0.0-20210204194344-59a8610d2b87/go.mod h1:XGsKKeXxeRr95aEOgipvluMPlgjr7dGlk9ZTWOjcUcg=
github.com/holiman/uint256 v1.1.1/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
@@ -460,7 +459,6 @@ github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eI
github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.3.3 h1:SzB1nHZ2Xi+17FP0zVQBHIZqvwRN9408fJO8h+eeNA8=
github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
@@ -552,7 +550,6 @@ github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5Fsn
github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og=
github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
-github.com/prometheus/client_golang v1.8.0 h1:zvJNkoCFAnYFNC24FV8nW4JdRJ3GIFcLbg65lL/JDcw=
github.com/prometheus/client_golang v1.8.0/go.mod h1:O9VU6huf47PktckDQfMTX0Y8tY0/7TSWwj+ITvv0TnM=
github.com/prometheus/client_golang v1.9.0 h1:Rrch9mh17XcxvEu9D9DEpb4isxjGBtcevQjKvxPRQIU=
github.com/prometheus/client_golang v1.9.0/go.mod h1:FqZLKOZnGdFAhOK4nqGHa7D66IdsO+O441Eve7ptJDU=
@@ -571,10 +568,9 @@ github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt2
github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.14.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s=
-github.com/prometheus/common v0.15.0 h1:4fgOnadei3EZvgRwxJ7RMpG1k1pOZth5Pc13tyspaKM=
github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s=
-github.com/prometheus/common v0.17.0 h1:kDIZLI74SS+3tedSvEkykgBkD7txMxaJAPj8DtJUKYA=
-github.com/prometheus/common v0.17.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s=
+github.com/prometheus/common v0.18.0 h1:WCVKW7aL6LEe1uryfI9dnEc2ZqNB1Fn0ok930v0iL1Y=
+github.com/prometheus/common v0.18.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
@@ -637,7 +633,6 @@ github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkU
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
-github.com/spf13/cobra v1.1.1 h1:KfztREH0tPxJJ+geloSLaAkaPkr4ki2Er5quFV1TDo4=
github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI=
github.com/spf13/cobra v1.1.3 h1:xghbfqPkxzxP3C/f3n5DdpAbdKLj4ZE4BWQI362l53M=
github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
@@ -687,8 +682,8 @@ github.com/tendermint/go-amino v0.16.0/go.mod h1:TQU0M1i/ImAo+tYpZi73AU3V/dKeCoM
github.com/tendermint/tendermint v0.34.0-rc4/go.mod h1:yotsojf2C1QBOw4dZrTcxbyxmPUrT4hNuOQWX9XUwB4=
github.com/tendermint/tendermint v0.34.0-rc6/go.mod h1:ugzyZO5foutZImv0Iyx/gOFCX6mjJTgbLHTwi17VDVg=
github.com/tendermint/tendermint v0.34.0/go.mod h1:Aj3PIipBFSNO21r+Lq3TtzQ+uKESxkbA3yo/INM4QwQ=
-github.com/tendermint/tendermint v0.34.7 h1:lvBJFNqpDuEzKfLZKtUXOL5dMOpqHonHlO6LCujyl6E=
-github.com/tendermint/tendermint v0.34.7/go.mod h1:JVuu3V1ZexOaZG8VJMRl8lnfrGw6hEB2TVnoUwKRbss=
+github.com/tendermint/tendermint v0.34.8 h1:PMWgUx47FrNTsfhxCWzoiIlVAC1SE9+WBlnsF9oQW0I=
+github.com/tendermint/tendermint v0.34.8/go.mod h1:JVuu3V1ZexOaZG8VJMRl8lnfrGw6hEB2TVnoUwKRbss=
github.com/tendermint/tm-db v0.6.2/go.mod h1:GYtQ67SUvATOcoY8/+x6ylk8Qo02BQyLrAs+yAcLvGI=
github.com/tendermint/tm-db v0.6.3/go.mod h1:lfA1dL9/Y/Y8wwyPp2NMLyn5P5Ptr/gvDFNWtrCWSf8=
github.com/tendermint/tm-db v0.6.4 h1:3N2jlnYQkXNQclQwd/eKV/NzlqPlfK21cpRRIx80XXQ=
@@ -870,9 +865,7 @@ golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200824131525-c12d262b63d8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f h1:+Nyd8tzPX9R7BWHguqsrbFdRx3WQ/1ib8I44HXV5yTA=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211 h1:9UQO31fZ+0aKQOFldThf7BKPMJTiBfWycGh/u3UoO88=
golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e h1:AyodaIpKjppX+cBfTASF2E1US3H2JFBj920Ot3rtDjs=
golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -941,7 +934,6 @@ google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200324203455-a04cca1dde73 h1:+yTMTeazSO5iBqU9NR53hgriivQQbYa5Uuaj8r3qKII=
google.golang.org/genproto v0.0.0-20200324203455-a04cca1dde73/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
@@ -964,7 +956,6 @@ google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyac
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.28.0 h1:bO/TA4OxCOummhSf10siHuG7vJOiwh7SpRpFZDkOgl4=
google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
@@ -972,8 +963,9 @@ google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM
google.golang.org/grpc v1.32.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
-google.golang.org/grpc v1.35.0 h1:TwIQcH3es+MojMVojxxfQ3l3OF2KzlRxML2xZq0kRo8=
google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.36.0 h1:o1bcQ6imQMIOpdrO3SWf2z5RV72WbDwdXuK0MDlc8As=
+google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -996,7 +988,6 @@ gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qS
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o=
-gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno=
gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/ini.v1 v1.61.0 h1:LBCdW4FmFYL4s/vDZD1RQYX7oAR6IjujCYgMdbHBR10=
gopkg.in/ini.v1 v1.61.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
@@ -1017,7 +1008,6 @@ gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclpTYkz2zFM+lzLJFO4gQ=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/light-clients/07-tendermint/types/tendermint_test.go b/light-clients/07-tendermint/types/tendermint_test.go
index be09ddb8..c9833951 100644
--- a/light-clients/07-tendermint/types/tendermint_test.go
+++ b/light-clients/07-tendermint/types/tendermint_test.go
@@ -10,12 +10,12 @@ import (
tmtypes "github.com/tendermint/tendermint/types"
"github.com/cosmos/cosmos-sdk/codec"
- "github.com/cosmos/cosmos-sdk/simapp"
sdk "github.com/cosmos/cosmos-sdk/types"
clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
ibctesting "github.com/cosmos/ibc-go/testing"
ibctestingmock "github.com/cosmos/ibc-go/testing/mock"
+ "github.com/cosmos/ibc-go/testing/simapp"
)
const (
diff --git a/light-clients/07-tendermint/types/upgrade.go b/light-clients/07-tendermint/types/upgrade.go
index 65017a2f..144b5219 100644
--- a/light-clients/07-tendermint/types/upgrade.go
+++ b/light-clients/07-tendermint/types/upgrade.go
@@ -6,10 +6,10 @@ import (
"github.com/cosmos/cosmos-sdk/codec"
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
"github.com/cosmos/ibc-go/core/exported"
- upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
)
// VerifyUpgradeAndUpdateState checks if the upgraded client has been committed by the current client
@@ -40,8 +40,8 @@ func (cs ClientState) VerifyUpgradeAndUpdateState(
upgradedClient.GetLatestHeight(), lastHeight)
}
- // counterparty chain must commit the upgraded client with all client-customizable fields zeroed out
- // at the upgrade path specified by current client
+ // upgraded client state and consensus state must be IBC tendermint client state and consensus state
+ // this may be modified in the future to upgrade to a new IBC tendermint type
// counterparty must also commit to the upgraded consensus state at a sub-path under the upgrade path specified
tmUpgradeClient, ok := upgradedClient.(*ClientState)
if !ok {
diff --git a/light-clients/07-tendermint/types/upgrade_test.go b/light-clients/07-tendermint/types/upgrade_test.go
index f7a851a0..27ea3331 100644
--- a/light-clients/07-tendermint/types/upgrade_test.go
+++ b/light-clients/07-tendermint/types/upgrade_test.go
@@ -1,11 +1,11 @@
package types_test
import (
+ upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
"github.com/cosmos/ibc-go/core/exported"
"github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
- upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
)
func (suite *TendermintTestSuite) TestVerifyUpgrade() {
@@ -15,6 +15,8 @@ func (suite *TendermintTestSuite) TestVerifyUpgrade() {
lastHeight clienttypes.Height
clientA string
proofUpgradedClient, proofUpgradedConsState []byte
+ upgradedClientBz, upgradedConsStateBz []byte
+ err error
)
testCases := []struct {
@@ -25,18 +27,12 @@ func (suite *TendermintTestSuite) TestVerifyUpgrade() {
{
name: "successful upgrade",
setup: func() {
-
- upgradedClient = types.NewClientState("newChainId", types.DefaultTrustLevel, trustingPeriod, ubdPeriod+trustingPeriod, maxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
- upgradedConsState = &types.ConsensusState{
- NextValidatorsHash: []byte("nextValsHash"),
- }
-
// upgrade Height is at next block
lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1))
// zero custom fields and store in upgrade store
- suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClient)
- suite.chainB.App.UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsState)
+ suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz)
+ suite.chainB.App.UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz)
// commit upgrade store changes and update clients
@@ -57,16 +53,16 @@ func (suite *TendermintTestSuite) TestVerifyUpgrade() {
setup: func() {
upgradedHeight := clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+2))
upgradedClient = types.NewClientState("newChainId", types.DefaultTrustLevel, trustingPeriod, ubdPeriod+trustingPeriod, maxClockDrift, upgradedHeight, commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
- upgradedConsState = &types.ConsensusState{
- NextValidatorsHash: []byte("nextValsHash"),
- }
+ upgradedClient = upgradedClient.ZeroCustomFields()
+ upgradedClientBz, err = clienttypes.MarshalClientState(suite.chainA.App.AppCodec(), upgradedClient)
+ suite.Require().NoError(err)
// upgrade Height is at next block
lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1))
// zero custom fields and store in upgrade store
- suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClient)
- suite.chainB.App.UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsState)
+ suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz)
+ suite.chainB.App.UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz)
// commit upgrade store changes and update clients
@@ -86,18 +82,12 @@ func (suite *TendermintTestSuite) TestVerifyUpgrade() {
{
name: "unsuccessful upgrade: upgrade height revision height is more than the current client revision height",
setup: func() {
-
- upgradedClient = types.NewClientState("newChainId", types.DefaultTrustLevel, trustingPeriod, ubdPeriod+trustingPeriod, maxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
- upgradedConsState = &types.ConsensusState{
- NextValidatorsHash: []byte("nextValsHash"),
- }
-
// upgrade Height is 10 blocks from now
lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+10))
// zero custom fields and store in upgrade store
- suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClient)
- suite.chainB.App.UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsState)
+ suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz)
+ suite.chainB.App.UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz)
// commit upgrade store changes and update clients
@@ -114,20 +104,43 @@ func (suite *TendermintTestSuite) TestVerifyUpgrade() {
expPass: false,
},
{
- name: "unsuccessful upgrade: chain-specified parameters do not match committed client",
+ name: "unsuccessful upgrade: committed client does not have zeroed custom fields",
setup: func() {
-
+ // non-zeroed upgrade client
upgradedClient = types.NewClientState("newChainId", types.DefaultTrustLevel, trustingPeriod, ubdPeriod+trustingPeriod, maxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
- upgradedConsState = &types.ConsensusState{
- NextValidatorsHash: []byte("nextValsHash"),
- }
+ upgradedClientBz, err = clienttypes.MarshalClientState(suite.chainA.App.AppCodec(), upgradedClient)
+ suite.Require().NoError(err)
// upgrade Height is at next block
lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1))
// zero custom fields and store in upgrade store
- suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClient)
- suite.chainB.App.UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsState)
+ suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz)
+ suite.chainB.App.UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz)
+
+ // commit upgrade store changes and update clients
+
+ suite.coordinator.CommitBlock(suite.chainB)
+ err := suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ suite.Require().NoError(err)
+
+ cs, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), clientA)
+ suite.Require().True(found)
+
+ proofUpgradedClient, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight())
+ proofUpgradedConsState, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight())
+ },
+ expPass: false,
+ },
+ {
+ name: "unsuccessful upgrade: chain-specified parameters do not match committed client",
+ setup: func() {
+ // upgrade Height is at next block
+ lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1))
+
+ // zero custom fields and store in upgrade store
+ suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz)
+ suite.chainB.App.UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz)
// change upgradedClient client-specified parameters
upgradedClient = types.NewClientState("wrongchainID", types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), upgradePath, true, true)
@@ -147,15 +160,9 @@ func (suite *TendermintTestSuite) TestVerifyUpgrade() {
{
name: "unsuccessful upgrade: client-specified parameters do not match previous client",
setup: func() {
-
- upgradedClient = types.NewClientState("newChainId", types.DefaultTrustLevel, trustingPeriod, ubdPeriod+trustingPeriod, maxClockDrift, lastHeight, commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
- upgradedConsState = &types.ConsensusState{
- NextValidatorsHash: []byte("nextValsHash"),
- }
-
// zero custom fields and store in upgrade store
- suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClient)
- suite.chainB.App.UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsState)
+ suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz)
+ suite.chainB.App.UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz)
// change upgradedClient client-specified parameters
upgradedClient = types.NewClientState("newChainId", types.DefaultTrustLevel, ubdPeriod, ubdPeriod+trustingPeriod, maxClockDrift+5, lastHeight, commitmenttypes.GetSDKSpecs(), upgradePath, true, false)
@@ -175,18 +182,12 @@ func (suite *TendermintTestSuite) TestVerifyUpgrade() {
{
name: "unsuccessful upgrade: relayer-submitted consensus state does not match counterparty-committed consensus state",
setup: func() {
-
- upgradedClient = types.NewClientState("newChainId", types.DefaultTrustLevel, trustingPeriod, ubdPeriod+trustingPeriod, maxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
- upgradedConsState = &types.ConsensusState{
- NextValidatorsHash: []byte("nextValsHash"),
- }
-
// upgrade Height is at next block
lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1))
// zero custom fields and store in upgrade store
- suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClient)
- suite.chainB.App.UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsState)
+ suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz)
+ suite.chainB.App.UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz)
// change submitted upgradedConsensusState
upgradedConsState = &types.ConsensusState{
@@ -210,11 +211,7 @@ func (suite *TendermintTestSuite) TestVerifyUpgrade() {
{
name: "unsuccessful upgrade: client proof unmarshal failed",
setup: func() {
- upgradedClient = types.NewClientState("newChainId", types.DefaultTrustLevel, trustingPeriod, ubdPeriod+trustingPeriod, maxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
- upgradedConsState = &types.ConsensusState{
- NextValidatorsHash: []byte("nextValsHash"),
- }
- suite.chainB.App.UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsState)
+ suite.chainB.App.UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz)
cs, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), clientA)
suite.Require().True(found)
@@ -228,12 +225,7 @@ func (suite *TendermintTestSuite) TestVerifyUpgrade() {
{
name: "unsuccessful upgrade: consensus state proof unmarshal failed",
setup: func() {
- upgradedClient = types.NewClientState("newChainId", types.DefaultTrustLevel, trustingPeriod, ubdPeriod+trustingPeriod, maxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
- upgradedConsState = &types.ConsensusState{
- NextValidatorsHash: []byte("nextValsHash"),
- }
-
- suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClient)
+ suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz)
cs, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), clientA)
suite.Require().True(found)
@@ -247,16 +239,12 @@ func (suite *TendermintTestSuite) TestVerifyUpgrade() {
{
name: "unsuccessful upgrade: client proof verification failed",
setup: func() {
- // create but do not store upgraded client
- upgradedClient = types.NewClientState("newChainId", types.DefaultTrustLevel, trustingPeriod, ubdPeriod+trustingPeriod, maxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
- upgradedConsState = &types.ConsensusState{
- NextValidatorsHash: []byte("nextValsHash"),
- }
+ // do not store upgraded client
// upgrade Height is at next block
lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1))
- suite.chainB.App.UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsState)
+ suite.chainB.App.UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz)
cs, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), clientA)
suite.Require().True(found)
@@ -269,16 +257,12 @@ func (suite *TendermintTestSuite) TestVerifyUpgrade() {
{
name: "unsuccessful upgrade: consensus state proof verification failed",
setup: func() {
- // create but do not store upgraded client
- upgradedClient = types.NewClientState("newChainId", types.DefaultTrustLevel, trustingPeriod, ubdPeriod+trustingPeriod, maxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
- upgradedConsState = &types.ConsensusState{
- NextValidatorsHash: []byte("nextValsHash"),
- }
+ // do not store upgraded client
// upgrade Height is at next block
lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1))
- suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClient)
+ suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz)
cs, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), clientA)
suite.Require().True(found)
@@ -291,17 +275,11 @@ func (suite *TendermintTestSuite) TestVerifyUpgrade() {
{
name: "unsuccessful upgrade: upgrade path is empty",
setup: func() {
-
- upgradedClient = types.NewClientState("newChainId", types.DefaultTrustLevel, trustingPeriod, ubdPeriod+trustingPeriod, maxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
- upgradedConsState = &types.ConsensusState{
- NextValidatorsHash: []byte("nextValsHash"),
- }
-
// upgrade Height is at next block
lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1))
// zero custom fields and store in upgrade store
- suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClient)
+ suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz)
// commit upgrade store changes and update clients
@@ -325,17 +303,11 @@ func (suite *TendermintTestSuite) TestVerifyUpgrade() {
{
name: "unsuccessful upgrade: upgraded height is not greater than current height",
setup: func() {
-
- upgradedClient = types.NewClientState("newChainId", types.DefaultTrustLevel, trustingPeriod, ubdPeriod+trustingPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
- upgradedConsState = &types.ConsensusState{
- NextValidatorsHash: []byte("nextValsHash"),
- }
-
// upgrade Height is at next block
lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1))
// zero custom fields and store in upgrade store
- suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClient)
+ suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz)
// commit upgrade store changes and update clients
@@ -354,17 +326,11 @@ func (suite *TendermintTestSuite) TestVerifyUpgrade() {
{
name: "unsuccessful upgrade: consensus state for upgrade height cannot be found",
setup: func() {
-
- upgradedClient = types.NewClientState("newChainId", types.DefaultTrustLevel, trustingPeriod, ubdPeriod+trustingPeriod, maxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
- upgradedConsState = &types.ConsensusState{
- NextValidatorsHash: []byte("nextValsHash"),
- }
-
// upgrade Height is at next block
lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+100))
// zero custom fields and store in upgrade store
- suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClient)
+ suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz)
// commit upgrade store changes and update clients
@@ -383,14 +349,8 @@ func (suite *TendermintTestSuite) TestVerifyUpgrade() {
{
name: "unsuccessful upgrade: client is expired",
setup: func() {
-
- upgradedClient = types.NewClientState("newChainId", types.DefaultTrustLevel, trustingPeriod, ubdPeriod+trustingPeriod, maxClockDrift, lastHeight, commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
- upgradedConsState = &types.ConsensusState{
- NextValidatorsHash: []byte("nextValsHash"),
- }
-
// zero custom fields and store in upgrade store
- suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClient)
+ suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz)
// commit upgrade store changes and update clients
@@ -412,17 +372,11 @@ func (suite *TendermintTestSuite) TestVerifyUpgrade() {
{
name: "unsuccessful upgrade: updated unbonding period is equal to trusting period",
setup: func() {
-
- upgradedClient = types.NewClientState("newChainId", types.DefaultTrustLevel, trustingPeriod, trustingPeriod, maxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
- upgradedConsState = &types.ConsensusState{
- NextValidatorsHash: []byte("nextValsHash"),
- }
-
// upgrade Height is at next block
lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1))
// zero custom fields and store in upgrade store
- suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClient)
+ suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz)
// commit upgrade store changes and update clients
@@ -441,19 +395,17 @@ func (suite *TendermintTestSuite) TestVerifyUpgrade() {
{
name: "unsuccessful upgrade: final client is not valid",
setup: func() {
-
// new client has smaller unbonding period such that old trusting period is no longer valid
upgradedClient = types.NewClientState("newChainId", types.DefaultTrustLevel, trustingPeriod, trustingPeriod, maxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
- upgradedConsState = &types.ConsensusState{
- NextValidatorsHash: []byte("nextValsHash"),
- }
+ upgradedClientBz, err = clienttypes.MarshalClientState(suite.chainA.App.AppCodec(), upgradedClient)
+ suite.Require().NoError(err)
// upgrade Height is at next block
lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1))
// zero custom fields and store in upgrade store
- suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClient)
- suite.chainB.App.UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsState)
+ suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz)
+ suite.chainB.App.UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz)
// commit upgrade store changes and update clients
@@ -478,6 +430,16 @@ func (suite *TendermintTestSuite) TestVerifyUpgrade() {
suite.SetupTest()
clientA, _ = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
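+		// for each case, build the default upgraded client (custom fields zeroed) and
+		// the default upgraded consensus state, and marshal both, since the upgrade
+		// keeper stores the committed upgrade as raw bytes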
+ upgradedClient = types.NewClientState("newChainId", types.DefaultTrustLevel, trustingPeriod, ubdPeriod+trustingPeriod, maxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
+ upgradedClient = upgradedClient.ZeroCustomFields()
+ upgradedClientBz, err = clienttypes.MarshalClientState(suite.chainA.App.AppCodec(), upgradedClient)
+ suite.Require().NoError(err)
+
+ upgradedConsState = &types.ConsensusState{
+ NextValidatorsHash: []byte("nextValsHash"),
+ }
+ upgradedConsStateBz, err = clienttypes.MarshalConsensusState(suite.chainA.App.AppCodec(), upgradedConsState)
+ suite.Require().NoError(err)
tc.setup()
diff --git a/light-clients/09-localhost/types/localhost_test.go b/light-clients/09-localhost/types/localhost_test.go
index 73356dcd..c3d03c9a 100644
--- a/light-clients/09-localhost/types/localhost_test.go
+++ b/light-clients/09-localhost/types/localhost_test.go
@@ -7,10 +7,10 @@ import (
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
"github.com/cosmos/cosmos-sdk/codec"
- "github.com/cosmos/cosmos-sdk/simapp"
sdk "github.com/cosmos/cosmos-sdk/types"
clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
"github.com/cosmos/ibc-go/core/exported"
+ "github.com/cosmos/ibc-go/testing/simapp"
)
const (
diff --git a/proto/ibcgo/core/client/v1/client.proto b/proto/ibcgo/core/client/v1/client.proto
index 6e036bed..36ef478c 100644
--- a/proto/ibcgo/core/client/v1/client.proto
+++ b/proto/ibcgo/core/client/v1/client.proto
@@ -6,6 +6,7 @@ option go_package = "github.com/cosmos/ibc-go/core/02-client/types";
import "gogoproto/gogo.proto";
import "google/protobuf/any.proto";
+import "cosmos/upgrade/v1beta1/upgrade.proto";
// IdentifiedClientState defines a client state with an additional client
// identifier field.
@@ -66,6 +67,27 @@ message ClientUpdateProposal {
];
}
+// UpgradeProposal is a gov Content type for initiating an IBC breaking
+// upgrade.
+message UpgradeProposal {
+ option (gogoproto.goproto_getters) = false;
+ option (gogoproto.goproto_stringer) = false;
+ option (gogoproto.equal) = true;
+
+ string title = 1;
+ string description = 2;
+ cosmos.upgrade.v1beta1.Plan plan = 3 [ (gogoproto.nullable) = false ];
+
+ // An UpgradedClientState must be provided to perform an IBC breaking upgrade.
+ // This will make the chain commit to the correct upgraded (self) client state
+ // before the upgrade occurs, so that connecting chains can verify that the
+ // new upgraded client is valid by verifying a proof on the previous version
+ // of the chain. This will allow IBC connections to persist smoothly across
+ // planned chain upgrades
+ google.protobuf.Any upgraded_client_state = 4
+ [ (gogoproto.moretags) = "yaml:\"upgraded_client_state\"" ];
+}
+
// Height is a monotonically increasing data type
// that can be compared against another Height for the purposes of updating and
// freezing clients
diff --git a/proto/ibcgo/core/client/v1/query.proto b/proto/ibcgo/core/client/v1/query.proto
index fe218af4..22c61ea3 100644
--- a/proto/ibcgo/core/client/v1/query.proto
+++ b/proto/ibcgo/core/client/v1/query.proto
@@ -46,6 +46,13 @@ service Query {
returns (QueryClientParamsResponse) {
option (google.api.http).get = "/ibc/client/v1/params";
}
+
+ // UpgradedClientState queries an Upgraded IBC light client.
+ rpc UpgradedClientState(QueryUpgradedClientStateRequest)
+ returns (QueryUpgradedClientStateResponse) {
+ option (google.api.http).get =
+ "/ibc/core/client/v1/upgraded_client_states/{client_id}";
+ }
}
// QueryClientStateRequest is the request type for the Query/ClientState RPC
@@ -141,3 +148,19 @@ message QueryClientParamsResponse {
// params defines the parameters of the module.
Params params = 1;
}
+// QueryUpgradedClientStateRequest is the request type for the
+// Query/UpgradedClientState RPC method.
+message QueryUpgradedClientStateRequest {
+ // client state unique identifier
+ string client_id = 1;
+ // plan height of the current chain must be sent in request
+ // as this is the height under which upgraded client state is stored
+ int64 plan_height = 2;
+}
+
+// QueryUpgradedClientStateResponse is the response type for the
+// Query/UpgradedClientState RPC method.
+message QueryUpgradedClientStateResponse {
+ // client state associated with the request identifier
+ google.protobuf.Any upgraded_client_state = 1;
+}
diff --git a/testing/chain.go b/testing/chain.go
index 69f0c94c..be337f4f 100644
--- a/testing/chain.go
+++ b/testing/chain.go
@@ -19,12 +19,13 @@ import (
"github.com/cosmos/cosmos-sdk/codec"
"github.com/cosmos/cosmos-sdk/crypto/keys/secp256k1"
cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types"
- "github.com/cosmos/cosmos-sdk/simapp"
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
banktypes "github.com/cosmos/cosmos-sdk/x/bank/types"
capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"
+ "github.com/cosmos/cosmos-sdk/x/staking/teststaking"
+ stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types"
ibctransfertypes "github.com/cosmos/ibc-go/apps/transfer/types"
clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
connectiontypes "github.com/cosmos/ibc-go/core/03-connection/types"
@@ -35,8 +36,7 @@ import (
"github.com/cosmos/ibc-go/core/types"
ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
"github.com/cosmos/ibc-go/testing/mock"
- "github.com/cosmos/cosmos-sdk/x/staking/teststaking"
- stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types"
+ "github.com/cosmos/ibc-go/testing/simapp"
)
const (
diff --git a/testing/mock/mock.go b/testing/mock/mock.go
index 97a03c8b..5df4d542 100644
--- a/testing/mock/mock.go
+++ b/testing/mock/mock.go
@@ -124,8 +124,6 @@ func (am AppModule) EndBlock(ctx sdk.Context, req abci.RequestEndBlock) []abci.V
return []abci.ValidatorUpdate{}
}
-//____________________________________________________________________________
-
// OnChanOpenInit implements the IBCModule interface.
func (am AppModule) OnChanOpenInit(
ctx sdk.Context, _ channeltypes.Order, _ []string, portID string,
diff --git a/testing/sdk_test.go b/testing/sdk_test.go
new file mode 100644
index 00000000..c71c47a2
--- /dev/null
+++ b/testing/sdk_test.go
@@ -0,0 +1,334 @@
+package ibctesting_test
+
+import (
+ "fmt"
+ "testing"
+ "time"
+
+ "github.com/cosmos/cosmos-sdk/baseapp"
+ "github.com/cosmos/cosmos-sdk/client/flags"
+ "github.com/cosmos/cosmos-sdk/crypto/hd"
+ "github.com/cosmos/cosmos-sdk/crypto/keyring"
+ kmultisig "github.com/cosmos/cosmos-sdk/crypto/keys/multisig"
+ cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types"
+ servertypes "github.com/cosmos/cosmos-sdk/server/types"
+ storetypes "github.com/cosmos/cosmos-sdk/store/types"
+ "github.com/cosmos/cosmos-sdk/testutil"
+ clitestutil "github.com/cosmos/cosmos-sdk/testutil/cli"
+ "github.com/cosmos/cosmos-sdk/testutil/network"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/types/rest"
+ txtypes "github.com/cosmos/cosmos-sdk/types/tx"
+ authcli "github.com/cosmos/cosmos-sdk/x/auth/client/cli"
+ authrest "github.com/cosmos/cosmos-sdk/x/auth/client/rest"
+ authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
+ "github.com/cosmos/ibc-go/testing/simapp/params"
+ "github.com/spf13/cobra"
+ "github.com/stretchr/testify/suite"
+ tmrand "github.com/tendermint/tendermint/libs/rand"
+ dbm "github.com/tendermint/tm-db"
+
+ ibcclientcli "github.com/cosmos/ibc-go/core/02-client/client/cli"
+ ibccli "github.com/cosmos/ibc-go/core/04-channel/client/cli"
+ "github.com/cosmos/ibc-go/testing/simapp"
+)
+
+/*
+	This file contains tests from the SDK which had to be deleted during the migration of
+ the IBC module from the SDK into this repository. https://github.com/cosmos/cosmos-sdk/pull/8735
+
+ They can be removed once the SDK deprecates amino.
+*/
+
+type IntegrationTestSuite struct {
+ suite.Suite
+
+ cfg network.Config
+ network *network.Network
+}
+
+func (s *IntegrationTestSuite) SetupSuite() {
+ s.T().Log("setting up integration test suite")
+
+ cfg := DefaultConfig()
+
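+	// run the test network with two validators instead of the four set by DefaultConfig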
+ cfg.NumValidators = 2
+
+ s.cfg = cfg
+ s.network = network.New(s.T(), cfg)
+
+ kb := s.network.Validators[0].ClientCtx.Keyring
+ _, _, err := kb.NewMnemonic("newAccount", keyring.English, sdk.FullFundraiserPath, keyring.DefaultBIP39Passphrase, hd.Secp256k1)
+ s.Require().NoError(err)
+
+ account1, _, err := kb.NewMnemonic("newAccount1", keyring.English, sdk.FullFundraiserPath, keyring.DefaultBIP39Passphrase, hd.Secp256k1)
+ s.Require().NoError(err)
+
+ account2, _, err := kb.NewMnemonic("newAccount2", keyring.English, sdk.FullFundraiserPath, keyring.DefaultBIP39Passphrase, hd.Secp256k1)
+ s.Require().NoError(err)
+
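+	// create a 2-of-2 legacy amino multisig pubkey from the two new accounts and save it in the keyring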
+ multi := kmultisig.NewLegacyAminoPubKey(2, []cryptotypes.PubKey{account1.GetPubKey(), account2.GetPubKey()})
+ _, err = kb.SaveMultisig("multi", multi)
+ s.Require().NoError(err)
+
+ _, err = s.network.WaitForHeight(1)
+ s.Require().NoError(err)
+
+ s.Require().NoError(s.network.WaitForNextBlock())
+}
+
+func TestIntegrationTestSuite(t *testing.T) {
+ suite.Run(t, new(IntegrationTestSuite))
+}
+
+// NewAppConstructor returns a new simapp AppConstructor
+func NewAppConstructor(encodingCfg params.EncodingConfig) network.AppConstructor {
+ return func(val network.Validator) servertypes.Application {
+ return simapp.NewSimApp(
+ val.Ctx.Logger, dbm.NewMemDB(), nil, true, make(map[int64]bool), val.Ctx.Config.RootDir, 0,
+ encodingCfg,
+ simapp.EmptyAppOptions{},
+ baseapp.SetPruning(storetypes.NewPruningOptionsFromString(val.AppConfig.Pruning)),
+ baseapp.SetMinGasPrices(val.AppConfig.MinGasPrices),
+ )
+ }
+}
+
+// DefaultConfig returns a sane default configuration suitable for nearly all
+// testing requirements.
+func DefaultConfig() network.Config {
+ encCfg := simapp.MakeTestEncodingConfig()
+
+ return network.Config{
+ Codec: encCfg.Marshaler,
+ TxConfig: encCfg.TxConfig,
+ LegacyAmino: encCfg.Amino,
+ InterfaceRegistry: encCfg.InterfaceRegistry,
+ AccountRetriever: authtypes.AccountRetriever{},
+ AppConstructor: NewAppConstructor(encCfg),
+ GenesisState: simapp.ModuleBasics.DefaultGenesis(encCfg.Marshaler),
+ TimeoutCommit: 2 * time.Second,
+ ChainID: "chain-" + tmrand.NewRand().Str(6),
+ NumValidators: 4,
+ BondDenom: sdk.DefaultBondDenom,
+ MinGasPrices: fmt.Sprintf("0.000006%s", sdk.DefaultBondDenom),
+ AccountTokens: sdk.TokensFromConsensusPower(1000),
+ StakingTokens: sdk.TokensFromConsensusPower(500),
+ BondedTokens: sdk.TokensFromConsensusPower(100),
+ PruningStrategy: storetypes.PruningOptionNothing,
+ CleanupDir: true,
+ SigningAlgo: string(hd.Secp256k1Type),
+ KeyringOptions: []keyring.Option{},
+ }
+}
+
+func (s *IntegrationTestSuite) TearDownSuite() {
+ s.T().Log("tearing down integration test suite")
+ s.network.Cleanup()
+}
+
+// mkIBCStdTx returns an IBC tx encoded as amino-JSON. Since we can't amino-marshal
+// a tx with "cosmos-sdk/MsgTransfer" using the SDK, we just hardcode the tx
+// here. External clients might still produce such a tx, see https://github.com/cosmos/cosmos-sdk/issues/8022.
+func mkIBCStdTx() []byte {
+ ibcTx := `{
+ "account_number": "68",
+ "chain_id": "stargate-4",
+ "fee": {
+ "amount": [
+ {
+ "amount": "3500",
+ "denom": "umuon"
+ }
+ ],
+ "gas": "350000"
+ },
+ "memo": "",
+ "msg": [
+ {
+ "type": "cosmos-sdk/MsgTransfer",
+ "value": {
+ "receiver": "cosmos1q9wtnlwdjrhwtcjmt2uq77jrgx7z3usrq2yz7z",
+ "sender": "cosmos1q9wtnlwdjrhwtcjmt2uq77jrgx7z3usrq2yz7z",
+ "source_channel": "channel-0",
+ "source_port": "transfer",
+ "token": {
+ "amount": "1000000",
+ "denom": "umuon"
+ }
+ }
+ }
+ ],
+ "sequence": "24"
+ }`
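+	// wrap the hardcoded tx in a legacy REST broadcast request using async broadcast mode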
+ req := fmt.Sprintf(`{"tx":%s,"mode":"async"}`, ibcTx)
+
+ return []byte(req)
+}
+
+func (s *IntegrationTestSuite) TestEncodeIBCTx() {
+ val := s.network.Validators[0]
+
+ req := mkIBCStdTx()
+ res, err := rest.PostRequest(fmt.Sprintf("%s/txs/encode", val.APIAddress), "application/json", []byte(req))
+ s.Require().NoError(err)
+
+ s.Require().Contains(string(res), authrest.ErrEncodeDecode.Error())
+}
+
+func (s *IntegrationTestSuite) TestBroadcastIBCTxRequest() {
+ val := s.network.Validators[0]
+
+ req := mkIBCStdTx()
+ res, err := rest.PostRequest(fmt.Sprintf("%s/txs", val.APIAddress), "application/json", []byte(req))
+ s.Require().NoError(err)
+
+ s.Require().NotContains(string(res), "this transaction cannot be broadcasted via legacy REST endpoints", string(res))
+}
+
+// TestLegacyRestErrMessages creates two IBC txs, one that fails and one that
+// succeeds, and makes sure neither can be queried via legacy REST (a helpful
+// error message is returned instead). The intention is to test the error message
+// returned when querying a tx signed with proto; since IBC will not support
+// legacy amino at all, we use a message from the IBC module.
+func (s *IntegrationTestSuite) TestLegacyRestErrMessages() {
+ val := s.network.Validators[0]
+
+ // Write client state json to temp file, used for an IBC message.
+	// Generated by printing the result of cdc.MarshalInterfaceJSON on
+ // a solo machine client state
+ clientStateJSON := testutil.WriteToNewTempFile(
+ s.T(),
+ `{"@type":"/ibcgo.lightclients.solomachine.v1.ClientState","sequence":"1","frozen_sequence":"0","consensus_state":{"public_key":{"@type":"/cosmos.crypto.secp256k1.PubKey","key":"AtK50+5pJOoaa04qqAqrnyAqsYrwrR/INnA6UPIaYZlp"},"diversifier":"testing","timestamp":"10"},"allow_update_after_proposal":false}`,
+ )
+
+ // Write consensus json to temp file, used for an IBC message.
+	// Generated by printing the result of cdc.MarshalInterfaceJSON on
+ // a solo machine consensus state
+ consensusJSON := testutil.WriteToNewTempFile(
+ s.T(),
+ `{"@type":"/ibcgo.lightclients.solomachine.v1.ConsensusState","public_key":{"@type":"/cosmos.crypto.secp256k1.PubKey","key":"AtK50+5pJOoaa04qqAqrnyAqsYrwrR/INnA6UPIaYZlp"},"diversifier":"testing","timestamp":"10"}`,
+ )
+
+ testCases := []struct {
+ desc string
+ cmd *cobra.Command
+ args []string
+ code uint32
+ }{
+ {
+ "Failing IBC message",
+ ibccli.NewChannelCloseInitCmd(),
+ []string{
+ "121", // dummy port-id
+ "channel-0", // dummy channel-id
+ fmt.Sprintf("--%s=true", flags.FlagSkipConfirmation),
+ fmt.Sprintf("--%s=%s", flags.FlagBroadcastMode, flags.BroadcastBlock),
+ fmt.Sprintf("--%s=%s", flags.FlagFees, sdk.NewCoins(sdk.NewCoin(s.cfg.BondDenom, sdk.NewInt(10))).String()),
+ fmt.Sprintf("--gas=%d", flags.DefaultGasLimit),
+ fmt.Sprintf("--%s=%s", flags.FlagFrom, val.Address.String()),
+ fmt.Sprintf("--%s=foobar", flags.FlagMemo),
+ },
+ uint32(7),
+ },
+ {
+ "Successful IBC message",
+ ibcclientcli.NewCreateClientCmd(),
+ []string{
+ clientStateJSON.Name(), // path to client state json
+ consensusJSON.Name(), // path to consensus json,
+ fmt.Sprintf("--%s=true", flags.FlagSkipConfirmation),
+ fmt.Sprintf("--%s=%s", flags.FlagBroadcastMode, flags.BroadcastBlock),
+ fmt.Sprintf("--%s=%s", flags.FlagFees, sdk.NewCoins(sdk.NewCoin(s.cfg.BondDenom, sdk.NewInt(10))).String()),
+ fmt.Sprintf("--gas=%d", flags.DefaultGasLimit),
+ fmt.Sprintf("--%s=%s", flags.FlagFrom, val.Address.String()),
+ fmt.Sprintf("--%s=foobar", flags.FlagMemo),
+ },
+ uint32(0),
+ },
+ }
+
+ for _, tc := range testCases {
+ s.Run(fmt.Sprintf("Case %s", tc.desc), func() {
+ out, err := clitestutil.ExecTestCLICmd(val.ClientCtx, tc.cmd, tc.args)
+ s.Require().NoError(err)
+ var txRes sdk.TxResponse
+ s.Require().NoError(val.ClientCtx.JSONMarshaler.UnmarshalJSON(out.Bytes(), &txRes))
+ s.Require().Equal(tc.code, txRes.Code)
+
+ s.Require().NoError(s.network.WaitForNextBlock())
+
+ s.testQueryIBCTx(txRes, tc.cmd, tc.args)
+ })
+ }
+}
+
+// testQueryIBCTx is a helper function to test querying txs which:
+// - show an error message on legacy REST endpoints
+// - succeed using gRPC
+// In practice, we call this function on IBC txs.
+func (s *IntegrationTestSuite) testQueryIBCTx(txRes sdk.TxResponse, cmd *cobra.Command, args []string) {
+ val := s.network.Validators[0]
+
+ errMsg := "this transaction cannot be displayed via legacy REST endpoints, because it does not support" +
+ " Amino serialization. Please either use CLI, gRPC, gRPC-gateway, or directly query the Tendermint RPC" +
+ " endpoint to query this transaction. The new REST endpoint (via gRPC-gateway) is "
+
+	// Test that legacy endpoints return the above error message on IBC txs.
+ testCases := []struct {
+ desc string
+ url string
+ }{
+ {
+ "Query by hash",
+ fmt.Sprintf("%s/txs/%s", val.APIAddress, txRes.TxHash),
+ },
+ {
+ "Query by height",
+ fmt.Sprintf("%s/txs?tx.height=%d", val.APIAddress, txRes.Height),
+ },
+ }
+
+ for _, tc := range testCases {
+ s.Run(fmt.Sprintf("Case %s", tc.desc), func() {
+ txJSON, err := rest.GetRequest(tc.url)
+ s.Require().NoError(err)
+
+ var errResp rest.ErrorResponse
+ s.Require().NoError(val.ClientCtx.LegacyAmino.UnmarshalJSON(txJSON, &errResp))
+
+ s.Require().Contains(errResp.Error, errMsg)
+ })
+ }
+
+	// try fetching the tx via gRPC; it succeeds because the gRPC endpoint uses the proto codec.
+ grpcJSON, err := rest.GetRequest(fmt.Sprintf("%s/cosmos/tx/v1beta1/txs/%s", val.APIAddress, txRes.TxHash))
+ s.Require().NoError(err)
+
+ var getTxRes txtypes.GetTxResponse
+ s.Require().NoError(val.ClientCtx.JSONMarshaler.UnmarshalJSON(grpcJSON, &getTxRes))
+ s.Require().Equal(getTxRes.Tx.Body.Memo, "foobar")
+
+	// generate an unsigned tx using the generate-only flag.
+ args = append(args, fmt.Sprintf("--%s=true", flags.FlagGenerateOnly))
+ out, err := clitestutil.ExecTestCLICmd(val.ClientCtx, cmd, args)
+ s.Require().NoError(err)
+
+ txFile := testutil.WriteToNewTempFile(s.T(), string(out.Bytes()))
+ txFileName := txFile.Name()
+
+ // encode the generated txn.
+ out, err = clitestutil.ExecTestCLICmd(val.ClientCtx, authcli.GetEncodeCommand(), []string{txFileName})
+ s.Require().NoError(err)
+
+ bz, err := val.ClientCtx.LegacyAmino.MarshalJSON(authrest.DecodeReq{Tx: string(out.Bytes())})
+ s.Require().NoError(err)
+
+	// try to decode the tx using legacy REST; it fails.
+ res, err := rest.PostRequest(fmt.Sprintf("%s/txs/decode", val.APIAddress), "application/json", bz)
+ s.Require().NoError(err)
+
+ var errResp rest.ErrorResponse
+ s.Require().NoError(val.ClientCtx.LegacyAmino.UnmarshalJSON(res, &errResp))
+ s.Require().Contains(errResp.Error, errMsg)
+}
diff --git a/testing/simapp/README.md b/testing/simapp/README.md
new file mode 100644
index 00000000..fc449f7f
--- /dev/null
+++ b/testing/simapp/README.md
@@ -0,0 +1,51 @@
+---
+order: false
+---
+
+# simapp
+
+simapp is an application built using the Cosmos SDK for testing and educational purposes.
+
+## Running testnets with `simd`
+
+If you want to spin up a quick testnet with your friends, you can follow these steps.
+Unless otherwise noted, every step must be done by everyone who wants to participate
+in this testnet.
+
+1. `$ make build`. This will build the `simd` binary and install it in your Cosmos SDK repo,
+ inside a new `build` directory. The following instructions are run from inside
+ that directory.
+2. If you've run `simd` before, you may need to reset your database before starting a new
+ testnet: `$ ./simd unsafe-reset-all`
+3. `$ ./simd init [moniker]`. This will initialize a new working directory, by default at
+   `~/.simapp`. You need to provide a "moniker," but it doesn't matter what it is.
+4. `$ ./simd keys add [key_name]`. This will create a new key, with a name of your choosing.
+ Save the output of this command somewhere; you'll need the address generated here later.
+5. `$ ./simd add-genesis-account $(simd keys show [key_name] -a) [amount]`, where `key_name`
+ is the same key name as before; and `amount` is something like `10000000000000000000000000stake`.
+6. `$ ./simd gentx [key_name] [amount] --chain-id [chain-id]`. This will create the
+ genesis transaction for your new chain.
+7. Now, one person needs to create the genesis file `genesis.json` using the genesis transactions
+ from every participant, by gathering all the genesis transactions under `config/gentx` and then
+ calling `./simd collect-gentxs`. This will create a new `genesis.json` file that includes data
+ from all the validators (we sometimes call it the "super genesis file" to distinguish it from
+ single-validator genesis files).
+8. Once you've received the super genesis file, overwrite your original `genesis.json` file with
+ the new super `genesis.json`.
+9. Modify your `config/config.toml` (in the simapp working directory) to include the other participants as
+ persistent peers:
+
+ ```
+ # Comma separated list of nodes to keep persistent connections to
+ persistent_peers = "[validator address]@[ip address]:[port],[validator address]@[ip address]:[port]"
+ ```
+
+ You can find `validator address` by running `./simd tendermint show-node-id`. (It will be hex-encoded.)
+ By default, `port` is 26656.
+10. Now you can start your nodes: `$ ./simd start`.
+
+Now you have a small testnet that you can use to try out changes to the Cosmos SDK or Tendermint!
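+
+As a reference, steps 3 through 6 for a single participant might look like the
+following (the moniker, key name, amounts, and chain-id below are placeholders;
+substitute your own values):
+
+```
+./simd init mymoniker
+./simd keys add alice
+./simd add-genesis-account $(./simd keys show alice -a) 10000000000000000000000000stake
+./simd gentx alice 1000000000000000000000stake --chain-id my-test-chain
+```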
+
+NOTE: Sometimes creating the network through `collect-gentxs` will fail, and validators will start
+in a funny state (and then panic). If this happens, you can try to create and start the network first
+with a single validator and then add additional validators using a `create-validator` transaction.
\ No newline at end of file
diff --git a/testing/simapp/app.go b/testing/simapp/app.go
new file mode 100644
index 00000000..872d1609
--- /dev/null
+++ b/testing/simapp/app.go
@@ -0,0 +1,666 @@
+package simapp
+
+import (
+ "encoding/json"
+ "io"
+ "net/http"
+ "os"
+ "path/filepath"
+
+ "github.com/gorilla/mux"
+ "github.com/rakyll/statik/fs"
+ "github.com/spf13/cast"
+ abci "github.com/tendermint/tendermint/abci/types"
+ "github.com/tendermint/tendermint/libs/log"
+ tmos "github.com/tendermint/tendermint/libs/os"
+ tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
+ dbm "github.com/tendermint/tm-db"
+
+ "github.com/cosmos/cosmos-sdk/baseapp"
+ "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/client/grpc/tmservice"
+ "github.com/cosmos/cosmos-sdk/client/rpc"
+ "github.com/cosmos/cosmos-sdk/codec"
+ "github.com/cosmos/cosmos-sdk/codec/types"
+ "github.com/cosmos/cosmos-sdk/server/api"
+ "github.com/cosmos/cosmos-sdk/server/config"
+ servertypes "github.com/cosmos/cosmos-sdk/server/types"
+ "github.com/cosmos/cosmos-sdk/testutil/testdata"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/types/module"
+ "github.com/cosmos/cosmos-sdk/version"
+ "github.com/cosmos/cosmos-sdk/x/auth"
+ "github.com/cosmos/cosmos-sdk/x/auth/ante"
+ authrest "github.com/cosmos/cosmos-sdk/x/auth/client/rest"
+ authkeeper "github.com/cosmos/cosmos-sdk/x/auth/keeper"
+ authsims "github.com/cosmos/cosmos-sdk/x/auth/simulation"
+ authtx "github.com/cosmos/cosmos-sdk/x/auth/tx"
+ authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
+ "github.com/cosmos/cosmos-sdk/x/auth/vesting"
+ "github.com/cosmos/cosmos-sdk/x/bank"
+ bankkeeper "github.com/cosmos/cosmos-sdk/x/bank/keeper"
+ banktypes "github.com/cosmos/cosmos-sdk/x/bank/types"
+ "github.com/cosmos/cosmos-sdk/x/capability"
+ capabilitykeeper "github.com/cosmos/cosmos-sdk/x/capability/keeper"
+ capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"
+ simappparams "github.com/cosmos/ibc-go/testing/simapp/params"
+
+ "github.com/cosmos/cosmos-sdk/x/crisis"
+ crisiskeeper "github.com/cosmos/cosmos-sdk/x/crisis/keeper"
+ crisistypes "github.com/cosmos/cosmos-sdk/x/crisis/types"
+ distr "github.com/cosmos/cosmos-sdk/x/distribution"
+ distrclient "github.com/cosmos/cosmos-sdk/x/distribution/client"
+ distrkeeper "github.com/cosmos/cosmos-sdk/x/distribution/keeper"
+ distrtypes "github.com/cosmos/cosmos-sdk/x/distribution/types"
+ "github.com/cosmos/cosmos-sdk/x/evidence"
+ evidencekeeper "github.com/cosmos/cosmos-sdk/x/evidence/keeper"
+ evidencetypes "github.com/cosmos/cosmos-sdk/x/evidence/types"
+ feegrant "github.com/cosmos/cosmos-sdk/x/feegrant"
+ feegrantante "github.com/cosmos/cosmos-sdk/x/feegrant/ante"
+ feegrantkeeper "github.com/cosmos/cosmos-sdk/x/feegrant/keeper"
+ feegranttypes "github.com/cosmos/cosmos-sdk/x/feegrant/types"
+ "github.com/cosmos/cosmos-sdk/x/genutil"
+ genutiltypes "github.com/cosmos/cosmos-sdk/x/genutil/types"
+ "github.com/cosmos/cosmos-sdk/x/gov"
+ govkeeper "github.com/cosmos/cosmos-sdk/x/gov/keeper"
+ govtypes "github.com/cosmos/cosmos-sdk/x/gov/types"
+ "github.com/cosmos/cosmos-sdk/x/mint"
+ mintkeeper "github.com/cosmos/cosmos-sdk/x/mint/keeper"
+ minttypes "github.com/cosmos/cosmos-sdk/x/mint/types"
+ "github.com/cosmos/cosmos-sdk/x/params"
+ paramsclient "github.com/cosmos/cosmos-sdk/x/params/client"
+ paramskeeper "github.com/cosmos/cosmos-sdk/x/params/keeper"
+ paramstypes "github.com/cosmos/cosmos-sdk/x/params/types"
+ paramproposal "github.com/cosmos/cosmos-sdk/x/params/types/proposal"
+ "github.com/cosmos/cosmos-sdk/x/slashing"
+ slashingkeeper "github.com/cosmos/cosmos-sdk/x/slashing/keeper"
+ slashingtypes "github.com/cosmos/cosmos-sdk/x/slashing/types"
+ "github.com/cosmos/cosmos-sdk/x/staking"
+ stakingkeeper "github.com/cosmos/cosmos-sdk/x/staking/keeper"
+ stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types"
+ "github.com/cosmos/cosmos-sdk/x/upgrade"
+ upgradeclient "github.com/cosmos/cosmos-sdk/x/upgrade/client"
+ upgradekeeper "github.com/cosmos/cosmos-sdk/x/upgrade/keeper"
+ upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
+ transfer "github.com/cosmos/ibc-go/apps/transfer"
+ ibctransferkeeper "github.com/cosmos/ibc-go/apps/transfer/keeper"
+ ibctransfertypes "github.com/cosmos/ibc-go/apps/transfer/types"
+ ibc "github.com/cosmos/ibc-go/core"
+ ibcclient "github.com/cosmos/ibc-go/core/02-client"
+ ibcclientclient "github.com/cosmos/ibc-go/core/02-client/client"
+ ibcclienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ porttypes "github.com/cosmos/ibc-go/core/05-port/types"
+ ibchost "github.com/cosmos/ibc-go/core/24-host"
+ ibckeeper "github.com/cosmos/ibc-go/core/keeper"
+ ibcmock "github.com/cosmos/ibc-go/testing/mock"
+
+ authz "github.com/cosmos/cosmos-sdk/x/authz"
+ authzkeeper "github.com/cosmos/cosmos-sdk/x/authz/keeper"
+ authztypes "github.com/cosmos/cosmos-sdk/x/authz/types"
+
+ // unnamed import of statik for swagger UI support
+ _ "github.com/cosmos/cosmos-sdk/client/docs/statik"
+)
+
+const appName = "SimApp"
+
+var (
+ // DefaultNodeHome default home directories for the application daemon
+ DefaultNodeHome string
+
+	// ModuleBasics defines the module BasicManager, which is in charge of setting up basic,
+	// non-dependent module elements, such as codec registration
+	// and genesis verification.
+ ModuleBasics = module.NewBasicManager(
+ auth.AppModuleBasic{},
+ genutil.AppModuleBasic{},
+ bank.AppModuleBasic{},
+ capability.AppModuleBasic{},
+ staking.AppModuleBasic{},
+ mint.AppModuleBasic{},
+ distr.AppModuleBasic{},
+ gov.NewAppModuleBasic(
+ paramsclient.ProposalHandler, distrclient.ProposalHandler, upgradeclient.ProposalHandler, upgradeclient.CancelProposalHandler,
+ ibcclientclient.UpdateClientProposalHandler, ibcclientclient.UpgradeProposalHandler,
+ ),
+ params.AppModuleBasic{},
+ crisis.AppModuleBasic{},
+ slashing.AppModuleBasic{},
+ ibc.AppModuleBasic{},
+ feegrant.AppModuleBasic{},
+ upgrade.AppModuleBasic{},
+ evidence.AppModuleBasic{},
+ transfer.AppModuleBasic{},
+ authz.AppModuleBasic{},
+ vesting.AppModuleBasic{},
+ )
+
+ // module account permissions
+ maccPerms = map[string][]string{
+ authtypes.FeeCollectorName: nil,
+ distrtypes.ModuleName: nil,
+ minttypes.ModuleName: {authtypes.Minter},
+ stakingtypes.BondedPoolName: {authtypes.Burner, authtypes.Staking},
+ stakingtypes.NotBondedPoolName: {authtypes.Burner, authtypes.Staking},
+ govtypes.ModuleName: {authtypes.Burner},
+ ibctransfertypes.ModuleName: {authtypes.Minter, authtypes.Burner},
+ }
+
+ // module accounts that are allowed to receive tokens
+ allowedReceivingModAcc = map[string]bool{
+ distrtypes.ModuleName: true,
+ }
+)
+
+var (
+ _ App = (*SimApp)(nil)
+ _ servertypes.Application = (*SimApp)(nil)
+)
+
+// SimApp extends an ABCI application, but with most of its parameters exported.
+// They are exported for convenience in creating helper functions, as object
+// capabilities aren't needed for testing.
+type SimApp struct {
+ *baseapp.BaseApp
+ legacyAmino *codec.LegacyAmino
+ appCodec codec.Marshaler
+ interfaceRegistry types.InterfaceRegistry
+
+ invCheckPeriod uint
+
+ // keys to access the substores
+ keys map[string]*sdk.KVStoreKey
+ tkeys map[string]*sdk.TransientStoreKey
+ memKeys map[string]*sdk.MemoryStoreKey
+
+ // keepers
+ AccountKeeper authkeeper.AccountKeeper
+ BankKeeper bankkeeper.Keeper
+ CapabilityKeeper *capabilitykeeper.Keeper
+ StakingKeeper stakingkeeper.Keeper
+ SlashingKeeper slashingkeeper.Keeper
+ MintKeeper mintkeeper.Keeper
+ DistrKeeper distrkeeper.Keeper
+ GovKeeper govkeeper.Keeper
+ CrisisKeeper crisiskeeper.Keeper
+ UpgradeKeeper upgradekeeper.Keeper
+ ParamsKeeper paramskeeper.Keeper
+ AuthzKeeper authzkeeper.Keeper
+ IBCKeeper *ibckeeper.Keeper // IBC Keeper must be a pointer in the app, so we can SetRouter on it correctly
+ EvidenceKeeper evidencekeeper.Keeper
+ TransferKeeper ibctransferkeeper.Keeper
+ FeeGrantKeeper feegrantkeeper.Keeper
+
+ // make scoped keepers public for test purposes
+ ScopedIBCKeeper capabilitykeeper.ScopedKeeper
+ ScopedTransferKeeper capabilitykeeper.ScopedKeeper
+ ScopedIBCMockKeeper capabilitykeeper.ScopedKeeper
+
+ // the module manager
+ mm *module.Manager
+
+ // simulation manager
+ sm *module.SimulationManager
+
+ // the configurator
+ configurator module.Configurator
+}
+
+func init() {
+ userHomeDir, err := os.UserHomeDir()
+ if err != nil {
+ panic(err)
+ }
+
+ DefaultNodeHome = filepath.Join(userHomeDir, ".simapp")
+}
+
+// NewSimApp returns a reference to an initialized SimApp.
+func NewSimApp(
+ logger log.Logger, db dbm.DB, traceStore io.Writer, loadLatest bool, skipUpgradeHeights map[int64]bool,
+ homePath string, invCheckPeriod uint, encodingConfig simappparams.EncodingConfig,
+ appOpts servertypes.AppOptions, baseAppOptions ...func(*baseapp.BaseApp),
+) *SimApp {
+
+ appCodec := encodingConfig.Marshaler
+ legacyAmino := encodingConfig.Amino
+ interfaceRegistry := encodingConfig.InterfaceRegistry
+
+ bApp := baseapp.NewBaseApp(appName, logger, db, encodingConfig.TxConfig.TxDecoder(), baseAppOptions...)
+ bApp.SetCommitMultiStoreTracer(traceStore)
+ bApp.SetAppVersion(version.Version)
+ bApp.SetInterfaceRegistry(interfaceRegistry)
+
+ keys := sdk.NewKVStoreKeys(
+ authtypes.StoreKey, banktypes.StoreKey, stakingtypes.StoreKey,
+ minttypes.StoreKey, distrtypes.StoreKey, slashingtypes.StoreKey,
+ govtypes.StoreKey, paramstypes.StoreKey, ibchost.StoreKey, upgradetypes.StoreKey, feegranttypes.StoreKey,
+ evidencetypes.StoreKey, ibctransfertypes.StoreKey, capabilitytypes.StoreKey,
+ authztypes.StoreKey,
+ )
+ tkeys := sdk.NewTransientStoreKeys(paramstypes.TStoreKey)
+ memKeys := sdk.NewMemoryStoreKeys(capabilitytypes.MemStoreKey)
+
+ app := &SimApp{
+ BaseApp: bApp,
+ legacyAmino: legacyAmino,
+ appCodec: appCodec,
+ interfaceRegistry: interfaceRegistry,
+ invCheckPeriod: invCheckPeriod,
+ keys: keys,
+ tkeys: tkeys,
+ memKeys: memKeys,
+ }
+
+ app.ParamsKeeper = initParamsKeeper(appCodec, legacyAmino, keys[paramstypes.StoreKey], tkeys[paramstypes.TStoreKey])
+
+ // set the BaseApp's parameter store
+ bApp.SetParamStore(app.ParamsKeeper.Subspace(baseapp.Paramspace).WithKeyTable(paramskeeper.ConsensusParamsKeyTable()))
+
+ // add capability keeper and ScopeToModule for ibc module
+ app.CapabilityKeeper = capabilitykeeper.NewKeeper(appCodec, keys[capabilitytypes.StoreKey], memKeys[capabilitytypes.MemStoreKey])
+ scopedIBCKeeper := app.CapabilityKeeper.ScopeToModule(ibchost.ModuleName)
+ scopedTransferKeeper := app.CapabilityKeeper.ScopeToModule(ibctransfertypes.ModuleName)
+	// NOTE: the IBC mock keeper and application module are used only for testing core IBC. Do
+	// not replicate if you do not need to test core IBC or light clients.
+ scopedIBCMockKeeper := app.CapabilityKeeper.ScopeToModule(ibcmock.ModuleName)
+
+ // add keepers
+ app.AccountKeeper = authkeeper.NewAccountKeeper(
+ appCodec, keys[authtypes.StoreKey], app.GetSubspace(authtypes.ModuleName), authtypes.ProtoBaseAccount, maccPerms,
+ )
+ app.BankKeeper = bankkeeper.NewBaseKeeper(
+ appCodec, keys[banktypes.StoreKey], app.AccountKeeper, app.GetSubspace(banktypes.ModuleName), app.BlockedAddrs(),
+ )
+ stakingKeeper := stakingkeeper.NewKeeper(
+ appCodec, keys[stakingtypes.StoreKey], app.AccountKeeper, app.BankKeeper, app.GetSubspace(stakingtypes.ModuleName),
+ )
+ app.MintKeeper = mintkeeper.NewKeeper(
+ appCodec, keys[minttypes.StoreKey], app.GetSubspace(minttypes.ModuleName), &stakingKeeper,
+ app.AccountKeeper, app.BankKeeper, authtypes.FeeCollectorName,
+ )
+ app.DistrKeeper = distrkeeper.NewKeeper(
+ appCodec, keys[distrtypes.StoreKey], app.GetSubspace(distrtypes.ModuleName), app.AccountKeeper, app.BankKeeper,
+ &stakingKeeper, authtypes.FeeCollectorName, app.ModuleAccountAddrs(),
+ )
+ app.SlashingKeeper = slashingkeeper.NewKeeper(
+ appCodec, keys[slashingtypes.StoreKey], &stakingKeeper, app.GetSubspace(slashingtypes.ModuleName),
+ )
+ app.CrisisKeeper = crisiskeeper.NewKeeper(
+ app.GetSubspace(crisistypes.ModuleName), invCheckPeriod, app.BankKeeper, authtypes.FeeCollectorName,
+ )
+
+ app.FeeGrantKeeper = feegrantkeeper.NewKeeper(appCodec, keys[feegranttypes.StoreKey], app.AccountKeeper)
+ app.UpgradeKeeper = upgradekeeper.NewKeeper(skipUpgradeHeights, keys[upgradetypes.StoreKey], appCodec, homePath)
+
+ // register the staking hooks
+ // NOTE: stakingKeeper above is passed by reference, so that it will contain these hooks
+ app.StakingKeeper = *stakingKeeper.SetHooks(
+ stakingtypes.NewMultiStakingHooks(app.DistrKeeper.Hooks(), app.SlashingKeeper.Hooks()),
+ )
+
+ // Create IBC Keeper
+ app.IBCKeeper = ibckeeper.NewKeeper(
+ appCodec, keys[ibchost.StoreKey], app.GetSubspace(ibchost.ModuleName), app.StakingKeeper, app.UpgradeKeeper, scopedIBCKeeper,
+ )
+
+ app.AuthzKeeper = authzkeeper.NewKeeper(keys[authztypes.StoreKey], appCodec, app.BaseApp.MsgServiceRouter())
+
+ // register the proposal types
+ govRouter := govtypes.NewRouter()
+ govRouter.AddRoute(govtypes.RouterKey, govtypes.ProposalHandler).
+ AddRoute(paramproposal.RouterKey, params.NewParamChangeProposalHandler(app.ParamsKeeper)).
+ AddRoute(distrtypes.RouterKey, distr.NewCommunityPoolSpendProposalHandler(app.DistrKeeper)).
+ AddRoute(upgradetypes.RouterKey, upgrade.NewSoftwareUpgradeProposalHandler(app.UpgradeKeeper)).
+ AddRoute(ibcclienttypes.RouterKey, ibcclient.NewClientProposalHandler(app.IBCKeeper.ClientKeeper))
+ app.GovKeeper = govkeeper.NewKeeper(
+ appCodec, keys[govtypes.StoreKey], app.GetSubspace(govtypes.ModuleName), app.AccountKeeper, app.BankKeeper,
+ &stakingKeeper, govRouter,
+ )
+
+ // Create Transfer Keepers
+ app.TransferKeeper = ibctransferkeeper.NewKeeper(
+ appCodec, keys[ibctransfertypes.StoreKey], app.GetSubspace(ibctransfertypes.ModuleName),
+ app.IBCKeeper.ChannelKeeper, &app.IBCKeeper.PortKeeper,
+ app.AccountKeeper, app.BankKeeper, scopedTransferKeeper,
+ )
+ transferModule := transfer.NewAppModule(app.TransferKeeper)
+
+	// NOTE: the IBC mock keeper and application module are used only for testing core IBC. Do
+	// not replicate if you do not need to test core IBC or light clients.
+ mockModule := ibcmock.NewAppModule(scopedIBCMockKeeper)
+
+ // Create static IBC router, add transfer route, then set and seal it
+ ibcRouter := porttypes.NewRouter()
+ ibcRouter.AddRoute(ibctransfertypes.ModuleName, transferModule)
+ ibcRouter.AddRoute(ibcmock.ModuleName, mockModule)
+ app.IBCKeeper.SetRouter(ibcRouter)
+
+ // create evidence keeper with router
+ evidenceKeeper := evidencekeeper.NewKeeper(
+ appCodec, keys[evidencetypes.StoreKey], &app.StakingKeeper, app.SlashingKeeper,
+ )
+ // If evidence needs to be handled for the app, set routes in router here and seal
+ app.EvidenceKeeper = *evidenceKeeper
+
+ /**** Module Options ****/
+
+ // NOTE: we may consider parsing `appOpts` inside module constructors. For the moment
+ // we prefer to be more strict in what arguments the modules expect.
+ var skipGenesisInvariants = cast.ToBool(appOpts.Get(crisis.FlagSkipGenesisInvariants))
+
+ // NOTE: Any module instantiated in the module manager that is later modified
+ // must be passed by reference here.
+ app.mm = module.NewManager(
+ genutil.NewAppModule(
+ app.AccountKeeper, app.StakingKeeper, app.BaseApp.DeliverTx,
+ encodingConfig.TxConfig,
+ ),
+ auth.NewAppModule(appCodec, app.AccountKeeper, authsims.RandomGenesisAccounts),
+ vesting.NewAppModule(app.AccountKeeper, app.BankKeeper),
+ bank.NewAppModule(appCodec, app.BankKeeper, app.AccountKeeper),
+ capability.NewAppModule(appCodec, *app.CapabilityKeeper),
+ crisis.NewAppModule(&app.CrisisKeeper, skipGenesisInvariants),
+ feegrant.NewAppModule(appCodec, app.AccountKeeper, app.BankKeeper, app.FeeGrantKeeper, app.interfaceRegistry),
+ gov.NewAppModule(appCodec, app.GovKeeper, app.AccountKeeper, app.BankKeeper),
+ mint.NewAppModule(appCodec, app.MintKeeper, app.AccountKeeper),
+ slashing.NewAppModule(appCodec, app.SlashingKeeper, app.AccountKeeper, app.BankKeeper, app.StakingKeeper),
+ distr.NewAppModule(appCodec, app.DistrKeeper, app.AccountKeeper, app.BankKeeper, app.StakingKeeper),
+ staking.NewAppModule(appCodec, app.StakingKeeper, app.AccountKeeper, app.BankKeeper),
+ upgrade.NewAppModule(app.UpgradeKeeper),
+ evidence.NewAppModule(app.EvidenceKeeper),
+ ibc.NewAppModule(app.IBCKeeper),
+ params.NewAppModule(app.ParamsKeeper),
+ authz.NewAppModule(appCodec, app.AuthzKeeper, app.AccountKeeper, app.BankKeeper, app.interfaceRegistry),
+ transferModule,
+ )
+
+ // During begin block slashing happens after distr.BeginBlocker so that
+ // there is nothing left over in the validator fee pool, so as to keep the
+ // CanWithdrawInvariant invariant.
+ // NOTE: staking module is required if HistoricalEntries param > 0
+ app.mm.SetOrderBeginBlockers(
+ upgradetypes.ModuleName, minttypes.ModuleName, distrtypes.ModuleName, slashingtypes.ModuleName,
+ evidencetypes.ModuleName, stakingtypes.ModuleName, ibchost.ModuleName,
+ )
+ app.mm.SetOrderEndBlockers(crisistypes.ModuleName, govtypes.ModuleName, stakingtypes.ModuleName)
+
+ // NOTE: The genutils module must occur after staking so that pools are
+ // properly initialized with tokens from genesis accounts.
+ // NOTE: Capability module must occur first so that it can initialize any capabilities
+ // so that other modules that want to create or claim capabilities afterwards in InitChain
+ // can do so safely.
+ app.mm.SetOrderInitGenesis(
+ capabilitytypes.ModuleName, authtypes.ModuleName, banktypes.ModuleName, distrtypes.ModuleName, stakingtypes.ModuleName,
+ slashingtypes.ModuleName, govtypes.ModuleName, minttypes.ModuleName, crisistypes.ModuleName,
+ ibchost.ModuleName, genutiltypes.ModuleName, evidencetypes.ModuleName, authztypes.ModuleName, ibctransfertypes.ModuleName,
+ feegranttypes.ModuleName,
+ )
+
+ app.mm.RegisterInvariants(&app.CrisisKeeper)
+ app.mm.RegisterRoutes(app.Router(), app.QueryRouter(), encodingConfig.Amino)
+ app.configurator = module.NewConfigurator(app.MsgServiceRouter(), app.GRPCQueryRouter())
+ app.mm.RegisterServices(app.configurator)
+
+ // add test gRPC service for testing gRPC queries in isolation
+ testdata.RegisterQueryServer(app.GRPCQueryRouter(), testdata.QueryImpl{})
+
+ // create the simulation manager and define the order of the modules for deterministic simulations
+ //
+	// NOTE: this is not required for apps that don't use the simulator for fuzz testing
+ // transactions
+ app.sm = module.NewSimulationManager(
+ auth.NewAppModule(appCodec, app.AccountKeeper, authsims.RandomGenesisAccounts),
+ bank.NewAppModule(appCodec, app.BankKeeper, app.AccountKeeper),
+ capability.NewAppModule(appCodec, *app.CapabilityKeeper),
+ feegrant.NewAppModule(appCodec, app.AccountKeeper, app.BankKeeper, app.FeeGrantKeeper, app.interfaceRegistry),
+ gov.NewAppModule(appCodec, app.GovKeeper, app.AccountKeeper, app.BankKeeper),
+ mint.NewAppModule(appCodec, app.MintKeeper, app.AccountKeeper),
+ staking.NewAppModule(appCodec, app.StakingKeeper, app.AccountKeeper, app.BankKeeper),
+ distr.NewAppModule(appCodec, app.DistrKeeper, app.AccountKeeper, app.BankKeeper, app.StakingKeeper),
+ slashing.NewAppModule(appCodec, app.SlashingKeeper, app.AccountKeeper, app.BankKeeper, app.StakingKeeper),
+ params.NewAppModule(app.ParamsKeeper),
+ evidence.NewAppModule(app.EvidenceKeeper),
+ authz.NewAppModule(appCodec, app.AuthzKeeper, app.AccountKeeper, app.BankKeeper, app.interfaceRegistry),
+ ibc.NewAppModule(app.IBCKeeper),
+ transferModule,
+ )
+
+ app.sm.RegisterStoreDecoders()
+
+ // initialize stores
+ app.MountKVStores(keys)
+ app.MountTransientStores(tkeys)
+ app.MountMemoryStores(memKeys)
+
+ // initialize BaseApp
+ app.SetInitChainer(app.InitChainer)
+ app.SetBeginBlocker(app.BeginBlocker)
+ app.SetAnteHandler(
+ feegrantante.NewAnteHandler(
+ app.AccountKeeper, app.BankKeeper, app.FeeGrantKeeper, ante.DefaultSigVerificationGasConsumer,
+ encodingConfig.TxConfig.SignModeHandler(),
+ ),
+ )
+ app.SetEndBlocker(app.EndBlocker)
+
+ if loadLatest {
+ if err := app.LoadLatestVersion(); err != nil {
+ tmos.Exit(err.Error())
+ }
+
+ // Initialize and seal the capability keeper so all persistent capabilities
+ // are loaded in-memory and prevent any further modules from creating scoped
+ // sub-keepers.
+ // This must be done during creation of baseapp rather than in InitChain so
+ // that in-memory capabilities get regenerated on app restart.
+ // Note that since this reads from the store, we can only perform it when
+ // `loadLatest` is set to true.
+ ctx := app.BaseApp.NewUncachedContext(true, tmproto.Header{})
+ app.CapabilityKeeper.InitializeAndSeal(ctx)
+ }
+
+ app.ScopedIBCKeeper = scopedIBCKeeper
+ app.ScopedTransferKeeper = scopedTransferKeeper
+
+	// NOTE: the IBC mock keeper and application module are used only for testing core IBC. Do
+	// not replicate if you do not need to test core IBC or light clients.
+ app.ScopedIBCMockKeeper = scopedIBCMockKeeper
+
+ return app
+}
+
+// Name returns the name of the App
+func (app *SimApp) Name() string { return app.BaseApp.Name() }
+
+// BeginBlocker application updates every begin block
+func (app *SimApp) BeginBlocker(ctx sdk.Context, req abci.RequestBeginBlock) abci.ResponseBeginBlock {
+ return app.mm.BeginBlock(ctx, req)
+}
+
+// EndBlocker application updates every end block
+func (app *SimApp) EndBlocker(ctx sdk.Context, req abci.RequestEndBlock) abci.ResponseEndBlock {
+ return app.mm.EndBlock(ctx, req)
+}
+
+// InitChainer application update at chain initialization
+func (app *SimApp) InitChainer(ctx sdk.Context, req abci.RequestInitChain) abci.ResponseInitChain {
+ var genesisState GenesisState
+ if err := json.Unmarshal(req.AppStateBytes, &genesisState); err != nil {
+ panic(err)
+ }
+ return app.mm.InitGenesis(ctx, app.appCodec, genesisState)
+}
+
+// LoadHeight loads a particular height
+func (app *SimApp) LoadHeight(height int64) error {
+ return app.LoadVersion(height)
+}
+
+// ModuleAccountAddrs returns all the app's module account addresses.
+func (app *SimApp) ModuleAccountAddrs() map[string]bool {
+ modAccAddrs := make(map[string]bool)
+ for acc := range maccPerms {
+ modAccAddrs[authtypes.NewModuleAddress(acc).String()] = true
+ }
+
+ return modAccAddrs
+}
+
+// BlockedAddrs returns all the app's module account addresses that are not
+// allowed to receive external tokens.
+func (app *SimApp) BlockedAddrs() map[string]bool {
+ blockedAddrs := make(map[string]bool)
+ for acc := range maccPerms {
+ blockedAddrs[authtypes.NewModuleAddress(acc).String()] = !allowedReceivingModAcc[acc]
+ }
+
+ return blockedAddrs
+}
+
+// LegacyAmino returns SimApp's amino codec.
+//
+// NOTE: This is solely to be used for testing purposes as it may be desirable
+// for modules to register their own custom testing types.
+func (app *SimApp) LegacyAmino() *codec.LegacyAmino {
+ return app.legacyAmino
+}
+
+// AppCodec returns SimApp's app codec.
+//
+// NOTE: This is solely to be used for testing purposes as it may be desirable
+// for modules to register their own custom testing types.
+func (app *SimApp) AppCodec() codec.Marshaler {
+ return app.appCodec
+}
+
+// InterfaceRegistry returns SimApp's InterfaceRegistry
+func (app *SimApp) InterfaceRegistry() types.InterfaceRegistry {
+ return app.interfaceRegistry
+}
+
+// GetKey returns the KVStoreKey for the provided store key.
+//
+// NOTE: This is solely to be used for testing purposes.
+func (app *SimApp) GetKey(storeKey string) *sdk.KVStoreKey {
+ return app.keys[storeKey]
+}
+
+// GetTKey returns the TransientStoreKey for the provided store key.
+//
+// NOTE: This is solely to be used for testing purposes.
+func (app *SimApp) GetTKey(storeKey string) *sdk.TransientStoreKey {
+ return app.tkeys[storeKey]
+}
+
+// GetMemKey returns the MemStoreKey for the provided mem key.
+//
+// NOTE: This is solely used for testing purposes.
+func (app *SimApp) GetMemKey(storeKey string) *sdk.MemoryStoreKey {
+ return app.memKeys[storeKey]
+}
+
+// GetSubspace returns a param subspace for a given module name.
+//
+// NOTE: This is solely to be used for testing purposes.
+func (app *SimApp) GetSubspace(moduleName string) paramstypes.Subspace {
+ subspace, _ := app.ParamsKeeper.GetSubspace(moduleName)
+ return subspace
+}
+
+// SimulationManager implements the SimulationApp interface
+func (app *SimApp) SimulationManager() *module.SimulationManager {
+ return app.sm
+}
+
+// RegisterAPIRoutes registers all application module routes with the provided
+// API server.
+func (app *SimApp) RegisterAPIRoutes(apiSvr *api.Server, apiConfig config.APIConfig) {
+ clientCtx := apiSvr.ClientCtx
+ rpc.RegisterRoutes(clientCtx, apiSvr.Router)
+ // Register legacy tx routes.
+ authrest.RegisterTxRoutes(clientCtx, apiSvr.Router)
+ // Register new tx routes from grpc-gateway.
+ authtx.RegisterGRPCGatewayRoutes(clientCtx, apiSvr.GRPCGatewayRouter)
+ // Register new tendermint queries routes from grpc-gateway.
+ tmservice.RegisterGRPCGatewayRoutes(clientCtx, apiSvr.GRPCGatewayRouter)
+
+ // Register legacy and grpc-gateway routes for all modules.
+ ModuleBasics.RegisterRESTRoutes(clientCtx, apiSvr.Router)
+ ModuleBasics.RegisterGRPCGatewayRoutes(clientCtx, apiSvr.GRPCGatewayRouter)
+
+ // register swagger API from root so that other applications can override easily
+ if apiConfig.Swagger {
+ RegisterSwaggerAPI(clientCtx, apiSvr.Router)
+ }
+}
+
+// RegisterTxService implements the Application.RegisterTxService method.
+func (app *SimApp) RegisterTxService(clientCtx client.Context) {
+ authtx.RegisterTxService(app.BaseApp.GRPCQueryRouter(), clientCtx, app.BaseApp.Simulate, app.interfaceRegistry)
+}
+
+// RegisterTendermintService implements the Application.RegisterTendermintService method.
+func (app *SimApp) RegisterTendermintService(clientCtx client.Context) {
+ tmservice.RegisterTendermintService(app.BaseApp.GRPCQueryRouter(), clientCtx, app.interfaceRegistry)
+}
+
+// RunMigrations performs in-place store migrations for all modules. This
+// function MUST be only called by x/upgrade UpgradeHandler.
+//
+// `migrateFromVersions` is a map of moduleName to fromVersion (uint64), where
+// fromVersion denotes the version from which we should migrate the module, the
+// target version being the module's latest ConsensusVersion.
+//
+// Example:
+// cfg := module.NewConfigurator(...)
+// app.UpgradeKeeper.SetUpgradeHandler("store-migration", func(ctx sdk.Context, plan upgradetypes.Plan) {
+// err := app.RunMigrations(ctx, module.MigrationMap{
+// "bank": 1, // Migrate x/bank from v1 to current x/bank's ConsensusVersion
+// "staking": 8, // Migrate x/staking from v8 to current x/staking's ConsensusVersion
+// })
+// if err != nil {
+// panic(err)
+// }
+// })
+func (app *SimApp) RunMigrations(ctx sdk.Context, migrateFromVersions module.MigrationMap) error {
+ return app.mm.RunMigrations(ctx, app.configurator, migrateFromVersions)
+}
+
+// RegisterSwaggerAPI registers swagger route with API Server
+func RegisterSwaggerAPI(ctx client.Context, rtr *mux.Router) {
+ statikFS, err := fs.New()
+ if err != nil {
+ panic(err)
+ }
+
+ staticServer := http.FileServer(statikFS)
+ rtr.PathPrefix("/swagger/").Handler(http.StripPrefix("/swagger/", staticServer))
+}
+
+// GetMaccPerms returns a copy of the module account permissions
+func GetMaccPerms() map[string][]string {
+ dupMaccPerms := make(map[string][]string)
+ for k, v := range maccPerms {
+ dupMaccPerms[k] = v
+ }
+ return dupMaccPerms
+}
+
+// initParamsKeeper initializes the params keeper and its subspaces
+func initParamsKeeper(appCodec codec.BinaryMarshaler, legacyAmino *codec.LegacyAmino, key, tkey sdk.StoreKey) paramskeeper.Keeper {
+ paramsKeeper := paramskeeper.NewKeeper(appCodec, legacyAmino, key, tkey)
+
+ paramsKeeper.Subspace(authtypes.ModuleName)
+ paramsKeeper.Subspace(banktypes.ModuleName)
+ paramsKeeper.Subspace(stakingtypes.ModuleName)
+ paramsKeeper.Subspace(minttypes.ModuleName)
+ paramsKeeper.Subspace(distrtypes.ModuleName)
+ paramsKeeper.Subspace(slashingtypes.ModuleName)
+ paramsKeeper.Subspace(govtypes.ModuleName).WithKeyTable(govtypes.ParamKeyTable())
+ paramsKeeper.Subspace(crisistypes.ModuleName)
+ paramsKeeper.Subspace(ibctransfertypes.ModuleName)
+ paramsKeeper.Subspace(ibchost.ModuleName)
+
+ return paramsKeeper
+}
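The wiring in app.go above follows one pattern for every IBC application: scope a capability keeper for the module, construct the module keeper with the channel and port keepers, and add its AppModule to the static port router before SetRouter seals it. A minimal sketch of adding one more route, assuming a hypothetical `myapp` IBC module; the package names, keeper field, and constructors below are illustrative and not part of this patch:

	// Hypothetical extra IBC application, wired the same way as transferModule above.
	// A store key and param subspace for the module are assumed to be registered as well.
	scopedMyAppKeeper := app.CapabilityKeeper.ScopeToModule(myapptypes.ModuleName)
	app.MyAppKeeper = myappkeeper.NewKeeper(
		appCodec, keys[myapptypes.StoreKey], app.GetSubspace(myapptypes.ModuleName),
		app.IBCKeeper.ChannelKeeper, &app.IBCKeeper.PortKeeper,
		app.AccountKeeper, app.BankKeeper, scopedMyAppKeeper,
	)
	myAppModule := myapp.NewAppModule(app.MyAppKeeper)

	// Routes must be added before SetRouter, which seals the router.
	ibcRouter.AddRoute(myapptypes.ModuleName, myAppModule)
	app.IBCKeeper.SetRouter(ibcRouter)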
diff --git a/testing/simapp/app_test.go b/testing/simapp/app_test.go
new file mode 100644
index 00000000..38adb14a
--- /dev/null
+++ b/testing/simapp/app_test.go
@@ -0,0 +1,193 @@
+package simapp
+
+import (
+ "encoding/json"
+ "os"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+ abci "github.com/tendermint/tendermint/abci/types"
+ "github.com/tendermint/tendermint/libs/log"
+ tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
+ dbm "github.com/tendermint/tm-db"
+
+ "github.com/cosmos/cosmos-sdk/baseapp"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/types/module"
+ "github.com/cosmos/cosmos-sdk/x/auth"
+ "github.com/cosmos/cosmos-sdk/x/auth/vesting"
+ "github.com/cosmos/cosmos-sdk/x/authz"
+ banktypes "github.com/cosmos/cosmos-sdk/x/bank/types"
+ "github.com/cosmos/cosmos-sdk/x/capability"
+ "github.com/cosmos/cosmos-sdk/x/crisis"
+ "github.com/cosmos/cosmos-sdk/x/distribution"
+ "github.com/cosmos/cosmos-sdk/x/evidence"
+ feegrant "github.com/cosmos/cosmos-sdk/x/feegrant"
+ "github.com/cosmos/cosmos-sdk/x/genutil"
+ "github.com/cosmos/cosmos-sdk/x/gov"
+ "github.com/cosmos/cosmos-sdk/x/mint"
+ "github.com/cosmos/cosmos-sdk/x/params"
+ "github.com/cosmos/cosmos-sdk/x/slashing"
+ "github.com/cosmos/cosmos-sdk/x/staking"
+ "github.com/cosmos/cosmos-sdk/x/upgrade"
+ transfer "github.com/cosmos/ibc-go/apps/transfer"
+ ibc "github.com/cosmos/ibc-go/core"
+)
+
+func TestSimAppExportAndBlockedAddrs(t *testing.T) {
+ encCfg := MakeTestEncodingConfig()
+ db := dbm.NewMemDB()
+ app := NewSimApp(log.NewTMLogger(log.NewSyncWriter(os.Stdout)), db, nil, true, map[int64]bool{}, DefaultNodeHome, 0, encCfg, EmptyAppOptions{})
+
+ for acc := range maccPerms {
+ require.Equal(t, !allowedReceivingModAcc[acc], app.BankKeeper.BlockedAddr(app.AccountKeeper.GetModuleAddress(acc)),
+ "ensure that blocked addresses are properly set in bank keeper")
+ }
+
+ genesisState := NewDefaultGenesisState(encCfg.Marshaler)
+ stateBytes, err := json.MarshalIndent(genesisState, "", " ")
+ require.NoError(t, err)
+
+ // Initialize the chain
+ app.InitChain(
+ abci.RequestInitChain{
+ Validators: []abci.ValidatorUpdate{},
+ AppStateBytes: stateBytes,
+ },
+ )
+ app.Commit()
+
+	// Make a new app object with the same db, so that InitChain has not been called on it
+ app2 := NewSimApp(log.NewTMLogger(log.NewSyncWriter(os.Stdout)), db, nil, true, map[int64]bool{}, DefaultNodeHome, 0, encCfg, EmptyAppOptions{})
+ _, err = app2.ExportAppStateAndValidators(false, []string{})
+ require.NoError(t, err, "ExportAppStateAndValidators should not have an error")
+}
+
+func TestGetMaccPerms(t *testing.T) {
+ dup := GetMaccPerms()
+ require.Equal(t, maccPerms, dup, "duplicated module account permissions differed from actual module account permissions")
+}
+
+func TestRunMigrations(t *testing.T) {
+ db := dbm.NewMemDB()
+ encCfg := MakeTestEncodingConfig()
+ logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
+ app := NewSimApp(logger, db, nil, true, map[int64]bool{}, DefaultNodeHome, 0, encCfg, EmptyAppOptions{})
+
+ // Create a new baseapp and configurator for the purpose of this test.
+ bApp := baseapp.NewBaseApp(appName, logger, db, encCfg.TxConfig.TxDecoder())
+ bApp.SetCommitMultiStoreTracer(nil)
+ bApp.SetInterfaceRegistry(encCfg.InterfaceRegistry)
+ app.BaseApp = bApp
+ app.configurator = module.NewConfigurator(app.MsgServiceRouter(), app.GRPCQueryRouter())
+
+ // We register all modules on the Configurator, except x/bank. x/bank will
+ // serve as the test subject on which we run the migration tests.
+ //
+ // The loop below is the same as calling `RegisterServices` on
+ // ModuleManager, except that we skip x/bank.
+ for _, module := range app.mm.Modules {
+ if module.Name() == banktypes.ModuleName {
+ continue
+ }
+
+ module.RegisterServices(app.configurator)
+ }
+
+ // Initialize the chain
+ app.InitChain(abci.RequestInitChain{})
+ app.Commit()
+
+ testCases := []struct {
+ name string
+ moduleName string
+ forVersion uint64
+ expRegErr bool // errors while registering migration
+ expRegErrMsg string
+ expRunErr bool // errors while running migration
+ expRunErrMsg string
+ expCalled int
+ }{
+ {
+ "cannot register migration for version 0",
+ "bank", 0,
+ true, "module migration versions should start at 1: invalid version", false, "", 0,
+ },
+ {
+ "throws error on RunMigrations if no migration registered for bank",
+ "", 1,
+ false, "", true, "no migrations found for module bank: not found", 0,
+ },
+ {
+ "can register and run migration handler for x/bank",
+ "bank", 1,
+ false, "", false, "", 1,
+ },
+ {
+ "cannot register migration handler for same module & forVersion",
+ "bank", 1,
+ true, "another migration for module bank and version 1 already exists: internal logic error", false, "", 0,
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ var err error
+
+ // Since it's very hard to test actual in-place store migrations in
+ // tests (due to the difficulty of maintaining multiple versions of a
+ // module), we're just testing here that the migration logic is
+ // called.
+ called := 0
+
+ if tc.moduleName != "" {
+ // Register migration for module from version `forVersion` to `forVersion+1`.
+ err = app.configurator.RegisterMigration(tc.moduleName, tc.forVersion, func(sdk.Context) error {
+ called++
+
+ return nil
+ })
+
+ if tc.expRegErr {
+ require.EqualError(t, err, tc.expRegErrMsg)
+
+ return
+ }
+ }
+ require.NoError(t, err)
+
+			// Run migrations only for bank. That's why we set the initial
+			// version for bank to 1, and for all other modules we use
+			// their latest ConsensusVersion.
+ err = app.RunMigrations(
+ app.NewContext(true, tmproto.Header{Height: app.LastBlockHeight()}),
+ module.MigrationMap{
+ "bank": 1,
+ "auth": auth.AppModule{}.ConsensusVersion(),
+ "authz": authz.AppModule{}.ConsensusVersion(),
+ "staking": staking.AppModule{}.ConsensusVersion(),
+ "mint": mint.AppModule{}.ConsensusVersion(),
+ "distribution": distribution.AppModule{}.ConsensusVersion(),
+ "slashing": slashing.AppModule{}.ConsensusVersion(),
+ "gov": gov.AppModule{}.ConsensusVersion(),
+ "params": params.AppModule{}.ConsensusVersion(),
+ "ibc": ibc.AppModule{}.ConsensusVersion(),
+ "upgrade": upgrade.AppModule{}.ConsensusVersion(),
+ "vesting": vesting.AppModule{}.ConsensusVersion(),
+ "feegrant": feegrant.AppModule{}.ConsensusVersion(),
+ "transfer": transfer.AppModule{}.ConsensusVersion(),
+ "evidence": evidence.AppModule{}.ConsensusVersion(),
+ "crisis": crisis.AppModule{}.ConsensusVersion(),
+ "genutil": genutil.AppModule{}.ConsensusVersion(),
+ "capability": capability.AppModule{}.ConsensusVersion(),
+ },
+ )
+ if tc.expRunErr {
+ require.EqualError(t, err, tc.expRunErrMsg)
+ } else {
+ require.NoError(t, err)
+ require.Equal(t, tc.expCalled, called)
+ }
+ })
+ }
+}
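The test above registers migration handlers on the Configurator directly; in a real module the same `RegisterMigration` call lives in the module's `RegisterServices` method, which is what `RunMigrations` later invokes. A minimal module-side sketch using the signature exercised by the test; the module type, module name, and migration body are placeholders:

	// Sketch only: register an in-place store migration from consensus version 1 to 2.
	func (am AppModule) RegisterServices(cfg module.Configurator) {
		if err := cfg.RegisterMigration(types.ModuleName, 1, func(ctx sdk.Context) error {
			// perform the v1 -> v2 store migration here
			return nil
		}); err != nil {
			panic(err)
		}
	}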
diff --git a/testing/simapp/config.go b/testing/simapp/config.go
new file mode 100644
index 00000000..98df982b
--- /dev/null
+++ b/testing/simapp/config.go
@@ -0,0 +1,75 @@
+package simapp
+
+import (
+ "flag"
+
+ "github.com/cosmos/cosmos-sdk/types/simulation"
+)
+
+// List of available flags for the simulator
+var (
+ FlagGenesisFileValue string
+ FlagParamsFileValue string
+ FlagExportParamsPathValue string
+ FlagExportParamsHeightValue int
+ FlagExportStatePathValue string
+ FlagExportStatsPathValue string
+ FlagSeedValue int64
+ FlagInitialBlockHeightValue int
+ FlagNumBlocksValue int
+ FlagBlockSizeValue int
+ FlagLeanValue bool
+ FlagCommitValue bool
+ FlagOnOperationValue bool // TODO: Remove in favor of binary search for invariant violation
+ FlagAllInvariantsValue bool
+
+ FlagEnabledValue bool
+ FlagVerboseValue bool
+ FlagPeriodValue uint
+ FlagGenesisTimeValue int64
+)
+
+// GetSimulatorFlags registers all the available simulation flags so their values are populated once flags are parsed
+func GetSimulatorFlags() {
+ // config fields
+ flag.StringVar(&FlagGenesisFileValue, "Genesis", "", "custom simulation genesis file; cannot be used with params file")
+ flag.StringVar(&FlagParamsFileValue, "Params", "", "custom simulation params file which overrides any random params; cannot be used with genesis")
+ flag.StringVar(&FlagExportParamsPathValue, "ExportParamsPath", "", "custom file path to save the exported params JSON")
+ flag.IntVar(&FlagExportParamsHeightValue, "ExportParamsHeight", 0, "height to which export the randomly generated params")
+ flag.StringVar(&FlagExportStatePathValue, "ExportStatePath", "", "custom file path to save the exported app state JSON")
+ flag.StringVar(&FlagExportStatsPathValue, "ExportStatsPath", "", "custom file path to save the exported simulation statistics JSON")
+ flag.Int64Var(&FlagSeedValue, "Seed", 42, "simulation random seed")
+ flag.IntVar(&FlagInitialBlockHeightValue, "InitialBlockHeight", 1, "initial block to start the simulation")
+ flag.IntVar(&FlagNumBlocksValue, "NumBlocks", 500, "number of new blocks to simulate from the initial block height")
+ flag.IntVar(&FlagBlockSizeValue, "BlockSize", 200, "operations per block")
+ flag.BoolVar(&FlagLeanValue, "Lean", false, "lean simulation log output")
+ flag.BoolVar(&FlagCommitValue, "Commit", false, "have the simulation commit")
+ flag.BoolVar(&FlagOnOperationValue, "SimulateEveryOperation", false, "run slow invariants every operation")
+ flag.BoolVar(&FlagAllInvariantsValue, "PrintAllInvariants", false, "print all invariants if a broken invariant is found")
+
+ // simulation flags
+ flag.BoolVar(&FlagEnabledValue, "Enabled", false, "enable the simulation")
+ flag.BoolVar(&FlagVerboseValue, "Verbose", false, "verbose log output")
+ flag.UintVar(&FlagPeriodValue, "Period", 0, "run slow invariants only once every period assertions")
+ flag.Int64Var(&FlagGenesisTimeValue, "GenesisTime", 0, "override genesis UNIX time instead of using a random UNIX time")
+}
+
+// NewConfigFromFlags creates a simulation Config from the retrieved values of the flags.
+func NewConfigFromFlags() simulation.Config {
+ return simulation.Config{
+ GenesisFile: FlagGenesisFileValue,
+ ParamsFile: FlagParamsFileValue,
+ ExportParamsPath: FlagExportParamsPathValue,
+ ExportParamsHeight: FlagExportParamsHeightValue,
+ ExportStatePath: FlagExportStatePathValue,
+ ExportStatsPath: FlagExportStatsPathValue,
+ Seed: FlagSeedValue,
+ InitialBlockHeight: FlagInitialBlockHeightValue,
+ NumBlocks: FlagNumBlocksValue,
+ BlockSize: FlagBlockSizeValue,
+ Lean: FlagLeanValue,
+ Commit: FlagCommitValue,
+ OnOperation: FlagOnOperationValue,
+ AllInvariants: FlagAllInvariantsValue,
+ }
+}
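Because these flags are registered on the standard library flag set, they can be passed to `go test` alongside the usual testing flags; sim_test.go below calls `GetSimulatorFlags` from an `init` function so the values are populated before the simulations run. An illustrative invocation, with flag values chosen arbitrarily:

	go test github.com/cosmos/ibc-go/testing/simapp -run TestFullAppSimulation \
		-Enabled=true -NumBlocks=100 -BlockSize=200 -Commit=true -Seed=99 -Period=5 -v -timeout 24h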
diff --git a/testing/simapp/encoding.go b/testing/simapp/encoding.go
new file mode 100644
index 00000000..d94d5a8d
--- /dev/null
+++ b/testing/simapp/encoding.go
@@ -0,0 +1,19 @@
+package simapp
+
+import (
+ "github.com/cosmos/cosmos-sdk/std"
+ simappparams "github.com/cosmos/ibc-go/testing/simapp/params"
+)
+
+// MakeTestEncodingConfig creates an EncodingConfig for testing. This function
+// should be used only in tests or when creating a new app instance (NewApp*()).
+// App users shouldn't create new codecs - use the app.AppCodec instead.
+// [DEPRECATED]
+func MakeTestEncodingConfig() simappparams.EncodingConfig {
+ encodingConfig := simappparams.MakeTestEncodingConfig()
+ std.RegisterLegacyAminoCodec(encodingConfig.Amino)
+ std.RegisterInterfaces(encodingConfig.InterfaceRegistry)
+ ModuleBasics.RegisterLegacyAminoCodec(encodingConfig.Amino)
+ ModuleBasics.RegisterInterfaces(encodingConfig.InterfaceRegistry)
+ return encodingConfig
+}
diff --git a/testing/simapp/export.go b/testing/simapp/export.go
new file mode 100644
index 00000000..8d09e333
--- /dev/null
+++ b/testing/simapp/export.go
@@ -0,0 +1,193 @@
+package simapp
+
+import (
+ "encoding/json"
+ "log"
+
+ tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
+
+ servertypes "github.com/cosmos/cosmos-sdk/server/types"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ slashingtypes "github.com/cosmos/cosmos-sdk/x/slashing/types"
+ "github.com/cosmos/cosmos-sdk/x/staking"
+ stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types"
+)
+
+// ExportAppStateAndValidators exports the state of the application for a genesis
+// file.
+func (app *SimApp) ExportAppStateAndValidators(
+ forZeroHeight bool, jailAllowedAddrs []string,
+) (servertypes.ExportedApp, error) {
+ // as if they could withdraw from the start of the next block
+ ctx := app.NewContext(true, tmproto.Header{Height: app.LastBlockHeight()})
+
+ // We export at last height + 1, because that's the height at which
+ // Tendermint will start InitChain.
+ height := app.LastBlockHeight() + 1
+ if forZeroHeight {
+ height = 0
+ app.prepForZeroHeightGenesis(ctx, jailAllowedAddrs)
+ }
+
+ genState := app.mm.ExportGenesis(ctx, app.appCodec)
+ appState, err := json.MarshalIndent(genState, "", " ")
+ if err != nil {
+ return servertypes.ExportedApp{}, err
+ }
+
+ validators, err := staking.WriteValidators(ctx, app.StakingKeeper)
+ return servertypes.ExportedApp{
+ AppState: appState,
+ Validators: validators,
+ Height: height,
+ ConsensusParams: app.BaseApp.GetConsensusParams(ctx),
+ }, err
+}
+
+// prepare for fresh start at zero height
+// NOTE zero height genesis is a temporary feature which will be deprecated
+// in favour of export at a block height
+func (app *SimApp) prepForZeroHeightGenesis(ctx sdk.Context, jailAllowedAddrs []string) {
+ applyAllowedAddrs := false
+
+	// check if there is an allowed address list
+ if len(jailAllowedAddrs) > 0 {
+ applyAllowedAddrs = true
+ }
+
+ allowedAddrsMap := make(map[string]bool)
+
+ for _, addr := range jailAllowedAddrs {
+ _, err := sdk.ValAddressFromBech32(addr)
+ if err != nil {
+ log.Fatal(err)
+ }
+ allowedAddrsMap[addr] = true
+ }
+
+ /* Just to be safe, assert the invariants on current state. */
+ app.CrisisKeeper.AssertInvariants(ctx)
+
+ /* Handle fee distribution state. */
+
+ // withdraw all validator commission
+ app.StakingKeeper.IterateValidators(ctx, func(_ int64, val stakingtypes.ValidatorI) (stop bool) {
+ _, _ = app.DistrKeeper.WithdrawValidatorCommission(ctx, val.GetOperator())
+ return false
+ })
+
+ // withdraw all delegator rewards
+ dels := app.StakingKeeper.GetAllDelegations(ctx)
+ for _, delegation := range dels {
+ valAddr, err := sdk.ValAddressFromBech32(delegation.ValidatorAddress)
+ if err != nil {
+ panic(err)
+ }
+
+ delAddr, err := sdk.AccAddressFromBech32(delegation.DelegatorAddress)
+ if err != nil {
+ panic(err)
+ }
+ _, _ = app.DistrKeeper.WithdrawDelegationRewards(ctx, delAddr, valAddr)
+ }
+
+ // clear validator slash events
+ app.DistrKeeper.DeleteAllValidatorSlashEvents(ctx)
+
+ // clear validator historical rewards
+ app.DistrKeeper.DeleteAllValidatorHistoricalRewards(ctx)
+
+ // set context height to zero
+ height := ctx.BlockHeight()
+ ctx = ctx.WithBlockHeight(0)
+
+ // reinitialize all validators
+ app.StakingKeeper.IterateValidators(ctx, func(_ int64, val stakingtypes.ValidatorI) (stop bool) {
+ // donate any unwithdrawn outstanding reward fraction tokens to the community pool
+ scraps := app.DistrKeeper.GetValidatorOutstandingRewardsCoins(ctx, val.GetOperator())
+ feePool := app.DistrKeeper.GetFeePool(ctx)
+ feePool.CommunityPool = feePool.CommunityPool.Add(scraps...)
+ app.DistrKeeper.SetFeePool(ctx, feePool)
+
+ app.DistrKeeper.Hooks().AfterValidatorCreated(ctx, val.GetOperator())
+ return false
+ })
+
+ // reinitialize all delegations
+ for _, del := range dels {
+ valAddr, err := sdk.ValAddressFromBech32(del.ValidatorAddress)
+ if err != nil {
+ panic(err)
+ }
+ delAddr, err := sdk.AccAddressFromBech32(del.DelegatorAddress)
+ if err != nil {
+ panic(err)
+ }
+ app.DistrKeeper.Hooks().BeforeDelegationCreated(ctx, delAddr, valAddr)
+ app.DistrKeeper.Hooks().AfterDelegationModified(ctx, delAddr, valAddr)
+ }
+
+ // reset context height
+ ctx = ctx.WithBlockHeight(height)
+
+ /* Handle staking state. */
+
+ // iterate through redelegations, reset creation height
+ app.StakingKeeper.IterateRedelegations(ctx, func(_ int64, red stakingtypes.Redelegation) (stop bool) {
+ for i := range red.Entries {
+ red.Entries[i].CreationHeight = 0
+ }
+ app.StakingKeeper.SetRedelegation(ctx, red)
+ return false
+ })
+
+ // iterate through unbonding delegations, reset creation height
+ app.StakingKeeper.IterateUnbondingDelegations(ctx, func(_ int64, ubd stakingtypes.UnbondingDelegation) (stop bool) {
+ for i := range ubd.Entries {
+ ubd.Entries[i].CreationHeight = 0
+ }
+ app.StakingKeeper.SetUnbondingDelegation(ctx, ubd)
+ return false
+ })
+
+ // Iterate through validators by power descending, reset bond heights, and
+ // update bond intra-tx counters.
+ store := ctx.KVStore(app.keys[stakingtypes.StoreKey])
+ iter := sdk.KVStoreReversePrefixIterator(store, stakingtypes.ValidatorsKey)
+ counter := int16(0)
+
+ for ; iter.Valid(); iter.Next() {
+ addr := sdk.ValAddress(stakingtypes.AddressFromValidatorsKey(iter.Key()))
+ validator, found := app.StakingKeeper.GetValidator(ctx, addr)
+ if !found {
+ panic("expected validator, not found")
+ }
+
+ validator.UnbondingHeight = 0
+ if applyAllowedAddrs && !allowedAddrsMap[addr.String()] {
+ validator.Jailed = true
+ }
+
+ app.StakingKeeper.SetValidator(ctx, validator)
+ counter++
+ }
+
+ iter.Close()
+
+ _, err := app.StakingKeeper.ApplyAndReturnValidatorSetUpdates(ctx)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ /* Handle slashing state. */
+
+ // reset start height on signing infos
+ app.SlashingKeeper.IterateValidatorSigningInfos(
+ ctx,
+ func(addr sdk.ConsAddress, info slashingtypes.ValidatorSigningInfo) (stop bool) {
+ info.StartHeight = 0
+ app.SlashingKeeper.SetValidatorSigningInfo(ctx, addr, info)
+ return false
+ },
+ )
+}
diff --git a/testing/simapp/genesis.go b/testing/simapp/genesis.go
new file mode 100644
index 00000000..dbb4e01c
--- /dev/null
+++ b/testing/simapp/genesis.go
@@ -0,0 +1,21 @@
+package simapp
+
+import (
+ "encoding/json"
+
+ "github.com/cosmos/cosmos-sdk/codec"
+)
+
+// The genesis state of the blockchain is represented here as a map of raw JSON
+// messages keyed by an identifier string.
+// The identifier is used to determine which module genesis information belongs
+// to, so that it may be appropriately routed during init chain.
+// Within this application, default genesis information is retrieved from
+// the ModuleBasicManager, which populates JSON from each BasicModule
+// object provided to it during init.
+type GenesisState map[string]json.RawMessage
+
+// NewDefaultGenesisState generates the default state for the application.
+func NewDefaultGenesisState(cdc codec.JSONMarshaler) GenesisState {
+ return ModuleBasics.DefaultGenesis(cdc)
+}
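Because GenesisState is just raw JSON keyed by module name, tests can adjust a single module's genesis before handing the state to InitChain. A minimal sketch, assuming the bank module and the app's JSON codec (`cdc` would be `encCfg.Marshaler`); the field changed is illustrative:

	// Sketch: round-trip one module's genesis through the codec and adjust it.
	genesisState := NewDefaultGenesisState(cdc)

	var bankGenesis banktypes.GenesisState
	cdc.MustUnmarshalJSON(genesisState[banktypes.ModuleName], &bankGenesis)
	bankGenesis.Params.DefaultSendEnabled = true
	genesisState[banktypes.ModuleName] = cdc.MustMarshalJSON(&bankGenesis)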
diff --git a/testing/simapp/genesis_account.go b/testing/simapp/genesis_account.go
new file mode 100644
index 00000000..5c9c7f9a
--- /dev/null
+++ b/testing/simapp/genesis_account.go
@@ -0,0 +1,47 @@
+package simapp
+
+import (
+ "errors"
+
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
+)
+
+var _ authtypes.GenesisAccount = (*SimGenesisAccount)(nil)
+
+// SimGenesisAccount defines a type that implements the GenesisAccount interface
+// to be used for simulation accounts in the genesis state.
+type SimGenesisAccount struct {
+ *authtypes.BaseAccount
+
+ // vesting account fields
+ OriginalVesting sdk.Coins `json:"original_vesting" yaml:"original_vesting"` // total vesting coins upon initialization
+ DelegatedFree sdk.Coins `json:"delegated_free" yaml:"delegated_free"` // delegated vested coins at time of delegation
+ DelegatedVesting sdk.Coins `json:"delegated_vesting" yaml:"delegated_vesting"` // delegated vesting coins at time of delegation
+ StartTime int64 `json:"start_time" yaml:"start_time"` // vesting start time (UNIX Epoch time)
+ EndTime int64 `json:"end_time" yaml:"end_time"` // vesting end time (UNIX Epoch time)
+
+ // module account fields
+ ModuleName string `json:"module_name" yaml:"module_name"` // name of the module account
+ ModulePermissions []string `json:"module_permissions" yaml:"module_permissions"` // permissions of module account
+}
+
+// Validate checks for errors on the vesting and module account parameters
+func (sga SimGenesisAccount) Validate() error {
+ if !sga.OriginalVesting.IsZero() {
+ if sga.StartTime >= sga.EndTime {
+			return errors.New("vesting start-time must be before end-time")
+ }
+ }
+
+ if sga.ModuleName != "" {
+ ma := authtypes.ModuleAccount{
+ BaseAccount: sga.BaseAccount, Name: sga.ModuleName, Permissions: sga.ModulePermissions,
+ }
+ if err := ma.Validate(); err != nil {
+ return err
+ }
+ }
+
+ return sga.BaseAccount.Validate()
+}
diff --git a/testing/simapp/genesis_account_test.go b/testing/simapp/genesis_account_test.go
new file mode 100644
index 00000000..a9d65ad7
--- /dev/null
+++ b/testing/simapp/genesis_account_test.go
@@ -0,0 +1,88 @@
+package simapp_test
+
+import (
+ "testing"
+ "time"
+
+ "github.com/cosmos/cosmos-sdk/crypto/keys/secp256k1"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
+ "github.com/cosmos/ibc-go/testing/simapp"
+
+ "github.com/stretchr/testify/require"
+ "github.com/tendermint/tendermint/crypto"
+)
+
+func TestSimGenesisAccountValidate(t *testing.T) {
+ pubkey := secp256k1.GenPrivKey().PubKey()
+ addr := sdk.AccAddress(pubkey.Address())
+
+ vestingStart := time.Now().UTC()
+
+ coins := sdk.NewCoins(sdk.NewInt64Coin("test", 1000))
+ baseAcc := authtypes.NewBaseAccount(addr, pubkey, 0, 0)
+
+ testCases := []struct {
+ name string
+ sga simapp.SimGenesisAccount
+ wantErr bool
+ }{
+ {
+ "valid basic account",
+ simapp.SimGenesisAccount{
+ BaseAccount: baseAcc,
+ },
+ false,
+ },
+ {
+ "invalid basic account with mismatching address/pubkey",
+ simapp.SimGenesisAccount{
+ BaseAccount: authtypes.NewBaseAccount(addr, secp256k1.GenPrivKey().PubKey(), 0, 0),
+ },
+ true,
+ },
+ {
+ "valid basic account with module name",
+ simapp.SimGenesisAccount{
+ BaseAccount: authtypes.NewBaseAccount(sdk.AccAddress(crypto.AddressHash([]byte("testmod"))), nil, 0, 0),
+ ModuleName: "testmod",
+ },
+ false,
+ },
+ {
+ "valid basic account with invalid module name/pubkey pair",
+ simapp.SimGenesisAccount{
+ BaseAccount: baseAcc,
+ ModuleName: "testmod",
+ },
+ true,
+ },
+ {
+ "valid basic account with valid vesting attributes",
+ simapp.SimGenesisAccount{
+ BaseAccount: baseAcc,
+ OriginalVesting: coins,
+ StartTime: vestingStart.Unix(),
+ EndTime: vestingStart.Add(1 * time.Hour).Unix(),
+ },
+ false,
+ },
+ {
+ "valid basic account with invalid vesting end time",
+ simapp.SimGenesisAccount{
+ BaseAccount: baseAcc,
+ OriginalVesting: coins,
+ StartTime: vestingStart.Add(2 * time.Hour).Unix(),
+ EndTime: vestingStart.Add(1 * time.Hour).Unix(),
+ },
+ true,
+ },
+ }
+
+ for _, tc := range testCases {
+ tc := tc
+ t.Run(tc.name, func(t *testing.T) {
+ require.Equal(t, tc.wantErr, tc.sga.Validate() != nil)
+ })
+ }
+}
diff --git a/testing/simapp/helpers/test_helpers.go b/testing/simapp/helpers/test_helpers.go
new file mode 100644
index 00000000..9ccecbd9
--- /dev/null
+++ b/testing/simapp/helpers/test_helpers.go
@@ -0,0 +1,80 @@
+package helpers
+
+import (
+ "math/rand"
+ "time"
+
+ "github.com/cosmos/cosmos-sdk/client"
+ cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/types/simulation"
+ "github.com/cosmos/cosmos-sdk/types/tx/signing"
+ authsign "github.com/cosmos/cosmos-sdk/x/auth/signing"
+)
+
+// SimAppChainID hardcoded chainID for simulation
+const (
+ DefaultGenTxGas = 1000000
+ SimAppChainID = "simulation-app"
+)
+
+// GenTx generates a signed mock transaction.
+func GenTx(gen client.TxConfig, msgs []sdk.Msg, feeAmt sdk.Coins, gas uint64, chainID string, accNums, accSeqs []uint64, priv ...cryptotypes.PrivKey) (sdk.Tx, error) {
+ sigs := make([]signing.SignatureV2, len(priv))
+
+ // create a random length memo
+ r := rand.New(rand.NewSource(time.Now().UnixNano()))
+
+ memo := simulation.RandStringOfLength(r, simulation.RandIntBetween(r, 0, 100))
+
+ signMode := gen.SignModeHandler().DefaultMode()
+
+ // 1st round: set SignatureV2 with empty signatures, to set correct
+ // signer infos.
+ for i, p := range priv {
+ sigs[i] = signing.SignatureV2{
+ PubKey: p.PubKey(),
+ Data: &signing.SingleSignatureData{
+ SignMode: signMode,
+ },
+ Sequence: accSeqs[i],
+ }
+ }
+
+ tx := gen.NewTxBuilder()
+ err := tx.SetMsgs(msgs...)
+ if err != nil {
+ return nil, err
+ }
+ err = tx.SetSignatures(sigs...)
+ if err != nil {
+ return nil, err
+ }
+ tx.SetMemo(memo)
+ tx.SetFeeAmount(feeAmt)
+ tx.SetGasLimit(gas)
+
+ // 2nd round: once all signer infos are set, every signer can sign.
+ for i, p := range priv {
+ signerData := authsign.SignerData{
+ ChainID: chainID,
+ AccountNumber: accNums[i],
+ Sequence: accSeqs[i],
+ }
+ signBytes, err := gen.SignModeHandler().GetSignBytes(signMode, signerData, tx.GetTx())
+ if err != nil {
+ panic(err)
+ }
+ sig, err := p.Sign(signBytes)
+ if err != nil {
+ panic(err)
+ }
+ sigs[i].Data.(*signing.SingleSignatureData).Signature = sig
+ err = tx.SetSignatures(sigs...)
+ if err != nil {
+ panic(err)
+ }
+ }
+
+ return tx.GetTx(), nil
+}
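A minimal usage sketch for GenTx, matching the parameter order above; the message, fee, gas, account number, and sequence are illustrative, and `encCfg` stands in for the result of simapp.MakeTestEncodingConfig():

	// Sketch: produce a signed mock transaction for a single signer.
	priv := secp256k1.GenPrivKey()
	addr := sdk.AccAddress(priv.PubKey().Address())
	msg := banktypes.NewMsgSend(addr, addr, sdk.NewCoins(sdk.NewInt64Coin("stake", 1)))

	tx, err := helpers.GenTx(
		encCfg.TxConfig,
		[]sdk.Msg{msg},
		sdk.NewCoins(sdk.NewInt64Coin("stake", 10)), // fee
		helpers.DefaultGenTxGas,
		helpers.SimAppChainID,
		[]uint64{0}, // account numbers
		[]uint64{0}, // account sequences
		priv,
	)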
diff --git a/testing/simapp/params/amino.go b/testing/simapp/params/amino.go
new file mode 100644
index 00000000..440c29f8
--- /dev/null
+++ b/testing/simapp/params/amino.go
@@ -0,0 +1,26 @@
+// +build test_amino
+
+package params
+
+import (
+ "github.com/cosmos/cosmos-sdk/codec"
+ "github.com/cosmos/cosmos-sdk/codec/types"
+ "github.com/cosmos/cosmos-sdk/x/auth/legacy/legacytx"
+)
+
+// MakeTestEncodingConfig creates an EncodingConfig for an amino based test configuration.
+// This function should be used only internally (in the SDK).
+// App users shouldn't create new codecs - use the app.AppCodec instead.
+// [DEPRECATED]
+func MakeTestEncodingConfig() EncodingConfig {
+ cdc := codec.NewLegacyAmino()
+ interfaceRegistry := types.NewInterfaceRegistry()
+ marshaler := codec.NewAminoCodec(cdc)
+
+ return EncodingConfig{
+ InterfaceRegistry: interfaceRegistry,
+ Marshaler: marshaler,
+ TxConfig: legacytx.StdTxConfig{Cdc: cdc},
+ Amino: cdc,
+ }
+}
diff --git a/testing/simapp/params/doc.go b/testing/simapp/params/doc.go
new file mode 100644
index 00000000..1c721342
--- /dev/null
+++ b/testing/simapp/params/doc.go
@@ -0,0 +1,19 @@
+/*
+Package params defines the simulation parameters in the simapp.
+
+It contains the default weights used for each transaction in the module's
+simulation. These weights define the chance for a transaction to be simulated at
+any given operation.
+
+You can replace the default values for the weights by providing a params.json
+file with the weights defined for each of the transaction operations:
+
+ {
+ "op_weight_msg_send": 60,
+ "op_weight_msg_delegate": 100,
+ }
+
+In the example above, `MsgSend` has a 60% chance of being simulated, while
+`MsgDelegate` will always be simulated.
+*/
+package params
diff --git a/testing/simapp/params/encoding.go b/testing/simapp/params/encoding.go
new file mode 100644
index 00000000..698408da
--- /dev/null
+++ b/testing/simapp/params/encoding.go
@@ -0,0 +1,16 @@
+package params
+
+import (
+ "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/codec"
+ "github.com/cosmos/cosmos-sdk/codec/types"
+)
+
+// EncodingConfig specifies the concrete encoding types to use for a given app.
+// This is provided for compatibility between protobuf and amino implementations.
+type EncodingConfig struct {
+ InterfaceRegistry types.InterfaceRegistry
+ Marshaler codec.Marshaler
+ TxConfig client.TxConfig
+ Amino *codec.LegacyAmino
+}
diff --git a/testing/simapp/params/params.go b/testing/simapp/params/params.go
new file mode 100644
index 00000000..b6aa5fb5
--- /dev/null
+++ b/testing/simapp/params/params.go
@@ -0,0 +1,7 @@
+package params
+
+// Simulation parameter constants
+const (
+ StakePerAccount = "stake_per_account"
+ InitiallyBondedValidators = "initially_bonded_validators"
+)
diff --git a/testing/simapp/params/proto.go b/testing/simapp/params/proto.go
new file mode 100644
index 00000000..04aa524b
--- /dev/null
+++ b/testing/simapp/params/proto.go
@@ -0,0 +1,26 @@
+// +build !test_amino
+
+package params
+
+import (
+ "github.com/cosmos/cosmos-sdk/codec"
+ "github.com/cosmos/cosmos-sdk/codec/types"
+ "github.com/cosmos/cosmos-sdk/x/auth/tx"
+)
+
+// MakeTestEncodingConfig creates an EncodingConfig for a non-amino based test configuration.
+// This function should be used only internally (in the SDK).
+// App users shouldn't create new codecs - use the app.AppCodec instead.
+// [DEPRECATED]
+func MakeTestEncodingConfig() EncodingConfig {
+ cdc := codec.NewLegacyAmino()
+ interfaceRegistry := types.NewInterfaceRegistry()
+ marshaler := codec.NewProtoCodec(interfaceRegistry)
+
+ return EncodingConfig{
+ InterfaceRegistry: interfaceRegistry,
+ Marshaler: marshaler,
+ TxConfig: tx.NewTxConfig(marshaler, tx.DefaultSignModes),
+ Amino: cdc,
+ }
+}
diff --git a/testing/simapp/params/weights.go b/testing/simapp/params/weights.go
new file mode 100644
index 00000000..81400a2f
--- /dev/null
+++ b/testing/simapp/params/weights.go
@@ -0,0 +1,28 @@
+package params
+
+// Default simulation operation weights for messages and gov proposals
+const (
+ DefaultWeightMsgSend int = 100
+ DefaultWeightMsgMultiSend int = 10
+ DefaultWeightMsgSetWithdrawAddress int = 50
+ DefaultWeightMsgWithdrawDelegationReward int = 50
+ DefaultWeightMsgWithdrawValidatorCommission int = 50
+ DefaultWeightMsgFundCommunityPool int = 50
+ DefaultWeightMsgDeposit int = 100
+ DefaultWeightMsgVote int = 67
+ DefaultWeightMsgVoteWeighted int = 33
+ DefaultWeightMsgUnjail int = 100
+ DefaultWeightMsgCreateValidator int = 100
+ DefaultWeightMsgEditValidator int = 5
+ DefaultWeightMsgDelegate int = 100
+ DefaultWeightMsgUndelegate int = 100
+ DefaultWeightMsgBeginRedelegate int = 100
+
+ DefaultWeightCommunitySpendProposal int = 5
+ DefaultWeightTextProposal int = 5
+ DefaultWeightParamChangeProposal int = 5
+
+ // feegrant
+ DefaultWeightGrantFeeAllowance int = 100
+ DefaultWeightRevokeFeeAllowance int = 100
+)
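These defaults are typically consumed by a module's simulation operations, which read the weight from the simulation params file (the `op_weight_*` keys shown in doc.go) and fall back to the constant. A minimal sketch of that lookup, assuming the SDK's simtypes.AppParams.GetOrGenerate helper with `appParams` and `cdc` provided by the operation's caller:

	// Sketch: resolve the MsgSend weight, defaulting to the constant above.
	var weightMsgSend int
	appParams.GetOrGenerate(cdc, "op_weight_msg_send", &weightMsgSend, nil,
		func(_ *rand.Rand) { weightMsgSend = simappparams.DefaultWeightMsgSend },
	)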
diff --git a/testing/simapp/sim_bench_test.go b/testing/simapp/sim_bench_test.go
new file mode 100644
index 00000000..3a422ecb
--- /dev/null
+++ b/testing/simapp/sim_bench_test.go
@@ -0,0 +1,122 @@
+package simapp
+
+import (
+ "fmt"
+ "os"
+ "testing"
+
+ tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
+
+ simtypes "github.com/cosmos/cosmos-sdk/types/simulation"
+ "github.com/cosmos/cosmos-sdk/x/simulation"
+)
+
+// Profile with:
+// /usr/local/go/bin/go test -benchmem -run=^$ github.com/cosmos/ibc-go/testing/simapp -bench ^BenchmarkFullAppSimulation$ -Commit=true -cpuprofile cpu.out
+func BenchmarkFullAppSimulation(b *testing.B) {
+ b.ReportAllocs()
+ config, db, dir, logger, _, err := SetupSimulation("goleveldb-app-sim", "Simulation")
+ if err != nil {
+ b.Fatalf("simulation setup failed: %s", err.Error())
+ }
+
+ defer func() {
+ db.Close()
+ err = os.RemoveAll(dir)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }()
+
+ app := NewSimApp(logger, db, nil, true, map[int64]bool{}, DefaultNodeHome, FlagPeriodValue, MakeTestEncodingConfig(), EmptyAppOptions{}, interBlockCacheOpt())
+
+ // run randomized simulation
+ _, simParams, simErr := simulation.SimulateFromSeed(
+ b,
+ os.Stdout,
+ app.BaseApp,
+ AppStateFn(app.AppCodec(), app.SimulationManager()),
+ simtypes.RandomAccounts, // Replace with own random account function if using keys other than secp256k1
+ SimulationOperations(app, app.AppCodec(), config),
+ app.ModuleAccountAddrs(),
+ config,
+ app.AppCodec(),
+ )
+
+ // export state and simParams before the simulation error is checked
+ if err = CheckExportSimulation(app, config, simParams); err != nil {
+ b.Fatal(err)
+ }
+
+ if simErr != nil {
+ b.Fatal(simErr)
+ }
+
+ if config.Commit {
+ PrintStats(db)
+ }
+}
+
+func BenchmarkInvariants(b *testing.B) {
+ b.ReportAllocs()
+ config, db, dir, logger, _, err := SetupSimulation("leveldb-app-invariant-bench", "Simulation")
+ if err != nil {
+ b.Fatalf("simulation setup failed: %s", err.Error())
+ }
+
+ config.AllInvariants = false
+
+ defer func() {
+ db.Close()
+ err = os.RemoveAll(dir)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }()
+
+ app := NewSimApp(logger, db, nil, true, map[int64]bool{}, DefaultNodeHome, FlagPeriodValue, MakeTestEncodingConfig(), EmptyAppOptions{}, interBlockCacheOpt())
+
+ // run randomized simulation
+ _, simParams, simErr := simulation.SimulateFromSeed(
+ b,
+ os.Stdout,
+ app.BaseApp,
+ AppStateFn(app.AppCodec(), app.SimulationManager()),
+ simtypes.RandomAccounts, // Replace with own random account function if using keys other than secp256k1
+ SimulationOperations(app, app.AppCodec(), config),
+ app.ModuleAccountAddrs(),
+ config,
+ app.AppCodec(),
+ )
+
+ // export state and simParams before the simulation error is checked
+ if err = CheckExportSimulation(app, config, simParams); err != nil {
+ b.Fatal(err)
+ }
+
+ if simErr != nil {
+ b.Fatal(simErr)
+ }
+
+ if config.Commit {
+ PrintStats(db)
+ }
+
+ ctx := app.NewContext(true, tmproto.Header{Height: app.LastBlockHeight() + 1})
+
+	// Benchmark each invariant separately
+ //
+ // NOTE: We use the crisis keeper as it has all the invariants registered with
+ // their respective metadata which makes it useful for testing/benchmarking.
+ for _, cr := range app.CrisisKeeper.Routes() {
+ cr := cr
+ b.Run(fmt.Sprintf("%s/%s", cr.ModuleName, cr.Route), func(b *testing.B) {
+ if res, stop := cr.Invar(ctx); stop {
+ b.Fatalf(
+ "broken invariant at block %d of %d\n%s",
+ ctx.BlockHeight()-1, config.NumBlocks, res,
+ )
+ }
+ })
+ }
+}
diff --git a/testing/simapp/sim_test.go b/testing/simapp/sim_test.go
new file mode 100644
index 00000000..c1423655
--- /dev/null
+++ b/testing/simapp/sim_test.go
@@ -0,0 +1,339 @@
+package simapp
+
+import (
+ "encoding/json"
+ "fmt"
+ "math/rand"
+ "os"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+ abci "github.com/tendermint/tendermint/abci/types"
+ "github.com/tendermint/tendermint/libs/log"
+ tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
+ dbm "github.com/tendermint/tm-db"
+
+ "github.com/cosmos/cosmos-sdk/baseapp"
+ "github.com/cosmos/cosmos-sdk/store"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ simtypes "github.com/cosmos/cosmos-sdk/types/simulation"
+ authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
+ authztypes "github.com/cosmos/cosmos-sdk/x/authz/types"
+ banktypes "github.com/cosmos/cosmos-sdk/x/bank/types"
+ capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"
+ distrtypes "github.com/cosmos/cosmos-sdk/x/distribution/types"
+ evidencetypes "github.com/cosmos/cosmos-sdk/x/evidence/types"
+ govtypes "github.com/cosmos/cosmos-sdk/x/gov/types"
+ minttypes "github.com/cosmos/cosmos-sdk/x/mint/types"
+ paramtypes "github.com/cosmos/cosmos-sdk/x/params/types"
+ "github.com/cosmos/cosmos-sdk/x/simulation"
+ slashingtypes "github.com/cosmos/cosmos-sdk/x/slashing/types"
+ stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types"
+ ibctransfertypes "github.com/cosmos/ibc-go/apps/transfer/types"
+ ibchost "github.com/cosmos/ibc-go/core/24-host"
+ "github.com/cosmos/ibc-go/testing/simapp/helpers"
+)
+
+// Get flags every time the simulator is run
+func init() {
+ GetSimulatorFlags()
+}
+
+type StoreKeysPrefixes struct {
+ A sdk.StoreKey
+ B sdk.StoreKey
+ Prefixes [][]byte
+}
+
+// fauxMerkleModeOpt returns a BaseApp option to use a dbStoreAdapter instead of
+// an IAVLStore for faster simulation speed.
+func fauxMerkleModeOpt(bapp *baseapp.BaseApp) {
+ bapp.SetFauxMerkleMode()
+}
+
+// interBlockCacheOpt returns a BaseApp option function that sets the persistent
+// inter-block write-through cache.
+func interBlockCacheOpt() func(*baseapp.BaseApp) {
+ return baseapp.SetInterBlockCache(store.NewCommitKVStoreCacheManager())
+}
+
+func TestFullAppSimulation(t *testing.T) {
+ config, db, dir, logger, skip, err := SetupSimulation("leveldb-app-sim", "Simulation")
+ if skip {
+ t.Skip("skipping application simulation")
+ }
+ require.NoError(t, err, "simulation setup failed")
+
+ defer func() {
+ db.Close()
+ require.NoError(t, os.RemoveAll(dir))
+ }()
+
+ app := NewSimApp(logger, db, nil, true, map[int64]bool{}, DefaultNodeHome, FlagPeriodValue, MakeTestEncodingConfig(), EmptyAppOptions{}, fauxMerkleModeOpt)
+ require.Equal(t, "SimApp", app.Name())
+
+ // run randomized simulation
+ _, simParams, simErr := simulation.SimulateFromSeed(
+ t,
+ os.Stdout,
+ app.BaseApp,
+ AppStateFn(app.AppCodec(), app.SimulationManager()),
+ simtypes.RandomAccounts, // Replace with own random account function if using keys other than secp256k1
+ SimulationOperations(app, app.AppCodec(), config),
+ app.ModuleAccountAddrs(),
+ config,
+ app.AppCodec(),
+ )
+
+ // export state and simParams before the simulation error is checked
+ err = CheckExportSimulation(app, config, simParams)
+ require.NoError(t, err)
+ require.NoError(t, simErr)
+
+ if config.Commit {
+ PrintStats(db)
+ }
+}
+
+func TestAppImportExport(t *testing.T) {
+ config, db, dir, logger, skip, err := SetupSimulation("leveldb-app-sim", "Simulation")
+ if skip {
+ t.Skip("skipping application import/export simulation")
+ }
+ require.NoError(t, err, "simulation setup failed")
+
+ defer func() {
+ db.Close()
+ require.NoError(t, os.RemoveAll(dir))
+ }()
+
+ app := NewSimApp(logger, db, nil, true, map[int64]bool{}, DefaultNodeHome, FlagPeriodValue, MakeTestEncodingConfig(), EmptyAppOptions{}, fauxMerkleModeOpt)
+ require.Equal(t, "SimApp", app.Name())
+
+ // Run randomized simulation
+ _, simParams, simErr := simulation.SimulateFromSeed(
+ t,
+ os.Stdout,
+ app.BaseApp,
+ AppStateFn(app.AppCodec(), app.SimulationManager()),
+ simtypes.RandomAccounts, // Replace with own random account function if using keys other than secp256k1
+ SimulationOperations(app, app.AppCodec(), config),
+ app.ModuleAccountAddrs(),
+ config,
+ app.AppCodec(),
+ )
+
+ // export state and simParams before the simulation error is checked
+ err = CheckExportSimulation(app, config, simParams)
+ require.NoError(t, err)
+ require.NoError(t, simErr)
+
+ if config.Commit {
+ PrintStats(db)
+ }
+
+ fmt.Printf("exporting genesis...\n")
+
+ exported, err := app.ExportAppStateAndValidators(false, []string{})
+ require.NoError(t, err)
+
+ fmt.Printf("importing genesis...\n")
+
+ _, newDB, newDir, _, _, err := SetupSimulation("leveldb-app-sim-2", "Simulation-2")
+ require.NoError(t, err, "simulation setup failed")
+
+ defer func() {
+ newDB.Close()
+ require.NoError(t, os.RemoveAll(newDir))
+ }()
+
+ newApp := NewSimApp(log.NewNopLogger(), newDB, nil, true, map[int64]bool{}, DefaultNodeHome, FlagPeriodValue, MakeTestEncodingConfig(), EmptyAppOptions{}, fauxMerkleModeOpt)
+ require.Equal(t, "SimApp", newApp.Name())
+
+ var genesisState GenesisState
+ err = json.Unmarshal(exported.AppState, &genesisState)
+ require.NoError(t, err)
+
+ ctxA := app.NewContext(true, tmproto.Header{Height: app.LastBlockHeight()})
+ ctxB := newApp.NewContext(true, tmproto.Header{Height: app.LastBlockHeight()})
+ newApp.mm.InitGenesis(ctxB, app.AppCodec(), genesisState)
+ newApp.StoreConsensusParams(ctxB, exported.ConsensusParams)
+
+ fmt.Printf("comparing stores...\n")
+
+ storeKeysPrefixes := []StoreKeysPrefixes{
+ {app.keys[authtypes.StoreKey], newApp.keys[authtypes.StoreKey], [][]byte{}},
+ {app.keys[stakingtypes.StoreKey], newApp.keys[stakingtypes.StoreKey],
+ [][]byte{
+ stakingtypes.UnbondingQueueKey, stakingtypes.RedelegationQueueKey, stakingtypes.ValidatorQueueKey,
+ stakingtypes.HistoricalInfoKey,
+ }}, // ordering may change but it doesn't matter
+ {app.keys[slashingtypes.StoreKey], newApp.keys[slashingtypes.StoreKey], [][]byte{}},
+ {app.keys[minttypes.StoreKey], newApp.keys[minttypes.StoreKey], [][]byte{}},
+ {app.keys[distrtypes.StoreKey], newApp.keys[distrtypes.StoreKey], [][]byte{}},
+ {app.keys[banktypes.StoreKey], newApp.keys[banktypes.StoreKey], [][]byte{banktypes.BalancesPrefix}},
+ {app.keys[paramtypes.StoreKey], newApp.keys[paramtypes.StoreKey], [][]byte{}},
+ {app.keys[govtypes.StoreKey], newApp.keys[govtypes.StoreKey], [][]byte{}},
+ {app.keys[evidencetypes.StoreKey], newApp.keys[evidencetypes.StoreKey], [][]byte{}},
+ {app.keys[capabilitytypes.StoreKey], newApp.keys[capabilitytypes.StoreKey], [][]byte{}},
+ {app.keys[ibchost.StoreKey], newApp.keys[ibchost.StoreKey], [][]byte{}},
+ {app.keys[ibctransfertypes.StoreKey], newApp.keys[ibctransfertypes.StoreKey], [][]byte{}},
+ {app.keys[authztypes.StoreKey], newApp.keys[authztypes.StoreKey], [][]byte{}},
+ }
+
+ for _, skp := range storeKeysPrefixes {
+ storeA := ctxA.KVStore(skp.A)
+ storeB := ctxB.KVStore(skp.B)
+
+ failedKVAs, failedKVBs := sdk.DiffKVStores(storeA, storeB, skp.Prefixes)
+ require.Equal(t, len(failedKVAs), len(failedKVBs), "unequal sets of key-values to compare")
+
+ fmt.Printf("compared %d different key/value pairs between %s and %s\n", len(failedKVAs), skp.A, skp.B)
+ require.Equal(t, len(failedKVAs), 0, GetSimulationLog(skp.A.Name(), app.SimulationManager().StoreDecoders, failedKVAs, failedKVBs))
+ }
+}
+
+func TestAppSimulationAfterImport(t *testing.T) {
+ config, db, dir, logger, skip, err := SetupSimulation("leveldb-app-sim", "Simulation")
+ if skip {
+ t.Skip("skipping application simulation after import")
+ }
+ require.NoError(t, err, "simulation setup failed")
+
+ defer func() {
+ db.Close()
+ require.NoError(t, os.RemoveAll(dir))
+ }()
+
+ app := NewSimApp(logger, db, nil, true, map[int64]bool{}, DefaultNodeHome, FlagPeriodValue, MakeTestEncodingConfig(), EmptyAppOptions{}, fauxMerkleModeOpt)
+ require.Equal(t, "SimApp", app.Name())
+
+ // Run randomized simulation
+ stopEarly, simParams, simErr := simulation.SimulateFromSeed(
+ t,
+ os.Stdout,
+ app.BaseApp,
+ AppStateFn(app.AppCodec(), app.SimulationManager()),
+ simtypes.RandomAccounts, // Replace with own random account function if using keys other than secp256k1
+ SimulationOperations(app, app.AppCodec(), config),
+ app.ModuleAccountAddrs(),
+ config,
+ app.AppCodec(),
+ )
+
+ // export state and simParams before the simulation error is checked
+ err = CheckExportSimulation(app, config, simParams)
+ require.NoError(t, err)
+ require.NoError(t, simErr)
+
+ if config.Commit {
+ PrintStats(db)
+ }
+
+ if stopEarly {
+ fmt.Println("can't export or import a zero-validator genesis, exiting test...")
+ return
+ }
+
+ fmt.Printf("exporting genesis...\n")
+
+ exported, err := app.ExportAppStateAndValidators(true, []string{})
+ require.NoError(t, err)
+
+ fmt.Printf("importing genesis...\n")
+
+ _, newDB, newDir, _, _, err := SetupSimulation("leveldb-app-sim-2", "Simulation-2")
+ require.NoError(t, err, "simulation setup failed")
+
+ defer func() {
+ newDB.Close()
+ require.NoError(t, os.RemoveAll(newDir))
+ }()
+
+ newApp := NewSimApp(log.NewNopLogger(), newDB, nil, true, map[int64]bool{}, DefaultNodeHome, FlagPeriodValue, MakeTestEncodingConfig(), EmptyAppOptions{}, fauxMerkleModeOpt)
+ require.Equal(t, "SimApp", newApp.Name())
+
+ newApp.InitChain(abci.RequestInitChain{
+ AppStateBytes: exported.AppState,
+ })
+
+ _, _, err = simulation.SimulateFromSeed(
+ t,
+ os.Stdout,
+ newApp.BaseApp,
+ AppStateFn(app.AppCodec(), app.SimulationManager()),
+ simtypes.RandomAccounts, // Replace with own random account function if using keys other than secp256k1
+ SimulationOperations(newApp, newApp.AppCodec(), config),
+ app.ModuleAccountAddrs(),
+ config,
+ app.AppCodec(),
+ )
+ require.NoError(t, err)
+}
+
+// TODO: Make another test for the fuzzer itself, which just has noOp txs
+// and doesn't depend on the application.
+func TestAppStateDeterminism(t *testing.T) {
+ if !FlagEnabledValue {
+ t.Skip("skipping application simulation")
+ }
+
+ config := NewConfigFromFlags()
+ config.InitialBlockHeight = 1
+ config.ExportParamsPath = ""
+ config.OnOperation = false
+ config.AllInvariants = false
+ config.ChainID = helpers.SimAppChainID
+
+ numSeeds := 3
+ numTimesToRunPerSeed := 5
+ appHashList := make([]json.RawMessage, numTimesToRunPerSeed)
+
+ for i := 0; i < numSeeds; i++ {
+ config.Seed = rand.Int63()
+
+ for j := 0; j < numTimesToRunPerSeed; j++ {
+ var logger log.Logger
+ if FlagVerboseValue {
+ logger = log.TestingLogger()
+ } else {
+ logger = log.NewNopLogger()
+ }
+
+ db := dbm.NewMemDB()
+ app := NewSimApp(logger, db, nil, true, map[int64]bool{}, DefaultNodeHome, FlagPeriodValue, MakeTestEncodingConfig(), EmptyAppOptions{}, interBlockCacheOpt())
+
+ fmt.Printf(
+ "running non-determinism simulation; seed %d: %d/%d, attempt: %d/%d\n",
+ config.Seed, i+1, numSeeds, j+1, numTimesToRunPerSeed,
+ )
+
+ _, _, err := simulation.SimulateFromSeed(
+ t,
+ os.Stdout,
+ app.BaseApp,
+ AppStateFn(app.AppCodec(), app.SimulationManager()),
+ simtypes.RandomAccounts, // Replace with own random account function if using keys other than secp256k1
+ SimulationOperations(app, app.AppCodec(), config),
+ app.ModuleAccountAddrs(),
+ config,
+ app.AppCodec(),
+ )
+ require.NoError(t, err)
+
+ if config.Commit {
+ PrintStats(db)
+ }
+
+ appHash := app.LastCommitID().Hash
+ appHashList[j] = appHash
+
+ if j != 0 {
+ require.Equal(
+ t, string(appHashList[0]), string(appHashList[j]),
+ "non-determinism in seed %d: %d/%d, attempt: %d/%d\n", config.Seed, i+1, numSeeds, j+1, numTimesToRunPerSeed,
+ )
+ }
+ }
+ }
+}
diff --git a/testing/simapp/simd/cmd/cmd_test.go b/testing/simapp/simd/cmd/cmd_test.go
new file mode 100644
index 00000000..1ae137a6
--- /dev/null
+++ b/testing/simapp/simd/cmd/cmd_test.go
@@ -0,0 +1,24 @@
+package cmd_test
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ svrcmd "github.com/cosmos/cosmos-sdk/server/cmd"
+ "github.com/cosmos/cosmos-sdk/x/genutil/client/cli"
+ "github.com/cosmos/ibc-go/testing/simapp"
+ "github.com/cosmos/ibc-go/testing/simapp/simd/cmd"
+)
+
+func TestInitCmd(t *testing.T) {
+ rootCmd, _ := cmd.NewRootCmd()
+ rootCmd.SetArgs([]string{
+ "init", // Test the init cmd
+ "simapp-test", // Moniker
+ fmt.Sprintf("--%s=%s", cli.FlagOverwrite, "true"), // Overwrite genesis.json, in case it already exists
+ })
+
+ require.NoError(t, svrcmd.Execute(rootCmd, simapp.DefaultNodeHome))
+}
diff --git a/testing/simapp/simd/cmd/genaccounts.go b/testing/simapp/simd/cmd/genaccounts.go
new file mode 100644
index 00000000..57de144c
--- /dev/null
+++ b/testing/simapp/simd/cmd/genaccounts.go
@@ -0,0 +1,181 @@
+package cmd
+
+import (
+ "bufio"
+ "encoding/json"
+ "errors"
+ "fmt"
+
+ "github.com/spf13/cobra"
+
+ "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/client/flags"
+ "github.com/cosmos/cosmos-sdk/codec"
+ "github.com/cosmos/cosmos-sdk/crypto/keyring"
+ "github.com/cosmos/cosmos-sdk/server"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
+ authvesting "github.com/cosmos/cosmos-sdk/x/auth/vesting/types"
+ banktypes "github.com/cosmos/cosmos-sdk/x/bank/types"
+ "github.com/cosmos/cosmos-sdk/x/genutil"
+ genutiltypes "github.com/cosmos/cosmos-sdk/x/genutil/types"
+)
+
+const (
+ flagVestingStart = "vesting-start-time"
+ flagVestingEnd = "vesting-end-time"
+ flagVestingAmt = "vesting-amount"
+)
+
+// AddGenesisAccountCmd returns add-genesis-account cobra Command.
+func AddGenesisAccountCmd(defaultNodeHome string) *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "add-genesis-account [address_or_key_name] [coin][,[coin]]",
+ Short: "Add a genesis account to genesis.json",
+ Long: `Add a genesis account to genesis.json. The provided account must specify
+the account address or key name and a list of initial coins. If a key name is given,
+the address will be looked up in the local Keybase. The list of initial tokens must
+contain valid denominations. Accounts may optionally be supplied with vesting parameters.
+`,
+ Args: cobra.ExactArgs(2),
+ RunE: func(cmd *cobra.Command, args []string) error {
+ clientCtx := client.GetClientContextFromCmd(cmd)
+ depCdc := clientCtx.JSONMarshaler
+ cdc := depCdc.(codec.Marshaler)
+
+ serverCtx := server.GetServerContextFromCmd(cmd)
+ config := serverCtx.Config
+
+ config.SetRoot(clientCtx.HomeDir)
+
+ addr, err := sdk.AccAddressFromBech32(args[0])
+ if err != nil {
+ inBuf := bufio.NewReader(cmd.InOrStdin())
+ keyringBackend, _ := cmd.Flags().GetString(flags.FlagKeyringBackend)
+
+ // attempt to look up the address from the Keybase if no address was provided
+ kb, err := keyring.New(sdk.KeyringServiceName(), keyringBackend, clientCtx.HomeDir, inBuf)
+ if err != nil {
+ return err
+ }
+
+ info, err := kb.Key(args[0])
+ if err != nil {
+ return fmt.Errorf("failed to get address from Keybase: %w", err)
+ }
+
+ addr = info.GetAddress()
+ }
+
+ coins, err := sdk.ParseCoinsNormalized(args[1])
+ if err != nil {
+ return fmt.Errorf("failed to parse coins: %w", err)
+ }
+
+ vestingStart, _ := cmd.Flags().GetInt64(flagVestingStart)
+ vestingEnd, _ := cmd.Flags().GetInt64(flagVestingEnd)
+ vestingAmtStr, _ := cmd.Flags().GetString(flagVestingAmt)
+
+ vestingAmt, err := sdk.ParseCoinsNormalized(vestingAmtStr)
+ if err != nil {
+ return fmt.Errorf("failed to parse vesting amount: %w", err)
+ }
+
+ // create concrete account type based on input parameters
+ var genAccount authtypes.GenesisAccount
+
+ balances := banktypes.Balance{Address: addr.String(), Coins: coins.Sort()}
+ baseAccount := authtypes.NewBaseAccount(addr, nil, 0, 0)
+
+ if !vestingAmt.IsZero() {
+ baseVestingAccount := authvesting.NewBaseVestingAccount(baseAccount, vestingAmt.Sort(), vestingEnd)
+
+ if (balances.Coins.IsZero() && !baseVestingAccount.OriginalVesting.IsZero()) ||
+ baseVestingAccount.OriginalVesting.IsAnyGT(balances.Coins) {
+ return errors.New("vesting amount cannot be greater than total amount")
+ }
+
+ switch {
+ case vestingStart != 0 && vestingEnd != 0:
+ genAccount = authvesting.NewContinuousVestingAccountRaw(baseVestingAccount, vestingStart)
+
+ case vestingEnd != 0:
+ genAccount = authvesting.NewDelayedVestingAccountRaw(baseVestingAccount)
+
+ default:
+ return errors.New("invalid vesting parameters; must supply start and end time or end time")
+ }
+ } else {
+ genAccount = baseAccount
+ }
+
+ if err := genAccount.Validate(); err != nil {
+ return fmt.Errorf("failed to validate new genesis account: %w", err)
+ }
+
+ genFile := config.GenesisFile()
+ appState, genDoc, err := genutiltypes.GenesisStateFromGenFile(genFile)
+ if err != nil {
+ return fmt.Errorf("failed to unmarshal genesis state: %w", err)
+ }
+
+ authGenState := authtypes.GetGenesisStateFromAppState(cdc, appState)
+
+ accs, err := authtypes.UnpackAccounts(authGenState.Accounts)
+ if err != nil {
+ return fmt.Errorf("failed to get accounts from any: %w", err)
+ }
+
+ if accs.Contains(addr) {
+ return fmt.Errorf("cannot add account at existing address %s", addr)
+ }
+
+ // Add the new account to the set of genesis accounts and sanitize the
+ // accounts afterwards.
+ accs = append(accs, genAccount)
+ accs = authtypes.SanitizeGenesisAccounts(accs)
+
+ genAccs, err := authtypes.PackAccounts(accs)
+ if err != nil {
+ return fmt.Errorf("failed to convert accounts into any's: %w", err)
+ }
+ authGenState.Accounts = genAccs
+
+ authGenStateBz, err := cdc.MarshalJSON(&authGenState)
+ if err != nil {
+ return fmt.Errorf("failed to marshal auth genesis state: %w", err)
+ }
+
+ appState[authtypes.ModuleName] = authGenStateBz
+
+ bankGenState := banktypes.GetGenesisStateFromAppState(depCdc, appState)
+ bankGenState.Balances = append(bankGenState.Balances, balances)
+ bankGenState.Balances = banktypes.SanitizeGenesisBalances(bankGenState.Balances)
+ bankGenState.Supply = bankGenState.Supply.Add(balances.Coins...)
+
+ bankGenStateBz, err := cdc.MarshalJSON(bankGenState)
+ if err != nil {
+ return fmt.Errorf("failed to marshal bank genesis state: %w", err)
+ }
+
+ appState[banktypes.ModuleName] = bankGenStateBz
+
+ appStateJSON, err := json.Marshal(appState)
+ if err != nil {
+ return fmt.Errorf("failed to marshal application genesis state: %w", err)
+ }
+
+ genDoc.AppState = appStateJSON
+ return genutil.ExportGenesisFile(genDoc, genFile)
+ },
+ }
+
+ cmd.Flags().String(flags.FlagHome, defaultNodeHome, "The application home directory")
+ cmd.Flags().String(flags.FlagKeyringBackend, flags.DefaultKeyringBackend, "Select keyring's backend (os|file|kwallet|pass|test)")
+ cmd.Flags().String(flagVestingAmt, "", "amount of coins for vesting accounts")
+ cmd.Flags().Int64(flagVestingStart, 0, "schedule start time (unix epoch) for vesting accounts")
+ cmd.Flags().Int64(flagVestingEnd, 0, "schedule end time (unix epoch) for vesting accounts")
+ flags.AddQueryFlagsToCmd(cmd)
+
+ return cmd
+}
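
For reference, a minimal sketch of how the vesting flags above might be exercised, following the SetArgs/ExecuteContext pattern used by genaccounts_test.go further below; home, ctx and addr are placeholders supplied by the surrounding test harness, and the snippet is illustrative rather than part of this patch.

    cmd := AddGenesisAccountCmd(home)
    cmd.SetArgs([]string{
        addr,        // bech32 address or key name of the new genesis account
        "1000stake", // total initial balance
        "--vesting-amount=400stake",     // portion of the balance subject to vesting
        "--vesting-end-time=1700000000", // delayed vesting until this unix time (seconds)
    })
    // with a non-zero vesting amount and only an end time set, the command creates a DelayedVestingAccount
    err := cmd.ExecuteContext(ctx) // ctx must carry the client and server contexts, as in genaccounts_test.go
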
diff --git a/testing/simapp/simd/cmd/genaccounts_test.go b/testing/simapp/simd/cmd/genaccounts_test.go
new file mode 100644
index 00000000..d1265b1f
--- /dev/null
+++ b/testing/simapp/simd/cmd/genaccounts_test.go
@@ -0,0 +1,85 @@
+package cmd_test
+
+import (
+ "context"
+ "fmt"
+ "testing"
+
+ "github.com/spf13/viper"
+ "github.com/stretchr/testify/require"
+ "github.com/tendermint/tendermint/libs/log"
+
+ "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/client/flags"
+ "github.com/cosmos/cosmos-sdk/server"
+ "github.com/cosmos/cosmos-sdk/testutil/testdata"
+ "github.com/cosmos/cosmos-sdk/types/module"
+ "github.com/cosmos/cosmos-sdk/x/genutil"
+ genutiltest "github.com/cosmos/cosmos-sdk/x/genutil/client/testutil"
+ "github.com/cosmos/ibc-go/testing/simapp"
+ simcmd "github.com/cosmos/ibc-go/testing/simapp/simd/cmd"
+)
+
+var testMbm = module.NewBasicManager(genutil.AppModuleBasic{})
+
+func TestAddGenesisAccountCmd(t *testing.T) {
+ _, _, addr1 := testdata.KeyTestPubAddr()
+ tests := []struct {
+ name string
+ addr string
+ denom string
+ expectErr bool
+ }{
+ {
+ name: "invalid address",
+ addr: "",
+ denom: "1000atom",
+ expectErr: true,
+ },
+ {
+ name: "valid address",
+ addr: addr1.String(),
+ denom: "1000atom",
+ expectErr: false,
+ },
+ {
+ name: "multiple denoms",
+ addr: addr1.String(),
+ denom: "1000atom, 2000stake",
+ expectErr: false,
+ },
+ }
+
+ for _, tc := range tests {
+ tc := tc
+ t.Run(tc.name, func(t *testing.T) {
+ home := t.TempDir()
+ logger := log.NewNopLogger()
+ cfg, err := genutiltest.CreateDefaultTendermintConfig(home)
+ require.NoError(t, err)
+
+ appCodec := simapp.MakeTestEncodingConfig().Marshaler
+ err = genutiltest.ExecInitCmd(testMbm, home, appCodec)
+ require.NoError(t, err)
+
+ serverCtx := server.NewContext(viper.New(), cfg, logger)
+ clientCtx := client.Context{}.WithJSONMarshaler(appCodec).WithHomeDir(home)
+
+ ctx := context.Background()
+ ctx = context.WithValue(ctx, client.ClientContextKey, &clientCtx)
+ ctx = context.WithValue(ctx, server.ServerContextKey, serverCtx)
+
+ cmd := simcmd.AddGenesisAccountCmd(home)
+ cmd.SetArgs([]string{
+ tc.addr,
+ tc.denom,
+ fmt.Sprintf("--%s=%s", flags.FlagHome, home)})
+
+ if tc.expectErr {
+ require.Error(t, cmd.ExecuteContext(ctx))
+ } else {
+ require.NoError(t, cmd.ExecuteContext(ctx))
+ }
+ })
+ }
+}
diff --git a/testing/simapp/simd/cmd/root.go b/testing/simapp/simd/cmd/root.go
new file mode 100644
index 00000000..af9d3c1b
--- /dev/null
+++ b/testing/simapp/simd/cmd/root.go
@@ -0,0 +1,230 @@
+package cmd
+
+import (
+ "errors"
+ "io"
+ "os"
+ "path/filepath"
+
+ "github.com/spf13/cast"
+ "github.com/spf13/cobra"
+ tmcli "github.com/tendermint/tendermint/libs/cli"
+ "github.com/tendermint/tendermint/libs/log"
+ dbm "github.com/tendermint/tm-db"
+
+ "github.com/cosmos/cosmos-sdk/baseapp"
+ "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/client/debug"
+ "github.com/cosmos/cosmos-sdk/client/flags"
+ "github.com/cosmos/cosmos-sdk/client/keys"
+ "github.com/cosmos/cosmos-sdk/client/rpc"
+ "github.com/cosmos/cosmos-sdk/server"
+ servertypes "github.com/cosmos/cosmos-sdk/server/types"
+ "github.com/cosmos/cosmos-sdk/snapshots"
+ "github.com/cosmos/cosmos-sdk/store"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ authclient "github.com/cosmos/cosmos-sdk/x/auth/client"
+ authcmd "github.com/cosmos/cosmos-sdk/x/auth/client/cli"
+ "github.com/cosmos/cosmos-sdk/x/auth/types"
+ vestingcli "github.com/cosmos/cosmos-sdk/x/auth/vesting/client/cli"
+ banktypes "github.com/cosmos/cosmos-sdk/x/bank/types"
+ "github.com/cosmos/cosmos-sdk/x/crisis"
+ genutilcli "github.com/cosmos/cosmos-sdk/x/genutil/client/cli"
+ "github.com/cosmos/ibc-go/testing/simapp"
+ "github.com/cosmos/ibc-go/testing/simapp/params"
+)
+
+// NewRootCmd creates a new root command for simd. It is called once in the
+// main function.
+func NewRootCmd() (*cobra.Command, params.EncodingConfig) {
+ encodingConfig := simapp.MakeTestEncodingConfig()
+ initClientCtx := client.Context{}.
+ WithJSONMarshaler(encodingConfig.Marshaler).
+ WithInterfaceRegistry(encodingConfig.InterfaceRegistry).
+ WithTxConfig(encodingConfig.TxConfig).
+ WithLegacyAmino(encodingConfig.Amino).
+ WithInput(os.Stdin).
+ WithAccountRetriever(types.AccountRetriever{}).
+ WithBroadcastMode(flags.BroadcastBlock).
+ WithHomeDir(simapp.DefaultNodeHome)
+
+ rootCmd := &cobra.Command{
+ Use: "simd",
+ Short: "simulation app",
+ PersistentPreRunE: func(cmd *cobra.Command, _ []string) error {
+ if err := client.SetCmdClientContextHandler(initClientCtx, cmd); err != nil {
+ return err
+ }
+
+ return server.InterceptConfigsPreRunHandler(cmd)
+ },
+ }
+
+ initRootCmd(rootCmd, encodingConfig)
+
+ return rootCmd, encodingConfig
+}
+
+func initRootCmd(rootCmd *cobra.Command, encodingConfig params.EncodingConfig) {
+ authclient.Codec = encodingConfig.Marshaler
+
+ rootCmd.AddCommand(
+ genutilcli.InitCmd(simapp.ModuleBasics, simapp.DefaultNodeHome),
+ genutilcli.CollectGenTxsCmd(banktypes.GenesisBalancesIterator{}, simapp.DefaultNodeHome),
+ genutilcli.MigrateGenesisCmd(),
+ genutilcli.GenTxCmd(simapp.ModuleBasics, encodingConfig.TxConfig, banktypes.GenesisBalancesIterator{}, simapp.DefaultNodeHome),
+ genutilcli.ValidateGenesisCmd(simapp.ModuleBasics),
+ AddGenesisAccountCmd(simapp.DefaultNodeHome),
+ tmcli.NewCompletionCmd(rootCmd, true),
+ testnetCmd(simapp.ModuleBasics, banktypes.GenesisBalancesIterator{}),
+ debug.Cmd(),
+ )
+
+ a := appCreator{encodingConfig}
+ server.AddCommands(rootCmd, simapp.DefaultNodeHome, a.newApp, a.appExport, addModuleInitFlags)
+
+ // add keybase, auxiliary RPC, query, and tx child commands
+ rootCmd.AddCommand(
+ rpc.StatusCommand(),
+ queryCommand(),
+ txCommand(),
+ keys.Commands(simapp.DefaultNodeHome),
+ )
+
+ // add rosetta
+ rootCmd.AddCommand(server.RosettaCommand(encodingConfig.InterfaceRegistry, encodingConfig.Marshaler))
+}
+
+func addModuleInitFlags(startCmd *cobra.Command) {
+ crisis.AddModuleInitFlags(startCmd)
+}
+
+func queryCommand() *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "query",
+ Aliases: []string{"q"},
+ Short: "Querying subcommands",
+ DisableFlagParsing: true,
+ SuggestionsMinimumDistance: 2,
+ RunE: client.ValidateCmd,
+ }
+
+ cmd.AddCommand(
+ authcmd.GetAccountCmd(),
+ rpc.ValidatorCommand(),
+ rpc.BlockCommand(),
+ authcmd.QueryTxsByEventsCmd(),
+ authcmd.QueryTxCmd(),
+ )
+
+ simapp.ModuleBasics.AddQueryCommands(cmd)
+ cmd.PersistentFlags().String(flags.FlagChainID, "", "The network chain ID")
+
+ return cmd
+}
+
+func txCommand() *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "tx",
+ Short: "Transactions subcommands",
+ DisableFlagParsing: true,
+ SuggestionsMinimumDistance: 2,
+ RunE: client.ValidateCmd,
+ }
+
+ cmd.AddCommand(
+ authcmd.GetSignCommand(),
+ authcmd.GetSignBatchCommand(),
+ authcmd.GetMultiSignCommand(),
+ authcmd.GetMultiSignBatchCmd(),
+ authcmd.GetValidateSignaturesCommand(),
+ flags.LineBreak,
+ authcmd.GetBroadcastCommand(),
+ authcmd.GetEncodeCommand(),
+ authcmd.GetDecodeCommand(),
+ flags.LineBreak,
+ vestingcli.GetTxCmd(),
+ )
+
+ simapp.ModuleBasics.AddTxCommands(cmd)
+ cmd.PersistentFlags().String(flags.FlagChainID, "", "The network chain ID")
+
+ return cmd
+}
+
+type appCreator struct {
+ encCfg params.EncodingConfig
+}
+
+// newApp is an AppCreator
+func (a appCreator) newApp(logger log.Logger, db dbm.DB, traceStore io.Writer, appOpts servertypes.AppOptions) servertypes.Application {
+ var cache sdk.MultiStorePersistentCache
+
+ if cast.ToBool(appOpts.Get(server.FlagInterBlockCache)) {
+ cache = store.NewCommitKVStoreCacheManager()
+ }
+
+ skipUpgradeHeights := make(map[int64]bool)
+ for _, h := range cast.ToIntSlice(appOpts.Get(server.FlagUnsafeSkipUpgrades)) {
+ skipUpgradeHeights[int64(h)] = true
+ }
+
+ pruningOpts, err := server.GetPruningOptionsFromFlags(appOpts)
+ if err != nil {
+ panic(err)
+ }
+
+ snapshotDir := filepath.Join(cast.ToString(appOpts.Get(flags.FlagHome)), "data", "snapshots")
+ snapshotDB, err := sdk.NewLevelDB("metadata", snapshotDir)
+ if err != nil {
+ panic(err)
+ }
+ snapshotStore, err := snapshots.NewStore(snapshotDB, snapshotDir)
+ if err != nil {
+ panic(err)
+ }
+
+ return simapp.NewSimApp(
+ logger, db, traceStore, true, skipUpgradeHeights,
+ cast.ToString(appOpts.Get(flags.FlagHome)),
+ cast.ToUint(appOpts.Get(server.FlagInvCheckPeriod)),
+ a.encCfg,
+ appOpts,
+ baseapp.SetPruning(pruningOpts),
+ baseapp.SetMinGasPrices(cast.ToString(appOpts.Get(server.FlagMinGasPrices))),
+ baseapp.SetHaltHeight(cast.ToUint64(appOpts.Get(server.FlagHaltHeight))),
+ baseapp.SetHaltTime(cast.ToUint64(appOpts.Get(server.FlagHaltTime))),
+ baseapp.SetMinRetainBlocks(cast.ToUint64(appOpts.Get(server.FlagMinRetainBlocks))),
+ baseapp.SetInterBlockCache(cache),
+ baseapp.SetTrace(cast.ToBool(appOpts.Get(server.FlagTrace))),
+ baseapp.SetIndexEvents(cast.ToStringSlice(appOpts.Get(server.FlagIndexEvents))),
+ baseapp.SetSnapshotStore(snapshotStore),
+ baseapp.SetSnapshotInterval(cast.ToUint64(appOpts.Get(server.FlagStateSyncSnapshotInterval))),
+ baseapp.SetSnapshotKeepRecent(cast.ToUint32(appOpts.Get(server.FlagStateSyncSnapshotKeepRecent))),
+ )
+}
+
+// appExport creates a new simapp (optionally at a given height)
+// and exports state.
+func (a appCreator) appExport(
+ logger log.Logger, db dbm.DB, traceStore io.Writer, height int64, forZeroHeight bool, jailAllowedAddrs []string,
+ appOpts servertypes.AppOptions) (servertypes.ExportedApp, error) {
+
+ var simApp *simapp.SimApp
+ homePath, ok := appOpts.Get(flags.FlagHome).(string)
+ if !ok || homePath == "" {
+ return servertypes.ExportedApp{}, errors.New("application home not set")
+ }
+
+ if height != -1 {
+ simApp = simapp.NewSimApp(logger, db, traceStore, false, map[int64]bool{}, homePath, uint(1), a.encCfg, appOpts)
+
+ if err := simApp.LoadHeight(height); err != nil {
+ return servertypes.ExportedApp{}, err
+ }
+ } else {
+ simApp = simapp.NewSimApp(logger, db, traceStore, true, map[int64]bool{}, homePath, uint(1), a.encCfg, appOpts)
+ }
+
+ return simApp.ExportAppStateAndValidators(forZeroHeight, jailAllowedAddrs)
+}
diff --git a/testing/simapp/simd/cmd/testnet.go b/testing/simapp/simd/cmd/testnet.go
new file mode 100644
index 00000000..0717b398
--- /dev/null
+++ b/testing/simapp/simd/cmd/testnet.go
@@ -0,0 +1,400 @@
+package cmd
+
+// DONTCOVER
+
+import (
+ "bufio"
+ "encoding/json"
+ "fmt"
+ "net"
+ "os"
+ "path/filepath"
+
+ "github.com/spf13/cobra"
+ tmconfig "github.com/tendermint/tendermint/config"
+ tmos "github.com/tendermint/tendermint/libs/os"
+ tmrand "github.com/tendermint/tendermint/libs/rand"
+ "github.com/tendermint/tendermint/types"
+ tmtime "github.com/tendermint/tendermint/types/time"
+
+ "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/client/flags"
+ "github.com/cosmos/cosmos-sdk/client/tx"
+ "github.com/cosmos/cosmos-sdk/crypto/hd"
+ "github.com/cosmos/cosmos-sdk/crypto/keyring"
+ cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types"
+ "github.com/cosmos/cosmos-sdk/server"
+ srvconfig "github.com/cosmos/cosmos-sdk/server/config"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/types/module"
+ authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
+ banktypes "github.com/cosmos/cosmos-sdk/x/bank/types"
+ "github.com/cosmos/cosmos-sdk/x/genutil"
+ genutiltypes "github.com/cosmos/cosmos-sdk/x/genutil/types"
+ stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types"
+)
+
+var (
+ flagNodeDirPrefix = "node-dir-prefix"
+ flagNumValidators = "v"
+ flagOutputDir = "output-dir"
+ flagNodeDaemonHome = "node-daemon-home"
+ flagStartingIPAddress = "starting-ip-address"
+)
+
+// testnetCmd returns the command that initializes all the files needed for a Tendermint testnet and its application.
+func testnetCmd(mbm module.BasicManager, genBalIterator banktypes.GenesisBalancesIterator) *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "testnet",
+ Short: "Initialize files for a simapp testnet",
+ Long: `testnet will create "v" number of directories and populate each with
+necessary files (private validator, genesis, config, etc.).
+
+Note, strict routability for addresses is turned off in the config file.
+
+Example:
+ simd testnet --v 4 --output-dir ./output --starting-ip-address 192.168.10.2
+ `,
+ RunE: func(cmd *cobra.Command, _ []string) error {
+ clientCtx, err := client.GetClientQueryContext(cmd)
+ if err != nil {
+ return err
+ }
+
+ serverCtx := server.GetServerContextFromCmd(cmd)
+ config := serverCtx.Config
+
+ outputDir, _ := cmd.Flags().GetString(flagOutputDir)
+ keyringBackend, _ := cmd.Flags().GetString(flags.FlagKeyringBackend)
+ chainID, _ := cmd.Flags().GetString(flags.FlagChainID)
+ minGasPrices, _ := cmd.Flags().GetString(server.FlagMinGasPrices)
+ nodeDirPrefix, _ := cmd.Flags().GetString(flagNodeDirPrefix)
+ nodeDaemonHome, _ := cmd.Flags().GetString(flagNodeDaemonHome)
+ startingIPAddress, _ := cmd.Flags().GetString(flagStartingIPAddress)
+ numValidators, _ := cmd.Flags().GetInt(flagNumValidators)
+ algo, _ := cmd.Flags().GetString(flags.FlagKeyAlgorithm)
+
+ return InitTestnet(
+ clientCtx, cmd, config, mbm, genBalIterator, outputDir, chainID, minGasPrices,
+ nodeDirPrefix, nodeDaemonHome, startingIPAddress, keyringBackend, algo, numValidators,
+ )
+ },
+ }
+
+ cmd.Flags().Int(flagNumValidators, 4, "Number of validators to initialize the testnet with")
+ cmd.Flags().StringP(flagOutputDir, "o", "./mytestnet", "Directory to store initialization data for the testnet")
+ cmd.Flags().String(flagNodeDirPrefix, "node", "Prefix for each node's directory name (\"node\" results in node0, node1, ...)")
+ cmd.Flags().String(flagNodeDaemonHome, "simd", "Home directory of the node's daemon configuration")
+ cmd.Flags().String(flagStartingIPAddress, "192.168.0.1", "Starting IP address (192.168.0.1 results in persistent peers list ID0@192.168.0.1:46656, ID1@192.168.0.2:46656, ...)")
+ cmd.Flags().String(flags.FlagChainID, "", "genesis file chain-id, if left blank will be randomly created")
+ cmd.Flags().String(server.FlagMinGasPrices, fmt.Sprintf("0.000006%s", sdk.DefaultBondDenom), "Minimum gas prices to accept for transactions; All fees in a tx must meet this minimum (e.g. 0.01photino,0.001stake)")
+ cmd.Flags().String(flags.FlagKeyringBackend, flags.DefaultKeyringBackend, "Select keyring's backend (os|file|test)")
+ cmd.Flags().String(flags.FlagKeyAlgorithm, string(hd.Secp256k1Type), "Key signing algorithm to generate keys for")
+
+ return cmd
+}
+
+const nodeDirPerm = 0755
+
+// InitTestnet initializes the testnet: it generates node directories, validator keys, genesis files and gentxs for each validator.
+func InitTestnet(
+ clientCtx client.Context,
+ cmd *cobra.Command,
+ nodeConfig *tmconfig.Config,
+ mbm module.BasicManager,
+ genBalIterator banktypes.GenesisBalancesIterator,
+ outputDir,
+ chainID,
+ minGasPrices,
+ nodeDirPrefix,
+ nodeDaemonHome,
+ startingIPAddress,
+ keyringBackend,
+ algoStr string,
+ numValidators int,
+) error {
+
+ if chainID == "" {
+ chainID = "chain-" + tmrand.NewRand().Str(6)
+ }
+
+ nodeIDs := make([]string, numValidators)
+ valPubKeys := make([]cryptotypes.PubKey, numValidators)
+
+ simappConfig := srvconfig.DefaultConfig()
+ simappConfig.MinGasPrices = minGasPrices
+ simappConfig.API.Enable = true
+ simappConfig.Telemetry.Enabled = true
+ simappConfig.Telemetry.PrometheusRetentionTime = 60
+ simappConfig.Telemetry.EnableHostnameLabel = false
+ simappConfig.Telemetry.GlobalLabels = [][]string{{"chain_id", chainID}}
+
+ var (
+ genAccounts []authtypes.GenesisAccount
+ genBalances []banktypes.Balance
+ genFiles []string
+ )
+
+ inBuf := bufio.NewReader(cmd.InOrStdin())
+ // generate private keys, node IDs, and initial transactions
+ for i := 0; i < numValidators; i++ {
+ nodeDirName := fmt.Sprintf("%s%d", nodeDirPrefix, i)
+ nodeDir := filepath.Join(outputDir, nodeDirName, nodeDaemonHome)
+ gentxsDir := filepath.Join(outputDir, "gentxs")
+
+ nodeConfig.SetRoot(nodeDir)
+ nodeConfig.RPC.ListenAddress = "tcp://0.0.0.0:26657"
+
+ if err := os.MkdirAll(filepath.Join(nodeDir, "config"), nodeDirPerm); err != nil {
+ _ = os.RemoveAll(outputDir)
+ return err
+ }
+
+ nodeConfig.Moniker = nodeDirName
+
+ ip, err := getIP(i, startingIPAddress)
+ if err != nil {
+ _ = os.RemoveAll(outputDir)
+ return err
+ }
+
+ nodeIDs[i], valPubKeys[i], err = genutil.InitializeNodeValidatorFiles(nodeConfig)
+ if err != nil {
+ _ = os.RemoveAll(outputDir)
+ return err
+ }
+
+ memo := fmt.Sprintf("%s@%s:26656", nodeIDs[i], ip)
+ genFiles = append(genFiles, nodeConfig.GenesisFile())
+
+ kb, err := keyring.New(sdk.KeyringServiceName(), keyringBackend, nodeDir, inBuf)
+ if err != nil {
+ return err
+ }
+
+ keyringAlgos, _ := kb.SupportedAlgorithms()
+ algo, err := keyring.NewSigningAlgoFromString(algoStr, keyringAlgos)
+ if err != nil {
+ return err
+ }
+
+ addr, secret, err := server.GenerateSaveCoinKey(kb, nodeDirName, true, algo)
+ if err != nil {
+ _ = os.RemoveAll(outputDir)
+ return err
+ }
+
+ info := map[string]string{"secret": secret}
+
+ cliPrint, err := json.Marshal(info)
+ if err != nil {
+ return err
+ }
+
+ // save private key seed words
+ if err := writeFile(fmt.Sprintf("%v.json", "key_seed"), nodeDir, cliPrint); err != nil {
+ return err
+ }
+
+ accTokens := sdk.TokensFromConsensusPower(1000)
+ accStakingTokens := sdk.TokensFromConsensusPower(500)
+ coins := sdk.Coins{
+ sdk.NewCoin(fmt.Sprintf("%stoken", nodeDirName), accTokens),
+ sdk.NewCoin(sdk.DefaultBondDenom, accStakingTokens),
+ }
+
+ genBalances = append(genBalances, banktypes.Balance{Address: addr.String(), Coins: coins.Sort()})
+ genAccounts = append(genAccounts, authtypes.NewBaseAccount(addr, nil, 0, 0))
+
+ valTokens := sdk.TokensFromConsensusPower(100)
+ createValMsg, err := stakingtypes.NewMsgCreateValidator(
+ sdk.ValAddress(addr),
+ valPubKeys[i],
+ sdk.NewCoin(sdk.DefaultBondDenom, valTokens),
+ stakingtypes.NewDescription(nodeDirName, "", "", "", ""),
+ stakingtypes.NewCommissionRates(sdk.OneDec(), sdk.OneDec(), sdk.OneDec()),
+ sdk.OneInt(),
+ )
+ if err != nil {
+ return err
+ }
+
+ txBuilder := clientCtx.TxConfig.NewTxBuilder()
+ if err := txBuilder.SetMsgs(createValMsg); err != nil {
+ return err
+ }
+
+ txBuilder.SetMemo(memo)
+
+ txFactory := tx.Factory{}
+ txFactory = txFactory.
+ WithChainID(chainID).
+ WithMemo(memo).
+ WithKeybase(kb).
+ WithTxConfig(clientCtx.TxConfig)
+
+ if err := tx.Sign(txFactory, nodeDirName, txBuilder, true); err != nil {
+ return err
+ }
+
+ txBz, err := clientCtx.TxConfig.TxJSONEncoder()(txBuilder.GetTx())
+ if err != nil {
+ return err
+ }
+
+ if err := writeFile(fmt.Sprintf("%v.json", nodeDirName), gentxsDir, txBz); err != nil {
+ return err
+ }
+
+ srvconfig.WriteConfigFile(filepath.Join(nodeDir, "config/app.toml"), simappConfig)
+ }
+
+ if err := initGenFiles(clientCtx, mbm, chainID, genAccounts, genBalances, genFiles, numValidators); err != nil {
+ return err
+ }
+
+ err := collectGenFiles(
+ clientCtx, nodeConfig, chainID, nodeIDs, valPubKeys, numValidators,
+ outputDir, nodeDirPrefix, nodeDaemonHome, genBalIterator,
+ )
+ if err != nil {
+ return err
+ }
+
+ cmd.PrintErrf("Successfully initialized %d node directories\n", numValidators)
+ return nil
+}
+
+func initGenFiles(
+ clientCtx client.Context, mbm module.BasicManager, chainID string,
+ genAccounts []authtypes.GenesisAccount, genBalances []banktypes.Balance,
+ genFiles []string, numValidators int,
+) error {
+
+ appGenState := mbm.DefaultGenesis(clientCtx.JSONMarshaler)
+
+ // set the accounts in the genesis state
+ var authGenState authtypes.GenesisState
+ clientCtx.JSONMarshaler.MustUnmarshalJSON(appGenState[authtypes.ModuleName], &authGenState)
+
+ accounts, err := authtypes.PackAccounts(genAccounts)
+ if err != nil {
+ return err
+ }
+
+ authGenState.Accounts = accounts
+ appGenState[authtypes.ModuleName] = clientCtx.JSONMarshaler.MustMarshalJSON(&authGenState)
+
+ // set the balances in the genesis state
+ var bankGenState banktypes.GenesisState
+ clientCtx.JSONMarshaler.MustUnmarshalJSON(appGenState[banktypes.ModuleName], &bankGenState)
+
+ bankGenState.Balances = genBalances
+ appGenState[banktypes.ModuleName] = clientCtx.JSONMarshaler.MustMarshalJSON(&bankGenState)
+
+ appGenStateJSON, err := json.MarshalIndent(appGenState, "", " ")
+ if err != nil {
+ return err
+ }
+
+ genDoc := types.GenesisDoc{
+ ChainID: chainID,
+ AppState: appGenStateJSON,
+ Validators: nil,
+ }
+
+ // generate empty genesis files for each validator and save
+ for i := 0; i < numValidators; i++ {
+ if err := genDoc.SaveAs(genFiles[i]); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func collectGenFiles(
+ clientCtx client.Context, nodeConfig *tmconfig.Config, chainID string,
+ nodeIDs []string, valPubKeys []cryptotypes.PubKey, numValidators int,
+ outputDir, nodeDirPrefix, nodeDaemonHome string, genBalIterator banktypes.GenesisBalancesIterator,
+) error {
+
+ var appState json.RawMessage
+ genTime := tmtime.Now()
+
+ for i := 0; i < numValidators; i++ {
+ nodeDirName := fmt.Sprintf("%s%d", nodeDirPrefix, i)
+ nodeDir := filepath.Join(outputDir, nodeDirName, nodeDaemonHome)
+ gentxsDir := filepath.Join(outputDir, "gentxs")
+ nodeConfig.Moniker = nodeDirName
+
+ nodeConfig.SetRoot(nodeDir)
+
+ nodeID, valPubKey := nodeIDs[i], valPubKeys[i]
+ initCfg := genutiltypes.NewInitConfig(chainID, gentxsDir, nodeID, valPubKey)
+
+ genDoc, err := types.GenesisDocFromFile(nodeConfig.GenesisFile())
+ if err != nil {
+ return err
+ }
+
+ nodeAppState, err := genutil.GenAppStateFromConfig(clientCtx.JSONMarshaler, clientCtx.TxConfig, nodeConfig, initCfg, *genDoc, genBalIterator)
+ if err != nil {
+ return err
+ }
+
+ if appState == nil {
+ // set the canonical application state (they should not differ)
+ appState = nodeAppState
+ }
+
+ genFile := nodeConfig.GenesisFile()
+
+ // overwrite each validator's genesis file to have a canonical genesis time
+ if err := genutil.ExportGenesisFileWithTime(genFile, chainID, nil, appState, genTime); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func getIP(i int, startingIPAddr string) (ip string, err error) {
+ if len(startingIPAddr) == 0 {
+ ip, err = server.ExternalIP()
+ if err != nil {
+ return "", err
+ }
+ return ip, nil
+ }
+ return calculateIP(startingIPAddr, i)
+}
+
+func calculateIP(ip string, i int) (string, error) {
+ ipv4 := net.ParseIP(ip).To4()
+ if ipv4 == nil {
+ return "", fmt.Errorf("%v: non ipv4 address", ip)
+ }
+
+ for j := 0; j < i; j++ {
+ ipv4[3]++
+ }
+
+ return ipv4.String(), nil
+}
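
For reference, calculateIP above simply advances the last octet of the starting address once per validator index; a small hypothetical check (assuming the usual testing and require imports, which this file does not need):

    ip, err := calculateIP("192.168.0.1", 3) // node3: three increments of the last octet
    require.NoError(t, err)
    require.Equal(t, "192.168.0.4", ip)
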
+
+func writeFile(name string, dir string, contents []byte) error {
+ writePath := filepath.Join(dir)
+ file := filepath.Join(writePath, name)
+
+ err := tmos.EnsureDir(writePath, 0755)
+ if err != nil {
+ return err
+ }
+
+ err = tmos.WriteFile(file, contents, 0644)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
diff --git a/testing/simapp/simd/main.go b/testing/simapp/simd/main.go
new file mode 100644
index 00000000..a389f795
--- /dev/null
+++ b/testing/simapp/simd/main.go
@@ -0,0 +1,24 @@
+package main
+
+import (
+ "os"
+
+ "github.com/cosmos/cosmos-sdk/server"
+ svrcmd "github.com/cosmos/cosmos-sdk/server/cmd"
+ "github.com/cosmos/ibc-go/testing/simapp"
+ "github.com/cosmos/ibc-go/testing/simapp/simd/cmd"
+)
+
+func main() {
+ rootCmd, _ := cmd.NewRootCmd()
+
+ if err := svrcmd.Execute(rootCmd, simapp.DefaultNodeHome); err != nil {
+ switch e := err.(type) {
+ case server.ErrorCode:
+ os.Exit(e.Code)
+
+ default:
+ os.Exit(1)
+ }
+ }
+}
diff --git a/testing/simapp/state.go b/testing/simapp/state.go
new file mode 100644
index 00000000..6b52fb45
--- /dev/null
+++ b/testing/simapp/state.go
@@ -0,0 +1,233 @@
+package simapp
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "math/rand"
+ "time"
+
+ tmjson "github.com/tendermint/tendermint/libs/json"
+ tmtypes "github.com/tendermint/tendermint/types"
+
+ "github.com/cosmos/cosmos-sdk/codec"
+ "github.com/cosmos/cosmos-sdk/crypto/keys/secp256k1"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/types/module"
+ simtypes "github.com/cosmos/cosmos-sdk/types/simulation"
+ authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
+ banktypes "github.com/cosmos/cosmos-sdk/x/bank/types"
+ stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types"
+ simappparams "github.com/cosmos/ibc-go/testing/simapp/params"
+)
+
+// AppStateFn returns the initial application state using a genesis or the simulation parameters.
+// It panics if the user provides files for both of them.
+// If a file is not given for the genesis or the sim params, it creates a randomized one.
+func AppStateFn(cdc codec.JSONMarshaler, simManager *module.SimulationManager) simtypes.AppStateFn {
+ return func(r *rand.Rand, accs []simtypes.Account, config simtypes.Config,
+ ) (appState json.RawMessage, simAccs []simtypes.Account, chainID string, genesisTimestamp time.Time) {
+
+ if FlagGenesisTimeValue == 0 {
+ genesisTimestamp = simtypes.RandTimestamp(r)
+ } else {
+ genesisTimestamp = time.Unix(FlagGenesisTimeValue, 0)
+ }
+
+ chainID = config.ChainID
+ switch {
+ case config.ParamsFile != "" && config.GenesisFile != "":
+ panic("cannot provide both a genesis file and a params file")
+
+ case config.GenesisFile != "":
+ // take the app state, accounts and chain-id from the provided genesis file; the chain-id overrides the default one from simapp
+ genesisDoc, accounts := AppStateFromGenesisFileFn(r, cdc, config.GenesisFile)
+
+ if FlagGenesisTimeValue == 0 {
+ // use the genesis timestamp if no custom timestamp is provided (i.e. no random timestamp)
+ genesisTimestamp = genesisDoc.GenesisTime
+ }
+
+ appState = genesisDoc.AppState
+ chainID = genesisDoc.ChainID
+ simAccs = accounts
+
+ case config.ParamsFile != "":
+ appParams := make(simtypes.AppParams)
+ bz, err := ioutil.ReadFile(config.ParamsFile)
+ if err != nil {
+ panic(err)
+ }
+
+ err = json.Unmarshal(bz, &appParams)
+ if err != nil {
+ panic(err)
+ }
+ appState, simAccs = AppStateRandomizedFn(simManager, r, cdc, accs, genesisTimestamp, appParams)
+
+ default:
+ appParams := make(simtypes.AppParams)
+ appState, simAccs = AppStateRandomizedFn(simManager, r, cdc, accs, genesisTimestamp, appParams)
+ }
+
+ rawState := make(map[string]json.RawMessage)
+ err := json.Unmarshal(appState, &rawState)
+ if err != nil {
+ panic(err)
+ }
+
+ stakingStateBz, ok := rawState[stakingtypes.ModuleName]
+ if !ok {
+ panic("staking genesis state is missing")
+ }
+
+ stakingState := new(stakingtypes.GenesisState)
+ err = cdc.UnmarshalJSON(stakingStateBz, stakingState)
+ if err != nil {
+ panic(err)
+ }
+ // compute not bonded balance
+ notBondedTokens := sdk.ZeroInt()
+ for _, val := range stakingState.Validators {
+ if val.Status != stakingtypes.Unbonded {
+ continue
+ }
+ notBondedTokens = notBondedTokens.Add(val.GetTokens())
+ }
+ notBondedCoins := sdk.NewCoin(stakingState.Params.BondDenom, notBondedTokens)
+ // edit bank state to make it have the not bonded pool tokens
+ bankStateBz, ok := rawState[banktypes.ModuleName]
+ // TODO(fdymylja/jonathan): should we panic in this case
+ if !ok {
+ panic("bank genesis state is missing")
+ }
+ bankState := new(banktypes.GenesisState)
+ err = cdc.UnmarshalJSON(bankStateBz, bankState)
+ if err != nil {
+ panic(err)
+ }
+
+ bankState.Balances = append(bankState.Balances, banktypes.Balance{
+ Address: authtypes.NewModuleAddress(stakingtypes.NotBondedPoolName).String(),
+ Coins: sdk.NewCoins(notBondedCoins),
+ })
+
+ // change appState back
+ rawState[stakingtypes.ModuleName] = cdc.MustMarshalJSON(stakingState)
+ rawState[banktypes.ModuleName] = cdc.MustMarshalJSON(bankState)
+
+ // replace appstate
+ appState, err = json.Marshal(rawState)
+ if err != nil {
+ panic(err)
+ }
+ return appState, simAccs, chainID, genesisTimestamp
+ }
+}
+
+// AppStateRandomizedFn calls each module's GenesisState generator function
+// and creates the randomized simulation params.
+func AppStateRandomizedFn(
+ simManager *module.SimulationManager, r *rand.Rand, cdc codec.JSONMarshaler,
+ accs []simtypes.Account, genesisTimestamp time.Time, appParams simtypes.AppParams,
+) (json.RawMessage, []simtypes.Account) {
+ numAccs := int64(len(accs))
+ genesisState := NewDefaultGenesisState(cdc)
+
+ // generate a random amount of initial stake coins and a random initial
+ // number of bonded accounts
+ var initialStake, numInitiallyBonded int64
+ appParams.GetOrGenerate(
+ cdc, simappparams.StakePerAccount, &initialStake, r,
+ func(r *rand.Rand) { initialStake = r.Int63n(1e12) },
+ )
+ appParams.GetOrGenerate(
+ cdc, simappparams.InitiallyBondedValidators, &numInitiallyBonded, r,
+ func(r *rand.Rand) { numInitiallyBonded = int64(r.Intn(300)) },
+ )
+
+ if numInitiallyBonded > numAccs {
+ numInitiallyBonded = numAccs
+ }
+
+ fmt.Printf(
+ `Selected randomly generated parameters for simulated genesis:
+{
+ stake_per_account: "%d",
+ initially_bonded_validators: "%d"
+}
+`, initialStake, numInitiallyBonded,
+ )
+
+ simState := &module.SimulationState{
+ AppParams: appParams,
+ Cdc: cdc,
+ Rand: r,
+ GenState: genesisState,
+ Accounts: accs,
+ InitialStake: initialStake,
+ NumBonded: numInitiallyBonded,
+ GenTimestamp: genesisTimestamp,
+ }
+
+ simManager.GenerateGenesisStates(simState)
+
+ appState, err := json.Marshal(genesisState)
+ if err != nil {
+ panic(err)
+ }
+
+ return appState, accs
+}
+
+// AppStateFromGenesisFileFn is a utility function that generates the genesis AppState
+// and simulation accounts from a genesis.json file.
+func AppStateFromGenesisFileFn(r io.Reader, cdc codec.JSONMarshaler, genesisFile string) (tmtypes.GenesisDoc, []simtypes.Account) {
+ bytes, err := ioutil.ReadFile(genesisFile)
+ if err != nil {
+ panic(err)
+ }
+
+ var genesis tmtypes.GenesisDoc
+ // NOTE: Tendermint uses a custom JSON decoder for GenesisDoc
+ err = tmjson.Unmarshal(bytes, &genesis)
+ if err != nil {
+ panic(err)
+ }
+
+ var appState GenesisState
+ err = json.Unmarshal(genesis.AppState, &appState)
+ if err != nil {
+ panic(err)
+ }
+
+ var authGenesis authtypes.GenesisState
+ if appState[authtypes.ModuleName] != nil {
+ cdc.MustUnmarshalJSON(appState[authtypes.ModuleName], &authGenesis)
+ }
+
+ newAccs := make([]simtypes.Account, len(authGenesis.Accounts))
+ for i, acc := range authGenesis.Accounts {
+ // Pick a random private key, since we don't know the actual key.
+ // This should be fine as it's only used for mock Tendermint validators,
+ // which never actually use these keys to sign.
+ privkeySeed := make([]byte, 15)
+ if _, err := r.Read(privkeySeed); err != nil {
+ panic(err)
+ }
+
+ privKey := secp256k1.GenPrivKeyFromSecret(privkeySeed)
+
+ a, ok := acc.GetCachedValue().(authtypes.AccountI)
+ if !ok {
+ panic("expected account")
+ }
+
+ // create simulator accounts
+ simAcc := simtypes.Account{PrivKey: privKey, PubKey: privKey.PubKey(), Address: a.GetAddress()}
+ newAccs[i] = simAcc
+ }
+
+ return genesis, newAccs
+}
diff --git a/testing/simapp/test_helpers.go b/testing/simapp/test_helpers.go
new file mode 100644
index 00000000..69af7433
--- /dev/null
+++ b/testing/simapp/test_helpers.go
@@ -0,0 +1,448 @@
+package simapp
+
+import (
+ "bytes"
+ "encoding/hex"
+ "encoding/json"
+ "fmt"
+ "strconv"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+ abci "github.com/tendermint/tendermint/abci/types"
+ "github.com/tendermint/tendermint/libs/log"
+ tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
+ tmtypes "github.com/tendermint/tendermint/types"
+ dbm "github.com/tendermint/tm-db"
+
+ bam "github.com/cosmos/cosmos-sdk/baseapp"
+ "github.com/cosmos/cosmos-sdk/client"
+ codectypes "github.com/cosmos/cosmos-sdk/codec/types"
+ cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec"
+ "github.com/cosmos/cosmos-sdk/crypto/keys/ed25519"
+ cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/types/errors"
+ authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
+ banktypes "github.com/cosmos/cosmos-sdk/x/bank/types"
+ minttypes "github.com/cosmos/cosmos-sdk/x/mint/types"
+ stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types"
+ "github.com/cosmos/ibc-go/testing/simapp/helpers"
+)
+
+// DefaultConsensusParams defines the default Tendermint consensus params used in
+// SimApp testing.
+var DefaultConsensusParams = &abci.ConsensusParams{
+ Block: &abci.BlockParams{
+ MaxBytes: 200000,
+ MaxGas: 2000000,
+ },
+ Evidence: &tmproto.EvidenceParams{
+ MaxAgeNumBlocks: 302400,
+ MaxAgeDuration: 504 * time.Hour, // 3 weeks is the max duration
+ MaxBytes: 10000,
+ },
+ Validator: &tmproto.ValidatorParams{
+ PubKeyTypes: []string{
+ tmtypes.ABCIPubKeyTypeEd25519,
+ },
+ },
+}
+
+func setup(withGenesis bool, invCheckPeriod uint) (*SimApp, GenesisState) {
+ db := dbm.NewMemDB()
+ encCdc := MakeTestEncodingConfig()
+ app := NewSimApp(log.NewNopLogger(), db, nil, true, map[int64]bool{}, DefaultNodeHome, invCheckPeriod, encCdc, EmptyAppOptions{})
+ if withGenesis {
+ return app, NewDefaultGenesisState(encCdc.Marshaler)
+ }
+ return app, GenesisState{}
+}
+
+// Setup initializes a new SimApp. A Nop logger is set in SimApp.
+func Setup(isCheckTx bool) *SimApp {
+ app, genesisState := setup(!isCheckTx, 5)
+ if !isCheckTx {
+ // init chain must be called to stop deliverState from being nil
+ stateBytes, err := json.MarshalIndent(genesisState, "", " ")
+ if err != nil {
+ panic(err)
+ }
+
+ // Initialize the chain
+ app.InitChain(
+ abci.RequestInitChain{
+ Validators: []abci.ValidatorUpdate{},
+ ConsensusParams: DefaultConsensusParams,
+ AppStateBytes: stateBytes,
+ },
+ )
+ }
+
+ return app
+}
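
For reference, a minimal sketch of how a keeper test might build on Setup together with the AddTestAddrs helper defined further below; the snippet assumes it runs inside this package and is illustrative only:

    app := Setup(false) // runs InitChain with the default genesis

    ctx := app.BaseApp.NewContext(false, tmproto.Header{Height: app.LastBlockHeight()})
    addrs := AddTestAddrs(app, ctx, 2, sdk.NewInt(1000000)) // mint and fund two random accounts

    balance := app.BankKeeper.GetAllBalances(ctx, addrs[0]) // each address now holds 1000000 of the bond denom
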
+
+// SetupWithGenesisValSet initializes a new SimApp with a validator set and genesis accounts
+// that also act as delegators. For simplicity, each validator is bonded with a delegation
+// of one consensus engine unit (10^6) in the default token of the simapp from the first genesis
+// account. A Nop logger is set in SimApp.
+func SetupWithGenesisValSet(t *testing.T, valSet *tmtypes.ValidatorSet, genAccs []authtypes.GenesisAccount, balances ...banktypes.Balance) *SimApp {
+ app, genesisState := setup(true, 5)
+ // set genesis accounts
+ authGenesis := authtypes.NewGenesisState(authtypes.DefaultParams(), genAccs)
+ genesisState[authtypes.ModuleName] = app.AppCodec().MustMarshalJSON(authGenesis)
+
+ validators := make([]stakingtypes.Validator, 0, len(valSet.Validators))
+ delegations := make([]stakingtypes.Delegation, 0, len(valSet.Validators))
+
+ bondAmt := sdk.NewInt(1000000)
+
+ for _, val := range valSet.Validators {
+ pk, err := cryptocodec.FromTmPubKeyInterface(val.PubKey)
+ require.NoError(t, err)
+ pkAny, err := codectypes.NewAnyWithValue(pk)
+ require.NoError(t, err)
+ validator := stakingtypes.Validator{
+ OperatorAddress: sdk.ValAddress(val.Address).String(),
+ ConsensusPubkey: pkAny,
+ Jailed: false,
+ Status: stakingtypes.Bonded,
+ Tokens: bondAmt,
+ DelegatorShares: sdk.OneDec(),
+ Description: stakingtypes.Description{},
+ UnbondingHeight: int64(0),
+ UnbondingTime: time.Unix(0, 0).UTC(),
+ Commission: stakingtypes.NewCommission(sdk.ZeroDec(), sdk.ZeroDec(), sdk.ZeroDec()),
+ MinSelfDelegation: sdk.ZeroInt(),
+ }
+ validators = append(validators, validator)
+ delegations = append(delegations, stakingtypes.NewDelegation(genAccs[0].GetAddress(), val.Address.Bytes(), sdk.OneDec()))
+
+ }
+ // set validators and delegations
+ stakingGenesis := stakingtypes.NewGenesisState(stakingtypes.DefaultParams(), validators, delegations)
+ genesisState[stakingtypes.ModuleName] = app.AppCodec().MustMarshalJSON(stakingGenesis)
+
+ totalSupply := sdk.NewCoins()
+ for _, b := range balances {
+ // add genesis acc tokens and delegated tokens to total supply
+ totalSupply = totalSupply.Add(b.Coins.Add(sdk.NewCoin(sdk.DefaultBondDenom, bondAmt))...)
+ }
+
+ // add bonded amount to bonded pool module account
+ balances = append(balances, banktypes.Balance{
+ Address: authtypes.NewModuleAddress(stakingtypes.BondedPoolName).String(),
+ Coins: sdk.Coins{sdk.NewCoin(sdk.DefaultBondDenom, bondAmt)},
+ })
+
+ // update total supply
+ bankGenesis := banktypes.NewGenesisState(banktypes.DefaultGenesisState().Params, balances, totalSupply, []banktypes.Metadata{})
+ genesisState[banktypes.ModuleName] = app.AppCodec().MustMarshalJSON(bankGenesis)
+
+ stateBytes, err := json.MarshalIndent(genesisState, "", " ")
+ require.NoError(t, err)
+
+ // init chain will set the validator set and initialize the genesis accounts
+ app.InitChain(
+ abci.RequestInitChain{
+ Validators: []abci.ValidatorUpdate{},
+ ConsensusParams: DefaultConsensusParams,
+ AppStateBytes: stateBytes,
+ },
+ )
+
+ // commit genesis changes
+ app.Commit()
+ app.BeginBlock(abci.RequestBeginBlock{Header: tmproto.Header{
+ Height: app.LastBlockHeight() + 1,
+ AppHash: app.LastCommitID().Hash,
+ ValidatorsHash: valSet.Hash(),
+ NextValidatorsHash: valSet.Hash(),
+ }})
+
+ return app
+}
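
For reference, a hedged sketch of how the valSet and genesis-account arguments might be built; it assumes the SDK's testutil/mock and crypto/keys/secp256k1 packages are imported, which this file does not do:

    privVal := mock.NewPV()
    tmPub, err := privVal.GetPubKey()
    require.NoError(t, err)
    valSet := tmtypes.NewValidatorSet([]*tmtypes.Validator{tmtypes.NewValidator(tmPub, 1)})

    senderPriv := secp256k1.GenPrivKey()
    acc := authtypes.NewBaseAccount(senderPriv.PubKey().Address().Bytes(), senderPriv.PubKey(), 0, 0)
    balance := banktypes.Balance{
        Address: acc.GetAddress().String(),
        Coins:   sdk.NewCoins(sdk.NewCoin(sdk.DefaultBondDenom, sdk.NewInt(100000000))),
    }

    app := SetupWithGenesisValSet(t, valSet, []authtypes.GenesisAccount{acc}, balance)
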
+
+// SetupWithGenesisAccounts initializes a new SimApp with the provided genesis
+// accounts and possible balances.
+func SetupWithGenesisAccounts(genAccs []authtypes.GenesisAccount, balances ...banktypes.Balance) *SimApp {
+ app, genesisState := setup(true, 0)
+ authGenesis := authtypes.NewGenesisState(authtypes.DefaultParams(), genAccs)
+ genesisState[authtypes.ModuleName] = app.AppCodec().MustMarshalJSON(authGenesis)
+
+ totalSupply := sdk.NewCoins()
+ for _, b := range balances {
+ totalSupply = totalSupply.Add(b.Coins...)
+ }
+
+ bankGenesis := banktypes.NewGenesisState(banktypes.DefaultGenesisState().Params, balances, totalSupply, []banktypes.Metadata{})
+ genesisState[banktypes.ModuleName] = app.AppCodec().MustMarshalJSON(bankGenesis)
+
+ stateBytes, err := json.MarshalIndent(genesisState, "", " ")
+ if err != nil {
+ panic(err)
+ }
+
+ app.InitChain(
+ abci.RequestInitChain{
+ Validators: []abci.ValidatorUpdate{},
+ ConsensusParams: DefaultConsensusParams,
+ AppStateBytes: stateBytes,
+ },
+ )
+
+ app.Commit()
+ app.BeginBlock(abci.RequestBeginBlock{Header: tmproto.Header{Height: app.LastBlockHeight() + 1}})
+
+ return app
+}
+
+type GenerateAccountStrategy func(int) []sdk.AccAddress
+
+// createRandomAccounts is a strategy used by addTestAddrs() to generate addresses in random order.
+func createRandomAccounts(accNum int) []sdk.AccAddress {
+ testAddrs := make([]sdk.AccAddress, accNum)
+ for i := 0; i < accNum; i++ {
+ pk := ed25519.GenPrivKey().PubKey()
+ testAddrs[i] = sdk.AccAddress(pk.Address())
+ }
+
+ return testAddrs
+}
+
+// createIncrementalAccounts is a strategy used by addTestAddrs() to generate addresses in ascending order.
+func createIncrementalAccounts(accNum int) []sdk.AccAddress {
+ var addresses []sdk.AccAddress
+ var buffer bytes.Buffer
+
+ // start at 100 so the numeric suffix is always three digits, keeping the generated test addresses valid and unique
+ for i := 100; i < (accNum + 100); i++ {
+ numString := strconv.Itoa(i)
+ buffer.WriteString("A58856F0FD53BF058B4909A21AEC019107BA6") // base address string
+
+ buffer.WriteString(numString) // adding on final two digits to make addresses unique
+ res, _ := sdk.AccAddressFromHex(buffer.String())
+ bech := res.String()
+ addr, _ := TestAddr(buffer.String(), bech)
+
+ addresses = append(addresses, addr)
+ buffer.Reset()
+ }
+
+ return addresses
+}
+
+// AddTestAddrsFromPubKeys adds the addresses into the SimApp providing only the public keys.
+func AddTestAddrsFromPubKeys(app *SimApp, ctx sdk.Context, pubKeys []cryptotypes.PubKey, accAmt sdk.Int) {
+ initCoins := sdk.NewCoins(sdk.NewCoin(app.StakingKeeper.BondDenom(ctx), accAmt))
+
+ for _, pk := range pubKeys {
+ initAccountWithCoins(app, ctx, sdk.AccAddress(pk.Address()), initCoins)
+ }
+}
+
+// AddTestAddrs constructs and returns accNum accounts with an
+// initial balance of accAmt, in random order.
+func AddTestAddrs(app *SimApp, ctx sdk.Context, accNum int, accAmt sdk.Int) []sdk.AccAddress {
+ return addTestAddrs(app, ctx, accNum, accAmt, createRandomAccounts)
+}
+
+// AddTestAddrsIncremental constructs and returns accNum accounts with an
+// initial balance of accAmt, in ascending order.
+func AddTestAddrsIncremental(app *SimApp, ctx sdk.Context, accNum int, accAmt sdk.Int) []sdk.AccAddress {
+ return addTestAddrs(app, ctx, accNum, accAmt, createIncrementalAccounts)
+}
+
+func addTestAddrs(app *SimApp, ctx sdk.Context, accNum int, accAmt sdk.Int, strategy GenerateAccountStrategy) []sdk.AccAddress {
+ testAddrs := strategy(accNum)
+
+ initCoins := sdk.NewCoins(sdk.NewCoin(app.StakingKeeper.BondDenom(ctx), accAmt))
+
+ for _, addr := range testAddrs {
+ initAccountWithCoins(app, ctx, addr, initCoins)
+ }
+
+ return testAddrs
+}
+
+func initAccountWithCoins(app *SimApp, ctx sdk.Context, addr sdk.AccAddress, coins sdk.Coins) {
+ err := app.BankKeeper.MintCoins(ctx, minttypes.ModuleName, coins)
+ if err != nil {
+ panic(err)
+ }
+
+ err = app.BankKeeper.SendCoinsFromModuleToAccount(ctx, minttypes.ModuleName, addr, coins)
+ if err != nil {
+ panic(err)
+ }
+}
+
+// ConvertAddrsToValAddrs converts the provided addresses to ValAddress.
+func ConvertAddrsToValAddrs(addrs []sdk.AccAddress) []sdk.ValAddress {
+ valAddrs := make([]sdk.ValAddress, len(addrs))
+
+ for i, addr := range addrs {
+ valAddrs[i] = sdk.ValAddress(addr)
+ }
+
+ return valAddrs
+}
+
+func TestAddr(addr string, bech string) (sdk.AccAddress, error) {
+ res, err := sdk.AccAddressFromHex(addr)
+ if err != nil {
+ return nil, err
+ }
+ bechexpected := res.String()
+ if bech != bechexpected {
+ return nil, fmt.Errorf("bech encoding doesn't match reference")
+ }
+
+ bechres, err := sdk.AccAddressFromBech32(bech)
+ if err != nil {
+ return nil, err
+ }
+ if !bytes.Equal(bechres, res) {
+ return nil, fmt.Errorf("bech decoding doesn't match hex decoding")
+ }
+
+ return res, nil
+}
+
+// CheckBalance checks the balance of an account.
+func CheckBalance(t *testing.T, app *SimApp, addr sdk.AccAddress, balances sdk.Coins) {
+ ctxCheck := app.BaseApp.NewContext(true, tmproto.Header{})
+ require.True(t, balances.IsEqual(app.BankKeeper.GetAllBalances(ctxCheck, addr)))
+}
+
+// SignCheckDeliver checks a generated signed transaction and simulates a
+// block commitment with the given transaction. A test assertion is made using
+// the parameter 'expPass' against the result. A corresponding result is
+// returned.
+func SignCheckDeliver(
+ t *testing.T, txCfg client.TxConfig, app *bam.BaseApp, header tmproto.Header, msgs []sdk.Msg,
+ chainID string, accNums, accSeqs []uint64, expSimPass, expPass bool, priv ...cryptotypes.PrivKey,
+) (sdk.GasInfo, *sdk.Result, error) {
+
+ tx, err := helpers.GenTx(
+ txCfg,
+ msgs,
+ sdk.Coins{sdk.NewInt64Coin(sdk.DefaultBondDenom, 0)},
+ helpers.DefaultGenTxGas,
+ chainID,
+ accNums,
+ accSeqs,
+ priv...,
+ )
+ require.NoError(t, err)
+ txBytes, err := txCfg.TxEncoder()(tx)
+ require.Nil(t, err)
+
+ // Must simulate now as CheckTx doesn't run Msgs anymore
+ _, res, err := app.Simulate(txBytes)
+
+ if expSimPass {
+ require.NoError(t, err)
+ require.NotNil(t, res)
+ } else {
+ require.Error(t, err)
+ require.Nil(t, res)
+ }
+
+ // Simulate sending a transaction and committing a block
+ app.BeginBlock(abci.RequestBeginBlock{Header: header})
+ gInfo, res, err := app.Deliver(txCfg.TxEncoder(), tx)
+
+ if expPass {
+ require.NoError(t, err)
+ require.NotNil(t, res)
+ } else {
+ require.Error(t, err)
+ require.Nil(t, res)
+ }
+
+ app.EndBlock(abci.RequestEndBlock{})
+ app.Commit()
+
+ return gInfo, res, err
+}
+
+// GenSequenceOfTxs generates a set of signed transactions of messages, such
+// that they differ only by having the sequence numbers incremented between
+// every transaction.
+func GenSequenceOfTxs(txGen client.TxConfig, msgs []sdk.Msg, accNums []uint64, initSeqNums []uint64, numToGenerate int, priv ...cryptotypes.PrivKey) ([]sdk.Tx, error) {
+ txs := make([]sdk.Tx, numToGenerate)
+ var err error
+ for i := 0; i < numToGenerate; i++ {
+ txs[i], err = helpers.GenTx(
+ txGen,
+ msgs,
+ sdk.Coins{sdk.NewInt64Coin(sdk.DefaultBondDenom, 0)},
+ helpers.DefaultGenTxGas,
+ "",
+ accNums,
+ initSeqNums,
+ priv...,
+ )
+ if err != nil {
+ break
+ }
+ incrementAllSequenceNumbers(initSeqNums)
+ }
+
+ return txs, err
+}
+
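+// incrementAllSequenceNumbers increments each of the provided sequence numbers by one, in place.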
+func incrementAllSequenceNumbers(initSeqNums []uint64) {
+ for i := 0; i < len(initSeqNums); i++ {
+ initSeqNums[i]++
+ }
+}
+
+// CreateTestPubKeys returns a total of numPubKeys public keys in ascending order.
+func CreateTestPubKeys(numPubKeys int) []cryptotypes.PubKey {
+ var publicKeys []cryptotypes.PubKey
+ var buffer bytes.Buffer
+
+ // start at 100 so the appended suffix is always three digits, keeping every pubkey the same length
+ for i := 100; i < (numPubKeys + 100); i++ {
+ numString := strconv.Itoa(i)
+ buffer.WriteString("0B485CFC0EECC619440448436F8FC9DF40566F2369E72400281454CB552AF") // base pubkey string
+ buffer.WriteString(numString) // append the final three digits to make each pubkey unique
+ publicKeys = append(publicKeys, NewPubKeyFromHex(buffer.String()))
+ buffer.Reset()
+ }
+
+ return publicKeys
+}
+
+// NewPubKeyFromHex returns a PubKey from a hex string.
+func NewPubKeyFromHex(pk string) (res cryptotypes.PubKey) {
+ pkBytes, err := hex.DecodeString(pk)
+ if err != nil {
+ panic(err)
+ }
+ if len(pkBytes) != ed25519.PubKeySize {
+ panic(errors.Wrap(errors.ErrInvalidPubKey, "invalid pubkey size"))
+ }
+ return &ed25519.PubKey{Key: pkBytes}
+}
+
+// EmptyAppOptions is a stub implementing AppOptions
+type EmptyAppOptions struct{}
+
+// Get implements AppOptions
+func (ao EmptyAppOptions) Get(o string) interface{} {
+ return nil
+}
+
+// FundAccount is a utility function that funds an account by minting and sending the coins to the address
+// TODO(fdymylja): instead of using the mint module account, which has the permission of minting, create a "faucet" account
+func FundAccount(app *SimApp, ctx sdk.Context, addr sdk.AccAddress, amounts sdk.Coins) error {
+ err := app.BankKeeper.MintCoins(ctx, minttypes.ModuleName, amounts)
+ if err != nil {
+ return err
+ }
+ return app.BankKeeper.SendCoinsFromModuleToAccount(ctx, minttypes.ModuleName, addr, amounts)
+}
diff --git a/testing/simapp/types.go b/testing/simapp/types.go
new file mode 100644
index 00000000..0e190af1
--- /dev/null
+++ b/testing/simapp/types.go
@@ -0,0 +1,44 @@
+package simapp
+
+import (
+ abci "github.com/tendermint/tendermint/abci/types"
+
+ "github.com/cosmos/cosmos-sdk/codec"
+ "github.com/cosmos/cosmos-sdk/server/types"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/types/module"
+)
+
+// App implements the common methods for a Cosmos SDK-based
+// application-specific blockchain.
+type App interface {
+ // The assigned name of the app.
+ Name() string
+
+ // The application types codec.
+ // NOTE: This should be sealed before being returned.
+ LegacyAmino() *codec.LegacyAmino
+
+ // Application updates every begin block.
+ BeginBlocker(ctx sdk.Context, req abci.RequestBeginBlock) abci.ResponseBeginBlock
+
+ // Application updates every end block.
+ EndBlocker(ctx sdk.Context, req abci.RequestEndBlock) abci.ResponseEndBlock
+
+ // Application updates at chain (i.e. app) initialization.
+ InitChainer(ctx sdk.Context, req abci.RequestInitChain) abci.ResponseInitChain
+
+ // Loads the app at a given height.
+ LoadHeight(height int64) error
+
+ // Exports the state of the application for a genesis file.
+ ExportAppStateAndValidators(
+ forZeroHeight bool, jailAllowedAddrs []string,
+ ) (types.ExportedApp, error)
+
+ // All the registered module account addresses.
+ ModuleAccountAddrs() map[string]bool
+
+ // Helper for the simulation framework.
+ SimulationManager() *module.SimulationManager
+}
diff --git a/testing/simapp/utils.go b/testing/simapp/utils.go
new file mode 100644
index 00000000..cac61e94
--- /dev/null
+++ b/testing/simapp/utils.go
@@ -0,0 +1,131 @@
+package simapp
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+
+ "github.com/tendermint/tendermint/libs/log"
+ dbm "github.com/tendermint/tm-db"
+
+ "github.com/cosmos/cosmos-sdk/codec"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/types/kv"
+ "github.com/cosmos/cosmos-sdk/types/module"
+ simtypes "github.com/cosmos/cosmos-sdk/types/simulation"
+ "github.com/cosmos/ibc-go/testing/simapp/helpers"
+)
+
+// SetupSimulation creates the config, db (levelDB), temporary directory and logger for
+// the simulation tests. If `FlagEnabledValue` is false it skips the current test.
+// Returns an error on invalid db instantiation or temp dir creation.
+func SetupSimulation(dirPrefix, dbName string) (simtypes.Config, dbm.DB, string, log.Logger, bool, error) {
+ if !FlagEnabledValue {
+ return simtypes.Config{}, nil, "", nil, true, nil
+ }
+
+ config := NewConfigFromFlags()
+ config.ChainID = helpers.SimAppChainID
+
+ var logger log.Logger
+ if FlagVerboseValue {
+ logger = log.TestingLogger()
+ } else {
+ logger = log.NewNopLogger()
+ }
+
+ dir, err := ioutil.TempDir("", dirPrefix)
+ if err != nil {
+ return simtypes.Config{}, nil, "", nil, false, err
+ }
+
+ db, err := sdk.NewLevelDB(dbName, dir)
+ if err != nil {
+ return simtypes.Config{}, nil, "", nil, false, err
+ }
+
+ return config, db, dir, logger, false, nil
+}
+
+// SimulationOperations retrieves the simulation params from the provided file path
+// and returns all of the modules' weighted operations.
+func SimulationOperations(app App, cdc codec.JSONMarshaler, config simtypes.Config) []simtypes.WeightedOperation {
+ simState := module.SimulationState{
+ AppParams: make(simtypes.AppParams),
+ Cdc: cdc,
+ }
+
+ if config.ParamsFile != "" {
+ bz, err := ioutil.ReadFile(config.ParamsFile)
+ if err != nil {
+ panic(err)
+ }
+
+ err = json.Unmarshal(bz, &simState.AppParams)
+ if err != nil {
+ panic(err)
+ }
+ }
+
+ simState.ParamChanges = app.SimulationManager().GenerateParamChanges(config.Seed)
+ simState.Contents = app.SimulationManager().GetProposalContents(simState)
+ return app.SimulationManager().WeightedOperations(simState)
+}
+
+// CheckExportSimulation exports the app state and simulation parameters to JSON
+// if the export paths are defined.
+func CheckExportSimulation(
+ app App, config simtypes.Config, params simtypes.Params,
+) error {
+ if config.ExportStatePath != "" {
+ fmt.Println("exporting app state...")
+ exported, err := app.ExportAppStateAndValidators(false, nil)
+ if err != nil {
+ return err
+ }
+
+ if err := ioutil.WriteFile(config.ExportStatePath, []byte(exported.AppState), 0600); err != nil {
+ return err
+ }
+ }
+
+ if config.ExportParamsPath != "" {
+ fmt.Println("exporting simulation params...")
+ paramsBz, err := json.MarshalIndent(params, "", " ")
+ if err != nil {
+ return err
+ }
+
+ if err := ioutil.WriteFile(config.ExportParamsPath, paramsBz, 0600); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// PrintStats prints the corresponding statistics from the app DB.
+func PrintStats(db dbm.DB) {
+ fmt.Println("\nLevelDB Stats")
+ fmt.Println(db.Stats()["leveldb.stats"])
+ fmt.Println("LevelDB cached block size", db.Stats()["leveldb.cachedblock"])
+}
+
+// GetSimulationLog unmarshals the KVPair's Value to the corresponding type based on
+// each module's store key and the prefix bytes of the KVPair's key.
+func GetSimulationLog(storeName string, sdr sdk.StoreDecoderRegistry, kvAs, kvBs []kv.Pair) (log string) {
+ for i := 0; i < len(kvAs); i++ {
+ if len(kvAs[i].Value) == 0 && len(kvBs[i].Value) == 0 {
+ // skip if the value doesn't have any bytes
+ continue
+ }
+
+ decoder, ok := sdr[storeName]
+ if ok {
+ log += decoder(kvAs[i], kvBs[i])
+ } else {
+ log += fmt.Sprintf("store A %X => %X\nstore B %X => %X\n", kvAs[i].Key, kvAs[i].Value, kvBs[i].Key, kvBs[i].Value)
+ }
+ }
+
+ return log
+}
diff --git a/testing/simapp/utils_test.go b/testing/simapp/utils_test.go
new file mode 100644
index 00000000..6d8bb21f
--- /dev/null
+++ b/testing/simapp/utils_test.go
@@ -0,0 +1,60 @@
+package simapp
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/cosmos/cosmos-sdk/codec"
+ "github.com/cosmos/cosmos-sdk/std"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/types/kv"
+ "github.com/cosmos/cosmos-sdk/types/module"
+ authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
+)
+
+func makeCodec(bm module.BasicManager) *codec.LegacyAmino {
+ cdc := codec.NewLegacyAmino()
+
+ bm.RegisterLegacyAminoCodec(cdc)
+ std.RegisterLegacyAminoCodec(cdc)
+
+ return cdc
+}
+
+func TestGetSimulationLog(t *testing.T) {
+ cdc := makeCodec(ModuleBasics)
+
+ decoders := make(sdk.StoreDecoderRegistry)
+ decoders[authtypes.StoreKey] = func(kvAs, kvBs kv.Pair) string { return "10" }
+
+ tests := []struct {
+ store string
+ kvPairs []kv.Pair
+ expectedLog string
+ }{
+ {
+ "Empty",
+ []kv.Pair{{}},
+ "",
+ },
+ {
+ authtypes.StoreKey,
+ []kv.Pair{{Key: authtypes.GlobalAccountNumberKey, Value: cdc.MustMarshalBinaryBare(uint64(10))}},
+ "10",
+ },
+ {
+ "OtherStore",
+ []kv.Pair{{Key: []byte("key"), Value: []byte("value")}},
+ fmt.Sprintf("store A %X => %X\nstore B %X => %X\n", []byte("key"), []byte("value"), []byte("key"), []byte("value")),
+ },
+ }
+
+ for _, tt := range tests {
+ tt := tt
+ t.Run(tt.store, func(t *testing.T) {
+ require.Equal(t, tt.expectedLog, GetSimulationLog(tt.store, decoders, tt.kvPairs, tt.kvPairs), tt.store)
+ })
+ }
+}
diff --git a/third_party/proto/cosmos/upgrade/v1beta1/upgrade.proto b/third_party/proto/cosmos/upgrade/v1beta1/upgrade.proto
new file mode 100644
index 00000000..76fb14e2
--- /dev/null
+++ b/third_party/proto/cosmos/upgrade/v1beta1/upgrade.proto
@@ -0,0 +1,60 @@
+syntax = "proto3";
+package cosmos.upgrade.v1beta1;
+
+import "google/protobuf/any.proto";
+import "gogoproto/gogo.proto";
+import "google/protobuf/timestamp.proto";
+
+option go_package = "github.com/cosmos/cosmos-sdk/x/upgrade/types";
+option (gogoproto.goproto_stringer_all) = false;
+option (gogoproto.goproto_getters_all) = false;
+
+// Plan specifies information about a planned upgrade and when it should occur.
+message Plan {
+ option (gogoproto.equal) = true;
+
+ // Sets the name for the upgrade. This name will be used by the upgraded
+ // version of the software to apply any special "on-upgrade" commands during
+ // the first BeginBlock method after the upgrade is applied. It is also used
+ // to detect whether a software version can handle a given upgrade. If no
+ // upgrade handler with this name has been set in the software, it will be
+ // assumed that the software is out-of-date when the upgrade Time or Height is
+ // reached and the software will exit.
+ string name = 1;
+
+ // The time after which the upgrade must be performed.
+ // Leave set to its zero value to use a pre-defined Height instead.
+ google.protobuf.Timestamp time = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+
+ // The height at which the upgrade must be performed.
+ // Only used if Time is not set.
+ int64 height = 3;
+
+ // Any application specific upgrade info to be included on-chain
+ // such as a git commit that validators could automatically upgrade to
+ string info = 4;
+
+ // UpgradedClientState field has been deprecated. IBC upgrade logic has been
+ // moved to the IBC module in the sub module 02-client.
+ reserved 5;
+ reserved "option";
+}
+
+// SoftwareUpgradeProposal is a gov Content type for initiating a software
+// upgrade.
+message SoftwareUpgradeProposal {
+ option (gogoproto.equal) = true;
+
+ string title = 1;
+ string description = 2;
+ Plan plan = 3 [(gogoproto.nullable) = false];
+}
+
+// CancelSoftwareUpgradeProposal is a gov Content type for cancelling a software
+// upgrade.
+message CancelSoftwareUpgradeProposal {
+ option (gogoproto.equal) = true;
+
+ string title = 1;
+ string description = 2;
+}
From dab556d6a8b94c2310e8e4d8be37f41e41e74356 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Colin=20Axn=C3=A9r?=
<25233464+colin-axner@users.noreply.github.com>
Date: Wed, 3 Mar 2021 16:46:43 +0100
Subject: [PATCH 006/393] migration doc
---
docs/migrations/ibc-migration-042.md | 49 ++++++++++++++++++++++++++++
1 file changed, 49 insertions(+)
create mode 100644 docs/migrations/ibc-migration-042.md
diff --git a/docs/migrations/ibc-migration-042.md b/docs/migrations/ibc-migration-042.md
new file mode 100644
index 00000000..08530ce3
--- /dev/null
+++ b/docs/migrations/ibc-migration-042.md
@@ -0,0 +1,49 @@
+# Migrating to ibc-go
+
+This file contains information on how to migrate from the IBC module contained in the SDK 0.41.x line to the IBC module in the ibc-go repository based on the 0.42 SDK version.
+
+## Import Changes
+
+The most obvious change is to the import paths. We need to change:
+- applications -> apps
+- cosmos-sdk/x/ibc -> ibc-go
+
+On my GNU/Linux based machine I used the following commands, executed in order:
+
+`grep -RiIl 'cosmos-sdk\/x\/ibc\/applications' | xargs sed -i 's/cosmos-sdk\/x\/ibc\/applications/ibc-go\/apps/g'`
+`grep -RiIl 'cosmos-sdk\/x\/ibc' | xargs sed -i 's/cosmos-sdk\/x\/ibc/ibc-go/g'`
+
+Executing these commands out of order will cause issues.
+
+Feel free to use your own method for modifying import names.
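+
+As a rough illustration (the import alias is just an example), a transfer module import would change as follows:
+
+```go
+// before (SDK 0.41.x)
+import transfertypes "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types"
+
+// after (ibc-go)
+import transfertypes "github.com/cosmos/ibc-go/apps/transfer/types"
+```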
+
+## Proto file changes
+
+The protobuf files have changed their package naming.
+The new package naming begins with `ibcgo` instead of `ibc`.
+
+The gRPC querier service endpoints have changed slightly. The previous files used `v1beta1`, this has been updated to `v1`.
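+
+As a rough sketch (the service and method names below are illustrative assumptions, not taken from this repository), a fully-qualified gRPC query endpoint changes along these lines:
+
+```go
+// old package prefix `ibc` with `v1beta1`
+const oldChannelsEndpoint = "/ibc.core.channel.v1beta1.Query/Channels"
+
+// new package prefix `ibcgo` with `v1`
+const newChannelsEndpoint = "/ibcgo.core.channel.v1.Query/Channels"
+```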
+
+## UpdateClient Proposal && IBC UpgradeProposal
+
+### UpdateClientProposal
+The `UpdateClient` proposal has been modified to take in two client identifiers and one initial height. Please see the [documentation](../proposals.md) for more information.
+
+Simapp registration was incorrect in the 0.41.x releases. The `UpdateClient` proposal should be registered with the router key belonging to `ibc-go/core/02-client/types`.
+See this [commit](https://github.com/cosmos/cosmos-sdk/pull/8405/commits/9fae3ce6a335a6e2137aee09f7359c45957fb6fc#diff-8d1ca8086ee74e8f0490825ba21e7435be4753922192ff691311483aa3e71a0aL312)
+
+### UpgradeProposal
+
+A new IBC proposal type has been added, `UpgradeProposal`. This handles an IBC (breaking) Upgrade. The previous `UpgradedClientState` field in an Upgrade `Plan` has been deprecated in favor of this new proposal type.
+
+### Proposal CLI Registration
+
+Please ensure both proposal type CLI commands are registered on the governance module by adding the following arguments to `gov.NewAppModuleBasic()`:
+
+`ibcclientclient.UpdateClientProposalHandler, ibcclientclient.UpgradeProposalHandler`
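+
+A minimal sketch of this registration in an application's `ModuleBasics` (the non-IBC proposal handlers and import aliases shown here are illustrative and depend on your app):
+
+```go
+import (
+	"github.com/cosmos/cosmos-sdk/types/module"
+	"github.com/cosmos/cosmos-sdk/x/gov"
+	paramsclient "github.com/cosmos/cosmos-sdk/x/params/client"
+	upgradeclient "github.com/cosmos/cosmos-sdk/x/upgrade/client"
+
+	ibcclientclient "github.com/cosmos/ibc-go/core/02-client/client"
+)
+
+var ModuleBasics = module.NewBasicManager(
+	// ... other app modules ...
+	gov.NewAppModuleBasic(
+		paramsclient.ProposalHandler,
+		upgradeclient.ProposalHandler,
+		upgradeclient.CancelProposalHandler,
+		ibcclientclient.UpdateClientProposalHandler,
+		ibcclientclient.UpgradeProposalHandler,
+	),
+)
+```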
+
+REST routes are not supported for these proposals.
+
+### Proposal Handler Registration
+
+The `ClientUpdateProposalHandler` has been renamed to `ClientProposalHandler`. It handles both `UpdateClientProposal`s and `UpgradeProposal`s.
From 9707ca67d2de1ada439e49d7ed91ff66e640f082 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Colin=20Axn=C3=A9r?=
<25233464+colin-axner@users.noreply.github.com>
Date: Wed, 3 Mar 2021 16:50:15 +0100
Subject: [PATCH 007/393] formatting and more information
---
docs/migrations/ibc-migration-042.md | 7 ++++++-
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/docs/migrations/ibc-migration-042.md b/docs/migrations/ibc-migration-042.md
index 08530ce3..06e56d59 100644
--- a/docs/migrations/ibc-migration-042.md
+++ b/docs/migrations/ibc-migration-042.md
@@ -11,6 +11,7 @@ The most obvious change is to the import paths. We need to change:
On my GNU/Linux based machine I used the following commands, executed in order:
`grep -RiIl 'cosmos-sdk\/x\/ibc\/applications' | xargs sed -i 's/cosmos-sdk\/x\/ibc\/applications/ibc-go\/apps/g'`
+
`grep -RiIl 'cosmos-sdk\/x\/ibc' | xargs sed -i 's/cosmos-sdk\/x\/ibc/ibc-go/g'`
Executing these commands out of order will cause issues.
@@ -24,7 +25,7 @@ The new package naming begins with `ibcgo` instead of `ibc`.
The gRPC querier service endpoints have changed slightly. The previous files used `v1beta1`, this has been updated to `v1`.
-## UpdateClient Proposal && IBC UpgradeProposal
+## Proposals
### UpdateClientProposal
 The `UpdateClient` proposal has been modified to take in two client identifiers and one initial height. Please see the [documentation](../proposals.md) for more information.
@@ -47,3 +48,7 @@ REST routes are not supported for these proposals.
### Proposal Handler Registration
The `ClientUpdateProposalHandler` has been renamed to `ClientProposalHandler`. It handles both `UpdateClientProposal`s and `UpgradeProposal`s.
+
+Please ensure the governance module adds the following route:
+
+`AddRoute(ibcclienttypes.RouterKey, ibcclient.NewClientProposalHandler(app.IBCKeeper.ClientKeeper))`
From 70857e441d6434eb1c2849d28fa2ff06ef2bd6a6 Mon Sep 17 00:00:00 2001
From: Marko
Date: Thu, 4 Mar 2021 13:31:08 +0000
Subject: [PATCH 008/393] add codecov.yml (#12)
* add codecov yml
* remove tests
---
.codecov.yml | 54 ++++++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 54 insertions(+)
create mode 100644 .codecov.yml
diff --git a/.codecov.yml b/.codecov.yml
new file mode 100644
index 00000000..7c51fdda
--- /dev/null
+++ b/.codecov.yml
@@ -0,0 +1,54 @@
+#
+# This codecov.yml is the default configuration for
+# all repositories on Codecov. You may adjust the settings
+# below in your own codecov.yml in your repository.
+#
+coverage:
+ precision: 2
+ round: down
+ range: 70...100
+
+ status:
+ # Learn more at https://docs.codecov.io/docs/commit-status
+ project:
+ default:
+ threshold: 1% # allow this much decrease on project
+ app:
+ target: 70%
+ flags:
+ - app
+ modules:
+ target: 70%
+ flags:
+ - modules
+ client:
+ flags:
+ - client
+ changes: false
+
+comment:
+ layout: "reach, diff, files"
+ behavior: default # update if exists else create new
+ require_changes: true
+
+flags:
+ app:
+ paths:
+ - "app/"
+ - "baseapp/"
+ modules:
+ paths:
+ - "core/"
+ - "!core/**/client/" # ignore client package
+ client:
+ paths:
+ - "client/"
+ - "core/**/client/"
+
+ignore:
+ - "docs"
+ - "*.md"
+ - "**/*.pb.go"
+ - "core/**/*.pb.go"
+ - "core/**/test_common.go"
+ - "scripts/"
From 3f3220762a884f209c4be637d928516ae7983ace Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?colin=20axn=C3=A9r?=
<25233464+colin-axner@users.noreply.github.com>
Date: Thu, 4 Mar 2021 15:22:29 +0100
Subject: [PATCH 009/393] Final code migration from SDK (#13)
* migrate rest of the code
* revert codec registration changes
* add codecov ignore
---
.codecov.yml | 1 +
apps/transfer/keeper/keeper.go | 2 +-
apps/transfer/simulation/decoder_test.go | 2 +-
core/02-client/abci_test.go | 2 +-
core/02-client/client/cli/tx.go | 2 +-
core/02-client/keeper/client.go | 13 +++++++
core/02-client/keeper/client_test.go | 39 ++++++++++++++++++-
core/02-client/keeper/keeper.go | 4 +-
core/02-client/keeper/keeper_test.go | 4 +-
core/02-client/keeper/proposal_test.go | 2 +-
core/02-client/simulation/decoder_test.go | 2 +-
core/02-client/types/encoding.go | 33 ++++++++++++++--
core/02-client/types/encoding_test.go | 30 ++++++++++++++
core/02-client/types/events.go | 1 +
core/02-client/types/params.go | 2 +-
core/02-client/types/proposal.go | 2 +-
core/02-client/types/proposal_test.go | 2 +-
core/03-connection/simulation/decoder_test.go | 2 +-
core/03-connection/types/msgs_test.go | 2 +-
core/04-channel/simulation/decoder_test.go | 2 +-
core/04-channel/types/msgs_test.go | 2 +-
core/05-port/keeper/keeper_test.go | 2 +-
core/genesis_test.go | 2 +-
core/keeper/keeper.go | 2 +-
core/keeper/msg_server_test.go | 2 +-
core/simulation/decoder_test.go | 2 +-
core/spec/06_events.md | 2 +-
.../07-tendermint/types/tendermint_test.go | 2 +-
light-clients/07-tendermint/types/upgrade.go | 2 +-
.../07-tendermint/types/upgrade_test.go | 2 +-
.../09-localhost/types/localhost_test.go | 2 +-
31 files changed, 140 insertions(+), 31 deletions(-)
create mode 100644 core/02-client/types/encoding_test.go
diff --git a/.codecov.yml b/.codecov.yml
index 7c51fdda..c5314207 100644
--- a/.codecov.yml
+++ b/.codecov.yml
@@ -49,6 +49,7 @@ ignore:
- "docs"
- "*.md"
- "**/*.pb.go"
+ - "**/*.pb.gw.go"
- "core/**/*.pb.go"
- "core/**/test_common.go"
- "scripts/"
diff --git a/apps/transfer/keeper/keeper.go b/apps/transfer/keeper/keeper.go
index 27db6db2..fbc4a167 100644
--- a/apps/transfer/keeper/keeper.go
+++ b/apps/transfer/keeper/keeper.go
@@ -11,10 +11,10 @@ import (
authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
capabilitykeeper "github.com/cosmos/cosmos-sdk/x/capability/keeper"
capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"
- paramtypes "github.com/cosmos/cosmos-sdk/x/params/types"
"github.com/cosmos/ibc-go/apps/transfer/types"
channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
host "github.com/cosmos/ibc-go/core/24-host"
+ paramtypes "github.com/cosmos/cosmos-sdk/x/params/types"
)
// Keeper defines the IBC fungible transfer keeper
diff --git a/apps/transfer/simulation/decoder_test.go b/apps/transfer/simulation/decoder_test.go
index 69206b93..93162775 100644
--- a/apps/transfer/simulation/decoder_test.go
+++ b/apps/transfer/simulation/decoder_test.go
@@ -6,10 +6,10 @@ import (
"github.com/stretchr/testify/require"
+ "github.com/cosmos/ibc-go/testing/simapp"
"github.com/cosmos/cosmos-sdk/types/kv"
"github.com/cosmos/ibc-go/apps/transfer/simulation"
"github.com/cosmos/ibc-go/apps/transfer/types"
- "github.com/cosmos/ibc-go/testing/simapp"
)
func TestDecodeStore(t *testing.T) {
diff --git a/core/02-client/abci_test.go b/core/02-client/abci_test.go
index a36bed8b..3e466d38 100644
--- a/core/02-client/abci_test.go
+++ b/core/02-client/abci_test.go
@@ -7,13 +7,13 @@ import (
abci "github.com/tendermint/tendermint/abci/types"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
- upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
client "github.com/cosmos/ibc-go/core/02-client"
"github.com/cosmos/ibc-go/core/02-client/types"
"github.com/cosmos/ibc-go/core/exported"
ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
localhosttypes "github.com/cosmos/ibc-go/light-clients/09-localhost/types"
ibctesting "github.com/cosmos/ibc-go/testing"
+ upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
)
type ClientTestSuite struct {
diff --git a/core/02-client/client/cli/tx.go b/core/02-client/client/cli/tx.go
index e2ca4d9c..ad1d0acc 100644
--- a/core/02-client/client/cli/tx.go
+++ b/core/02-client/client/cli/tx.go
@@ -16,9 +16,9 @@ import (
"github.com/cosmos/cosmos-sdk/version"
govcli "github.com/cosmos/cosmos-sdk/x/gov/client/cli"
govtypes "github.com/cosmos/cosmos-sdk/x/gov/types"
- upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
"github.com/cosmos/ibc-go/core/02-client/types"
"github.com/cosmos/ibc-go/core/exported"
+ upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
)
// NewCreateClientCmd defines the command to create a new IBC light client.
diff --git a/core/02-client/keeper/client.go b/core/02-client/keeper/client.go
index 45a2af42..750bac2c 100644
--- a/core/02-client/keeper/client.go
+++ b/core/02-client/keeper/client.go
@@ -1,6 +1,8 @@
package keeper
import (
+ "encoding/hex"
+
"github.com/armon/go-metrics"
"github.com/cosmos/cosmos-sdk/telemetry"
@@ -95,6 +97,16 @@ func (k Keeper) UpdateClient(ctx sdk.Context, clientID string, header exported.H
)
}()
+ // emit the full header in events
+ var headerStr string
+ if header != nil {
+ // Marshal the Header as an Any and encode the resulting bytes to hex.
+ // This prevents the event value from containing invalid UTF-8 characters
+ // which may cause data to be lost when JSON encoding/decoding.
+ headerStr = hex.EncodeToString(types.MustMarshalHeader(k.cdc, header))
+
+ }
+
// emitting events in the keeper emits for both begin block and handler client updates
ctx.EventManager().EmitEvent(
sdk.NewEvent(
@@ -102,6 +114,7 @@ func (k Keeper) UpdateClient(ctx sdk.Context, clientID string, header exported.H
sdk.NewAttribute(types.AttributeKeyClientID, clientID),
sdk.NewAttribute(types.AttributeKeyClientType, clientState.ClientType()),
sdk.NewAttribute(types.AttributeKeyConsensusHeight, consensusHeight.String()),
+ sdk.NewAttribute(types.AttributeKeyHeader, headerStr),
),
)
diff --git a/core/02-client/keeper/client_test.go b/core/02-client/keeper/client_test.go
index b5f259fc..2466e5ce 100644
--- a/core/02-client/keeper/client_test.go
+++ b/core/02-client/keeper/client_test.go
@@ -1,12 +1,12 @@
package keeper_test
import (
+ "encoding/hex"
"fmt"
"time"
tmtypes "github.com/tendermint/tendermint/types"
- upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
"github.com/cosmos/ibc-go/core/02-client/types"
clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
@@ -15,6 +15,7 @@ import (
localhosttypes "github.com/cosmos/ibc-go/light-clients/09-localhost/types"
ibctesting "github.com/cosmos/ibc-go/testing"
ibctestingmock "github.com/cosmos/ibc-go/testing/mock"
+ upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
)
func (suite *KeeperTestSuite) TestCreateClient() {
@@ -589,3 +590,39 @@ func (suite *KeeperTestSuite) TestCheckMisbehaviourAndUpdateState() {
})
}
}
+
+func (suite *KeeperTestSuite) TestUpdateClientEventEmission() {
+ clientID, _ := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+ header, err := suite.chainA.ConstructUpdateTMClientHeader(suite.chainB, clientID)
+ suite.Require().NoError(err)
+
+ msg, err := clienttypes.NewMsgUpdateClient(
+ clientID, header,
+ suite.chainA.SenderAccount.GetAddress(),
+ )
+
+ result, err := suite.chainA.SendMsgs(msg)
+ suite.Require().NoError(err)
+ // first event type is "message"
+ updateEvent := result.Events[1]
+
+ suite.Require().Equal(clienttypes.EventTypeUpdateClient, updateEvent.Type)
+
+ // use a boolean to ensure the update event contains the header
+ contains := false
+ for _, attr := range updateEvent.Attributes {
+ if string(attr.Key) == clienttypes.AttributeKeyHeader {
+ contains = true
+
+ bz, err := hex.DecodeString(string(attr.Value))
+ suite.Require().NoError(err)
+
+ emittedHeader, err := types.UnmarshalHeader(suite.chainA.App.AppCodec(), bz)
+ suite.Require().NoError(err)
+ suite.Require().Equal(header, emittedHeader)
+ }
+
+ }
+ suite.Require().True(contains)
+
+}
diff --git a/core/02-client/keeper/keeper.go b/core/02-client/keeper/keeper.go
index a41eaf16..e7808058 100644
--- a/core/02-client/keeper/keeper.go
+++ b/core/02-client/keeper/keeper.go
@@ -12,13 +12,13 @@ import (
"github.com/cosmos/cosmos-sdk/store/prefix"
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- paramtypes "github.com/cosmos/cosmos-sdk/x/params/types"
- upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
"github.com/cosmos/ibc-go/core/02-client/types"
commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
host "github.com/cosmos/ibc-go/core/24-host"
"github.com/cosmos/ibc-go/core/exported"
ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
+ paramtypes "github.com/cosmos/cosmos-sdk/x/params/types"
+ upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
)
// Keeper represents a type that grants read and write permissions to any client
diff --git a/core/02-client/keeper/keeper_test.go b/core/02-client/keeper/keeper_test.go
index 806aaee2..2233583b 100644
--- a/core/02-client/keeper/keeper_test.go
+++ b/core/02-client/keeper/keeper_test.go
@@ -13,8 +13,8 @@ import (
"github.com/cosmos/cosmos-sdk/baseapp"
"github.com/cosmos/cosmos-sdk/codec"
cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec"
+ "github.com/cosmos/ibc-go/testing/simapp"
sdk "github.com/cosmos/cosmos-sdk/types"
- stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types"
"github.com/cosmos/ibc-go/core/02-client/keeper"
"github.com/cosmos/ibc-go/core/02-client/types"
commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
@@ -23,7 +23,7 @@ import (
localhosttypes "github.com/cosmos/ibc-go/light-clients/09-localhost/types"
ibctesting "github.com/cosmos/ibc-go/testing"
ibctestingmock "github.com/cosmos/ibc-go/testing/mock"
- "github.com/cosmos/ibc-go/testing/simapp"
+ stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types"
)
const (
diff --git a/core/02-client/keeper/proposal_test.go b/core/02-client/keeper/proposal_test.go
index 1d2580dc..5f98a04e 100644
--- a/core/02-client/keeper/proposal_test.go
+++ b/core/02-client/keeper/proposal_test.go
@@ -2,11 +2,11 @@ package keeper_test
import (
govtypes "github.com/cosmos/cosmos-sdk/x/gov/types"
- upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
"github.com/cosmos/ibc-go/core/02-client/types"
"github.com/cosmos/ibc-go/core/exported"
ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
ibctesting "github.com/cosmos/ibc-go/testing"
+ upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
)
func (suite *KeeperTestSuite) TestClientUpdateProposal() {
diff --git a/core/02-client/simulation/decoder_test.go b/core/02-client/simulation/decoder_test.go
index 56483c7f..27add5fb 100644
--- a/core/02-client/simulation/decoder_test.go
+++ b/core/02-client/simulation/decoder_test.go
@@ -7,12 +7,12 @@ import (
"github.com/stretchr/testify/require"
+ "github.com/cosmos/ibc-go/testing/simapp"
"github.com/cosmos/cosmos-sdk/types/kv"
"github.com/cosmos/ibc-go/core/02-client/simulation"
"github.com/cosmos/ibc-go/core/02-client/types"
host "github.com/cosmos/ibc-go/core/24-host"
ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
- "github.com/cosmos/ibc-go/testing/simapp"
)
func TestDecodeStore(t *testing.T) {
diff --git a/core/02-client/types/encoding.go b/core/02-client/types/encoding.go
index 6e9cb07b..2bd45b67 100644
--- a/core/02-client/types/encoding.go
+++ b/core/02-client/types/encoding.go
@@ -57,7 +57,7 @@ func MustUnmarshalConsensusState(cdc codec.BinaryMarshaler, bz []byte) exported.
return consensusState
}
-// MustMarshalConsensusState attempts to encode an ConsensusState object and returns the
+// MustMarshalConsensusState attempts to encode a ConsensusState object and returns the
// raw encoded bytes. It panics on error.
func MustMarshalConsensusState(cdc codec.BinaryMarshaler, consensusState exported.ConsensusState) []byte {
bz, err := MarshalConsensusState(cdc, consensusState)
@@ -68,12 +68,12 @@ func MustMarshalConsensusState(cdc codec.BinaryMarshaler, consensusState exporte
return bz
}
-// MarshalConsensusState protobuf serializes an ConsensusState interface
+// MarshalConsensusState protobuf serializes a ConsensusState interface
func MarshalConsensusState(cdc codec.BinaryMarshaler, cs exported.ConsensusState) ([]byte, error) {
return cdc.MarshalInterface(cs)
}
-// UnmarshalConsensusState returns an ConsensusState interface from raw encoded clientState
+// UnmarshalConsensusState returns a ConsensusState interface from raw encoded consensus state
// bytes of a Proto-based ConsensusState type. An error is returned upon decoding
// failure.
func UnmarshalConsensusState(cdc codec.BinaryMarshaler, bz []byte) (exported.ConsensusState, error) {
@@ -84,3 +84,30 @@ func UnmarshalConsensusState(cdc codec.BinaryMarshaler, bz []byte) (exported.Con
return consensusState, nil
}
+
+// MarshalHeader protobuf serializes a Header interface
+func MarshalHeader(cdc codec.BinaryMarshaler, h exported.Header) ([]byte, error) {
+ return cdc.MarshalInterface(h)
+}
+
+// MustMarshalHeader attempts to encode a Header object and returns the
+// raw encoded bytes. It panics on error.
+func MustMarshalHeader(cdc codec.BinaryMarshaler, header exported.Header) []byte {
+ bz, err := MarshalHeader(cdc, header)
+ if err != nil {
+ panic(fmt.Errorf("failed to encode header: %w", err))
+ }
+
+ return bz
+}
+
+// UnmarshalHeader returns a Header interface from raw proto encoded header bytes.
+// An error is returned upon decoding failure.
+func UnmarshalHeader(cdc codec.BinaryMarshaler, bz []byte) (exported.Header, error) {
+ var header exported.Header
+ if err := cdc.UnmarshalInterface(bz, &header); err != nil {
+ return nil, err
+ }
+
+ return header, nil
+}
diff --git a/core/02-client/types/encoding_test.go b/core/02-client/types/encoding_test.go
new file mode 100644
index 00000000..066aecbf
--- /dev/null
+++ b/core/02-client/types/encoding_test.go
@@ -0,0 +1,30 @@
+package types_test
+
+import (
+ "github.com/cosmos/ibc-go/core/02-client/types"
+ ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
+)
+
+func (suite *TypesTestSuite) TestMarshalHeader() {
+
+ cdc := suite.chainA.App.AppCodec()
+ h := &ibctmtypes.Header{
+ TrustedHeight: types.NewHeight(4, 100),
+ }
+
+ // marshal header
+ bz, err := types.MarshalHeader(cdc, h)
+ suite.Require().NoError(err)
+
+ // unmarshal header
+ newHeader, err := types.UnmarshalHeader(cdc, bz)
+ suite.Require().NoError(err)
+
+ suite.Require().Equal(h, newHeader)
+
+ // use invalid bytes
+ invalidHeader, err := types.UnmarshalHeader(cdc, []byte("invalid bytes"))
+ suite.Require().Error(err)
+ suite.Require().Nil(invalidHeader)
+
+}
diff --git a/core/02-client/types/events.go b/core/02-client/types/events.go
index 47aeda7a..d9f91c51 100644
--- a/core/02-client/types/events.go
+++ b/core/02-client/types/events.go
@@ -12,6 +12,7 @@ const (
AttributeKeySubjectClientID = "subject_client_id"
AttributeKeyClientType = "client_type"
AttributeKeyConsensusHeight = "consensus_height"
+ AttributeKeyHeader = "header"
)
// IBC client events vars
diff --git a/core/02-client/types/params.go b/core/02-client/types/params.go
index 7a21ad75..a652aa1a 100644
--- a/core/02-client/types/params.go
+++ b/core/02-client/types/params.go
@@ -4,8 +4,8 @@ import (
"fmt"
"strings"
- paramtypes "github.com/cosmos/cosmos-sdk/x/params/types"
"github.com/cosmos/ibc-go/core/exported"
+ paramtypes "github.com/cosmos/cosmos-sdk/x/params/types"
)
var (
diff --git a/core/02-client/types/proposal.go b/core/02-client/types/proposal.go
index 3141402b..36b6e992 100644
--- a/core/02-client/types/proposal.go
+++ b/core/02-client/types/proposal.go
@@ -6,8 +6,8 @@ import (
codectypes "github.com/cosmos/cosmos-sdk/codec/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
govtypes "github.com/cosmos/cosmos-sdk/x/gov/types"
- upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
"github.com/cosmos/ibc-go/core/exported"
+ upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
)
const (
diff --git a/core/02-client/types/proposal_test.go b/core/02-client/types/proposal_test.go
index 5c6bfff8..52cdd563 100644
--- a/core/02-client/types/proposal_test.go
+++ b/core/02-client/types/proposal_test.go
@@ -7,11 +7,11 @@ import (
"github.com/cosmos/cosmos-sdk/codec"
codectypes "github.com/cosmos/cosmos-sdk/codec/types"
govtypes "github.com/cosmos/cosmos-sdk/x/gov/types"
- upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
"github.com/cosmos/ibc-go/core/02-client/types"
"github.com/cosmos/ibc-go/core/exported"
ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
ibctesting "github.com/cosmos/ibc-go/testing"
+ upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
)
func (suite *TypesTestSuite) TestValidateBasic() {
diff --git a/core/03-connection/simulation/decoder_test.go b/core/03-connection/simulation/decoder_test.go
index ff567c19..c4c66644 100644
--- a/core/03-connection/simulation/decoder_test.go
+++ b/core/03-connection/simulation/decoder_test.go
@@ -6,11 +6,11 @@ import (
"github.com/stretchr/testify/require"
+ "github.com/cosmos/ibc-go/testing/simapp"
"github.com/cosmos/cosmos-sdk/types/kv"
"github.com/cosmos/ibc-go/core/03-connection/simulation"
"github.com/cosmos/ibc-go/core/03-connection/types"
host "github.com/cosmos/ibc-go/core/24-host"
- "github.com/cosmos/ibc-go/testing/simapp"
)
func TestDecodeStore(t *testing.T) {
diff --git a/core/03-connection/types/msgs_test.go b/core/03-connection/types/msgs_test.go
index d0b14966..39709126 100644
--- a/core/03-connection/types/msgs_test.go
+++ b/core/03-connection/types/msgs_test.go
@@ -10,6 +10,7 @@ import (
abci "github.com/tendermint/tendermint/abci/types"
dbm "github.com/tendermint/tm-db"
+ "github.com/cosmos/ibc-go/testing/simapp"
"github.com/cosmos/cosmos-sdk/store/iavl"
"github.com/cosmos/cosmos-sdk/store/rootmulti"
storetypes "github.com/cosmos/cosmos-sdk/store/types"
@@ -19,7 +20,6 @@ import (
commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
ibctesting "github.com/cosmos/ibc-go/testing"
- "github.com/cosmos/ibc-go/testing/simapp"
)
var (
diff --git a/core/04-channel/simulation/decoder_test.go b/core/04-channel/simulation/decoder_test.go
index 9212acd6..d94b5606 100644
--- a/core/04-channel/simulation/decoder_test.go
+++ b/core/04-channel/simulation/decoder_test.go
@@ -6,12 +6,12 @@ import (
"github.com/stretchr/testify/require"
+ "github.com/cosmos/ibc-go/testing/simapp"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/cosmos/cosmos-sdk/types/kv"
"github.com/cosmos/ibc-go/core/04-channel/simulation"
"github.com/cosmos/ibc-go/core/04-channel/types"
host "github.com/cosmos/ibc-go/core/24-host"
- "github.com/cosmos/ibc-go/testing/simapp"
)
func TestDecodeStore(t *testing.T) {
diff --git a/core/04-channel/types/msgs_test.go b/core/04-channel/types/msgs_test.go
index d0b5e2d4..966e5ec3 100644
--- a/core/04-channel/types/msgs_test.go
+++ b/core/04-channel/types/msgs_test.go
@@ -9,6 +9,7 @@ import (
abci "github.com/tendermint/tendermint/abci/types"
dbm "github.com/tendermint/tm-db"
+ "github.com/cosmos/ibc-go/testing/simapp"
"github.com/cosmos/cosmos-sdk/store/iavl"
"github.com/cosmos/cosmos-sdk/store/rootmulti"
storetypes "github.com/cosmos/cosmos-sdk/store/types"
@@ -17,7 +18,6 @@ import (
"github.com/cosmos/ibc-go/core/04-channel/types"
commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
"github.com/cosmos/ibc-go/core/exported"
- "github.com/cosmos/ibc-go/testing/simapp"
)
const (
diff --git a/core/05-port/keeper/keeper_test.go b/core/05-port/keeper/keeper_test.go
index 2d519aba..7081978f 100644
--- a/core/05-port/keeper/keeper_test.go
+++ b/core/05-port/keeper/keeper_test.go
@@ -7,9 +7,9 @@ import (
"github.com/stretchr/testify/suite"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
+ "github.com/cosmos/ibc-go/testing/simapp"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/cosmos/ibc-go/core/05-port/keeper"
- "github.com/cosmos/ibc-go/testing/simapp"
)
var (
diff --git a/core/genesis_test.go b/core/genesis_test.go
index 0b94f6da..fb968921 100644
--- a/core/genesis_test.go
+++ b/core/genesis_test.go
@@ -8,6 +8,7 @@ import (
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
"github.com/cosmos/cosmos-sdk/codec"
+ "github.com/cosmos/ibc-go/testing/simapp"
ibc "github.com/cosmos/ibc-go/core"
clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
connectiontypes "github.com/cosmos/ibc-go/core/03-connection/types"
@@ -18,7 +19,6 @@ import (
ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
localhosttypes "github.com/cosmos/ibc-go/light-clients/09-localhost/types"
ibctesting "github.com/cosmos/ibc-go/testing"
- "github.com/cosmos/ibc-go/testing/simapp"
)
const (
diff --git a/core/keeper/keeper.go b/core/keeper/keeper.go
index 109cb4ab..5c105eb5 100644
--- a/core/keeper/keeper.go
+++ b/core/keeper/keeper.go
@@ -4,7 +4,6 @@ import (
"github.com/cosmos/cosmos-sdk/codec"
sdk "github.com/cosmos/cosmos-sdk/types"
capabilitykeeper "github.com/cosmos/cosmos-sdk/x/capability/keeper"
- paramtypes "github.com/cosmos/cosmos-sdk/x/params/types"
clientkeeper "github.com/cosmos/ibc-go/core/02-client/keeper"
clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
connectionkeeper "github.com/cosmos/ibc-go/core/03-connection/keeper"
@@ -12,6 +11,7 @@ import (
portkeeper "github.com/cosmos/ibc-go/core/05-port/keeper"
porttypes "github.com/cosmos/ibc-go/core/05-port/types"
"github.com/cosmos/ibc-go/core/types"
+ paramtypes "github.com/cosmos/cosmos-sdk/x/params/types"
)
var _ types.QueryServer = (*Keeper)(nil)
diff --git a/core/keeper/msg_server_test.go b/core/keeper/msg_server_test.go
index 1f41abb4..461e2917 100644
--- a/core/keeper/msg_server_test.go
+++ b/core/keeper/msg_server_test.go
@@ -6,7 +6,6 @@ import (
"github.com/stretchr/testify/suite"
sdk "github.com/cosmos/cosmos-sdk/types"
- upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
@@ -16,6 +15,7 @@ import (
ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
ibctesting "github.com/cosmos/ibc-go/testing"
ibcmock "github.com/cosmos/ibc-go/testing/mock"
+ upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
)
const height = 10
diff --git a/core/simulation/decoder_test.go b/core/simulation/decoder_test.go
index b397cc29..827d94cf 100644
--- a/core/simulation/decoder_test.go
+++ b/core/simulation/decoder_test.go
@@ -6,6 +6,7 @@ import (
"github.com/stretchr/testify/require"
+ "github.com/cosmos/ibc-go/testing/simapp"
"github.com/cosmos/cosmos-sdk/types/kv"
clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
connectiontypes "github.com/cosmos/ibc-go/core/03-connection/types"
@@ -13,7 +14,6 @@ import (
host "github.com/cosmos/ibc-go/core/24-host"
"github.com/cosmos/ibc-go/core/simulation"
ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
- "github.com/cosmos/ibc-go/testing/simapp"
)
func TestDecodeStore(t *testing.T) {
diff --git a/core/spec/06_events.md b/core/spec/06_events.md
index 528a30cf..8a416217 100644
--- a/core/spec/06_events.md
+++ b/core/spec/06_events.md
@@ -32,6 +32,7 @@ callbacks to IBC applications.
| update_client | client_id | {clientId} |
| update_client | client_type | {clientType} |
| update_client | consensus_height | {consensusHeight} |
+| update_client | header | {header} |
| message | action | update_client |
| message | module | ibc_client |
@@ -238,4 +239,3 @@ callbacks to IBC applications.
| message | action | timeout_packet |
| message | module | ibc-channel |
-
diff --git a/light-clients/07-tendermint/types/tendermint_test.go b/light-clients/07-tendermint/types/tendermint_test.go
index c9833951..cb939548 100644
--- a/light-clients/07-tendermint/types/tendermint_test.go
+++ b/light-clients/07-tendermint/types/tendermint_test.go
@@ -10,12 +10,12 @@ import (
tmtypes "github.com/tendermint/tendermint/types"
"github.com/cosmos/cosmos-sdk/codec"
+ "github.com/cosmos/ibc-go/testing/simapp"
sdk "github.com/cosmos/cosmos-sdk/types"
clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
ibctesting "github.com/cosmos/ibc-go/testing"
ibctestingmock "github.com/cosmos/ibc-go/testing/mock"
- "github.com/cosmos/ibc-go/testing/simapp"
)
const (
diff --git a/light-clients/07-tendermint/types/upgrade.go b/light-clients/07-tendermint/types/upgrade.go
index 144b5219..ce408325 100644
--- a/light-clients/07-tendermint/types/upgrade.go
+++ b/light-clients/07-tendermint/types/upgrade.go
@@ -6,10 +6,10 @@ import (
"github.com/cosmos/cosmos-sdk/codec"
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
"github.com/cosmos/ibc-go/core/exported"
+ upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
)
// VerifyUpgradeAndUpdateState checks if the upgraded client has been committed by the current client
diff --git a/light-clients/07-tendermint/types/upgrade_test.go b/light-clients/07-tendermint/types/upgrade_test.go
index 27ea3331..ffafdfcb 100644
--- a/light-clients/07-tendermint/types/upgrade_test.go
+++ b/light-clients/07-tendermint/types/upgrade_test.go
@@ -1,11 +1,11 @@
package types_test
import (
- upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
"github.com/cosmos/ibc-go/core/exported"
"github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
+ upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
)
func (suite *TendermintTestSuite) TestVerifyUpgrade() {
diff --git a/light-clients/09-localhost/types/localhost_test.go b/light-clients/09-localhost/types/localhost_test.go
index c3d03c9a..69d5c1cf 100644
--- a/light-clients/09-localhost/types/localhost_test.go
+++ b/light-clients/09-localhost/types/localhost_test.go
@@ -7,10 +7,10 @@ import (
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
"github.com/cosmos/cosmos-sdk/codec"
+ "github.com/cosmos/ibc-go/testing/simapp"
sdk "github.com/cosmos/cosmos-sdk/types"
clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
"github.com/cosmos/ibc-go/core/exported"
- "github.com/cosmos/ibc-go/testing/simapp"
)
const (
From fa091bcda3b1dbbf687ff4465f9fd309a4206e67 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?colin=20axn=C3=A9r?=
<25233464+colin-axner@users.noreply.github.com>
Date: Fri, 5 Mar 2021 14:26:35 +0100
Subject: [PATCH 010/393] Add "modules" top level directory (#14)
* move codes into 'modules' dir
* fix codecov ignore
---
.codecov.yml | 7 +-
core/types/codec.go | 23 ---
.../apps}/transfer/client/cli/cli.go | 0
.../apps}/transfer/client/cli/query.go | 2 +-
.../apps}/transfer/client/cli/tx.go | 6 +-
{apps => modules/apps}/transfer/handler.go | 2 +-
.../apps}/transfer/handler_test.go | 8 +-
.../apps}/transfer/keeper/MBT_README.md | 0
.../apps}/transfer/keeper/encoding.go | 2 +-
.../apps}/transfer/keeper/genesis.go | 2 +-
.../apps}/transfer/keeper/genesis_test.go | 2 +-
.../apps}/transfer/keeper/grpc_query.go | 2 +-
.../apps}/transfer/keeper/grpc_query_test.go | 2 +-
.../apps}/transfer/keeper/keeper.go | 6 +-
.../apps}/transfer/keeper/keeper_test.go | 2 +-
.../apps}/transfer/keeper/mbt_relay_test.go | 8 +-
.../model_based_tests/Test5Packets.json | 0
.../keeper/model_based_tests/Test5Packets.tla | 0
.../Test5PacketsAllDifferentPass.json | 0
.../Test5PacketsAllDifferentPass.tla | 0
.../TestOnRecvAcknowledgementErrorFail.json | 0
.../TestOnRecvAcknowledgementErrorFail.tla | 0
.../TestOnRecvAcknowledgementErrorPass.json | 0
.../TestOnRecvAcknowledgementErrorPass.tla | 0
.../TestOnRecvAcknowledgementResultFail.json | 0
.../TestOnRecvAcknowledgementResultFail.tla | 0
.../TestOnRecvAcknowledgementResultPass.json | 0
.../TestOnRecvAcknowledgementResultPass.tla | 0
.../TestOnRecvPacketFail.json | 0
.../TestOnRecvPacketFail.tla | 0
.../TestOnRecvPacketPass.json | 0
.../TestOnRecvPacketPass.tla | 0
.../model_based_tests/TestOnTimeoutFail.json | 0
.../model_based_tests/TestOnTimeoutFail.tla | 0
.../model_based_tests/TestOnTimeoutPass.json | 0
.../model_based_tests/TestOnTimeoutPass.tla | 0
.../TestSendTransferFail.json | 0
.../TestSendTransferFail.tla | 0
.../TestSendTransferPass.json | 0
.../TestSendTransferPass.tla | 0
.../model_based_tests/TestUnescrowTokens.json | 0
.../model_based_tests/TestUnescrowTokens.tla | 0
.../apps}/transfer/keeper/msg_server.go | 2 +-
.../apps}/transfer/keeper/params.go | 2 +-
.../apps}/transfer/keeper/params_test.go | 2 +-
.../apps}/transfer/keeper/relay.go | 8 +-
.../transfer/keeper/relay_model/account.tla | 0
.../keeper/relay_model/account_record.tla | 0
.../relay_model/apalache-to-relay-test.json | 0
.../relay_model/apalache-to-relay-test2.json | 0
.../transfer/keeper/relay_model/denom.tla | 0
.../keeper/relay_model/denom_record.tla | 0
.../keeper/relay_model/denom_record2.tla | 0
.../keeper/relay_model/denom_sequence.tla | 0
.../keeper/relay_model/identifiers.tla | 0
.../transfer/keeper/relay_model/relay.tla | 0
.../keeper/relay_model/relay_tests.tla | 0
.../apps}/transfer/keeper/relay_test.go | 10 +-
{apps => modules/apps}/transfer/module.go | 14 +-
.../apps}/transfer/module_test.go | 8 +-
.../apps}/transfer/simulation/decoder.go | 2 +-
.../apps}/transfer/simulation/decoder_test.go | 4 +-
.../apps}/transfer/simulation/genesis.go | 2 +-
.../apps}/transfer/simulation/genesis_test.go | 4 +-
.../apps}/transfer/simulation/params.go | 2 +-
.../apps}/transfer/simulation/params_test.go | 2 +-
.../apps}/transfer/spec/01_concepts.md | 0
.../apps}/transfer/spec/02_state.md | 0
.../transfer/spec/03_state_transitions.md | 0
.../apps}/transfer/spec/04_messages.md | 0
.../apps}/transfer/spec/05_events.md | 0
.../apps}/transfer/spec/06_metrics.md | 0
.../apps}/transfer/spec/07_params.md | 0
.../apps}/transfer/spec/README.md | 0
.../apps}/transfer/types/codec.go | 0
{apps => modules/apps}/transfer/types/coin.go | 0
.../apps}/transfer/types/errors.go | 0
.../apps}/transfer/types/events.go | 0
.../apps}/transfer/types/expected_keepers.go | 6 +-
.../apps}/transfer/types/genesis.go | 2 +-
.../apps}/transfer/types/genesis.pb.go | 12 +-
.../apps}/transfer/types/genesis_test.go | 2 +-
{apps => modules/apps}/transfer/types/keys.go | 0
.../apps}/transfer/types/keys_test.go | 2 +-
{apps => modules/apps}/transfer/types/msgs.go | 4 +-
.../apps}/transfer/types/msgs_test.go | 2 +-
.../apps}/transfer/types/packet.go | 0
.../apps}/transfer/types/packet_test.go | 0
.../apps}/transfer/types/params.go | 0
.../apps}/transfer/types/params_test.go | 0
.../apps}/transfer/types/query.pb.go | 68 +++----
.../apps}/transfer/types/query.pb.gw.go | 0
.../apps}/transfer/types/trace.go | 2 +-
.../apps}/transfer/types/trace_test.go | 0
.../apps}/transfer/types/transfer.pb.go | 47 ++---
.../apps}/transfer/types/tx.pb.go | 65 +++---
{core => modules/core}/02-client/abci.go | 6 +-
{core => modules/core}/02-client/abci_test.go | 10 +-
.../core}/02-client/client/cli/cli.go | 2 +-
.../core}/02-client/client/cli/query.go | 6 +-
.../core}/02-client/client/cli/tx.go | 4 +-
.../02-client/client/proposal_handler.go | 2 +-
.../core}/02-client/client/utils/utils.go | 12 +-
{core => modules/core}/02-client/doc.go | 0
{core => modules/core}/02-client/genesis.go | 6 +-
.../core}/02-client/keeper/client.go | 4 +-
.../core}/02-client/keeper/client_test.go | 12 +-
.../core}/02-client/keeper/encoding.go | 4 +-
.../core}/02-client/keeper/grpc_query.go | 6 +-
.../core}/02-client/keeper/grpc_query_test.go | 8 +-
.../core}/02-client/keeper/keeper.go | 10 +-
.../core}/02-client/keeper/keeper_test.go | 12 +-
.../core}/02-client/keeper/params.go | 2 +-
.../core}/02-client/keeper/params_test.go | 2 +-
.../core}/02-client/keeper/proposal.go | 4 +-
.../core}/02-client/keeper/proposal_test.go | 6 +-
{core => modules/core}/02-client/module.go | 4 +-
.../core}/02-client/proposal_handler.go | 4 +-
.../core}/02-client/proposal_handler_test.go | 8 +-
.../core}/02-client/simulation/decoder.go | 6 +-
.../02-client/simulation/decoder_test.go | 8 +-
.../core}/02-client/simulation/genesis.go | 2 +-
.../core}/02-client/types/client.go | 4 +-
.../core}/02-client/types/client.pb.go | 95 ++++-----
.../core}/02-client/types/client_test.go | 4 +-
.../core}/02-client/types/codec.go | 2 +-
.../core}/02-client/types/codec_test.go | 10 +-
.../core}/02-client/types/encoding.go | 2 +-
.../core}/02-client/types/encoding_test.go | 4 +-
.../core}/02-client/types/errors.go | 0
.../core}/02-client/types/events.go | 2 +-
.../core}/02-client/types/expected_keepers.go | 0
.../core}/02-client/types/genesis.go | 4 +-
.../core}/02-client/types/genesis.pb.go | 70 +++----
.../core}/02-client/types/genesis_test.go | 14 +-
.../core}/02-client/types/height.go | 2 +-
.../core}/02-client/types/height_test.go | 2 +-
.../core}/02-client/types/keys.go | 2 +-
.../core}/02-client/types/keys_test.go | 2 +-
.../core}/02-client/types/msgs.go | 4 +-
.../core}/02-client/types/msgs_test.go | 10 +-
.../core}/02-client/types/params.go | 2 +-
.../core}/02-client/types/params_test.go | 2 +-
.../core}/02-client/types/proposal.go | 2 +-
.../core}/02-client/types/proposal_test.go | 6 +-
.../core}/02-client/types/query.go | 2 +-
.../core}/02-client/types/query.pb.go | 114 +++++------
.../core}/02-client/types/query.pb.gw.go | 0
.../core}/02-client/types/tx.pb.go | 78 +++----
.../core}/03-connection/client/cli/cli.go | 2 +-
.../core}/03-connection/client/cli/query.go | 6 +-
.../core}/03-connection/client/cli/tx.go | 8 +-
.../core}/03-connection/client/utils/utils.go | 14 +-
.../core}/03-connection/genesis.go | 4 +-
.../core}/03-connection/keeper/grpc_query.go | 6 +-
.../03-connection/keeper/grpc_query_test.go | 8 +-
.../core}/03-connection/keeper/handshake.go | 8 +-
.../03-connection/keeper/handshake_test.go | 10 +-
.../core}/03-connection/keeper/keeper.go | 10 +-
.../core}/03-connection/keeper/keeper_test.go | 4 +-
.../core}/03-connection/keeper/verify.go | 4 +-
.../core}/03-connection/keeper/verify_test.go | 12 +-
.../core}/03-connection/module.go | 4 +-
.../core}/03-connection/simulation/decoder.go | 4 +-
.../03-connection/simulation/decoder_test.go | 6 +-
.../core}/03-connection/simulation/genesis.go | 2 +-
.../core}/03-connection/types/codec.go | 2 +-
.../core}/03-connection/types/connection.go | 6 +-
.../03-connection/types/connection.pb.go | 80 ++++----
.../03-connection/types/connection_test.go | 6 +-
.../core}/03-connection/types/errors.go | 0
.../core}/03-connection/types/events.go | 2 +-
.../03-connection/types/expected_keepers.go | 2 +-
.../core}/03-connection/types/genesis.go | 2 +-
.../core}/03-connection/types/genesis.pb.go | 44 ++--
.../core}/03-connection/types/genesis_test.go | 4 +-
.../core}/03-connection/types/keys.go | 2 +-
.../core}/03-connection/types/keys_test.go | 2 +-
.../core}/03-connection/types/msgs.go | 8 +-
.../core}/03-connection/types/msgs_test.go | 8 +-
.../core}/03-connection/types/query.go | 4 +-
.../core}/03-connection/types/query.pb.go | 116 +++++------
.../core}/03-connection/types/query.pb.gw.go | 0
.../core}/03-connection/types/tx.pb.go | 120 +++++------
.../core}/03-connection/types/version.go | 2 +-
.../core}/03-connection/types/version_test.go | 4 +-
.../core}/04-channel/client/cli/cli.go | 2 +-
.../core}/04-channel/client/cli/query.go | 6 +-
.../core}/04-channel/client/cli/tx.go | 8 +-
.../core}/04-channel/client/utils/utils.go | 12 +-
{core => modules/core}/04-channel/genesis.go | 4 +-
{core => modules/core}/04-channel/handler.go | 4 +-
.../core}/04-channel/keeper/grpc_query.go | 8 +-
.../04-channel/keeper/grpc_query_test.go | 8 +-
.../core}/04-channel/keeper/handshake.go | 10 +-
.../core}/04-channel/keeper/handshake_test.go | 10 +-
.../core}/04-channel/keeper/keeper.go | 12 +-
.../core}/04-channel/keeper/keeper_test.go | 4 +-
.../core}/04-channel/keeper/packet.go | 10 +-
.../core}/04-channel/keeper/packet_test.go | 10 +-
.../core}/04-channel/keeper/timeout.go | 8 +-
.../core}/04-channel/keeper/timeout_test.go | 8 +-
{core => modules/core}/04-channel/module.go | 4 +-
.../core}/04-channel/simulation/decoder.go | 4 +-
.../04-channel/simulation/decoder_test.go | 6 +-
.../core}/04-channel/simulation/genesis.go | 2 +-
.../core}/04-channel/types/acknowledgement.go | 0
.../04-channel/types/acknowledgement_test.go | 2 +-
.../core}/04-channel/types/channel.go | 4 +-
.../core}/04-channel/types/channel.pb.go | 119 +++++------
.../core}/04-channel/types/channel_test.go | 2 +-
.../core}/04-channel/types/codec.go | 2 +-
.../core}/04-channel/types/errors.go | 0
.../core}/04-channel/types/events.go | 2 +-
.../04-channel/types/expected_keepers.go | 4 +-
.../core}/04-channel/types/genesis.go | 2 +-
.../core}/04-channel/types/genesis.pb.go | 62 +++---
.../core}/04-channel/types/genesis_test.go | 2 +-
.../core}/04-channel/types/keys.go | 2 +-
.../core}/04-channel/types/keys_test.go | 2 +-
.../core}/04-channel/types/msgs.go | 6 +-
.../core}/04-channel/types/msgs_test.go | 8 +-
.../core}/04-channel/types/packet.go | 6 +-
.../core}/04-channel/types/packet_test.go | 4 +-
.../core}/04-channel/types/query.go | 4 +-
.../core}/04-channel/types/query.pb.go | 191 +++++++++---------
.../core}/04-channel/types/query.pb.gw.go | 0
.../core}/04-channel/types/tx.pb.go | 142 ++++++-------
.../core}/05-port/keeper/keeper.go | 4 +-
.../core}/05-port/keeper/keeper_test.go | 2 +-
.../core}/05-port/types/errors.go | 0
{core => modules/core}/05-port/types/keys.go | 0
.../core}/05-port/types/module.go | 2 +-
.../core}/05-port/types/router.go | 0
{core => modules/core}/05-port/types/utils.go | 0
.../core}/23-commitment/types/bench_test.go | 0
.../core}/23-commitment/types/codec.go | 2 +-
.../23-commitment/types/commitment.pb.go | 45 +++--
.../23-commitment/types/commitment_test.go | 0
.../core}/23-commitment/types/errors.go | 0
.../core}/23-commitment/types/merkle.go | 2 +-
.../core}/23-commitment/types/merkle_test.go | 2 +-
.../core}/23-commitment/types/utils.go | 0
.../core}/23-commitment/types/utils_test.go | 2 +-
{core => modules/core}/24-host/errors.go | 0
{core => modules/core}/24-host/keys.go | 2 +-
{core => modules/core}/24-host/parse.go | 0
{core => modules/core}/24-host/parse_test.go | 4 +-
{core => modules/core}/24-host/validate.go | 0
.../core}/24-host/validate_test.go | 0
{core => modules/core}/client/cli/cli.go | 8 +-
{core => modules/core}/client/query.go | 6 +-
{core => modules/core}/exported/channel.go | 0
{core => modules/core}/exported/client.go | 0
{core => modules/core}/exported/commitment.go | 0
{core => modules/core}/exported/connection.go | 0
{core => modules/core}/genesis.go | 10 +-
{core => modules/core}/genesis_test.go | 18 +-
{core => modules/core}/handler.go | 8 +-
{core => modules/core}/keeper/grpc_query.go | 6 +-
{core => modules/core}/keeper/keeper.go | 14 +-
{core => modules/core}/keeper/msg_server.go | 10 +-
.../core}/keeper/msg_server_test.go | 14 +-
{core => modules/core}/module.go | 18 +-
{core => modules/core}/simulation/decoder.go | 10 +-
.../core}/simulation/decoder_test.go | 12 +-
{core => modules/core}/simulation/genesis.go | 16 +-
.../core}/simulation/genesis_test.go | 6 +-
{core => modules/core}/spec/01_concepts.md | 0
{core => modules/core}/spec/02_state.md | 0
.../core}/spec/03_state_transitions.md | 0
{core => modules/core}/spec/04_messages.md | 0
{core => modules/core}/spec/05_callbacks.md | 0
{core => modules/core}/spec/06_events.md | 0
{core => modules/core}/spec/07_params.md | 0
{core => modules/core}/spec/README.md | 0
modules/core/types/codec.go | 23 +++
{core => modules/core}/types/genesis.go | 6 +-
{core => modules/core}/types/genesis.pb.go | 49 ++---
{core => modules/core}/types/query.go | 12 +-
.../light-clients}/06-solomachine/doc.go | 0
.../light-clients}/06-solomachine/module.go | 2 +-
.../06-solomachine/spec/01_concepts.md | 0
.../06-solomachine/spec/02_state.md | 0
.../spec/03_state_transitions.md | 0
.../06-solomachine/spec/04_messages.md | 0
.../06-solomachine/spec/README.md | 0
.../06-solomachine/types/client_state.go | 8 +-
.../06-solomachine/types/client_state_test.go | 14 +-
.../06-solomachine/types/codec.go | 4 +-
.../06-solomachine/types/codec_test.go | 8 +-
.../06-solomachine/types/consensus_state.go | 4 +-
.../types/consensus_state_test.go | 4 +-
.../06-solomachine/types/errors.go | 0
.../06-solomachine/types/header.go | 4 +-
.../06-solomachine/types/header_test.go | 4 +-
.../06-solomachine/types/misbehaviour.go | 6 +-
.../types/misbehaviour_handle.go | 4 +-
.../types/misbehaviour_handle_test.go | 6 +-
.../06-solomachine/types/misbehaviour_test.go | 4 +-
.../06-solomachine/types/proof.go | 10 +-
.../06-solomachine/types/proof_test.go | 4 +-
.../06-solomachine/types/proposal_handle.go | 4 +-
.../types/proposal_handle_test.go | 6 +-
.../06-solomachine/types/solomachine.go | 2 +-
.../06-solomachine/types/solomachine.pb.go | 178 ++++++++--------
.../06-solomachine/types/solomachine_test.go | 6 +-
.../06-solomachine/types/update.go | 4 +-
.../06-solomachine/types/update_test.go | 6 +-
.../light-clients}/07-tendermint/doc.go | 0
.../light-clients}/07-tendermint/module.go | 2 +-
.../07-tendermint/types/client_state.go | 12 +-
.../07-tendermint/types/client_state_test.go | 12 +-
.../07-tendermint/types/codec.go | 2 +-
.../07-tendermint/types/consensus_state.go | 6 +-
.../types/consensus_state_test.go | 6 +-
.../07-tendermint/types/errors.go | 0
.../07-tendermint/types/fraction.go | 0
.../07-tendermint/types/genesis.go | 4 +-
.../07-tendermint/types/genesis_test.go | 6 +-
.../07-tendermint/types/header.go | 6 +-
.../07-tendermint/types/header_test.go | 6 +-
.../07-tendermint/types/misbehaviour.go | 6 +-
.../types/misbehaviour_handle.go | 4 +-
.../types/misbehaviour_handle_test.go | 8 +-
.../07-tendermint/types/misbehaviour_test.go | 6 +-
.../07-tendermint/types/proposal_handle.go | 4 +-
.../types/proposal_handle_test.go | 6 +-
.../07-tendermint/types/store.go | 6 +-
.../07-tendermint/types/store_test.go | 12 +-
.../07-tendermint/types/tendermint.pb.go | 140 ++++++-------
.../07-tendermint/types/tendermint_test.go | 4 +-
.../07-tendermint/types/update.go | 6 +-
.../07-tendermint/types/update_test.go | 6 +-
.../07-tendermint/types/upgrade.go | 6 +-
.../07-tendermint/types/upgrade_test.go | 8 +-
.../light-clients}/09-localhost/doc.go | 0
.../light-clients}/09-localhost/module.go | 2 +-
.../09-localhost/types/client_state.go | 10 +-
.../09-localhost/types/client_state_test.go | 16 +-
.../09-localhost/types/codec.go | 2 +-
.../09-localhost/types/errors.go | 0
.../light-clients}/09-localhost/types/keys.go | 0
.../09-localhost/types/localhost.pb.go | 16 +-
.../09-localhost/types/localhost_test.go | 4 +-
proto/ibcgo/apps/transfer/v1/genesis.proto | 2 +-
proto/ibcgo/apps/transfer/v1/query.proto | 2 +-
proto/ibcgo/apps/transfer/v1/transfer.proto | 2 +-
proto/ibcgo/apps/transfer/v1/tx.proto | 2 +-
proto/ibcgo/core/channel/v1/channel.proto | 2 +-
proto/ibcgo/core/channel/v1/genesis.proto | 2 +-
proto/ibcgo/core/channel/v1/query.proto | 2 +-
proto/ibcgo/core/channel/v1/tx.proto | 2 +-
proto/ibcgo/core/client/v1/client.proto | 2 +-
proto/ibcgo/core/client/v1/genesis.proto | 2 +-
proto/ibcgo/core/client/v1/query.proto | 2 +-
proto/ibcgo/core/client/v1/tx.proto | 2 +-
.../ibcgo/core/commitment/v1/commitment.proto | 2 +-
.../ibcgo/core/connection/v1/connection.proto | 2 +-
proto/ibcgo/core/connection/v1/genesis.proto | 2 +-
proto/ibcgo/core/connection/v1/query.proto | 2 +-
proto/ibcgo/core/connection/v1/tx.proto | 2 +-
proto/ibcgo/core/types/v1/genesis.proto | 2 +-
.../lightclients/localhost/v1/localhost.proto | 2 +-
.../solomachine/v1/solomachine.proto | 2 +-
.../tendermint/v1/tendermint.proto | 2 +-
testing/chain.go | 18 +-
testing/coordinator.go | 6 +-
testing/mock/mock.go | 4 +-
testing/sdk_test.go | 4 +-
testing/simapp/app.go | 20 +-
testing/simapp/app_test.go | 4 +-
testing/simapp/sim_test.go | 4 +-
testing/solomachine.go | 10 +-
testing/types.go | 2 +-
375 files changed, 1615 insertions(+), 1607 deletions(-)
delete mode 100644 core/types/codec.go
rename {apps => modules/apps}/transfer/client/cli/cli.go (100%)
rename {apps => modules/apps}/transfer/client/cli/query.go (98%)
rename {apps => modules/apps}/transfer/client/cli/tx.go (95%)
rename {apps => modules/apps}/transfer/handler.go (92%)
rename {apps => modules/apps}/transfer/handler_test.go (96%)
rename {apps => modules/apps}/transfer/keeper/MBT_README.md (100%)
rename {apps => modules/apps}/transfer/keeper/encoding.go (95%)
rename {apps => modules/apps}/transfer/keeper/genesis.go (95%)
rename {apps => modules/apps}/transfer/keeper/genesis_test.go (93%)
rename {apps => modules/apps}/transfer/keeper/grpc_query.go (97%)
rename {apps => modules/apps}/transfer/keeper/grpc_query_test.go (98%)
rename {apps => modules/apps}/transfer/keeper/keeper.go (97%)
rename {apps => modules/apps}/transfer/keeper/keeper_test.go (96%)
rename {apps => modules/apps}/transfer/keeper/mbt_relay_test.go (97%)
rename {apps => modules/apps}/transfer/keeper/model_based_tests/Test5Packets.json (100%)
rename {apps => modules/apps}/transfer/keeper/model_based_tests/Test5Packets.tla (100%)
rename {apps => modules/apps}/transfer/keeper/model_based_tests/Test5PacketsAllDifferentPass.json (100%)
rename {apps => modules/apps}/transfer/keeper/model_based_tests/Test5PacketsAllDifferentPass.tla (100%)
rename {apps => modules/apps}/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementErrorFail.json (100%)
rename {apps => modules/apps}/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementErrorFail.tla (100%)
rename {apps => modules/apps}/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementErrorPass.json (100%)
rename {apps => modules/apps}/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementErrorPass.tla (100%)
rename {apps => modules/apps}/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementResultFail.json (100%)
rename {apps => modules/apps}/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementResultFail.tla (100%)
rename {apps => modules/apps}/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementResultPass.json (100%)
rename {apps => modules/apps}/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementResultPass.tla (100%)
rename {apps => modules/apps}/transfer/keeper/model_based_tests/TestOnRecvPacketFail.json (100%)
rename {apps => modules/apps}/transfer/keeper/model_based_tests/TestOnRecvPacketFail.tla (100%)
rename {apps => modules/apps}/transfer/keeper/model_based_tests/TestOnRecvPacketPass.json (100%)
rename {apps => modules/apps}/transfer/keeper/model_based_tests/TestOnRecvPacketPass.tla (100%)
rename {apps => modules/apps}/transfer/keeper/model_based_tests/TestOnTimeoutFail.json (100%)
rename {apps => modules/apps}/transfer/keeper/model_based_tests/TestOnTimeoutFail.tla (100%)
rename {apps => modules/apps}/transfer/keeper/model_based_tests/TestOnTimeoutPass.json (100%)
rename {apps => modules/apps}/transfer/keeper/model_based_tests/TestOnTimeoutPass.tla (100%)
rename {apps => modules/apps}/transfer/keeper/model_based_tests/TestSendTransferFail.json (100%)
rename {apps => modules/apps}/transfer/keeper/model_based_tests/TestSendTransferFail.tla (100%)
rename {apps => modules/apps}/transfer/keeper/model_based_tests/TestSendTransferPass.json (100%)
rename {apps => modules/apps}/transfer/keeper/model_based_tests/TestSendTransferPass.tla (100%)
rename {apps => modules/apps}/transfer/keeper/model_based_tests/TestUnescrowTokens.json (100%)
rename {apps => modules/apps}/transfer/keeper/model_based_tests/TestUnescrowTokens.tla (100%)
rename {apps => modules/apps}/transfer/keeper/msg_server.go (95%)
rename {apps => modules/apps}/transfer/keeper/params.go (93%)
rename {apps => modules/apps}/transfer/keeper/params_test.go (88%)
rename {apps => modules/apps}/transfer/keeper/relay.go (98%)
rename {apps => modules/apps}/transfer/keeper/relay_model/account.tla (100%)
rename {apps => modules/apps}/transfer/keeper/relay_model/account_record.tla (100%)
rename {apps => modules/apps}/transfer/keeper/relay_model/apalache-to-relay-test.json (100%)
rename {apps => modules/apps}/transfer/keeper/relay_model/apalache-to-relay-test2.json (100%)
rename {apps => modules/apps}/transfer/keeper/relay_model/denom.tla (100%)
rename {apps => modules/apps}/transfer/keeper/relay_model/denom_record.tla (100%)
rename {apps => modules/apps}/transfer/keeper/relay_model/denom_record2.tla (100%)
rename {apps => modules/apps}/transfer/keeper/relay_model/denom_sequence.tla (100%)
rename {apps => modules/apps}/transfer/keeper/relay_model/identifiers.tla (100%)
rename {apps => modules/apps}/transfer/keeper/relay_model/relay.tla (100%)
rename {apps => modules/apps}/transfer/keeper/relay_model/relay_tests.tla (100%)
rename {apps => modules/apps}/transfer/keeper/relay_test.go (98%)
rename {apps => modules/apps}/transfer/module.go (97%)
rename {apps => modules/apps}/transfer/module_test.go (96%)
rename {apps => modules/apps}/transfer/simulation/decoder.go (94%)
rename {apps => modules/apps}/transfer/simulation/decoder_test.go (91%)
rename {apps => modules/apps}/transfer/simulation/genesis.go (96%)
rename {apps => modules/apps}/transfer/simulation/genesis_test.go (94%)
rename {apps => modules/apps}/transfer/simulation/params.go (94%)
rename {apps => modules/apps}/transfer/simulation/params_test.go (92%)
rename {apps => modules/apps}/transfer/spec/01_concepts.md (100%)
rename {apps => modules/apps}/transfer/spec/02_state.md (100%)
rename {apps => modules/apps}/transfer/spec/03_state_transitions.md (100%)
rename {apps => modules/apps}/transfer/spec/04_messages.md (100%)
rename {apps => modules/apps}/transfer/spec/05_events.md (100%)
rename {apps => modules/apps}/transfer/spec/06_metrics.md (100%)
rename {apps => modules/apps}/transfer/spec/07_params.md (100%)
rename {apps => modules/apps}/transfer/spec/README.md (100%)
rename {apps => modules/apps}/transfer/types/codec.go (100%)
rename {apps => modules/apps}/transfer/types/coin.go (100%)
rename {apps => modules/apps}/transfer/types/errors.go (100%)
rename {apps => modules/apps}/transfer/types/events.go (100%)
rename {apps => modules/apps}/transfer/types/expected_keepers.go (90%)
rename {apps => modules/apps}/transfer/types/genesis.go (93%)
rename {apps => modules/apps}/transfer/types/genesis.pb.go (96%)
rename {apps => modules/apps}/transfer/types/genesis_test.go (92%)
rename {apps => modules/apps}/transfer/types/keys.go (100%)
rename {apps => modules/apps}/transfer/types/keys_test.go (90%)
rename {apps => modules/apps}/transfer/types/msgs.go (95%)
rename {apps => modules/apps}/transfer/types/msgs_test.go (98%)
rename {apps => modules/apps}/transfer/types/packet.go (100%)
rename {apps => modules/apps}/transfer/types/packet_test.go (100%)
rename {apps => modules/apps}/transfer/types/params.go (100%)
rename {apps => modules/apps}/transfer/types/params_test.go (100%)
rename {apps => modules/apps}/transfer/types/query.pb.go (91%)
rename {apps => modules/apps}/transfer/types/query.pb.gw.go (100%)
rename {apps => modules/apps}/transfer/types/trace.go (99%)
rename {apps => modules/apps}/transfer/types/trace_test.go (100%)
rename {apps => modules/apps}/transfer/types/transfer.pb.go (90%)
rename {apps => modules/apps}/transfer/types/tx.pb.go (86%)
rename {core => modules/core}/02-client/abci.go (87%)
rename {core => modules/core}/02-client/abci_test.go (90%)
rename {core => modules/core}/02-client/client/cli/cli.go (95%)
rename {core => modules/core}/02-client/client/cli/query.go (97%)
rename {core => modules/core}/02-client/client/cli/tx.go (99%)
rename {core => modules/core}/02-client/client/proposal_handler.go (92%)
rename {core => modules/core}/02-client/client/utils/utils.go (93%)
rename {core => modules/core}/02-client/doc.go (100%)
rename {core => modules/core}/02-client/genesis.go (92%)
rename {core => modules/core}/02-client/keeper/client.go (98%)
rename {core => modules/core}/02-client/keeper/client_test.go (98%)
rename {core => modules/core}/02-client/keeper/encoding.go (93%)
rename {core => modules/core}/02-client/keeper/grpc_query.go (97%)
rename {core => modules/core}/02-client/keeper/grpc_query_test.go (97%)
rename {core => modules/core}/02-client/keeper/keeper.go (97%)
rename {core => modules/core}/02-client/keeper/keeper_test.go (97%)
rename {core => modules/core}/02-client/keeper/params.go (91%)
rename {core => modules/core}/02-client/keeper/params_test.go (89%)
rename {core => modules/core}/02-client/keeper/proposal.go (97%)
rename {core => modules/core}/02-client/keeper/proposal_test.go (98%)
rename {core => modules/core}/02-client/module.go (83%)
rename {core => modules/core}/02-client/proposal_handler.go (85%)
rename {core => modules/core}/02-client/proposal_handler_test.go (91%)
rename {core => modules/core}/02-client/simulation/decoder.go (88%)
rename {core => modules/core}/02-client/simulation/decoder_test.go (86%)
rename {core => modules/core}/02-client/simulation/genesis.go (83%)
rename {core => modules/core}/02-client/types/client.go (97%)
rename {core => modules/core}/02-client/types/client.pb.go (91%)
rename {core => modules/core}/02-client/types/client_test.go (95%)
rename {core => modules/core}/02-client/types/codec.go (99%)
rename {core => modules/core}/02-client/types/codec_test.go (92%)
rename {core => modules/core}/02-client/types/encoding.go (98%)
rename {core => modules/core}/02-client/types/encoding_test.go (80%)
rename {core => modules/core}/02-client/types/errors.go (100%)
rename {core => modules/core}/02-client/types/events.go (92%)
rename {core => modules/core}/02-client/types/expected_keepers.go (100%)
rename {core => modules/core}/02-client/types/genesis.go (98%)
rename {core => modules/core}/02-client/types/genesis.pb.go (88%)
rename {core => modules/core}/02-client/types/genesis_test.go (97%)
rename {core => modules/core}/02-client/types/height.go (99%)
rename {core => modules/core}/02-client/types/height_test.go (98%)
rename {core => modules/core}/02-client/types/keys.go (97%)
rename {core => modules/core}/02-client/types/keys_test.go (96%)
rename {core => modules/core}/02-client/types/msgs.go (99%)
rename {core => modules/core}/02-client/types/msgs_test.go (98%)
rename {core => modules/core}/02-client/types/params.go (97%)
rename {core => modules/core}/02-client/types/params_test.go (91%)
rename {core => modules/core}/02-client/types/proposal.go (98%)
rename {core => modules/core}/02-client/types/proposal_test.go (97%)
rename {core => modules/core}/02-client/types/query.go (97%)
rename {core => modules/core}/02-client/types/query.pb.go (93%)
rename {core => modules/core}/02-client/types/query.pb.gw.go (100%)
rename {core => modules/core}/02-client/types/tx.pb.go (93%)
rename {core => modules/core}/03-connection/client/cli/cli.go (94%)
rename {core => modules/core}/03-connection/client/cli/query.go (94%)
rename {core => modules/core}/03-connection/client/cli/tx.go (97%)
rename {core => modules/core}/03-connection/client/utils/utils.go (94%)
rename {core => modules/core}/03-connection/genesis.go (88%)
rename {core => modules/core}/03-connection/keeper/grpc_query.go (96%)
rename {core => modules/core}/03-connection/keeper/grpc_query_test.go (97%)
rename {core => modules/core}/03-connection/keeper/handshake.go (98%)
rename {core => modules/core}/03-connection/keeper/handshake_test.go (98%)
rename {core => modules/core}/03-connection/keeper/keeper.go (95%)
rename {core => modules/core}/03-connection/keeper/keeper_test.go (97%)
rename {core => modules/core}/03-connection/keeper/verify.go (98%)
rename {core => modules/core}/03-connection/keeper/verify_test.go (97%)
rename {core => modules/core}/03-connection/module.go (83%)
rename {core => modules/core}/03-connection/simulation/decoder.go (90%)
rename {core => modules/core}/03-connection/simulation/decoder_test.go (89%)
rename {core => modules/core}/03-connection/simulation/genesis.go (82%)
rename {core => modules/core}/03-connection/types/codec.go (96%)
rename {core => modules/core}/03-connection/types/connection.go (95%)
rename {core => modules/core}/03-connection/types/connection.pb.go (91%)
rename {core => modules/core}/03-connection/types/connection_test.go (95%)
rename {core => modules/core}/03-connection/types/errors.go (100%)
rename {core => modules/core}/03-connection/types/events.go (92%)
rename {core => modules/core}/03-connection/types/expected_keepers.go (92%)
rename {core => modules/core}/03-connection/types/genesis.go (97%)
rename {core => modules/core}/03-connection/types/genesis.pb.go (83%)
rename {core => modules/core}/03-connection/types/genesis_test.go (96%)
rename {core => modules/core}/03-connection/types/keys.go (97%)
rename {core => modules/core}/03-connection/types/keys_test.go (95%)
rename {core => modules/core}/03-connection/types/msgs.go (98%)
rename {core => modules/core}/03-connection/types/msgs_test.go (98%)
rename {core => modules/core}/03-connection/types/query.go (95%)
rename {core => modules/core}/03-connection/types/query.pb.go (93%)
rename {core => modules/core}/03-connection/types/query.pb.gw.go (100%)
rename {core => modules/core}/03-connection/types/tx.pb.go (92%)
rename {core => modules/core}/03-connection/types/version.go (99%)
rename {core => modules/core}/03-connection/types/version_test.go (98%)
rename {core => modules/core}/04-channel/client/cli/cli.go (96%)
rename {core => modules/core}/04-channel/client/cli/query.go (98%)
rename {core => modules/core}/04-channel/client/cli/tx.go (96%)
rename {core => modules/core}/04-channel/client/utils/utils.go (96%)
rename {core => modules/core}/04-channel/genesis.go (93%)
rename {core => modules/core}/04-channel/handler.go (98%)
rename {core => modules/core}/04-channel/keeper/grpc_query.go (98%)
rename {core => modules/core}/04-channel/keeper/grpc_query_test.go (99%)
rename {core => modules/core}/04-channel/keeper/handshake.go (98%)
rename {core => modules/core}/04-channel/keeper/handshake_test.go (99%)
rename {core => modules/core}/04-channel/keeper/keeper.go (97%)
rename {core => modules/core}/04-channel/keeper/keeper_test.go (99%)
rename {core => modules/core}/04-channel/keeper/packet.go (98%)
rename {core => modules/core}/04-channel/keeper/packet_test.go (99%)
rename {core => modules/core}/04-channel/keeper/timeout.go (97%)
rename {core => modules/core}/04-channel/keeper/timeout_test.go (98%)
rename {core => modules/core}/04-channel/module.go (83%)
rename {core => modules/core}/04-channel/simulation/decoder.go (93%)
rename {core => modules/core}/04-channel/simulation/decoder_test.go (92%)
rename {core => modules/core}/04-channel/simulation/genesis.go (83%)
rename {core => modules/core}/04-channel/types/acknowledgement.go (100%)
rename {core => modules/core}/04-channel/types/acknowledgement_test.go (94%)
rename {core => modules/core}/04-channel/types/channel.go (97%)
rename {core => modules/core}/04-channel/types/channel.pb.go (90%)
rename {core => modules/core}/04-channel/types/channel_test.go (97%)
rename {core => modules/core}/04-channel/types/codec.go (96%)
rename {core => modules/core}/04-channel/types/errors.go (100%)
rename {core => modules/core}/04-channel/types/events.go (96%)
rename {core => modules/core}/04-channel/types/expected_keepers.go (93%)
rename {core => modules/core}/04-channel/types/genesis.go (98%)
rename {core => modules/core}/04-channel/types/genesis.pb.go (88%)
rename {core => modules/core}/04-channel/types/genesis_test.go (99%)
rename {core => modules/core}/04-channel/types/keys.go (97%)
rename {core => modules/core}/04-channel/types/keys_test.go (95%)
rename {core => modules/core}/04-channel/types/msgs.go (99%)
rename {core => modules/core}/04-channel/types/msgs_test.go (98%)
rename {core => modules/core}/04-channel/types/packet.go (95%)
rename {core => modules/core}/04-channel/types/packet_test.go (94%)
rename {core => modules/core}/04-channel/types/query.go (96%)
rename {core => modules/core}/04-channel/types/query.pb.go (95%)
rename {core => modules/core}/04-channel/types/query.pb.gw.go (100%)
rename {core => modules/core}/04-channel/types/tx.pb.go (95%)
rename {core => modules/core}/05-port/keeper/keeper.go (95%)
rename {core => modules/core}/05-port/keeper/keeper_test.go (97%)
rename {core => modules/core}/05-port/types/errors.go (100%)
rename {core => modules/core}/05-port/types/keys.go (100%)
rename {core => modules/core}/05-port/types/module.go (95%)
rename {core => modules/core}/05-port/types/router.go (100%)
rename {core => modules/core}/05-port/types/utils.go (100%)
rename {core => modules/core}/23-commitment/types/bench_test.go (100%)
rename {core => modules/core}/23-commitment/types/codec.go (94%)
rename {core => modules/core}/23-commitment/types/commitment.pb.go (90%)
rename {core => modules/core}/23-commitment/types/commitment_test.go (100%)
rename {core => modules/core}/23-commitment/types/errors.go (100%)
rename {core => modules/core}/23-commitment/types/merkle.go (99%)
rename {core => modules/core}/23-commitment/types/merkle_test.go (99%)
rename {core => modules/core}/23-commitment/types/utils.go (100%)
rename {core => modules/core}/23-commitment/types/utils_test.go (97%)
rename {core => modules/core}/24-host/errors.go (100%)
rename {core => modules/core}/24-host/keys.go (99%)
rename {core => modules/core}/24-host/parse.go (100%)
rename {core => modules/core}/24-host/parse_test.go (91%)
rename {core => modules/core}/24-host/validate.go (100%)
rename {core => modules/core}/24-host/validate_test.go (100%)
rename {core => modules/core}/client/cli/cli.go (82%)
rename {core => modules/core}/client/query.go (91%)
rename {core => modules/core}/exported/channel.go (100%)
rename {core => modules/core}/exported/client.go (100%)
rename {core => modules/core}/exported/commitment.go (100%)
rename {core => modules/core}/exported/connection.go (100%)
rename {core => modules/core}/genesis.go (74%)
rename {core => modules/core}/genesis_test.go (95%)
rename {core => modules/core}/handler.go (92%)
rename {core => modules/core}/keeper/grpc_query.go (96%)
rename {core => modules/core}/keeper/keeper.go (79%)
rename {core => modules/core}/keeper/msg_server.go (98%)
rename {core => modules/core}/keeper/msg_server_test.go (98%)
rename {core => modules/core}/module.go (92%)
rename {core => modules/core}/simulation/decoder.go (68%)
rename {core => modules/core}/simulation/decoder_test.go (82%)
rename {core => modules/core}/simulation/genesis.go (75%)
rename {core => modules/core}/simulation/genesis_test.go (89%)
rename {core => modules/core}/spec/01_concepts.md (100%)
rename {core => modules/core}/spec/02_state.md (100%)
rename {core => modules/core}/spec/03_state_transitions.md (100%)
rename {core => modules/core}/spec/04_messages.md (100%)
rename {core => modules/core}/spec/05_callbacks.md (100%)
rename {core => modules/core}/spec/06_events.md (100%)
rename {core => modules/core}/spec/07_params.md (100%)
rename {core => modules/core}/spec/README.md (100%)
create mode 100644 modules/core/types/codec.go
rename {core => modules/core}/types/genesis.go (82%)
rename {core => modules/core}/types/genesis.pb.go (82%)
rename {core => modules/core}/types/query.go (59%)
rename {light-clients => modules/light-clients}/06-solomachine/doc.go (100%)
rename {light-clients => modules/light-clients}/06-solomachine/module.go (64%)
rename {light-clients => modules/light-clients}/06-solomachine/spec/01_concepts.md (100%)
rename {light-clients => modules/light-clients}/06-solomachine/spec/02_state.md (100%)
rename {light-clients => modules/light-clients}/06-solomachine/spec/03_state_transitions.md (100%)
rename {light-clients => modules/light-clients}/06-solomachine/spec/04_messages.md (100%)
rename {light-clients => modules/light-clients}/06-solomachine/spec/README.md (100%)
rename {light-clients => modules/light-clients}/06-solomachine/types/client_state.go (98%)
rename {light-clients => modules/light-clients}/06-solomachine/types/client_state_test.go (97%)
rename {light-clients => modules/light-clients}/06-solomachine/types/codec.go (96%)
rename {light-clients => modules/light-clients}/06-solomachine/types/codec_test.go (95%)
rename {light-clients => modules/light-clients}/06-solomachine/types/consensus_state.go (93%)
rename {light-clients => modules/light-clients}/06-solomachine/types/consensus_state_test.go (93%)
rename {light-clients => modules/light-clients}/06-solomachine/types/errors.go (100%)
rename {light-clients => modules/light-clients}/06-solomachine/types/header.go (94%)
rename {light-clients => modules/light-clients}/06-solomachine/types/header_test.go (94%)
rename {light-clients => modules/light-clients}/06-solomachine/types/misbehaviour.go (93%)
rename {light-clients => modules/light-clients}/06-solomachine/types/misbehaviour_handle.go (96%)
rename {light-clients => modules/light-clients}/06-solomachine/types/misbehaviour_handle_test.go (97%)
rename {light-clients => modules/light-clients}/06-solomachine/types/misbehaviour_test.go (96%)
rename {light-clients => modules/light-clients}/06-solomachine/types/proof.go (97%)
rename {light-clients => modules/light-clients}/06-solomachine/types/proof_test.go (94%)
rename {light-clients => modules/light-clients}/06-solomachine/types/proposal_handle.go (94%)
rename {light-clients => modules/light-clients}/06-solomachine/types/proposal_handle_test.go (93%)
rename {light-clients => modules/light-clients}/06-solomachine/types/solomachine.go (97%)
rename {light-clients => modules/light-clients}/06-solomachine/types/solomachine.pb.go (92%)
rename {light-clients => modules/light-clients}/06-solomachine/types/solomachine_test.go (94%)
rename {light-clients => modules/light-clients}/06-solomachine/types/update.go (96%)
rename {light-clients => modules/light-clients}/06-solomachine/types/update_test.go (95%)
rename {light-clients => modules/light-clients}/07-tendermint/doc.go (100%)
rename {light-clients => modules/light-clients}/07-tendermint/module.go (62%)
rename {light-clients => modules/light-clients}/07-tendermint/types/client_state.go (97%)
rename {light-clients => modules/light-clients}/07-tendermint/types/client_state_test.go (98%)
rename {light-clients => modules/light-clients}/07-tendermint/types/codec.go (92%)
rename {light-clients => modules/light-clients}/07-tendermint/types/consensus_state.go (89%)
rename {light-clients => modules/light-clients}/07-tendermint/types/consensus_state_test.go (89%)
rename {light-clients => modules/light-clients}/07-tendermint/types/errors.go (100%)
rename {light-clients => modules/light-clients}/07-tendermint/types/fraction.go (100%)
rename {light-clients => modules/light-clients}/07-tendermint/types/genesis.go (81%)
rename {light-clients => modules/light-clients}/07-tendermint/types/genesis_test.go (89%)
rename {light-clients => modules/light-clients}/07-tendermint/types/header.go (93%)
rename {light-clients => modules/light-clients}/07-tendermint/types/header_test.go (91%)
rename {light-clients => modules/light-clients}/07-tendermint/types/misbehaviour.go (96%)
rename {light-clients => modules/light-clients}/07-tendermint/types/misbehaviour_handle.go (97%)
rename {light-clients => modules/light-clients}/07-tendermint/types/misbehaviour_handle_test.go (98%)
rename {light-clients => modules/light-clients}/07-tendermint/types/misbehaviour_test.go (97%)
rename {light-clients => modules/light-clients}/07-tendermint/types/proposal_handle.go (97%)
rename {light-clients => modules/light-clients}/07-tendermint/types/proposal_handle_test.go (98%)
rename {light-clients => modules/light-clients}/07-tendermint/types/store.go (95%)
rename {light-clients => modules/light-clients}/07-tendermint/types/store_test.go (90%)
rename {light-clients => modules/light-clients}/07-tendermint/types/tendermint.pb.go (87%)
rename {light-clients => modules/light-clients}/07-tendermint/types/tendermint_test.go (95%)
rename {light-clients => modules/light-clients}/07-tendermint/types/update.go (97%)
rename {light-clients => modules/light-clients}/07-tendermint/types/update_test.go (98%)
rename {light-clients => modules/light-clients}/07-tendermint/types/upgrade.go (97%)
rename {light-clients => modules/light-clients}/07-tendermint/types/upgrade_test.go (98%)
rename {light-clients => modules/light-clients}/09-localhost/doc.go (100%)
rename {light-clients => modules/light-clients}/09-localhost/module.go (63%)
rename {light-clients => modules/light-clients}/09-localhost/types/client_state.go (96%)
rename {light-clients => modules/light-clients}/09-localhost/types/client_state_test.go (96%)
rename {light-clients => modules/light-clients}/09-localhost/types/codec.go (86%)
rename {light-clients => modules/light-clients}/09-localhost/types/errors.go (100%)
rename {light-clients => modules/light-clients}/09-localhost/types/keys.go (100%)
rename {light-clients => modules/light-clients}/09-localhost/types/localhost.pb.go (94%)
rename {light-clients => modules/light-clients}/09-localhost/types/localhost_test.go (87%)
diff --git a/.codecov.yml b/.codecov.yml
index c5314207..7935dfdb 100644
--- a/.codecov.yml
+++ b/.codecov.yml
@@ -39,7 +39,7 @@ flags:
modules:
paths:
- "core/"
- - "!core/**/client/" # ignore client package
+ - "!modules/core/**/client/" # ignore client package
client:
paths:
- "client/"
@@ -50,6 +50,7 @@ ignore:
- "*.md"
- "**/*.pb.go"
- "**/*.pb.gw.go"
- - "core/**/*.pb.go"
- - "core/**/test_common.go"
+ - "modules/**/**/**/*.pb.go"
+ - "modules/**/**/**/*.pb.gw.go"
+ - "modules/**/**/**/test_common.go"
- "scripts/"
diff --git a/core/types/codec.go b/core/types/codec.go
deleted file mode 100644
index 16351c74..00000000
--- a/core/types/codec.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package types
-
-import (
- codectypes "github.com/cosmos/cosmos-sdk/codec/types"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- connectiontypes "github.com/cosmos/ibc-go/core/03-connection/types"
- channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
- commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
- solomachinetypes "github.com/cosmos/ibc-go/light-clients/06-solomachine/types"
- ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
- localhosttypes "github.com/cosmos/ibc-go/light-clients/09-localhost/types"
-)
-
-// RegisterInterfaces registers x/ibc interfaces into protobuf Any.
-func RegisterInterfaces(registry codectypes.InterfaceRegistry) {
- clienttypes.RegisterInterfaces(registry)
- connectiontypes.RegisterInterfaces(registry)
- channeltypes.RegisterInterfaces(registry)
- solomachinetypes.RegisterInterfaces(registry)
- ibctmtypes.RegisterInterfaces(registry)
- localhosttypes.RegisterInterfaces(registry)
- commitmenttypes.RegisterInterfaces(registry)
-}
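The file deleted above is recreated as modules/core/types/codec.go elsewhere in this patch (see the "create mode 100644 modules/core/types/codec.go" entry and the "modules/core/types/codec.go | 23 +++" line in the summary). A minimal sketch of its expected contents, assuming it mirrors the deleted file with only the import paths gaining the new modules/ prefix:

package types

import (
	codectypes "github.com/cosmos/cosmos-sdk/codec/types"

	clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
	connectiontypes "github.com/cosmos/ibc-go/modules/core/03-connection/types"
	channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
	commitmenttypes "github.com/cosmos/ibc-go/modules/core/23-commitment/types"
	solomachinetypes "github.com/cosmos/ibc-go/modules/light-clients/06-solomachine/types"
	ibctmtypes "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types"
	localhosttypes "github.com/cosmos/ibc-go/modules/light-clients/09-localhost/types"
)

// RegisterInterfaces registers x/ibc interfaces into protobuf Any.
func RegisterInterfaces(registry codectypes.InterfaceRegistry) {
	clienttypes.RegisterInterfaces(registry)
	connectiontypes.RegisterInterfaces(registry)
	channeltypes.RegisterInterfaces(registry)
	solomachinetypes.RegisterInterfaces(registry)
	ibctmtypes.RegisterInterfaces(registry)
	localhosttypes.RegisterInterfaces(registry)
	commitmenttypes.RegisterInterfaces(registry)
}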
diff --git a/apps/transfer/client/cli/cli.go b/modules/apps/transfer/client/cli/cli.go
similarity index 100%
rename from apps/transfer/client/cli/cli.go
rename to modules/apps/transfer/client/cli/cli.go
diff --git a/apps/transfer/client/cli/query.go b/modules/apps/transfer/client/cli/query.go
similarity index 98%
rename from apps/transfer/client/cli/query.go
rename to modules/apps/transfer/client/cli/query.go
index d6123e42..6dd2e6cf 100644
--- a/apps/transfer/client/cli/query.go
+++ b/modules/apps/transfer/client/cli/query.go
@@ -8,7 +8,7 @@ import (
"github.com/cosmos/cosmos-sdk/client"
"github.com/cosmos/cosmos-sdk/client/flags"
"github.com/cosmos/cosmos-sdk/version"
- "github.com/cosmos/ibc-go/apps/transfer/types"
+ "github.com/cosmos/ibc-go/modules/apps/transfer/types"
)
// GetCmdQueryDenomTrace defines the command to query a denomination trace from a given hash.
diff --git a/apps/transfer/client/cli/tx.go b/modules/apps/transfer/client/cli/tx.go
similarity index 95%
rename from apps/transfer/client/cli/tx.go
rename to modules/apps/transfer/client/cli/tx.go
index 9eafea9a..0efba3b4 100644
--- a/apps/transfer/client/cli/tx.go
+++ b/modules/apps/transfer/client/cli/tx.go
@@ -12,9 +12,9 @@ import (
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/cosmos/cosmos-sdk/types/msgservice"
"github.com/cosmos/cosmos-sdk/version"
- "github.com/cosmos/ibc-go/apps/transfer/types"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- channelutils "github.com/cosmos/ibc-go/core/04-channel/client/utils"
+ "github.com/cosmos/ibc-go/modules/apps/transfer/types"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ channelutils "github.com/cosmos/ibc-go/modules/core/04-channel/client/utils"
)
const (
diff --git a/apps/transfer/handler.go b/modules/apps/transfer/handler.go
similarity index 92%
rename from apps/transfer/handler.go
rename to modules/apps/transfer/handler.go
index 58ad69fd..f6a0ace3 100644
--- a/apps/transfer/handler.go
+++ b/modules/apps/transfer/handler.go
@@ -3,7 +3,7 @@ package transfer
import (
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- "github.com/cosmos/ibc-go/apps/transfer/types"
+ "github.com/cosmos/ibc-go/modules/apps/transfer/types"
)
// NewHandler returns sdk.Handler for IBC token transfer module messages
diff --git a/apps/transfer/handler_test.go b/modules/apps/transfer/handler_test.go
similarity index 96%
rename from apps/transfer/handler_test.go
rename to modules/apps/transfer/handler_test.go
index 584b4a32..5d4d95d7 100644
--- a/apps/transfer/handler_test.go
+++ b/modules/apps/transfer/handler_test.go
@@ -6,10 +6,10 @@ import (
"github.com/stretchr/testify/suite"
sdk "github.com/cosmos/cosmos-sdk/types"
- "github.com/cosmos/ibc-go/apps/transfer/types"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
- "github.com/cosmos/ibc-go/core/exported"
+ "github.com/cosmos/ibc-go/modules/apps/transfer/types"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
+ "github.com/cosmos/ibc-go/modules/core/exported"
ibctesting "github.com/cosmos/ibc-go/testing"
)
diff --git a/apps/transfer/keeper/MBT_README.md b/modules/apps/transfer/keeper/MBT_README.md
similarity index 100%
rename from apps/transfer/keeper/MBT_README.md
rename to modules/apps/transfer/keeper/MBT_README.md
diff --git a/apps/transfer/keeper/encoding.go b/modules/apps/transfer/keeper/encoding.go
similarity index 95%
rename from apps/transfer/keeper/encoding.go
rename to modules/apps/transfer/keeper/encoding.go
index 1f6e7e63..ae0741a9 100644
--- a/apps/transfer/keeper/encoding.go
+++ b/modules/apps/transfer/keeper/encoding.go
@@ -1,7 +1,7 @@
package keeper
import (
- "github.com/cosmos/ibc-go/apps/transfer/types"
+ "github.com/cosmos/ibc-go/modules/apps/transfer/types"
)
// UnmarshalDenomTrace attempts to decode and return a DenomTrace object from
diff --git a/apps/transfer/keeper/genesis.go b/modules/apps/transfer/keeper/genesis.go
similarity index 95%
rename from apps/transfer/keeper/genesis.go
rename to modules/apps/transfer/keeper/genesis.go
index 1c7aaef8..7050a2c5 100644
--- a/apps/transfer/keeper/genesis.go
+++ b/modules/apps/transfer/keeper/genesis.go
@@ -4,7 +4,7 @@ import (
"fmt"
sdk "github.com/cosmos/cosmos-sdk/types"
- "github.com/cosmos/ibc-go/apps/transfer/types"
+ "github.com/cosmos/ibc-go/modules/apps/transfer/types"
)
// InitGenesis initializes the ibc-transfer state and binds to PortID.
diff --git a/apps/transfer/keeper/genesis_test.go b/modules/apps/transfer/keeper/genesis_test.go
similarity index 93%
rename from apps/transfer/keeper/genesis_test.go
rename to modules/apps/transfer/keeper/genesis_test.go
index ad708004..7dfffbdb 100644
--- a/apps/transfer/keeper/genesis_test.go
+++ b/modules/apps/transfer/keeper/genesis_test.go
@@ -3,7 +3,7 @@ package keeper_test
import (
"fmt"
- "github.com/cosmos/ibc-go/apps/transfer/types"
+ "github.com/cosmos/ibc-go/modules/apps/transfer/types"
)
func (suite *KeeperTestSuite) TestGenesis() {
diff --git a/apps/transfer/keeper/grpc_query.go b/modules/apps/transfer/keeper/grpc_query.go
similarity index 97%
rename from apps/transfer/keeper/grpc_query.go
rename to modules/apps/transfer/keeper/grpc_query.go
index 08656587..10015f72 100644
--- a/apps/transfer/keeper/grpc_query.go
+++ b/modules/apps/transfer/keeper/grpc_query.go
@@ -11,7 +11,7 @@ import (
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
"github.com/cosmos/cosmos-sdk/types/query"
- "github.com/cosmos/ibc-go/apps/transfer/types"
+ "github.com/cosmos/ibc-go/modules/apps/transfer/types"
)
var _ types.QueryServer = Keeper{}
diff --git a/apps/transfer/keeper/grpc_query_test.go b/modules/apps/transfer/keeper/grpc_query_test.go
similarity index 98%
rename from apps/transfer/keeper/grpc_query_test.go
rename to modules/apps/transfer/keeper/grpc_query_test.go
index c297ea9d..ca98bd77 100644
--- a/apps/transfer/keeper/grpc_query_test.go
+++ b/modules/apps/transfer/keeper/grpc_query_test.go
@@ -5,7 +5,7 @@ import (
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/cosmos/cosmos-sdk/types/query"
- "github.com/cosmos/ibc-go/apps/transfer/types"
+ "github.com/cosmos/ibc-go/modules/apps/transfer/types"
)
func (suite *KeeperTestSuite) TestQueryDenomTrace() {
diff --git a/apps/transfer/keeper/keeper.go b/modules/apps/transfer/keeper/keeper.go
similarity index 97%
rename from apps/transfer/keeper/keeper.go
rename to modules/apps/transfer/keeper/keeper.go
index fbc4a167..be69ca3f 100644
--- a/apps/transfer/keeper/keeper.go
+++ b/modules/apps/transfer/keeper/keeper.go
@@ -11,9 +11,9 @@ import (
authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
capabilitykeeper "github.com/cosmos/cosmos-sdk/x/capability/keeper"
capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"
- "github.com/cosmos/ibc-go/apps/transfer/types"
- channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
- host "github.com/cosmos/ibc-go/core/24-host"
+ "github.com/cosmos/ibc-go/modules/apps/transfer/types"
+ channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
paramtypes "github.com/cosmos/cosmos-sdk/x/params/types"
)
diff --git a/apps/transfer/keeper/keeper_test.go b/modules/apps/transfer/keeper/keeper_test.go
similarity index 96%
rename from apps/transfer/keeper/keeper_test.go
rename to modules/apps/transfer/keeper/keeper_test.go
index f7f01038..8c90f186 100644
--- a/apps/transfer/keeper/keeper_test.go
+++ b/modules/apps/transfer/keeper/keeper_test.go
@@ -8,7 +8,7 @@ import (
"github.com/cosmos/cosmos-sdk/baseapp"
sdk "github.com/cosmos/cosmos-sdk/types"
- "github.com/cosmos/ibc-go/apps/transfer/types"
+ "github.com/cosmos/ibc-go/modules/apps/transfer/types"
ibctesting "github.com/cosmos/ibc-go/testing"
)
diff --git a/apps/transfer/keeper/mbt_relay_test.go b/modules/apps/transfer/keeper/mbt_relay_test.go
similarity index 97%
rename from apps/transfer/keeper/mbt_relay_test.go
rename to modules/apps/transfer/keeper/mbt_relay_test.go
index 52205088..4130845c 100644
--- a/apps/transfer/keeper/mbt_relay_test.go
+++ b/modules/apps/transfer/keeper/mbt_relay_test.go
@@ -15,10 +15,10 @@ import (
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- "github.com/cosmos/ibc-go/apps/transfer/types"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
- "github.com/cosmos/ibc-go/core/exported"
+ "github.com/cosmos/ibc-go/modules/apps/transfer/types"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
+ "github.com/cosmos/ibc-go/modules/core/exported"
ibctesting "github.com/cosmos/ibc-go/testing"
)
diff --git a/apps/transfer/keeper/model_based_tests/Test5Packets.json b/modules/apps/transfer/keeper/model_based_tests/Test5Packets.json
similarity index 100%
rename from apps/transfer/keeper/model_based_tests/Test5Packets.json
rename to modules/apps/transfer/keeper/model_based_tests/Test5Packets.json
diff --git a/apps/transfer/keeper/model_based_tests/Test5Packets.tla b/modules/apps/transfer/keeper/model_based_tests/Test5Packets.tla
similarity index 100%
rename from apps/transfer/keeper/model_based_tests/Test5Packets.tla
rename to modules/apps/transfer/keeper/model_based_tests/Test5Packets.tla
diff --git a/apps/transfer/keeper/model_based_tests/Test5PacketsAllDifferentPass.json b/modules/apps/transfer/keeper/model_based_tests/Test5PacketsAllDifferentPass.json
similarity index 100%
rename from apps/transfer/keeper/model_based_tests/Test5PacketsAllDifferentPass.json
rename to modules/apps/transfer/keeper/model_based_tests/Test5PacketsAllDifferentPass.json
diff --git a/apps/transfer/keeper/model_based_tests/Test5PacketsAllDifferentPass.tla b/modules/apps/transfer/keeper/model_based_tests/Test5PacketsAllDifferentPass.tla
similarity index 100%
rename from apps/transfer/keeper/model_based_tests/Test5PacketsAllDifferentPass.tla
rename to modules/apps/transfer/keeper/model_based_tests/Test5PacketsAllDifferentPass.tla
diff --git a/apps/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementErrorFail.json b/modules/apps/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementErrorFail.json
similarity index 100%
rename from apps/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementErrorFail.json
rename to modules/apps/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementErrorFail.json
diff --git a/apps/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementErrorFail.tla b/modules/apps/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementErrorFail.tla
similarity index 100%
rename from apps/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementErrorFail.tla
rename to modules/apps/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementErrorFail.tla
diff --git a/apps/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementErrorPass.json b/modules/apps/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementErrorPass.json
similarity index 100%
rename from apps/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementErrorPass.json
rename to modules/apps/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementErrorPass.json
diff --git a/apps/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementErrorPass.tla b/modules/apps/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementErrorPass.tla
similarity index 100%
rename from apps/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementErrorPass.tla
rename to modules/apps/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementErrorPass.tla
diff --git a/apps/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementResultFail.json b/modules/apps/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementResultFail.json
similarity index 100%
rename from apps/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementResultFail.json
rename to modules/apps/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementResultFail.json
diff --git a/apps/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementResultFail.tla b/modules/apps/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementResultFail.tla
similarity index 100%
rename from apps/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementResultFail.tla
rename to modules/apps/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementResultFail.tla
diff --git a/apps/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementResultPass.json b/modules/apps/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementResultPass.json
similarity index 100%
rename from apps/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementResultPass.json
rename to modules/apps/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementResultPass.json
diff --git a/apps/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementResultPass.tla b/modules/apps/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementResultPass.tla
similarity index 100%
rename from apps/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementResultPass.tla
rename to modules/apps/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementResultPass.tla
diff --git a/apps/transfer/keeper/model_based_tests/TestOnRecvPacketFail.json b/modules/apps/transfer/keeper/model_based_tests/TestOnRecvPacketFail.json
similarity index 100%
rename from apps/transfer/keeper/model_based_tests/TestOnRecvPacketFail.json
rename to modules/apps/transfer/keeper/model_based_tests/TestOnRecvPacketFail.json
diff --git a/apps/transfer/keeper/model_based_tests/TestOnRecvPacketFail.tla b/modules/apps/transfer/keeper/model_based_tests/TestOnRecvPacketFail.tla
similarity index 100%
rename from apps/transfer/keeper/model_based_tests/TestOnRecvPacketFail.tla
rename to modules/apps/transfer/keeper/model_based_tests/TestOnRecvPacketFail.tla
diff --git a/apps/transfer/keeper/model_based_tests/TestOnRecvPacketPass.json b/modules/apps/transfer/keeper/model_based_tests/TestOnRecvPacketPass.json
similarity index 100%
rename from apps/transfer/keeper/model_based_tests/TestOnRecvPacketPass.json
rename to modules/apps/transfer/keeper/model_based_tests/TestOnRecvPacketPass.json
diff --git a/apps/transfer/keeper/model_based_tests/TestOnRecvPacketPass.tla b/modules/apps/transfer/keeper/model_based_tests/TestOnRecvPacketPass.tla
similarity index 100%
rename from apps/transfer/keeper/model_based_tests/TestOnRecvPacketPass.tla
rename to modules/apps/transfer/keeper/model_based_tests/TestOnRecvPacketPass.tla
diff --git a/apps/transfer/keeper/model_based_tests/TestOnTimeoutFail.json b/modules/apps/transfer/keeper/model_based_tests/TestOnTimeoutFail.json
similarity index 100%
rename from apps/transfer/keeper/model_based_tests/TestOnTimeoutFail.json
rename to modules/apps/transfer/keeper/model_based_tests/TestOnTimeoutFail.json
diff --git a/apps/transfer/keeper/model_based_tests/TestOnTimeoutFail.tla b/modules/apps/transfer/keeper/model_based_tests/TestOnTimeoutFail.tla
similarity index 100%
rename from apps/transfer/keeper/model_based_tests/TestOnTimeoutFail.tla
rename to modules/apps/transfer/keeper/model_based_tests/TestOnTimeoutFail.tla
diff --git a/apps/transfer/keeper/model_based_tests/TestOnTimeoutPass.json b/modules/apps/transfer/keeper/model_based_tests/TestOnTimeoutPass.json
similarity index 100%
rename from apps/transfer/keeper/model_based_tests/TestOnTimeoutPass.json
rename to modules/apps/transfer/keeper/model_based_tests/TestOnTimeoutPass.json
diff --git a/apps/transfer/keeper/model_based_tests/TestOnTimeoutPass.tla b/modules/apps/transfer/keeper/model_based_tests/TestOnTimeoutPass.tla
similarity index 100%
rename from apps/transfer/keeper/model_based_tests/TestOnTimeoutPass.tla
rename to modules/apps/transfer/keeper/model_based_tests/TestOnTimeoutPass.tla
diff --git a/apps/transfer/keeper/model_based_tests/TestSendTransferFail.json b/modules/apps/transfer/keeper/model_based_tests/TestSendTransferFail.json
similarity index 100%
rename from apps/transfer/keeper/model_based_tests/TestSendTransferFail.json
rename to modules/apps/transfer/keeper/model_based_tests/TestSendTransferFail.json
diff --git a/apps/transfer/keeper/model_based_tests/TestSendTransferFail.tla b/modules/apps/transfer/keeper/model_based_tests/TestSendTransferFail.tla
similarity index 100%
rename from apps/transfer/keeper/model_based_tests/TestSendTransferFail.tla
rename to modules/apps/transfer/keeper/model_based_tests/TestSendTransferFail.tla
diff --git a/apps/transfer/keeper/model_based_tests/TestSendTransferPass.json b/modules/apps/transfer/keeper/model_based_tests/TestSendTransferPass.json
similarity index 100%
rename from apps/transfer/keeper/model_based_tests/TestSendTransferPass.json
rename to modules/apps/transfer/keeper/model_based_tests/TestSendTransferPass.json
diff --git a/apps/transfer/keeper/model_based_tests/TestSendTransferPass.tla b/modules/apps/transfer/keeper/model_based_tests/TestSendTransferPass.tla
similarity index 100%
rename from apps/transfer/keeper/model_based_tests/TestSendTransferPass.tla
rename to modules/apps/transfer/keeper/model_based_tests/TestSendTransferPass.tla
diff --git a/apps/transfer/keeper/model_based_tests/TestUnescrowTokens.json b/modules/apps/transfer/keeper/model_based_tests/TestUnescrowTokens.json
similarity index 100%
rename from apps/transfer/keeper/model_based_tests/TestUnescrowTokens.json
rename to modules/apps/transfer/keeper/model_based_tests/TestUnescrowTokens.json
diff --git a/apps/transfer/keeper/model_based_tests/TestUnescrowTokens.tla b/modules/apps/transfer/keeper/model_based_tests/TestUnescrowTokens.tla
similarity index 100%
rename from apps/transfer/keeper/model_based_tests/TestUnescrowTokens.tla
rename to modules/apps/transfer/keeper/model_based_tests/TestUnescrowTokens.tla
diff --git a/apps/transfer/keeper/msg_server.go b/modules/apps/transfer/keeper/msg_server.go
similarity index 95%
rename from apps/transfer/keeper/msg_server.go
rename to modules/apps/transfer/keeper/msg_server.go
index 4c658434..3fb536fa 100644
--- a/apps/transfer/keeper/msg_server.go
+++ b/modules/apps/transfer/keeper/msg_server.go
@@ -4,7 +4,7 @@ import (
"context"
sdk "github.com/cosmos/cosmos-sdk/types"
- "github.com/cosmos/ibc-go/apps/transfer/types"
+ "github.com/cosmos/ibc-go/modules/apps/transfer/types"
)
var _ types.MsgServer = Keeper{}
diff --git a/apps/transfer/keeper/params.go b/modules/apps/transfer/keeper/params.go
similarity index 93%
rename from apps/transfer/keeper/params.go
rename to modules/apps/transfer/keeper/params.go
index 1d5a9d0c..c6686acf 100644
--- a/apps/transfer/keeper/params.go
+++ b/modules/apps/transfer/keeper/params.go
@@ -2,7 +2,7 @@ package keeper
import (
sdk "github.com/cosmos/cosmos-sdk/types"
- "github.com/cosmos/ibc-go/apps/transfer/types"
+ "github.com/cosmos/ibc-go/modules/apps/transfer/types"
)
// GetSendEnabled retrieves the send enabled boolean from the paramstore
diff --git a/apps/transfer/keeper/params_test.go b/modules/apps/transfer/keeper/params_test.go
similarity index 88%
rename from apps/transfer/keeper/params_test.go
rename to modules/apps/transfer/keeper/params_test.go
index 464ce300..ac680a41 100644
--- a/apps/transfer/keeper/params_test.go
+++ b/modules/apps/transfer/keeper/params_test.go
@@ -1,6 +1,6 @@
package keeper_test
-import "github.com/cosmos/ibc-go/apps/transfer/types"
+import "github.com/cosmos/ibc-go/modules/apps/transfer/types"
func (suite *KeeperTestSuite) TestParams() {
expParams := types.DefaultParams()
diff --git a/apps/transfer/keeper/relay.go b/modules/apps/transfer/keeper/relay.go
similarity index 98%
rename from apps/transfer/keeper/relay.go
rename to modules/apps/transfer/keeper/relay.go
index 56b0489e..a4ce016d 100644
--- a/apps/transfer/keeper/relay.go
+++ b/modules/apps/transfer/keeper/relay.go
@@ -9,10 +9,10 @@ import (
"github.com/cosmos/cosmos-sdk/telemetry"
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- "github.com/cosmos/ibc-go/apps/transfer/types"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
- host "github.com/cosmos/ibc-go/core/24-host"
+ "github.com/cosmos/ibc-go/modules/apps/transfer/types"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
)
// SendTransfer handles transfer sending logic. There are 2 possible cases:
diff --git a/apps/transfer/keeper/relay_model/account.tla b/modules/apps/transfer/keeper/relay_model/account.tla
similarity index 100%
rename from apps/transfer/keeper/relay_model/account.tla
rename to modules/apps/transfer/keeper/relay_model/account.tla
diff --git a/apps/transfer/keeper/relay_model/account_record.tla b/modules/apps/transfer/keeper/relay_model/account_record.tla
similarity index 100%
rename from apps/transfer/keeper/relay_model/account_record.tla
rename to modules/apps/transfer/keeper/relay_model/account_record.tla
diff --git a/apps/transfer/keeper/relay_model/apalache-to-relay-test.json b/modules/apps/transfer/keeper/relay_model/apalache-to-relay-test.json
similarity index 100%
rename from apps/transfer/keeper/relay_model/apalache-to-relay-test.json
rename to modules/apps/transfer/keeper/relay_model/apalache-to-relay-test.json
diff --git a/apps/transfer/keeper/relay_model/apalache-to-relay-test2.json b/modules/apps/transfer/keeper/relay_model/apalache-to-relay-test2.json
similarity index 100%
rename from apps/transfer/keeper/relay_model/apalache-to-relay-test2.json
rename to modules/apps/transfer/keeper/relay_model/apalache-to-relay-test2.json
diff --git a/apps/transfer/keeper/relay_model/denom.tla b/modules/apps/transfer/keeper/relay_model/denom.tla
similarity index 100%
rename from apps/transfer/keeper/relay_model/denom.tla
rename to modules/apps/transfer/keeper/relay_model/denom.tla
diff --git a/apps/transfer/keeper/relay_model/denom_record.tla b/modules/apps/transfer/keeper/relay_model/denom_record.tla
similarity index 100%
rename from apps/transfer/keeper/relay_model/denom_record.tla
rename to modules/apps/transfer/keeper/relay_model/denom_record.tla
diff --git a/apps/transfer/keeper/relay_model/denom_record2.tla b/modules/apps/transfer/keeper/relay_model/denom_record2.tla
similarity index 100%
rename from apps/transfer/keeper/relay_model/denom_record2.tla
rename to modules/apps/transfer/keeper/relay_model/denom_record2.tla
diff --git a/apps/transfer/keeper/relay_model/denom_sequence.tla b/modules/apps/transfer/keeper/relay_model/denom_sequence.tla
similarity index 100%
rename from apps/transfer/keeper/relay_model/denom_sequence.tla
rename to modules/apps/transfer/keeper/relay_model/denom_sequence.tla
diff --git a/apps/transfer/keeper/relay_model/identifiers.tla b/modules/apps/transfer/keeper/relay_model/identifiers.tla
similarity index 100%
rename from apps/transfer/keeper/relay_model/identifiers.tla
rename to modules/apps/transfer/keeper/relay_model/identifiers.tla
diff --git a/apps/transfer/keeper/relay_model/relay.tla b/modules/apps/transfer/keeper/relay_model/relay.tla
similarity index 100%
rename from apps/transfer/keeper/relay_model/relay.tla
rename to modules/apps/transfer/keeper/relay_model/relay.tla
diff --git a/apps/transfer/keeper/relay_model/relay_tests.tla b/modules/apps/transfer/keeper/relay_model/relay_tests.tla
similarity index 100%
rename from apps/transfer/keeper/relay_model/relay_tests.tla
rename to modules/apps/transfer/keeper/relay_model/relay_tests.tla
diff --git a/apps/transfer/keeper/relay_test.go b/modules/apps/transfer/keeper/relay_test.go
similarity index 98%
rename from apps/transfer/keeper/relay_test.go
rename to modules/apps/transfer/keeper/relay_test.go
index 97f4f96a..2f754e60 100644
--- a/apps/transfer/keeper/relay_test.go
+++ b/modules/apps/transfer/keeper/relay_test.go
@@ -6,11 +6,11 @@ import (
"github.com/cosmos/ibc-go/testing/simapp"
sdk "github.com/cosmos/cosmos-sdk/types"
- "github.com/cosmos/ibc-go/apps/transfer/types"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
- host "github.com/cosmos/ibc-go/core/24-host"
- "github.com/cosmos/ibc-go/core/exported"
+ "github.com/cosmos/ibc-go/modules/apps/transfer/types"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
+ "github.com/cosmos/ibc-go/modules/core/exported"
ibctesting "github.com/cosmos/ibc-go/testing"
)
diff --git a/apps/transfer/module.go b/modules/apps/transfer/module.go
similarity index 97%
rename from apps/transfer/module.go
rename to modules/apps/transfer/module.go
index 20dd3919..f4620ee9 100644
--- a/apps/transfer/module.go
+++ b/modules/apps/transfer/module.go
@@ -22,13 +22,13 @@ import (
"github.com/cosmos/cosmos-sdk/types/module"
simtypes "github.com/cosmos/cosmos-sdk/types/simulation"
capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"
- "github.com/cosmos/ibc-go/apps/transfer/client/cli"
- "github.com/cosmos/ibc-go/apps/transfer/keeper"
- "github.com/cosmos/ibc-go/apps/transfer/simulation"
- "github.com/cosmos/ibc-go/apps/transfer/types"
- channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
- porttypes "github.com/cosmos/ibc-go/core/05-port/types"
- host "github.com/cosmos/ibc-go/core/24-host"
+ "github.com/cosmos/ibc-go/modules/apps/transfer/client/cli"
+ "github.com/cosmos/ibc-go/modules/apps/transfer/keeper"
+ "github.com/cosmos/ibc-go/modules/apps/transfer/simulation"
+ "github.com/cosmos/ibc-go/modules/apps/transfer/types"
+ channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
+ porttypes "github.com/cosmos/ibc-go/modules/core/05-port/types"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
)
var (
diff --git a/apps/transfer/module_test.go b/modules/apps/transfer/module_test.go
similarity index 96%
rename from apps/transfer/module_test.go
rename to modules/apps/transfer/module_test.go
index c316341e..53876213 100644
--- a/apps/transfer/module_test.go
+++ b/modules/apps/transfer/module_test.go
@@ -4,10 +4,10 @@ import (
"math"
capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"
- "github.com/cosmos/ibc-go/apps/transfer/types"
- channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
- host "github.com/cosmos/ibc-go/core/24-host"
- "github.com/cosmos/ibc-go/core/exported"
+ "github.com/cosmos/ibc-go/modules/apps/transfer/types"
+ channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
+ "github.com/cosmos/ibc-go/modules/core/exported"
ibctesting "github.com/cosmos/ibc-go/testing"
)
diff --git a/apps/transfer/simulation/decoder.go b/modules/apps/transfer/simulation/decoder.go
similarity index 94%
rename from apps/transfer/simulation/decoder.go
rename to modules/apps/transfer/simulation/decoder.go
index 70191c6a..882e9516 100644
--- a/apps/transfer/simulation/decoder.go
+++ b/modules/apps/transfer/simulation/decoder.go
@@ -5,7 +5,7 @@ import (
"fmt"
"github.com/cosmos/cosmos-sdk/types/kv"
- "github.com/cosmos/ibc-go/apps/transfer/types"
+ "github.com/cosmos/ibc-go/modules/apps/transfer/types"
)
// TransferUnmarshaler defines the expected encoding store functions.
diff --git a/apps/transfer/simulation/decoder_test.go b/modules/apps/transfer/simulation/decoder_test.go
similarity index 91%
rename from apps/transfer/simulation/decoder_test.go
rename to modules/apps/transfer/simulation/decoder_test.go
index 93162775..b4198136 100644
--- a/apps/transfer/simulation/decoder_test.go
+++ b/modules/apps/transfer/simulation/decoder_test.go
@@ -8,8 +8,8 @@ import (
"github.com/cosmos/ibc-go/testing/simapp"
"github.com/cosmos/cosmos-sdk/types/kv"
- "github.com/cosmos/ibc-go/apps/transfer/simulation"
- "github.com/cosmos/ibc-go/apps/transfer/types"
+ "github.com/cosmos/ibc-go/modules/apps/transfer/simulation"
+ "github.com/cosmos/ibc-go/modules/apps/transfer/types"
)
func TestDecodeStore(t *testing.T) {
diff --git a/apps/transfer/simulation/genesis.go b/modules/apps/transfer/simulation/genesis.go
similarity index 96%
rename from apps/transfer/simulation/genesis.go
rename to modules/apps/transfer/simulation/genesis.go
index 647f2321..b300e891 100644
--- a/apps/transfer/simulation/genesis.go
+++ b/modules/apps/transfer/simulation/genesis.go
@@ -8,7 +8,7 @@ import (
"github.com/cosmos/cosmos-sdk/types/module"
simtypes "github.com/cosmos/cosmos-sdk/types/simulation"
- "github.com/cosmos/ibc-go/apps/transfer/types"
+ "github.com/cosmos/ibc-go/modules/apps/transfer/types"
)
// Simulation parameter constants
diff --git a/apps/transfer/simulation/genesis_test.go b/modules/apps/transfer/simulation/genesis_test.go
similarity index 94%
rename from apps/transfer/simulation/genesis_test.go
rename to modules/apps/transfer/simulation/genesis_test.go
index 9cac5ab3..c4bd6103 100644
--- a/apps/transfer/simulation/genesis_test.go
+++ b/modules/apps/transfer/simulation/genesis_test.go
@@ -11,8 +11,8 @@ import (
codectypes "github.com/cosmos/cosmos-sdk/codec/types"
"github.com/cosmos/cosmos-sdk/types/module"
simtypes "github.com/cosmos/cosmos-sdk/types/simulation"
- "github.com/cosmos/ibc-go/apps/transfer/simulation"
- "github.com/cosmos/ibc-go/apps/transfer/types"
+ "github.com/cosmos/ibc-go/modules/apps/transfer/simulation"
+ "github.com/cosmos/ibc-go/modules/apps/transfer/types"
)
// TestRandomizedGenState tests the normal scenario of applying RandomizedGenState.
diff --git a/apps/transfer/simulation/params.go b/modules/apps/transfer/simulation/params.go
similarity index 94%
rename from apps/transfer/simulation/params.go
rename to modules/apps/transfer/simulation/params.go
index 29f84d6c..49437c5a 100644
--- a/apps/transfer/simulation/params.go
+++ b/modules/apps/transfer/simulation/params.go
@@ -9,7 +9,7 @@ import (
"github.com/cosmos/cosmos-sdk/x/simulation"
simtypes "github.com/cosmos/cosmos-sdk/types/simulation"
- "github.com/cosmos/ibc-go/apps/transfer/types"
+ "github.com/cosmos/ibc-go/modules/apps/transfer/types"
)
// ParamChanges defines the parameters that can be modified by param change proposals
diff --git a/apps/transfer/simulation/params_test.go b/modules/apps/transfer/simulation/params_test.go
similarity index 92%
rename from apps/transfer/simulation/params_test.go
rename to modules/apps/transfer/simulation/params_test.go
index 71e4a815..978c38e0 100644
--- a/apps/transfer/simulation/params_test.go
+++ b/modules/apps/transfer/simulation/params_test.go
@@ -6,7 +6,7 @@ import (
"github.com/stretchr/testify/require"
- "github.com/cosmos/ibc-go/apps/transfer/simulation"
+ "github.com/cosmos/ibc-go/modules/apps/transfer/simulation"
)
func TestParamChanges(t *testing.T) {
diff --git a/apps/transfer/spec/01_concepts.md b/modules/apps/transfer/spec/01_concepts.md
similarity index 100%
rename from apps/transfer/spec/01_concepts.md
rename to modules/apps/transfer/spec/01_concepts.md
diff --git a/apps/transfer/spec/02_state.md b/modules/apps/transfer/spec/02_state.md
similarity index 100%
rename from apps/transfer/spec/02_state.md
rename to modules/apps/transfer/spec/02_state.md
diff --git a/apps/transfer/spec/03_state_transitions.md b/modules/apps/transfer/spec/03_state_transitions.md
similarity index 100%
rename from apps/transfer/spec/03_state_transitions.md
rename to modules/apps/transfer/spec/03_state_transitions.md
diff --git a/apps/transfer/spec/04_messages.md b/modules/apps/transfer/spec/04_messages.md
similarity index 100%
rename from apps/transfer/spec/04_messages.md
rename to modules/apps/transfer/spec/04_messages.md
diff --git a/apps/transfer/spec/05_events.md b/modules/apps/transfer/spec/05_events.md
similarity index 100%
rename from apps/transfer/spec/05_events.md
rename to modules/apps/transfer/spec/05_events.md
diff --git a/apps/transfer/spec/06_metrics.md b/modules/apps/transfer/spec/06_metrics.md
similarity index 100%
rename from apps/transfer/spec/06_metrics.md
rename to modules/apps/transfer/spec/06_metrics.md
diff --git a/apps/transfer/spec/07_params.md b/modules/apps/transfer/spec/07_params.md
similarity index 100%
rename from apps/transfer/spec/07_params.md
rename to modules/apps/transfer/spec/07_params.md
diff --git a/apps/transfer/spec/README.md b/modules/apps/transfer/spec/README.md
similarity index 100%
rename from apps/transfer/spec/README.md
rename to modules/apps/transfer/spec/README.md
diff --git a/apps/transfer/types/codec.go b/modules/apps/transfer/types/codec.go
similarity index 100%
rename from apps/transfer/types/codec.go
rename to modules/apps/transfer/types/codec.go
diff --git a/apps/transfer/types/coin.go b/modules/apps/transfer/types/coin.go
similarity index 100%
rename from apps/transfer/types/coin.go
rename to modules/apps/transfer/types/coin.go
diff --git a/apps/transfer/types/errors.go b/modules/apps/transfer/types/errors.go
similarity index 100%
rename from apps/transfer/types/errors.go
rename to modules/apps/transfer/types/errors.go
diff --git a/apps/transfer/types/events.go b/modules/apps/transfer/types/events.go
similarity index 100%
rename from apps/transfer/types/events.go
rename to modules/apps/transfer/types/events.go
diff --git a/apps/transfer/types/expected_keepers.go b/modules/apps/transfer/types/expected_keepers.go
similarity index 90%
rename from apps/transfer/types/expected_keepers.go
rename to modules/apps/transfer/types/expected_keepers.go
index 6087855c..df16f947 100644
--- a/apps/transfer/types/expected_keepers.go
+++ b/modules/apps/transfer/types/expected_keepers.go
@@ -4,9 +4,9 @@ import (
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/cosmos/cosmos-sdk/x/auth/types"
capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"
- connectiontypes "github.com/cosmos/ibc-go/core/03-connection/types"
- channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
- ibcexported "github.com/cosmos/ibc-go/core/exported"
+ connectiontypes "github.com/cosmos/ibc-go/modules/core/03-connection/types"
+ channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
+ ibcexported "github.com/cosmos/ibc-go/modules/core/exported"
)
// AccountKeeper defines the contract required for account APIs.
diff --git a/apps/transfer/types/genesis.go b/modules/apps/transfer/types/genesis.go
similarity index 93%
rename from apps/transfer/types/genesis.go
rename to modules/apps/transfer/types/genesis.go
index 6432f3a7..1a17bc47 100644
--- a/apps/transfer/types/genesis.go
+++ b/modules/apps/transfer/types/genesis.go
@@ -1,7 +1,7 @@
package types
import (
- host "github.com/cosmos/ibc-go/core/24-host"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
)
// NewGenesisState creates a new ibc-transfer GenesisState instance.
diff --git a/apps/transfer/types/genesis.pb.go b/modules/apps/transfer/types/genesis.pb.go
similarity index 96%
rename from apps/transfer/types/genesis.pb.go
rename to modules/apps/transfer/types/genesis.pb.go
index b19173d8..f06c2fb8 100644
--- a/apps/transfer/types/genesis.pb.go
+++ b/modules/apps/transfer/types/genesis.pb.go
@@ -93,7 +93,7 @@ func init() {
}
var fileDescriptor_19e19f3d07c11479 = []byte{
- // 305 bytes of a gzipped FileDescriptorProto
+ // 315 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0xc9, 0x4c, 0x4a, 0x4e,
0xcf, 0xd7, 0x4f, 0x2c, 0x28, 0x28, 0xd6, 0x2f, 0x29, 0x4a, 0xcc, 0x2b, 0x4e, 0x4b, 0x2d, 0xd2,
0x2f, 0x33, 0xd4, 0x4f, 0x4f, 0xcd, 0x4b, 0x2d, 0xce, 0x2c, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9,
@@ -108,12 +108,12 @@ var fileDescriptor_19e19f3d07c11479 = []byte{
0xe4, 0x85, 0x21, 0x26, 0x23, 0x9b, 0xa2, 0xb4, 0xea, 0xbe, 0x3c, 0x1b, 0x58, 0x55, 0x71, 0x10,
0x77, 0x0a, 0x5c, 0x4b, 0xb1, 0x90, 0x0d, 0x17, 0x5b, 0x41, 0x62, 0x51, 0x62, 0x6e, 0xb1, 0x04,
0xb3, 0x02, 0xa3, 0x06, 0xb7, 0x91, 0x1c, 0x2e, 0x7b, 0x02, 0xc0, 0xaa, 0x9c, 0x58, 0x40, 0x76,
- 0x04, 0x41, 0xf5, 0x38, 0xb9, 0x9d, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47,
+ 0x04, 0x41, 0xf5, 0x38, 0xf9, 0x9d, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47,
0x72, 0x8c, 0x13, 0x1e, 0xcb, 0x31, 0x5c, 0x78, 0x2c, 0xc7, 0x70, 0xe3, 0xb1, 0x1c, 0x43, 0x94,
- 0x4e, 0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x7e, 0x72, 0x7e, 0x71, 0x6e,
- 0x7e, 0xb1, 0x7e, 0x66, 0x52, 0xb2, 0x2e, 0x46, 0x98, 0x96, 0x54, 0x16, 0xa4, 0x16, 0x27, 0xb1,
- 0x81, 0x03, 0xce, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0x67, 0xb9, 0x62, 0x19, 0xb5, 0x01, 0x00,
- 0x00,
+ 0x49, 0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x7e, 0x72, 0x7e, 0x71, 0x6e,
+ 0x7e, 0xb1, 0x7e, 0x66, 0x52, 0xb2, 0x6e, 0x7a, 0xbe, 0x7e, 0x6e, 0x7e, 0x4a, 0x69, 0x4e, 0x6a,
+ 0x31, 0x5a, 0xd8, 0x96, 0x54, 0x16, 0xa4, 0x16, 0x27, 0xb1, 0x81, 0x03, 0xd0, 0x18, 0x10, 0x00,
+ 0x00, 0xff, 0xff, 0xfe, 0xa6, 0xd7, 0x5d, 0xbd, 0x01, 0x00, 0x00,
}
func (m *GenesisState) Marshal() (dAtA []byte, err error) {
diff --git a/apps/transfer/types/genesis_test.go b/modules/apps/transfer/types/genesis_test.go
similarity index 92%
rename from apps/transfer/types/genesis_test.go
rename to modules/apps/transfer/types/genesis_test.go
index bac4c35d..23305ae1 100644
--- a/apps/transfer/types/genesis_test.go
+++ b/modules/apps/transfer/types/genesis_test.go
@@ -5,7 +5,7 @@ import (
"github.com/stretchr/testify/require"
- "github.com/cosmos/ibc-go/apps/transfer/types"
+ "github.com/cosmos/ibc-go/modules/apps/transfer/types"
)
func TestValidateGenesis(t *testing.T) {
diff --git a/apps/transfer/types/keys.go b/modules/apps/transfer/types/keys.go
similarity index 100%
rename from apps/transfer/types/keys.go
rename to modules/apps/transfer/types/keys.go
diff --git a/apps/transfer/types/keys_test.go b/modules/apps/transfer/types/keys_test.go
similarity index 90%
rename from apps/transfer/types/keys_test.go
rename to modules/apps/transfer/types/keys_test.go
index 3096fec7..54cf3f6c 100644
--- a/apps/transfer/types/keys_test.go
+++ b/modules/apps/transfer/types/keys_test.go
@@ -5,7 +5,7 @@ import (
"github.com/stretchr/testify/require"
- "github.com/cosmos/ibc-go/apps/transfer/types"
+ "github.com/cosmos/ibc-go/modules/apps/transfer/types"
)
// Test that there is domain separation between the port id and the channel id otherwise an
diff --git a/apps/transfer/types/msgs.go b/modules/apps/transfer/types/msgs.go
similarity index 95%
rename from apps/transfer/types/msgs.go
rename to modules/apps/transfer/types/msgs.go
index 568c3d8d..6985e3b9 100644
--- a/apps/transfer/types/msgs.go
+++ b/modules/apps/transfer/types/msgs.go
@@ -5,8 +5,8 @@ import (
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- host "github.com/cosmos/ibc-go/core/24-host"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
)
// msg types
diff --git a/apps/transfer/types/msgs_test.go b/modules/apps/transfer/types/msgs_test.go
similarity index 98%
rename from apps/transfer/types/msgs_test.go
rename to modules/apps/transfer/types/msgs_test.go
index e0598869..2d24438f 100644
--- a/apps/transfer/types/msgs_test.go
+++ b/modules/apps/transfer/types/msgs_test.go
@@ -8,7 +8,7 @@ import (
"github.com/cosmos/cosmos-sdk/crypto/keys/secp256k1"
sdk "github.com/cosmos/cosmos-sdk/types"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
)
// define constants used for testing
diff --git a/apps/transfer/types/packet.go b/modules/apps/transfer/types/packet.go
similarity index 100%
rename from apps/transfer/types/packet.go
rename to modules/apps/transfer/types/packet.go
diff --git a/apps/transfer/types/packet_test.go b/modules/apps/transfer/types/packet_test.go
similarity index 100%
rename from apps/transfer/types/packet_test.go
rename to modules/apps/transfer/types/packet_test.go
diff --git a/apps/transfer/types/params.go b/modules/apps/transfer/types/params.go
similarity index 100%
rename from apps/transfer/types/params.go
rename to modules/apps/transfer/types/params.go
diff --git a/apps/transfer/types/params_test.go b/modules/apps/transfer/types/params_test.go
similarity index 100%
rename from apps/transfer/types/params_test.go
rename to modules/apps/transfer/types/params_test.go
diff --git a/apps/transfer/types/query.pb.go b/modules/apps/transfer/types/query.pb.go
similarity index 91%
rename from apps/transfer/types/query.pb.go
rename to modules/apps/transfer/types/query.pb.go
index 3e365af1..bf77c5e7 100644
--- a/apps/transfer/types/query.pb.go
+++ b/modules/apps/transfer/types/query.pb.go
@@ -324,40 +324,40 @@ func init() {
}
var fileDescriptor_956e6703e65895ef = []byte{
- // 519 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0xcd, 0x6e, 0x13, 0x3d,
- 0x14, 0x8d, 0xdb, 0xef, 0x8b, 0xc4, 0x0d, 0x62, 0x61, 0xaa, 0x12, 0x8d, 0xaa, 0x69, 0x65, 0x95,
- 0xbf, 0xb4, 0xd8, 0x4c, 0x91, 0x78, 0x80, 0x82, 0xca, 0x0a, 0xa9, 0x44, 0xb0, 0x61, 0x01, 0xf2,
- 0x4c, 0xcd, 0x64, 0x24, 0x32, 0x9e, 0x8e, 0x9d, 0x48, 0x15, 0x62, 0xc3, 0x86, 0x2d, 0x12, 0x5b,
- 0x16, 0xac, 0x79, 0x04, 0x9e, 0xa0, 0xcb, 0x4a, 0xdd, 0xb0, 0x02, 0x94, 0xf0, 0x20, 0x68, 0x6c,
- 0x4f, 0x33, 0xd1, 0x24, 0x74, 0x76, 0xd6, 0xf5, 0xb9, 0xe7, 0x9e, 0x73, 0xae, 0x65, 0x20, 0x49,
- 0x18, 0xc5, 0x92, 0xf1, 0x2c, 0x53, 0x4c, 0xe7, 0x3c, 0x55, 0x6f, 0x44, 0xce, 0xc6, 0x01, 0x3b,
- 0x1e, 0x89, 0xfc, 0x84, 0x66, 0xb9, 0xd4, 0x12, 0xaf, 0x1b, 0x0c, 0x2d, 0x30, 0xb4, 0xc4, 0xd0,
- 0x71, 0xe0, 0xad, 0xc5, 0x32, 0x96, 0x06, 0xc2, 0x8a, 0x93, 0x45, 0x7b, 0xbd, 0x48, 0xaa, 0xa1,
- 0x54, 0x2c, 0xe4, 0x4a, 0x58, 0x1a, 0x36, 0x0e, 0x42, 0xa1, 0x79, 0xc0, 0x32, 0x1e, 0x27, 0x29,
- 0xd7, 0x89, 0x4c, 0x1d, 0xf6, 0xe6, 0x92, 0xe9, 0x17, 0x53, 0x2c, 0x6c, 0x23, 0x96, 0x32, 0x7e,
- 0x2b, 0x18, 0xcf, 0x12, 0xc6, 0xd3, 0x54, 0x6a, 0xc3, 0xa1, 0xec, 0x2d, 0xd9, 0x85, 0xf5, 0x67,
- 0xc5, 0x98, 0xc7, 0x22, 0x95, 0xc3, 0xe7, 0x39, 0x8f, 0x44, 0x5f, 0x1c, 0x8f, 0x84, 0xd2, 0x18,
- 0xc3, 0x7f, 0x03, 0xae, 0x06, 0x5d, 0xb4, 0x85, 0xee, 0x5c, 0xe9, 0x9b, 0x33, 0x79, 0x05, 0x37,
- 0x6a, 0x68, 0x95, 0xc9, 0x54, 0x09, 0xfc, 0x08, 0x3a, 0x47, 0x45, 0xf5, 0xb5, 0x2e, 0xca, 0xa6,
- 0xab, 0xb3, 0x47, 0xe8, 0x62, 0xf7, 0xb4, 0x42, 0x00, 0x47, 0x17, 0x67, 0xc2, 0x6b, 0xfc, 0xaa,
- 0x94, 0x73, 0x00, 0x30, 0x4b, 0xc0, 0xd1, 0xdf, 0xa2, 0x36, 0x2e, 0x5a, 0xc4, 0x45, 0x6d, 0xea,
- 0x2e, 0x2e, 0x7a, 0xc8, 0xe3, 0xd2, 0x4a, 0xbf, 0xd2, 0x49, 0xbe, 0x23, 0xe8, 0xd6, 0x67, 0x38,
- 0x13, 0x2f, 0xe0, 0x6a, 0xc5, 0x84, 0xea, 0xa2, 0xad, 0xd5, 0x66, 0x2e, 0xf6, 0xaf, 0x9d, 0xfe,
- 0xdc, 0x6c, 0x7d, 0xfb, 0xb5, 0xd9, 0x76, 0x8c, 0x9d, 0x99, 0x2b, 0x85, 0x9f, 0xcc, 0x69, 0x5f,
- 0x31, 0xda, 0x6f, 0x5f, 0xaa, 0xdd, 0x6a, 0x9a, 0x13, 0xbf, 0x06, 0xd8, 0x68, 0x3f, 0xe4, 0x39,
- 0x1f, 0x96, 0xd1, 0x90, 0xa7, 0x70, 0x7d, 0xae, 0xea, 0xcc, 0x3c, 0x84, 0x76, 0x66, 0x2a, 0x2e,
- 0x2d, 0x7f, 0x99, 0x0d, 0xd7, 0xe7, 0xd0, 0x7b, 0xe7, 0xab, 0xf0, 0xbf, 0xe1, 0xc3, 0x5f, 0x11,
- 0xc0, 0xcc, 0x23, 0xa6, 0xcb, 0x08, 0x16, 0xbf, 0x20, 0x8f, 0x35, 0xc6, 0x5b, 0xc5, 0x24, 0xf8,
- 0x70, 0xfe, 0xe7, 0xf3, 0xca, 0x0e, 0xbe, 0xcb, 0x92, 0x30, 0xaa, 0x3f, 0xec, 0xea, 0x6a, 0xd8,
- 0xbb, 0xe2, 0x41, 0xbe, 0xc7, 0x5f, 0x10, 0x74, 0x2a, 0x9b, 0xc4, 0x4d, 0x67, 0x96, 0xe1, 0x79,
- 0xf7, 0x9b, 0x37, 0x38, 0x95, 0x3d, 0xa3, 0x72, 0x1b, 0x93, 0xcb, 0x55, 0xe2, 0x8f, 0x08, 0xda,
- 0x36, 0x5e, 0xdc, 0xfb, 0xe7, 0xa0, 0xb9, 0x8d, 0x7a, 0x3b, 0x8d, 0xb0, 0x4e, 0xcf, 0xb6, 0xd1,
- 0xe3, 0xe3, 0x8d, 0xc5, 0x7a, 0xec, 0x56, 0xf7, 0x0f, 0x4e, 0x27, 0x3e, 0x3a, 0x9b, 0xf8, 0xe8,
- 0xf7, 0xc4, 0x47, 0x9f, 0xa6, 0x7e, 0xeb, 0x6c, 0xea, 0xb7, 0x7e, 0x4c, 0xfd, 0xd6, 0xcb, 0xdd,
- 0x38, 0xd1, 0x83, 0x51, 0x48, 0x23, 0x39, 0x64, 0xee, 0xfb, 0x49, 0xc2, 0xe8, 0x5e, 0xed, 0x6b,
- 0xd1, 0x27, 0x99, 0x50, 0x61, 0xdb, 0xfc, 0x1b, 0x0f, 0xfe, 0x06, 0x00, 0x00, 0xff, 0xff, 0xef,
- 0xe0, 0xd8, 0x44, 0xfc, 0x04, 0x00, 0x00,
+ // 525 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0x4d, 0x6f, 0xd3, 0x40,
+ 0x10, 0xcd, 0xb6, 0x10, 0x89, 0x09, 0xe2, 0xb0, 0x54, 0x25, 0xb2, 0x2a, 0xb7, 0xb2, 0xca, 0x57,
+ 0x0a, 0xbb, 0xb8, 0x20, 0x7e, 0x40, 0x41, 0x70, 0x02, 0x95, 0x08, 0x2e, 0x1c, 0x40, 0x6b, 0x67,
+ 0x71, 0x2c, 0xd5, 0x5e, 0xd7, 0xbb, 0x89, 0x54, 0x21, 0x2e, 0x5c, 0xb8, 0x22, 0x71, 0xe5, 0xc0,
+ 0x99, 0x9f, 0xc0, 0x2f, 0xe8, 0xb1, 0x52, 0x2f, 0x9c, 0x00, 0x25, 0xfc, 0x10, 0xe4, 0xdd, 0x75,
+ 0xe3, 0x90, 0xa4, 0xf1, 0x6d, 0x35, 0x7e, 0xf3, 0xe6, 0xbd, 0x37, 0x23, 0x83, 0x17, 0x07, 0x61,
+ 0x24, 0x28, 0xcb, 0x32, 0x49, 0x55, 0xce, 0x52, 0xf9, 0x8e, 0xe7, 0x74, 0xe8, 0xd3, 0xc3, 0x01,
+ 0xcf, 0x8f, 0x48, 0x96, 0x0b, 0x25, 0xf0, 0xba, 0xc6, 0x90, 0x02, 0x43, 0x4a, 0x0c, 0x19, 0xfa,
+ 0xce, 0x5a, 0x24, 0x22, 0xa1, 0x21, 0xb4, 0x78, 0x19, 0xb4, 0xd3, 0x09, 0x85, 0x4c, 0x84, 0xa4,
+ 0x01, 0x93, 0xdc, 0xd0, 0xd0, 0xa1, 0x1f, 0x70, 0xc5, 0x7c, 0x9a, 0xb1, 0x28, 0x4e, 0x99, 0x8a,
+ 0x45, 0x6a, 0xb1, 0xd7, 0x17, 0x4c, 0x3f, 0x9b, 0x62, 0x60, 0x1b, 0x91, 0x10, 0xd1, 0x01, 0xa7,
+ 0x2c, 0x8b, 0x29, 0x4b, 0x53, 0xa1, 0x34, 0x87, 0x34, 0x5f, 0xbd, 0x3b, 0xb0, 0xfe, 0xa2, 0x18,
+ 0xf3, 0x98, 0xa7, 0x22, 0x79, 0x99, 0xb3, 0x90, 0x77, 0xf9, 0xe1, 0x80, 0x4b, 0x85, 0x31, 0x5c,
+ 0xe8, 0x33, 0xd9, 0x6f, 0xa3, 0x2d, 0x74, 0xeb, 0x52, 0x57, 0xbf, 0xbd, 0x37, 0x70, 0x6d, 0x06,
+ 0x2d, 0x33, 0x91, 0x4a, 0x8e, 0x1f, 0x41, 0xab, 0x57, 0x54, 0xdf, 0xaa, 0xa2, 0xac, 0xbb, 0x5a,
+ 0xbb, 0x1e, 0x99, 0xef, 0x9e, 0x54, 0x08, 0xa0, 0x77, 0xf6, 0xf6, 0xd8, 0x0c, 0xbf, 0x2c, 0xe5,
+ 0x3c, 0x01, 0x98, 0x24, 0x60, 0xe9, 0x6f, 0x10, 0x13, 0x17, 0x29, 0xe2, 0x22, 0x26, 0x75, 0x1b,
+ 0x17, 0xd9, 0x67, 0x51, 0x69, 0xa5, 0x5b, 0xe9, 0xf4, 0x7e, 0x20, 0x68, 0xcf, 0xce, 0xb0, 0x26,
+ 0x5e, 0xc1, 0xe5, 0x8a, 0x09, 0xd9, 0x46, 0x5b, 0xab, 0xf5, 0x5c, 0xec, 0x5d, 0x39, 0xfe, 0xb5,
+ 0xd9, 0xf8, 0xfe, 0x7b, 0xb3, 0x69, 0x19, 0x5b, 0x13, 0x57, 0x12, 0x3f, 0x9d, 0xd2, 0xbe, 0xa2,
+ 0xb5, 0xdf, 0x5c, 0xaa, 0xdd, 0x68, 0x9a, 0x12, 0xbf, 0x06, 0x58, 0x6b, 0xdf, 0x67, 0x39, 0x4b,
+ 0xca, 0x68, 0xbc, 0x67, 0x70, 0x75, 0xaa, 0x6a, 0xcd, 0x3c, 0x84, 0x66, 0xa6, 0x2b, 0x36, 0x2d,
+ 0x77, 0x91, 0x0d, 0xdb, 0x67, 0xd1, 0xbb, 0xa7, 0xab, 0x70, 0x51, 0xf3, 0xe1, 0x6f, 0x08, 0x60,
+ 0xe2, 0x11, 0x93, 0x45, 0x04, 0xf3, 0x2f, 0xc8, 0xa1, 0xb5, 0xf1, 0x46, 0xb1, 0xe7, 0x7f, 0x3c,
+ 0xfd, 0xfb, 0x65, 0x65, 0x07, 0xdf, 0xa6, 0x71, 0x10, 0xce, 0x1e, 0x76, 0x75, 0x35, 0xf4, 0x7d,
+ 0x71, 0x90, 0x1f, 0xf0, 0x57, 0x04, 0xad, 0xca, 0x26, 0x71, 0xdd, 0x99, 0x65, 0x78, 0xce, 0xbd,
+ 0xfa, 0x0d, 0x56, 0x65, 0x47, 0xab, 0xdc, 0xc6, 0xde, 0x72, 0x95, 0xf8, 0x13, 0x82, 0xa6, 0x89,
+ 0x17, 0x77, 0xce, 0x1d, 0x34, 0xb5, 0x51, 0x67, 0xa7, 0x16, 0xd6, 0xea, 0xd9, 0xd6, 0x7a, 0x5c,
+ 0xbc, 0x31, 0x5f, 0x8f, 0xd9, 0xea, 0xde, 0xf3, 0xe3, 0x91, 0x8b, 0x4e, 0x46, 0x2e, 0xfa, 0x33,
+ 0x72, 0xd1, 0xe7, 0xb1, 0xdb, 0x38, 0x19, 0xbb, 0x8d, 0x9f, 0x63, 0xb7, 0xf1, 0xfa, 0x41, 0x14,
+ 0xab, 0xfe, 0x20, 0x20, 0xa1, 0x48, 0xa8, 0xfd, 0xfd, 0xc4, 0x41, 0x78, 0x37, 0x12, 0x34, 0x11,
+ 0xbd, 0xc1, 0x01, 0x97, 0xff, 0x71, 0xaa, 0xa3, 0x8c, 0xcb, 0xa0, 0xa9, 0xff, 0x1f, 0xf7, 0xff,
+ 0x05, 0x00, 0x00, 0xff, 0xff, 0x9c, 0xaf, 0x96, 0x10, 0x04, 0x05, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
diff --git a/apps/transfer/types/query.pb.gw.go b/modules/apps/transfer/types/query.pb.gw.go
similarity index 100%
rename from apps/transfer/types/query.pb.gw.go
rename to modules/apps/transfer/types/query.pb.gw.go
diff --git a/apps/transfer/types/trace.go b/modules/apps/transfer/types/trace.go
similarity index 99%
rename from apps/transfer/types/trace.go
rename to modules/apps/transfer/types/trace.go
index cc19a4c4..8d1e4937 100644
--- a/apps/transfer/types/trace.go
+++ b/modules/apps/transfer/types/trace.go
@@ -12,7 +12,7 @@ import (
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- host "github.com/cosmos/ibc-go/core/24-host"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
)
// ParseDenomTrace parses a string with the ibc prefix (denom trace) and the base denomination
diff --git a/apps/transfer/types/trace_test.go b/modules/apps/transfer/types/trace_test.go
similarity index 100%
rename from apps/transfer/types/trace_test.go
rename to modules/apps/transfer/types/trace_test.go
diff --git a/apps/transfer/types/transfer.pb.go b/modules/apps/transfer/types/transfer.pb.go
similarity index 90%
rename from apps/transfer/types/transfer.pb.go
rename to modules/apps/transfer/types/transfer.pb.go
index 7b405f30..64bfef49 100644
--- a/apps/transfer/types/transfer.pb.go
+++ b/modules/apps/transfer/types/transfer.pb.go
@@ -226,29 +226,30 @@ func init() {
}
var fileDescriptor_0cd9e010e90bbec6 = []byte{
- // 349 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x91, 0xc1, 0x6a, 0xfa, 0x40,
- 0x10, 0xc6, 0x8d, 0x7f, 0xff, 0xa2, 0xdb, 0xd2, 0xc2, 0x56, 0x34, 0x08, 0x8d, 0x12, 0x28, 0x78,
- 0x68, 0x13, 0xa4, 0x37, 0x2f, 0x05, 0x6b, 0x3d, 0x4b, 0xf0, 0xd4, 0x8b, 0x6c, 0xd6, 0x69, 0x0c,
- 0x9a, 0xdd, 0xb0, 0xbb, 0x0a, 0xd2, 0x27, 0xe8, 0xad, 0x8f, 0xd5, 0xa3, 0xc7, 0x9e, 0xa4, 0xe8,
- 0x1b, 0xf8, 0x04, 0x65, 0x37, 0x21, 0x94, 0xf6, 0x36, 0xdf, 0x7c, 0xbf, 0x6f, 0x66, 0x60, 0xd0,
- 0x4d, 0x1c, 0xd2, 0x88, 0xfb, 0x24, 0x4d, 0xa5, 0xaf, 0x04, 0x61, 0xf2, 0x05, 0x84, 0xbf, 0xe9,
- 0x17, 0xb5, 0x97, 0x0a, 0xae, 0x38, 0x6e, 0x1a, 0xcc, 0xd3, 0x98, 0x57, 0x58, 0x9b, 0x7e, 0xbb,
- 0x11, 0xf1, 0x88, 0x1b, 0xc4, 0xd7, 0x55, 0x46, 0xbb, 0xaf, 0xa8, 0x35, 0x5e, 0xb3, 0x28, 0x0e,
- 0x57, 0x30, 0xe5, 0x4b, 0x60, 0x13, 0x42, 0x97, 0xa0, 0x46, 0x44, 0x11, 0xdc, 0x40, 0xff, 0xe7,
- 0xc0, 0x78, 0x62, 0x5b, 0x5d, 0xab, 0x57, 0x0f, 0x32, 0x81, 0x9b, 0xa8, 0x4a, 0x12, 0xbe, 0x66,
- 0xca, 0x2e, 0x77, 0xad, 0x5e, 0x25, 0xc8, 0x95, 0xee, 0x4b, 0x60, 0x73, 0x10, 0xf6, 0x3f, 0x83,
- 0xe7, 0x0a, 0xb7, 0x51, 0x4d, 0x00, 0x85, 0x78, 0x03, 0xc2, 0xae, 0x18, 0xa7, 0xd0, 0xee, 0x03,
- 0x42, 0x23, 0x3d, 0x74, 0x2a, 0x08, 0x05, 0x8c, 0x51, 0x25, 0x25, 0x6a, 0x91, 0xaf, 0x33, 0x35,
- 0xbe, 0x46, 0x28, 0x24, 0x12, 0x66, 0xd9, 0x21, 0x65, 0xe3, 0xd4, 0x75, 0xc7, 0xe4, 0xdc, 0x37,
- 0x0b, 0x55, 0x27, 0x44, 0x90, 0x44, 0xe2, 0x01, 0x3a, 0xd7, 0x1b, 0x67, 0xc0, 0x48, 0xb8, 0x82,
- 0xb9, 0x99, 0x52, 0x1b, 0xb6, 0x4e, 0xfb, 0xce, 0xd5, 0x96, 0x24, 0xab, 0x81, 0xfb, 0xd3, 0x75,
- 0x83, 0x33, 0x2d, 0x9f, 0x32, 0x85, 0x1f, 0xd1, 0x65, 0x7e, 0x53, 0x11, 0x2f, 0x9b, 0x78, 0xfb,
- 0xb4, 0xef, 0x34, 0xb3, 0xf8, 0x2f, 0xc0, 0x0d, 0x2e, 0xf2, 0x4e, 0x3e, 0x64, 0x38, 0xfe, 0x38,
- 0x38, 0xd6, 0xee, 0xe0, 0x58, 0x5f, 0x07, 0xc7, 0x7a, 0x3f, 0x3a, 0xa5, 0xdd, 0xd1, 0x29, 0x7d,
- 0x1e, 0x9d, 0xd2, 0xf3, 0x6d, 0x14, 0xab, 0xc5, 0x3a, 0xf4, 0x28, 0x4f, 0x7c, 0xca, 0x65, 0xc2,
- 0xa5, 0x1f, 0x87, 0xf4, 0xee, 0xcf, 0x2f, 0xd5, 0x36, 0x05, 0x19, 0x56, 0xcd, 0x63, 0xee, 0xbf,
- 0x03, 0x00, 0x00, 0xff, 0xff, 0x41, 0x6a, 0xce, 0x58, 0xef, 0x01, 0x00, 0x00,
+ // 358 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0x91, 0xc1, 0x6a, 0xea, 0x40,
+ 0x14, 0x86, 0x8d, 0xd7, 0x2b, 0x3a, 0xf7, 0x72, 0x2f, 0x4c, 0x45, 0x83, 0xd0, 0x28, 0x81, 0x82,
+ 0x9b, 0x26, 0x48, 0xbb, 0x72, 0x53, 0xb0, 0xb6, 0xcb, 0x22, 0xc1, 0x55, 0x37, 0x32, 0x49, 0x4e,
+ 0x63, 0x30, 0x99, 0x09, 0x33, 0x13, 0x41, 0xfa, 0x04, 0xdd, 0xf5, 0xb1, 0xba, 0x74, 0xd9, 0x95,
+ 0x14, 0x7d, 0x03, 0x9f, 0xa0, 0xcc, 0x24, 0x84, 0xe2, 0xee, 0xfc, 0xe7, 0xff, 0xfe, 0x73, 0x0e,
+ 0x1c, 0x74, 0x15, 0xfb, 0x41, 0xc4, 0x5c, 0x92, 0x65, 0xc2, 0x95, 0x9c, 0x50, 0xf1, 0x02, 0xdc,
+ 0xdd, 0x8c, 0xab, 0xda, 0xc9, 0x38, 0x93, 0x0c, 0x77, 0x35, 0xe6, 0x28, 0xcc, 0xa9, 0xac, 0xcd,
+ 0xb8, 0xdf, 0x89, 0x58, 0xc4, 0x34, 0xe2, 0xaa, 0xaa, 0xa0, 0xed, 0x57, 0xd4, 0x7b, 0xcc, 0x69,
+ 0x14, 0xfb, 0x09, 0x2c, 0xd8, 0x1a, 0xe8, 0x9c, 0x04, 0x6b, 0x90, 0x33, 0x22, 0x09, 0xee, 0xa0,
+ 0xdf, 0x21, 0x50, 0x96, 0x9a, 0xc6, 0xd0, 0x18, 0xb5, 0xbd, 0x42, 0xe0, 0x2e, 0x6a, 0x92, 0x94,
+ 0xe5, 0x54, 0x9a, 0xf5, 0xa1, 0x31, 0x6a, 0x78, 0xa5, 0x52, 0x7d, 0x01, 0x34, 0x04, 0x6e, 0xfe,
+ 0xd2, 0x78, 0xa9, 0x70, 0x1f, 0xb5, 0x38, 0x04, 0x10, 0x6f, 0x80, 0x9b, 0x0d, 0xed, 0x54, 0xda,
+ 0xbe, 0x43, 0x68, 0xa6, 0x86, 0x2e, 0x38, 0x09, 0x00, 0x63, 0xd4, 0xc8, 0x88, 0x5c, 0x95, 0xeb,
+ 0x74, 0x8d, 0x2f, 0x11, 0xf2, 0x89, 0x80, 0x65, 0x71, 0x48, 0x5d, 0x3b, 0x6d, 0xd5, 0xd1, 0x39,
+ 0xfb, 0xcd, 0x40, 0xcd, 0x39, 0xe1, 0x24, 0x15, 0x78, 0x82, 0xfe, 0xaa, 0x8d, 0x4b, 0xa0, 0xc4,
+ 0x4f, 0x20, 0xd4, 0x53, 0x5a, 0xd3, 0xde, 0x69, 0x3f, 0xb8, 0xd8, 0x92, 0x34, 0x99, 0xd8, 0x3f,
+ 0x5d, 0xdb, 0xfb, 0xa3, 0xe4, 0x43, 0xa1, 0xf0, 0x3d, 0xfa, 0x5f, 0xde, 0x54, 0xc5, 0xeb, 0x3a,
+ 0xde, 0x3f, 0xed, 0x07, 0xdd, 0x22, 0x7e, 0x06, 0xd8, 0xde, 0xbf, 0xb2, 0x53, 0x0e, 0x99, 0x3e,
+ 0x7d, 0x1c, 0x2c, 0x63, 0x77, 0xb0, 0x8c, 0xaf, 0x83, 0x65, 0xbc, 0x1f, 0xad, 0xda, 0xee, 0x68,
+ 0xd5, 0x3e, 0x8f, 0x56, 0xed, 0xf9, 0x36, 0x8a, 0xe5, 0x2a, 0xf7, 0x9d, 0x80, 0xa5, 0x6e, 0xc0,
+ 0x44, 0xca, 0x84, 0x1b, 0xfb, 0xc1, 0x75, 0xc4, 0xdc, 0x94, 0x85, 0x79, 0x02, 0xe2, 0xec, 0xa7,
+ 0x72, 0x9b, 0x81, 0xf0, 0x9b, 0xfa, 0x41, 0x37, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x9b, 0x74,
+ 0xa8, 0xf3, 0xf7, 0x01, 0x00, 0x00,
}
func (m *FungibleTokenPacketData) Marshal() (dAtA []byte, err error) {
diff --git a/apps/transfer/types/tx.pb.go b/modules/apps/transfer/types/tx.pb.go
similarity index 86%
rename from apps/transfer/types/tx.pb.go
rename to modules/apps/transfer/types/tx.pb.go
index 0d5b29f4..8388059f 100644
--- a/apps/transfer/types/tx.pb.go
+++ b/modules/apps/transfer/types/tx.pb.go
@@ -7,7 +7,7 @@ import (
context "context"
fmt "fmt"
types "github.com/cosmos/cosmos-sdk/types"
- types1 "github.com/cosmos/ibc-go/core/02-client/types"
+ types1 "github.com/cosmos/ibc-go/modules/core/02-client/types"
_ "github.com/gogo/protobuf/gogoproto"
grpc1 "github.com/gogo/protobuf/grpc"
proto "github.com/gogo/protobuf/proto"
@@ -130,37 +130,38 @@ func init() {
func init() { proto.RegisterFile("ibcgo/apps/transfer/v1/tx.proto", fileDescriptor_4ca3945bed527d36) }
var fileDescriptor_4ca3945bed527d36 = []byte{
- // 478 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0x41, 0x8b, 0xd3, 0x40,
- 0x14, 0xc7, 0x13, 0xdb, 0xad, 0x75, 0xca, 0x2e, 0x3a, 0xba, 0x25, 0x5b, 0xd6, 0xa4, 0xc6, 0x4b,
- 0x41, 0x9d, 0x21, 0x2b, 0x22, 0xec, 0x49, 0xb2, 0x20, 0x7a, 0x58, 0x90, 0xb0, 0x27, 0x11, 0x96,
- 0x64, 0x7c, 0xa6, 0xc1, 0x26, 0x13, 0x66, 0xa6, 0xc1, 0xfd, 0x06, 0x1e, 0xfd, 0x08, 0xfb, 0x01,
- 0xfc, 0x20, 0x7b, 0xec, 0xd1, 0x53, 0x91, 0xf6, 0xe2, 0xb9, 0x9f, 0x40, 0x92, 0x99, 0xd6, 0x16,
- 0x3d, 0x78, 0x9a, 0x79, 0xef, 0xff, 0x7b, 0xf3, 0xe7, 0xbd, 0x79, 0xc8, 0xcb, 0x12, 0x96, 0x72,
- 0x1a, 0x97, 0xa5, 0xa4, 0x4a, 0xc4, 0x85, 0xfc, 0x04, 0x82, 0x56, 0x01, 0x55, 0x5f, 0x48, 0x29,
- 0xb8, 0xe2, 0xb8, 0xdf, 0x00, 0xa4, 0x06, 0xc8, 0x1a, 0x20, 0x55, 0x30, 0x78, 0x90, 0xf2, 0x94,
- 0x37, 0x08, 0xad, 0x6f, 0x9a, 0x1e, 0xb8, 0x8c, 0xcb, 0x9c, 0x4b, 0x9a, 0xc4, 0x12, 0x68, 0x15,
- 0x24, 0xa0, 0xe2, 0x80, 0x32, 0x9e, 0x15, 0x46, 0x7f, 0xa4, 0xed, 0x18, 0x17, 0x40, 0xd9, 0x24,
- 0x83, 0x42, 0xd5, 0x66, 0xfa, 0xa6, 0x11, 0xff, 0x7b, 0x0b, 0xf5, 0xce, 0x65, 0x7a, 0x61, 0xbc,
- 0xf0, 0x4b, 0xd4, 0x93, 0x7c, 0x2a, 0x18, 0x5c, 0x96, 0x5c, 0x28, 0xc7, 0x1e, 0xda, 0xa3, 0x3b,
- 0x61, 0x7f, 0x35, 0xf7, 0xf0, 0x55, 0x9c, 0x4f, 0x4e, 0xfd, 0x2d, 0xd1, 0x8f, 0x90, 0x8e, 0xde,
- 0x71, 0xa1, 0xf0, 0x2b, 0x74, 0x60, 0x34, 0x36, 0x8e, 0x8b, 0x02, 0x26, 0xce, 0xad, 0xa6, 0xf6,
- 0x68, 0x35, 0xf7, 0x0e, 0x77, 0x6a, 0x8d, 0xee, 0x47, 0xfb, 0x3a, 0x71, 0xa6, 0x63, 0xfc, 0x02,
- 0xed, 0x29, 0xfe, 0x19, 0x0a, 0xa7, 0x35, 0xb4, 0x47, 0xbd, 0x93, 0x23, 0xa2, 0xbb, 0x23, 0x75,
- 0x77, 0xc4, 0x74, 0x47, 0xce, 0x78, 0x56, 0x84, 0xed, 0x9b, 0xb9, 0x67, 0x45, 0x9a, 0xc6, 0x7d,
- 0xd4, 0x91, 0x50, 0x7c, 0x04, 0xe1, 0xb4, 0x6b, 0xc3, 0xc8, 0x44, 0x78, 0x80, 0xba, 0x02, 0x18,
- 0x64, 0x15, 0x08, 0x67, 0xaf, 0x51, 0x36, 0x31, 0x4e, 0xd0, 0x81, 0xca, 0x72, 0xe0, 0x53, 0x75,
- 0x39, 0x86, 0x2c, 0x1d, 0x2b, 0xa7, 0xd3, 0x78, 0x1e, 0x13, 0x3d, 0xff, 0x7a, 0x62, 0xc4, 0xcc,
- 0xa9, 0x0a, 0xc8, 0x9b, 0x86, 0x09, 0x1f, 0xd6, 0xb6, 0x7f, 0xda, 0xd9, 0x7d, 0xc1, 0x8f, 0xf6,
- 0x4d, 0x42, 0xd3, 0xf8, 0x2d, 0xba, 0xb7, 0x26, 0xea, 0x53, 0xaa, 0x38, 0x2f, 0x9d, 0xdb, 0x43,
- 0x7b, 0xd4, 0x0e, 0x8f, 0x57, 0x73, 0xcf, 0xd9, 0x7d, 0x64, 0x83, 0xf8, 0xd1, 0x5d, 0x93, 0xbb,
- 0x58, 0xa7, 0x4e, 0xbb, 0x5f, 0xaf, 0x3d, 0xeb, 0xd7, 0xb5, 0x67, 0xf9, 0x87, 0xe8, 0xfe, 0xd6,
- 0x6f, 0x45, 0x20, 0x4b, 0x5e, 0x48, 0x38, 0x61, 0xa8, 0x75, 0x2e, 0x53, 0xfc, 0x01, 0x75, 0x37,
- 0x1f, 0xf9, 0x98, 0xfc, 0x7b, 0x95, 0xc8, 0x56, 0xfd, 0xe0, 0xc9, 0x7f, 0x40, 0x6b, 0x93, 0xf0,
- 0xf5, 0xcd, 0xc2, 0xb5, 0x67, 0x0b, 0xd7, 0xfe, 0xb9, 0x70, 0xed, 0x6f, 0x4b, 0xd7, 0x9a, 0x2d,
- 0x5d, 0xeb, 0xc7, 0xd2, 0xb5, 0xde, 0x3f, 0x4d, 0x33, 0x35, 0x9e, 0x26, 0x84, 0xf1, 0x9c, 0x9a,
- 0x95, 0xcc, 0x12, 0xf6, 0xec, 0xaf, 0x4d, 0x57, 0x57, 0x25, 0xc8, 0xa4, 0xd3, 0x6c, 0xde, 0xf3,
- 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x9e, 0x48, 0x06, 0x61, 0x0d, 0x03, 0x00, 0x00,
+ // 485 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0x41, 0x6f, 0xd3, 0x30,
+ 0x14, 0xc7, 0x13, 0xda, 0x95, 0xe2, 0x6a, 0x13, 0x18, 0x56, 0x65, 0xd5, 0x48, 0x4a, 0xb8, 0x54,
+ 0x42, 0xd8, 0xca, 0x00, 0x21, 0xed, 0x84, 0xb2, 0x0b, 0x1c, 0x86, 0x50, 0xb4, 0x13, 0x42, 0x9a,
+ 0x12, 0xcf, 0xa4, 0x11, 0x4d, 0x5e, 0x64, 0xbb, 0x11, 0xfb, 0x06, 0x1c, 0xf9, 0x08, 0xfb, 0x00,
+ 0x7c, 0x90, 0x1d, 0x77, 0xe4, 0x54, 0xa1, 0xf6, 0xc2, 0xb9, 0x9f, 0x00, 0x25, 0x76, 0x4b, 0x8b,
+ 0x38, 0xec, 0x64, 0xbf, 0xf7, 0xff, 0x3d, 0xff, 0xf5, 0x9e, 0x1f, 0xf2, 0xb2, 0x84, 0xa5, 0x40,
+ 0xe3, 0xb2, 0x94, 0x54, 0x89, 0xb8, 0x90, 0x9f, 0xb9, 0xa0, 0x55, 0x40, 0xd5, 0x57, 0x52, 0x0a,
+ 0x50, 0x80, 0xfb, 0x0d, 0x40, 0x6a, 0x80, 0xac, 0x00, 0x52, 0x05, 0x83, 0x47, 0x29, 0xa4, 0xd0,
+ 0x20, 0xb4, 0xbe, 0x69, 0x7a, 0xe0, 0x32, 0x90, 0x39, 0x48, 0x9a, 0xc4, 0x92, 0xd3, 0x2a, 0x48,
+ 0xb8, 0x8a, 0x03, 0xca, 0x20, 0x2b, 0x8c, 0xfe, 0x44, 0xdb, 0x31, 0x10, 0x9c, 0xb2, 0x49, 0xc6,
+ 0x0b, 0x55, 0x9b, 0xe9, 0x9b, 0x46, 0xfc, 0x1f, 0x2d, 0xd4, 0x3b, 0x95, 0xe9, 0x99, 0xf1, 0xc2,
+ 0xaf, 0x51, 0x4f, 0xc2, 0x54, 0x30, 0x7e, 0x5e, 0x82, 0x50, 0x8e, 0x3d, 0xb4, 0x47, 0xf7, 0xc2,
+ 0xfe, 0x72, 0xe6, 0xe1, 0xcb, 0x38, 0x9f, 0x1c, 0xfb, 0x1b, 0xa2, 0x1f, 0x21, 0x1d, 0x7d, 0x00,
+ 0xa1, 0xf0, 0x1b, 0xb4, 0x67, 0x34, 0x36, 0x8e, 0x8b, 0x82, 0x4f, 0x9c, 0x3b, 0x4d, 0xed, 0xc1,
+ 0x72, 0xe6, 0xed, 0x6f, 0xd5, 0x1a, 0xdd, 0x8f, 0x76, 0x75, 0xe2, 0x44, 0xc7, 0xf8, 0x15, 0xda,
+ 0x51, 0xf0, 0x85, 0x17, 0x4e, 0x6b, 0x68, 0x8f, 0x7a, 0x47, 0x07, 0x44, 0x77, 0x47, 0xea, 0xee,
+ 0x88, 0xe9, 0x8e, 0x9c, 0x40, 0x56, 0x84, 0xed, 0xeb, 0x99, 0x67, 0x45, 0x9a, 0xc6, 0x7d, 0xd4,
+ 0x91, 0xbc, 0xb8, 0xe0, 0xc2, 0x69, 0xd7, 0x86, 0x91, 0x89, 0xf0, 0x00, 0x75, 0x05, 0x67, 0x3c,
+ 0xab, 0xb8, 0x70, 0x76, 0x1a, 0x65, 0x1d, 0xe3, 0x04, 0xed, 0xa9, 0x2c, 0xe7, 0x30, 0x55, 0xe7,
+ 0x63, 0x9e, 0xa5, 0x63, 0xe5, 0x74, 0x1a, 0xcf, 0x43, 0xa2, 0xe7, 0x5f, 0x4f, 0x8c, 0x98, 0x39,
+ 0x55, 0x01, 0x79, 0xdb, 0x30, 0xe1, 0xe3, 0xda, 0xf6, 0x6f, 0x3b, 0xdb, 0x2f, 0xf8, 0xd1, 0xae,
+ 0x49, 0x68, 0x1a, 0xbf, 0x43, 0x0f, 0x56, 0x44, 0x7d, 0x4a, 0x15, 0xe7, 0xa5, 0x73, 0x77, 0x68,
+ 0x8f, 0xda, 0xe1, 0xe1, 0x72, 0xe6, 0x39, 0xdb, 0x8f, 0xac, 0x11, 0x3f, 0xba, 0x6f, 0x72, 0x67,
+ 0xab, 0xd4, 0x71, 0xf7, 0xdb, 0x95, 0x67, 0xfd, 0xbe, 0xf2, 0x2c, 0x7f, 0x1f, 0x3d, 0xdc, 0xf8,
+ 0xad, 0x88, 0xcb, 0x12, 0x0a, 0xc9, 0x8f, 0x18, 0x6a, 0x9d, 0xca, 0x14, 0x7f, 0x42, 0xdd, 0xf5,
+ 0x47, 0x3e, 0x25, 0xff, 0x5f, 0x25, 0xb2, 0x51, 0x3f, 0x78, 0x76, 0x0b, 0x68, 0x65, 0x12, 0xbe,
+ 0xbf, 0x9e, 0xbb, 0xf6, 0xcd, 0xdc, 0xb5, 0x7f, 0xcd, 0x5d, 0xfb, 0xfb, 0xc2, 0xb5, 0x6e, 0x16,
+ 0xae, 0xf5, 0x73, 0xe1, 0x5a, 0x1f, 0x5f, 0xa6, 0x99, 0x1a, 0x4f, 0x13, 0xc2, 0x20, 0xa7, 0x66,
+ 0x25, 0xb3, 0x84, 0x3d, 0x4f, 0x81, 0xe6, 0x70, 0x31, 0x9d, 0x70, 0xf9, 0xcf, 0xc6, 0xab, 0xcb,
+ 0x92, 0xcb, 0xa4, 0xd3, 0x6c, 0xe0, 0x8b, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xb0, 0x7e, 0x3d,
+ 0x25, 0x15, 0x03, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
diff --git a/core/02-client/abci.go b/modules/core/02-client/abci.go
similarity index 87%
rename from core/02-client/abci.go
rename to modules/core/02-client/abci.go
index b5ddef8c..4e1068c2 100644
--- a/core/02-client/abci.go
+++ b/modules/core/02-client/abci.go
@@ -2,9 +2,9 @@ package client
import (
sdk "github.com/cosmos/cosmos-sdk/types"
- "github.com/cosmos/ibc-go/core/02-client/keeper"
- "github.com/cosmos/ibc-go/core/exported"
- ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
+ "github.com/cosmos/ibc-go/modules/core/02-client/keeper"
+ "github.com/cosmos/ibc-go/modules/core/exported"
+ ibctmtypes "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types"
)
// BeginBlocker updates an existing localhost client with the latest block height.
diff --git a/core/02-client/abci_test.go b/modules/core/02-client/abci_test.go
similarity index 90%
rename from core/02-client/abci_test.go
rename to modules/core/02-client/abci_test.go
index 3e466d38..cbf63d85 100644
--- a/core/02-client/abci_test.go
+++ b/modules/core/02-client/abci_test.go
@@ -7,11 +7,11 @@ import (
abci "github.com/tendermint/tendermint/abci/types"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
- client "github.com/cosmos/ibc-go/core/02-client"
- "github.com/cosmos/ibc-go/core/02-client/types"
- "github.com/cosmos/ibc-go/core/exported"
- ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
- localhosttypes "github.com/cosmos/ibc-go/light-clients/09-localhost/types"
+ client "github.com/cosmos/ibc-go/modules/core/02-client"
+ "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ "github.com/cosmos/ibc-go/modules/core/exported"
+ ibctmtypes "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types"
+ localhosttypes "github.com/cosmos/ibc-go/modules/light-clients/09-localhost/types"
ibctesting "github.com/cosmos/ibc-go/testing"
upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
)
diff --git a/core/02-client/client/cli/cli.go b/modules/core/02-client/client/cli/cli.go
similarity index 95%
rename from core/02-client/client/cli/cli.go
rename to modules/core/02-client/client/cli/cli.go
index 74bb72be..9146e598 100644
--- a/core/02-client/client/cli/cli.go
+++ b/modules/core/02-client/client/cli/cli.go
@@ -4,7 +4,7 @@ import (
"github.com/spf13/cobra"
"github.com/cosmos/cosmos-sdk/client"
- "github.com/cosmos/ibc-go/core/02-client/types"
+ "github.com/cosmos/ibc-go/modules/core/02-client/types"
)
// GetQueryCmd returns the query commands for IBC clients
diff --git a/core/02-client/client/cli/query.go b/modules/core/02-client/client/cli/query.go
similarity index 97%
rename from core/02-client/client/cli/query.go
rename to modules/core/02-client/client/cli/query.go
index 2a5ea8e7..9f32383d 100644
--- a/core/02-client/client/cli/query.go
+++ b/modules/core/02-client/client/cli/query.go
@@ -9,9 +9,9 @@ import (
"github.com/cosmos/cosmos-sdk/client"
"github.com/cosmos/cosmos-sdk/client/flags"
"github.com/cosmos/cosmos-sdk/version"
- "github.com/cosmos/ibc-go/core/02-client/client/utils"
- "github.com/cosmos/ibc-go/core/02-client/types"
- host "github.com/cosmos/ibc-go/core/24-host"
+ "github.com/cosmos/ibc-go/modules/core/02-client/client/utils"
+ "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
)
const (
diff --git a/core/02-client/client/cli/tx.go b/modules/core/02-client/client/cli/tx.go
similarity index 99%
rename from core/02-client/client/cli/tx.go
rename to modules/core/02-client/client/cli/tx.go
index ad1d0acc..b7908fed 100644
--- a/core/02-client/client/cli/tx.go
+++ b/modules/core/02-client/client/cli/tx.go
@@ -16,8 +16,8 @@ import (
"github.com/cosmos/cosmos-sdk/version"
govcli "github.com/cosmos/cosmos-sdk/x/gov/client/cli"
govtypes "github.com/cosmos/cosmos-sdk/x/gov/types"
- "github.com/cosmos/ibc-go/core/02-client/types"
- "github.com/cosmos/ibc-go/core/exported"
+ "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ "github.com/cosmos/ibc-go/modules/core/exported"
upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
)
diff --git a/core/02-client/client/proposal_handler.go b/modules/core/02-client/client/proposal_handler.go
similarity index 92%
rename from core/02-client/client/proposal_handler.go
rename to modules/core/02-client/client/proposal_handler.go
index f4f2fa7b..8f773dd4 100644
--- a/core/02-client/client/proposal_handler.go
+++ b/modules/core/02-client/client/proposal_handler.go
@@ -7,7 +7,7 @@ import (
"github.com/cosmos/cosmos-sdk/types/rest"
govclient "github.com/cosmos/cosmos-sdk/x/gov/client"
govrest "github.com/cosmos/cosmos-sdk/x/gov/client/rest"
- "github.com/cosmos/ibc-go/core/02-client/client/cli"
+ "github.com/cosmos/ibc-go/modules/core/02-client/client/cli"
)
var (
diff --git a/core/02-client/client/utils/utils.go b/modules/core/02-client/client/utils/utils.go
similarity index 93%
rename from core/02-client/client/utils/utils.go
rename to modules/core/02-client/client/utils/utils.go
index d6080c91..b7614146 100644
--- a/core/02-client/client/utils/utils.go
+++ b/modules/core/02-client/client/utils/utils.go
@@ -9,12 +9,12 @@ import (
"github.com/cosmos/cosmos-sdk/codec"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- "github.com/cosmos/ibc-go/core/02-client/types"
- commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
- host "github.com/cosmos/ibc-go/core/24-host"
- ibcclient "github.com/cosmos/ibc-go/core/client"
- "github.com/cosmos/ibc-go/core/exported"
- ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
+ "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ commitmenttypes "github.com/cosmos/ibc-go/modules/core/23-commitment/types"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
+ ibcclient "github.com/cosmos/ibc-go/modules/core/client"
+ "github.com/cosmos/ibc-go/modules/core/exported"
+ ibctmtypes "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types"
)
// QueryClientState returns a client state. If prove is true, it performs an ABCI store query
diff --git a/core/02-client/doc.go b/modules/core/02-client/doc.go
similarity index 100%
rename from core/02-client/doc.go
rename to modules/core/02-client/doc.go
diff --git a/core/02-client/genesis.go b/modules/core/02-client/genesis.go
similarity index 92%
rename from core/02-client/genesis.go
rename to modules/core/02-client/genesis.go
index 4516cfb1..d6e0a217 100644
--- a/core/02-client/genesis.go
+++ b/modules/core/02-client/genesis.go
@@ -4,9 +4,9 @@ import (
"fmt"
sdk "github.com/cosmos/cosmos-sdk/types"
- "github.com/cosmos/ibc-go/core/02-client/keeper"
- "github.com/cosmos/ibc-go/core/02-client/types"
- "github.com/cosmos/ibc-go/core/exported"
+ "github.com/cosmos/ibc-go/modules/core/02-client/keeper"
+ "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ "github.com/cosmos/ibc-go/modules/core/exported"
)
// InitGenesis initializes the ibc client submodule's state from a provided genesis
diff --git a/core/02-client/keeper/client.go b/modules/core/02-client/keeper/client.go
similarity index 98%
rename from core/02-client/keeper/client.go
rename to modules/core/02-client/keeper/client.go
index 750bac2c..e8288da5 100644
--- a/core/02-client/keeper/client.go
+++ b/modules/core/02-client/keeper/client.go
@@ -8,8 +8,8 @@ import (
"github.com/cosmos/cosmos-sdk/telemetry"
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- "github.com/cosmos/ibc-go/core/02-client/types"
- "github.com/cosmos/ibc-go/core/exported"
+ "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ "github.com/cosmos/ibc-go/modules/core/exported"
)
// CreateClient creates a new client state and populates it with a given consensus
diff --git a/core/02-client/keeper/client_test.go b/modules/core/02-client/keeper/client_test.go
similarity index 98%
rename from core/02-client/keeper/client_test.go
rename to modules/core/02-client/keeper/client_test.go
index 2466e5ce..21002d17 100644
--- a/core/02-client/keeper/client_test.go
+++ b/modules/core/02-client/keeper/client_test.go
@@ -7,12 +7,12 @@ import (
tmtypes "github.com/tendermint/tendermint/types"
- "github.com/cosmos/ibc-go/core/02-client/types"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
- "github.com/cosmos/ibc-go/core/exported"
- ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
- localhosttypes "github.com/cosmos/ibc-go/light-clients/09-localhost/types"
+ "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ commitmenttypes "github.com/cosmos/ibc-go/modules/core/23-commitment/types"
+ "github.com/cosmos/ibc-go/modules/core/exported"
+ ibctmtypes "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types"
+ localhosttypes "github.com/cosmos/ibc-go/modules/light-clients/09-localhost/types"
ibctesting "github.com/cosmos/ibc-go/testing"
ibctestingmock "github.com/cosmos/ibc-go/testing/mock"
upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
diff --git a/core/02-client/keeper/encoding.go b/modules/core/02-client/keeper/encoding.go
similarity index 93%
rename from core/02-client/keeper/encoding.go
rename to modules/core/02-client/keeper/encoding.go
index 1e4750b5..cf1b3b11 100644
--- a/core/02-client/keeper/encoding.go
+++ b/modules/core/02-client/keeper/encoding.go
@@ -1,8 +1,8 @@
package keeper
import (
- "github.com/cosmos/ibc-go/core/02-client/types"
- "github.com/cosmos/ibc-go/core/exported"
+ "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ "github.com/cosmos/ibc-go/modules/core/exported"
)
// UnmarshalClientState attempts to decode and return an ClientState object from
diff --git a/core/02-client/keeper/grpc_query.go b/modules/core/02-client/keeper/grpc_query.go
similarity index 97%
rename from core/02-client/keeper/grpc_query.go
rename to modules/core/02-client/keeper/grpc_query.go
index 5d98ab16..cb353a57 100644
--- a/core/02-client/keeper/grpc_query.go
+++ b/modules/core/02-client/keeper/grpc_query.go
@@ -14,9 +14,9 @@ import (
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
"github.com/cosmos/cosmos-sdk/types/query"
- "github.com/cosmos/ibc-go/core/02-client/types"
- host "github.com/cosmos/ibc-go/core/24-host"
- "github.com/cosmos/ibc-go/core/exported"
+ "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
+ "github.com/cosmos/ibc-go/modules/core/exported"
)
var _ types.QueryServer = Keeper{}
diff --git a/core/02-client/keeper/grpc_query_test.go b/modules/core/02-client/keeper/grpc_query_test.go
similarity index 97%
rename from core/02-client/keeper/grpc_query_test.go
rename to modules/core/02-client/keeper/grpc_query_test.go
index e0542ce4..5d3671df 100644
--- a/core/02-client/keeper/grpc_query_test.go
+++ b/modules/core/02-client/keeper/grpc_query_test.go
@@ -8,10 +8,10 @@ import (
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/cosmos/cosmos-sdk/types/query"
- "github.com/cosmos/ibc-go/core/02-client/types"
- commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
- "github.com/cosmos/ibc-go/core/exported"
- ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
+ "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ commitmenttypes "github.com/cosmos/ibc-go/modules/core/23-commitment/types"
+ "github.com/cosmos/ibc-go/modules/core/exported"
+ ibctmtypes "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types"
ibctesting "github.com/cosmos/ibc-go/testing"
)
diff --git a/core/02-client/keeper/keeper.go b/modules/core/02-client/keeper/keeper.go
similarity index 97%
rename from core/02-client/keeper/keeper.go
rename to modules/core/02-client/keeper/keeper.go
index e7808058..7bac00e5 100644
--- a/core/02-client/keeper/keeper.go
+++ b/modules/core/02-client/keeper/keeper.go
@@ -12,11 +12,11 @@ import (
"github.com/cosmos/cosmos-sdk/store/prefix"
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- "github.com/cosmos/ibc-go/core/02-client/types"
- commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
- host "github.com/cosmos/ibc-go/core/24-host"
- "github.com/cosmos/ibc-go/core/exported"
- ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
+ "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ commitmenttypes "github.com/cosmos/ibc-go/modules/core/23-commitment/types"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
+ "github.com/cosmos/ibc-go/modules/core/exported"
+ ibctmtypes "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types"
paramtypes "github.com/cosmos/cosmos-sdk/x/params/types"
upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
)
diff --git a/core/02-client/keeper/keeper_test.go b/modules/core/02-client/keeper/keeper_test.go
similarity index 97%
rename from core/02-client/keeper/keeper_test.go
rename to modules/core/02-client/keeper/keeper_test.go
index 2233583b..c6b3329c 100644
--- a/core/02-client/keeper/keeper_test.go
+++ b/modules/core/02-client/keeper/keeper_test.go
@@ -15,12 +15,12 @@ import (
cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec"
"github.com/cosmos/ibc-go/testing/simapp"
sdk "github.com/cosmos/cosmos-sdk/types"
- "github.com/cosmos/ibc-go/core/02-client/keeper"
- "github.com/cosmos/ibc-go/core/02-client/types"
- commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
- "github.com/cosmos/ibc-go/core/exported"
- ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
- localhosttypes "github.com/cosmos/ibc-go/light-clients/09-localhost/types"
+ "github.com/cosmos/ibc-go/modules/core/02-client/keeper"
+ "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ commitmenttypes "github.com/cosmos/ibc-go/modules/core/23-commitment/types"
+ "github.com/cosmos/ibc-go/modules/core/exported"
+ ibctmtypes "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types"
+ localhosttypes "github.com/cosmos/ibc-go/modules/light-clients/09-localhost/types"
ibctesting "github.com/cosmos/ibc-go/testing"
ibctestingmock "github.com/cosmos/ibc-go/testing/mock"
stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types"
diff --git a/core/02-client/keeper/params.go b/modules/core/02-client/keeper/params.go
similarity index 91%
rename from core/02-client/keeper/params.go
rename to modules/core/02-client/keeper/params.go
index 882372d1..c9b88acd 100644
--- a/core/02-client/keeper/params.go
+++ b/modules/core/02-client/keeper/params.go
@@ -2,7 +2,7 @@ package keeper
import (
sdk "github.com/cosmos/cosmos-sdk/types"
- "github.com/cosmos/ibc-go/core/02-client/types"
+ "github.com/cosmos/ibc-go/modules/core/02-client/types"
)
// GetAllowedClients retrieves the allowed clients from the paramstore
diff --git a/core/02-client/keeper/params_test.go b/modules/core/02-client/keeper/params_test.go
similarity index 89%
rename from core/02-client/keeper/params_test.go
rename to modules/core/02-client/keeper/params_test.go
index 36cbea10..fdcaad5f 100644
--- a/core/02-client/keeper/params_test.go
+++ b/modules/core/02-client/keeper/params_test.go
@@ -1,7 +1,7 @@
package keeper_test
import (
- "github.com/cosmos/ibc-go/core/02-client/types"
+ "github.com/cosmos/ibc-go/modules/core/02-client/types"
)
func (suite *KeeperTestSuite) TestParams() {
diff --git a/core/02-client/keeper/proposal.go b/modules/core/02-client/keeper/proposal.go
similarity index 97%
rename from core/02-client/keeper/proposal.go
rename to modules/core/02-client/keeper/proposal.go
index c8beb7cd..b381b26e 100644
--- a/core/02-client/keeper/proposal.go
+++ b/modules/core/02-client/keeper/proposal.go
@@ -6,8 +6,8 @@ import (
"github.com/cosmos/cosmos-sdk/telemetry"
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- "github.com/cosmos/ibc-go/core/02-client/types"
- "github.com/cosmos/ibc-go/core/exported"
+ "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ "github.com/cosmos/ibc-go/modules/core/exported"
)
// ClientUpdateProposal will retrieve the subject and substitute client.
diff --git a/core/02-client/keeper/proposal_test.go b/modules/core/02-client/keeper/proposal_test.go
similarity index 98%
rename from core/02-client/keeper/proposal_test.go
rename to modules/core/02-client/keeper/proposal_test.go
index 5f98a04e..bd381052 100644
--- a/core/02-client/keeper/proposal_test.go
+++ b/modules/core/02-client/keeper/proposal_test.go
@@ -2,9 +2,9 @@ package keeper_test
import (
govtypes "github.com/cosmos/cosmos-sdk/x/gov/types"
- "github.com/cosmos/ibc-go/core/02-client/types"
- "github.com/cosmos/ibc-go/core/exported"
- ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
+ "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ "github.com/cosmos/ibc-go/modules/core/exported"
+ ibctmtypes "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types"
ibctesting "github.com/cosmos/ibc-go/testing"
upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
)
diff --git a/core/02-client/module.go b/modules/core/02-client/module.go
similarity index 83%
rename from core/02-client/module.go
rename to modules/core/02-client/module.go
index 78749db6..9e731ba8 100644
--- a/core/02-client/module.go
+++ b/modules/core/02-client/module.go
@@ -4,8 +4,8 @@ import (
"github.com/gogo/protobuf/grpc"
"github.com/spf13/cobra"
- "github.com/cosmos/ibc-go/core/02-client/client/cli"
- "github.com/cosmos/ibc-go/core/02-client/types"
+ "github.com/cosmos/ibc-go/modules/core/02-client/client/cli"
+ "github.com/cosmos/ibc-go/modules/core/02-client/types"
)
// Name returns the IBC client name
diff --git a/core/02-client/proposal_handler.go b/modules/core/02-client/proposal_handler.go
similarity index 85%
rename from core/02-client/proposal_handler.go
rename to modules/core/02-client/proposal_handler.go
index cb1426b3..d3595458 100644
--- a/core/02-client/proposal_handler.go
+++ b/modules/core/02-client/proposal_handler.go
@@ -4,8 +4,8 @@ import (
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
govtypes "github.com/cosmos/cosmos-sdk/x/gov/types"
- "github.com/cosmos/ibc-go/core/02-client/keeper"
- "github.com/cosmos/ibc-go/core/02-client/types"
+ "github.com/cosmos/ibc-go/modules/core/02-client/keeper"
+ "github.com/cosmos/ibc-go/modules/core/02-client/types"
)
// NewClientProposalHandler defines the 02-client proposal handler
diff --git a/core/02-client/proposal_handler_test.go b/modules/core/02-client/proposal_handler_test.go
similarity index 91%
rename from core/02-client/proposal_handler_test.go
rename to modules/core/02-client/proposal_handler_test.go
index 98480ee2..ad7873f5 100644
--- a/core/02-client/proposal_handler_test.go
+++ b/modules/core/02-client/proposal_handler_test.go
@@ -4,10 +4,10 @@ import (
sdk "github.com/cosmos/cosmos-sdk/types"
distributiontypes "github.com/cosmos/cosmos-sdk/x/distribution/types"
govtypes "github.com/cosmos/cosmos-sdk/x/gov/types"
- client "github.com/cosmos/ibc-go/core/02-client"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- "github.com/cosmos/ibc-go/core/exported"
- ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
+ client "github.com/cosmos/ibc-go/modules/core/02-client"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ "github.com/cosmos/ibc-go/modules/core/exported"
+ ibctmtypes "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types"
ibctesting "github.com/cosmos/ibc-go/testing"
)
diff --git a/core/02-client/simulation/decoder.go b/modules/core/02-client/simulation/decoder.go
similarity index 88%
rename from core/02-client/simulation/decoder.go
rename to modules/core/02-client/simulation/decoder.go
index 70736e3b..2e4a7838 100644
--- a/core/02-client/simulation/decoder.go
+++ b/modules/core/02-client/simulation/decoder.go
@@ -5,9 +5,9 @@ import (
"fmt"
"github.com/cosmos/cosmos-sdk/types/kv"
- "github.com/cosmos/ibc-go/core/02-client/keeper"
- host "github.com/cosmos/ibc-go/core/24-host"
- "github.com/cosmos/ibc-go/core/exported"
+ "github.com/cosmos/ibc-go/modules/core/02-client/keeper"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
+ "github.com/cosmos/ibc-go/modules/core/exported"
)
var _ ClientUnmarshaler = (*keeper.Keeper)(nil)
diff --git a/core/02-client/simulation/decoder_test.go b/modules/core/02-client/simulation/decoder_test.go
similarity index 86%
rename from core/02-client/simulation/decoder_test.go
rename to modules/core/02-client/simulation/decoder_test.go
index 27add5fb..4903fefe 100644
--- a/core/02-client/simulation/decoder_test.go
+++ b/modules/core/02-client/simulation/decoder_test.go
@@ -9,10 +9,10 @@ import (
"github.com/cosmos/ibc-go/testing/simapp"
"github.com/cosmos/cosmos-sdk/types/kv"
- "github.com/cosmos/ibc-go/core/02-client/simulation"
- "github.com/cosmos/ibc-go/core/02-client/types"
- host "github.com/cosmos/ibc-go/core/24-host"
- ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
+ "github.com/cosmos/ibc-go/modules/core/02-client/simulation"
+ "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
+ ibctmtypes "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types"
)
func TestDecodeStore(t *testing.T) {
diff --git a/core/02-client/simulation/genesis.go b/modules/core/02-client/simulation/genesis.go
similarity index 83%
rename from core/02-client/simulation/genesis.go
rename to modules/core/02-client/simulation/genesis.go
index cc9c1601..c80b02ac 100644
--- a/core/02-client/simulation/genesis.go
+++ b/modules/core/02-client/simulation/genesis.go
@@ -4,7 +4,7 @@ import (
"math/rand"
simtypes "github.com/cosmos/cosmos-sdk/types/simulation"
- "github.com/cosmos/ibc-go/core/02-client/types"
+ "github.com/cosmos/ibc-go/modules/core/02-client/types"
)
// GenClientGenesis returns the default client genesis state.
diff --git a/core/02-client/types/client.go b/modules/core/02-client/types/client.go
similarity index 97%
rename from core/02-client/types/client.go
rename to modules/core/02-client/types/client.go
index 40d25ced..536b7d23 100644
--- a/core/02-client/types/client.go
+++ b/modules/core/02-client/types/client.go
@@ -10,8 +10,8 @@ import (
codectypes "github.com/cosmos/cosmos-sdk/codec/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- host "github.com/cosmos/ibc-go/core/24-host"
- "github.com/cosmos/ibc-go/core/exported"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
+ "github.com/cosmos/ibc-go/modules/core/exported"
)
var (
diff --git a/core/02-client/types/client.pb.go b/modules/core/02-client/types/client.pb.go
similarity index 91%
rename from core/02-client/types/client.pb.go
rename to modules/core/02-client/types/client.pb.go
index f19ea3ae..06d6b285 100644
--- a/core/02-client/types/client.pb.go
+++ b/modules/core/02-client/types/client.pb.go
@@ -402,53 +402,54 @@ func init() {
func init() { proto.RegisterFile("ibcgo/core/client/v1/client.proto", fileDescriptor_3cc2cf764ecc47af) }
var fileDescriptor_3cc2cf764ecc47af = []byte{
- // 736 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0xbf, 0x6f, 0xdb, 0x38,
- 0x14, 0xb6, 0x1c, 0xc7, 0x88, 0xe9, 0x9c, 0x9d, 0x53, 0xec, 0x8b, 0x93, 0xcb, 0x59, 0x3e, 0xe2,
- 0x06, 0x2f, 0x91, 0xce, 0x3e, 0xe0, 0x06, 0x6f, 0x67, 0x0f, 0x97, 0x0c, 0x77, 0x70, 0x55, 0x04,
- 0x2d, 0xba, 0x18, 0xfa, 0xc1, 0xc8, 0x0c, 0x64, 0xd1, 0x10, 0x29, 0x17, 0xee, 0x5f, 0xd0, 0xb1,
- 0x63, 0x87, 0x0e, 0xf9, 0x13, 0xfa, 0x57, 0x14, 0x19, 0xb3, 0x14, 0xe8, 0x24, 0x14, 0xc9, 0xd2,
- 0x59, 0x6b, 0x97, 0x42, 0x24, 0xe5, 0xd8, 0x6e, 0x52, 0x04, 0xed, 0x46, 0x3e, 0x7e, 0xef, 0x7b,
- 0xdf, 0xfb, 0xa8, 0x47, 0x81, 0xdf, 0xb1, 0xed, 0x78, 0xc4, 0x70, 0x48, 0x88, 0x0c, 0xc7, 0xc7,
- 0x28, 0x60, 0xc6, 0xac, 0x23, 0x57, 0xfa, 0x34, 0x24, 0x8c, 0xa8, 0x35, 0x0e, 0xd1, 0x53, 0x88,
- 0x2e, 0x0f, 0x66, 0x9d, 0x83, 0x9a, 0x47, 0x3c, 0xc2, 0x01, 0x46, 0xba, 0x12, 0xd8, 0x83, 0x7d,
- 0x8f, 0x10, 0xcf, 0x47, 0x06, 0xdf, 0xd9, 0xd1, 0x99, 0x61, 0x05, 0x73, 0x79, 0xf4, 0x87, 0x43,
- 0xe8, 0x84, 0x50, 0x23, 0x9a, 0x7a, 0xa1, 0xe5, 0x22, 0x63, 0xd6, 0xb1, 0x11, 0xb3, 0x3a, 0xd9,
- 0x5e, 0xa0, 0xe0, 0x1b, 0x05, 0xd4, 0x4f, 0x5c, 0x14, 0x30, 0x7c, 0x86, 0x91, 0x3b, 0xe0, 0xe5,
- 0x1e, 0x33, 0x8b, 0x21, 0xb5, 0x03, 0x4a, 0xa2, 0xfa, 0x08, 0xbb, 0x0d, 0xa5, 0xa5, 0xb4, 0x4b,
- 0xfd, 0x5a, 0x12, 0x6b, 0x3b, 0x73, 0x6b, 0xe2, 0xf7, 0xe0, 0xe2, 0x08, 0x9a, 0x5b, 0x62, 0x7d,
- 0xe2, 0xaa, 0x43, 0xb0, 0x2d, 0xe3, 0x34, 0xa5, 0x68, 0xe4, 0x5b, 0x4a, 0xbb, 0xdc, 0xad, 0xe9,
- 0x42, 0xa4, 0x9e, 0x89, 0xd4, 0xff, 0x09, 0xe6, 0xfd, 0xbd, 0x24, 0xd6, 0x76, 0x57, 0xb8, 0x78,
- 0x0e, 0x34, 0xcb, 0xce, 0xad, 0x08, 0xf8, 0x56, 0x01, 0x8d, 0x01, 0x09, 0x28, 0x0a, 0x68, 0x44,
- 0x79, 0xe8, 0x09, 0x66, 0xe3, 0x63, 0x84, 0xbd, 0x31, 0x53, 0x7b, 0xa0, 0x38, 0xe6, 0x2b, 0x2e,
- 0xaf, 0xdc, 0x3d, 0xd4, 0xef, 0x72, 0x4e, 0x17, 0xe8, 0x7e, 0xe1, 0x32, 0xd6, 0x72, 0xa6, 0xcc,
- 0x50, 0x9f, 0x82, 0xaa, 0x93, 0xf1, 0x3e, 0x40, 0xed, 0x7e, 0x12, 0x6b, 0xf5, 0x54, 0x2d, 0x5c,
- 0xcb, 0x82, 0x66, 0xc5, 0x59, 0xd1, 0x07, 0xdf, 0x29, 0xa0, 0x2e, 0x7c, 0x5c, 0x15, 0x4e, 0xbf,
- 0xc7, 0xd1, 0x17, 0x60, 0x67, 0xad, 0x20, 0x6d, 0xe4, 0x5b, 0x1b, 0xed, 0x72, 0x57, 0xbf, 0xbb,
- 0xd9, 0xfb, 0xcc, 0xea, 0x6b, 0x69, 0xfb, 0x49, 0xac, 0xed, 0xc9, 0x6a, 0x6b, 0xac, 0xd0, 0xac,
- 0xae, 0xf6, 0x41, 0xe1, 0xfb, 0x3c, 0xa8, 0x89, 0x46, 0x4e, 0xa7, 0xae, 0xc5, 0xd0, 0x30, 0x24,
- 0x53, 0x42, 0x2d, 0x5f, 0xad, 0x81, 0x4d, 0x86, 0x99, 0x8f, 0x44, 0x0f, 0xa6, 0xd8, 0xa8, 0x2d,
- 0x50, 0x76, 0x11, 0x75, 0x42, 0x3c, 0x65, 0x98, 0x04, 0xdc, 0xcd, 0x92, 0xb9, 0x1c, 0x52, 0x8f,
- 0xc1, 0xcf, 0x34, 0xb2, 0xcf, 0x91, 0xc3, 0x46, 0xb7, 0x3e, 0x6c, 0x70, 0x1f, 0x0e, 0x93, 0x58,
- 0x6b, 0x08, 0x65, 0x5f, 0x41, 0xa0, 0x59, 0x95, 0xb1, 0x41, 0x66, 0xcb, 0x23, 0x50, 0xa3, 0x91,
- 0x4d, 0x19, 0x66, 0x11, 0x43, 0x4b, 0x64, 0x05, 0x4e, 0xa6, 0x25, 0xb1, 0xf6, 0x6b, 0x46, 0x46,
- 0xed, 0x75, 0x14, 0x34, 0xd5, 0xdb, 0xe4, 0x05, 0xa5, 0x0d, 0x2a, 0x38, 0xc0, 0x0c, 0x5b, 0xfe,
- 0x48, 0x7e, 0x54, 0x9b, 0x0f, 0xf8, 0xa8, 0x7e, 0x93, 0xae, 0xd6, 0x45, 0xb9, 0x55, 0x06, 0x68,
- 0xfe, 0x24, 0x03, 0x02, 0xdd, 0x2b, 0xbc, 0xbc, 0xd0, 0x72, 0xf0, 0xb3, 0x02, 0xaa, 0xa7, 0x62,
- 0x08, 0x7f, 0xd8, 0xd2, 0xbf, 0x41, 0x61, 0xea, 0x5b, 0x01, 0x77, 0x31, 0xd5, 0x2a, 0x66, 0x5e,
- 0xcf, 0x66, 0x5c, 0xce, 0xbc, 0x3e, 0xf4, 0xad, 0x40, 0x0e, 0x00, 0xc7, 0xab, 0xe7, 0xa0, 0x2e,
- 0x31, 0xee, 0x68, 0x65, 0x64, 0x0b, 0xdf, 0x18, 0x82, 0x56, 0x12, 0x6b, 0x87, 0xa2, 0xd1, 0x3b,
- 0x93, 0xa1, 0xb9, 0x9b, 0xc5, 0x97, 0x1e, 0x92, 0xde, 0x76, 0xda, 0xf5, 0xeb, 0x0b, 0x2d, 0xf7,
- 0xe9, 0x42, 0x53, 0xd2, 0x07, 0xa7, 0x28, 0xe7, 0x77, 0x00, 0xaa, 0x21, 0x9a, 0x61, 0x8a, 0x49,
- 0x30, 0x0a, 0xa2, 0x89, 0x8d, 0x42, 0xde, 0x7e, 0xa1, 0x7f, 0x90, 0xc4, 0xda, 0x2f, 0xa2, 0xd0,
- 0x1a, 0x00, 0x9a, 0x95, 0x2c, 0xf2, 0x3f, 0x0f, 0xac, 0x90, 0xc8, 0x8b, 0xcb, 0xdf, 0x4b, 0x92,
- 0xdd, 0xcb, 0x82, 0x44, 0x5e, 0xcc, 0x56, 0x26, 0x11, 0xfe, 0x07, 0x8a, 0x43, 0x2b, 0xb4, 0x26,
- 0x34, 0x25, 0xb6, 0x7c, 0x9f, 0x3c, 0x5f, 0x34, 0x49, 0x1b, 0x4a, 0x6b, 0xa3, 0x5d, 0x5a, 0x26,
- 0x5e, 0x03, 0x40, 0xb3, 0x22, 0x23, 0xa2, 0x7f, 0xda, 0xff, 0xf7, 0xf2, 0xba, 0xa9, 0x5c, 0x5d,
- 0x37, 0x95, 0x8f, 0xd7, 0x4d, 0xe5, 0xd5, 0x4d, 0x33, 0x77, 0x75, 0xd3, 0xcc, 0x7d, 0xb8, 0x69,
- 0xe6, 0x9e, 0x1d, 0x79, 0x98, 0x8d, 0x23, 0x5b, 0x77, 0xc8, 0xc4, 0x90, 0x2f, 0x35, 0xb6, 0x9d,
- 0xa3, 0xec, 0xdf, 0xf0, 0x67, 0xf7, 0x48, 0xfe, 0x1e, 0xd8, 0x7c, 0x8a, 0xa8, 0x5d, 0xe4, 0x37,
- 0xf1, 0xd7, 0x97, 0x00, 0x00, 0x00, 0xff, 0xff, 0x74, 0x31, 0x08, 0x26, 0x40, 0x06, 0x00, 0x00,
+ // 743 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0xbf, 0x6f, 0xdb, 0x46,
+ 0x14, 0x16, 0x65, 0x59, 0xb0, 0x4e, 0xae, 0xe4, 0xd2, 0x52, 0x2d, 0xbb, 0xae, 0xa8, 0x1e, 0x3a,
+ 0x68, 0x31, 0x59, 0xa9, 0x68, 0x07, 0x6d, 0x95, 0x16, 0x7b, 0x68, 0xab, 0x32, 0x30, 0x12, 0x64,
+ 0x11, 0xf8, 0xe3, 0x4c, 0x9d, 0x41, 0xf1, 0x04, 0xde, 0x51, 0x81, 0xf2, 0x17, 0x64, 0xcc, 0x98,
+ 0x21, 0x83, 0xff, 0x84, 0xfc, 0x15, 0x81, 0x47, 0x2f, 0x01, 0x32, 0x11, 0x81, 0xbd, 0x64, 0xe6,
+ 0x9a, 0x25, 0xe0, 0xdd, 0x51, 0x96, 0x14, 0x3b, 0x30, 0x92, 0xed, 0xee, 0xdd, 0xf7, 0xbe, 0xf7,
+ 0xbd, 0xef, 0xf8, 0x8e, 0xe0, 0x57, 0x6c, 0x3b, 0x1e, 0x31, 0x1c, 0x12, 0x22, 0xc3, 0xf1, 0x31,
+ 0x0a, 0x98, 0x31, 0xeb, 0xc8, 0x95, 0x3e, 0x0d, 0x09, 0x23, 0x6a, 0x8d, 0x43, 0xf4, 0x14, 0xa2,
+ 0xcb, 0x83, 0x59, 0xe7, 0xa0, 0xe6, 0x11, 0x8f, 0x70, 0x80, 0x91, 0xae, 0x04, 0xf6, 0x60, 0xdf,
+ 0x23, 0xc4, 0xf3, 0x91, 0xc1, 0x77, 0x76, 0x74, 0x66, 0x58, 0xc1, 0x5c, 0x1e, 0xfd, 0xe6, 0x10,
+ 0x3a, 0x21, 0xd4, 0x88, 0xa6, 0x5e, 0x68, 0xb9, 0xc8, 0x98, 0x75, 0x6c, 0xc4, 0xac, 0x4e, 0xb6,
+ 0x17, 0x28, 0xf8, 0x5a, 0x01, 0xf5, 0x13, 0x17, 0x05, 0x0c, 0x9f, 0x61, 0xe4, 0x0e, 0x78, 0xb9,
+ 0x47, 0xcc, 0x62, 0x48, 0xed, 0x80, 0x92, 0xa8, 0x3e, 0xc2, 0x6e, 0x43, 0x69, 0x29, 0xed, 0x52,
+ 0xbf, 0x96, 0xc4, 0xda, 0xce, 0xdc, 0x9a, 0xf8, 0x3d, 0xb8, 0x38, 0x82, 0xe6, 0x96, 0x58, 0x9f,
+ 0xb8, 0xea, 0x10, 0x6c, 0xcb, 0x38, 0x4d, 0x29, 0x1a, 0xf9, 0x96, 0xd2, 0x2e, 0x77, 0x6b, 0xba,
+ 0x10, 0xa9, 0x67, 0x22, 0xf5, 0xbf, 0x83, 0x79, 0x7f, 0x2f, 0x89, 0xb5, 0xdd, 0x15, 0x2e, 0x9e,
+ 0x03, 0xcd, 0xb2, 0x73, 0x2b, 0x02, 0xbe, 0x51, 0x40, 0x63, 0x40, 0x02, 0x8a, 0x02, 0x1a, 0x51,
+ 0x1e, 0x7a, 0x8c, 0xd9, 0xf8, 0x18, 0x61, 0x6f, 0xcc, 0xd4, 0x1e, 0x28, 0x8e, 0xf9, 0x8a, 0xcb,
+ 0x2b, 0x77, 0x0f, 0xf5, 0xbb, 0x9c, 0xd3, 0x05, 0xba, 0x5f, 0xb8, 0x8c, 0xb5, 0x9c, 0x29, 0x33,
+ 0xd4, 0x27, 0xa0, 0xea, 0x64, 0xbc, 0x0f, 0x50, 0xbb, 0x9f, 0xc4, 0x5a, 0x3d, 0x55, 0x0b, 0xd7,
+ 0xb2, 0xa0, 0x59, 0x71, 0x56, 0xf4, 0xc1, 0xb7, 0x0a, 0xa8, 0x0b, 0x1f, 0x57, 0x85, 0xd3, 0x6f,
+ 0x71, 0xf4, 0x39, 0xd8, 0x59, 0x2b, 0x48, 0x1b, 0xf9, 0xd6, 0x46, 0xbb, 0xdc, 0xd5, 0xef, 0x6e,
+ 0xf6, 0x3e, 0xb3, 0xfa, 0x5a, 0xda, 0x7e, 0x12, 0x6b, 0x7b, 0xb2, 0xda, 0x1a, 0x2b, 0x34, 0xab,
+ 0xab, 0x7d, 0x50, 0xf8, 0x2e, 0x0f, 0x6a, 0xa2, 0x91, 0xd3, 0xa9, 0x6b, 0x31, 0x34, 0x0c, 0xc9,
+ 0x94, 0x50, 0xcb, 0x57, 0x6b, 0x60, 0x93, 0x61, 0xe6, 0x23, 0xd1, 0x83, 0x29, 0x36, 0x6a, 0x0b,
+ 0x94, 0x5d, 0x44, 0x9d, 0x10, 0x4f, 0x19, 0x26, 0x01, 0x77, 0xb3, 0x64, 0x2e, 0x87, 0xd4, 0x63,
+ 0xf0, 0x23, 0x8d, 0xec, 0x73, 0xe4, 0xb0, 0xd1, 0xad, 0x0f, 0x1b, 0xdc, 0x87, 0xc3, 0x24, 0xd6,
+ 0x1a, 0x42, 0xd9, 0x17, 0x10, 0x68, 0x56, 0x65, 0x6c, 0x90, 0xd9, 0xf2, 0x3f, 0xa8, 0xd1, 0xc8,
+ 0xa6, 0x0c, 0xb3, 0x88, 0xa1, 0x25, 0xb2, 0x02, 0x27, 0xd3, 0x92, 0x58, 0xfb, 0x39, 0x23, 0xa3,
+ 0xf6, 0x3a, 0x0a, 0x9a, 0xea, 0x6d, 0xf2, 0x82, 0xd2, 0x06, 0x15, 0x1c, 0x60, 0x86, 0x2d, 0x7f,
+ 0x24, 0x3f, 0xaa, 0xcd, 0x07, 0x7c, 0x54, 0xbf, 0x48, 0x57, 0xeb, 0xa2, 0xdc, 0x2a, 0x03, 0x34,
+ 0x7f, 0x90, 0x01, 0x81, 0xee, 0x15, 0x5e, 0x5c, 0x68, 0x39, 0xf8, 0x49, 0x01, 0xd5, 0x53, 0x31,
+ 0x84, 0xdf, 0x6d, 0xe9, 0x5f, 0xa0, 0x30, 0xf5, 0xad, 0x80, 0xbb, 0x98, 0x6a, 0x15, 0x33, 0xaf,
+ 0x67, 0x33, 0x2e, 0x67, 0x5e, 0x1f, 0xfa, 0x56, 0x20, 0x07, 0x80, 0xe3, 0xd5, 0x73, 0x50, 0x97,
+ 0x18, 0x77, 0xb4, 0x32, 0xb2, 0x85, 0xaf, 0x0c, 0x41, 0x2b, 0x89, 0xb5, 0x43, 0xd1, 0xe8, 0x9d,
+ 0xc9, 0xd0, 0xdc, 0xcd, 0xe2, 0x4b, 0x0f, 0x49, 0x6f, 0x3b, 0xed, 0xfa, 0xd5, 0x85, 0x96, 0xfb,
+ 0x78, 0xa1, 0x29, 0xe9, 0x83, 0x53, 0x94, 0xf3, 0x3b, 0x00, 0xd5, 0x10, 0xcd, 0x30, 0xc5, 0x24,
+ 0x18, 0x05, 0xd1, 0xc4, 0x46, 0x21, 0x6f, 0xbf, 0xd0, 0x3f, 0x48, 0x62, 0xed, 0x27, 0x51, 0x68,
+ 0x0d, 0x00, 0xcd, 0x4a, 0x16, 0xf9, 0x97, 0x07, 0x56, 0x48, 0xe4, 0xc5, 0xe5, 0xef, 0x25, 0xc9,
+ 0xee, 0x65, 0x41, 0x22, 0x2f, 0x66, 0x2b, 0x93, 0x08, 0xff, 0x01, 0xc5, 0xa1, 0x15, 0x5a, 0x13,
+ 0x9a, 0x12, 0x5b, 0xbe, 0x4f, 0x9e, 0x2d, 0x9a, 0xa4, 0x0d, 0xa5, 0xb5, 0xd1, 0x2e, 0x2d, 0x13,
+ 0xaf, 0x01, 0xa0, 0x59, 0x91, 0x11, 0xd1, 0x3f, 0xed, 0xff, 0x77, 0x79, 0xdd, 0x54, 0xae, 0xae,
+ 0x9b, 0xca, 0x87, 0xeb, 0xa6, 0xf2, 0xf2, 0xa6, 0x99, 0xbb, 0xba, 0x69, 0xe6, 0xde, 0xdf, 0x34,
+ 0x73, 0x4f, 0xff, 0xf4, 0x30, 0x1b, 0x47, 0xb6, 0xee, 0x90, 0x89, 0x21, 0x5f, 0x6a, 0x6c, 0x3b,
+ 0x47, 0x1e, 0x31, 0x26, 0xc4, 0x8d, 0x7c, 0x44, 0xc5, 0x3f, 0xe2, 0xf7, 0xee, 0x91, 0xfc, 0x4d,
+ 0xb0, 0xf9, 0x14, 0x51, 0xbb, 0xc8, 0x6f, 0xe4, 0x8f, 0xcf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x18,
+ 0x9f, 0x9f, 0xd0, 0x48, 0x06, 0x00, 0x00,
}
func (this *UpgradeProposal) Equal(that interface{}) bool {
diff --git a/core/02-client/types/client_test.go b/modules/core/02-client/types/client_test.go
similarity index 95%
rename from core/02-client/types/client_test.go
rename to modules/core/02-client/types/client_test.go
index 8854f189..e51d7ead 100644
--- a/core/02-client/types/client_test.go
+++ b/modules/core/02-client/types/client_test.go
@@ -5,8 +5,8 @@ import (
"github.com/stretchr/testify/require"
- "github.com/cosmos/ibc-go/core/02-client/types"
- "github.com/cosmos/ibc-go/core/exported"
+ "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ "github.com/cosmos/ibc-go/modules/core/exported"
ibctesting "github.com/cosmos/ibc-go/testing"
)
diff --git a/core/02-client/types/codec.go b/modules/core/02-client/types/codec.go
similarity index 99%
rename from core/02-client/types/codec.go
rename to modules/core/02-client/types/codec.go
index ab378bfd..41afe9c7 100644
--- a/core/02-client/types/codec.go
+++ b/modules/core/02-client/types/codec.go
@@ -8,7 +8,7 @@ import (
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
"github.com/cosmos/cosmos-sdk/types/msgservice"
govtypes "github.com/cosmos/cosmos-sdk/x/gov/types"
- "github.com/cosmos/ibc-go/core/exported"
+ "github.com/cosmos/ibc-go/modules/core/exported"
)
// RegisterInterfaces registers the client interfaces to protobuf Any.
diff --git a/core/02-client/types/codec_test.go b/modules/core/02-client/types/codec_test.go
similarity index 92%
rename from core/02-client/types/codec_test.go
rename to modules/core/02-client/types/codec_test.go
index 35913352..a0bfda60 100644
--- a/core/02-client/types/codec_test.go
+++ b/modules/core/02-client/types/codec_test.go
@@ -2,11 +2,11 @@ package types_test
import (
codectypes "github.com/cosmos/cosmos-sdk/codec/types"
- "github.com/cosmos/ibc-go/core/02-client/types"
- commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
- "github.com/cosmos/ibc-go/core/exported"
- ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
- localhosttypes "github.com/cosmos/ibc-go/light-clients/09-localhost/types"
+ "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ commitmenttypes "github.com/cosmos/ibc-go/modules/core/23-commitment/types"
+ "github.com/cosmos/ibc-go/modules/core/exported"
+ ibctmtypes "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types"
+ localhosttypes "github.com/cosmos/ibc-go/modules/light-clients/09-localhost/types"
ibctesting "github.com/cosmos/ibc-go/testing"
)
diff --git a/core/02-client/types/encoding.go b/modules/core/02-client/types/encoding.go
similarity index 98%
rename from core/02-client/types/encoding.go
rename to modules/core/02-client/types/encoding.go
index 2bd45b67..327dd163 100644
--- a/core/02-client/types/encoding.go
+++ b/modules/core/02-client/types/encoding.go
@@ -4,7 +4,7 @@ import (
"fmt"
"github.com/cosmos/cosmos-sdk/codec"
- "github.com/cosmos/ibc-go/core/exported"
+ "github.com/cosmos/ibc-go/modules/core/exported"
)
// MustUnmarshalClientState attempts to decode and return a ClientState object from
diff --git a/core/02-client/types/encoding_test.go b/modules/core/02-client/types/encoding_test.go
similarity index 80%
rename from core/02-client/types/encoding_test.go
rename to modules/core/02-client/types/encoding_test.go
index 066aecbf..a528c792 100644
--- a/core/02-client/types/encoding_test.go
+++ b/modules/core/02-client/types/encoding_test.go
@@ -1,8 +1,8 @@
package types_test
import (
- "github.com/cosmos/ibc-go/core/02-client/types"
- ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
+ "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ ibctmtypes "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types"
)
func (suite *TypesTestSuite) TestMarshalHeader() {
diff --git a/core/02-client/types/errors.go b/modules/core/02-client/types/errors.go
similarity index 100%
rename from core/02-client/types/errors.go
rename to modules/core/02-client/types/errors.go
diff --git a/core/02-client/types/events.go b/modules/core/02-client/types/events.go
similarity index 92%
rename from core/02-client/types/events.go
rename to modules/core/02-client/types/events.go
index d9f91c51..464ad4d4 100644
--- a/core/02-client/types/events.go
+++ b/modules/core/02-client/types/events.go
@@ -3,7 +3,7 @@ package types
import (
"fmt"
- host "github.com/cosmos/ibc-go/core/24-host"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
)
// IBC client events
diff --git a/core/02-client/types/expected_keepers.go b/modules/core/02-client/types/expected_keepers.go
similarity index 100%
rename from core/02-client/types/expected_keepers.go
rename to modules/core/02-client/types/expected_keepers.go
diff --git a/core/02-client/types/genesis.go b/modules/core/02-client/types/genesis.go
similarity index 98%
rename from core/02-client/types/genesis.go
rename to modules/core/02-client/types/genesis.go
index e18059b1..06ac6dad 100644
--- a/core/02-client/types/genesis.go
+++ b/modules/core/02-client/types/genesis.go
@@ -5,8 +5,8 @@ import (
"sort"
codectypes "github.com/cosmos/cosmos-sdk/codec/types"
- host "github.com/cosmos/ibc-go/core/24-host"
- "github.com/cosmos/ibc-go/core/exported"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
+ "github.com/cosmos/ibc-go/modules/core/exported"
)
var (
diff --git a/core/02-client/types/genesis.pb.go b/modules/core/02-client/types/genesis.pb.go
similarity index 88%
rename from core/02-client/types/genesis.pb.go
rename to modules/core/02-client/types/genesis.pb.go
index e4246f5c..71b9b2b3 100644
--- a/core/02-client/types/genesis.pb.go
+++ b/modules/core/02-client/types/genesis.pb.go
@@ -220,41 +220,41 @@ func init() {
}
var fileDescriptor_a1110e97fc5e4abf = []byte{
- // 531 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x53, 0xc1, 0x6e, 0xd3, 0x40,
- 0x14, 0xcc, 0xb6, 0x69, 0x68, 0xb7, 0x15, 0x0d, 0xab, 0xa8, 0x98, 0x16, 0xd9, 0xc1, 0x12, 0x92,
- 0x25, 0x14, 0x9b, 0x84, 0x5b, 0x2e, 0x48, 0xae, 0x44, 0x55, 0x09, 0x24, 0x30, 0x37, 0x2e, 0xd6,
- 0x66, 0xbd, 0xb8, 0x16, 0xb6, 0x37, 0x64, 0x37, 0x11, 0x11, 0x3f, 0xc0, 0x91, 0x03, 0x1f, 0xc0,
- 0x99, 0x8f, 0xe0, 0xdc, 0x63, 0x8f, 0x9c, 0x42, 0x95, 0xfc, 0x41, 0xbe, 0x00, 0x79, 0x77, 0x4d,
- 0x5b, 0x63, 0xe0, 0xf6, 0x32, 0x9e, 0x99, 0x37, 0x9a, 0x97, 0x85, 0x76, 0x32, 0x22, 0x31, 0xf3,
- 0x08, 0x9b, 0x50, 0x8f, 0xa4, 0x09, 0xcd, 0x85, 0x37, 0xeb, 0x7b, 0x31, 0xcd, 0x29, 0x4f, 0xb8,
- 0x3b, 0x9e, 0x30, 0xc1, 0x50, 0x47, 0x72, 0xdc, 0x82, 0xe3, 0x2a, 0x8e, 0x3b, 0xeb, 0x1f, 0x3e,
- 0xa8, 0x55, 0xea, 0xef, 0x52, 0x78, 0xd8, 0x89, 0x59, 0xcc, 0xe4, 0xe8, 0x15, 0x93, 0x42, 0xed,
- 0xcb, 0x26, 0xdc, 0x3b, 0x51, 0x0b, 0x5e, 0x0b, 0x2c, 0x28, 0xa2, 0xf0, 0x96, 0x92, 0x71, 0x03,
- 0x74, 0x37, 0x9d, 0xdd, 0xc1, 0x23, 0xb7, 0x6e, 0xa3, 0x7b, 0x1a, 0xd1, 0x5c, 0x24, 0x6f, 0x13,
- 0x1a, 0x1d, 0x4b, 0x4c, 0xaa, 0x7d, 0xf3, 0x7c, 0x61, 0x35, 0xbe, 0xfd, 0xb4, 0x0e, 0x6a, 0x3f,
- 0xf3, 0xa0, 0xf4, 0x46, 0x5f, 0x00, 0xbc, 0xa3, 0xe7, 0x90, 0xb0, 0x9c, 0xd3, 0x9c, 0x4f, 0xb9,
- 0xb1, 0xf1, 0xaf, 0x8d, 0xca, 0xe8, 0xb8, 0x24, 0x2b, 0x47, 0x7f, 0x58, 0x6c, 0x5c, 0x2f, 0x2c,
- 0x63, 0x8e, 0xb3, 0x74, 0x68, 0xff, 0xe1, 0x69, 0x17, 0x69, 0x94, 0x94, 0x57, 0xb4, 0x41, 0x9b,
- 0x54, 0x70, 0xf4, 0x11, 0x96, 0x58, 0x98, 0x51, 0x81, 0x23, 0x2c, 0xb0, 0xb1, 0x29, 0x43, 0x79,
- 0xff, 0xab, 0x41, 0xb7, 0xf8, 0x42, 0xcb, 0x7c, 0x4b, 0x07, 0xbb, 0x7b, 0x33, 0x58, 0x69, 0x6b,
- 0x07, 0xfb, 0x1a, 0x2a, 0x15, 0x68, 0x08, 0x5b, 0x63, 0x3c, 0xc1, 0x19, 0x37, 0x9a, 0x5d, 0xe0,
- 0xec, 0x0e, 0xee, 0xd7, 0xaf, 0x7c, 0x29, 0x39, 0x7e, 0xb3, 0xf0, 0x0f, 0xb4, 0x02, 0x3d, 0x83,
- 0x6d, 0x32, 0xa1, 0x58, 0xd0, 0x30, 0x65, 0x04, 0xa7, 0x67, 0x8c, 0x0b, 0x63, 0xab, 0x0b, 0x9c,
- 0x6d, 0xff, 0xe8, 0x5a, 0x86, 0x0a, 0xa3, 0xc8, 0x20, 0xa1, 0xe7, 0x25, 0x82, 0x5e, 0xc1, 0x4e,
- 0x4e, 0x3f, 0x88, 0x50, 0xad, 0x0b, 0x39, 0x7d, 0x3f, 0xa5, 0x39, 0xa1, 0x46, 0xab, 0x0b, 0x9c,
- 0xa6, 0x6f, 0xad, 0x17, 0xd6, 0x91, 0xf2, 0xaa, 0x63, 0xd9, 0x01, 0x2a, 0x60, 0x7d, 0xf1, 0x12,
- 0x7c, 0x0a, 0xf7, 0x2b, 0xdd, 0xa0, 0x36, 0xdc, 0x7c, 0x47, 0xe7, 0x06, 0xe8, 0x02, 0x67, 0x2f,
- 0x28, 0x46, 0xd4, 0x81, 0x5b, 0x33, 0x9c, 0x4e, 0xa9, 0xb1, 0x21, 0x31, 0xf5, 0x63, 0xd8, 0xfc,
- 0xf4, 0xd5, 0x6a, 0xd8, 0xdf, 0x01, 0xbc, 0xf7, 0xd7, 0x9e, 0x51, 0x1f, 0xee, 0xe8, 0x18, 0x49,
- 0x24, 0x1d, 0x77, 0xfc, 0xce, 0x7a, 0x61, 0xb5, 0xaf, 0xd7, 0x1e, 0x26, 0x91, 0x1d, 0x6c, 0xab,
- 0xf9, 0x34, 0x42, 0x39, 0xd4, 0xdd, 0x5f, 0x1d, 0x59, 0xfd, 0xf3, 0x1e, 0xd6, 0x37, 0x5e, 0x3d,
- 0xad, 0xa9, 0x4f, 0x7b, 0x70, 0x63, 0xc7, 0xd5, 0x65, 0x6f, 0x2b, 0xe4, 0x37, 0xff, 0xe4, 0x7c,
- 0x69, 0x82, 0x8b, 0xa5, 0x09, 0x2e, 0x97, 0x26, 0xf8, 0xbc, 0x32, 0x1b, 0x17, 0x2b, 0xb3, 0xf1,
- 0x63, 0x65, 0x36, 0xde, 0xf4, 0xe2, 0x44, 0x9c, 0x4d, 0x47, 0x2e, 0x61, 0x99, 0x47, 0x18, 0xcf,
- 0x18, 0xf7, 0x92, 0x11, 0xe9, 0x95, 0x4f, 0xf9, 0xf1, 0xa0, 0xa7, 0x5f, 0xb3, 0x98, 0x8f, 0x29,
- 0x1f, 0xb5, 0xe4, 0xa3, 0x7d, 0xf2, 0x2b, 0x00, 0x00, 0xff, 0xff, 0x7e, 0x8f, 0x24, 0x27, 0x29,
- 0x04, 0x00, 0x00,
+ // 541 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x53, 0xc1, 0x6e, 0xd3, 0x4c,
+ 0x18, 0xcc, 0x36, 0x69, 0xfe, 0x76, 0x5b, 0xfd, 0x0d, 0xab, 0xa8, 0x98, 0x16, 0xd9, 0xc1, 0x12,
+ 0x52, 0x24, 0x54, 0x9b, 0x04, 0x71, 0xc9, 0x05, 0xc9, 0x95, 0x40, 0x95, 0x40, 0x80, 0xb9, 0x71,
+ 0xb1, 0x9c, 0xf5, 0x87, 0x6b, 0x61, 0x7b, 0x43, 0x76, 0x13, 0x11, 0xf1, 0x02, 0x1c, 0x39, 0xf0,
+ 0x00, 0x9c, 0x79, 0x08, 0xce, 0x3d, 0xf6, 0xc8, 0x29, 0x54, 0xc9, 0x1b, 0xe4, 0x09, 0x90, 0x77,
+ 0xd7, 0xb4, 0x0d, 0x06, 0x6e, 0x5f, 0xc6, 0x33, 0xf3, 0x8d, 0xe6, 0xcb, 0x62, 0x3b, 0x19, 0xd2,
+ 0x98, 0xb9, 0x94, 0x8d, 0xc1, 0xa5, 0x69, 0x02, 0xb9, 0x70, 0xa7, 0x3d, 0x37, 0x86, 0x1c, 0x78,
+ 0xc2, 0x9d, 0xd1, 0x98, 0x09, 0x46, 0xda, 0x92, 0xe3, 0x14, 0x1c, 0x47, 0x71, 0x9c, 0x69, 0xef,
+ 0xe0, 0x4e, 0xa5, 0x52, 0x7f, 0x97, 0xc2, 0x83, 0x76, 0xcc, 0x62, 0x26, 0x47, 0xb7, 0x98, 0x14,
+ 0x6a, 0x5f, 0x34, 0xf0, 0xee, 0x13, 0xb5, 0xe0, 0x95, 0x08, 0x05, 0x10, 0xc0, 0xff, 0x29, 0x19,
+ 0x37, 0x50, 0xa7, 0xde, 0xdd, 0xe9, 0xdf, 0x73, 0xaa, 0x36, 0x3a, 0x27, 0x11, 0xe4, 0x22, 0x79,
+ 0x93, 0x40, 0x74, 0x2c, 0x31, 0xa9, 0xf6, 0xcc, 0xb3, 0xb9, 0x55, 0xfb, 0xfa, 0xc3, 0xda, 0xaf,
+ 0xfc, 0xcc, 0xfd, 0xd2, 0x9b, 0x7c, 0x46, 0xf8, 0x86, 0x9e, 0x03, 0xca, 0x72, 0x0e, 0x39, 0x9f,
+ 0x70, 0x63, 0xe3, 0x6f, 0x1b, 0x95, 0xd1, 0x71, 0x49, 0x56, 0x8e, 0xde, 0xa0, 0xd8, 0xb8, 0x9a,
+ 0x5b, 0xc6, 0x2c, 0xcc, 0xd2, 0x81, 0xfd, 0x9b, 0xa7, 0x5d, 0xa4, 0x51, 0x52, 0xbe, 0xa6, 0xf5,
+ 0x5b, 0x74, 0x0d, 0x27, 0x1f, 0x70, 0x89, 0x05, 0x19, 0x88, 0x30, 0x0a, 0x45, 0x68, 0xd4, 0x65,
+ 0x28, 0xf7, 0x5f, 0x35, 0xe8, 0x16, 0x9f, 0x69, 0x99, 0x67, 0xe9, 0x60, 0x37, 0xaf, 0x07, 0x2b,
+ 0x6d, 0x6d, 0x7f, 0x4f, 0x43, 0xa5, 0x82, 0x0c, 0x70, 0x73, 0x14, 0x8e, 0xc3, 0x8c, 0x1b, 0x8d,
+ 0x0e, 0xea, 0xee, 0xf4, 0x6f, 0x57, 0xaf, 0x7c, 0x21, 0x39, 0x5e, 0xa3, 0xf0, 0xf7, 0xb5, 0x82,
+ 0x3c, 0xc6, 0x2d, 0x3a, 0x86, 0x50, 0x40, 0x90, 0x32, 0x1a, 0xa6, 0xa7, 0x8c, 0x0b, 0x63, 0xb3,
+ 0x83, 0xba, 0x5b, 0xde, 0xe1, 0x95, 0x0c, 0x6b, 0x8c, 0x22, 0x83, 0x84, 0x9e, 0x96, 0x08, 0x79,
+ 0x89, 0xdb, 0x39, 0xbc, 0x17, 0x81, 0x5a, 0x17, 0x70, 0x78, 0x37, 0x81, 0x9c, 0x82, 0xd1, 0xec,
+ 0xa0, 0x6e, 0xc3, 0xb3, 0x56, 0x73, 0xeb, 0x50, 0x79, 0x55, 0xb1, 0x6c, 0x9f, 0x14, 0xb0, 0xbe,
+ 0x78, 0x09, 0x3e, 0xc2, 0x7b, 0x6b, 0xdd, 0x90, 0x16, 0xae, 0xbf, 0x85, 0x99, 0x81, 0x3a, 0xa8,
+ 0xbb, 0xeb, 0x17, 0x23, 0x69, 0xe3, 0xcd, 0x69, 0x98, 0x4e, 0xc0, 0xd8, 0x90, 0x98, 0xfa, 0x31,
+ 0x68, 0x7c, 0xfc, 0x62, 0xd5, 0xec, 0x6f, 0x08, 0xdf, 0xfa, 0x63, 0xcf, 0xa4, 0x87, 0xb7, 0x75,
+ 0x8c, 0x24, 0x92, 0x8e, 0xdb, 0x5e, 0x7b, 0x35, 0xb7, 0x5a, 0x57, 0x6b, 0x0f, 0x92, 0xc8, 0xf6,
+ 0xb7, 0xd4, 0x7c, 0x12, 0x91, 0x1c, 0xeb, 0xee, 0x2f, 0x8f, 0xac, 0xfe, 0x79, 0x77, 0xab, 0x1b,
+ 0x5f, 0x3f, 0xad, 0xa9, 0x4f, 0xbb, 0x7f, 0x6d, 0xc7, 0xe5, 0x65, 0xff, 0x57, 0xc8, 0x2f, 0xfe,
+ 0xf3, 0xb3, 0x85, 0x89, 0xce, 0x17, 0x26, 0xba, 0x58, 0x98, 0xe8, 0xd3, 0xd2, 0xac, 0x9d, 0x2f,
+ 0xcd, 0xda, 0xf7, 0xa5, 0x59, 0x7b, 0xfd, 0x30, 0x4e, 0xc4, 0xe9, 0x64, 0xe8, 0x50, 0x96, 0xb9,
+ 0x94, 0xf1, 0x8c, 0x71, 0x37, 0x19, 0xd2, 0xa3, 0x98, 0xb9, 0x19, 0x8b, 0x26, 0x29, 0x70, 0xf5,
+ 0xa4, 0xef, 0xf7, 0x8f, 0xf4, 0xab, 0x16, 0xb3, 0x11, 0xf0, 0x61, 0x53, 0x3e, 0xde, 0x07, 0x3f,
+ 0x03, 0x00, 0x00, 0xff, 0xff, 0xd0, 0x5b, 0x1b, 0x7c, 0x31, 0x04, 0x00, 0x00,
}
func (m *GenesisState) Marshal() (dAtA []byte, err error) {
diff --git a/core/02-client/types/genesis_test.go b/modules/core/02-client/types/genesis_test.go
similarity index 97%
rename from core/02-client/types/genesis_test.go
rename to modules/core/02-client/types/genesis_test.go
index c50f5a30..c3d207c1 100644
--- a/core/02-client/types/genesis_test.go
+++ b/modules/core/02-client/types/genesis_test.go
@@ -5,13 +5,13 @@ import (
tmtypes "github.com/tendermint/tendermint/types"
- client "github.com/cosmos/ibc-go/core/02-client"
- "github.com/cosmos/ibc-go/core/02-client/types"
- channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
- commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
- "github.com/cosmos/ibc-go/core/exported"
- ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
- localhosttypes "github.com/cosmos/ibc-go/light-clients/09-localhost/types"
+ client "github.com/cosmos/ibc-go/modules/core/02-client"
+ "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
+ commitmenttypes "github.com/cosmos/ibc-go/modules/core/23-commitment/types"
+ "github.com/cosmos/ibc-go/modules/core/exported"
+ ibctmtypes "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types"
+ localhosttypes "github.com/cosmos/ibc-go/modules/light-clients/09-localhost/types"
ibctesting "github.com/cosmos/ibc-go/testing"
ibctestingmock "github.com/cosmos/ibc-go/testing/mock"
)
diff --git a/core/02-client/types/height.go b/modules/core/02-client/types/height.go
similarity index 99%
rename from core/02-client/types/height.go
rename to modules/core/02-client/types/height.go
index 9e19b92c..5f4d025b 100644
--- a/core/02-client/types/height.go
+++ b/modules/core/02-client/types/height.go
@@ -9,7 +9,7 @@ import (
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- "github.com/cosmos/ibc-go/core/exported"
+ "github.com/cosmos/ibc-go/modules/core/exported"
)
var _ exported.Height = (*Height)(nil)
diff --git a/core/02-client/types/height_test.go b/modules/core/02-client/types/height_test.go
similarity index 98%
rename from core/02-client/types/height_test.go
rename to modules/core/02-client/types/height_test.go
index ca8c0092..145a897b 100644
--- a/core/02-client/types/height_test.go
+++ b/modules/core/02-client/types/height_test.go
@@ -6,7 +6,7 @@ import (
"github.com/stretchr/testify/require"
- "github.com/cosmos/ibc-go/core/02-client/types"
+ "github.com/cosmos/ibc-go/modules/core/02-client/types"
)
func TestZeroHeight(t *testing.T) {
diff --git a/core/02-client/types/keys.go b/modules/core/02-client/types/keys.go
similarity index 97%
rename from core/02-client/types/keys.go
rename to modules/core/02-client/types/keys.go
index 58e01c88..b7f6ddf9 100644
--- a/core/02-client/types/keys.go
+++ b/modules/core/02-client/types/keys.go
@@ -7,7 +7,7 @@ import (
"strings"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- host "github.com/cosmos/ibc-go/core/24-host"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
)
const (
diff --git a/core/02-client/types/keys_test.go b/modules/core/02-client/types/keys_test.go
similarity index 96%
rename from core/02-client/types/keys_test.go
rename to modules/core/02-client/types/keys_test.go
index b9188d0e..f3935454 100644
--- a/core/02-client/types/keys_test.go
+++ b/modules/core/02-client/types/keys_test.go
@@ -6,7 +6,7 @@ import (
"github.com/stretchr/testify/require"
- "github.com/cosmos/ibc-go/core/02-client/types"
+ "github.com/cosmos/ibc-go/modules/core/02-client/types"
)
// tests ParseClientIdentifier and IsValidClientID
diff --git a/core/02-client/types/msgs.go b/modules/core/02-client/types/msgs.go
similarity index 99%
rename from core/02-client/types/msgs.go
rename to modules/core/02-client/types/msgs.go
index 1092668c..bc17c675 100644
--- a/core/02-client/types/msgs.go
+++ b/modules/core/02-client/types/msgs.go
@@ -4,8 +4,8 @@ import (
codectypes "github.com/cosmos/cosmos-sdk/codec/types"
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- host "github.com/cosmos/ibc-go/core/24-host"
- "github.com/cosmos/ibc-go/core/exported"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
+ "github.com/cosmos/ibc-go/modules/core/exported"
)
// message types for the IBC client
diff --git a/core/02-client/types/msgs_test.go b/modules/core/02-client/types/msgs_test.go
similarity index 98%
rename from core/02-client/types/msgs_test.go
rename to modules/core/02-client/types/msgs_test.go
index 7a5aa8db..9019f133 100644
--- a/core/02-client/types/msgs_test.go
+++ b/modules/core/02-client/types/msgs_test.go
@@ -7,11 +7,11 @@ import (
"github.com/golang/protobuf/proto"
"github.com/stretchr/testify/suite"
- "github.com/cosmos/ibc-go/core/02-client/types"
- commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
- "github.com/cosmos/ibc-go/core/exported"
- solomachinetypes "github.com/cosmos/ibc-go/light-clients/06-solomachine/types"
- ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
+ "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ commitmenttypes "github.com/cosmos/ibc-go/modules/core/23-commitment/types"
+ "github.com/cosmos/ibc-go/modules/core/exported"
+ solomachinetypes "github.com/cosmos/ibc-go/modules/light-clients/06-solomachine/types"
+ ibctmtypes "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types"
ibctesting "github.com/cosmos/ibc-go/testing"
)
diff --git a/core/02-client/types/params.go b/modules/core/02-client/types/params.go
similarity index 97%
rename from core/02-client/types/params.go
rename to modules/core/02-client/types/params.go
index a652aa1a..f9c50f15 100644
--- a/core/02-client/types/params.go
+++ b/modules/core/02-client/types/params.go
@@ -4,7 +4,7 @@ import (
"fmt"
"strings"
- "github.com/cosmos/ibc-go/core/exported"
+ "github.com/cosmos/ibc-go/modules/core/exported"
paramtypes "github.com/cosmos/cosmos-sdk/x/params/types"
)
diff --git a/core/02-client/types/params_test.go b/modules/core/02-client/types/params_test.go
similarity index 91%
rename from core/02-client/types/params_test.go
rename to modules/core/02-client/types/params_test.go
index d29a864b..1f3d0571 100644
--- a/core/02-client/types/params_test.go
+++ b/modules/core/02-client/types/params_test.go
@@ -5,7 +5,7 @@ import (
"github.com/stretchr/testify/require"
- "github.com/cosmos/ibc-go/core/exported"
+ "github.com/cosmos/ibc-go/modules/core/exported"
)
func TestValidateParams(t *testing.T) {
diff --git a/core/02-client/types/proposal.go b/modules/core/02-client/types/proposal.go
similarity index 98%
rename from core/02-client/types/proposal.go
rename to modules/core/02-client/types/proposal.go
index 36b6e992..4946d374 100644
--- a/core/02-client/types/proposal.go
+++ b/modules/core/02-client/types/proposal.go
@@ -6,7 +6,7 @@ import (
codectypes "github.com/cosmos/cosmos-sdk/codec/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
govtypes "github.com/cosmos/cosmos-sdk/x/gov/types"
- "github.com/cosmos/ibc-go/core/exported"
+ "github.com/cosmos/ibc-go/modules/core/exported"
upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
)
diff --git a/core/02-client/types/proposal_test.go b/modules/core/02-client/types/proposal_test.go
similarity index 97%
rename from core/02-client/types/proposal_test.go
rename to modules/core/02-client/types/proposal_test.go
index 52cdd563..ab3fd362 100644
--- a/core/02-client/types/proposal_test.go
+++ b/modules/core/02-client/types/proposal_test.go
@@ -7,9 +7,9 @@ import (
"github.com/cosmos/cosmos-sdk/codec"
codectypes "github.com/cosmos/cosmos-sdk/codec/types"
govtypes "github.com/cosmos/cosmos-sdk/x/gov/types"
- "github.com/cosmos/ibc-go/core/02-client/types"
- "github.com/cosmos/ibc-go/core/exported"
- ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
+ "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ "github.com/cosmos/ibc-go/modules/core/exported"
+ ibctmtypes "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types"
ibctesting "github.com/cosmos/ibc-go/testing"
upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
)
diff --git a/core/02-client/types/query.go b/modules/core/02-client/types/query.go
similarity index 97%
rename from core/02-client/types/query.go
rename to modules/core/02-client/types/query.go
index 2794d8aa..5dc9d73d 100644
--- a/core/02-client/types/query.go
+++ b/modules/core/02-client/types/query.go
@@ -2,7 +2,7 @@ package types
import (
codectypes "github.com/cosmos/cosmos-sdk/codec/types"
- "github.com/cosmos/ibc-go/core/exported"
+ "github.com/cosmos/ibc-go/modules/core/exported"
)
var (
diff --git a/core/02-client/types/query.pb.go b/modules/core/02-client/types/query.pb.go
similarity index 93%
rename from core/02-client/types/query.pb.go
rename to modules/core/02-client/types/query.pb.go
index f586604e..9709353c 100644
--- a/core/02-client/types/query.pb.go
+++ b/modules/core/02-client/types/query.pb.go
@@ -707,64 +707,64 @@ func init() {
func init() { proto.RegisterFile("ibcgo/core/client/v1/query.proto", fileDescriptor_833c7bc6da1addd1) }
var fileDescriptor_833c7bc6da1addd1 = []byte{
- // 904 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0x4d, 0x8f, 0xdb, 0x44,
- 0x18, 0xde, 0xd9, 0xdd, 0x56, 0xdb, 0x49, 0xba, 0x41, 0xd3, 0x94, 0xa6, 0xee, 0x92, 0xa4, 0xae,
- 0x44, 0x03, 0x28, 0x33, 0x9b, 0x40, 0x97, 0x5e, 0x40, 0x62, 0x11, 0xfd, 0xb8, 0xa0, 0xd6, 0x08,
+ // 911 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0xcf, 0x8f, 0xdb, 0x44,
+ 0x14, 0xde, 0xd9, 0xdd, 0x56, 0xdb, 0x49, 0xba, 0x41, 0xd3, 0x94, 0xa6, 0xee, 0x92, 0xa4, 0xae,
+ 0x44, 0x03, 0x28, 0x33, 0x9b, 0xc0, 0x2e, 0xbd, 0x80, 0xc4, 0x22, 0x4a, 0x7b, 0x81, 0xd6, 0x08,
0x21, 0x71, 0x89, 0x6c, 0x67, 0xe2, 0x58, 0x4a, 0x3c, 0xae, 0xc7, 0x8e, 0xb4, 0xaa, 0xf6, 0xd2,
- 0x1f, 0x80, 0x90, 0xb8, 0x71, 0xe5, 0xc6, 0xa9, 0xe2, 0xc6, 0x81, 0x1b, 0x87, 0x3d, 0x56, 0x42,
- 0x42, 0x9c, 0x00, 0x6d, 0xf8, 0x21, 0xc8, 0x33, 0xe3, 0xd4, 0xce, 0x4e, 0xba, 0x0e, 0xa2, 0x37,
- 0xfb, 0xfd, 0x7c, 0xde, 0xe7, 0xfd, 0xb0, 0x61, 0xdb, 0x77, 0x5c, 0x8f, 0x11, 0x97, 0x45, 0x94,
- 0xb8, 0x13, 0x9f, 0x06, 0x31, 0x99, 0xf5, 0xc8, 0x93, 0x84, 0x46, 0x47, 0x38, 0x8c, 0x58, 0xcc,
- 0x50, 0x5d, 0x58, 0xe0, 0xd4, 0x02, 0x4b, 0x0b, 0x3c, 0xeb, 0x19, 0xef, 0xba, 0x8c, 0x4f, 0x19,
- 0x27, 0x8e, 0xcd, 0xa9, 0x34, 0x27, 0xb3, 0x9e, 0x43, 0x63, 0xbb, 0x47, 0x42, 0xdb, 0xf3, 0x03,
- 0x3b, 0xf6, 0x59, 0x20, 0x23, 0x18, 0x37, 0xb5, 0x39, 0x54, 0x2c, 0x69, 0x72, 0xdd, 0x63, 0xcc,
- 0x9b, 0x50, 0x22, 0xde, 0x9c, 0x64, 0x44, 0xec, 0x40, 0xe5, 0x37, 0xf6, 0x94, 0xca, 0x0e, 0x7d,
- 0x62, 0x07, 0x01, 0x8b, 0x45, 0x68, 0xae, 0xb4, 0x75, 0x8f, 0x79, 0x4c, 0x3c, 0x92, 0xf4, 0x49,
- 0x4a, 0xcd, 0x03, 0x78, 0xed, 0x71, 0x8a, 0xe9, 0x53, 0x91, 0xe3, 0x8b, 0xd8, 0x8e, 0xa9, 0x45,
- 0x9f, 0x24, 0x94, 0xc7, 0xe8, 0x06, 0xbc, 0x24, 0x33, 0x0f, 0xfc, 0x61, 0x03, 0xb4, 0x41, 0xe7,
- 0x92, 0xb5, 0x23, 0x05, 0x0f, 0x87, 0xe6, 0x73, 0x00, 0x1b, 0x67, 0x1d, 0x79, 0xc8, 0x02, 0x4e,
- 0xd1, 0x87, 0xb0, 0xaa, 0x3c, 0x79, 0x2a, 0x17, 0xce, 0x95, 0x7e, 0x1d, 0x4b, 0x7c, 0x38, 0x83,
- 0x8e, 0x3f, 0x09, 0x8e, 0xac, 0x8a, 0xfb, 0x32, 0x00, 0xaa, 0xc3, 0x0b, 0x61, 0xc4, 0xd8, 0xa8,
- 0xb1, 0xd9, 0x06, 0x9d, 0xaa, 0x25, 0x5f, 0xd0, 0x67, 0xb0, 0x2a, 0x1e, 0x06, 0x63, 0xea, 0x7b,
- 0xe3, 0xb8, 0xb1, 0x25, 0xc2, 0xed, 0x61, 0x1d, 0xdd, 0xf8, 0x81, 0xb0, 0x39, 0xdc, 0x3e, 0xf9,
- 0xb3, 0xb5, 0x61, 0x55, 0x84, 0x9f, 0x14, 0x99, 0xce, 0x59, 0xc4, 0x3c, 0xab, 0xf5, 0x1e, 0x84,
- 0x2f, 0x9b, 0xa1, 0xf0, 0xbe, 0x8d, 0x65, 0xe7, 0x70, 0xda, 0x39, 0x2c, 0x1b, 0xad, 0x3a, 0x87,
- 0x1f, 0xd9, 0x5e, 0xc6, 0x93, 0x95, 0xf3, 0x34, 0x7f, 0x07, 0xf0, 0xba, 0x26, 0x89, 0xe2, 0x25,
- 0x84, 0x97, 0xf3, 0xbc, 0xf0, 0x06, 0x68, 0x6f, 0x75, 0x2a, 0xfd, 0xf7, 0xf4, 0x95, 0x3c, 0x1c,
- 0xd2, 0x20, 0xf6, 0x47, 0x3e, 0x1d, 0xe6, 0x82, 0x1d, 0x36, 0xd3, 0xc2, 0x7e, 0xfc, 0xab, 0xf5,
- 0xa6, 0x56, 0xcd, 0xad, 0x6a, 0x8e, 0x4f, 0x8e, 0xee, 0x17, 0xea, 0xda, 0x14, 0x75, 0xdd, 0x3e,
- 0xb7, 0x2e, 0x09, 0xb7, 0x50, 0xd8, 0x73, 0x00, 0x0d, 0x59, 0x58, 0xaa, 0x0a, 0x78, 0xc2, 0x4b,
- 0xcf, 0x0a, 0xba, 0x0d, 0x6b, 0x11, 0x9d, 0xf9, 0xdc, 0x67, 0xc1, 0x20, 0x48, 0xa6, 0x0e, 0x8d,
- 0x04, 0x92, 0x6d, 0x6b, 0x37, 0x13, 0x7f, 0x2e, 0xa4, 0x05, 0xc3, 0x5c, 0xaf, 0x73, 0x86, 0xb2,
- 0x95, 0xe8, 0x16, 0xbc, 0x3c, 0x49, 0xeb, 0x8b, 0x33, 0xb3, 0xed, 0x36, 0xe8, 0xec, 0x58, 0x55,
- 0x29, 0x54, 0xfd, 0xfe, 0x19, 0xc0, 0x1b, 0x5a, 0xc8, 0xaa, 0x1b, 0x1f, 0xc1, 0x9a, 0x9b, 0x69,
- 0x4a, 0x0c, 0xea, 0xae, 0x5b, 0x08, 0xf3, 0x7a, 0x67, 0xf5, 0x99, 0x1e, 0x3b, 0x2f, 0xc5, 0xf7,
- 0x3d, 0x4d, 0xd3, 0xff, 0xcb, 0x30, 0x9f, 0x00, 0xb8, 0xa7, 0x07, 0xa1, 0x18, 0x1c, 0xc0, 0x37,
- 0x96, 0x18, 0xcc, 0x46, 0x1a, 0xeb, 0x0b, 0x2e, 0x06, 0xfa, 0xca, 0x8f, 0xc7, 0x05, 0x0a, 0x6a,
- 0x45, 0x8a, 0xff, 0xc7, 0xf1, 0x35, 0x0a, 0xbb, 0xff, 0xc8, 0x8e, 0xec, 0x69, 0xc6, 0xa5, 0xf9,
- 0xb8, 0xb0, 0xb2, 0x99, 0x4e, 0x95, 0xf8, 0x01, 0xbc, 0x18, 0x0a, 0x89, 0x9a, 0x8d, 0x15, 0x9d,
- 0x54, 0x5e, 0xca, 0xd6, 0x1c, 0xc0, 0x96, 0x08, 0xf9, 0x65, 0xe8, 0x45, 0xf6, 0xb0, 0xb0, 0xa1,
- 0xa5, 0x3a, 0xd8, 0x82, 0x95, 0x70, 0x62, 0x2f, 0x96, 0x20, 0x2d, 0x7c, 0xcb, 0x82, 0xa9, 0x48,
- 0xcd, 0xc7, 0x04, 0xb6, 0x57, 0x27, 0x50, 0xd0, 0x1f, 0xc0, 0xab, 0x89, 0x52, 0x0f, 0x4a, 0x9f,
- 0xe3, 0x2b, 0xc9, 0xd9, 0x88, 0xfd, 0x5f, 0x76, 0xe0, 0x05, 0x91, 0x0e, 0xfd, 0x00, 0x60, 0x25,
- 0xa7, 0x41, 0x5d, 0x3d, 0x1d, 0x2b, 0x3e, 0x29, 0x06, 0x2e, 0x6b, 0x2e, 0x4b, 0x30, 0xef, 0x3c,
- 0xfb, 0xed, 0x9f, 0xef, 0x36, 0x09, 0xea, 0x12, 0xdf, 0x71, 0xf5, 0x9f, 0x45, 0x35, 0x77, 0xe4,
- 0xe9, 0x82, 0xcd, 0x63, 0xf4, 0x3d, 0x80, 0xd5, 0xfc, 0x51, 0x44, 0x25, 0xf3, 0x66, 0x23, 0x61,
- 0x90, 0xd2, 0xf6, 0x0a, 0xe8, 0x3b, 0x02, 0xe8, 0x2d, 0x74, 0xf3, 0x5c, 0xa0, 0x68, 0x0e, 0xe0,
- 0x6e, 0x71, 0x0f, 0xd0, 0xfe, 0xab, 0xd2, 0xe9, 0xee, 0xad, 0xd1, 0x5b, 0xc3, 0x43, 0x41, 0x9c,
- 0x08, 0x88, 0x23, 0x34, 0xd4, 0x42, 0x5c, 0x5a, 0xe3, 0x3c, 0x9d, 0x24, 0x3b, 0xbe, 0xe4, 0xe9,
- 0xd2, 0x19, 0x3f, 0x26, 0x72, 0x40, 0x73, 0x0a, 0x29, 0x38, 0x46, 0x3f, 0x01, 0x58, 0x5b, 0x3a,
- 0x1b, 0xa8, 0x3c, 0xe8, 0x45, 0x23, 0xfa, 0xeb, 0xb8, 0xa8, 0x42, 0xef, 0x8a, 0x42, 0xfb, 0x68,
- 0x7f, 0xdd, 0x42, 0xd1, 0x37, 0x8b, 0xb9, 0x91, 0xfb, 0x5c, 0x62, 0x6e, 0x0a, 0xa7, 0xa4, 0xc4,
- 0xdc, 0x14, 0xcf, 0x8b, 0xf9, 0x96, 0xc0, 0x7a, 0x0d, 0x5d, 0x95, 0x58, 0x17, 0x30, 0xe5, 0x1d,
- 0x41, 0xbf, 0x02, 0x78, 0x45, 0xb3, 0xe2, 0xe8, 0xce, 0x2b, 0xf2, 0xac, 0xbe, 0x39, 0xc6, 0xc1,
- 0xba, 0x6e, 0x0a, 0xe5, 0xc7, 0x02, 0xe5, 0x5d, 0x74, 0xa0, 0x63, 0x54, 0x7b, 0x63, 0x0a, 0xbc,
- 0x1e, 0xde, 0x3f, 0x39, 0x6d, 0x82, 0x17, 0xa7, 0x4d, 0xf0, 0xf7, 0x69, 0x13, 0x7c, 0x3b, 0x6f,
- 0x6e, 0xbc, 0x98, 0x37, 0x37, 0xfe, 0x98, 0x37, 0x37, 0xbe, 0xee, 0x7a, 0x7e, 0x3c, 0x4e, 0x1c,
- 0xec, 0xb2, 0x29, 0x51, 0xff, 0xc9, 0xbe, 0xe3, 0x76, 0xb3, 0x7f, 0xe0, 0xfd, 0x7e, 0x57, 0x25,
- 0x8a, 0x8f, 0x42, 0xca, 0x9d, 0x8b, 0xe2, 0x56, 0xbd, 0xff, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff,
- 0xa1, 0x2d, 0x40, 0x65, 0x8c, 0x0b, 0x00, 0x00,
+ 0x3f, 0x00, 0x21, 0x71, 0xe3, 0xca, 0x8d, 0x53, 0xc5, 0x8d, 0x03, 0x37, 0x0e, 0x7b, 0xac, 0x84,
+ 0x84, 0x38, 0x01, 0xda, 0xf0, 0x87, 0x20, 0xcf, 0x8c, 0x53, 0x3b, 0x3b, 0x69, 0x1d, 0x44, 0x6f,
+ 0xde, 0xf7, 0xf3, 0xfb, 0xbe, 0xf7, 0xe6, 0x65, 0x61, 0xdb, 0x77, 0x5c, 0x8f, 0x11, 0x97, 0x45,
+ 0x94, 0xb8, 0x13, 0x9f, 0x06, 0x31, 0x99, 0xf5, 0xc8, 0xa3, 0x84, 0x46, 0xc7, 0x38, 0x8c, 0x58,
+ 0xcc, 0x50, 0x5d, 0x44, 0xe0, 0x34, 0x02, 0xcb, 0x08, 0x3c, 0xeb, 0x19, 0x6f, 0xbb, 0x8c, 0x4f,
+ 0x19, 0x27, 0x8e, 0xcd, 0xa9, 0x0c, 0x27, 0xb3, 0x9e, 0x43, 0x63, 0xbb, 0x47, 0x42, 0xdb, 0xf3,
+ 0x03, 0x3b, 0xf6, 0x59, 0x20, 0x2b, 0x18, 0x37, 0xb5, 0x3d, 0x54, 0x2d, 0x19, 0x72, 0xdd, 0x63,
+ 0xcc, 0x9b, 0x50, 0x22, 0xfe, 0x72, 0x92, 0x11, 0xb1, 0x03, 0xd5, 0xdf, 0xd8, 0x53, 0x2e, 0x3b,
+ 0xf4, 0x89, 0x1d, 0x04, 0x2c, 0x16, 0xa5, 0xb9, 0xf2, 0xd6, 0x3d, 0xe6, 0x31, 0xf1, 0x49, 0xd2,
+ 0x2f, 0x69, 0x35, 0x0f, 0xe1, 0xb5, 0x87, 0x29, 0xa6, 0x8f, 0x45, 0x8f, 0x2f, 0x62, 0x3b, 0xa6,
+ 0x16, 0x7d, 0x94, 0x50, 0x1e, 0xa3, 0x1b, 0xf0, 0x92, 0xec, 0x3c, 0xf0, 0x87, 0x0d, 0xd0, 0x06,
+ 0x9d, 0x4b, 0xd6, 0x8e, 0x34, 0xdc, 0x1f, 0x9a, 0x4f, 0x01, 0x6c, 0x9c, 0x4f, 0xe4, 0x21, 0x0b,
+ 0x38, 0x45, 0xef, 0xc3, 0xaa, 0xca, 0xe4, 0xa9, 0x5d, 0x24, 0x57, 0xfa, 0x75, 0x2c, 0xf1, 0xe1,
+ 0x0c, 0x3a, 0xfe, 0x28, 0x38, 0xb6, 0x2a, 0xee, 0xf3, 0x02, 0xa8, 0x0e, 0x2f, 0x84, 0x11, 0x63,
+ 0xa3, 0xc6, 0x66, 0x1b, 0x74, 0xaa, 0x96, 0xfc, 0x03, 0x7d, 0x02, 0xab, 0xe2, 0x63, 0x30, 0xa6,
+ 0xbe, 0x37, 0x8e, 0x1b, 0x5b, 0xa2, 0xdc, 0x1e, 0xd6, 0xc9, 0x8d, 0xef, 0x89, 0x98, 0xa3, 0xed,
+ 0xd3, 0x3f, 0x5b, 0x1b, 0x56, 0x45, 0xe4, 0x49, 0x93, 0xe9, 0x9c, 0x47, 0xcc, 0x33, 0xae, 0x77,
+ 0x21, 0x7c, 0x3e, 0x0c, 0x85, 0xf7, 0x4d, 0x2c, 0x27, 0x87, 0xd3, 0xc9, 0x61, 0x39, 0x68, 0x35,
+ 0x39, 0xfc, 0xc0, 0xf6, 0x32, 0x9d, 0xac, 0x5c, 0xa6, 0xf9, 0x3b, 0x80, 0xd7, 0x35, 0x4d, 0x94,
+ 0x2e, 0x21, 0xbc, 0x9c, 0xd7, 0x85, 0x37, 0x40, 0x7b, 0xab, 0x53, 0xe9, 0xbf, 0xa3, 0x67, 0x72,
+ 0x7f, 0x48, 0x83, 0xd8, 0x1f, 0xf9, 0x74, 0x98, 0x2b, 0x76, 0xd4, 0x4c, 0x89, 0xfd, 0xf8, 0x57,
+ 0xeb, 0x75, 0xad, 0x9b, 0x5b, 0xd5, 0x9c, 0x9e, 0x1c, 0x7d, 0x5a, 0xe0, 0xb5, 0x29, 0x78, 0xdd,
+ 0x7e, 0x29, 0x2f, 0x09, 0xb7, 0x40, 0xec, 0x29, 0x80, 0x86, 0x24, 0x96, 0xba, 0x02, 0x9e, 0xf0,
+ 0xd2, 0xbb, 0x82, 0x6e, 0xc3, 0x5a, 0x44, 0x67, 0x3e, 0xf7, 0x59, 0x30, 0x08, 0x92, 0xa9, 0x43,
+ 0x23, 0x81, 0x64, 0xdb, 0xda, 0xcd, 0xcc, 0x9f, 0x09, 0x6b, 0x21, 0x30, 0x37, 0xeb, 0x5c, 0xa0,
+ 0x1c, 0x25, 0xba, 0x05, 0x2f, 0x4f, 0x52, 0x7e, 0x71, 0x16, 0xb6, 0xdd, 0x06, 0x9d, 0x1d, 0xab,
+ 0x2a, 0x8d, 0x6a, 0xde, 0x3f, 0x03, 0x78, 0x43, 0x0b, 0x59, 0x4d, 0xe3, 0x03, 0x58, 0x73, 0x33,
+ 0x4f, 0x89, 0x45, 0xdd, 0x75, 0x0b, 0x65, 0x5e, 0xed, 0xae, 0x3e, 0xd1, 0x63, 0xe7, 0xa5, 0xf4,
+ 0xbe, 0xab, 0x19, 0xfa, 0x7f, 0x59, 0xe6, 0x53, 0x00, 0xf7, 0xf4, 0x20, 0x94, 0x82, 0x03, 0xf8,
+ 0xda, 0x92, 0x82, 0xd9, 0x4a, 0x63, 0x3d, 0xe1, 0x62, 0xa1, 0xaf, 0xfc, 0x78, 0x5c, 0x90, 0xa0,
+ 0x56, 0x94, 0xf8, 0x7f, 0x5c, 0x5f, 0xa3, 0xf0, 0xf6, 0x1f, 0xd8, 0x91, 0x3d, 0xcd, 0xb4, 0x34,
+ 0x1f, 0x16, 0x9e, 0x6c, 0xe6, 0x53, 0x14, 0xdf, 0x83, 0x17, 0x43, 0x61, 0x51, 0xbb, 0xb1, 0x62,
+ 0x92, 0x2a, 0x4b, 0xc5, 0x9a, 0x03, 0xd8, 0x12, 0x25, 0xbf, 0x0c, 0xbd, 0xc8, 0x1e, 0x16, 0x5e,
+ 0x68, 0xa9, 0x09, 0xb6, 0x60, 0x25, 0x9c, 0xd8, 0x8b, 0x47, 0x90, 0x12, 0xdf, 0xb2, 0x60, 0x6a,
+ 0x52, 0xfb, 0x31, 0x81, 0xed, 0xd5, 0x0d, 0x14, 0xf4, 0x7b, 0xf0, 0x6a, 0xa2, 0xdc, 0x83, 0xd2,
+ 0xe7, 0xf8, 0x4a, 0x72, 0xbe, 0x62, 0xff, 0x97, 0x1d, 0x78, 0x41, 0xb4, 0x43, 0x3f, 0x00, 0x58,
+ 0xc9, 0x79, 0x50, 0x57, 0x2f, 0xc7, 0x8a, 0x9f, 0x14, 0x03, 0x97, 0x0d, 0x97, 0x14, 0xcc, 0x83,
+ 0x27, 0xbf, 0xfd, 0xf3, 0xdd, 0x26, 0x41, 0x5d, 0xe2, 0x3b, 0xae, 0xfe, 0x67, 0x51, 0xed, 0x1d,
+ 0x79, 0xbc, 0x50, 0xf3, 0x04, 0x7d, 0x0f, 0x60, 0x35, 0x7f, 0x14, 0x51, 0xc9, 0xbe, 0xd9, 0x4a,
+ 0x18, 0xa4, 0x74, 0xbc, 0x02, 0xfa, 0x96, 0x00, 0x7a, 0x0b, 0xdd, 0x7c, 0x29, 0x50, 0x34, 0x07,
+ 0x70, 0xb7, 0xf8, 0x0e, 0xd0, 0xfe, 0x8b, 0xda, 0xe9, 0xee, 0xad, 0xd1, 0x5b, 0x23, 0x43, 0x41,
+ 0x9c, 0x08, 0x88, 0x23, 0x34, 0xd4, 0x42, 0x5c, 0x7a, 0xc6, 0x79, 0x39, 0x49, 0x76, 0x7c, 0xc9,
+ 0xe3, 0xa5, 0x33, 0x7e, 0x42, 0xe4, 0x82, 0xe6, 0x1c, 0xd2, 0x70, 0x82, 0x7e, 0x02, 0xb0, 0xb6,
+ 0x74, 0x36, 0x50, 0x79, 0xd0, 0x8b, 0x41, 0xf4, 0xd7, 0x49, 0x51, 0x44, 0xef, 0x08, 0xa2, 0x7d,
+ 0xb4, 0xbf, 0x2e, 0x51, 0xf4, 0xcd, 0x62, 0x6f, 0xe4, 0x7b, 0x2e, 0xb1, 0x37, 0x85, 0x53, 0x52,
+ 0x62, 0x6f, 0x8a, 0xe7, 0xc5, 0x7c, 0x43, 0x60, 0xbd, 0x86, 0xae, 0x4a, 0xac, 0x0b, 0x98, 0xf2,
+ 0x8e, 0xa0, 0x5f, 0x01, 0xbc, 0xa2, 0x79, 0xe2, 0xe8, 0xe0, 0x05, 0x7d, 0x56, 0xdf, 0x1c, 0xe3,
+ 0x70, 0xdd, 0x34, 0x85, 0xf2, 0x43, 0x81, 0xf2, 0x0e, 0x3a, 0xd4, 0x29, 0xaa, 0xbd, 0x31, 0x05,
+ 0x5d, 0x8f, 0x3e, 0x3f, 0x3d, 0x6b, 0x82, 0x67, 0x67, 0x4d, 0xf0, 0xf7, 0x59, 0x13, 0x7c, 0x3b,
+ 0x6f, 0x6e, 0x3c, 0x9b, 0x37, 0x37, 0xfe, 0x98, 0x37, 0x37, 0xbe, 0x3e, 0xf0, 0xfc, 0x78, 0x9c,
+ 0x38, 0xd8, 0x65, 0x53, 0xa2, 0xfe, 0x4f, 0xf6, 0x1d, 0xb7, 0xeb, 0x31, 0x32, 0x65, 0xc3, 0x64,
+ 0x42, 0xb9, 0xec, 0xb6, 0xdf, 0xef, 0xaa, 0x86, 0xf1, 0x71, 0x48, 0xb9, 0x73, 0x51, 0xdc, 0xac,
+ 0x77, 0xff, 0x0d, 0x00, 0x00, 0xff, 0xff, 0x42, 0xbb, 0x44, 0xc2, 0x94, 0x0b, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
diff --git a/core/02-client/types/query.pb.gw.go b/modules/core/02-client/types/query.pb.gw.go
similarity index 100%
rename from core/02-client/types/query.pb.gw.go
rename to modules/core/02-client/types/query.pb.gw.go
diff --git a/core/02-client/types/tx.pb.go b/modules/core/02-client/types/tx.pb.go
similarity index 93%
rename from core/02-client/types/tx.pb.go
rename to modules/core/02-client/types/tx.pb.go
index 1adac387..1067063e 100644
--- a/core/02-client/types/tx.pb.go
+++ b/modules/core/02-client/types/tx.pb.go
@@ -375,45 +375,45 @@ func init() {
func init() { proto.RegisterFile("ibcgo/core/client/v1/tx.proto", fileDescriptor_3848774a44f81317) }
var fileDescriptor_3848774a44f81317 = []byte{
- // 600 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x55, 0x3d, 0x6f, 0xd3, 0x40,
- 0x18, 0x8e, 0x1b, 0x88, 0xda, 0x6b, 0xa0, 0x95, 0x09, 0x6d, 0x6a, 0x14, 0x3b, 0x58, 0x80, 0x22,
- 0x41, 0xce, 0x24, 0x5d, 0x50, 0x37, 0xd2, 0x01, 0x31, 0x44, 0x02, 0x57, 0x0c, 0xb0, 0x44, 0xfe,
- 0xb8, 0x5e, 0x4e, 0x24, 0xbe, 0xc8, 0x67, 0x47, 0xe4, 0x1f, 0x30, 0x32, 0xf0, 0x03, 0x3a, 0xf1,
- 0x03, 0xf8, 0x15, 0x8c, 0x1d, 0x18, 0x98, 0xa2, 0x2a, 0x59, 0x98, 0xf3, 0x0b, 0x50, 0xee, 0x9c,
- 0x10, 0x1b, 0xc7, 0x0a, 0x5f, 0x9b, 0xdf, 0x7b, 0x9f, 0x7b, 0x9e, 0xf7, 0xf1, 0xfb, 0xde, 0x1d,
- 0xa8, 0x10, 0xdb, 0xc1, 0xd4, 0x70, 0xa8, 0x8f, 0x0c, 0xa7, 0x47, 0x90, 0x17, 0x18, 0xc3, 0x86,
- 0x11, 0xbc, 0x83, 0x03, 0x9f, 0x06, 0x54, 0x2e, 0xf1, 0x34, 0x9c, 0xa7, 0xa1, 0x48, 0xc3, 0x61,
- 0x43, 0x29, 0x61, 0x8a, 0x29, 0x07, 0x18, 0xf3, 0x2f, 0x81, 0x55, 0x8e, 0x30, 0xa5, 0xb8, 0x87,
- 0x0c, 0x1e, 0xd9, 0xe1, 0xb9, 0x61, 0x79, 0xa3, 0x28, 0x75, 0x37, 0x55, 0x25, 0x22, 0xe4, 0x10,
- 0xfd, 0x4a, 0x02, 0x7b, 0x6d, 0x86, 0x4f, 0x7d, 0x64, 0x05, 0xe8, 0x94, 0x67, 0xe4, 0x17, 0xa0,
- 0x28, 0x30, 0x1d, 0x16, 0x58, 0x01, 0x2a, 0x4b, 0x55, 0xa9, 0xb6, 0xdb, 0x2c, 0x41, 0x21, 0x04,
- 0x17, 0x42, 0xf0, 0xa9, 0x37, 0x6a, 0x1d, 0xce, 0xc6, 0xda, 0xad, 0x91, 0xd5, 0xef, 0x9d, 0xe8,
- 0xab, 0x7b, 0x74, 0x73, 0x57, 0x84, 0x67, 0xf3, 0x48, 0x7e, 0x0d, 0xf6, 0x1c, 0xea, 0x31, 0xe4,
- 0xb1, 0x90, 0x45, 0xa4, 0x5b, 0x19, 0xa4, 0xca, 0x6c, 0xac, 0x1d, 0x44, 0xa4, 0xf1, 0x6d, 0xba,
- 0x79, 0x73, 0xb9, 0x22, 0xa8, 0x0f, 0x40, 0x81, 0x11, 0xec, 0x21, 0xbf, 0x9c, 0xaf, 0x4a, 0xb5,
- 0x1d, 0x33, 0x8a, 0x4e, 0xb6, 0xdf, 0x5f, 0x68, 0xb9, 0xef, 0x17, 0x5a, 0x4e, 0x3f, 0x02, 0x87,
- 0x09, 0x87, 0x26, 0x62, 0x83, 0x39, 0x8b, 0xfe, 0x51, 0xb8, 0x7f, 0x35, 0x70, 0x7f, 0xba, 0x6f,
- 0x80, 0x9d, 0xc8, 0x09, 0x71, 0xb9, 0xf5, 0x9d, 0x56, 0x69, 0x36, 0xd6, 0xf6, 0x63, 0x26, 0x89,
- 0xab, 0x9b, 0xdb, 0xe2, 0xfb, 0xb9, 0x2b, 0x3f, 0x02, 0x85, 0x2e, 0xb2, 0x5c, 0xe4, 0x67, 0xb9,
- 0x32, 0x23, 0xcc, 0xc6, 0x15, 0xaf, 0x56, 0xb5, 0xac, 0xf8, 0x6b, 0x1e, 0xec, 0xf3, 0x1c, 0xf6,
- 0x2d, 0xf7, 0x2f, 0x4a, 0x4e, 0xf6, 0x78, 0xeb, 0x7f, 0xf4, 0x38, 0xff, 0x8f, 0x7a, 0xfc, 0x12,
- 0x94, 0x06, 0x3e, 0xa5, 0xe7, 0x9d, 0x50, 0xd8, 0xee, 0x08, 0xdd, 0xf2, 0xb5, 0xaa, 0x54, 0x2b,
- 0xb6, 0xb4, 0xd9, 0x58, 0xbb, 0x23, 0x98, 0xd2, 0x50, 0xba, 0x29, 0xf3, 0xe5, 0xf8, 0x2f, 0x7b,
- 0x0b, 0x2a, 0x09, 0x70, 0xa2, 0xf6, 0xeb, 0x9c, 0xbb, 0x36, 0x1b, 0x6b, 0xf7, 0x52, 0xb9, 0x93,
- 0x35, 0x2b, 0x31, 0x91, 0x75, 0x33, 0x5a, 0x58, 0xd3, 0x71, 0x05, 0x94, 0x93, 0x5d, 0x5d, 0xb6,
- 0xfc, 0x93, 0x04, 0x6e, 0xb7, 0x19, 0x3e, 0x0b, 0xed, 0x3e, 0x09, 0xda, 0x84, 0xd9, 0xa8, 0x6b,
- 0x0d, 0x09, 0x0d, 0xfd, 0x3f, 0xe9, 0xfb, 0x13, 0x50, 0xec, 0xaf, 0x50, 0x64, 0x0e, 0x6c, 0x0c,
- 0xb9, 0xc1, 0xd8, 0x6a, 0xa0, 0x92, 0x5a, 0xe7, 0xc2, 0x49, 0xf3, 0x73, 0x1e, 0xe4, 0xdb, 0x0c,
- 0xcb, 0x2e, 0x28, 0xc6, 0x2e, 0x9c, 0xfb, 0x30, 0xed, 0xbe, 0x83, 0x89, 0x53, 0xab, 0xd4, 0x37,
- 0x82, 0x2d, 0xd4, 0xe6, 0x2a, 0xb1, 0x83, 0xbd, 0x5e, 0x65, 0x15, 0x96, 0xa1, 0x92, 0x76, 0x20,
- 0x65, 0x0c, 0x6e, 0xc4, 0x27, 0xeb, 0x41, 0xc6, 0xfe, 0x15, 0x9c, 0x02, 0x37, 0xc3, 0x2d, 0x85,
- 0x86, 0x40, 0x4e, 0x19, 0x81, 0x87, 0x6b, 0x59, 0x7e, 0x05, 0x2b, 0xc7, 0xbf, 0x01, 0x5e, 0xe8,
- 0xb6, 0x9e, 0x7d, 0x99, 0xa8, 0xd2, 0xe5, 0x44, 0x95, 0xae, 0x26, 0xaa, 0xf4, 0x61, 0xaa, 0xe6,
- 0x2e, 0xa7, 0x6a, 0xee, 0xdb, 0x54, 0xcd, 0xbd, 0xa9, 0x63, 0x12, 0x74, 0x43, 0x1b, 0x3a, 0xb4,
- 0x6f, 0x38, 0x94, 0xf5, 0x29, 0x33, 0x88, 0xed, 0xd4, 0x17, 0x2f, 0xce, 0xe3, 0x66, 0x3d, 0x7a,
- 0x74, 0x82, 0xd1, 0x00, 0x31, 0xbb, 0xc0, 0x87, 0xeb, 0xf8, 0x47, 0x00, 0x00, 0x00, 0xff, 0xff,
- 0xb4, 0xb9, 0x0f, 0xae, 0xfc, 0x06, 0x00, 0x00,
+ // 607 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x55, 0xcb, 0x6e, 0xd3, 0x40,
+ 0x14, 0x8d, 0x1b, 0x88, 0x9a, 0x69, 0xa0, 0x95, 0x09, 0x6d, 0x6a, 0x14, 0x3b, 0x58, 0x80, 0x22,
+ 0x41, 0xc6, 0x24, 0x15, 0x12, 0xea, 0x8e, 0x74, 0xc5, 0x22, 0x02, 0x5c, 0xb1, 0x80, 0x4d, 0xe4,
+ 0xc7, 0x74, 0x62, 0x11, 0x7b, 0x22, 0x8f, 0x1d, 0x91, 0x3f, 0x60, 0xc9, 0x82, 0x0f, 0xe8, 0x8a,
+ 0x0f, 0xe0, 0x2b, 0x58, 0x76, 0xc1, 0x82, 0x55, 0x54, 0x25, 0x1b, 0xd6, 0xf9, 0x02, 0x94, 0x19,
+ 0x27, 0xc4, 0xc6, 0xb1, 0xc2, 0xa3, 0x3b, 0xdf, 0xb9, 0x67, 0xce, 0xb9, 0xc7, 0xf7, 0xce, 0x0c,
+ 0xa8, 0x3a, 0xa6, 0x85, 0x89, 0x66, 0x11, 0x1f, 0x69, 0x56, 0xdf, 0x41, 0x5e, 0xa0, 0x0d, 0x9b,
+ 0x5a, 0xf0, 0x1e, 0x0e, 0x7c, 0x12, 0x10, 0xb1, 0xcc, 0xd2, 0x70, 0x9e, 0x86, 0x3c, 0x0d, 0x87,
+ 0x4d, 0xa9, 0x8c, 0x09, 0x26, 0x0c, 0xa0, 0xcd, 0xbf, 0x38, 0x56, 0x3a, 0xc4, 0x84, 0xe0, 0x3e,
+ 0xd2, 0x58, 0x64, 0x86, 0x67, 0x9a, 0xe1, 0x8d, 0xa2, 0xd4, 0xdd, 0x54, 0x95, 0x88, 0x90, 0x41,
+ 0xd4, 0x4b, 0x01, 0xec, 0x76, 0x28, 0x3e, 0xf1, 0x91, 0x11, 0xa0, 0x13, 0x96, 0x11, 0x5f, 0x82,
+ 0x12, 0xc7, 0x74, 0x69, 0x60, 0x04, 0xa8, 0x22, 0xd4, 0x84, 0xfa, 0x4e, 0xab, 0x0c, 0xb9, 0x10,
+ 0x5c, 0x08, 0xc1, 0x67, 0xde, 0xa8, 0x7d, 0x30, 0x1b, 0x2b, 0xb7, 0x46, 0x86, 0xdb, 0x3f, 0x56,
+ 0x57, 0xf7, 0xa8, 0xfa, 0x0e, 0x0f, 0x4f, 0xe7, 0x91, 0xf8, 0x06, 0xec, 0x5a, 0xc4, 0xa3, 0xc8,
+ 0xa3, 0x21, 0x8d, 0x48, 0xb7, 0x32, 0x48, 0xa5, 0xd9, 0x58, 0xd9, 0x8f, 0x48, 0xe3, 0xdb, 0x54,
+ 0xfd, 0xe6, 0x72, 0x85, 0x53, 0xef, 0x83, 0x02, 0x75, 0xb0, 0x87, 0xfc, 0x4a, 0xbe, 0x26, 0xd4,
+ 0x8b, 0x7a, 0x14, 0x1d, 0x6f, 0x7f, 0x38, 0x57, 0x72, 0x3f, 0xce, 0x95, 0x9c, 0x7a, 0x08, 0x0e,
+ 0x12, 0x0e, 0x75, 0x44, 0x07, 0x73, 0x16, 0xf5, 0x13, 0x77, 0xff, 0x7a, 0x60, 0xff, 0x72, 0xdf,
+ 0x04, 0xc5, 0xc8, 0x89, 0x63, 0x33, 0xeb, 0xc5, 0x76, 0x79, 0x36, 0x56, 0xf6, 0x62, 0x26, 0x1d,
+ 0x5b, 0xd5, 0xb7, 0xf9, 0xf7, 0x73, 0x5b, 0x7c, 0x04, 0x0a, 0x3d, 0x64, 0xd8, 0xc8, 0xcf, 0x72,
+ 0xa5, 0x47, 0x98, 0x8d, 0x2b, 0x5e, 0xad, 0x6a, 0x59, 0xf1, 0xb7, 0x3c, 0xd8, 0x63, 0x39, 0xec,
+ 0x1b, 0xf6, 0x3f, 0x94, 0x9c, 0xec, 0xf1, 0xd6, 0x55, 0xf4, 0x38, 0xff, 0x9f, 0x7a, 0xfc, 0x0a,
+ 0x94, 0x07, 0x3e, 0x21, 0x67, 0xdd, 0x90, 0xdb, 0xee, 0x72, 0xdd, 0xca, 0xb5, 0x9a, 0x50, 0x2f,
+ 0xb5, 0x95, 0xd9, 0x58, 0xb9, 0xc3, 0x99, 0xd2, 0x50, 0xaa, 0x2e, 0xb2, 0xe5, 0xf8, 0x2f, 0x7b,
+ 0x07, 0xaa, 0x09, 0x70, 0xa2, 0xf6, 0xeb, 0x8c, 0xbb, 0x3e, 0x1b, 0x2b, 0xf7, 0x52, 0xb9, 0x93,
+ 0x35, 0x4b, 0x31, 0x91, 0x75, 0x33, 0x5a, 0x58, 0xd3, 0x71, 0x09, 0x54, 0x92, 0x5d, 0x5d, 0xb6,
+ 0xfc, 0xb3, 0x00, 0x6e, 0x77, 0x28, 0x3e, 0x0d, 0x4d, 0xd7, 0x09, 0x3a, 0x0e, 0x35, 0x51, 0xcf,
+ 0x18, 0x3a, 0x24, 0xf4, 0xff, 0xa6, 0xef, 0x4f, 0x41, 0xc9, 0x5d, 0xa1, 0xc8, 0x1c, 0xd8, 0x18,
+ 0x72, 0x83, 0xb1, 0x55, 0x40, 0x35, 0xb5, 0xce, 0x85, 0x93, 0xd6, 0x97, 0x3c, 0xc8, 0x77, 0x28,
+ 0x16, 0x6d, 0x50, 0x8a, 0x5d, 0x38, 0xf7, 0x61, 0xda, 0x7d, 0x07, 0x13, 0xa7, 0x56, 0x6a, 0x6c,
+ 0x04, 0x5b, 0xa8, 0xcd, 0x55, 0x62, 0x07, 0x7b, 0xbd, 0xca, 0x2a, 0x2c, 0x43, 0x25, 0xed, 0x40,
+ 0x8a, 0x18, 0xdc, 0x88, 0x4f, 0xd6, 0x83, 0x8c, 0xfd, 0x2b, 0x38, 0x09, 0x6e, 0x86, 0x5b, 0x0a,
+ 0x0d, 0x81, 0x98, 0x32, 0x02, 0x0f, 0xd7, 0xb2, 0xfc, 0x0e, 0x96, 0x8e, 0xfe, 0x00, 0xbc, 0xd0,
+ 0x6d, 0xbf, 0xf8, 0x3a, 0x91, 0x85, 0x8b, 0x89, 0x2c, 0x5c, 0x4e, 0x64, 0xe1, 0xe3, 0x54, 0xce,
+ 0x5d, 0x4c, 0xe5, 0xdc, 0xf7, 0xa9, 0x9c, 0x7b, 0xfb, 0x04, 0x3b, 0x41, 0x2f, 0x34, 0xa1, 0x45,
+ 0x5c, 0xcd, 0x22, 0xd4, 0x25, 0x54, 0x73, 0x4c, 0xab, 0x81, 0x89, 0xe6, 0x12, 0x3b, 0xec, 0x23,
+ 0xca, 0x5f, 0x9e, 0xc7, 0xad, 0x46, 0xf4, 0xf8, 0x04, 0xa3, 0x01, 0xa2, 0x66, 0x81, 0x0d, 0xd9,
+ 0xd1, 0xcf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x51, 0xb7, 0x53, 0x8d, 0x04, 0x07, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
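Note on the hunk above: only the generated descriptor for ibcgo/core/client/v1/tx.proto changes, with the gzipped FileDescriptorProto growing from 600 to 607 bytes, presumably because the Go package path embedded in the descriptor now carries the extra modules/ segment. A minimal inspection sketch, assuming the gogo/protobuf runtime these files are generated against keeps its FileDescriptor accessor (canonical github.com import paths; the blank import and error handling are illustrative only):

    package main

    import (
        "bytes"
        "compress/gzip"
        "fmt"
        "io"

        proto "github.com/gogo/protobuf/proto"

        // Importing the generated package runs its init(), which calls proto.RegisterFile.
        _ "github.com/cosmos/ibc-go/modules/core/02-client/types"
    )

    func main() {
        // FileDescriptor returns the same gzipped bytes that RegisterFile stored above.
        gz := proto.FileDescriptor("ibcgo/core/client/v1/tx.proto")
        zr, err := gzip.NewReader(bytes.NewReader(gz))
        if err != nil {
            panic(err)
        }
        raw, err := io.ReadAll(zr)
        if err != nil {
            panic(err)
        }
        fmt.Printf("gzipped: %d bytes, uncompressed: %d bytes\n", len(gz), len(raw))
    }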
diff --git a/core/03-connection/client/cli/cli.go b/modules/core/03-connection/client/cli/cli.go
similarity index 94%
rename from core/03-connection/client/cli/cli.go
rename to modules/core/03-connection/client/cli/cli.go
index 05c3770c..a7024055 100644
--- a/core/03-connection/client/cli/cli.go
+++ b/modules/core/03-connection/client/cli/cli.go
@@ -4,7 +4,7 @@ import (
"github.com/spf13/cobra"
"github.com/cosmos/cosmos-sdk/client"
- "github.com/cosmos/ibc-go/core/03-connection/types"
+ "github.com/cosmos/ibc-go/modules/core/03-connection/types"
)
// GetQueryCmd returns the query commands for IBC connections
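The rename hunks from here on all follow the same pattern: packages move from core/… to modules/core/… (and, further down, from light-clients/… to modules/light-clients/…), while the testing helpers keep their path at the repository root. A compilable sketch of an import block against the new layout; the file itself and the blank imports are purely illustrative and not part of this patch:

    package main

    import (
        _ "github.com/cosmos/ibc-go/modules/core/02-client/types"
        _ "github.com/cosmos/ibc-go/modules/core/03-connection/types"
        _ "github.com/cosmos/ibc-go/modules/core/24-host"
        _ "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types"
        _ "github.com/cosmos/ibc-go/testing" // unchanged: not moved under modules/
    )

    func main() {}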
diff --git a/core/03-connection/client/cli/query.go b/modules/core/03-connection/client/cli/query.go
similarity index 94%
rename from core/03-connection/client/cli/query.go
rename to modules/core/03-connection/client/cli/query.go
index 5771063c..56cf597c 100644
--- a/core/03-connection/client/cli/query.go
+++ b/modules/core/03-connection/client/cli/query.go
@@ -8,9 +8,9 @@ import (
"github.com/cosmos/cosmos-sdk/client"
"github.com/cosmos/cosmos-sdk/client/flags"
"github.com/cosmos/cosmos-sdk/version"
- "github.com/cosmos/ibc-go/core/03-connection/client/utils"
- "github.com/cosmos/ibc-go/core/03-connection/types"
- host "github.com/cosmos/ibc-go/core/24-host"
+ "github.com/cosmos/ibc-go/modules/core/03-connection/client/utils"
+ "github.com/cosmos/ibc-go/modules/core/03-connection/types"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
)
// GetCmdQueryConnections defines the command to query all the connection ends
diff --git a/core/03-connection/client/cli/tx.go b/modules/core/03-connection/client/cli/tx.go
similarity index 97%
rename from core/03-connection/client/cli/tx.go
rename to modules/core/03-connection/client/cli/tx.go
index 68115ec0..87c8de75 100644
--- a/core/03-connection/client/cli/tx.go
+++ b/modules/core/03-connection/client/cli/tx.go
@@ -14,10 +14,10 @@ import (
"github.com/cosmos/cosmos-sdk/codec"
"github.com/cosmos/cosmos-sdk/types/msgservice"
"github.com/cosmos/cosmos-sdk/version"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- "github.com/cosmos/ibc-go/core/03-connection/client/utils"
- "github.com/cosmos/ibc-go/core/03-connection/types"
- host "github.com/cosmos/ibc-go/core/24-host"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ "github.com/cosmos/ibc-go/modules/core/03-connection/client/utils"
+ "github.com/cosmos/ibc-go/modules/core/03-connection/types"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
)
const (
diff --git a/core/03-connection/client/utils/utils.go b/modules/core/03-connection/client/utils/utils.go
similarity index 94%
rename from core/03-connection/client/utils/utils.go
rename to modules/core/03-connection/client/utils/utils.go
index 035fb508..1091236f 100644
--- a/core/03-connection/client/utils/utils.go
+++ b/modules/core/03-connection/client/utils/utils.go
@@ -10,13 +10,13 @@ import (
"github.com/cosmos/cosmos-sdk/client"
"github.com/cosmos/cosmos-sdk/codec"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- clientutils "github.com/cosmos/ibc-go/core/02-client/client/utils"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- "github.com/cosmos/ibc-go/core/03-connection/types"
- commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
- host "github.com/cosmos/ibc-go/core/24-host"
- ibcclient "github.com/cosmos/ibc-go/core/client"
- "github.com/cosmos/ibc-go/core/exported"
+ clientutils "github.com/cosmos/ibc-go/modules/core/02-client/client/utils"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ "github.com/cosmos/ibc-go/modules/core/03-connection/types"
+ commitmenttypes "github.com/cosmos/ibc-go/modules/core/23-commitment/types"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
+ ibcclient "github.com/cosmos/ibc-go/modules/core/client"
+ "github.com/cosmos/ibc-go/modules/core/exported"
)
// QueryConnection returns a connection end.
diff --git a/core/03-connection/genesis.go b/modules/core/03-connection/genesis.go
similarity index 88%
rename from core/03-connection/genesis.go
rename to modules/core/03-connection/genesis.go
index c97dcc40..ca2d9e7e 100644
--- a/core/03-connection/genesis.go
+++ b/modules/core/03-connection/genesis.go
@@ -2,8 +2,8 @@ package connection
import (
sdk "github.com/cosmos/cosmos-sdk/types"
- "github.com/cosmos/ibc-go/core/03-connection/keeper"
- "github.com/cosmos/ibc-go/core/03-connection/types"
+ "github.com/cosmos/ibc-go/modules/core/03-connection/keeper"
+ "github.com/cosmos/ibc-go/modules/core/03-connection/types"
)
// InitGenesis initializes the ibc connection submodule's state from a provided genesis
diff --git a/core/03-connection/keeper/grpc_query.go b/modules/core/03-connection/keeper/grpc_query.go
similarity index 96%
rename from core/03-connection/keeper/grpc_query.go
rename to modules/core/03-connection/keeper/grpc_query.go
index e8399f4e..e43af8d3 100644
--- a/core/03-connection/keeper/grpc_query.go
+++ b/modules/core/03-connection/keeper/grpc_query.go
@@ -10,9 +10,9 @@ import (
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
"github.com/cosmos/cosmos-sdk/types/query"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- "github.com/cosmos/ibc-go/core/03-connection/types"
- host "github.com/cosmos/ibc-go/core/24-host"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ "github.com/cosmos/ibc-go/modules/core/03-connection/types"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
)
var _ types.QueryServer = Keeper{}
diff --git a/core/03-connection/keeper/grpc_query_test.go b/modules/core/03-connection/keeper/grpc_query_test.go
similarity index 97%
rename from core/03-connection/keeper/grpc_query_test.go
rename to modules/core/03-connection/keeper/grpc_query_test.go
index d0f45308..42892dfd 100644
--- a/core/03-connection/keeper/grpc_query_test.go
+++ b/modules/core/03-connection/keeper/grpc_query_test.go
@@ -5,10 +5,10 @@ import (
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/cosmos/cosmos-sdk/types/query"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- "github.com/cosmos/ibc-go/core/03-connection/types"
- channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
- "github.com/cosmos/ibc-go/core/exported"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ "github.com/cosmos/ibc-go/modules/core/03-connection/types"
+ channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
+ "github.com/cosmos/ibc-go/modules/core/exported"
ibctesting "github.com/cosmos/ibc-go/testing"
)
diff --git a/core/03-connection/keeper/handshake.go b/modules/core/03-connection/keeper/handshake.go
similarity index 98%
rename from core/03-connection/keeper/handshake.go
rename to modules/core/03-connection/keeper/handshake.go
index fe2715c6..50df67ed 100644
--- a/core/03-connection/keeper/handshake.go
+++ b/modules/core/03-connection/keeper/handshake.go
@@ -8,10 +8,10 @@ import (
"github.com/cosmos/cosmos-sdk/telemetry"
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- "github.com/cosmos/ibc-go/core/03-connection/types"
- commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
- "github.com/cosmos/ibc-go/core/exported"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ "github.com/cosmos/ibc-go/modules/core/03-connection/types"
+ commitmenttypes "github.com/cosmos/ibc-go/modules/core/23-commitment/types"
+ "github.com/cosmos/ibc-go/modules/core/exported"
)
// ConnOpenInit initialises a connection attempt on chain A. The generated connection identifier
diff --git a/core/03-connection/keeper/handshake_test.go b/modules/core/03-connection/keeper/handshake_test.go
similarity index 98%
rename from core/03-connection/keeper/handshake_test.go
rename to modules/core/03-connection/keeper/handshake_test.go
index 9cad93d6..1a94fb39 100644
--- a/core/03-connection/keeper/handshake_test.go
+++ b/modules/core/03-connection/keeper/handshake_test.go
@@ -3,11 +3,11 @@ package keeper_test
import (
"time"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- "github.com/cosmos/ibc-go/core/03-connection/types"
- host "github.com/cosmos/ibc-go/core/24-host"
- "github.com/cosmos/ibc-go/core/exported"
- ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ "github.com/cosmos/ibc-go/modules/core/03-connection/types"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
+ "github.com/cosmos/ibc-go/modules/core/exported"
+ ibctmtypes "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types"
)
// TestConnOpenInit - chainA initializes (INIT state) a connection with
diff --git a/core/03-connection/keeper/keeper.go b/modules/core/03-connection/keeper/keeper.go
similarity index 95%
rename from core/03-connection/keeper/keeper.go
rename to modules/core/03-connection/keeper/keeper.go
index 1dfea3d5..235b92b7 100644
--- a/core/03-connection/keeper/keeper.go
+++ b/modules/core/03-connection/keeper/keeper.go
@@ -6,11 +6,11 @@ import (
"github.com/cosmos/cosmos-sdk/codec"
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- "github.com/cosmos/ibc-go/core/03-connection/types"
- commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
- host "github.com/cosmos/ibc-go/core/24-host"
- "github.com/cosmos/ibc-go/core/exported"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ "github.com/cosmos/ibc-go/modules/core/03-connection/types"
+ commitmenttypes "github.com/cosmos/ibc-go/modules/core/23-commitment/types"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
+ "github.com/cosmos/ibc-go/modules/core/exported"
)
// Keeper defines the IBC connection keeper
diff --git a/core/03-connection/keeper/keeper_test.go b/modules/core/03-connection/keeper/keeper_test.go
similarity index 97%
rename from core/03-connection/keeper/keeper_test.go
rename to modules/core/03-connection/keeper/keeper_test.go
index a2c30e44..a6a6807e 100644
--- a/core/03-connection/keeper/keeper_test.go
+++ b/modules/core/03-connection/keeper/keeper_test.go
@@ -6,8 +6,8 @@ import (
"github.com/stretchr/testify/suite"
- "github.com/cosmos/ibc-go/core/03-connection/types"
- "github.com/cosmos/ibc-go/core/exported"
+ "github.com/cosmos/ibc-go/modules/core/03-connection/types"
+ "github.com/cosmos/ibc-go/modules/core/exported"
ibctesting "github.com/cosmos/ibc-go/testing"
)
diff --git a/core/03-connection/keeper/verify.go b/modules/core/03-connection/keeper/verify.go
similarity index 98%
rename from core/03-connection/keeper/verify.go
rename to modules/core/03-connection/keeper/verify.go
index c8b57ea6..9355d4ef 100644
--- a/core/03-connection/keeper/verify.go
+++ b/modules/core/03-connection/keeper/verify.go
@@ -3,8 +3,8 @@ package keeper
import (
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- "github.com/cosmos/ibc-go/core/exported"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ "github.com/cosmos/ibc-go/modules/core/exported"
)
// VerifyClientState verifies a proof of a client state of the running machine
diff --git a/core/03-connection/keeper/verify_test.go b/modules/core/03-connection/keeper/verify_test.go
similarity index 97%
rename from core/03-connection/keeper/verify_test.go
rename to modules/core/03-connection/keeper/verify_test.go
index f9a71e29..d11db9d7 100644
--- a/core/03-connection/keeper/verify_test.go
+++ b/modules/core/03-connection/keeper/verify_test.go
@@ -4,12 +4,12 @@ import (
"fmt"
"time"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- "github.com/cosmos/ibc-go/core/03-connection/types"
- channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
- host "github.com/cosmos/ibc-go/core/24-host"
- "github.com/cosmos/ibc-go/core/exported"
- ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ "github.com/cosmos/ibc-go/modules/core/03-connection/types"
+ channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
+ "github.com/cosmos/ibc-go/modules/core/exported"
+ ibctmtypes "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types"
ibctesting "github.com/cosmos/ibc-go/testing"
ibcmock "github.com/cosmos/ibc-go/testing/mock"
)
diff --git a/core/03-connection/module.go b/modules/core/03-connection/module.go
similarity index 83%
rename from core/03-connection/module.go
rename to modules/core/03-connection/module.go
index dc3432cb..c0bbc68f 100644
--- a/core/03-connection/module.go
+++ b/modules/core/03-connection/module.go
@@ -4,8 +4,8 @@ import (
"github.com/gogo/protobuf/grpc"
"github.com/spf13/cobra"
- "github.com/cosmos/ibc-go/core/03-connection/client/cli"
- "github.com/cosmos/ibc-go/core/03-connection/types"
+ "github.com/cosmos/ibc-go/modules/core/03-connection/client/cli"
+ "github.com/cosmos/ibc-go/modules/core/03-connection/types"
)
// Name returns the IBC connection ICS name.
diff --git a/core/03-connection/simulation/decoder.go b/modules/core/03-connection/simulation/decoder.go
similarity index 90%
rename from core/03-connection/simulation/decoder.go
rename to modules/core/03-connection/simulation/decoder.go
index 95766356..8c485230 100644
--- a/core/03-connection/simulation/decoder.go
+++ b/modules/core/03-connection/simulation/decoder.go
@@ -6,8 +6,8 @@ import (
"github.com/cosmos/cosmos-sdk/codec"
"github.com/cosmos/cosmos-sdk/types/kv"
- "github.com/cosmos/ibc-go/core/03-connection/types"
- host "github.com/cosmos/ibc-go/core/24-host"
+ "github.com/cosmos/ibc-go/modules/core/03-connection/types"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
)
// NewDecodeStore returns a decoder function closure that unmarshals the KVPair's
diff --git a/core/03-connection/simulation/decoder_test.go b/modules/core/03-connection/simulation/decoder_test.go
similarity index 89%
rename from core/03-connection/simulation/decoder_test.go
rename to modules/core/03-connection/simulation/decoder_test.go
index c4c66644..e9b72657 100644
--- a/core/03-connection/simulation/decoder_test.go
+++ b/modules/core/03-connection/simulation/decoder_test.go
@@ -8,9 +8,9 @@ import (
"github.com/cosmos/ibc-go/testing/simapp"
"github.com/cosmos/cosmos-sdk/types/kv"
- "github.com/cosmos/ibc-go/core/03-connection/simulation"
- "github.com/cosmos/ibc-go/core/03-connection/types"
- host "github.com/cosmos/ibc-go/core/24-host"
+ "github.com/cosmos/ibc-go/modules/core/03-connection/simulation"
+ "github.com/cosmos/ibc-go/modules/core/03-connection/types"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
)
func TestDecodeStore(t *testing.T) {
diff --git a/core/03-connection/simulation/genesis.go b/modules/core/03-connection/simulation/genesis.go
similarity index 82%
rename from core/03-connection/simulation/genesis.go
rename to modules/core/03-connection/simulation/genesis.go
index 4f20cb73..e49132d9 100644
--- a/core/03-connection/simulation/genesis.go
+++ b/modules/core/03-connection/simulation/genesis.go
@@ -4,7 +4,7 @@ import (
"math/rand"
simtypes "github.com/cosmos/cosmos-sdk/types/simulation"
- "github.com/cosmos/ibc-go/core/03-connection/types"
+ "github.com/cosmos/ibc-go/modules/core/03-connection/types"
)
// GenConnectionGenesis returns the default connection genesis state.
diff --git a/core/03-connection/types/codec.go b/modules/core/03-connection/types/codec.go
similarity index 96%
rename from core/03-connection/types/codec.go
rename to modules/core/03-connection/types/codec.go
index bd87d414..9a93e96a 100644
--- a/core/03-connection/types/codec.go
+++ b/modules/core/03-connection/types/codec.go
@@ -5,7 +5,7 @@ import (
codectypes "github.com/cosmos/cosmos-sdk/codec/types"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/cosmos/cosmos-sdk/types/msgservice"
- "github.com/cosmos/ibc-go/core/exported"
+ "github.com/cosmos/ibc-go/modules/core/exported"
)
// RegisterInterfaces register the ibc interfaces submodule implementations to protobuf
diff --git a/core/03-connection/types/connection.go b/modules/core/03-connection/types/connection.go
similarity index 95%
rename from core/03-connection/types/connection.go
rename to modules/core/03-connection/types/connection.go
index 5eed1958..4430e5d6 100644
--- a/core/03-connection/types/connection.go
+++ b/modules/core/03-connection/types/connection.go
@@ -2,9 +2,9 @@ package types
import (
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
- host "github.com/cosmos/ibc-go/core/24-host"
- "github.com/cosmos/ibc-go/core/exported"
+ commitmenttypes "github.com/cosmos/ibc-go/modules/core/23-commitment/types"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
+ "github.com/cosmos/ibc-go/modules/core/exported"
)
var _ exported.ConnectionI = (*ConnectionEnd)(nil)
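The context line above is Go's standard compile-time interface assertion: assigning a typed nil pointer to the blank identifier makes the compiler verify that ConnectionEnd still satisfies exported.ConnectionI, at zero runtime cost. A self-contained illustration of the same idiom, with invented names standing in for the IBC types:

    package main

    import "fmt"

    type ConnEnd struct{ ID string }

    func (c ConnEnd) String() string { return "connection end " + c.ID }

    // Compile-time check, mirroring `var _ exported.ConnectionI = (*ConnectionEnd)(nil)` above:
    // the build fails here if ConnEnd ever stops implementing fmt.Stringer.
    var _ fmt.Stringer = (*ConnEnd)(nil)

    func main() {
        fmt.Println(ConnEnd{ID: "connection-0"}) // prints: connection end connection-0
    }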
diff --git a/core/03-connection/types/connection.pb.go b/modules/core/03-connection/types/connection.pb.go
similarity index 91%
rename from core/03-connection/types/connection.pb.go
rename to modules/core/03-connection/types/connection.pb.go
index ec417b75..c5586069 100644
--- a/core/03-connection/types/connection.pb.go
+++ b/modules/core/03-connection/types/connection.pb.go
@@ -5,7 +5,7 @@ package types
import (
fmt "fmt"
- types "github.com/cosmos/ibc-go/core/23-commitment/types"
+ types "github.com/cosmos/ibc-go/modules/core/23-commitment/types"
_ "github.com/gogo/protobuf/gogoproto"
proto "github.com/gogo/protobuf/proto"
io "io"
@@ -369,48 +369,48 @@ func init() {
}
var fileDescriptor_278e9c8044b4f86b = []byte{
- // 648 bytes of a gzipped FileDescriptorProto
+ // 656 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0xc1, 0x6e, 0xda, 0x4a,
- 0x14, 0xf5, 0x18, 0x93, 0xc0, 0x10, 0xde, 0xa3, 0x53, 0xa4, 0x5a, 0x96, 0x6a, 0xbb, 0xae, 0x54,
- 0xd1, 0x4a, 0x81, 0x92, 0xa8, 0x5d, 0x44, 0xea, 0x22, 0x10, 0x2a, 0x59, 0x69, 0x29, 0x72, 0x48,
- 0xa5, 0x66, 0x83, 0xc0, 0x9e, 0x90, 0x51, 0xc1, 0x83, 0xec, 0x09, 0x2a, 0x7f, 0x10, 0x65, 0xd5,
+ 0x14, 0xb5, 0x8d, 0x49, 0x60, 0x08, 0xef, 0xd1, 0x29, 0x52, 0x2d, 0x4b, 0xb5, 0x5d, 0x57, 0xaa,
+ 0x68, 0xa5, 0xe0, 0x92, 0xa8, 0x95, 0x1a, 0xa9, 0x8b, 0x40, 0xa8, 0x64, 0xb5, 0xa5, 0xc8, 0x90,
+ 0x4a, 0xcd, 0x06, 0x81, 0x3d, 0x21, 0xa3, 0x82, 0x07, 0xd9, 0x03, 0x2a, 0x7f, 0x10, 0x65, 0xd5,
0x6d, 0x17, 0x91, 0x2a, 0xf5, 0x67, 0xb2, 0xc8, 0x22, 0xcb, 0xae, 0x50, 0x95, 0xfc, 0x01, 0x5f,
0x50, 0xd9, 0x63, 0x8c, 0xd3, 0x8a, 0x45, 0x92, 0xee, 0xee, 0xf1, 0x3d, 0xe7, 0x30, 0xf7, 0xcc,
- 0x65, 0xe0, 0x73, 0xd2, 0xb3, 0xfb, 0xb4, 0x62, 0x53, 0x0f, 0x57, 0x6c, 0xea, 0xba, 0xd8, 0x66,
- 0x84, 0xba, 0x95, 0x71, 0x35, 0x81, 0xca, 0x23, 0x8f, 0x32, 0x8a, 0xe4, 0x90, 0x5a, 0x0e, 0xa8,
- 0xe5, 0x44, 0x73, 0x5c, 0x55, 0x8a, 0x7d, 0xda, 0xa7, 0x21, 0xa9, 0x12, 0x54, 0x9c, 0xaf, 0xdc,
- 0xb4, 0x1e, 0x0e, 0x09, 0x1b, 0x62, 0x97, 0x71, 0xeb, 0x39, 0xe2, 0x54, 0xe3, 0x42, 0x84, 0xf9,
- 0x7a, 0x6c, 0xd9, 0x70, 0x1d, 0x54, 0x85, 0x59, 0x7b, 0x40, 0xb0, 0xcb, 0x3a, 0xc4, 0x91, 0x81,
- 0x0e, 0x4a, 0xd9, 0x5a, 0x71, 0x36, 0xd5, 0x0a, 0x93, 0xee, 0x70, 0xb0, 0x65, 0xc4, 0x2d, 0xc3,
- 0xca, 0xf0, 0xda, 0x74, 0xd0, 0x1b, 0x98, 0x19, 0x63, 0xcf, 0x27, 0xd4, 0xf5, 0x65, 0x51, 0x4f,
- 0x95, 0x72, 0x1b, 0x4f, 0xca, 0xcb, 0x8e, 0x5c, 0xfe, 0xc8, 0x99, 0x56, 0x2c, 0x41, 0xaf, 0x60,
- 0xda, 0x67, 0x5d, 0x86, 0xe5, 0x94, 0x0e, 0x4a, 0xff, 0x6d, 0x68, 0xcb, 0xb5, 0x7b, 0x01, 0xcd,
- 0xe2, 0x6c, 0xd4, 0x82, 0x6b, 0x36, 0x3d, 0x76, 0x19, 0xf6, 0x46, 0x5d, 0x8f, 0x4d, 0x64, 0x49,
- 0x07, 0xa5, 0xdc, 0xc6, 0xb3, 0xe5, 0xea, 0x7a, 0x82, 0x5d, 0x93, 0xce, 0xa7, 0x9a, 0x60, 0xdd,
- 0x70, 0x40, 0x5b, 0x70, 0xcd, 0xc1, 0x83, 0xee, 0xa4, 0x33, 0xc2, 0x1e, 0xa1, 0x8e, 0x9c, 0xd6,
- 0x41, 0x49, 0xaa, 0x3d, 0x9a, 0x4d, 0xb5, 0x87, 0x7c, 0xfa, 0x64, 0xd7, 0xb0, 0x72, 0x21, 0x6c,
- 0x85, 0x68, 0x4b, 0x3a, 0xf9, 0xae, 0x09, 0xc6, 0x4c, 0x84, 0x45, 0xd3, 0xc1, 0x2e, 0x23, 0x87,
- 0x04, 0x3b, 0x8b, 0x60, 0xd1, 0x63, 0x28, 0xc6, 0x71, 0xe6, 0x67, 0x53, 0x2d, 0xcb, 0x0d, 0x83,
- 0x1c, 0x45, 0xf2, 0x47, 0xe8, 0xe2, 0xad, 0x43, 0x4f, 0xdd, 0x23, 0x74, 0xe9, 0x5e, 0xa1, 0xa7,
- 0xff, 0x79, 0xe8, 0x2b, 0xb7, 0x0e, 0xfd, 0x02, 0xc0, 0xb5, 0xe4, 0xcf, 0xdc, 0x6d, 0x85, 0xf3,
- 0x8b, 0x73, 0x2f, 0x2e, 0x41, 0x9e, 0x4d, 0xb5, 0x62, 0x24, 0x4b, 0xb6, 0x8d, 0x60, 0x88, 0x39,
- 0x36, 0x1d, 0xb4, 0x03, 0x57, 0x46, 0x1e, 0x3e, 0x24, 0x5f, 0xc2, 0x1d, 0xfe, 0x2b, 0x90, 0xf8,
- 0x4f, 0x37, 0xae, 0x96, 0xdf, 0x63, 0xef, 0xf3, 0x00, 0xb7, 0x42, 0x76, 0x14, 0x48, 0xa4, 0x8d,
- 0xc6, 0x79, 0x0a, 0x73, 0xf5, 0xf0, 0x58, 0xad, 0x2e, 0x3b, 0xf2, 0x51, 0x11, 0xa6, 0x47, 0x41,
- 0x21, 0x03, 0x3d, 0x55, 0xca, 0x5a, 0x1c, 0x18, 0x07, 0xf0, 0xff, 0xc5, 0x76, 0x71, 0xe2, 0x1d,
- 0xa6, 0x8e, 0xbd, 0xc5, 0xa4, 0xf7, 0x2e, 0x5c, 0x8d, 0xf6, 0x05, 0xa9, 0x10, 0x92, 0xf9, 0x3a,
- 0x7b, 0xdc, 0xd4, 0x4a, 0x7c, 0x41, 0x0a, 0xcc, 0x1c, 0xe2, 0x2e, 0x3b, 0xf6, 0xf0, 0xdc, 0x23,
- 0xc6, 0x7c, 0x9a, 0x17, 0xdf, 0x00, 0x4c, 0x87, 0x1b, 0x84, 0x5e, 0x43, 0x6d, 0xaf, 0xbd, 0xdd,
- 0x6e, 0x74, 0xf6, 0x9b, 0x66, 0xd3, 0x6c, 0x9b, 0xdb, 0xef, 0xcc, 0x83, 0xc6, 0x4e, 0x67, 0xbf,
- 0xb9, 0xd7, 0x6a, 0xd4, 0xcd, 0xb7, 0x66, 0x63, 0xa7, 0x20, 0x28, 0x0f, 0x4e, 0xcf, 0xf4, 0xfc,
- 0x0d, 0x02, 0x92, 0x21, 0xe4, 0xba, 0xe0, 0x63, 0x01, 0x28, 0x99, 0xd3, 0x33, 0x5d, 0x0a, 0x6a,
- 0xa4, 0xc2, 0x3c, 0xef, 0xb4, 0xad, 0x4f, 0x1f, 0x5a, 0x8d, 0x66, 0x41, 0x54, 0x72, 0xa7, 0x67,
- 0xfa, 0x6a, 0x04, 0x17, 0xca, 0xb0, 0x99, 0xe2, 0xca, 0xa0, 0x56, 0xa4, 0x93, 0x1f, 0xaa, 0x50,
- 0xdb, 0x3d, 0xbf, 0x52, 0xc1, 0xe5, 0x95, 0x0a, 0x7e, 0x5d, 0xa9, 0xe0, 0xeb, 0xb5, 0x2a, 0x5c,
- 0x5e, 0xab, 0xc2, 0xcf, 0x6b, 0x55, 0x38, 0xa8, 0xf6, 0x09, 0x3b, 0x3a, 0xee, 0x05, 0x57, 0x57,
- 0xb1, 0xa9, 0x3f, 0xa4, 0x7e, 0x85, 0xf4, 0xec, 0xf5, 0xf9, 0xa3, 0xfa, 0x72, 0x73, 0x3d, 0xf1,
- 0x64, 0xb3, 0xc9, 0x08, 0xfb, 0xbd, 0x95, 0xf0, 0x41, 0xdd, 0xfc, 0x1d, 0x00, 0x00, 0xff, 0xff,
- 0x7d, 0x5b, 0xa0, 0xa3, 0xd8, 0x05, 0x00, 0x00,
+ 0x65, 0xc0, 0x53, 0xdc, 0xb3, 0xfb, 0xc4, 0xb0, 0x89, 0x87, 0x0c, 0x9b, 0xb8, 0x2e, 0xb2, 0x29,
+ 0x26, 0xae, 0x31, 0xa9, 0x24, 0x50, 0x79, 0xe4, 0x11, 0x4a, 0xa0, 0x14, 0x52, 0xcb, 0x01, 0xb5,
+ 0x9c, 0x68, 0x4e, 0x2a, 0x72, 0xb1, 0x4f, 0xfa, 0x24, 0x24, 0x19, 0x41, 0xc5, 0xf8, 0xf2, 0x75,
+ 0xeb, 0xe1, 0x10, 0xd3, 0x21, 0x72, 0x29, 0xb3, 0x5e, 0x20, 0x46, 0xd5, 0xcf, 0x05, 0x90, 0xaf,
+ 0xc5, 0x96, 0x75, 0xd7, 0x81, 0x15, 0x90, 0xb5, 0x07, 0x18, 0xb9, 0xb4, 0x83, 0x1d, 0x89, 0xd7,
+ 0xf8, 0x52, 0xb6, 0x5a, 0x9c, 0xcf, 0xd4, 0xc2, 0xb4, 0x3b, 0x1c, 0xec, 0xe8, 0x71, 0x4b, 0xb7,
+ 0x32, 0xac, 0x36, 0x1d, 0xf8, 0x1a, 0x64, 0x26, 0xc8, 0xf3, 0x31, 0x71, 0x7d, 0x49, 0xd0, 0x52,
+ 0xa5, 0xdc, 0xd6, 0xa3, 0xf2, 0xaa, 0x23, 0x97, 0x3f, 0x32, 0xa6, 0x15, 0x4b, 0xe0, 0x0b, 0x90,
+ 0xf6, 0x69, 0x97, 0x22, 0x29, 0xa5, 0xf1, 0xa5, 0xff, 0xb6, 0xd4, 0xd5, 0xda, 0x56, 0x40, 0xb3,
+ 0x18, 0x1b, 0x36, 0xc1, 0x86, 0x4d, 0xc6, 0x2e, 0x45, 0xde, 0xa8, 0xeb, 0xd1, 0xa9, 0x24, 0x6a,
+ 0x7c, 0x29, 0xb7, 0xf5, 0x64, 0xb5, 0xba, 0x96, 0x60, 0x57, 0xc5, 0xb3, 0x99, 0xca, 0x59, 0xd7,
+ 0x1c, 0xe0, 0x0e, 0xd8, 0x70, 0xd0, 0xa0, 0x3b, 0xed, 0x8c, 0x90, 0x87, 0x89, 0x23, 0xa5, 0x35,
+ 0xbe, 0x24, 0x56, 0x1f, 0xcc, 0x67, 0xea, 0x7d, 0x36, 0x7d, 0xb2, 0xab, 0x5b, 0xb9, 0x10, 0x36,
+ 0x43, 0xb4, 0x23, 0x1e, 0x7f, 0x57, 0x39, 0x7d, 0x2e, 0x80, 0xa2, 0xe9, 0x20, 0x97, 0xe2, 0x43,
+ 0x8c, 0x9c, 0x65, 0xb0, 0xf0, 0x21, 0x10, 0xe2, 0x38, 0xf3, 0xf3, 0x99, 0x9a, 0x65, 0x86, 0x41,
+ 0x8e, 0x02, 0xfe, 0x23, 0x74, 0xe1, 0xc6, 0xa1, 0xa7, 0xee, 0x10, 0xba, 0x78, 0xa7, 0xd0, 0xd3,
+ 0xff, 0x3c, 0xf4, 0xb5, 0x1b, 0x87, 0x7e, 0xce, 0x83, 0x8d, 0xe4, 0xcf, 0xdc, 0x6e, 0x85, 0xf3,
+ 0xcb, 0x73, 0x2f, 0x2f, 0x41, 0x9a, 0xcf, 0xd4, 0x62, 0x24, 0x4b, 0xb6, 0xf5, 0x60, 0x88, 0x05,
+ 0x36, 0x1d, 0xb8, 0x07, 0xd6, 0x46, 0x1e, 0x3a, 0xc4, 0x5f, 0xc2, 0x1d, 0xfe, 0x2b, 0x90, 0xf8,
+ 0x4f, 0x37, 0xa9, 0x94, 0xdf, 0x23, 0xef, 0xf3, 0x00, 0x35, 0x43, 0x76, 0x14, 0x48, 0xa4, 0x8d,
+ 0xc6, 0x79, 0x0c, 0x72, 0xb5, 0xf0, 0x58, 0xcd, 0x2e, 0x3d, 0xf2, 0x61, 0x11, 0xa4, 0x47, 0x41,
+ 0x21, 0xf1, 0x5a, 0xaa, 0x94, 0xb5, 0x18, 0xd0, 0x0f, 0xc0, 0xff, 0xcb, 0xed, 0x62, 0xc4, 0x5b,
+ 0x4c, 0x1d, 0x7b, 0x0b, 0x49, 0xef, 0xb7, 0x60, 0x3d, 0xda, 0x17, 0xa8, 0x00, 0x80, 0x17, 0xeb,
+ 0xec, 0x31, 0x53, 0x2b, 0xf1, 0x05, 0xca, 0x20, 0x73, 0x88, 0xba, 0x74, 0xec, 0xa1, 0x85, 0x47,
+ 0x8c, 0xd9, 0x34, 0xcf, 0xbe, 0xf1, 0x20, 0x1d, 0x6e, 0x10, 0x7c, 0x09, 0xd4, 0x56, 0x7b, 0xb7,
+ 0x5d, 0xef, 0xec, 0x37, 0xcc, 0x86, 0xd9, 0x36, 0x77, 0xdf, 0x99, 0x07, 0xf5, 0xbd, 0xce, 0x7e,
+ 0xa3, 0xd5, 0xac, 0xd7, 0xcc, 0x37, 0x66, 0x7d, 0xaf, 0xc0, 0xc9, 0xf7, 0x4e, 0x4e, 0xb5, 0xfc,
+ 0x35, 0x02, 0x94, 0x00, 0x60, 0xba, 0xe0, 0x63, 0x81, 0x97, 0x33, 0x27, 0xa7, 0x9a, 0x18, 0xd4,
+ 0x50, 0x01, 0x79, 0xd6, 0x69, 0x5b, 0x9f, 0x3e, 0x34, 0xeb, 0x8d, 0x82, 0x20, 0xe7, 0x4e, 0x4e,
+ 0xb5, 0xf5, 0x08, 0x2e, 0x95, 0x61, 0x33, 0xc5, 0x94, 0x41, 0x2d, 0x8b, 0xc7, 0x3f, 0x14, 0xae,
+ 0xda, 0x3a, 0xbb, 0x54, 0xf8, 0x8b, 0x4b, 0x85, 0xff, 0x75, 0xa9, 0xf0, 0x5f, 0xaf, 0x14, 0xee,
+ 0xe2, 0x4a, 0xe1, 0x7e, 0x5e, 0x29, 0xdc, 0xc1, 0xab, 0x3e, 0xa6, 0x47, 0xe3, 0x5e, 0x70, 0x75,
+ 0x86, 0x4d, 0xfc, 0x21, 0xf1, 0x0d, 0xdc, 0xb3, 0x37, 0xfb, 0xc4, 0x18, 0x12, 0x67, 0x3c, 0x40,
+ 0x3e, 0x7b, 0x5c, 0x9f, 0x6f, 0x6f, 0x26, 0x9e, 0x6e, 0x3a, 0x1d, 0x21, 0xbf, 0xb7, 0x16, 0x3e,
+ 0xac, 0xdb, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0x84, 0x44, 0x1f, 0x0f, 0xe0, 0x05, 0x00, 0x00,
}
func (m *ConnectionEnd) Marshal() (dAtA []byte, err error) {
diff --git a/core/03-connection/types/connection_test.go b/modules/core/03-connection/types/connection_test.go
similarity index 95%
rename from core/03-connection/types/connection_test.go
rename to modules/core/03-connection/types/connection_test.go
index 78390bf0..bc343131 100644
--- a/core/03-connection/types/connection_test.go
+++ b/modules/core/03-connection/types/connection_test.go
@@ -5,9 +5,9 @@ import (
"github.com/stretchr/testify/require"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- "github.com/cosmos/ibc-go/core/03-connection/types"
- commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ "github.com/cosmos/ibc-go/modules/core/03-connection/types"
+ commitmenttypes "github.com/cosmos/ibc-go/modules/core/23-commitment/types"
ibctesting "github.com/cosmos/ibc-go/testing"
)
diff --git a/core/03-connection/types/errors.go b/modules/core/03-connection/types/errors.go
similarity index 100%
rename from core/03-connection/types/errors.go
rename to modules/core/03-connection/types/errors.go
diff --git a/core/03-connection/types/events.go b/modules/core/03-connection/types/events.go
similarity index 92%
rename from core/03-connection/types/events.go
rename to modules/core/03-connection/types/events.go
index dbbb69e0..37973ed5 100644
--- a/core/03-connection/types/events.go
+++ b/modules/core/03-connection/types/events.go
@@ -3,7 +3,7 @@ package types
import (
"fmt"
- host "github.com/cosmos/ibc-go/core/24-host"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
)
// IBC connection events
diff --git a/core/03-connection/types/expected_keepers.go b/modules/core/03-connection/types/expected_keepers.go
similarity index 92%
rename from core/03-connection/types/expected_keepers.go
rename to modules/core/03-connection/types/expected_keepers.go
index a3e5446a..2199a354 100644
--- a/core/03-connection/types/expected_keepers.go
+++ b/modules/core/03-connection/types/expected_keepers.go
@@ -2,7 +2,7 @@ package types
import (
sdk "github.com/cosmos/cosmos-sdk/types"
- "github.com/cosmos/ibc-go/core/exported"
+ "github.com/cosmos/ibc-go/modules/core/exported"
)
// ClientKeeper expected account IBC client keeper
diff --git a/core/03-connection/types/genesis.go b/modules/core/03-connection/types/genesis.go
similarity index 97%
rename from core/03-connection/types/genesis.go
rename to modules/core/03-connection/types/genesis.go
index 677f9a94..8eb441c5 100644
--- a/core/03-connection/types/genesis.go
+++ b/modules/core/03-connection/types/genesis.go
@@ -3,7 +3,7 @@ package types
import (
"fmt"
- host "github.com/cosmos/ibc-go/core/24-host"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
)
// NewConnectionPaths creates a ConnectionPaths instance.
diff --git a/core/03-connection/types/genesis.pb.go b/modules/core/03-connection/types/genesis.pb.go
similarity index 83%
rename from core/03-connection/types/genesis.pb.go
rename to modules/core/03-connection/types/genesis.pb.go
index 5dce20ca..514fea5e 100644
--- a/core/03-connection/types/genesis.pb.go
+++ b/modules/core/03-connection/types/genesis.pb.go
@@ -94,28 +94,28 @@ func init() {
}
var fileDescriptor_1d3565a164ba596e = []byte{
- // 322 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0x3f, 0x4f, 0xc2, 0x40,
- 0x18, 0xc6, 0x7b, 0x40, 0x1c, 0x8a, 0x53, 0xe3, 0x9f, 0x86, 0xe1, 0x4a, 0x6a, 0x42, 0x60, 0xe0,
- 0x4e, 0x64, 0x73, 0xac, 0x83, 0x31, 0x2e, 0x06, 0x12, 0x07, 0x13, 0x43, 0xe8, 0xf1, 0x5a, 0x2e,
- 0x81, 0x3b, 0xe4, 0x0e, 0x22, 0x1f, 0xc1, 0xcd, 0x8f, 0xc5, 0x88, 0x9b, 0x53, 0x63, 0xda, 0x6f,
- 0xc0, 0x27, 0x30, 0x6d, 0xd5, 0x56, 0x93, 0x6e, 0x97, 0xf7, 0xfd, 0x3d, 0xbf, 0xf7, 0x92, 0xc7,
- 0x6c, 0x71, 0x9f, 0x05, 0x92, 0x32, 0xb9, 0x04, 0xca, 0xa4, 0x10, 0xc0, 0x34, 0x97, 0x82, 0xae,
- 0x7b, 0x34, 0x00, 0x01, 0x8a, 0x2b, 0xb2, 0x58, 0x4a, 0x2d, 0x2d, 0x3b, 0xe5, 0x48, 0xc2, 0x91,
- 0x9c, 0x23, 0xeb, 0x5e, 0xe3, 0x28, 0x90, 0x81, 0x4c, 0x21, 0x9a, 0xbc, 0x32, 0xbe, 0xd1, 0x29,
- 0xf5, 0x16, 0xd2, 0x29, 0xea, 0xbe, 0x57, 0xcc, 0xc3, 0xeb, 0xec, 0xd8, 0x50, 0x8f, 0x35, 0x58,
- 0xf7, 0x66, 0x3d, 0x87, 0x94, 0x8d, 0x9a, 0xd5, 0x76, 0xfd, 0x82, 0x90, 0xb2, 0x1f, 0x90, 0x9b,
- 0x09, 0x08, 0xcd, 0x9f, 0x38, 0x4c, 0xae, 0x7e, 0xe7, 0x5e, 0x6d, 0x1b, 0x3a, 0xc6, 0xa0, 0x28,
- 0xb2, 0x5e, 0x91, 0x79, 0xca, 0x66, 0x1c, 0x84, 0x1e, 0xe5, 0xe3, 0xd1, 0x62, 0xac, 0xa7, 0xca,
- 0xae, 0xa4, 0x47, 0x3a, 0xe5, 0x47, 0x72, 0xf5, 0x5d, 0x12, 0xf0, 0x5a, 0x89, 0x7f, 0x1f, 0x3a,
- 0x78, 0x33, 0x9e, 0xcf, 0x2e, 0xdd, 0x12, 0xaf, 0x3b, 0x38, 0xce, 0x36, 0xff, 0xe2, 0xd6, 0xa3,
- 0x69, 0x0b, 0x78, 0xf9, 0x13, 0x50, 0xf0, 0xbc, 0x02, 0xc1, 0xc0, 0xae, 0x36, 0x51, 0xbb, 0xe6,
- 0x9d, 0xed, 0x43, 0xc7, 0xc9, 0xe4, 0x65, 0xa4, 0x3b, 0x38, 0x49, 0x56, 0xb9, 0x7b, 0xf8, 0xbd,
- 0xf0, 0x6e, 0xb7, 0x11, 0x46, 0xbb, 0x08, 0xa3, 0xcf, 0x08, 0xa3, 0xb7, 0x18, 0x1b, 0xbb, 0x18,
- 0x1b, 0x1f, 0x31, 0x36, 0x1e, 0x7a, 0x01, 0xd7, 0xd3, 0x95, 0x4f, 0x98, 0x9c, 0x53, 0x26, 0xd5,
- 0x5c, 0x2a, 0xca, 0x7d, 0xd6, 0xfd, 0xe9, 0xea, 0xbc, 0xdf, 0x2d, 0xd4, 0xa5, 0x37, 0x0b, 0x50,
- 0xfe, 0x41, 0xda, 0x53, 0xff, 0x2b, 0x00, 0x00, 0xff, 0xff, 0x31, 0x41, 0xfb, 0xcb, 0x2c, 0x02,
- 0x00, 0x00,
+ // 330 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0xcd, 0x6a, 0xf2, 0x40,
+ 0x14, 0x86, 0x13, 0x95, 0x6f, 0x11, 0xbf, 0x55, 0xe8, 0x4f, 0x70, 0x31, 0x91, 0x14, 0x44, 0x17,
+ 0xce, 0xd4, 0xba, 0x6a, 0x97, 0xe9, 0xa2, 0x74, 0x57, 0x14, 0xba, 0x28, 0x14, 0x31, 0xe3, 0xe9,
+ 0x38, 0x60, 0xe6, 0x58, 0x67, 0x94, 0x7a, 0x09, 0xdd, 0xf5, 0xb2, 0x5c, 0xda, 0x5d, 0x57, 0x52,
+ 0xf4, 0x0e, 0xbc, 0x82, 0x92, 0xa4, 0x34, 0xb6, 0x90, 0xdd, 0x70, 0xce, 0xf3, 0x3e, 0x67, 0xe0,
+ 0x75, 0x1a, 0x32, 0xe2, 0x02, 0x19, 0xc7, 0x19, 0x30, 0x8e, 0x4a, 0x01, 0x37, 0x12, 0x15, 0x5b,
+ 0x74, 0x98, 0x00, 0x05, 0x5a, 0x6a, 0x3a, 0x9d, 0xa1, 0x41, 0xd7, 0x4b, 0x39, 0x9a, 0x70, 0x34,
+ 0xe7, 0xe8, 0xa2, 0x53, 0x3b, 0x12, 0x28, 0x30, 0x85, 0x58, 0xf2, 0xca, 0xf8, 0x5a, 0xab, 0xd0,
+ 0x7b, 0x90, 0x4e, 0xd1, 0xe0, 0xbd, 0xe4, 0xfc, 0xbf, 0xc9, 0x8e, 0xf5, 0xcd, 0xd0, 0x80, 0x7b,
+ 0xef, 0x54, 0x73, 0x48, 0x7b, 0x76, 0xbd, 0xdc, 0xac, 0x5e, 0x50, 0x5a, 0xf4, 0x03, 0x7a, 0x3b,
+ 0x02, 0x65, 0xe4, 0x93, 0x84, 0xd1, 0xf5, 0xcf, 0x3c, 0xac, 0xac, 0x36, 0xbe, 0xd5, 0x3b, 0x14,
+ 0xb9, 0xaf, 0xb6, 0x73, 0xca, 0x27, 0x12, 0x94, 0x19, 0xe4, 0xe3, 0xc1, 0x74, 0x68, 0xc6, 0xda,
+ 0x2b, 0xa5, 0x47, 0x5a, 0xc5, 0x47, 0x72, 0xf5, 0x5d, 0x12, 0x08, 0x1b, 0x89, 0x7f, 0xbf, 0xf1,
+ 0xc9, 0x72, 0x18, 0x4f, 0xae, 0x82, 0x02, 0x6f, 0xd0, 0x3b, 0xce, 0x36, 0x7f, 0xe2, 0xee, 0xa3,
+ 0xe3, 0x29, 0x78, 0xf9, 0x15, 0xd0, 0xf0, 0x3c, 0x07, 0xc5, 0xc1, 0x2b, 0xd7, 0xed, 0x66, 0x25,
+ 0x3c, 0xdb, 0x6f, 0x7c, 0x3f, 0x93, 0x17, 0x91, 0x41, 0xef, 0x24, 0x59, 0xe5, 0xee, 0xfe, 0xf7,
+ 0x22, 0xec, 0xaf, 0xb6, 0xc4, 0x5e, 0x6f, 0x89, 0xfd, 0xb9, 0x25, 0xf6, 0xdb, 0x8e, 0x58, 0xeb,
+ 0x1d, 0xb1, 0x3e, 0x76, 0xc4, 0x7a, 0xb8, 0x14, 0xd2, 0x8c, 0xe7, 0x11, 0xe5, 0x18, 0x33, 0x8e,
+ 0x3a, 0x46, 0xcd, 0x64, 0xc4, 0xdb, 0x02, 0x59, 0x8c, 0xa3, 0xf9, 0x04, 0x74, 0xd6, 0xd9, 0x79,
+ 0xb7, 0x7d, 0x50, 0x9b, 0x59, 0x4e, 0x41, 0x47, 0xff, 0xd2, 0xbe, 0xba, 0x5f, 0x01, 0x00, 0x00,
+ 0xff, 0xff, 0x1e, 0x94, 0x88, 0x7d, 0x34, 0x02, 0x00, 0x00,
}
func (m *GenesisState) Marshal() (dAtA []byte, err error) {
diff --git a/core/03-connection/types/genesis_test.go b/modules/core/03-connection/types/genesis_test.go
similarity index 96%
rename from core/03-connection/types/genesis_test.go
rename to modules/core/03-connection/types/genesis_test.go
index 104147be..562890dd 100644
--- a/core/03-connection/types/genesis_test.go
+++ b/modules/core/03-connection/types/genesis_test.go
@@ -5,8 +5,8 @@ import (
"github.com/stretchr/testify/require"
- "github.com/cosmos/ibc-go/core/03-connection/types"
- commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
+ "github.com/cosmos/ibc-go/modules/core/03-connection/types"
+ commitmenttypes "github.com/cosmos/ibc-go/modules/core/23-commitment/types"
ibctesting "github.com/cosmos/ibc-go/testing"
)
diff --git a/core/03-connection/types/keys.go b/modules/core/03-connection/types/keys.go
similarity index 97%
rename from core/03-connection/types/keys.go
rename to modules/core/03-connection/types/keys.go
index a06039eb..073ab821 100644
--- a/core/03-connection/types/keys.go
+++ b/modules/core/03-connection/types/keys.go
@@ -5,7 +5,7 @@ import (
"regexp"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- host "github.com/cosmos/ibc-go/core/24-host"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
)
const (
diff --git a/core/03-connection/types/keys_test.go b/modules/core/03-connection/types/keys_test.go
similarity index 95%
rename from core/03-connection/types/keys_test.go
rename to modules/core/03-connection/types/keys_test.go
index 0650aed1..c4180385 100644
--- a/core/03-connection/types/keys_test.go
+++ b/modules/core/03-connection/types/keys_test.go
@@ -6,7 +6,7 @@ import (
"github.com/stretchr/testify/require"
- "github.com/cosmos/ibc-go/core/03-connection/types"
+ "github.com/cosmos/ibc-go/modules/core/03-connection/types"
)
// tests ParseConnectionSequence and IsValidConnectionID
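For readers skimming the renamed test file, the two helpers it exercises validate and parse identifiers of the form connection-<sequence>. A rough usage sketch, assuming the helpers keep the signatures defined in 03-connection/types/keys.go (FormatConnectionIdentifier is included for context and is likewise an assumption here):

    package main

    import (
        "fmt"

        "github.com/cosmos/ibc-go/modules/core/03-connection/types"
    )

    func main() {
        id := types.FormatConnectionIdentifier(7)  // "connection-7"
        fmt.Println(types.IsValidConnectionID(id)) // true
        seq, err := types.ParseConnectionSequence(id)
        fmt.Println(seq, err) // 7 <nil>
    }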
diff --git a/core/03-connection/types/msgs.go b/modules/core/03-connection/types/msgs.go
similarity index 98%
rename from core/03-connection/types/msgs.go
rename to modules/core/03-connection/types/msgs.go
index 797ad31e..fcae27b6 100644
--- a/core/03-connection/types/msgs.go
+++ b/modules/core/03-connection/types/msgs.go
@@ -4,10 +4,10 @@ import (
codectypes "github.com/cosmos/cosmos-sdk/codec/types"
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
- host "github.com/cosmos/ibc-go/core/24-host"
- "github.com/cosmos/ibc-go/core/exported"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ commitmenttypes "github.com/cosmos/ibc-go/modules/core/23-commitment/types"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
+ "github.com/cosmos/ibc-go/modules/core/exported"
)
var (
diff --git a/core/03-connection/types/msgs_test.go b/modules/core/03-connection/types/msgs_test.go
similarity index 98%
rename from core/03-connection/types/msgs_test.go
rename to modules/core/03-connection/types/msgs_test.go
index 39709126..a929ff72 100644
--- a/core/03-connection/types/msgs_test.go
+++ b/modules/core/03-connection/types/msgs_test.go
@@ -15,10 +15,10 @@ import (
"github.com/cosmos/cosmos-sdk/store/rootmulti"
storetypes "github.com/cosmos/cosmos-sdk/store/types"
sdk "github.com/cosmos/cosmos-sdk/types"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- "github.com/cosmos/ibc-go/core/03-connection/types"
- commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
- ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ "github.com/cosmos/ibc-go/modules/core/03-connection/types"
+ commitmenttypes "github.com/cosmos/ibc-go/modules/core/23-commitment/types"
+ ibctmtypes "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types"
ibctesting "github.com/cosmos/ibc-go/testing"
)
diff --git a/core/03-connection/types/query.go b/modules/core/03-connection/types/query.go
similarity index 95%
rename from core/03-connection/types/query.go
rename to modules/core/03-connection/types/query.go
index f182c2b5..7677ebd1 100644
--- a/core/03-connection/types/query.go
+++ b/modules/core/03-connection/types/query.go
@@ -2,8 +2,8 @@ package types
import (
codectypes "github.com/cosmos/cosmos-sdk/codec/types"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- "github.com/cosmos/ibc-go/core/exported"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ "github.com/cosmos/ibc-go/modules/core/exported"
)
var (
diff --git a/core/03-connection/types/query.pb.go b/modules/core/03-connection/types/query.pb.go
similarity index 93%
rename from core/03-connection/types/query.pb.go
rename to modules/core/03-connection/types/query.pb.go
index a03441a1..49e8f074 100644
--- a/core/03-connection/types/query.pb.go
+++ b/modules/core/03-connection/types/query.pb.go
@@ -8,7 +8,7 @@ import (
fmt "fmt"
types1 "github.com/cosmos/cosmos-sdk/codec/types"
query "github.com/cosmos/cosmos-sdk/types/query"
- types "github.com/cosmos/ibc-go/core/02-client/types"
+ types "github.com/cosmos/ibc-go/modules/core/02-client/types"
_ "github.com/gogo/protobuf/gogoproto"
grpc1 "github.com/gogo/protobuf/grpc"
proto "github.com/gogo/protobuf/proto"
@@ -635,63 +635,63 @@ func init() {
}
var fileDescriptor_eaccf9805ea75291 = []byte{
- // 889 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0xcf, 0x6f, 0xe3, 0x44,
- 0x14, 0xce, 0xa4, 0xdd, 0xd5, 0x76, 0x52, 0x76, 0x61, 0x94, 0x65, 0x4d, 0x58, 0xb2, 0x5d, 0x6f,
- 0x4b, 0x5b, 0xaa, 0xce, 0x34, 0xa9, 0x80, 0xaa, 0xbf, 0x80, 0xa0, 0x42, 0x2b, 0x24, 0x54, 0xcc,
- 0x8d, 0x4b, 0x65, 0x3b, 0x53, 0xc7, 0x52, 0xe3, 0x49, 0x63, 0x27, 0x28, 0xaa, 0x22, 0x24, 0xfe,
- 0x02, 0x24, 0xae, 0x5c, 0x10, 0x17, 0x4e, 0x5c, 0x39, 0x72, 0x43, 0x3d, 0x56, 0xe2, 0xc2, 0x01,
- 0x55, 0x55, 0x8b, 0xb8, 0xc3, 0x5f, 0x80, 0x3c, 0x33, 0xae, 0xc7, 0x49, 0xdc, 0x26, 0xd1, 0xf6,
- 0x96, 0xbc, 0x79, 0x6f, 0xde, 0xf7, 0x7d, 0xef, 0xcd, 0x97, 0xc0, 0x59, 0xd7, 0xb2, 0x1d, 0x46,
- 0x6c, 0xd6, 0xa4, 0xc4, 0x66, 0x9e, 0x47, 0xed, 0xc0, 0x65, 0x1e, 0x69, 0x97, 0xc8, 0x71, 0x8b,
- 0x36, 0x3b, 0xb8, 0xd1, 0x64, 0x01, 0x43, 0x1a, 0xcf, 0xc2, 0x61, 0x16, 0x8e, 0xb3, 0x70, 0xbb,
- 0x54, 0xc8, 0x3b, 0xcc, 0x61, 0x3c, 0x89, 0x84, 0x9f, 0x44, 0x7e, 0xe1, 0x1d, 0x9b, 0xf9, 0x75,
- 0xe6, 0x13, 0xcb, 0xf4, 0xa9, 0xb8, 0x88, 0xb4, 0x4b, 0x16, 0x0d, 0xcc, 0x12, 0x69, 0x98, 0x8e,
- 0xeb, 0x99, 0xbc, 0x5c, 0xe4, 0x3e, 0x57, 0x11, 0x1c, 0xb9, 0xd4, 0x0b, 0xc2, 0xee, 0xe2, 0x93,
- 0x4c, 0x59, 0x4c, 0x05, 0xa9, 0x80, 0x11, 0xa9, 0x4f, 0x1d, 0xc6, 0x9c, 0x23, 0x4a, 0xcc, 0x86,
- 0x4b, 0x4c, 0xcf, 0x63, 0x01, 0x6f, 0xe5, 0xcb, 0xd3, 0x37, 0xe4, 0x29, 0xff, 0x66, 0xb5, 0x0e,
- 0x89, 0xe9, 0x49, 0x8a, 0xfa, 0x16, 0x7c, 0xfd, 0x8b, 0x10, 0xe8, 0xc7, 0xd7, 0x37, 0x1a, 0xf4,
- 0xb8, 0x45, 0xfd, 0x00, 0xbd, 0x80, 0xaf, 0xc4, 0x6d, 0x0e, 0xdc, 0xaa, 0x06, 0x66, 0xc0, 0xc2,
- 0x94, 0x31, 0x1d, 0x07, 0xf7, 0xaa, 0xfa, 0x6f, 0x00, 0x3e, 0xe9, 0xab, 0xf7, 0x1b, 0xcc, 0xf3,
- 0x29, 0xfa, 0x14, 0xc2, 0x38, 0x97, 0x57, 0xe7, 0xca, 0xf3, 0x38, 0x4d, 0x52, 0x1c, 0xdf, 0xb0,
- 0xe3, 0x55, 0x0d, 0xa5, 0x14, 0xe5, 0xe1, 0xbd, 0x46, 0x93, 0xb1, 0x43, 0x2d, 0x3b, 0x03, 0x16,
- 0xa6, 0x0d, 0xf1, 0x05, 0xed, 0xc0, 0x69, 0xfe, 0xe1, 0xa0, 0x46, 0x5d, 0xa7, 0x16, 0x68, 0x13,
- 0xbc, 0xc1, 0xd3, 0x44, 0x03, 0xa1, 0x66, 0xbb, 0x84, 0x77, 0x79, 0x4e, 0x65, 0xf2, 0xf4, 0xfc,
- 0x59, 0xc6, 0xc8, 0xf1, 0x3a, 0x11, 0xd2, 0xcd, 0x3e, 0x02, 0x7e, 0xa4, 0xc0, 0x27, 0x10, 0xc6,
- 0x63, 0x93, 0x04, 0xde, 0xc6, 0x62, 0xc6, 0x38, 0x9c, 0x31, 0x16, 0xcb, 0x22, 0x67, 0x8c, 0xf7,
- 0x4d, 0x87, 0xca, 0x5a, 0x43, 0xa9, 0xd4, 0xff, 0x05, 0x50, 0xeb, 0xef, 0x21, 0x55, 0xda, 0x87,
- 0xb9, 0x98, 0xaa, 0xaf, 0x81, 0x99, 0x89, 0x85, 0x5c, 0x19, 0xa7, 0xcb, 0xb4, 0x57, 0xa5, 0x5e,
- 0xe0, 0x1e, 0xba, 0xb4, 0xaa, 0x48, 0xae, 0x5e, 0x11, 0xea, 0xae, 0xc0, 0xce, 0x4a, 0xdd, 0x6f,
- 0x83, 0x2d, 0xe0, 0xa8, 0xb8, 0xd1, 0x3a, 0xbc, 0x3f, 0xb2, 0xb6, 0xb2, 0x42, 0xdf, 0x84, 0x6f,
- 0x09, 0xca, 0x3c, 0x6d, 0x80, 0xb8, 0x6f, 0xc2, 0x29, 0x71, 0x45, 0xbc, 0x5a, 0x0f, 0x44, 0x60,
- 0xaf, 0xaa, 0xff, 0x0c, 0x60, 0x31, 0xad, 0x5c, 0xea, 0xb6, 0x08, 0x5f, 0x55, 0xd6, 0xb3, 0x61,
- 0x06, 0x35, 0x21, 0xde, 0x94, 0xf1, 0x28, 0x8e, 0xef, 0x87, 0xe1, 0xbb, 0xdd, 0x1f, 0x0b, 0x3e,
- 0xef, 0x99, 0xad, 0xc0, 0xfc, 0x65, 0x60, 0x06, 0xd1, 0x36, 0xa0, 0xad, 0x81, 0x6f, 0xa9, 0xa2,
- 0xfd, 0x77, 0xfe, 0x2c, 0xdf, 0x31, 0xeb, 0x47, 0xeb, 0x7a, 0xe2, 0x58, 0xef, 0x79, 0x65, 0xff,
- 0x00, 0xa8, 0xdf, 0xd4, 0x44, 0x4a, 0x62, 0xc3, 0x27, 0xee, 0xf5, 0x76, 0x1c, 0x48, 0x75, 0xfd,
- 0x30, 0x45, 0x2e, 0xef, 0xd2, 0x60, 0x72, 0xca, 0x4a, 0x29, 0xb7, 0x3e, 0x76, 0x07, 0x85, 0xef,
- 0x56, 0xcc, 0x5f, 0x01, 0x9c, 0xed, 0x25, 0x1a, 0x52, 0xf3, 0xfc, 0x96, 0xff, 0x12, 0x05, 0x45,
- 0xf3, 0xf0, 0x51, 0x93, 0xb6, 0x5d, 0x3f, 0x3c, 0xf5, 0x5a, 0x75, 0x8b, 0x36, 0x39, 0x9d, 0x49,
- 0xe3, 0x61, 0x14, 0xfe, 0x9c, 0x47, 0x13, 0x89, 0x0a, 0x35, 0x25, 0x51, 0x22, 0xbf, 0x00, 0x70,
- 0xee, 0x16, 0xe4, 0x72, 0x4a, 0x5b, 0x30, 0x5c, 0x50, 0x71, 0x92, 0x98, 0x4e, 0x1e, 0x0b, 0x9b,
- 0xc6, 0x91, 0x4d, 0xe3, 0x8f, 0xbc, 0x8e, 0xf1, 0xd0, 0x4e, 0x5c, 0x93, 0x7c, 0x37, 0xd9, 0xe4,
- 0xbb, 0x89, 0x87, 0x33, 0x71, 0xd3, 0x70, 0x26, 0xc7, 0x1a, 0x4e, 0xf9, 0xa7, 0x07, 0xf0, 0x1e,
- 0xa7, 0x88, 0x7e, 0x01, 0x10, 0xc6, 0x3c, 0xd1, 0x4a, 0xba, 0x5b, 0x0d, 0xfe, 0x6d, 0x29, 0x94,
- 0x46, 0xa8, 0x10, 0xb2, 0xe9, 0x1b, 0xdf, 0xfe, 0xf1, 0xf7, 0xf7, 0xd9, 0x77, 0xd1, 0x2a, 0x71,
- 0x2d, 0xfb, 0xe6, 0xdf, 0x44, 0x9f, 0x9c, 0x24, 0xa6, 0xdf, 0x45, 0x3f, 0x02, 0x98, 0x53, 0x4c,
- 0x04, 0x0d, 0xdf, 0x3f, 0xf2, 0xab, 0x42, 0x79, 0x94, 0x12, 0x89, 0x79, 0x89, 0x63, 0x9e, 0x43,
- 0x2f, 0x86, 0xc0, 0x8c, 0x7e, 0x07, 0xf0, 0xb5, 0x3e, 0xbb, 0x43, 0xef, 0xdf, 0xd6, 0x36, 0xc5,
- 0x5f, 0x0b, 0x6b, 0xa3, 0x17, 0x4a, 0xd4, 0xdb, 0x1c, 0xf5, 0x1a, 0x7a, 0x2f, 0x15, 0xb5, 0xd8,
- 0xbf, 0xa4, 0xe0, 0xd1, 0x4e, 0x76, 0xd1, 0x5f, 0x00, 0x3e, 0x1e, 0x68, 0x54, 0x68, 0x63, 0x68,
- 0x0d, 0xfb, 0x3d, 0xb4, 0xb0, 0x39, 0x5e, 0xb1, 0x24, 0xb5, 0xcb, 0x49, 0x55, 0xd0, 0x87, 0x63,
- 0xac, 0x0f, 0x51, 0xad, 0x14, 0xfd, 0x90, 0x85, 0x5a, 0xda, 0x23, 0x47, 0xdb, 0xc3, 0x83, 0x1c,
- 0xe4, 0x6b, 0x85, 0x0f, 0xc6, 0xae, 0x97, 0x3c, 0xbf, 0xe1, 0x3c, 0x3b, 0xe8, 0xeb, 0xb1, 0x78,
- 0x26, 0x7d, 0x89, 0x44, 0x1e, 0x47, 0x4e, 0x7a, 0xdc, 0xb2, 0x4b, 0x84, 0x95, 0x28, 0x07, 0x22,
- 0xd0, 0xad, 0x7c, 0x76, 0x7a, 0x59, 0x04, 0x67, 0x97, 0x45, 0x70, 0x71, 0x59, 0x04, 0xdf, 0x5d,
- 0x15, 0x33, 0x67, 0x57, 0xc5, 0xcc, 0x9f, 0x57, 0xc5, 0xcc, 0x57, 0x25, 0xc7, 0x0d, 0x6a, 0x2d,
- 0x0b, 0xdb, 0xac, 0x4e, 0xe4, 0x1f, 0x65, 0xd7, 0xb2, 0x97, 0xa3, 0x7f, 0xb8, 0x2b, 0xab, 0xcb,
- 0x0a, 0xd2, 0xa0, 0xd3, 0xa0, 0xbe, 0x75, 0x9f, 0x5b, 0xe1, 0xea, 0xff, 0x01, 0x00, 0x00, 0xff,
- 0xff, 0x02, 0x85, 0x22, 0x9e, 0xaf, 0x0b, 0x00, 0x00,
+ // 895 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0xcf, 0x6f, 0x1b, 0x45,
+ 0x14, 0xf6, 0x38, 0x69, 0xd5, 0x8c, 0x43, 0x0b, 0x23, 0x97, 0x2e, 0xa6, 0xb8, 0xe9, 0xb6, 0xa5,
+ 0x29, 0x55, 0x67, 0x6a, 0x47, 0x40, 0xc8, 0x2f, 0xc0, 0x28, 0x90, 0x5c, 0x50, 0xd8, 0xdc, 0xb8,
+ 0x44, 0xbb, 0xeb, 0xc9, 0x7a, 0x25, 0x7b, 0xc7, 0xf1, 0xac, 0x8d, 0xac, 0xc8, 0x42, 0xe2, 0x2f,
+ 0x40, 0xe2, 0xca, 0x05, 0x71, 0xe1, 0xc4, 0x95, 0x23, 0x37, 0x94, 0x63, 0x24, 0x2e, 0x1c, 0x50,
+ 0x14, 0x25, 0x88, 0x3b, 0xfc, 0x05, 0x68, 0x67, 0xc6, 0xd9, 0x59, 0xdb, 0x9b, 0xd8, 0x16, 0xb9,
+ 0x6d, 0xde, 0xbc, 0x37, 0xef, 0xfb, 0xbe, 0xf7, 0xe6, 0x73, 0xe0, 0x63, 0xdf, 0x71, 0x3d, 0x46,
+ 0x5c, 0xd6, 0xa2, 0xc4, 0x65, 0x41, 0x40, 0xdd, 0xd0, 0x67, 0x01, 0xe9, 0x94, 0xc8, 0x41, 0x9b,
+ 0xb6, 0xba, 0xb8, 0xd9, 0x62, 0x21, 0x43, 0x86, 0xc8, 0xc2, 0x51, 0x16, 0x8e, 0xb3, 0x70, 0xa7,
+ 0x54, 0xc8, 0x7b, 0xcc, 0x63, 0x22, 0x89, 0x44, 0x5f, 0x32, 0xbf, 0xf0, 0x8e, 0xcb, 0x78, 0x83,
+ 0x71, 0xe2, 0xd8, 0x9c, 0xca, 0x8b, 0x48, 0xa7, 0xe4, 0xd0, 0xd0, 0x2e, 0x91, 0xa6, 0xed, 0xf9,
+ 0x81, 0x2d, 0xca, 0x65, 0xee, 0x43, 0x1d, 0x41, 0xdd, 0xa7, 0x41, 0x18, 0x75, 0x97, 0x5f, 0x2a,
+ 0xe5, 0x59, 0x2a, 0x48, 0x0d, 0x8c, 0x4c, 0xbd, 0xef, 0x31, 0xe6, 0xd5, 0x29, 0xb1, 0x9b, 0x3e,
+ 0xb1, 0x83, 0x80, 0x85, 0xa2, 0x15, 0x57, 0xa7, 0x6f, 0xa8, 0x53, 0xf1, 0x97, 0xd3, 0xde, 0x27,
+ 0x76, 0xa0, 0x28, 0x9a, 0xeb, 0xf0, 0xf5, 0x2f, 0x22, 0xa0, 0x9f, 0x5c, 0xdc, 0x68, 0xd1, 0x83,
+ 0x36, 0xe5, 0x21, 0x7a, 0x04, 0x5f, 0x89, 0xdb, 0xec, 0xf9, 0x55, 0x03, 0x2c, 0x80, 0xc5, 0x39,
+ 0x6b, 0x3e, 0x0e, 0x6e, 0x57, 0xcd, 0x5f, 0x01, 0xbc, 0x37, 0x54, 0xcf, 0x9b, 0x2c, 0xe0, 0x14,
+ 0x7d, 0x06, 0x61, 0x9c, 0x2b, 0xaa, 0x73, 0xe5, 0xa7, 0x38, 0x4d, 0x52, 0x1c, 0xdf, 0xb0, 0x19,
+ 0x54, 0x2d, 0xad, 0x14, 0xe5, 0xe1, 0x8d, 0x66, 0x8b, 0xb1, 0x7d, 0x23, 0xbb, 0x00, 0x16, 0xe7,
+ 0x2d, 0xf9, 0x07, 0xda, 0x84, 0xf3, 0xe2, 0x63, 0xaf, 0x46, 0x7d, 0xaf, 0x16, 0x1a, 0x33, 0xa2,
+ 0xc1, 0xfd, 0x44, 0x03, 0xa9, 0x66, 0xa7, 0x84, 0xb7, 0x44, 0x4e, 0x65, 0xf6, 0xe8, 0xe4, 0x41,
+ 0xc6, 0xca, 0x89, 0x3a, 0x19, 0x32, 0xed, 0x21, 0x02, 0xbc, 0xaf, 0xc0, 0xa7, 0x10, 0xc6, 0x63,
+ 0x53, 0x04, 0xde, 0xc6, 0x72, 0xc6, 0x38, 0x9a, 0x31, 0x96, 0xcb, 0xa2, 0x66, 0x8c, 0x77, 0x6c,
+ 0x8f, 0xaa, 0x5a, 0x4b, 0xab, 0x34, 0xff, 0x01, 0xd0, 0x18, 0xee, 0xa1, 0x54, 0xda, 0x81, 0xb9,
+ 0x98, 0x2a, 0x37, 0xc0, 0xc2, 0xcc, 0x62, 0xae, 0x8c, 0xd3, 0x65, 0xda, 0xae, 0xd2, 0x20, 0xf4,
+ 0xf7, 0x7d, 0x5a, 0xd5, 0x24, 0xd7, 0xaf, 0x88, 0x74, 0xd7, 0x60, 0x67, 0x95, 0xee, 0x57, 0xc1,
+ 0x96, 0x70, 0x74, 0xdc, 0x68, 0x05, 0xde, 0x9c, 0x58, 0x5b, 0x55, 0x61, 0xae, 0xc1, 0xb7, 0x24,
+ 0x65, 0x91, 0x36, 0x42, 0xdc, 0x37, 0xe1, 0x9c, 0xbc, 0x22, 0x5e, 0xad, 0x5b, 0x32, 0xb0, 0x5d,
+ 0x35, 0x7f, 0x02, 0xb0, 0x98, 0x56, 0xae, 0x74, 0x7b, 0x06, 0x5f, 0xd5, 0xd6, 0xb3, 0x69, 0x87,
+ 0x35, 0x29, 0xde, 0x9c, 0x75, 0x27, 0x8e, 0xef, 0x44, 0xe1, 0xeb, 0xdd, 0x1f, 0x07, 0x3e, 0x1c,
+ 0x98, 0xad, 0xc4, 0xbc, 0x1b, 0xda, 0x61, 0x7f, 0x1b, 0xd0, 0xfa, 0xc8, 0xb7, 0x54, 0x31, 0xfe,
+ 0x3d, 0x79, 0x90, 0xef, 0xda, 0x8d, 0xfa, 0x8a, 0x99, 0x38, 0x36, 0x07, 0x5e, 0xd9, 0xdf, 0x00,
+ 0x9a, 0x97, 0x35, 0x51, 0x92, 0xb8, 0xf0, 0x9e, 0x7f, 0xb1, 0x1d, 0x7b, 0x4a, 0x5d, 0x1e, 0xa5,
+ 0xa8, 0xe5, 0x7d, 0x3e, 0x9a, 0x9c, 0xb6, 0x52, 0xda, 0xad, 0x77, 0xfd, 0x51, 0xe1, 0xeb, 0x15,
+ 0xf3, 0x17, 0x00, 0x1f, 0x0f, 0x12, 0x8d, 0xa8, 0x05, 0xbc, 0xcd, 0xff, 0x47, 0x41, 0xd1, 0x53,
+ 0x78, 0xa7, 0x45, 0x3b, 0x3e, 0x8f, 0x4e, 0x83, 0x76, 0xc3, 0xa1, 0x2d, 0x41, 0x67, 0xd6, 0xba,
+ 0xdd, 0x0f, 0x7f, 0x2e, 0xa2, 0x89, 0x44, 0x8d, 0x9a, 0x96, 0xa8, 0x90, 0x9f, 0x02, 0xf8, 0xe4,
+ 0x0a, 0xe4, 0x6a, 0x4a, 0xeb, 0x30, 0x5a, 0x50, 0x79, 0x92, 0x98, 0x4e, 0x1e, 0x4b, 0x9b, 0xc6,
+ 0x7d, 0x9b, 0xc6, 0x1f, 0x07, 0x5d, 0xeb, 0xb6, 0x9b, 0xb8, 0x26, 0xf9, 0x6e, 0xb2, 0xc9, 0x77,
+ 0x13, 0x0f, 0x67, 0xe6, 0xb2, 0xe1, 0xcc, 0x4e, 0x35, 0x9c, 0xf2, 0x8f, 0xb7, 0xe0, 0x0d, 0x41,
+ 0x11, 0xfd, 0x0c, 0x20, 0x8c, 0x79, 0xa2, 0x97, 0xe9, 0x6e, 0x35, 0xfa, 0xb7, 0xa5, 0x50, 0x9a,
+ 0xa0, 0x42, 0xca, 0x66, 0xae, 0x7e, 0xf3, 0xfb, 0x5f, 0xdf, 0x65, 0xdf, 0x45, 0x4b, 0xc4, 0x77,
+ 0xdc, 0xcb, 0x7f, 0x13, 0x39, 0x39, 0x4c, 0x4c, 0xbf, 0x87, 0x7e, 0x00, 0x30, 0xa7, 0x99, 0x08,
+ 0x1a, 0xbf, 0x7f, 0xdf, 0xaf, 0x0a, 0xe5, 0x49, 0x4a, 0x14, 0xe6, 0xe7, 0x02, 0xf3, 0x13, 0xf4,
+ 0x68, 0x0c, 0xcc, 0xe8, 0x37, 0x00, 0x5f, 0x1b, 0xb2, 0x3b, 0xf4, 0xfe, 0x55, 0x6d, 0x53, 0xfc,
+ 0xb5, 0xb0, 0x3c, 0x79, 0xa1, 0x42, 0xbd, 0x21, 0x50, 0x2f, 0xa3, 0xf7, 0x52, 0x51, 0xcb, 0xfd,
+ 0x4b, 0x0a, 0xde, 0xdf, 0xc9, 0x1e, 0xfa, 0x13, 0xc0, 0xbb, 0x23, 0x8d, 0x0a, 0xad, 0x8e, 0xad,
+ 0xe1, 0xb0, 0x87, 0x16, 0xd6, 0xa6, 0x2b, 0x56, 0xa4, 0xb6, 0x04, 0xa9, 0x0a, 0xfa, 0x68, 0x8a,
+ 0xf5, 0x21, 0xba, 0x95, 0xa2, 0xef, 0xb3, 0xd0, 0x48, 0x7b, 0xe4, 0x68, 0x63, 0x7c, 0x90, 0xa3,
+ 0x7c, 0xad, 0xf0, 0xe1, 0xd4, 0xf5, 0x8a, 0xe7, 0xd7, 0x82, 0x67, 0x17, 0x7d, 0x35, 0x15, 0xcf,
+ 0xa4, 0x2f, 0x91, 0xbe, 0xc7, 0x91, 0xc3, 0x01, 0xb7, 0xec, 0x11, 0x69, 0x25, 0xda, 0x81, 0x0c,
+ 0xf4, 0x2a, 0xbb, 0x47, 0x67, 0x45, 0x70, 0x7c, 0x56, 0x04, 0xa7, 0x67, 0x45, 0xf0, 0xed, 0x79,
+ 0x31, 0x73, 0x7c, 0x5e, 0xcc, 0xfc, 0x71, 0x5e, 0xcc, 0x7c, 0xf9, 0x81, 0xe7, 0x87, 0xb5, 0xb6,
+ 0x83, 0x5d, 0xd6, 0x20, 0xea, 0x1f, 0x65, 0xdf, 0x71, 0x5f, 0x78, 0x8c, 0x34, 0x58, 0xb5, 0x5d,
+ 0xa7, 0x5c, 0xc2, 0x7d, 0xb9, 0xf4, 0x42, 0x43, 0x1c, 0x76, 0x9b, 0x94, 0x3b, 0x37, 0x85, 0x25,
+ 0x2e, 0xfd, 0x17, 0x00, 0x00, 0xff, 0xff, 0x44, 0x76, 0x5a, 0x8b, 0xb7, 0x0b, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
diff --git a/core/03-connection/types/query.pb.gw.go b/modules/core/03-connection/types/query.pb.gw.go
similarity index 100%
rename from core/03-connection/types/query.pb.gw.go
rename to modules/core/03-connection/types/query.pb.gw.go
diff --git a/core/03-connection/types/tx.pb.go b/modules/core/03-connection/types/tx.pb.go
similarity index 92%
rename from core/03-connection/types/tx.pb.go
rename to modules/core/03-connection/types/tx.pb.go
index ca9b87f5..3e849954 100644
--- a/core/03-connection/types/tx.pb.go
+++ b/modules/core/03-connection/types/tx.pb.go
@@ -7,7 +7,7 @@ import (
context "context"
fmt "fmt"
types "github.com/cosmos/cosmos-sdk/codec/types"
- types1 "github.com/cosmos/ibc-go/core/02-client/types"
+ types1 "github.com/cosmos/ibc-go/modules/core/02-client/types"
_ "github.com/gogo/protobuf/gogoproto"
grpc1 "github.com/gogo/protobuf/grpc"
proto "github.com/gogo/protobuf/proto"
@@ -388,65 +388,65 @@ func init() {
func init() { proto.RegisterFile("ibcgo/core/connection/v1/tx.proto", fileDescriptor_296ab31199620d78) }
var fileDescriptor_296ab31199620d78 = []byte{
- // 913 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0xcd, 0x6e, 0xeb, 0x44,
- 0x14, 0x8e, 0xf3, 0x9f, 0x49, 0xe0, 0xde, 0x6b, 0x92, 0xd6, 0x84, 0xde, 0x38, 0xb1, 0x04, 0x0a,
- 0x8b, 0x6b, 0x93, 0x16, 0x81, 0x14, 0xc4, 0x22, 0xc9, 0x86, 0x0a, 0x55, 0x54, 0xa6, 0x02, 0x09,
- 0x21, 0x45, 0x89, 0x33, 0x75, 0xac, 0x24, 0x1e, 0xcb, 0x76, 0xa2, 0x5a, 0x48, 0x6c, 0x01, 0x89,
- 0x05, 0x2f, 0x80, 0xd4, 0xb7, 0xe0, 0x15, 0xba, 0xec, 0x92, 0x95, 0x85, 0xda, 0x05, 0xac, 0xfd,
- 0x04, 0xc8, 0x33, 0xb6, 0x63, 0x27, 0xb6, 0x54, 0x93, 0xb2, 0x9b, 0x33, 0xe7, 0x3b, 0xe7, 0xcc,
- 0x9c, 0xf3, 0x7d, 0xa3, 0x01, 0x1d, 0x65, 0x2a, 0xc9, 0x48, 0x90, 0x90, 0x0e, 0x05, 0x09, 0xa9,
- 0x2a, 0x94, 0x4c, 0x05, 0xa9, 0xc2, 0xa6, 0x27, 0x98, 0x37, 0xbc, 0xa6, 0x23, 0x13, 0xd1, 0x0c,
- 0x86, 0xf0, 0x2e, 0x84, 0xdf, 0x42, 0xf8, 0x4d, 0xaf, 0x59, 0x97, 0x91, 0x8c, 0x30, 0x48, 0x70,
- 0x57, 0x04, 0xdf, 0x7c, 0x57, 0x46, 0x48, 0x5e, 0x42, 0x01, 0x5b, 0xd3, 0xf5, 0xb5, 0x30, 0x51,
- 0x2d, 0xcf, 0x15, 0xa9, 0xb6, 0x54, 0xa0, 0x6a, 0xba, 0x95, 0xc8, 0xca, 0x83, 0x7c, 0x98, 0x78,
- 0xa0, 0x50, 0x6d, 0x0c, 0xe5, 0xfe, 0xc8, 0x82, 0xc6, 0x85, 0x21, 0x8f, 0x82, 0xfd, 0xaf, 0x34,
- 0xa8, 0x9e, 0xab, 0x8a, 0x49, 0xf7, 0x40, 0x85, 0x24, 0x1d, 0x2b, 0x33, 0x86, 0x6a, 0x53, 0xdd,
- 0xca, 0xb0, 0xee, 0xd8, 0xec, 0x4b, 0x6b, 0xb2, 0x5a, 0xf6, 0xb9, 0xc0, 0xc5, 0x89, 0x65, 0xb2,
- 0x3e, 0x9f, 0xd1, 0x97, 0xa0, 0x26, 0xa1, 0xb5, 0x6a, 0x42, 0x5d, 0x9b, 0xe8, 0xa6, 0xc5, 0x64,
- 0xdb, 0x54, 0xb7, 0x7a, 0xfa, 0x01, 0x9f, 0x74, 0x79, 0x7e, 0x14, 0x42, 0x0f, 0xf3, 0x77, 0x36,
- 0x9b, 0x11, 0x23, 0x19, 0xe8, 0xcf, 0x40, 0x69, 0x03, 0x75, 0x43, 0x41, 0x2a, 0x93, 0xc3, 0xc9,
- 0x3a, 0xc9, 0xc9, 0xbe, 0x21, 0x40, 0xd1, 0x8f, 0xa0, 0xfb, 0xa0, 0x36, 0x83, 0xcb, 0x89, 0x35,
- 0xd6, 0xa0, 0xae, 0xa0, 0x19, 0x93, 0x6f, 0x53, 0xdd, 0xfc, 0xf0, 0xd8, 0xb1, 0xd9, 0x77, 0xc8,
- 0x25, 0xc2, 0x5e, 0x4e, 0xac, 0x62, 0xf3, 0x12, 0x5b, 0xf4, 0x11, 0x28, 0x1a, 0x8a, 0xac, 0x42,
- 0x9d, 0x29, 0xb8, 0x57, 0x17, 0x3d, 0xab, 0x5f, 0xfe, 0xf9, 0x96, 0xcd, 0xfc, 0x73, 0xcb, 0x66,
- 0x38, 0x16, 0xbc, 0x8e, 0x6d, 0x9c, 0x08, 0x0d, 0x0d, 0xa9, 0x06, 0xe4, 0x7e, 0x2f, 0x81, 0xfa,
- 0x1e, 0xe2, 0x4a, 0xb7, 0xfe, 0x4b, 0x67, 0xbf, 0x05, 0x47, 0x9a, 0x0e, 0x37, 0x0a, 0x5a, 0x1b,
- 0xe3, 0xed, 0xad, 0xdd, 0xf8, 0x2c, 0x8e, 0xef, 0x38, 0x36, 0xfb, 0x9a, 0xc4, 0xc7, 0xe3, 0x38,
- 0xb1, 0xee, 0x3b, 0xb6, 0x07, 0xf2, 0x46, 0x46, 0x0a, 0x1a, 0xe6, 0xc4, 0x84, 0x5e, 0x97, 0xeb,
- 0x3c, 0xe1, 0x1f, 0xef, 0xf3, 0x8f, 0x1f, 0xa8, 0x56, 0xb8, 0x73, 0xe1, 0x18, 0x4e, 0xac, 0x12,
- 0xf3, 0x6b, 0xd7, 0xda, 0x23, 0x41, 0xfe, 0x60, 0x12, 0xec, 0xce, 0xb1, 0x90, 0x62, 0x8e, 0x37,
- 0xa0, 0x11, 0xce, 0x35, 0xf6, 0xb8, 0x61, 0x30, 0xc5, 0x76, 0xee, 0x49, 0x74, 0x1a, 0xb6, 0x1d,
- 0x9b, 0x3d, 0xf1, 0x6e, 0x1d, 0x97, 0x89, 0x13, 0xeb, 0xe1, 0x7d, 0x2f, 0xcc, 0xa0, 0xbf, 0x07,
- 0x35, 0x4d, 0x47, 0xe8, 0x7a, 0x3c, 0x87, 0x8a, 0x3c, 0x37, 0x99, 0x12, 0xee, 0xc3, 0x49, 0xa4,
- 0x20, 0x11, 0xed, 0xa6, 0xc7, 0x7f, 0x81, 0x31, 0xc3, 0xf7, 0xdc, 0xdb, 0x6f, 0xef, 0x15, 0x8e,
- 0xe7, 0xc4, 0x2a, 0x36, 0x09, 0x92, 0xfe, 0x18, 0x00, 0xe2, 0x55, 0x54, 0xc5, 0x64, 0xca, 0x6d,
- 0xaa, 0x5b, 0x1b, 0x36, 0x1c, 0x9b, 0x7d, 0x15, 0x8e, 0x74, 0x7d, 0x9c, 0x58, 0xc1, 0x06, 0xd6,
- 0x74, 0xdf, 0x3f, 0x13, 0xa9, 0xcc, 0x54, 0x70, 0xdc, 0xf1, 0x6e, 0x45, 0xe2, 0xf5, 0x2b, 0x8e,
- 0xb0, 0x45, 0x8f, 0xc0, 0x0b, 0xcf, 0xeb, 0xb2, 0x5b, 0x35, 0xd6, 0x06, 0x03, 0x70, 0x78, 0xd3,
- 0xb1, 0xd9, 0xa3, 0x48, 0xb8, 0x0f, 0xe0, 0xc4, 0xb7, 0x49, 0x06, 0x7f, 0x83, 0x9e, 0x83, 0x97,
- 0x81, 0xd7, 0x6f, 0x4c, 0xf5, 0x09, 0x8d, 0x61, 0xbd, 0xc6, 0x1c, 0xfb, 0x83, 0x88, 0xe6, 0xe0,
- 0xc4, 0x17, 0xc1, 0x96, 0xd7, 0xa0, 0xad, 0x80, 0x6b, 0x09, 0x02, 0x6e, 0x81, 0x93, 0x38, 0x79,
- 0x06, 0xfa, 0xfd, 0xbb, 0x10, 0xa3, 0xdf, 0x81, 0xb4, 0xa0, 0x3f, 0x07, 0x6f, 0x45, 0x35, 0x48,
- 0x34, 0xcc, 0x38, 0x36, 0x5b, 0x0f, 0xce, 0x17, 0x96, 0x5e, 0x4d, 0x0a, 0x4b, 0x4e, 0x02, 0xcd,
- 0x08, 0x91, 0xe2, 0xf4, 0xfc, 0xbe, 0x63, 0xb3, 0x9d, 0x18, 0xd2, 0xed, 0x24, 0x66, 0xc2, 0xce,
- 0x88, 0xae, 0x0f, 0x7a, 0x38, 0x77, 0x1f, 0x85, 0xfc, 0xc1, 0x8f, 0xc2, 0xae, 0x18, 0x0a, 0xcf,
- 0x2a, 0x86, 0x1e, 0x20, 0x1c, 0x1f, 0x9b, 0xba, 0xc5, 0x14, 0x31, 0x29, 0x43, 0x0f, 0x6a, 0xe0,
- 0xe2, 0xc4, 0x32, 0x5e, 0xbb, 0x6f, 0xf0, 0xae, 0x12, 0x4a, 0x87, 0x29, 0xa1, 0xfc, 0x2c, 0x4a,
- 0xa8, 0xfc, 0xcf, 0x4a, 0x00, 0x29, 0x94, 0x30, 0x90, 0x16, 0x81, 0x12, 0x7e, 0xcd, 0x02, 0x66,
- 0x0f, 0x30, 0x42, 0xea, 0xb5, 0xa2, 0xaf, 0x0e, 0x55, 0x43, 0x30, 0xbb, 0x89, 0xb4, 0xc0, 0xe4,
- 0x8f, 0x99, 0xdd, 0x44, 0x5a, 0xf8, 0xb3, 0x73, 0xf5, 0xb7, 0x4b, 0xa6, 0xdc, 0xb3, 0x92, 0x69,
- 0xdb, 0xae, 0x7c, 0x42, 0xbb, 0x38, 0xd0, 0x4e, 0xea, 0x86, 0xdf, 0xb2, 0xd3, 0x5f, 0xf2, 0x20,
- 0x77, 0x61, 0xc8, 0xf4, 0x8f, 0x80, 0x8e, 0xf9, 0x5b, 0x09, 0xc9, 0x62, 0x8c, 0xfd, 0x53, 0x34,
- 0x3f, 0x4d, 0x19, 0xe0, 0x9f, 0x83, 0xfe, 0x01, 0xbc, 0xda, 0xff, 0x80, 0xf0, 0x29, 0xb2, 0x5d,
- 0xe9, 0x56, 0xf3, 0x93, 0x74, 0xf8, 0xe4, 0xe2, 0xee, 0xf4, 0xd2, 0x14, 0x1f, 0x48, 0x8b, 0x54,
- 0xc5, 0x43, 0xa4, 0xa5, 0x7f, 0xa2, 0x40, 0x23, 0x9e, 0xb1, 0xa7, 0x29, 0x32, 0x7a, 0x31, 0xcd,
- 0x7e, 0xfa, 0x18, 0xff, 0x24, 0xc3, 0x2f, 0xef, 0x1e, 0x5a, 0xd4, 0xfd, 0x43, 0x8b, 0xfa, 0xeb,
- 0xa1, 0x45, 0xfd, 0xf6, 0xd8, 0xca, 0xdc, 0x3f, 0xb6, 0x32, 0x7f, 0x3e, 0xb6, 0x32, 0xdf, 0xf5,
- 0x64, 0xc5, 0x9c, 0xaf, 0xa7, 0xbc, 0x84, 0x56, 0x82, 0x84, 0x8c, 0x15, 0x32, 0x04, 0x65, 0x2a,
- 0xbd, 0xf1, 0xff, 0xee, 0x1f, 0x9d, 0xbd, 0x09, 0x7d, 0xdf, 0x4d, 0x4b, 0x83, 0xc6, 0xb4, 0x88,
- 0x5f, 0xdf, 0xb3, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x8d, 0x0e, 0x60, 0x2e, 0x75, 0x0c, 0x00,
- 0x00,
+ // 922 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0x4f, 0x8f, 0xdb, 0x44,
+ 0x14, 0x8f, 0xf3, 0x3f, 0x93, 0x40, 0x5b, 0x93, 0xec, 0x9a, 0xb0, 0x8d, 0x13, 0x4b, 0xa0, 0x70,
+ 0xa8, 0x4d, 0xb6, 0x08, 0x44, 0x10, 0x87, 0x24, 0x17, 0x7a, 0xa8, 0xa8, 0xdc, 0x0a, 0x24, 0x84,
+ 0x14, 0x25, 0xce, 0xac, 0x63, 0x25, 0xf1, 0x58, 0x1e, 0x27, 0xaa, 0x85, 0xc4, 0x15, 0x90, 0x38,
+ 0xf0, 0x05, 0x90, 0xfa, 0x2d, 0xf8, 0x0a, 0x3d, 0xf6, 0xc8, 0xc9, 0x42, 0xbb, 0x07, 0x38, 0xfb,
+ 0x13, 0x20, 0xcf, 0xd8, 0x8e, 0x9d, 0xd8, 0xd2, 0x9a, 0x6c, 0x6f, 0xf3, 0xe6, 0xfd, 0xde, 0x7b,
+ 0x33, 0xef, 0xfd, 0x7e, 0xa3, 0x01, 0x3d, 0x6d, 0xae, 0xa8, 0x48, 0x52, 0x90, 0x09, 0x25, 0x05,
+ 0xe9, 0x3a, 0x54, 0x2c, 0x0d, 0xe9, 0xd2, 0x6e, 0x20, 0x59, 0x2f, 0x45, 0xc3, 0x44, 0x16, 0x62,
+ 0x39, 0x02, 0x11, 0x3d, 0x88, 0xb8, 0x87, 0x88, 0xbb, 0x41, 0xbb, 0xa9, 0x22, 0x15, 0x11, 0x90,
+ 0xe4, 0xad, 0x28, 0xbe, 0xfd, 0xbe, 0x8a, 0x90, 0xba, 0x86, 0x12, 0xb1, 0xe6, 0xdb, 0x2b, 0x69,
+ 0xa6, 0xdb, 0xbe, 0x2b, 0x56, 0x6d, 0xad, 0x41, 0xdd, 0xf2, 0x2a, 0xd1, 0x95, 0x0f, 0xf9, 0x38,
+ 0xf5, 0x40, 0x91, 0xda, 0x04, 0x2a, 0xfc, 0x99, 0x07, 0xad, 0xa7, 0x58, 0x9d, 0x84, 0xfb, 0xdf,
+ 0x18, 0x50, 0x7f, 0xa2, 0x6b, 0x16, 0x3b, 0x00, 0x35, 0x9a, 0x74, 0xaa, 0x2d, 0x38, 0xa6, 0xcb,
+ 0xf4, 0x6b, 0xe3, 0xa6, 0xeb, 0xf0, 0xf7, 0xed, 0xd9, 0x66, 0x3d, 0x14, 0x42, 0x97, 0x20, 0x57,
+ 0xe9, 0xfa, 0xc9, 0x82, 0x7d, 0x06, 0x1a, 0x0a, 0xda, 0xea, 0x16, 0x34, 0x8d, 0x99, 0x69, 0xd9,
+ 0x5c, 0xbe, 0xcb, 0xf4, 0xeb, 0x97, 0x1f, 0x89, 0x69, 0x97, 0x17, 0x27, 0x11, 0xf4, 0xb8, 0xf8,
+ 0xda, 0xe1, 0x73, 0x72, 0x2c, 0x03, 0xfb, 0x25, 0xa8, 0xec, 0xa0, 0x89, 0x35, 0xa4, 0x73, 0x05,
+ 0x92, 0xac, 0x97, 0x9e, 0xec, 0x5b, 0x0a, 0x94, 0x83, 0x08, 0x76, 0x08, 0x1a, 0x0b, 0xb8, 0x9e,
+ 0xd9, 0x53, 0x03, 0x9a, 0x1a, 0x5a, 0x70, 0xc5, 0x2e, 0xd3, 0x2f, 0x8e, 0xcf, 0x5d, 0x87, 0x7f,
+ 0x8f, 0x5e, 0x22, 0xea, 0x15, 0xe4, 0x3a, 0x31, 0x9f, 0x11, 0x8b, 0x3d, 0x03, 0x65, 0xac, 0xa9,
+ 0x3a, 0x34, 0xb9, 0x92, 0x77, 0x75, 0xd9, 0xb7, 0x86, 0xd5, 0x5f, 0x5e, 0xf1, 0xb9, 0x7f, 0x5f,
+ 0xf1, 0x39, 0x81, 0x07, 0x0f, 0x13, 0x1b, 0x27, 0x43, 0x6c, 0x20, 0x1d, 0x43, 0xe1, 0x8f, 0x0a,
+ 0x68, 0x1e, 0x21, 0x5e, 0x98, 0xf6, 0xff, 0xe9, 0xec, 0x77, 0xe0, 0xcc, 0x30, 0xe1, 0x4e, 0x43,
+ 0x5b, 0x3c, 0xdd, 0xdf, 0xda, 0x8b, 0xcf, 0x93, 0xf8, 0x9e, 0xeb, 0xf0, 0x0f, 0x69, 0x7c, 0x32,
+ 0x4e, 0x90, 0x9b, 0x81, 0x63, 0x7f, 0x20, 0x7f, 0x64, 0xb4, 0x20, 0xb6, 0x66, 0x16, 0xf4, 0xbb,
+ 0xdc, 0x14, 0x29, 0xff, 0xc4, 0x80, 0x7f, 0xe2, 0x48, 0xb7, 0xa3, 0x9d, 0x8b, 0xc6, 0x08, 0x72,
+ 0x9d, 0x9a, 0xcf, 0x3d, 0xeb, 0x88, 0x04, 0xc5, 0x93, 0x49, 0x70, 0x38, 0xc7, 0x52, 0x86, 0x39,
+ 0xbe, 0x04, 0xad, 0x68, 0xae, 0xa9, 0xcf, 0x0d, 0xcc, 0x95, 0xbb, 0x85, 0x5b, 0xd1, 0x69, 0xdc,
+ 0x75, 0x1d, 0xfe, 0xc2, 0xbf, 0x75, 0x52, 0x26, 0x41, 0x6e, 0x46, 0xf7, 0xfd, 0x30, 0xcc, 0xfe,
+ 0x00, 0x1a, 0x86, 0x89, 0xd0, 0xd5, 0x74, 0x09, 0x35, 0x75, 0x69, 0x71, 0x15, 0xd2, 0x87, 0x8b,
+ 0x58, 0x41, 0x2a, 0xda, 0xdd, 0x40, 0xfc, 0x9a, 0x60, 0xc6, 0x1f, 0x78, 0xb7, 0xdf, 0xdf, 0x2b,
+ 0x1a, 0x2f, 0xc8, 0x75, 0x62, 0x52, 0x24, 0xfb, 0x29, 0x00, 0xd4, 0xab, 0xe9, 0x9a, 0xc5, 0x55,
+ 0xbb, 0x4c, 0xbf, 0x31, 0x6e, 0xb9, 0x0e, 0xff, 0x20, 0x1a, 0xe9, 0xf9, 0x04, 0xb9, 0x46, 0x0c,
+ 0xa2, 0xe9, 0x61, 0x70, 0x26, 0x5a, 0x99, 0xab, 0x91, 0xb8, 0xf3, 0xc3, 0x8a, 0xd4, 0x1b, 0x54,
+ 0x9c, 0x10, 0x8b, 0x9d, 0x80, 0x7b, 0xbe, 0xd7, 0x63, 0xb7, 0x8e, 0xb7, 0x98, 0x03, 0x24, 0xbc,
+ 0xed, 0x3a, 0xfc, 0x59, 0x2c, 0x3c, 0x00, 0x08, 0xf2, 0xbb, 0x34, 0x43, 0xb0, 0xc1, 0x2e, 0xc1,
+ 0xfd, 0xd0, 0x1b, 0x34, 0xa6, 0x7e, 0x8b, 0xc6, 0xf0, 0x7e, 0x63, 0xce, 0x83, 0x41, 0xc4, 0x73,
+ 0x08, 0xf2, 0xbd, 0x70, 0xcb, 0x6f, 0xd0, 0x5e, 0xc0, 0x8d, 0x14, 0x01, 0x77, 0xc0, 0x45, 0x92,
+ 0x3c, 0x43, 0xfd, 0xfe, 0x53, 0x4a, 0xd0, 0xef, 0x48, 0x59, 0xb1, 0x5f, 0x81, 0x77, 0xe2, 0x1a,
+ 0xa4, 0x1a, 0xe6, 0x5c, 0x87, 0x6f, 0x86, 0xe7, 0x8b, 0x4a, 0xaf, 0xa1, 0x44, 0x25, 0xa7, 0x80,
+ 0x76, 0x8c, 0x48, 0x49, 0x7a, 0xfe, 0xd0, 0x75, 0xf8, 0x5e, 0x02, 0xe9, 0x0e, 0x12, 0x73, 0x51,
+ 0x67, 0x4c, 0xd7, 0x27, 0x3d, 0x9c, 0x87, 0x8f, 0x42, 0xf1, 0xe4, 0x47, 0xe1, 0x50, 0x0c, 0xa5,
+ 0x3b, 0x15, 0xc3, 0x00, 0x50, 0x8e, 0x4f, 0x2d, 0xd3, 0xe6, 0xca, 0x84, 0x94, 0x91, 0x07, 0x35,
+ 0x74, 0x09, 0x72, 0x95, 0xac, 0xbd, 0x37, 0xf8, 0x50, 0x09, 0x95, 0xd3, 0x94, 0x50, 0xbd, 0x13,
+ 0x25, 0xd4, 0xde, 0xb2, 0x12, 0x40, 0x06, 0x25, 0x8c, 0x94, 0x55, 0xa8, 0x84, 0xdf, 0xf2, 0x80,
+ 0x3b, 0x02, 0x4c, 0x90, 0x7e, 0xa5, 0x99, 0x9b, 0x53, 0xd5, 0x10, 0xce, 0x6e, 0xa6, 0xac, 0x08,
+ 0xf9, 0x13, 0x66, 0x37, 0x53, 0x56, 0xc1, 0xec, 0x3c, 0xfd, 0x1d, 0x92, 0xa9, 0x70, 0xa7, 0x64,
+ 0xda, 0xb7, 0xab, 0x98, 0xd2, 0x2e, 0x01, 0x74, 0xd3, 0xba, 0x11, 0xb4, 0xec, 0xf2, 0xd7, 0x22,
+ 0x28, 0x3c, 0xc5, 0x2a, 0xfb, 0x13, 0x60, 0x13, 0xfe, 0x56, 0x52, 0xba, 0x18, 0x13, 0xff, 0x14,
+ 0xed, 0xcf, 0x33, 0x06, 0x04, 0xe7, 0x60, 0x7f, 0x04, 0x0f, 0x8e, 0x3f, 0x20, 0x62, 0x86, 0x6c,
+ 0x2f, 0x4c, 0xbb, 0xfd, 0x59, 0x36, 0x7c, 0x7a, 0x71, 0x6f, 0x7a, 0x59, 0x8a, 0x8f, 0x94, 0x55,
+ 0xa6, 0xe2, 0x11, 0xd2, 0xb2, 0x3f, 0x33, 0xa0, 0x95, 0xcc, 0xd8, 0xcb, 0x0c, 0x19, 0xfd, 0x98,
+ 0xf6, 0x30, 0x7b, 0x4c, 0x70, 0x92, 0xf1, 0xf3, 0xd7, 0xd7, 0x1d, 0xe6, 0xcd, 0x75, 0x87, 0xf9,
+ 0xfb, 0xba, 0xc3, 0xfc, 0x7e, 0xd3, 0xc9, 0xbd, 0xb9, 0xe9, 0xe4, 0xfe, 0xba, 0xe9, 0xe4, 0xbe,
+ 0xff, 0x42, 0xd5, 0xac, 0xe5, 0x76, 0x2e, 0x2a, 0x68, 0x23, 0x29, 0x08, 0x6f, 0x10, 0x96, 0xb4,
+ 0xb9, 0xf2, 0x48, 0x45, 0xd2, 0x06, 0x2d, 0xb6, 0x6b, 0x88, 0xe9, 0x1f, 0xfe, 0x93, 0xc7, 0x8f,
+ 0x22, 0xdf, 0x78, 0xcb, 0x36, 0x20, 0x9e, 0x97, 0xc9, 0x2b, 0xfc, 0xf8, 0xbf, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0x49, 0xfc, 0x07, 0xd9, 0x7d, 0x0c, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
diff --git a/core/03-connection/types/version.go b/modules/core/03-connection/types/version.go
similarity index 99%
rename from core/03-connection/types/version.go
rename to modules/core/03-connection/types/version.go
index 97f1a11a..cf0d29dd 100644
--- a/core/03-connection/types/version.go
+++ b/modules/core/03-connection/types/version.go
@@ -4,7 +4,7 @@ import (
"strings"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- "github.com/cosmos/ibc-go/core/exported"
+ "github.com/cosmos/ibc-go/modules/core/exported"
)
var (
diff --git a/core/03-connection/types/version_test.go b/modules/core/03-connection/types/version_test.go
similarity index 98%
rename from core/03-connection/types/version_test.go
rename to modules/core/03-connection/types/version_test.go
index cf0e73da..d50d6929 100644
--- a/core/03-connection/types/version_test.go
+++ b/modules/core/03-connection/types/version_test.go
@@ -5,8 +5,8 @@ import (
"github.com/stretchr/testify/require"
- "github.com/cosmos/ibc-go/core/03-connection/types"
- "github.com/cosmos/ibc-go/core/exported"
+ "github.com/cosmos/ibc-go/modules/core/03-connection/types"
+ "github.com/cosmos/ibc-go/modules/core/exported"
ibctesting "github.com/cosmos/ibc-go/testing"
)
diff --git a/core/04-channel/client/cli/cli.go b/modules/core/04-channel/client/cli/cli.go
similarity index 96%
rename from core/04-channel/client/cli/cli.go
rename to modules/core/04-channel/client/cli/cli.go
index 9c0ccb42..2786f233 100644
--- a/core/04-channel/client/cli/cli.go
+++ b/modules/core/04-channel/client/cli/cli.go
@@ -4,7 +4,7 @@ import (
"github.com/spf13/cobra"
"github.com/cosmos/cosmos-sdk/client"
- "github.com/cosmos/ibc-go/core/04-channel/types"
+ "github.com/cosmos/ibc-go/modules/core/04-channel/types"
)
// GetQueryCmd returns the query commands for IBC channels
diff --git a/core/04-channel/client/cli/query.go b/modules/core/04-channel/client/cli/query.go
similarity index 98%
rename from core/04-channel/client/cli/query.go
rename to modules/core/04-channel/client/cli/query.go
index e86e20c3..145cbd65 100644
--- a/core/04-channel/client/cli/query.go
+++ b/modules/core/04-channel/client/cli/query.go
@@ -9,9 +9,9 @@ import (
"github.com/cosmos/cosmos-sdk/client"
"github.com/cosmos/cosmos-sdk/client/flags"
"github.com/cosmos/cosmos-sdk/version"
- "github.com/cosmos/ibc-go/core/04-channel/client/utils"
- "github.com/cosmos/ibc-go/core/04-channel/types"
- host "github.com/cosmos/ibc-go/core/24-host"
+ "github.com/cosmos/ibc-go/modules/core/04-channel/client/utils"
+ "github.com/cosmos/ibc-go/modules/core/04-channel/types"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
)
const (
diff --git a/core/04-channel/client/cli/tx.go b/modules/core/04-channel/client/cli/tx.go
similarity index 96%
rename from core/04-channel/client/cli/tx.go
rename to modules/core/04-channel/client/cli/tx.go
index f3ebaadc..fb6adede 100644
--- a/core/04-channel/client/cli/tx.go
+++ b/modules/core/04-channel/client/cli/tx.go
@@ -10,10 +10,10 @@ import (
"github.com/cosmos/cosmos-sdk/client/flags"
"github.com/cosmos/cosmos-sdk/client/tx"
"github.com/cosmos/cosmos-sdk/types/msgservice"
- ibctransfertypes "github.com/cosmos/ibc-go/apps/transfer/types"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- connectionutils "github.com/cosmos/ibc-go/core/03-connection/client/utils"
- "github.com/cosmos/ibc-go/core/04-channel/types"
+ ibctransfertypes "github.com/cosmos/ibc-go/modules/apps/transfer/types"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ connectionutils "github.com/cosmos/ibc-go/modules/core/03-connection/client/utils"
+ "github.com/cosmos/ibc-go/modules/core/04-channel/types"
)
// IBC Channel flags
diff --git a/core/04-channel/client/utils/utils.go b/modules/core/04-channel/client/utils/utils.go
similarity index 96%
rename from core/04-channel/client/utils/utils.go
rename to modules/core/04-channel/client/utils/utils.go
index ab58bea7..f1384ad7 100644
--- a/core/04-channel/client/utils/utils.go
+++ b/modules/core/04-channel/client/utils/utils.go
@@ -7,12 +7,12 @@ import (
"github.com/cosmos/cosmos-sdk/client"
"github.com/cosmos/cosmos-sdk/codec"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- clientutils "github.com/cosmos/ibc-go/core/02-client/client/utils"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- "github.com/cosmos/ibc-go/core/04-channel/types"
- host "github.com/cosmos/ibc-go/core/24-host"
- ibcclient "github.com/cosmos/ibc-go/core/client"
- "github.com/cosmos/ibc-go/core/exported"
+ clientutils "github.com/cosmos/ibc-go/modules/core/02-client/client/utils"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ "github.com/cosmos/ibc-go/modules/core/04-channel/types"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
+ ibcclient "github.com/cosmos/ibc-go/modules/core/client"
+ "github.com/cosmos/ibc-go/modules/core/exported"
)
// QueryChannel returns a channel end.
diff --git a/core/04-channel/genesis.go b/modules/core/04-channel/genesis.go
similarity index 93%
rename from core/04-channel/genesis.go
rename to modules/core/04-channel/genesis.go
index 9564e581..b86a2112 100644
--- a/core/04-channel/genesis.go
+++ b/modules/core/04-channel/genesis.go
@@ -2,8 +2,8 @@ package channel
import (
sdk "github.com/cosmos/cosmos-sdk/types"
- "github.com/cosmos/ibc-go/core/04-channel/keeper"
- "github.com/cosmos/ibc-go/core/04-channel/types"
+ "github.com/cosmos/ibc-go/modules/core/04-channel/keeper"
+ "github.com/cosmos/ibc-go/modules/core/04-channel/types"
)
// InitGenesis initializes the ibc channel submodule's state from a provided genesis
diff --git a/core/04-channel/handler.go b/modules/core/04-channel/handler.go
similarity index 98%
rename from core/04-channel/handler.go
rename to modules/core/04-channel/handler.go
index 59d29f09..2c222a2e 100644
--- a/core/04-channel/handler.go
+++ b/modules/core/04-channel/handler.go
@@ -4,8 +4,8 @@ import (
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"
- "github.com/cosmos/ibc-go/core/04-channel/keeper"
- "github.com/cosmos/ibc-go/core/04-channel/types"
+ "github.com/cosmos/ibc-go/modules/core/04-channel/keeper"
+ "github.com/cosmos/ibc-go/modules/core/04-channel/types"
)
// HandleMsgChannelOpenInit defines the sdk.Handler for MsgChannelOpenInit
diff --git a/core/04-channel/keeper/grpc_query.go b/modules/core/04-channel/keeper/grpc_query.go
similarity index 98%
rename from core/04-channel/keeper/grpc_query.go
rename to modules/core/04-channel/keeper/grpc_query.go
index 689d4f28..d7c29a4f 100644
--- a/core/04-channel/keeper/grpc_query.go
+++ b/modules/core/04-channel/keeper/grpc_query.go
@@ -12,10 +12,10 @@ import (
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
"github.com/cosmos/cosmos-sdk/types/query"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- connectiontypes "github.com/cosmos/ibc-go/core/03-connection/types"
- "github.com/cosmos/ibc-go/core/04-channel/types"
- host "github.com/cosmos/ibc-go/core/24-host"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ connectiontypes "github.com/cosmos/ibc-go/modules/core/03-connection/types"
+ "github.com/cosmos/ibc-go/modules/core/04-channel/types"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
)
var _ types.QueryServer = (*Keeper)(nil)
diff --git a/core/04-channel/keeper/grpc_query_test.go b/modules/core/04-channel/keeper/grpc_query_test.go
similarity index 99%
rename from core/04-channel/keeper/grpc_query_test.go
rename to modules/core/04-channel/keeper/grpc_query_test.go
index 9e0a7696..c8af41fe 100644
--- a/core/04-channel/keeper/grpc_query_test.go
+++ b/modules/core/04-channel/keeper/grpc_query_test.go
@@ -5,10 +5,10 @@ import (
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/cosmos/cosmos-sdk/types/query"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- connectiontypes "github.com/cosmos/ibc-go/core/03-connection/types"
- "github.com/cosmos/ibc-go/core/04-channel/types"
- "github.com/cosmos/ibc-go/core/exported"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ connectiontypes "github.com/cosmos/ibc-go/modules/core/03-connection/types"
+ "github.com/cosmos/ibc-go/modules/core/04-channel/types"
+ "github.com/cosmos/ibc-go/modules/core/exported"
ibctesting "github.com/cosmos/ibc-go/testing"
)
diff --git a/core/04-channel/keeper/handshake.go b/modules/core/04-channel/keeper/handshake.go
similarity index 98%
rename from core/04-channel/keeper/handshake.go
rename to modules/core/04-channel/keeper/handshake.go
index a3f8a238..2dcfdbed 100644
--- a/core/04-channel/keeper/handshake.go
+++ b/modules/core/04-channel/keeper/handshake.go
@@ -5,11 +5,11 @@ import (
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"
- connectiontypes "github.com/cosmos/ibc-go/core/03-connection/types"
- "github.com/cosmos/ibc-go/core/04-channel/types"
- porttypes "github.com/cosmos/ibc-go/core/05-port/types"
- host "github.com/cosmos/ibc-go/core/24-host"
- "github.com/cosmos/ibc-go/core/exported"
+ connectiontypes "github.com/cosmos/ibc-go/modules/core/03-connection/types"
+ "github.com/cosmos/ibc-go/modules/core/04-channel/types"
+ porttypes "github.com/cosmos/ibc-go/modules/core/05-port/types"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
+ "github.com/cosmos/ibc-go/modules/core/exported"
)
// CounterpartyHops returns the connection hops of the counterparty channel.
diff --git a/core/04-channel/keeper/handshake_test.go b/modules/core/04-channel/keeper/handshake_test.go
similarity index 99%
rename from core/04-channel/keeper/handshake_test.go
rename to modules/core/04-channel/keeper/handshake_test.go
index 64a49f7e..a79de391 100644
--- a/core/04-channel/keeper/handshake_test.go
+++ b/modules/core/04-channel/keeper/handshake_test.go
@@ -4,11 +4,11 @@ import (
"fmt"
capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- connectiontypes "github.com/cosmos/ibc-go/core/03-connection/types"
- "github.com/cosmos/ibc-go/core/04-channel/types"
- host "github.com/cosmos/ibc-go/core/24-host"
- "github.com/cosmos/ibc-go/core/exported"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ connectiontypes "github.com/cosmos/ibc-go/modules/core/03-connection/types"
+ "github.com/cosmos/ibc-go/modules/core/04-channel/types"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
+ "github.com/cosmos/ibc-go/modules/core/exported"
ibctesting "github.com/cosmos/ibc-go/testing"
)
diff --git a/core/04-channel/keeper/keeper.go b/modules/core/04-channel/keeper/keeper.go
similarity index 97%
rename from core/04-channel/keeper/keeper.go
rename to modules/core/04-channel/keeper/keeper.go
index 930a6ec4..3ffe8ed4 100644
--- a/core/04-channel/keeper/keeper.go
+++ b/modules/core/04-channel/keeper/keeper.go
@@ -12,12 +12,12 @@ import (
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
capabilitykeeper "github.com/cosmos/cosmos-sdk/x/capability/keeper"
capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- connectiontypes "github.com/cosmos/ibc-go/core/03-connection/types"
- "github.com/cosmos/ibc-go/core/04-channel/types"
- porttypes "github.com/cosmos/ibc-go/core/05-port/types"
- host "github.com/cosmos/ibc-go/core/24-host"
- "github.com/cosmos/ibc-go/core/exported"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ connectiontypes "github.com/cosmos/ibc-go/modules/core/03-connection/types"
+ "github.com/cosmos/ibc-go/modules/core/04-channel/types"
+ porttypes "github.com/cosmos/ibc-go/modules/core/05-port/types"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
+ "github.com/cosmos/ibc-go/modules/core/exported"
)
// Keeper defines the IBC channel keeper
diff --git a/core/04-channel/keeper/keeper_test.go b/modules/core/04-channel/keeper/keeper_test.go
similarity index 99%
rename from core/04-channel/keeper/keeper_test.go
rename to modules/core/04-channel/keeper/keeper_test.go
index 7bc07190..531cbb50 100644
--- a/core/04-channel/keeper/keeper_test.go
+++ b/modules/core/04-channel/keeper/keeper_test.go
@@ -5,8 +5,8 @@ import (
"github.com/stretchr/testify/suite"
- "github.com/cosmos/ibc-go/core/04-channel/types"
- "github.com/cosmos/ibc-go/core/exported"
+ "github.com/cosmos/ibc-go/modules/core/04-channel/types"
+ "github.com/cosmos/ibc-go/modules/core/exported"
ibctesting "github.com/cosmos/ibc-go/testing"
)
diff --git a/core/04-channel/keeper/packet.go b/modules/core/04-channel/keeper/packet.go
similarity index 98%
rename from core/04-channel/keeper/packet.go
rename to modules/core/04-channel/keeper/packet.go
index 3cedbfe4..957a5a2f 100644
--- a/core/04-channel/keeper/packet.go
+++ b/modules/core/04-channel/keeper/packet.go
@@ -8,11 +8,11 @@ import (
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- connectiontypes "github.com/cosmos/ibc-go/core/03-connection/types"
- "github.com/cosmos/ibc-go/core/04-channel/types"
- host "github.com/cosmos/ibc-go/core/24-host"
- "github.com/cosmos/ibc-go/core/exported"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ connectiontypes "github.com/cosmos/ibc-go/modules/core/03-connection/types"
+ "github.com/cosmos/ibc-go/modules/core/04-channel/types"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
+ "github.com/cosmos/ibc-go/modules/core/exported"
)
// SendPacket is called by a module in order to send an IBC packet on a channel
diff --git a/core/04-channel/keeper/packet_test.go b/modules/core/04-channel/keeper/packet_test.go
similarity index 99%
rename from core/04-channel/keeper/packet_test.go
rename to modules/core/04-channel/keeper/packet_test.go
index 91743d61..aa9bc316 100644
--- a/core/04-channel/keeper/packet_test.go
+++ b/modules/core/04-channel/keeper/packet_test.go
@@ -4,11 +4,11 @@ import (
"fmt"
capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- "github.com/cosmos/ibc-go/core/04-channel/types"
- host "github.com/cosmos/ibc-go/core/24-host"
- "github.com/cosmos/ibc-go/core/exported"
- ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ "github.com/cosmos/ibc-go/modules/core/04-channel/types"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
+ "github.com/cosmos/ibc-go/modules/core/exported"
+ ibctmtypes "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types"
ibctesting "github.com/cosmos/ibc-go/testing"
ibcmock "github.com/cosmos/ibc-go/testing/mock"
)
diff --git a/core/04-channel/keeper/timeout.go b/modules/core/04-channel/keeper/timeout.go
similarity index 97%
rename from core/04-channel/keeper/timeout.go
rename to modules/core/04-channel/keeper/timeout.go
index 2b6e65ea..1f6357d3 100644
--- a/core/04-channel/keeper/timeout.go
+++ b/modules/core/04-channel/keeper/timeout.go
@@ -7,10 +7,10 @@ import (
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"
- connectiontypes "github.com/cosmos/ibc-go/core/03-connection/types"
- "github.com/cosmos/ibc-go/core/04-channel/types"
- host "github.com/cosmos/ibc-go/core/24-host"
- "github.com/cosmos/ibc-go/core/exported"
+ connectiontypes "github.com/cosmos/ibc-go/modules/core/03-connection/types"
+ "github.com/cosmos/ibc-go/modules/core/04-channel/types"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
+ "github.com/cosmos/ibc-go/modules/core/exported"
)
// TimeoutPacket is called by a module which originally attempted to send a
diff --git a/core/04-channel/keeper/timeout_test.go b/modules/core/04-channel/keeper/timeout_test.go
similarity index 98%
rename from core/04-channel/keeper/timeout_test.go
rename to modules/core/04-channel/keeper/timeout_test.go
index b7a34c73..4c286690 100644
--- a/core/04-channel/keeper/timeout_test.go
+++ b/modules/core/04-channel/keeper/timeout_test.go
@@ -4,10 +4,10 @@ import (
"fmt"
capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- "github.com/cosmos/ibc-go/core/04-channel/types"
- host "github.com/cosmos/ibc-go/core/24-host"
- "github.com/cosmos/ibc-go/core/exported"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ "github.com/cosmos/ibc-go/modules/core/04-channel/types"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
+ "github.com/cosmos/ibc-go/modules/core/exported"
ibctesting "github.com/cosmos/ibc-go/testing"
)
diff --git a/core/04-channel/module.go b/modules/core/04-channel/module.go
similarity index 83%
rename from core/04-channel/module.go
rename to modules/core/04-channel/module.go
index 6a9aceac..bb002607 100644
--- a/core/04-channel/module.go
+++ b/modules/core/04-channel/module.go
@@ -4,8 +4,8 @@ import (
"github.com/gogo/protobuf/grpc"
"github.com/spf13/cobra"
- "github.com/cosmos/ibc-go/core/04-channel/client/cli"
- "github.com/cosmos/ibc-go/core/04-channel/types"
+ "github.com/cosmos/ibc-go/modules/core/04-channel/client/cli"
+ "github.com/cosmos/ibc-go/modules/core/04-channel/types"
)
// Name returns the IBC channel ICS name.
diff --git a/core/04-channel/simulation/decoder.go b/modules/core/04-channel/simulation/decoder.go
similarity index 93%
rename from core/04-channel/simulation/decoder.go
rename to modules/core/04-channel/simulation/decoder.go
index efdcf589..d96e95f3 100644
--- a/core/04-channel/simulation/decoder.go
+++ b/modules/core/04-channel/simulation/decoder.go
@@ -7,8 +7,8 @@ import (
"github.com/cosmos/cosmos-sdk/codec"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/cosmos/cosmos-sdk/types/kv"
- "github.com/cosmos/ibc-go/core/04-channel/types"
- host "github.com/cosmos/ibc-go/core/24-host"
+ "github.com/cosmos/ibc-go/modules/core/04-channel/types"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
)
// NewDecodeStore returns a decoder function closure that unmarshals the KVPair's
diff --git a/core/04-channel/simulation/decoder_test.go b/modules/core/04-channel/simulation/decoder_test.go
similarity index 92%
rename from core/04-channel/simulation/decoder_test.go
rename to modules/core/04-channel/simulation/decoder_test.go
index d94b5606..5b6b83d2 100644
--- a/core/04-channel/simulation/decoder_test.go
+++ b/modules/core/04-channel/simulation/decoder_test.go
@@ -9,9 +9,9 @@ import (
"github.com/cosmos/ibc-go/testing/simapp"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/cosmos/cosmos-sdk/types/kv"
- "github.com/cosmos/ibc-go/core/04-channel/simulation"
- "github.com/cosmos/ibc-go/core/04-channel/types"
- host "github.com/cosmos/ibc-go/core/24-host"
+ "github.com/cosmos/ibc-go/modules/core/04-channel/simulation"
+ "github.com/cosmos/ibc-go/modules/core/04-channel/types"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
)
func TestDecodeStore(t *testing.T) {
diff --git a/core/04-channel/simulation/genesis.go b/modules/core/04-channel/simulation/genesis.go
similarity index 83%
rename from core/04-channel/simulation/genesis.go
rename to modules/core/04-channel/simulation/genesis.go
index 10850758..7e48f59b 100644
--- a/core/04-channel/simulation/genesis.go
+++ b/modules/core/04-channel/simulation/genesis.go
@@ -4,7 +4,7 @@ import (
"math/rand"
simtypes "github.com/cosmos/cosmos-sdk/types/simulation"
- "github.com/cosmos/ibc-go/core/04-channel/types"
+ "github.com/cosmos/ibc-go/modules/core/04-channel/types"
)
// GenChannelGenesis returns the default channel genesis state.
diff --git a/core/04-channel/types/acknowledgement.go b/modules/core/04-channel/types/acknowledgement.go
similarity index 100%
rename from core/04-channel/types/acknowledgement.go
rename to modules/core/04-channel/types/acknowledgement.go
diff --git a/core/04-channel/types/acknowledgement_test.go b/modules/core/04-channel/types/acknowledgement_test.go
similarity index 94%
rename from core/04-channel/types/acknowledgement_test.go
rename to modules/core/04-channel/types/acknowledgement_test.go
index 6906540b..fa286d06 100644
--- a/core/04-channel/types/acknowledgement_test.go
+++ b/modules/core/04-channel/types/acknowledgement_test.go
@@ -1,6 +1,6 @@
package types_test
-import "github.com/cosmos/ibc-go/core/04-channel/types"
+import "github.com/cosmos/ibc-go/modules/core/04-channel/types"
// tests acknowledgement.ValidateBasic and acknowledgement.GetBytes
func (suite TypesTestSuite) TestAcknowledgement() {
diff --git a/core/04-channel/types/channel.go b/modules/core/04-channel/types/channel.go
similarity index 97%
rename from core/04-channel/types/channel.go
rename to modules/core/04-channel/types/channel.go
index a902d46e..e1ee0be7 100644
--- a/core/04-channel/types/channel.go
+++ b/modules/core/04-channel/types/channel.go
@@ -2,8 +2,8 @@ package types
import (
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- host "github.com/cosmos/ibc-go/core/24-host"
- "github.com/cosmos/ibc-go/core/exported"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
+ "github.com/cosmos/ibc-go/modules/core/exported"
)
var (
diff --git a/core/04-channel/types/channel.pb.go b/modules/core/04-channel/types/channel.pb.go
similarity index 90%
rename from core/04-channel/types/channel.pb.go
rename to modules/core/04-channel/types/channel.pb.go
index dada2008..88da8ebd 100644
--- a/core/04-channel/types/channel.pb.go
+++ b/modules/core/04-channel/types/channel.pb.go
@@ -5,7 +5,7 @@ package types
import (
fmt "fmt"
- types "github.com/cosmos/ibc-go/core/02-client/types"
+ types "github.com/cosmos/ibc-go/modules/core/02-client/types"
_ "github.com/gogo/protobuf/gogoproto"
proto "github.com/gogo/protobuf/proto"
io "io"
@@ -457,64 +457,65 @@ func init() {
}
var fileDescriptor_3a7a8797f9808eee = []byte{
- // 904 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x55, 0xbd, 0x6e, 0xe3, 0x46,
- 0x10, 0x16, 0x65, 0xea, 0x6f, 0x64, 0xc9, 0xf2, 0x26, 0xd6, 0x31, 0x8c, 0x4f, 0x54, 0x78, 0x29,
- 0x8c, 0x0b, 0x4e, 0x8a, 0x9d, 0x43, 0x12, 0x5c, 0x15, 0x4b, 0xe2, 0x41, 0x04, 0x2e, 0x92, 0x41,
- 0xcb, 0x45, 0xae, 0x11, 0x28, 0x72, 0x23, 0x11, 0x27, 0x71, 0x15, 0x72, 0x65, 0xc3, 0x65, 0xba,
- 0x83, 0xaa, 0xbc, 0x80, 0x80, 0x00, 0x01, 0xd2, 0xe4, 0x01, 0xf2, 0x0a, 0x57, 0x5e, 0x99, 0x8a,
- 0x08, 0xec, 0x3a, 0x8d, 0x9e, 0x20, 0xe0, 0x2e, 0xa9, 0xbf, 0xcb, 0x4f, 0x97, 0x2a, 0x95, 0x76,
- 0xe6, 0xfb, 0x66, 0xe6, 0xdb, 0x99, 0x11, 0x17, 0x1e, 0x39, 0x03, 0x6b, 0x48, 0xea, 0x16, 0xf1,
- 0x70, 0xdd, 0x1a, 0x99, 0xae, 0x8b, 0xc7, 0xf5, 0xeb, 0xd3, 0xf8, 0x58, 0x9b, 0x7a, 0x84, 0x12,
- 0x74, 0xc4, 0x48, 0xb5, 0x90, 0x54, 0x8b, 0x91, 0xeb, 0x53, 0xf9, 0xfd, 0x21, 0x19, 0x12, 0xc6,
- 0xa8, 0x87, 0x27, 0x4e, 0x96, 0x3f, 0xda, 0xcc, 0x38, 0x76, 0xb0, 0x4b, 0x59, 0x42, 0x76, 0xe2,
- 0x14, 0xf5, 0x97, 0x24, 0x64, 0x9a, 0x3c, 0x0f, 0x3a, 0x83, 0x94, 0x4f, 0x4d, 0x8a, 0x25, 0xa1,
- 0x2a, 0x9c, 0x14, 0xcf, 0x8e, 0x6b, 0x7f, 0x59, 0xab, 0x76, 0x19, 0x72, 0x0c, 0x4e, 0x45, 0x5f,
- 0x42, 0x96, 0x78, 0x36, 0xf6, 0x1c, 0x77, 0x28, 0x25, 0xff, 0x31, 0xac, 0x1b, 0xd2, 0x8c, 0x15,
- 0x1b, 0x7d, 0x0d, 0xfb, 0x16, 0x99, 0xb9, 0x14, 0x7b, 0x53, 0xd3, 0xa3, 0xb7, 0xd2, 0x5e, 0x55,
- 0x38, 0xc9, 0x9f, 0x3d, 0xfa, 0x9b, 0xe8, 0xe6, 0x06, 0xb5, 0x21, 0xbe, 0x09, 0x94, 0x84, 0xb1,
- 0x15, 0x8e, 0x9a, 0x70, 0x60, 0x11, 0xd7, 0xc5, 0x16, 0x75, 0x88, 0xdb, 0x1f, 0x91, 0xa9, 0x2f,
- 0x89, 0xd5, 0xbd, 0x93, 0x5c, 0x43, 0x5e, 0x06, 0x4a, 0xf9, 0xd6, 0x9c, 0x8c, 0x9f, 0xa9, 0x3b,
- 0x04, 0xd5, 0x28, 0xae, 0x3d, 0x6d, 0x32, 0xf5, 0x91, 0x04, 0x99, 0x6b, 0xec, 0xf9, 0x0e, 0x71,
- 0xa5, 0x54, 0x55, 0x38, 0xc9, 0x19, 0xb1, 0xf9, 0x4c, 0x7c, 0xfd, 0xa3, 0x92, 0x50, 0xff, 0x48,
- 0xc2, 0xa1, 0x6e, 0x63, 0x97, 0x3a, 0xdf, 0x3a, 0xd8, 0xfe, 0xbf, 0x6f, 0xff, 0xda, 0x37, 0xf4,
- 0x00, 0x32, 0x53, 0xe2, 0xd1, 0xbe, 0x63, 0x4b, 0x69, 0x86, 0xa4, 0x43, 0x53, 0xb7, 0xd1, 0x43,
- 0x80, 0x48, 0x66, 0x88, 0x65, 0x18, 0x96, 0x8b, 0x3c, 0xba, 0x1d, 0xf5, 0xfb, 0x06, 0xf6, 0x37,
- 0x2f, 0x80, 0x3e, 0x59, 0x67, 0x0b, 0x7b, 0x9d, 0x6b, 0xa0, 0x65, 0xa0, 0x14, 0xb9, 0xc8, 0x08,
- 0x50, 0x57, 0x15, 0x9e, 0x6e, 0x55, 0x48, 0x32, 0xfe, 0xd1, 0x32, 0x50, 0x0e, 0xa3, 0x4b, 0xad,
- 0x30, 0xf5, 0xdd, 0xc2, 0xdf, 0x8b, 0x90, 0xbe, 0x30, 0xad, 0x57, 0x98, 0x22, 0x19, 0xb2, 0x3e,
- 0xfe, 0x6e, 0x86, 0x5d, 0x8b, 0x0f, 0x58, 0x34, 0x56, 0x36, 0xfa, 0x02, 0xf2, 0x3e, 0x99, 0x79,
- 0x16, 0xee, 0x87, 0x35, 0xa3, 0x1a, 0xe5, 0x65, 0xa0, 0x20, 0x5e, 0x63, 0x03, 0x54, 0x0d, 0xe0,
- 0xd6, 0x05, 0xf1, 0x28, 0xfa, 0x0a, 0x8a, 0x11, 0x16, 0x55, 0x66, 0x63, 0xcc, 0x35, 0x3e, 0x58,
- 0x06, 0xca, 0xd1, 0x56, 0x6c, 0x84, 0xab, 0x46, 0x81, 0x3b, 0xe2, 0xa5, 0x7b, 0x0e, 0x25, 0x1b,
- 0xfb, 0xd4, 0x71, 0x4d, 0x36, 0x17, 0x56, 0x5f, 0x64, 0x39, 0x3e, 0x5c, 0x06, 0xca, 0x03, 0x9e,
- 0x63, 0x97, 0xa1, 0x1a, 0x07, 0x1b, 0x2e, 0xa6, 0xa4, 0x0b, 0xef, 0x6d, 0xb2, 0x62, 0x39, 0x6c,
- 0x8c, 0x8d, 0xca, 0x32, 0x50, 0xe4, 0x77, 0x53, 0xad, 0x34, 0xa1, 0x0d, 0x6f, 0x2c, 0x0c, 0x81,
- 0x68, 0x9b, 0xd4, 0x64, 0xe3, 0xde, 0x37, 0xd8, 0x19, 0x0d, 0xa0, 0x48, 0x9d, 0x09, 0x26, 0x33,
- 0xda, 0x1f, 0x61, 0x67, 0x38, 0xa2, 0x6c, 0xe0, 0xf9, 0x9d, 0x9d, 0xe7, 0xdf, 0xa5, 0xeb, 0xd3,
- 0x5a, 0x9b, 0x71, 0x1a, 0x0f, 0xc3, 0x75, 0x5d, 0x37, 0x64, 0x3b, 0x83, 0x6a, 0x14, 0x22, 0x07,
- 0x67, 0x23, 0x1d, 0x0e, 0x63, 0x46, 0xf8, 0xeb, 0x53, 0x73, 0x32, 0x95, 0xb2, 0xe1, 0xc0, 0x1a,
- 0xc7, 0xcb, 0x40, 0x91, 0xb6, 0x93, 0xac, 0x28, 0xaa, 0x51, 0x8a, 0x7c, 0xbd, 0xd8, 0x15, 0xed,
- 0xc0, 0xcf, 0x02, 0xe4, 0xf9, 0x0e, 0xb0, 0x7f, 0xee, 0x7f, 0xb0, 0x7c, 0x5b, 0xbb, 0xb6, 0xb7,
- 0xb3, 0x6b, 0x71, 0x5f, 0xc5, 0x75, 0x5f, 0x23, 0xa1, 0x5d, 0x38, 0x38, 0xb7, 0x5e, 0xb9, 0xe4,
- 0x66, 0x8c, 0xed, 0x21, 0x9e, 0x60, 0x97, 0x22, 0x09, 0xd2, 0x1e, 0xf6, 0x67, 0x63, 0x2a, 0x1d,
- 0x85, 0xf4, 0x76, 0xc2, 0x88, 0x6c, 0x54, 0x86, 0x14, 0xf6, 0x3c, 0xe2, 0x49, 0xe5, 0x50, 0x53,
- 0x3b, 0x61, 0x70, 0xb3, 0x01, 0x90, 0xf5, 0xb0, 0x3f, 0x25, 0xae, 0x8f, 0x1f, 0xff, 0x2a, 0x40,
- 0x8a, 0xdf, 0xf9, 0x73, 0x50, 0x2e, 0x7b, 0xe7, 0x3d, 0xad, 0x7f, 0xd5, 0xd1, 0x3b, 0x7a, 0x4f,
- 0x3f, 0x7f, 0xa1, 0xbf, 0xd4, 0x5a, 0xfd, 0xab, 0xce, 0xe5, 0x85, 0xd6, 0xd4, 0x9f, 0xeb, 0x5a,
- 0xab, 0x94, 0x90, 0x0f, 0xe7, 0x8b, 0x6a, 0x61, 0x8b, 0x80, 0x24, 0x00, 0x1e, 0x17, 0x3a, 0x4b,
- 0x82, 0x9c, 0x9d, 0x2f, 0xaa, 0x62, 0x78, 0x46, 0x15, 0x28, 0x70, 0xa4, 0x67, 0x7c, 0xd3, 0xbd,
- 0xd0, 0x3a, 0xa5, 0xa4, 0x9c, 0x9f, 0x2f, 0xaa, 0x99, 0xc8, 0x5c, 0x47, 0x32, 0x70, 0x8f, 0x47,
- 0x32, 0xe4, 0x18, 0xf6, 0x39, 0xd2, 0x7c, 0xd1, 0xbd, 0xd4, 0x5a, 0x25, 0x51, 0x86, 0xf9, 0xa2,
- 0x9a, 0xe6, 0x96, 0x2c, 0xbe, 0xfe, 0xa9, 0x92, 0x78, 0x7c, 0x03, 0x29, 0xf6, 0xbd, 0x44, 0x1f,
- 0x43, 0xb9, 0x6b, 0xb4, 0x34, 0xa3, 0xdf, 0xe9, 0x76, 0xb4, 0x1d, 0xbd, 0x2c, 0x65, 0xe8, 0x47,
- 0x2a, 0x1c, 0x70, 0xd6, 0x55, 0x87, 0xfd, 0x6a, 0xad, 0x92, 0x20, 0x17, 0xe6, 0x8b, 0x6a, 0x6e,
- 0xe5, 0x08, 0x05, 0x73, 0x4e, 0xcc, 0x88, 0x04, 0x47, 0x26, 0x2f, 0xdc, 0x68, 0xbf, 0xb9, 0xab,
- 0x08, 0x6f, 0xef, 0x2a, 0xc2, 0xef, 0x77, 0x15, 0xe1, 0x87, 0xfb, 0x4a, 0xe2, 0xed, 0x7d, 0x25,
- 0xf1, 0xdb, 0x7d, 0x25, 0xf1, 0xb2, 0x36, 0x74, 0xe8, 0x68, 0x36, 0xa8, 0x59, 0x64, 0x52, 0xb7,
- 0x88, 0x3f, 0x21, 0x7e, 0xdd, 0x19, 0x58, 0x4f, 0xe2, 0x77, 0xf9, 0xd3, 0xa7, 0x4f, 0xe2, 0xc7,
- 0x9e, 0xde, 0x4e, 0xb1, 0x3f, 0x48, 0xb3, 0x87, 0xf9, 0xb3, 0x3f, 0x03, 0x00, 0x00, 0xff, 0xff,
- 0xbc, 0xc9, 0x53, 0x80, 0x0f, 0x08, 0x00, 0x00,
+ // 913 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x55, 0x3d, 0x6f, 0xe3, 0x46,
+ 0x13, 0x16, 0x65, 0xea, 0x6b, 0x64, 0xc9, 0xf2, 0xbe, 0xaf, 0x75, 0x0c, 0xe3, 0x13, 0x15, 0x5e,
+ 0x0a, 0xe3, 0x82, 0x93, 0x62, 0xe7, 0x70, 0x09, 0xae, 0x8a, 0x25, 0xf1, 0x60, 0x02, 0x17, 0x49,
+ 0xa0, 0xe5, 0x22, 0xd7, 0x08, 0x14, 0xb9, 0x91, 0x88, 0x93, 0xb8, 0x0a, 0xb9, 0xb2, 0xe1, 0x32,
+ 0xdd, 0x41, 0x55, 0xfe, 0x80, 0x80, 0x00, 0x01, 0xd2, 0xe4, 0x07, 0xe4, 0x2f, 0x5c, 0x79, 0x65,
+ 0x2a, 0x22, 0xb0, 0xeb, 0x34, 0xfa, 0x05, 0x01, 0x77, 0x49, 0x7d, 0x5d, 0x3e, 0xba, 0x54, 0xa9,
+ 0xb4, 0x33, 0xcf, 0x33, 0x33, 0xcf, 0xce, 0x8c, 0xb8, 0xf0, 0xc8, 0x19, 0x58, 0x43, 0x52, 0xb7,
+ 0x88, 0x87, 0xeb, 0xd6, 0xc8, 0x74, 0x5d, 0x3c, 0xae, 0x5f, 0x9f, 0xc6, 0xc7, 0xda, 0xd4, 0x23,
+ 0x94, 0xa0, 0x23, 0x46, 0xaa, 0x85, 0xa4, 0x5a, 0x8c, 0x5c, 0x9f, 0xca, 0xff, 0x1f, 0x92, 0x21,
+ 0x61, 0x8c, 0x7a, 0x78, 0xe2, 0x64, 0xf9, 0xa3, 0xcd, 0x8c, 0x63, 0x07, 0xbb, 0x94, 0x25, 0x64,
+ 0x27, 0x4e, 0x51, 0x7f, 0x4e, 0x42, 0xa6, 0xc9, 0xf3, 0xa0, 0x33, 0x48, 0xf9, 0xd4, 0xa4, 0x58,
+ 0x12, 0xaa, 0xc2, 0x49, 0xf1, 0xec, 0xb8, 0xf6, 0xa7, 0xb5, 0x6a, 0x97, 0x21, 0xc7, 0xe0, 0x54,
+ 0xf4, 0x05, 0x64, 0x89, 0x67, 0x63, 0xcf, 0x71, 0x87, 0x52, 0xf2, 0x6f, 0xc3, 0x3a, 0x21, 0xcd,
+ 0x58, 0xb1, 0xd1, 0x57, 0xb0, 0x6f, 0x91, 0x99, 0x4b, 0xb1, 0x37, 0x35, 0x3d, 0x7a, 0x2b, 0xed,
+ 0x55, 0x85, 0x93, 0xfc, 0xd9, 0xa3, 0xbf, 0x88, 0x6e, 0x6e, 0x50, 0x1b, 0xe2, 0xdb, 0x40, 0x49,
+ 0x18, 0x5b, 0xe1, 0xa8, 0x09, 0x07, 0x16, 0x71, 0x5d, 0x6c, 0x51, 0x87, 0xb8, 0xfd, 0x11, 0x99,
+ 0xfa, 0x92, 0x58, 0xdd, 0x3b, 0xc9, 0x35, 0xe4, 0x65, 0xa0, 0x94, 0x6f, 0xcd, 0xc9, 0xf8, 0xb9,
+ 0xba, 0x43, 0x50, 0x8d, 0xe2, 0xda, 0x73, 0x41, 0xa6, 0x3e, 0x92, 0x20, 0x73, 0x8d, 0x3d, 0xdf,
+ 0x21, 0xae, 0x94, 0xaa, 0x0a, 0x27, 0x39, 0x23, 0x36, 0x9f, 0x8b, 0x6f, 0x7e, 0x50, 0x12, 0xea,
+ 0xef, 0x49, 0x38, 0xd4, 0x6d, 0xec, 0x52, 0xe7, 0x1b, 0x07, 0xdb, 0xff, 0xf5, 0xed, 0x1f, 0xfb,
+ 0x86, 0x1e, 0x40, 0x66, 0x4a, 0x3c, 0xda, 0x77, 0x6c, 0x29, 0xcd, 0x90, 0x74, 0x68, 0xea, 0x36,
+ 0x7a, 0x08, 0x10, 0xc9, 0x0c, 0xb1, 0x0c, 0xc3, 0x72, 0x91, 0x47, 0xb7, 0xa3, 0x7e, 0xdf, 0xc0,
+ 0xfe, 0xe6, 0x05, 0xd0, 0x27, 0xeb, 0x6c, 0x61, 0xaf, 0x73, 0x0d, 0xb4, 0x0c, 0x94, 0x22, 0x17,
+ 0x19, 0x01, 0xea, 0xaa, 0xc2, 0xd3, 0xad, 0x0a, 0x49, 0xc6, 0x3f, 0x5a, 0x06, 0xca, 0x61, 0x74,
+ 0xa9, 0x15, 0xa6, 0xbe, 0x5f, 0xf8, 0x3b, 0x11, 0xd2, 0x5d, 0xd3, 0x7a, 0x8d, 0x29, 0x92, 0x21,
+ 0xeb, 0xe3, 0x6f, 0x67, 0xd8, 0xb5, 0xf8, 0x80, 0x45, 0x63, 0x65, 0xa3, 0xcf, 0x21, 0xef, 0x93,
+ 0x99, 0x67, 0xe1, 0x7e, 0x58, 0x33, 0xaa, 0x51, 0x5e, 0x06, 0x0a, 0xe2, 0x35, 0x36, 0x40, 0xd5,
+ 0x00, 0x6e, 0x75, 0x89, 0x47, 0xd1, 0x97, 0x50, 0x8c, 0xb0, 0xa8, 0x32, 0x1b, 0x63, 0xae, 0xf1,
+ 0xc1, 0x32, 0x50, 0x8e, 0xb6, 0x62, 0x23, 0x5c, 0x35, 0x0a, 0xdc, 0x11, 0x2f, 0xdd, 0x0b, 0x28,
+ 0xd9, 0xd8, 0xa7, 0x8e, 0x6b, 0xb2, 0xb9, 0xb0, 0xfa, 0x22, 0xcb, 0xf1, 0xe1, 0x32, 0x50, 0x1e,
+ 0xf0, 0x1c, 0xbb, 0x0c, 0xd5, 0x38, 0xd8, 0x70, 0x31, 0x25, 0x1d, 0xf8, 0xdf, 0x26, 0x2b, 0x96,
+ 0xc3, 0xc6, 0xd8, 0xa8, 0x2c, 0x03, 0x45, 0x7e, 0x3f, 0xd5, 0x4a, 0x13, 0xda, 0xf0, 0xc6, 0xc2,
+ 0x10, 0x88, 0xb6, 0x49, 0x4d, 0x36, 0xee, 0x7d, 0x83, 0x9d, 0xd1, 0x00, 0x8a, 0xd4, 0x99, 0x60,
+ 0x32, 0xa3, 0xfd, 0x11, 0x76, 0x86, 0x23, 0xca, 0x06, 0x9e, 0xdf, 0xd9, 0x79, 0xfe, 0x5d, 0xba,
+ 0x3e, 0xad, 0x5d, 0x30, 0x4e, 0xe3, 0x61, 0xb8, 0xae, 0xeb, 0x86, 0x6c, 0x67, 0x50, 0x8d, 0x42,
+ 0xe4, 0xe0, 0x6c, 0xa4, 0xc3, 0x61, 0xcc, 0x08, 0x7f, 0x7d, 0x6a, 0x4e, 0xa6, 0x52, 0x36, 0x1c,
+ 0x58, 0xe3, 0x78, 0x19, 0x28, 0xd2, 0x76, 0x92, 0x15, 0x45, 0x35, 0x4a, 0x91, 0xaf, 0x17, 0xbb,
+ 0xa2, 0x1d, 0xf8, 0x49, 0x80, 0x3c, 0xdf, 0x01, 0xf6, 0xcf, 0xfd, 0x17, 0x96, 0x6f, 0x6b, 0xd7,
+ 0xf6, 0x76, 0x76, 0x2d, 0xee, 0xab, 0xb8, 0xee, 0x6b, 0x24, 0xb4, 0x03, 0x07, 0xe7, 0xd6, 0x6b,
+ 0x97, 0xdc, 0x8c, 0xb1, 0x3d, 0xc4, 0x13, 0xec, 0x52, 0x24, 0x41, 0xda, 0xc3, 0xfe, 0x6c, 0x4c,
+ 0xa5, 0xa3, 0x90, 0x7e, 0x91, 0x30, 0x22, 0x1b, 0x95, 0x21, 0x85, 0x3d, 0x8f, 0x78, 0x52, 0x39,
+ 0xd4, 0x74, 0x91, 0x30, 0xb8, 0xd9, 0x00, 0xc8, 0x7a, 0xd8, 0x9f, 0x12, 0xd7, 0xc7, 0x8f, 0x7f,
+ 0x11, 0x20, 0xc5, 0xef, 0xfc, 0x0c, 0x94, 0xcb, 0xde, 0x79, 0x4f, 0xeb, 0x5f, 0xb5, 0xf5, 0xb6,
+ 0xde, 0xd3, 0xcf, 0x5f, 0xea, 0xaf, 0xb4, 0x56, 0xff, 0xaa, 0x7d, 0xd9, 0xd5, 0x9a, 0xfa, 0x0b,
+ 0x5d, 0x6b, 0x95, 0x12, 0xf2, 0xe1, 0x7c, 0x51, 0x2d, 0x6c, 0x11, 0x90, 0x04, 0xc0, 0xe3, 0x42,
+ 0x67, 0x49, 0x90, 0xb3, 0xf3, 0x45, 0x55, 0x0c, 0xcf, 0xa8, 0x02, 0x05, 0x8e, 0xf4, 0x8c, 0xaf,
+ 0x3b, 0x5d, 0xad, 0x5d, 0x4a, 0xca, 0xf9, 0xf9, 0xa2, 0x9a, 0x89, 0xcc, 0x75, 0x24, 0x03, 0xf7,
+ 0x78, 0x24, 0x43, 0x8e, 0x61, 0x9f, 0x23, 0xcd, 0x97, 0x9d, 0x4b, 0xad, 0x55, 0x12, 0x65, 0x98,
+ 0x2f, 0xaa, 0x69, 0x6e, 0xc9, 0xe2, 0x9b, 0x1f, 0x2b, 0x89, 0xc7, 0x37, 0x90, 0x62, 0xdf, 0x4b,
+ 0xf4, 0x31, 0x94, 0x3b, 0x46, 0x4b, 0x33, 0xfa, 0xed, 0x4e, 0x5b, 0xdb, 0xd1, 0xcb, 0x52, 0x86,
+ 0x7e, 0xa4, 0xc2, 0x01, 0x67, 0x5d, 0xb5, 0xd9, 0xaf, 0xd6, 0x2a, 0x09, 0x72, 0x61, 0xbe, 0xa8,
+ 0xe6, 0x56, 0x8e, 0x50, 0x30, 0xe7, 0xc4, 0x8c, 0x48, 0x70, 0x64, 0xf2, 0xc2, 0x8d, 0xee, 0xdb,
+ 0xbb, 0x8a, 0xf0, 0xee, 0xae, 0x22, 0xfc, 0x76, 0x57, 0x11, 0xbe, 0xbf, 0xaf, 0x24, 0xde, 0xdd,
+ 0x57, 0x12, 0xbf, 0xde, 0x57, 0x12, 0xaf, 0x9e, 0x0d, 0x1d, 0x3a, 0x9a, 0x0d, 0x6a, 0x16, 0x99,
+ 0xd4, 0x2d, 0xe2, 0x4f, 0x88, 0x5f, 0x77, 0x06, 0xd6, 0x93, 0x21, 0xa9, 0x4f, 0x88, 0x3d, 0x1b,
+ 0x63, 0x9f, 0xbf, 0xcf, 0x9f, 0x3e, 0x7d, 0x12, 0x3f, 0xfa, 0xf4, 0x76, 0x8a, 0xfd, 0x41, 0x9a,
+ 0x3d, 0xd0, 0x9f, 0xfd, 0x11, 0x00, 0x00, 0xff, 0xff, 0xe8, 0x56, 0x2b, 0x23, 0x17, 0x08, 0x00,
+ 0x00,
}
func (m *Channel) Marshal() (dAtA []byte, err error) {
diff --git a/core/04-channel/types/channel_test.go b/modules/core/04-channel/types/channel_test.go
similarity index 97%
rename from core/04-channel/types/channel_test.go
rename to modules/core/04-channel/types/channel_test.go
index c392a0ba..14bd8a54 100644
--- a/core/04-channel/types/channel_test.go
+++ b/modules/core/04-channel/types/channel_test.go
@@ -5,7 +5,7 @@ import (
"github.com/stretchr/testify/require"
- "github.com/cosmos/ibc-go/core/04-channel/types"
+ "github.com/cosmos/ibc-go/modules/core/04-channel/types"
)
func TestChannelValidateBasic(t *testing.T) {
diff --git a/core/04-channel/types/codec.go b/modules/core/04-channel/types/codec.go
similarity index 96%
rename from core/04-channel/types/codec.go
rename to modules/core/04-channel/types/codec.go
index 477e4620..7f8cc901 100644
--- a/core/04-channel/types/codec.go
+++ b/modules/core/04-channel/types/codec.go
@@ -5,7 +5,7 @@ import (
codectypes "github.com/cosmos/cosmos-sdk/codec/types"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/cosmos/cosmos-sdk/types/msgservice"
- "github.com/cosmos/ibc-go/core/exported"
+ "github.com/cosmos/ibc-go/modules/core/exported"
)
// RegisterInterfaces register the ibc channel submodule interfaces to protobuf
diff --git a/core/04-channel/types/errors.go b/modules/core/04-channel/types/errors.go
similarity index 100%
rename from core/04-channel/types/errors.go
rename to modules/core/04-channel/types/errors.go
diff --git a/core/04-channel/types/events.go b/modules/core/04-channel/types/events.go
similarity index 96%
rename from core/04-channel/types/events.go
rename to modules/core/04-channel/types/events.go
index 36af818f..9667a130 100644
--- a/core/04-channel/types/events.go
+++ b/modules/core/04-channel/types/events.go
@@ -3,7 +3,7 @@ package types
import (
"fmt"
- host "github.com/cosmos/ibc-go/core/24-host"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
)
// IBC channel events
diff --git a/core/04-channel/types/expected_keepers.go b/modules/core/04-channel/types/expected_keepers.go
similarity index 93%
rename from core/04-channel/types/expected_keepers.go
rename to modules/core/04-channel/types/expected_keepers.go
index d34167b2..b109b6c1 100644
--- a/core/04-channel/types/expected_keepers.go
+++ b/modules/core/04-channel/types/expected_keepers.go
@@ -3,8 +3,8 @@ package types
import (
sdk "github.com/cosmos/cosmos-sdk/types"
capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"
- connectiontypes "github.com/cosmos/ibc-go/core/03-connection/types"
- "github.com/cosmos/ibc-go/core/exported"
+ connectiontypes "github.com/cosmos/ibc-go/modules/core/03-connection/types"
+ "github.com/cosmos/ibc-go/modules/core/exported"
)
// ClientKeeper expected account IBC client keeper
diff --git a/core/04-channel/types/genesis.go b/modules/core/04-channel/types/genesis.go
similarity index 98%
rename from core/04-channel/types/genesis.go
rename to modules/core/04-channel/types/genesis.go
index 6b0b2d3c..e80f82b0 100644
--- a/core/04-channel/types/genesis.go
+++ b/modules/core/04-channel/types/genesis.go
@@ -4,7 +4,7 @@ import (
"errors"
"fmt"
- host "github.com/cosmos/ibc-go/core/24-host"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
)
// NewPacketState creates a new PacketState instance.
diff --git a/core/04-channel/types/genesis.pb.go b/modules/core/04-channel/types/genesis.pb.go
similarity index 88%
rename from core/04-channel/types/genesis.pb.go
rename to modules/core/04-channel/types/genesis.pb.go
index 9c55fc20..ddd254d7 100644
--- a/core/04-channel/types/genesis.pb.go
+++ b/modules/core/04-channel/types/genesis.pb.go
@@ -197,39 +197,39 @@ func init() {
}
var fileDescriptor_c4d4e081eaaab7c3 = []byte{
- // 497 bytes of a gzipped FileDescriptorProto
+ // 505 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x93, 0xcf, 0x6e, 0xd3, 0x30,
0x1c, 0x80, 0x9b, 0xb5, 0xeb, 0x3a, 0x6f, 0xad, 0x98, 0x59, 0xa5, 0x50, 0x8d, 0xa4, 0x32, 0x42,
- 0xaa, 0x84, 0x96, 0x30, 0xd8, 0x89, 0x63, 0x40, 0x82, 0x72, 0x42, 0x61, 0x27, 0x2e, 0x55, 0xea,
- 0xfc, 0x48, 0xad, 0x36, 0x71, 0x89, 0xbd, 0xc2, 0x9e, 0x02, 0x1e, 0x6b, 0xc7, 0x1d, 0x39, 0x45,
- 0xa8, 0x7d, 0x02, 0x7a, 0xe4, 0x84, 0xe2, 0xfc, 0x69, 0xab, 0x6d, 0x48, 0x15, 0xb7, 0xd8, 0xfe,
- 0xfc, 0x7d, 0xbf, 0x43, 0x8c, 0x9e, 0xb0, 0x21, 0x0d, 0xb8, 0x4d, 0x79, 0x0c, 0x36, 0x1d, 0x79,
- 0x51, 0x04, 0x13, 0x7b, 0x76, 0x66, 0x07, 0x10, 0x81, 0x60, 0xc2, 0x9a, 0xc6, 0x5c, 0x72, 0xdc,
- 0x56, 0x90, 0x95, 0x42, 0x56, 0x0e, 0x59, 0xb3, 0xb3, 0xce, 0x71, 0xc0, 0x03, 0xae, 0x08, 0x3b,
- 0xfd, 0xca, 0xe0, 0xce, 0x3d, 0xc6, 0xe2, 0x9e, 0x82, 0xc8, 0xef, 0x5d, 0x74, 0xf8, 0x36, 0x6b,
- 0x7c, 0x94, 0x9e, 0x04, 0x3c, 0x40, 0x8d, 0x9c, 0x10, 0xba, 0xd6, 0xad, 0xf6, 0x0e, 0x5e, 0xf4,
- 0xac, 0x3b, 0xab, 0x56, 0xdf, 0x87, 0x48, 0xb2, 0xcf, 0x0c, 0xfc, 0xd7, 0xd9, 0xa6, 0xf3, 0xe8,
- 0x3a, 0x31, 0x2b, 0x7f, 0x12, 0xf3, 0xe8, 0xd6, 0x91, 0x5b, 0x4a, 0xf1, 0x05, 0x7a, 0xe0, 0xd1,
- 0x71, 0xc4, 0xbf, 0x4e, 0xc0, 0x0f, 0x20, 0x84, 0x48, 0x0a, 0x7d, 0x47, 0x85, 0xc8, 0x3d, 0xa1,
- 0x0f, 0x1e, 0x1d, 0x83, 0x54, 0xe3, 0x39, 0xb5, 0x34, 0xe1, 0xde, 0x32, 0xe0, 0xf7, 0xe8, 0x80,
- 0xf2, 0x30, 0x64, 0x32, 0x13, 0x56, 0xb7, 0x14, 0xae, 0x5f, 0xc6, 0x6f, 0x50, 0x23, 0x06, 0x0a,
- 0x6c, 0x2a, 0x85, 0x5e, 0xdb, 0x52, 0x54, 0xde, 0xc4, 0x63, 0xd4, 0x12, 0x10, 0xf9, 0x03, 0x01,
- 0x5f, 0x2e, 0x21, 0xa2, 0x20, 0xf4, 0x5d, 0xe5, 0x7a, 0xfa, 0x6f, 0x57, 0x4e, 0x3b, 0x8f, 0x53,
- 0xdd, 0x32, 0x31, 0xdb, 0x57, 0x5e, 0x38, 0x79, 0x45, 0x36, 0x55, 0xc4, 0x6d, 0xa6, 0x1b, 0x05,
- 0xac, 0x62, 0x31, 0xd0, 0xd9, 0x5a, 0xac, 0xfe, 0x1f, 0xb1, 0x4d, 0x15, 0x71, 0x9b, 0xe9, 0xc6,
- 0x2a, 0x36, 0x42, 0x4d, 0x8f, 0x8e, 0xd7, 0x5a, 0x7b, 0xdb, 0xb4, 0x4e, 0xf2, 0xd6, 0x71, 0xd6,
- 0xda, 0x30, 0x11, 0xf7, 0xd0, 0xa3, 0xe3, 0x55, 0xe9, 0x02, 0xb5, 0x23, 0xf8, 0x26, 0x07, 0xb9,
- 0xad, 0x04, 0xf5, 0x46, 0x57, 0xeb, 0xd5, 0x9c, 0xee, 0x32, 0x31, 0x4f, 0x32, 0xcd, 0x9d, 0x18,
- 0x71, 0x1f, 0xa6, 0xfb, 0xf9, 0x3f, 0x58, 0x68, 0xc9, 0x77, 0x0d, 0xb5, 0x36, 0x87, 0xc2, 0xcf,
- 0xd0, 0xde, 0x94, 0xc7, 0x72, 0xc0, 0x7c, 0x5d, 0xeb, 0x6a, 0xbd, 0x7d, 0x07, 0x2f, 0x13, 0xb3,
- 0x95, 0xa9, 0xf3, 0x03, 0xe2, 0xd6, 0xd3, 0xaf, 0xbe, 0x8f, 0xcf, 0x11, 0x2a, 0x4a, 0xcc, 0xd7,
- 0x77, 0x14, 0xdf, 0x5e, 0x26, 0xe6, 0x51, 0xc6, 0xaf, 0xce, 0x88, 0xbb, 0x9f, 0x2f, 0xfa, 0x3e,
- 0xee, 0xa0, 0x46, 0x39, 0x7e, 0x35, 0x1d, 0xdf, 0x2d, 0xd7, 0xce, 0xbb, 0xeb, 0xb9, 0xa1, 0xdd,
- 0xcc, 0x0d, 0xed, 0xd7, 0xdc, 0xd0, 0x7e, 0x2c, 0x8c, 0xca, 0xcd, 0xc2, 0xa8, 0xfc, 0x5c, 0x18,
- 0x95, 0x4f, 0x56, 0xc0, 0xe4, 0xe8, 0x72, 0x68, 0x51, 0x1e, 0xda, 0x94, 0x8b, 0x90, 0x0b, 0x9b,
- 0x0d, 0xe9, 0x69, 0xf1, 0xae, 0x9f, 0x9f, 0x9f, 0x16, 0x4f, 0x5b, 0x5e, 0x4d, 0x41, 0x0c, 0xeb,
- 0xea, 0x59, 0xbf, 0xfc, 0x1b, 0x00, 0x00, 0xff, 0xff, 0x02, 0xd2, 0xd3, 0x2f, 0x4f, 0x04, 0x00,
- 0x00,
+ 0xaa, 0x84, 0x96, 0x30, 0x98, 0x38, 0x70, 0x0c, 0x48, 0xa8, 0x9c, 0xa6, 0xb0, 0x13, 0x97, 0x2a,
+ 0x75, 0x7e, 0xa4, 0x56, 0x9a, 0xb8, 0xc4, 0x6e, 0x61, 0x4f, 0x01, 0x8f, 0xb5, 0xe3, 0x8e, 0x9c,
+ 0x22, 0xd4, 0x3e, 0x01, 0x3d, 0x72, 0x42, 0xf9, 0xdb, 0x56, 0xdb, 0x90, 0xaa, 0xdd, 0x62, 0xfb,
+ 0xf3, 0xf7, 0xfd, 0x0e, 0x31, 0x7a, 0xc6, 0x86, 0xd4, 0xe3, 0x26, 0xe5, 0x11, 0x98, 0x74, 0xe4,
+ 0x84, 0x21, 0x8c, 0xcd, 0xd9, 0x99, 0xe9, 0x41, 0x08, 0x82, 0x09, 0x63, 0x12, 0x71, 0xc9, 0x71,
+ 0x3b, 0x85, 0x8c, 0x04, 0x32, 0x72, 0xc8, 0x98, 0x9d, 0x75, 0x8e, 0x3d, 0xee, 0xf1, 0x94, 0x30,
+ 0x93, 0xaf, 0x0c, 0xee, 0xdc, 0x63, 0x2c, 0xee, 0xa5, 0x10, 0xf9, 0xb3, 0x8b, 0x0e, 0x3f, 0x64,
+ 0x8d, 0x4f, 0xd2, 0x91, 0x80, 0x07, 0xa8, 0x91, 0x13, 0x42, 0x55, 0xba, 0xd5, 0xde, 0xc1, 0xab,
+ 0x9e, 0x71, 0x67, 0xd5, 0xe8, 0xbb, 0x10, 0x4a, 0xf6, 0x85, 0x81, 0xfb, 0x2e, 0xdb, 0xb4, 0x9e,
+ 0x5c, 0xc7, 0x7a, 0xe5, 0x6f, 0xac, 0x1f, 0xdd, 0x3a, 0xb2, 0x4b, 0x29, 0xbe, 0x44, 0x8f, 0x1c,
+ 0xea, 0x87, 0xfc, 0xdb, 0x18, 0x5c, 0x0f, 0x02, 0x08, 0xa5, 0x50, 0x77, 0xd2, 0x10, 0xb9, 0x27,
+ 0x74, 0xe1, 0x50, 0x1f, 0x64, 0x3a, 0x9e, 0x55, 0x4b, 0x12, 0xf6, 0x2d, 0x03, 0xfe, 0x88, 0x0e,
+ 0x28, 0x0f, 0x02, 0x26, 0x33, 0x61, 0x75, 0x4b, 0xe1, 0xfa, 0x65, 0xfc, 0x1e, 0x35, 0x22, 0xa0,
+ 0xc0, 0x26, 0x52, 0xa8, 0xb5, 0x2d, 0x45, 0xe5, 0x4d, 0xec, 0xa3, 0x96, 0x80, 0xd0, 0x1d, 0x08,
+ 0xf8, 0x3a, 0x85, 0x90, 0x82, 0x50, 0x77, 0x53, 0xd7, 0xf3, 0xff, 0xbb, 0x72, 0xda, 0x7a, 0x9a,
+ 0xe8, 0x96, 0xb1, 0xde, 0xbe, 0x72, 0x82, 0xf1, 0x5b, 0xb2, 0xa9, 0x22, 0x76, 0x33, 0xd9, 0x28,
+ 0xe0, 0x34, 0x16, 0x01, 0x9d, 0xad, 0xc5, 0xea, 0x0f, 0x88, 0x6d, 0xaa, 0x88, 0xdd, 0x4c, 0x36,
+ 0x56, 0xb1, 0x11, 0x6a, 0x3a, 0xd4, 0x5f, 0x6b, 0xed, 0x6d, 0xd3, 0x3a, 0xc9, 0x5b, 0xc7, 0x59,
+ 0x6b, 0xc3, 0x44, 0xec, 0x43, 0x87, 0xfa, 0xab, 0xd2, 0x25, 0x6a, 0x87, 0xf0, 0x5d, 0x0e, 0x72,
+ 0x5b, 0x09, 0xaa, 0x8d, 0xae, 0xd2, 0xab, 0x59, 0xdd, 0x65, 0xac, 0x9f, 0x64, 0x9a, 0x3b, 0x31,
+ 0x62, 0x3f, 0x4e, 0xf6, 0xf3, 0x7f, 0xb0, 0xd0, 0x92, 0x1f, 0x0a, 0x6a, 0x6d, 0x0e, 0x85, 0x5f,
+ 0xa0, 0xbd, 0x09, 0x8f, 0xe4, 0x80, 0xb9, 0xaa, 0xd2, 0x55, 0x7a, 0xfb, 0x16, 0x5e, 0xc6, 0x7a,
+ 0x2b, 0x53, 0xe7, 0x07, 0xc4, 0xae, 0x27, 0x5f, 0x7d, 0x17, 0x9f, 0x23, 0x54, 0x94, 0x98, 0xab,
+ 0xee, 0xa4, 0x7c, 0x7b, 0x19, 0xeb, 0x47, 0x19, 0xbf, 0x3a, 0x23, 0xf6, 0x7e, 0xbe, 0xe8, 0xbb,
+ 0xb8, 0x83, 0x1a, 0xe5, 0xf8, 0xd5, 0x64, 0x7c, 0xbb, 0x5c, 0x5b, 0x17, 0xd7, 0x73, 0x4d, 0xb9,
+ 0x99, 0x6b, 0xca, 0xef, 0xb9, 0xa6, 0xfc, 0x5c, 0x68, 0x95, 0x9b, 0x85, 0x56, 0xf9, 0xb5, 0xd0,
+ 0x2a, 0x9f, 0xdf, 0x78, 0x4c, 0x8e, 0xa6, 0x43, 0x83, 0xf2, 0xc0, 0xa4, 0x5c, 0x04, 0x5c, 0x98,
+ 0x6c, 0x48, 0x4f, 0x3d, 0x6e, 0x06, 0xdc, 0x9d, 0x8e, 0x41, 0x64, 0xef, 0xfb, 0xe5, 0xf9, 0x69,
+ 0xf1, 0xc4, 0xe5, 0xd5, 0x04, 0xc4, 0xb0, 0x9e, 0x3e, 0xef, 0xd7, 0xff, 0x02, 0x00, 0x00, 0xff,
+ 0xff, 0xd6, 0x46, 0x49, 0x69, 0x57, 0x04, 0x00, 0x00,
}
func (m *GenesisState) Marshal() (dAtA []byte, err error) {
diff --git a/core/04-channel/types/genesis_test.go b/modules/core/04-channel/types/genesis_test.go
similarity index 99%
rename from core/04-channel/types/genesis_test.go
rename to modules/core/04-channel/types/genesis_test.go
index 74e53f75..35e20f1b 100644
--- a/core/04-channel/types/genesis_test.go
+++ b/modules/core/04-channel/types/genesis_test.go
@@ -5,7 +5,7 @@ import (
"github.com/stretchr/testify/require"
- "github.com/cosmos/ibc-go/core/04-channel/types"
+ "github.com/cosmos/ibc-go/modules/core/04-channel/types"
)
const (
diff --git a/core/04-channel/types/keys.go b/modules/core/04-channel/types/keys.go
similarity index 97%
rename from core/04-channel/types/keys.go
rename to modules/core/04-channel/types/keys.go
index 62f81341..64ee6a22 100644
--- a/core/04-channel/types/keys.go
+++ b/modules/core/04-channel/types/keys.go
@@ -5,7 +5,7 @@ import (
"regexp"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- host "github.com/cosmos/ibc-go/core/24-host"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
)
const (
diff --git a/core/04-channel/types/keys_test.go b/modules/core/04-channel/types/keys_test.go
similarity index 95%
rename from core/04-channel/types/keys_test.go
rename to modules/core/04-channel/types/keys_test.go
index 0c3d67b8..a89936d0 100644
--- a/core/04-channel/types/keys_test.go
+++ b/modules/core/04-channel/types/keys_test.go
@@ -5,7 +5,7 @@ import (
"github.com/stretchr/testify/require"
- "github.com/cosmos/ibc-go/core/04-channel/types"
+ "github.com/cosmos/ibc-go/modules/core/04-channel/types"
)
// tests ParseChannelSequence and IsValidChannelID
diff --git a/core/04-channel/types/msgs.go b/modules/core/04-channel/types/msgs.go
similarity index 99%
rename from core/04-channel/types/msgs.go
rename to modules/core/04-channel/types/msgs.go
index 013ac9bf..d35c983f 100644
--- a/core/04-channel/types/msgs.go
+++ b/modules/core/04-channel/types/msgs.go
@@ -5,9 +5,9 @@ import (
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
- host "github.com/cosmos/ibc-go/core/24-host"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ commitmenttypes "github.com/cosmos/ibc-go/modules/core/23-commitment/types"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
)
var _ sdk.Msg = &MsgChannelOpenInit{}
diff --git a/core/04-channel/types/msgs_test.go b/modules/core/04-channel/types/msgs_test.go
similarity index 98%
rename from core/04-channel/types/msgs_test.go
rename to modules/core/04-channel/types/msgs_test.go
index 966e5ec3..fd8d2328 100644
--- a/core/04-channel/types/msgs_test.go
+++ b/modules/core/04-channel/types/msgs_test.go
@@ -14,10 +14,10 @@ import (
"github.com/cosmos/cosmos-sdk/store/rootmulti"
storetypes "github.com/cosmos/cosmos-sdk/store/types"
sdk "github.com/cosmos/cosmos-sdk/types"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- "github.com/cosmos/ibc-go/core/04-channel/types"
- commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
- "github.com/cosmos/ibc-go/core/exported"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ "github.com/cosmos/ibc-go/modules/core/04-channel/types"
+ commitmenttypes "github.com/cosmos/ibc-go/modules/core/23-commitment/types"
+ "github.com/cosmos/ibc-go/modules/core/exported"
)
const (
diff --git a/core/04-channel/types/packet.go b/modules/core/04-channel/types/packet.go
similarity index 95%
rename from core/04-channel/types/packet.go
rename to modules/core/04-channel/types/packet.go
index 5f08223c..092f2b6b 100644
--- a/core/04-channel/types/packet.go
+++ b/modules/core/04-channel/types/packet.go
@@ -6,9 +6,9 @@ import (
"github.com/cosmos/cosmos-sdk/codec"
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- host "github.com/cosmos/ibc-go/core/24-host"
- "github.com/cosmos/ibc-go/core/exported"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
+ "github.com/cosmos/ibc-go/modules/core/exported"
)
// CommitPacket returns the packet commitment bytes. The commitment consists of:
diff --git a/core/04-channel/types/packet_test.go b/modules/core/04-channel/types/packet_test.go
similarity index 94%
rename from core/04-channel/types/packet_test.go
rename to modules/core/04-channel/types/packet_test.go
index d7a9878b..17163cee 100644
--- a/core/04-channel/types/packet_test.go
+++ b/modules/core/04-channel/types/packet_test.go
@@ -7,8 +7,8 @@ import (
"github.com/cosmos/cosmos-sdk/codec"
codectypes "github.com/cosmos/cosmos-sdk/codec/types"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- "github.com/cosmos/ibc-go/core/04-channel/types"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ "github.com/cosmos/ibc-go/modules/core/04-channel/types"
)
func TestCommitPacket(t *testing.T) {
diff --git a/core/04-channel/types/query.go b/modules/core/04-channel/types/query.go
similarity index 96%
rename from core/04-channel/types/query.go
rename to modules/core/04-channel/types/query.go
index 9a50900c..8bd65fd4 100644
--- a/core/04-channel/types/query.go
+++ b/modules/core/04-channel/types/query.go
@@ -2,8 +2,8 @@ package types
import (
codectypes "github.com/cosmos/cosmos-sdk/codec/types"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- "github.com/cosmos/ibc-go/core/exported"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ "github.com/cosmos/ibc-go/modules/core/exported"
)
var (
diff --git a/core/04-channel/types/query.pb.go b/modules/core/04-channel/types/query.pb.go
similarity index 95%
rename from core/04-channel/types/query.pb.go
rename to modules/core/04-channel/types/query.pb.go
index 7330eaf2..07c825a1 100644
--- a/core/04-channel/types/query.pb.go
+++ b/modules/core/04-channel/types/query.pb.go
@@ -8,7 +8,7 @@ import (
fmt "fmt"
types1 "github.com/cosmos/cosmos-sdk/codec/types"
query "github.com/cosmos/cosmos-sdk/types/query"
- types "github.com/cosmos/ibc-go/core/02-client/types"
+ types "github.com/cosmos/ibc-go/modules/core/02-client/types"
_ "github.com/gogo/protobuf/gogoproto"
grpc1 "github.com/gogo/protobuf/grpc"
proto "github.com/gogo/protobuf/proto"
@@ -1699,100 +1699,101 @@ func init() {
func init() { proto.RegisterFile("ibcgo/core/channel/v1/query.proto", fileDescriptor_3acdacc9aeb4fa50) }
var fileDescriptor_3acdacc9aeb4fa50 = []byte{
- // 1487 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x59, 0xcb, 0x8f, 0xdb, 0xd4,
- 0x17, 0x9e, 0x9b, 0x99, 0xb6, 0x99, 0x33, 0xfd, 0xf5, 0x71, 0x3b, 0x69, 0xa7, 0xee, 0x34, 0x9d,
- 0xba, 0xfa, 0xd1, 0x51, 0x4b, 0xed, 0x66, 0xfa, 0xa0, 0x54, 0x50, 0xa9, 0x2d, 0xd0, 0x0e, 0x52,
- 0x5f, 0x2e, 0x15, 0x6d, 0x25, 0x08, 0x8e, 0x73, 0x9b, 0xb1, 0x66, 0x62, 0xbb, 0xb1, 0x93, 0xb6,
- 0x0c, 0x41, 0x88, 0x05, 0x20, 0xc4, 0x02, 0x09, 0x01, 0x1b, 0x24, 0x36, 0x88, 0x0d, 0xea, 0x8a,
- 0x3f, 0x80, 0x05, 0x9b, 0x2e, 0x2b, 0x15, 0x89, 0x6e, 0x78, 0x68, 0x06, 0x89, 0x45, 0xd9, 0xb2,
- 0x61, 0x85, 0x7c, 0x1f, 0x8e, 0x9d, 0xd8, 0x9e, 0x49, 0x32, 0x91, 0x2a, 0x76, 0xf1, 0xf5, 0x3d,
- 0xe7, 0x7e, 0xdf, 0x77, 0xee, 0x39, 0x39, 0x27, 0x81, 0xbd, 0x66, 0xc9, 0xa8, 0xd8, 0xaa, 0x61,
- 0xd7, 0x88, 0x6a, 0xcc, 0xe9, 0x96, 0x45, 0x16, 0xd4, 0x46, 0x41, 0xbd, 0x5d, 0x27, 0xb5, 0x7b,
- 0x8a, 0x53, 0xb3, 0x3d, 0x1b, 0xe7, 0xe8, 0x16, 0xc5, 0xdf, 0xa2, 0xf0, 0x2d, 0x4a, 0xa3, 0x20,
- 0x45, 0x2c, 0x17, 0x4c, 0x62, 0x79, 0xbe, 0x21, 0xfb, 0xc4, 0x2c, 0xa5, 0x03, 0x86, 0xed, 0x56,
- 0x6d, 0x57, 0x2d, 0xe9, 0x2e, 0x61, 0x2e, 0xd5, 0x46, 0xa1, 0x44, 0x3c, 0xbd, 0xa0, 0x3a, 0x7a,
- 0xc5, 0xb4, 0x74, 0xcf, 0xb4, 0x2d, 0xbe, 0x77, 0x5f, 0x3c, 0x10, 0x71, 0x20, 0xdb, 0x34, 0x59,
- 0xb1, 0xed, 0xca, 0x02, 0x51, 0x75, 0xc7, 0x54, 0x75, 0xcb, 0xb2, 0x3d, 0xea, 0xc1, 0xe5, 0x6f,
- 0x77, 0xf2, 0xb7, 0xf4, 0xa9, 0x54, 0xbf, 0xa5, 0xea, 0x16, 0xe7, 0x20, 0x8d, 0x57, 0xec, 0x8a,
- 0x4d, 0x3f, 0xaa, 0xfe, 0x27, 0xb6, 0x2a, 0x5f, 0x80, 0x6d, 0x57, 0x7c, 0x54, 0x67, 0xd9, 0x21,
- 0x1a, 0xb9, 0x5d, 0x27, 0xae, 0x87, 0x77, 0xc0, 0x06, 0xc7, 0xae, 0x79, 0x45, 0xb3, 0x3c, 0x81,
- 0xa6, 0xd0, 0xf4, 0xa8, 0xb6, 0xde, 0x7f, 0x9c, 0x2d, 0xe3, 0xdd, 0x00, 0x1c, 0x8f, 0xff, 0x2e,
- 0x43, 0xdf, 0x8d, 0xf2, 0x95, 0xd9, 0xb2, 0x7c, 0x1f, 0xc1, 0x78, 0xd4, 0x9f, 0xeb, 0xd8, 0x96,
- 0x4b, 0xf0, 0x09, 0xd8, 0xc0, 0x77, 0x51, 0x87, 0x63, 0x33, 0x79, 0x25, 0x56, 0x53, 0x45, 0x18,
- 0x8a, 0xed, 0x78, 0x1c, 0xd6, 0x39, 0x35, 0xdb, 0xbe, 0x45, 0x0f, 0xdb, 0xa8, 0xb1, 0x07, 0xfc,
- 0x32, 0x6c, 0xa4, 0x1f, 0x8a, 0x73, 0xc4, 0xac, 0xcc, 0x79, 0x13, 0xc3, 0xd4, 0xe9, 0x64, 0xc4,
- 0x29, 0x8b, 0x43, 0xa3, 0xa0, 0x9c, 0xa7, 0x7b, 0xce, 0x8c, 0x3c, 0xf8, 0x75, 0xcf, 0x90, 0x36,
- 0x46, 0xed, 0xd8, 0x92, 0xfc, 0x66, 0x14, 0xae, 0x2b, 0xf8, 0xbf, 0x02, 0xd0, 0x0a, 0x0f, 0x47,
- 0xfc, 0x8c, 0xc2, 0x62, 0xa9, 0xf8, 0xb1, 0x54, 0xd8, 0xf5, 0xe0, 0xb1, 0x54, 0x2e, 0xeb, 0x15,
- 0xc2, 0x6d, 0xb5, 0x90, 0xa5, 0xbc, 0x84, 0x20, 0xd7, 0x76, 0x00, 0x17, 0xe4, 0x25, 0xc8, 0x72,
- 0x86, 0xee, 0x04, 0x9a, 0x1a, 0x9e, 0x1e, 0x9b, 0x99, 0x4e, 0x50, 0x64, 0xb6, 0x4c, 0x2c, 0xcf,
- 0xbc, 0x65, 0x92, 0xb2, 0xd0, 0x26, 0xb0, 0xc4, 0xe7, 0x22, 0x38, 0x33, 0x14, 0xe7, 0xfe, 0x15,
- 0x71, 0x32, 0x08, 0x61, 0xa0, 0xf8, 0x24, 0xac, 0xef, 0x5a, 0x49, 0x6e, 0x21, 0x7f, 0x84, 0x20,
- 0xcf, 0x48, 0xda, 0x96, 0x45, 0x0c, 0xdf, 0x5f, 0xbb, 0x9e, 0x79, 0x00, 0x23, 0x78, 0xc9, 0xaf,
- 0x54, 0x68, 0xa5, 0x4d, 0xef, 0x4c, 0xcf, 0x7a, 0xff, 0x85, 0x60, 0x4f, 0x22, 0x94, 0xff, 0x9e,
- 0xf2, 0xd7, 0x85, 0xf0, 0x0c, 0xd5, 0x59, 0xba, 0xfb, 0xaa, 0xa7, 0x7b, 0xa4, 0xdf, 0x44, 0x5e,
- 0x0e, 0x84, 0x8c, 0x71, 0xcd, 0x85, 0x34, 0x60, 0x87, 0x19, 0x28, 0x54, 0x64, 0x50, 0x8b, 0xae,
- 0xbf, 0x85, 0x67, 0xcc, 0xc1, 0x78, 0x2a, 0x21, 0x59, 0x43, 0x5e, 0x73, 0x66, 0xdc, 0xf2, 0x60,
- 0xd3, 0xff, 0x3e, 0x82, 0xbd, 0x11, 0x96, 0x3e, 0x2f, 0xcb, 0xad, 0xbb, 0x6b, 0xa1, 0x21, 0xde,
- 0x0f, 0x9b, 0x6b, 0xa4, 0x61, 0xba, 0xa6, 0x6d, 0x15, 0xad, 0x7a, 0xb5, 0x44, 0x6a, 0x14, 0xe7,
- 0x88, 0xb6, 0x49, 0x2c, 0x5f, 0xa4, 0xab, 0x91, 0x8d, 0x9c, 0xd0, 0x48, 0x74, 0x23, 0xc7, 0xfb,
- 0x0b, 0x02, 0x39, 0x0d, 0x2f, 0x0f, 0xcc, 0x8b, 0xb0, 0xd9, 0x10, 0x6f, 0x22, 0x01, 0x19, 0x57,
- 0xd8, 0xf7, 0x83, 0x22, 0xbe, 0x1f, 0x94, 0xd3, 0xd6, 0x3d, 0x6d, 0x93, 0x11, 0x71, 0x83, 0x77,
- 0xc1, 0x28, 0x0f, 0x66, 0xc0, 0x2a, 0xcb, 0x16, 0x66, 0xcb, 0xad, 0x78, 0x0c, 0xa7, 0xc5, 0x63,
- 0xa4, 0xb7, 0x78, 0xd4, 0x60, 0x92, 0xd2, 0xbb, 0xac, 0x1b, 0xf3, 0xc4, 0x3b, 0x6b, 0x57, 0xab,
- 0xa6, 0x57, 0x25, 0x96, 0xd7, 0x6f, 0x24, 0x24, 0xc8, 0xba, 0xbe, 0x0b, 0xcb, 0x20, 0x3c, 0x04,
- 0xc1, 0xb3, 0xfc, 0x15, 0x82, 0xdd, 0x09, 0x87, 0x72, 0x39, 0x69, 0xf1, 0x12, 0xab, 0xf4, 0xe0,
- 0x8d, 0x5a, 0x68, 0x65, 0xb0, 0x57, 0xf4, 0xeb, 0x24, 0x78, 0x6e, 0xbf, 0xa2, 0x44, 0x6b, 0xee,
- 0x70, 0xcf, 0x35, 0xf7, 0x89, 0x28, 0xff, 0x31, 0x08, 0x83, 0x92, 0x3b, 0xd6, 0xd2, 0x4b, 0x54,
- 0x5d, 0x39, 0xa1, 0xea, 0x32, 0x37, 0xec, 0x46, 0x87, 0xcd, 0x9e, 0x8e, 0x92, 0x6b, 0xc3, 0xce,
- 0x10, 0x59, 0x8d, 0x18, 0xc4, 0x74, 0x06, 0x7a, 0x3f, 0x3f, 0x47, 0x20, 0xc5, 0x9d, 0xc8, 0xa5,
- 0x95, 0x20, 0x5b, 0xf3, 0x97, 0x1a, 0x84, 0xf9, 0xcd, 0x6a, 0xc1, 0xf3, 0x60, 0x73, 0xf5, 0x0e,
- 0x2f, 0x9d, 0x0c, 0xd6, 0x69, 0x63, 0xde, 0xb2, 0xef, 0x2c, 0x90, 0x72, 0x85, 0x0c, 0x3a, 0x61,
- 0xbf, 0x13, 0x45, 0x30, 0xe1, 0x64, 0x2e, 0xcc, 0x34, 0x6c, 0xd6, 0xa3, 0xaf, 0x78, 0xea, 0xb6,
- 0x2f, 0x0f, 0x36, 0x7f, 0xbf, 0x49, 0x45, 0xfb, 0xd4, 0x24, 0xf1, 0x3f, 0x08, 0xf6, 0xa5, 0xc2,
- 0xe4, 0xaa, 0x5e, 0x84, 0x2d, 0x6d, 0xf2, 0x75, 0x93, 0xce, 0x1d, 0xb6, 0x4f, 0x47, 0x4e, 0x7f,
- 0x29, 0x6a, 0xec, 0x35, 0x4b, 0xe4, 0x0e, 0x43, 0xdd, 0x77, 0x78, 0x4e, 0xc1, 0x2e, 0x87, 0x7a,
- 0x2a, 0xb6, 0x0a, 0x59, 0x51, 0xdc, 0x64, 0x77, 0x62, 0x78, 0x6a, 0x78, 0x7a, 0x44, 0xdb, 0xe9,
- 0xb4, 0x15, 0xce, 0xab, 0x62, 0x83, 0xfc, 0x36, 0x2f, 0xad, 0x31, 0xc0, 0x78, 0x40, 0x26, 0x61,
- 0xb4, 0xe5, 0x0f, 0x51, 0x7f, 0xad, 0x85, 0x90, 0x2a, 0x99, 0xae, 0x55, 0xf9, 0x40, 0x14, 0x9e,
- 0xd6, 0xe1, 0xa7, 0x8d, 0xf9, 0xbe, 0x25, 0x39, 0x0c, 0xe3, 0x5c, 0x12, 0xdd, 0x98, 0xef, 0xd0,
- 0x02, 0x3b, 0xe2, 0xfe, 0xb5, 0x44, 0xb8, 0x03, 0xbb, 0x62, 0x71, 0x0c, 0x5c, 0x81, 0x1b, 0xbc,
- 0x07, 0xbe, 0x48, 0xee, 0x06, 0x31, 0xd1, 0x18, 0x84, 0x7e, 0xfb, 0xeb, 0xef, 0x11, 0x4c, 0x25,
- 0xfb, 0xe6, 0xcc, 0x66, 0x20, 0x67, 0x91, 0xbb, 0xad, 0x0b, 0x53, 0xe4, 0xfc, 0xe9, 0x51, 0x23,
- 0xda, 0x36, 0xab, 0xd3, 0x76, 0xa0, 0xc5, 0x6c, 0xe6, 0x87, 0xed, 0xb0, 0x8e, 0xa2, 0xc6, 0xdf,
- 0x22, 0xd8, 0xc0, 0x9b, 0x50, 0x7c, 0x20, 0x21, 0xf3, 0x63, 0x7e, 0x58, 0x90, 0x0e, 0xae, 0x6a,
- 0x2f, 0xe3, 0x2f, 0x9f, 0x79, 0xff, 0xd1, 0x1f, 0x9f, 0x65, 0x5e, 0xc0, 0x27, 0x55, 0xb3, 0x64,
- 0x24, 0xfd, 0x2e, 0xe2, 0xaa, 0x8b, 0x2d, 0xa1, 0x9b, 0xaa, 0x2f, 0xbf, 0xab, 0x2e, 0xf2, 0xa0,
- 0x34, 0xf1, 0x27, 0x08, 0xb2, 0x62, 0x04, 0xc4, 0xab, 0x39, 0x5d, 0x5c, 0x70, 0xe9, 0xd9, 0xd5,
- 0x6d, 0xe6, 0x58, 0xff, 0x4f, 0xb1, 0xee, 0xc1, 0xbb, 0x53, 0xb1, 0xe2, 0x1f, 0x11, 0xe0, 0xce,
- 0xd9, 0x14, 0x1f, 0x4b, 0x3d, 0x2b, 0x69, 0xac, 0x96, 0x8e, 0x77, 0x6b, 0xc6, 0xc1, 0x9e, 0xa2,
- 0x60, 0x4f, 0xe0, 0xe3, 0xf1, 0x60, 0x03, 0x43, 0x5f, 0xdb, 0xe0, 0xa1, 0xd9, 0x62, 0xf1, 0x93,
- 0xcf, 0xa2, 0x63, 0x30, 0x5c, 0x81, 0x45, 0xd2, 0x8c, 0xba, 0x02, 0x8b, 0xc4, 0xf9, 0x53, 0xbe,
- 0x44, 0x59, 0xcc, 0xe2, 0x73, 0xbd, 0x5f, 0x0f, 0x35, 0x3c, 0xb5, 0xe2, 0x2f, 0x32, 0x90, 0x8b,
- 0x9d, 0xac, 0xf0, 0x89, 0xd5, 0x40, 0x8c, 0x1b, 0x1e, 0xa5, 0xe7, 0x7b, 0xb0, 0xe4, 0xfc, 0x3e,
- 0x44, 0x94, 0xe0, 0x7b, 0x08, 0xbf, 0xdb, 0x0f, 0xc3, 0xe8, 0x24, 0xa8, 0x8a, 0x91, 0x52, 0x5d,
- 0x6c, 0x1b, 0x4e, 0x9b, 0x2a, 0x2b, 0x0e, 0xa1, 0x17, 0x6c, 0xa1, 0x89, 0x7f, 0x43, 0xb0, 0xa5,
- 0xbd, 0xbb, 0xc7, 0x47, 0xd2, 0x98, 0x25, 0x4c, 0x70, 0xd2, 0xd1, 0xee, 0x8c, 0xb8, 0x12, 0x6f,
- 0x51, 0x21, 0x6e, 0xe2, 0xeb, 0x7d, 0xe8, 0xd0, 0xf1, 0x3d, 0xec, 0xaa, 0x8b, 0xa2, 0xb0, 0x36,
- 0xf1, 0xcf, 0x08, 0xb6, 0x76, 0xcc, 0x2f, 0xb8, 0x2b, 0xb4, 0x41, 0x56, 0x1e, 0xeb, 0xd2, 0x8a,
- 0x93, 0xbc, 0x46, 0x49, 0x5e, 0xc2, 0x17, 0xd6, 0x94, 0x24, 0x7e, 0x84, 0xe0, 0x7f, 0x91, 0xd1,
- 0x01, 0x1f, 0x5e, 0x19, 0x5f, 0x74, 0xae, 0x91, 0x0a, 0x5d, 0x58, 0x70, 0x36, 0x6f, 0x50, 0x36,
- 0xaf, 0xe3, 0x6b, 0xfd, 0xb3, 0xa9, 0x31, 0xd7, 0x91, 0x78, 0xfd, 0x89, 0x20, 0x17, 0xdb, 0xaa,
- 0xa6, 0xa7, 0x6a, 0xda, 0xb0, 0x92, 0x9e, 0xaa, 0xa9, 0xc3, 0x86, 0x7c, 0x83, 0xb2, 0xbd, 0x8a,
- 0xaf, 0xf4, 0xcf, 0x56, 0x37, 0xe6, 0x23, 0x4c, 0x9f, 0x20, 0xd8, 0x1e, 0xdf, 0x94, 0xe3, 0xee,
- 0x01, 0x07, 0x77, 0xf4, 0x64, 0x2f, 0xa6, 0x9c, 0xec, 0x4d, 0x4a, 0xf6, 0x35, 0xac, 0xad, 0x09,
- 0xd9, 0x28, 0xa5, 0x8f, 0x33, 0xb0, 0xb5, 0xa3, 0xd9, 0x4d, 0xcf, 0xc3, 0xa4, 0xa6, 0x3d, 0x3d,
- 0x0f, 0x13, 0x3b, 0xea, 0x35, 0x2a, 0xbb, 0x71, 0xe5, 0x26, 0x65, 0x14, 0x68, 0xaa, 0xf5, 0x00,
- 0x50, 0xd1, 0xe1, 0xb4, 0xff, 0x46, 0xb0, 0x29, 0xda, 0xf4, 0xe2, 0xc2, 0xea, 0x38, 0x85, 0x1a,
- 0x75, 0x69, 0xa6, 0x1b, 0x13, 0xae, 0xc1, 0x3b, 0x54, 0x82, 0x06, 0xf6, 0x06, 0xa3, 0x40, 0xa4,
- 0xf3, 0x8f, 0x50, 0xf7, 0x6f, 0x3f, 0x7e, 0x8c, 0x60, 0x5b, 0x4c, 0x5f, 0x8c, 0x53, 0x1b, 0x85,
- 0xe4, 0x26, 0x5d, 0x7a, 0xae, 0x6b, 0x3b, 0x2e, 0xc3, 0x65, 0x2a, 0xc3, 0xab, 0xf8, 0x7c, 0x1f,
- 0x32, 0x44, 0x3a, 0xf8, 0x33, 0xe7, 0x1f, 0x2c, 0xe5, 0xd1, 0xc3, 0xa5, 0x3c, 0xfa, 0x7d, 0x29,
- 0x8f, 0x3e, 0x5d, 0xce, 0x0f, 0x3d, 0x5c, 0xce, 0x0f, 0x3d, 0x5e, 0xce, 0x0f, 0xdd, 0x54, 0x2a,
- 0xa6, 0x37, 0x57, 0x2f, 0x29, 0x86, 0x5d, 0x55, 0xf9, 0x9f, 0x86, 0x66, 0xc9, 0x38, 0x24, 0xfe,
- 0x10, 0x3c, 0x7c, 0xf4, 0x90, 0x38, 0xda, 0xbb, 0xe7, 0x10, 0xb7, 0xb4, 0x9e, 0xfe, 0x86, 0x7b,
- 0xe4, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xb4, 0x58, 0xb6, 0x16, 0xbf, 0x1c, 0x00, 0x00,
+ // 1495 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x59, 0xdf, 0x6f, 0x14, 0xd5,
+ 0x17, 0xef, 0xdd, 0x16, 0x68, 0x4f, 0xf9, 0xf2, 0xe3, 0xd2, 0x42, 0x19, 0xca, 0x52, 0x96, 0x7c,
+ 0xa5, 0x01, 0x99, 0x61, 0xcb, 0x0f, 0x91, 0x28, 0x09, 0xa0, 0x42, 0x4d, 0x80, 0x32, 0x48, 0x04,
+ 0x12, 0x5d, 0x67, 0x67, 0x2f, 0xdb, 0x49, 0xbb, 0x73, 0x87, 0x9d, 0xd9, 0x05, 0xac, 0x6b, 0x8c,
+ 0x0f, 0x6a, 0x8c, 0x0f, 0x26, 0x46, 0x7d, 0x31, 0xf1, 0xc5, 0xf8, 0x62, 0x78, 0xf2, 0x0f, 0xf0,
+ 0xc1, 0x17, 0x1e, 0x49, 0x30, 0x91, 0x17, 0x7f, 0xa4, 0x35, 0xf1, 0x01, 0x5f, 0x7d, 0xf1, 0xc9,
+ 0xcc, 0xfd, 0x31, 0x3b, 0xb3, 0x3b, 0x33, 0xed, 0x76, 0xbb, 0x49, 0xe3, 0xdb, 0xcc, 0x9d, 0x7b,
+ 0xce, 0xfd, 0x7c, 0x3e, 0xe7, 0x9e, 0xd3, 0x73, 0xb6, 0xb0, 0xdf, 0x2a, 0x9a, 0x65, 0xaa, 0x99,
+ 0xb4, 0x4a, 0x34, 0x73, 0xd6, 0xb0, 0x6d, 0x32, 0xaf, 0xd5, 0xf3, 0xda, 0x9d, 0x1a, 0xa9, 0xde,
+ 0x57, 0x9d, 0x2a, 0xf5, 0x28, 0x1e, 0x65, 0x5b, 0x54, 0x7f, 0x8b, 0x2a, 0xb6, 0xa8, 0xf5, 0xbc,
+ 0x12, 0xb1, 0x9c, 0xb7, 0x88, 0xed, 0xf9, 0x86, 0xfc, 0x89, 0x5b, 0x2a, 0x87, 0x4c, 0xea, 0x56,
+ 0xa8, 0xab, 0x15, 0x0d, 0x97, 0x70, 0x97, 0x5a, 0x3d, 0x5f, 0x24, 0x9e, 0x91, 0xd7, 0x1c, 0xa3,
+ 0x6c, 0xd9, 0x86, 0x67, 0x51, 0x5b, 0xec, 0x3d, 0x10, 0x0f, 0x44, 0x1e, 0xc8, 0x37, 0x8d, 0x97,
+ 0x29, 0x2d, 0xcf, 0x13, 0xcd, 0x70, 0x2c, 0xcd, 0xb0, 0x6d, 0xea, 0x31, 0x0f, 0xae, 0xf8, 0xba,
+ 0x5b, 0x7c, 0x65, 0x6f, 0xc5, 0xda, 0x6d, 0xcd, 0xb0, 0x05, 0x07, 0x65, 0xa4, 0x4c, 0xcb, 0x94,
+ 0x3d, 0x6a, 0xfe, 0x13, 0x5f, 0xcd, 0x5d, 0x82, 0x1d, 0x57, 0x7d, 0x54, 0xe7, 0xf9, 0x21, 0x3a,
+ 0xb9, 0x53, 0x23, 0xae, 0x87, 0x77, 0xc1, 0x26, 0x87, 0x56, 0xbd, 0x82, 0x55, 0x1a, 0x43, 0x13,
+ 0x68, 0x72, 0x48, 0xdf, 0xe8, 0xbf, 0x4e, 0x97, 0xf0, 0x5e, 0x00, 0x81, 0xc7, 0xff, 0x96, 0x61,
+ 0xdf, 0x86, 0xc4, 0xca, 0x74, 0x29, 0xf7, 0x00, 0xc1, 0x48, 0xd4, 0x9f, 0xeb, 0x50, 0xdb, 0x25,
+ 0xf8, 0x14, 0x6c, 0x12, 0xbb, 0x98, 0xc3, 0xe1, 0xa9, 0xac, 0x1a, 0xab, 0xa9, 0x2a, 0x0d, 0xe5,
+ 0x76, 0x3c, 0x02, 0x1b, 0x9c, 0x2a, 0xa5, 0xb7, 0xd9, 0x61, 0x9b, 0x75, 0xfe, 0x82, 0x5f, 0x86,
+ 0xcd, 0xec, 0xa1, 0x30, 0x4b, 0xac, 0xf2, 0xac, 0x37, 0xd6, 0xcf, 0x9c, 0x8e, 0x47, 0x9c, 0xf2,
+ 0x38, 0xd4, 0xf3, 0xea, 0x45, 0xb6, 0xe7, 0xdc, 0xc0, 0xc3, 0x5f, 0xf7, 0xf5, 0xe9, 0xc3, 0xcc,
+ 0x8e, 0x2f, 0xe5, 0xde, 0x8c, 0xc2, 0x75, 0x25, 0xff, 0x57, 0x00, 0x9a, 0xe1, 0x11, 0x88, 0x9f,
+ 0x51, 0x79, 0x2c, 0x55, 0x3f, 0x96, 0x2a, 0xbf, 0x1e, 0x22, 0x96, 0xea, 0x8c, 0x51, 0x26, 0xc2,
+ 0x56, 0x0f, 0x59, 0xe6, 0x16, 0x11, 0x8c, 0xb6, 0x1c, 0x20, 0x04, 0x79, 0x09, 0x06, 0x05, 0x43,
+ 0x77, 0x0c, 0x4d, 0xf4, 0x4f, 0x0e, 0x4f, 0x4d, 0x26, 0x28, 0x32, 0x5d, 0x22, 0xb6, 0x67, 0xdd,
+ 0xb6, 0x48, 0x49, 0x6a, 0x13, 0x58, 0xe2, 0x0b, 0x11, 0x9c, 0x19, 0x86, 0xf3, 0xe0, 0xb2, 0x38,
+ 0x39, 0x84, 0x30, 0x50, 0x7c, 0x1a, 0x36, 0x76, 0xac, 0xa4, 0xb0, 0xc8, 0x7d, 0x84, 0x20, 0xcb,
+ 0x49, 0x52, 0xdb, 0x26, 0xa6, 0xef, 0xaf, 0x55, 0xcf, 0x2c, 0x80, 0x19, 0x7c, 0x14, 0x57, 0x2a,
+ 0xb4, 0xd2, 0xa2, 0x77, 0x66, 0xd5, 0x7a, 0xff, 0x85, 0x60, 0x5f, 0x22, 0x94, 0xff, 0x9e, 0xf2,
+ 0x37, 0xa4, 0xf0, 0x1c, 0xd5, 0x79, 0xb6, 0xfb, 0x9a, 0x67, 0x78, 0xa4, 0xdb, 0x44, 0x5e, 0x0a,
+ 0x84, 0x8c, 0x71, 0x2d, 0x84, 0x34, 0x61, 0x97, 0x15, 0x28, 0x54, 0xe0, 0x50, 0x0b, 0xae, 0xbf,
+ 0x45, 0x64, 0xcc, 0xe1, 0x78, 0x2a, 0x21, 0x59, 0x43, 0x5e, 0x47, 0xad, 0xb8, 0xe5, 0xde, 0xa6,
+ 0xff, 0x03, 0x04, 0xfb, 0x23, 0x2c, 0x7d, 0x5e, 0xb6, 0x5b, 0x73, 0xd7, 0x42, 0x43, 0x7c, 0x10,
+ 0xb6, 0x56, 0x49, 0xdd, 0x72, 0x2d, 0x6a, 0x17, 0xec, 0x5a, 0xa5, 0x48, 0xaa, 0x0c, 0xe7, 0x80,
+ 0xbe, 0x45, 0x2e, 0x5f, 0x66, 0xab, 0x91, 0x8d, 0x82, 0xd0, 0x40, 0x74, 0xa3, 0xc0, 0xfb, 0x0b,
+ 0x82, 0x5c, 0x1a, 0x5e, 0x11, 0x98, 0x17, 0x61, 0xab, 0x29, 0xbf, 0x44, 0x02, 0x32, 0xa2, 0xf2,
+ 0xbf, 0x0f, 0xaa, 0xfc, 0xfb, 0xa0, 0x9e, 0xb5, 0xef, 0xeb, 0x5b, 0xcc, 0x88, 0x1b, 0xbc, 0x07,
+ 0x86, 0x44, 0x30, 0x03, 0x56, 0x83, 0x7c, 0x61, 0xba, 0xd4, 0x8c, 0x47, 0x7f, 0x5a, 0x3c, 0x06,
+ 0x56, 0x17, 0x8f, 0x2a, 0x8c, 0x33, 0x7a, 0x33, 0x86, 0x39, 0x47, 0xbc, 0xf3, 0xb4, 0x52, 0xb1,
+ 0xbc, 0x0a, 0xb1, 0xbd, 0x6e, 0x23, 0xa1, 0xc0, 0xa0, 0xeb, 0xbb, 0xb0, 0x4d, 0x22, 0x42, 0x10,
+ 0xbc, 0xe7, 0xbe, 0x42, 0xb0, 0x37, 0xe1, 0x50, 0x21, 0x27, 0x2b, 0x5e, 0x72, 0x95, 0x1d, 0xbc,
+ 0x59, 0x0f, 0xad, 0xf4, 0xf6, 0x8a, 0x7e, 0x9d, 0x04, 0xcf, 0xed, 0x56, 0x94, 0x68, 0xcd, 0xed,
+ 0x5f, 0x75, 0xcd, 0x7d, 0x2a, 0xcb, 0x7f, 0x0c, 0xc2, 0xa0, 0xe4, 0x0e, 0x37, 0xf5, 0x92, 0x55,
+ 0x37, 0x97, 0x50, 0x75, 0xb9, 0x1b, 0x7e, 0xa3, 0xc3, 0x66, 0xeb, 0xa3, 0xe4, 0x52, 0xd8, 0x1d,
+ 0x22, 0xab, 0x13, 0x93, 0x58, 0x4e, 0x4f, 0xef, 0xe7, 0xe7, 0x08, 0x94, 0xb8, 0x13, 0x85, 0xb4,
+ 0x0a, 0x0c, 0x56, 0xfd, 0xa5, 0x3a, 0xe1, 0x7e, 0x07, 0xf5, 0xe0, 0xbd, 0xb7, 0xb9, 0x7a, 0x57,
+ 0x94, 0x4e, 0x0e, 0xeb, 0xac, 0x39, 0x67, 0xd3, 0xbb, 0xf3, 0xa4, 0x54, 0x26, 0xbd, 0x4e, 0xd8,
+ 0xef, 0x64, 0x11, 0x4c, 0x38, 0x59, 0x08, 0x33, 0x09, 0x5b, 0x8d, 0xe8, 0x27, 0x91, 0xba, 0xad,
+ 0xcb, 0xbd, 0xcd, 0xdf, 0x6f, 0x52, 0xd1, 0xae, 0x9b, 0x24, 0xfe, 0x07, 0xc1, 0x81, 0x54, 0x98,
+ 0x42, 0xd5, 0xcb, 0xb0, 0xad, 0x45, 0xbe, 0x4e, 0xd2, 0xb9, 0xcd, 0x76, 0x7d, 0xe4, 0xf4, 0x97,
+ 0xb2, 0xc6, 0x5e, 0xb7, 0x65, 0xee, 0x70, 0xd4, 0x5d, 0x87, 0xe7, 0x0c, 0xec, 0x71, 0x98, 0xa7,
+ 0x42, 0xb3, 0x90, 0x15, 0xe4, 0x4d, 0x76, 0xc7, 0xfa, 0x27, 0xfa, 0x27, 0x07, 0xf4, 0xdd, 0x4e,
+ 0x4b, 0xe1, 0xbc, 0x26, 0x37, 0xe4, 0xde, 0x16, 0xa5, 0x35, 0x06, 0x98, 0x08, 0xc8, 0x38, 0x0c,
+ 0x35, 0xfd, 0x21, 0xe6, 0xaf, 0xb9, 0x10, 0x52, 0x25, 0xd3, 0xb1, 0x2a, 0x1f, 0xc8, 0xc2, 0xd3,
+ 0x3c, 0xfc, 0xac, 0x39, 0xd7, 0xb5, 0x24, 0x47, 0x61, 0x44, 0x48, 0x62, 0x98, 0x73, 0x6d, 0x5a,
+ 0x60, 0x47, 0xde, 0xbf, 0xa6, 0x08, 0x77, 0x61, 0x4f, 0x2c, 0x8e, 0x9e, 0x2b, 0x70, 0x53, 0xf4,
+ 0xc0, 0x97, 0xc9, 0xbd, 0x20, 0x26, 0x3a, 0x87, 0xd0, 0x6d, 0x7f, 0xfd, 0x3d, 0x82, 0x89, 0x64,
+ 0xdf, 0x82, 0xd9, 0x14, 0x8c, 0xda, 0xe4, 0x5e, 0xf3, 0xc2, 0x14, 0x04, 0x7f, 0x76, 0xd4, 0x80,
+ 0xbe, 0xc3, 0x6e, 0xb7, 0xed, 0x69, 0x31, 0x9b, 0xfa, 0x61, 0x27, 0x6c, 0x60, 0xa8, 0xf1, 0xb7,
+ 0x08, 0x36, 0x89, 0x26, 0x14, 0x1f, 0x4a, 0xc8, 0xfc, 0x98, 0x1f, 0x16, 0x94, 0xc3, 0x2b, 0xda,
+ 0xcb, 0xf9, 0xe7, 0xce, 0xbd, 0xff, 0xf8, 0x8f, 0xcf, 0x32, 0x2f, 0xe0, 0xd3, 0x9a, 0x55, 0x34,
+ 0x93, 0x7e, 0x17, 0x71, 0xb5, 0x85, 0xa6, 0xd0, 0x0d, 0xcd, 0x97, 0xdf, 0xd5, 0x16, 0x44, 0x50,
+ 0x1a, 0xf8, 0x13, 0x04, 0x83, 0x72, 0x04, 0xc4, 0x2b, 0x39, 0x5d, 0x5e, 0x70, 0xe5, 0xd9, 0x95,
+ 0x6d, 0x16, 0x58, 0xff, 0xcf, 0xb0, 0xee, 0xc3, 0x7b, 0x53, 0xb1, 0xe2, 0x1f, 0x11, 0xe0, 0xf6,
+ 0xd9, 0x14, 0x9f, 0x48, 0x3d, 0x2b, 0x69, 0xac, 0x56, 0x4e, 0x76, 0x6a, 0x26, 0xc0, 0x9e, 0x61,
+ 0x60, 0x4f, 0xe1, 0x93, 0xf1, 0x60, 0x03, 0x43, 0x5f, 0xdb, 0xe0, 0xa5, 0xd1, 0x64, 0xf1, 0x93,
+ 0xcf, 0xa2, 0x6d, 0x30, 0x5c, 0x86, 0x45, 0xd2, 0x8c, 0xba, 0x0c, 0x8b, 0xc4, 0xf9, 0x33, 0x77,
+ 0x85, 0xb1, 0x98, 0xc6, 0x17, 0x56, 0x7f, 0x3d, 0xb4, 0xf0, 0xd4, 0x8a, 0xbf, 0xc8, 0xc0, 0x68,
+ 0xec, 0x64, 0x85, 0x4f, 0xad, 0x04, 0x62, 0xdc, 0xf0, 0xa8, 0x3c, 0xbf, 0x0a, 0x4b, 0xc1, 0xef,
+ 0x43, 0xc4, 0x08, 0xbe, 0x87, 0xf0, 0xbb, 0xdd, 0x30, 0x8c, 0x4e, 0x82, 0x9a, 0x1c, 0x29, 0xb5,
+ 0x85, 0x96, 0xe1, 0xb4, 0xa1, 0xf1, 0xe2, 0x10, 0xfa, 0xc0, 0x17, 0x1a, 0xf8, 0x37, 0x04, 0xdb,
+ 0x5a, 0xbb, 0x7b, 0x7c, 0x2c, 0x8d, 0x59, 0xc2, 0x04, 0xa7, 0x1c, 0xef, 0xcc, 0x48, 0x28, 0xf1,
+ 0x16, 0x13, 0xe2, 0x16, 0xbe, 0xd1, 0x85, 0x0e, 0x6d, 0x7f, 0x87, 0x5d, 0x6d, 0x41, 0x16, 0xd6,
+ 0x06, 0xfe, 0x19, 0xc1, 0xf6, 0xb6, 0xf9, 0x05, 0x77, 0x84, 0x36, 0xc8, 0xca, 0x13, 0x1d, 0x5a,
+ 0x09, 0x92, 0xd7, 0x19, 0xc9, 0x2b, 0xf8, 0xd2, 0x9a, 0x92, 0xc4, 0x8f, 0x11, 0xfc, 0x2f, 0x32,
+ 0x3a, 0xe0, 0xa3, 0xcb, 0xe3, 0x8b, 0xce, 0x35, 0x4a, 0xbe, 0x03, 0x0b, 0xc1, 0xe6, 0x0d, 0xc6,
+ 0xe6, 0x75, 0x7c, 0xbd, 0x7b, 0x36, 0x55, 0xee, 0x3a, 0x12, 0xaf, 0x3f, 0x11, 0x8c, 0xc6, 0xb6,
+ 0xaa, 0xe9, 0xa9, 0x9a, 0x36, 0xac, 0xa4, 0xa7, 0x6a, 0xea, 0xb0, 0x91, 0xbb, 0xc9, 0xd8, 0x5e,
+ 0xc3, 0x57, 0xbb, 0x67, 0x6b, 0x98, 0x73, 0x11, 0xa6, 0x4f, 0x11, 0xec, 0x8c, 0x6f, 0xca, 0x71,
+ 0xe7, 0x80, 0x83, 0x3b, 0x7a, 0x7a, 0x35, 0xa6, 0x82, 0xec, 0x2d, 0x46, 0xf6, 0x35, 0xac, 0xaf,
+ 0x09, 0xd9, 0x28, 0xa5, 0x8f, 0x33, 0xb0, 0xbd, 0xad, 0xd9, 0x4d, 0xcf, 0xc3, 0xa4, 0xa6, 0x3d,
+ 0x3d, 0x0f, 0x13, 0x3b, 0xea, 0x35, 0x2a, 0xbb, 0x71, 0xe5, 0x26, 0x65, 0x14, 0x68, 0x68, 0xb5,
+ 0x00, 0x50, 0xc1, 0x11, 0xb4, 0xff, 0x46, 0xb0, 0x25, 0xda, 0xf4, 0xe2, 0xfc, 0xca, 0x38, 0x85,
+ 0x1a, 0x75, 0x65, 0xaa, 0x13, 0x13, 0xa1, 0xc1, 0x3b, 0x4c, 0x82, 0x3a, 0xf6, 0x7a, 0xa3, 0x40,
+ 0xa4, 0xf3, 0x8f, 0x50, 0xf7, 0x6f, 0x3f, 0x7e, 0x82, 0x60, 0x47, 0x4c, 0x5f, 0x8c, 0x53, 0x1b,
+ 0x85, 0xe4, 0x26, 0x5d, 0x79, 0xae, 0x63, 0x3b, 0x21, 0xc3, 0x0c, 0x93, 0xe1, 0x55, 0x7c, 0xb1,
+ 0x0b, 0x19, 0x22, 0x1d, 0xfc, 0xb9, 0x99, 0x87, 0x8b, 0x59, 0xf4, 0x68, 0x31, 0x8b, 0x7e, 0x5f,
+ 0xcc, 0xa2, 0x4f, 0x97, 0xb2, 0x7d, 0x8f, 0x96, 0xb2, 0x7d, 0x4f, 0x96, 0xb2, 0x7d, 0xb7, 0x4e,
+ 0x96, 0x2d, 0x6f, 0xb6, 0x56, 0x54, 0x4d, 0x5a, 0xd1, 0xc4, 0x3f, 0x0d, 0xad, 0xa2, 0x79, 0xa4,
+ 0x4c, 0xb5, 0x0a, 0x2d, 0xd5, 0xe6, 0x89, 0xcb, 0xcf, 0x3f, 0x7a, 0xfc, 0x88, 0x84, 0xe0, 0xdd,
+ 0x77, 0x88, 0x5b, 0xdc, 0xc8, 0x7e, 0xcb, 0x3d, 0xf6, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x54,
+ 0x68, 0xbe, 0xa1, 0xc7, 0x1c, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
diff --git a/core/04-channel/types/query.pb.gw.go b/modules/core/04-channel/types/query.pb.gw.go
similarity index 100%
rename from core/04-channel/types/query.pb.gw.go
rename to modules/core/04-channel/types/query.pb.gw.go
diff --git a/core/04-channel/types/tx.pb.go b/modules/core/04-channel/types/tx.pb.go
similarity index 95%
rename from core/04-channel/types/tx.pb.go
rename to modules/core/04-channel/types/tx.pb.go
index 9b8976ec..70131bfe 100644
--- a/core/04-channel/types/tx.pb.go
+++ b/modules/core/04-channel/types/tx.pb.go
@@ -6,7 +6,7 @@ package types
import (
context "context"
fmt "fmt"
- types "github.com/cosmos/ibc-go/core/02-client/types"
+ types "github.com/cosmos/ibc-go/modules/core/02-client/types"
_ "github.com/gogo/protobuf/gogoproto"
grpc1 "github.com/gogo/protobuf/grpc"
proto "github.com/gogo/protobuf/proto"
@@ -855,78 +855,78 @@ func init() {
func init() { proto.RegisterFile("ibcgo/core/channel/v1/tx.proto", fileDescriptor_4f707a6c6f551009) }
var fileDescriptor_4f707a6c6f551009 = []byte{
- // 1126 bytes of a gzipped FileDescriptorProto
+ // 1134 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x58, 0xcd, 0x6e, 0xe3, 0x54,
0x14, 0xce, 0x5f, 0xd3, 0xf6, 0xb4, 0x4c, 0x5b, 0xa7, 0x3f, 0x19, 0x67, 0x6a, 0x77, 0x0c, 0x8b,
- 0x0c, 0x4c, 0x93, 0x49, 0x29, 0x42, 0x1a, 0x24, 0xa4, 0xa4, 0x12, 0x9a, 0x11, 0x2a, 0x83, 0x4c,
- 0x01, 0x69, 0x84, 0x14, 0xd2, 0x9b, 0x3b, 0xae, 0x95, 0xc4, 0x37, 0xd8, 0x4e, 0x68, 0xc4, 0x0b,
- 0xb0, 0x64, 0xc1, 0x8a, 0x05, 0x1a, 0x89, 0x35, 0x0b, 0x24, 0x1e, 0x62, 0x96, 0xb3, 0xe3, 0x67,
- 0x61, 0xa1, 0x76, 0xc3, 0xda, 0x4f, 0x80, 0x7c, 0x7d, 0xed, 0x38, 0x89, 0xdd, 0x3a, 0x1d, 0xd2,
- 0xe9, 0xce, 0x3e, 0xe7, 0xbb, 0xe7, 0x9c, 0xfb, 0x7d, 0xc7, 0xc7, 0xd7, 0x06, 0x41, 0x3d, 0x46,
- 0x0a, 0x29, 0x23, 0xa2, 0xe3, 0x32, 0x3a, 0x69, 0x68, 0x1a, 0x6e, 0x97, 0xfb, 0x95, 0xb2, 0x79,
- 0x5a, 0xea, 0xea, 0xc4, 0x24, 0xdc, 0x06, 0xf5, 0x97, 0x1c, 0x7f, 0x89, 0xf9, 0x4b, 0xfd, 0x0a,
- 0xbf, 0xae, 0x10, 0x85, 0x50, 0x44, 0xd9, 0xb9, 0x72, 0xc1, 0xfc, 0xdd, 0x60, 0xb0, 0xb6, 0x8a,
- 0x35, 0xd3, 0x89, 0xe5, 0x5e, 0x31, 0xc8, 0x9b, 0xe1, 0xf9, 0xbc, 0xd0, 0x14, 0x24, 0xfd, 0x92,
- 0x04, 0xee, 0xd0, 0x50, 0x0e, 0x5c, 0xe3, 0x93, 0x2e, 0xd6, 0x1e, 0x6b, 0xaa, 0xc9, 0xbd, 0x03,
- 0xf3, 0x5d, 0xa2, 0x9b, 0x75, 0xb5, 0x99, 0x4f, 0xee, 0x24, 0x8b, 0x8b, 0x35, 0xce, 0xb6, 0xc4,
- 0x5b, 0x83, 0x46, 0xa7, 0xfd, 0x50, 0x62, 0x0e, 0x49, 0xce, 0x3a, 0x57, 0x8f, 0x9b, 0xdc, 0x87,
- 0x30, 0xcf, 0x82, 0xe6, 0x53, 0x3b, 0xc9, 0xe2, 0xd2, 0x9e, 0x50, 0x0a, 0xdd, 0x4a, 0x89, 0x65,
- 0xa9, 0x65, 0x5e, 0x58, 0x62, 0x42, 0xf6, 0x16, 0x71, 0x9b, 0x90, 0x35, 0x54, 0x45, 0xc3, 0x7a,
- 0x3e, 0xed, 0xe4, 0x92, 0xd9, 0xdd, 0xc3, 0x85, 0xef, 0x9f, 0x8b, 0x89, 0x7f, 0x9f, 0x8b, 0x09,
- 0xe9, 0x0e, 0xf0, 0x93, 0x45, 0xca, 0xd8, 0xe8, 0x12, 0xcd, 0xc0, 0xd2, 0xdf, 0x69, 0x58, 0x1b,
- 0x75, 0x1f, 0xe9, 0x83, 0xe9, 0xb6, 0xf0, 0x09, 0xe4, 0xba, 0x3a, 0xee, 0xab, 0xa4, 0x67, 0xd4,
- 0x59, 0x59, 0xce, 0xc2, 0x14, 0x5d, 0x28, 0xd8, 0x96, 0xc8, 0xb3, 0x85, 0x93, 0x20, 0x49, 0x5e,
- 0xf3, 0xac, 0xac, 0x82, 0x51, 0x4a, 0xd2, 0x57, 0xa1, 0x44, 0x86, 0x75, 0x44, 0x7a, 0x9a, 0x89,
- 0xf5, 0x6e, 0x43, 0x37, 0x07, 0xf5, 0x3e, 0xd6, 0x0d, 0x95, 0x68, 0xf9, 0x0c, 0x2d, 0x48, 0xb4,
- 0x2d, 0xb1, 0xe0, 0x16, 0x14, 0x86, 0x92, 0xe4, 0x5c, 0xd0, 0xfc, 0x85, 0x6b, 0xe5, 0xf6, 0x01,
- 0xba, 0x3a, 0x21, 0xcf, 0xea, 0xaa, 0xa6, 0x9a, 0xf9, 0xb9, 0x9d, 0x64, 0x71, 0xb9, 0xb6, 0x61,
- 0x5b, 0xe2, 0x9a, 0xb7, 0x35, 0xcf, 0x27, 0xc9, 0x8b, 0xf4, 0x86, 0x76, 0xc2, 0x57, 0xb0, 0xec,
- 0x7a, 0x4e, 0xb0, 0xaa, 0x9c, 0x98, 0xf9, 0x2c, 0xdd, 0xce, 0x9d, 0x91, 0xed, 0xb8, 0x5d, 0xd7,
- 0xaf, 0x94, 0x1e, 0x51, 0x4c, 0xad, 0xe0, 0x6c, 0xc6, 0xb6, 0xc4, 0x5c, 0x30, 0xb2, 0xbb, 0x5e,
- 0x92, 0x97, 0xe8, 0xad, 0x8b, 0x0c, 0x48, 0x3f, 0x1f, 0x21, 0x7d, 0x01, 0x6e, 0x4f, 0x68, 0xeb,
- 0x2b, 0xff, 0xd7, 0x84, 0xf2, 0x55, 0xd4, 0x9a, 0x4e, 0xf9, 0x7d, 0x80, 0x09, 0xc1, 0x03, 0xac,
- 0x04, 0x75, 0x5e, 0x44, 0xbe, 0xbe, 0x4f, 0x61, 0x6b, 0x84, 0xf9, 0x40, 0x08, 0xda, 0xc3, 0x35,
- 0xc9, 0xb6, 0x44, 0x21, 0x44, 0xa2, 0x60, 0xbc, 0x8d, 0xa0, 0x67, 0xd8, 0x3b, 0xb3, 0xd0, 0xbe,
- 0x02, 0xae, 0xa4, 0x75, 0x53, 0x1f, 0x30, 0xe9, 0xd7, 0x6d, 0x4b, 0x5c, 0x0d, 0x0a, 0x64, 0xea,
- 0x03, 0x49, 0x5e, 0xa0, 0xd7, 0xce, 0xf3, 0x73, 0xe3, 0x84, 0xaf, 0xa2, 0x96, 0x2f, 0xfc, 0xaf,
- 0x29, 0xd8, 0x18, 0xf5, 0x1e, 0x10, 0xed, 0x99, 0xaa, 0x77, 0xae, 0x43, 0x7c, 0x9f, 0xcc, 0x06,
- 0x6a, 0x51, 0xb9, 0x43, 0xc8, 0x6c, 0xa0, 0x96, 0x47, 0xa6, 0xd3, 0x92, 0xe3, 0x64, 0x66, 0x66,
- 0x44, 0xe6, 0x5c, 0x04, 0x99, 0x22, 0x6c, 0x87, 0xd2, 0xe5, 0x13, 0xfa, 0x53, 0x12, 0x72, 0x43,
- 0xc4, 0x41, 0x9b, 0x18, 0x78, 0xfa, 0x17, 0xc1, 0xd5, 0xe8, 0xbc, 0x7c, 0xfc, 0x6f, 0x43, 0x21,
- 0xa4, 0x36, 0xbf, 0xf6, 0xdf, 0x52, 0xb0, 0x39, 0xe6, 0xbf, 0xc6, 0x6e, 0x18, 0x1d, 0xab, 0xe9,
- 0x2b, 0x8e, 0xd5, 0xeb, 0x6e, 0x88, 0x1d, 0x10, 0xc2, 0x29, 0xf3, 0x59, 0xfd, 0x31, 0x05, 0x6f,
- 0x1c, 0x1a, 0x8a, 0x8c, 0x51, 0xff, 0xd3, 0x06, 0x6a, 0x61, 0x93, 0xfb, 0x00, 0xb2, 0x5d, 0x7a,
- 0x45, 0xb9, 0x5c, 0xda, 0xdb, 0x8e, 0x78, 0xa7, 0xb9, 0x70, 0xf6, 0x4a, 0x63, 0x4b, 0xb8, 0x8f,
- 0x60, 0xd5, 0x2d, 0x18, 0x91, 0x4e, 0x47, 0x35, 0x3b, 0x58, 0x33, 0x29, 0xc5, 0xcb, 0xb5, 0x82,
- 0x6d, 0x89, 0x5b, 0xc1, 0x2d, 0x0d, 0x11, 0x92, 0xbc, 0x42, 0x4d, 0x07, 0xbe, 0x65, 0x82, 0xb8,
- 0xf4, 0x8c, 0x88, 0xcb, 0x44, 0x10, 0xb7, 0x45, 0x07, 0xcf, 0x90, 0x15, 0x9f, 0x2f, 0x2b, 0x05,
- 0x70, 0x68, 0x28, 0x47, 0x6a, 0x07, 0x93, 0xde, 0xff, 0x45, 0x56, 0x4f, 0xd3, 0x31, 0xc2, 0x6a,
- 0x1f, 0x37, 0xa3, 0xc8, 0x1a, 0x22, 0x3c, 0xb2, 0x3e, 0xf7, 0x2d, 0x33, 0x26, 0xeb, 0x63, 0xe0,
- 0x34, 0x7c, 0x6a, 0xd6, 0x0d, 0xfc, 0x4d, 0x0f, 0x6b, 0x08, 0xd7, 0x75, 0x8c, 0xfa, 0x94, 0xb8,
- 0x4c, 0x6d, 0xdb, 0xb6, 0xc4, 0xdb, 0x6e, 0x84, 0x49, 0x8c, 0x24, 0xaf, 0x3a, 0xc6, 0xcf, 0x98,
- 0xcd, 0x21, 0x33, 0x46, 0xcb, 0xae, 0xd3, 0x93, 0x2a, 0xe3, 0xd7, 0xa7, 0xfd, 0x67, 0xf7, 0x08,
- 0xc0, 0xcc, 0x4f, 0x34, 0xda, 0xcb, 0x37, 0x83, 0xfd, 0xf7, 0x61, 0x89, 0x35, 0xb4, 0x53, 0x13,
- 0x1b, 0x0d, 0x9b, 0xb6, 0x25, 0x72, 0x23, 0xdd, 0xee, 0x38, 0x25, 0xd9, 0x1d, 0x22, 0x6e, 0xf5,
- 0xb3, 0x1d, 0x0e, 0xe1, 0xb2, 0xcd, 0xbd, 0xaa, 0x6c, 0xd9, 0x0b, 0xdf, 0xe3, 0xa3, 0xfa, 0xf8,
- 0xea, 0xfd, 0x9e, 0xa2, 0xa2, 0x56, 0x51, 0x4b, 0x23, 0xdf, 0xb6, 0x71, 0x53, 0xc1, 0xf4, 0x21,
- 0x7f, 0x25, 0xf9, 0x8a, 0xb0, 0xd2, 0x18, 0x8d, 0xe7, 0xaa, 0x27, 0x8f, 0x9b, 0x87, 0x02, 0x39,
- 0x0b, 0x9b, 0x51, 0x02, 0x51, 0xa7, 0x27, 0x50, 0xd5, 0xb9, 0x79, 0xed, 0xd3, 0xdb, 0xfd, 0x1e,
- 0x1a, 0x63, 0xcd, 0x23, 0x75, 0xef, 0x8f, 0x05, 0x48, 0x1f, 0x1a, 0x0a, 0x47, 0x60, 0x65, 0xfc,
- 0xbb, 0xee, 0x5e, 0x04, 0x91, 0x93, 0x5f, 0x57, 0x7c, 0x25, 0x36, 0xd4, 0x4b, 0xcc, 0xb5, 0xe1,
- 0xd6, 0xd8, 0x47, 0x58, 0x31, 0x56, 0x90, 0x23, 0x7d, 0xc0, 0x3f, 0x88, 0x8b, 0x8c, 0xc8, 0xe6,
- 0x9c, 0xb2, 0xe2, 0x65, 0xab, 0xa2, 0x56, 0xcc, 0x6c, 0x81, 0x13, 0x27, 0x77, 0x0a, 0x5c, 0xc8,
- 0x69, 0xf3, 0x7e, 0xac, 0x38, 0x0c, 0xcd, 0xef, 0x4f, 0x83, 0xf6, 0x33, 0xeb, 0xb0, 0x3a, 0x71,
- 0x2c, 0x7b, 0xfb, 0xd2, 0x48, 0x3e, 0x96, 0xdf, 0x8b, 0x8f, 0xf5, 0x73, 0x7e, 0x07, 0xb9, 0xb0,
- 0xe3, 0xd4, 0x6e, 0xbc, 0x50, 0xde, 0x7e, 0xdf, 0x9b, 0x0a, 0xee, 0x27, 0xff, 0x1a, 0x20, 0x70,
- 0xea, 0x78, 0x2b, 0x3a, 0xc8, 0x10, 0xc5, 0xdf, 0x8f, 0x83, 0xf2, 0x33, 0x7c, 0x09, 0xf3, 0xde,
- 0x7b, 0xfa, 0x6e, 0xf4, 0x42, 0x06, 0xe1, 0xef, 0x5d, 0x0a, 0x09, 0xf6, 0xe4, 0xd8, 0x9b, 0xa8,
- 0x78, 0xe9, 0x62, 0x86, 0xbc, 0xa8, 0x27, 0xc3, 0xa7, 0xa7, 0xf3, 0x80, 0x8f, 0x4f, 0xce, 0x0b,
- 0x6a, 0x1d, 0x83, 0x5e, 0xf4, 0x80, 0x47, 0x4c, 0x96, 0xda, 0xa3, 0x17, 0x67, 0x42, 0xf2, 0xe5,
- 0x99, 0x90, 0xfc, 0xe7, 0x4c, 0x48, 0xfe, 0x70, 0x2e, 0x24, 0x5e, 0x9e, 0x0b, 0x89, 0x3f, 0xcf,
- 0x85, 0xc4, 0xd3, 0x92, 0xa2, 0x9a, 0x27, 0xbd, 0xe3, 0x12, 0x22, 0x9d, 0x32, 0x22, 0x46, 0x87,
- 0x18, 0x65, 0xf5, 0x18, 0xed, 0x7a, 0xff, 0x9f, 0x1e, 0xec, 0xef, 0x7a, 0xbf, 0xa0, 0xcc, 0x41,
- 0x17, 0x1b, 0xc7, 0x59, 0xfa, 0xfb, 0xe9, 0xdd, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0x5f, 0xfe,
- 0x8d, 0x31, 0x15, 0x13, 0x00, 0x00,
+ 0x0c, 0x4c, 0x93, 0x49, 0x29, 0x20, 0x0d, 0x12, 0x52, 0x52, 0x09, 0x31, 0x42, 0x65, 0x46, 0xa6,
+ 0x80, 0x34, 0x42, 0x0a, 0xe9, 0xcd, 0x1d, 0xd7, 0x4a, 0xe2, 0x1b, 0x6c, 0x27, 0x34, 0xe2, 0x05,
+ 0x58, 0xb2, 0x60, 0xc5, 0x02, 0x8d, 0xc4, 0x9a, 0x05, 0x12, 0x0f, 0x31, 0xcb, 0xd9, 0xf1, 0xb3,
+ 0xb0, 0x50, 0xbb, 0x61, 0xed, 0x27, 0x40, 0xbe, 0xbe, 0x76, 0x9c, 0xc4, 0x6e, 0x9d, 0x0e, 0x29,
+ 0xdd, 0xd9, 0xe7, 0x7c, 0xf7, 0x9c, 0x73, 0xbf, 0xef, 0xf8, 0xf8, 0xda, 0x20, 0xa8, 0xc7, 0x48,
+ 0x21, 0x65, 0x44, 0x74, 0x5c, 0x46, 0x27, 0x0d, 0x4d, 0xc3, 0xed, 0x72, 0xbf, 0x52, 0x36, 0x4f,
+ 0x4b, 0x5d, 0x9d, 0x98, 0x84, 0xdb, 0xa0, 0xfe, 0x92, 0xe3, 0x2f, 0x31, 0x7f, 0xa9, 0x5f, 0xe1,
+ 0xd7, 0x15, 0xa2, 0x10, 0x8a, 0x28, 0x3b, 0x57, 0x2e, 0x98, 0xbf, 0x1b, 0x0c, 0xd6, 0x56, 0xb1,
+ 0x66, 0x3a, 0xb1, 0xdc, 0x2b, 0x06, 0x79, 0x3d, 0x3c, 0x9f, 0x17, 0x9a, 0x82, 0xa4, 0x9f, 0x93,
+ 0xc0, 0x1d, 0x1a, 0xca, 0x81, 0x6b, 0x7c, 0xdc, 0xc5, 0xda, 0x23, 0x4d, 0x35, 0xb9, 0xb7, 0x60,
+ 0xbe, 0x4b, 0x74, 0xb3, 0xae, 0x36, 0xf3, 0xc9, 0x9d, 0x64, 0x71, 0xb1, 0xc6, 0xd9, 0x96, 0x78,
+ 0x6b, 0xd0, 0xe8, 0xb4, 0x1f, 0x4a, 0xcc, 0x21, 0xc9, 0x59, 0xe7, 0xea, 0x51, 0x93, 0xfb, 0x00,
+ 0xe6, 0x59, 0xd0, 0x7c, 0x6a, 0x27, 0x59, 0x5c, 0xda, 0x13, 0x4a, 0xa1, 0x5b, 0x29, 0xb1, 0x2c,
+ 0xb5, 0xcc, 0x0b, 0x4b, 0x4c, 0xc8, 0xde, 0x22, 0x6e, 0x13, 0xb2, 0x86, 0xaa, 0x68, 0x58, 0xcf,
+ 0xa7, 0x9d, 0x5c, 0x32, 0xbb, 0x7b, 0xb8, 0xf0, 0xdd, 0x73, 0x31, 0xf1, 0xcf, 0x73, 0x31, 0x21,
+ 0xdd, 0x01, 0x7e, 0xb2, 0x48, 0x19, 0x1b, 0x5d, 0xa2, 0x19, 0x58, 0xfa, 0x2b, 0x0d, 0x6b, 0xa3,
+ 0xee, 0x23, 0x7d, 0x30, 0xdd, 0x16, 0x3e, 0x81, 0x5c, 0x57, 0xc7, 0x7d, 0x95, 0xf4, 0x8c, 0x3a,
+ 0x2b, 0xcb, 0x59, 0x98, 0xa2, 0x0b, 0x05, 0xdb, 0x12, 0x79, 0xb6, 0x70, 0x12, 0x24, 0xc9, 0x6b,
+ 0x9e, 0x95, 0x55, 0x30, 0x4a, 0x49, 0xfa, 0x2a, 0x94, 0xc8, 0xb0, 0x8e, 0x48, 0x4f, 0x33, 0xb1,
+ 0xde, 0x6d, 0xe8, 0xe6, 0xa0, 0xde, 0xc7, 0xba, 0xa1, 0x12, 0x2d, 0x9f, 0xa1, 0x05, 0x89, 0xb6,
+ 0x25, 0x16, 0xdc, 0x82, 0xc2, 0x50, 0x92, 0x9c, 0x0b, 0x9a, 0x3f, 0x77, 0xad, 0xdc, 0x3e, 0x40,
+ 0x57, 0x27, 0xe4, 0x59, 0x5d, 0xd5, 0x54, 0x33, 0x3f, 0xb7, 0x93, 0x2c, 0x2e, 0xd7, 0x36, 0x6c,
+ 0x4b, 0x5c, 0xf3, 0xb6, 0xe6, 0xf9, 0x24, 0x79, 0x91, 0xde, 0xd0, 0x4e, 0xf8, 0x12, 0x96, 0x5d,
+ 0xcf, 0x09, 0x56, 0x95, 0x13, 0x33, 0x9f, 0xa5, 0xdb, 0xb9, 0x33, 0xb2, 0x1d, 0xb7, 0xeb, 0xfa,
+ 0x95, 0xd2, 0x47, 0x14, 0x53, 0x2b, 0x38, 0x9b, 0xb1, 0x2d, 0x31, 0x17, 0x8c, 0xec, 0xae, 0x97,
+ 0xe4, 0x25, 0x7a, 0xeb, 0x22, 0x03, 0xd2, 0xcf, 0x47, 0x48, 0x5f, 0x80, 0xdb, 0x13, 0xda, 0xfa,
+ 0xca, 0xff, 0x39, 0xa1, 0x7c, 0x15, 0xb5, 0xa6, 0x53, 0x7e, 0x1f, 0x60, 0x42, 0xf0, 0x00, 0x2b,
+ 0x41, 0x9d, 0x17, 0x91, 0xaf, 0xef, 0x53, 0xd8, 0x1a, 0x61, 0x3e, 0x10, 0x82, 0xf6, 0x70, 0x4d,
+ 0xb2, 0x2d, 0x51, 0x08, 0x91, 0x28, 0x18, 0x6f, 0x23, 0xe8, 0x19, 0xf6, 0xce, 0x2c, 0xb4, 0xaf,
+ 0x80, 0x2b, 0x69, 0xdd, 0xd4, 0x07, 0x4c, 0xfa, 0x75, 0xdb, 0x12, 0x57, 0x83, 0x02, 0x99, 0xfa,
+ 0x40, 0x92, 0x17, 0xe8, 0xb5, 0xf3, 0xfc, 0xdc, 0x38, 0xe1, 0xab, 0xa8, 0xe5, 0x0b, 0xff, 0x4b,
+ 0x0a, 0x36, 0x46, 0xbd, 0x07, 0x44, 0x7b, 0xa6, 0xea, 0x9d, 0xeb, 0x10, 0xdf, 0x27, 0xb3, 0x81,
+ 0x5a, 0x54, 0xee, 0x10, 0x32, 0x1b, 0xa8, 0xe5, 0x91, 0xe9, 0xb4, 0xe4, 0x38, 0x99, 0x99, 0x19,
+ 0x91, 0x39, 0x17, 0x41, 0xa6, 0x08, 0xdb, 0xa1, 0x74, 0xf9, 0x84, 0xfe, 0x98, 0x84, 0xdc, 0x10,
+ 0x71, 0xd0, 0x26, 0x06, 0x9e, 0xfe, 0x45, 0x70, 0x35, 0x3a, 0x2f, 0x1f, 0xff, 0xdb, 0x50, 0x08,
+ 0xa9, 0xcd, 0xaf, 0xfd, 0xd7, 0x14, 0x6c, 0x8e, 0xf9, 0xaf, 0xb1, 0x1b, 0x46, 0xc7, 0x6a, 0xfa,
+ 0x8a, 0x63, 0xf5, 0xba, 0x1b, 0x62, 0x07, 0x84, 0x70, 0xca, 0x7c, 0x56, 0x7f, 0x48, 0xc1, 0x6b,
+ 0x87, 0x86, 0x22, 0x63, 0xd4, 0x7f, 0xd2, 0x40, 0x2d, 0x6c, 0x72, 0xef, 0x43, 0xb6, 0x4b, 0xaf,
+ 0x28, 0x97, 0x4b, 0x7b, 0xdb, 0x11, 0xef, 0x34, 0x17, 0xce, 0x5e, 0x69, 0x6c, 0x09, 0xf7, 0x21,
+ 0xac, 0xba, 0x05, 0x23, 0xd2, 0xe9, 0xa8, 0x66, 0x07, 0x6b, 0x26, 0xa5, 0x78, 0xb9, 0x56, 0xb0,
+ 0x2d, 0x71, 0x2b, 0xb8, 0xa5, 0x21, 0x42, 0x92, 0x57, 0xa8, 0xe9, 0xc0, 0xb7, 0x4c, 0x10, 0x97,
+ 0x9e, 0x11, 0x71, 0x99, 0x08, 0xe2, 0xb6, 0xe8, 0xe0, 0x19, 0xb2, 0xe2, 0xf3, 0x65, 0xa5, 0x00,
+ 0x0e, 0x0d, 0xe5, 0x48, 0xed, 0x60, 0xd2, 0xfb, 0xaf, 0xc8, 0xea, 0x69, 0x3a, 0x46, 0x58, 0xed,
+ 0xe3, 0x66, 0x14, 0x59, 0x43, 0x84, 0x47, 0xd6, 0x67, 0xbe, 0x65, 0xc6, 0x64, 0x7d, 0x0c, 0x9c,
+ 0x86, 0x4f, 0xcd, 0xba, 0x81, 0xbf, 0xee, 0x61, 0x0d, 0xe1, 0xba, 0x8e, 0x51, 0x9f, 0x12, 0x97,
+ 0xa9, 0x6d, 0xdb, 0x96, 0x78, 0xdb, 0x8d, 0x30, 0x89, 0x91, 0xe4, 0x55, 0xc7, 0xf8, 0x29, 0xb3,
+ 0x39, 0x64, 0xc6, 0x68, 0xd9, 0x75, 0x7a, 0x52, 0x65, 0xfc, 0xfa, 0xb4, 0xff, 0xe4, 0x1e, 0x01,
+ 0x98, 0xf9, 0xb1, 0x46, 0x7b, 0xf9, 0x66, 0xb0, 0xff, 0x1e, 0x2c, 0xb1, 0x86, 0x76, 0x6a, 0x62,
+ 0xa3, 0x61, 0xd3, 0xb6, 0x44, 0x6e, 0xa4, 0xdb, 0x1d, 0xa7, 0x24, 0xbb, 0x43, 0xc4, 0xad, 0x7e,
+ 0xb6, 0xc3, 0x21, 0x5c, 0xb6, 0xb9, 0x57, 0x95, 0x2d, 0x7b, 0xe1, 0x7b, 0x7c, 0x54, 0x1f, 0x5f,
+ 0xbd, 0xdf, 0x52, 0x54, 0xd4, 0x2a, 0x6a, 0x69, 0xe4, 0x9b, 0x36, 0x6e, 0x2a, 0x98, 0x3e, 0xe4,
+ 0xaf, 0x24, 0x5f, 0x11, 0x56, 0x1a, 0xa3, 0xf1, 0x5c, 0xf5, 0xe4, 0x71, 0xf3, 0x50, 0x20, 0x67,
+ 0x61, 0x33, 0x4a, 0x20, 0xea, 0xf4, 0x04, 0xaa, 0x3a, 0x37, 0xff, 0xfb, 0xf4, 0x76, 0xbf, 0x87,
+ 0xc6, 0x58, 0xf3, 0x48, 0xdd, 0xfb, 0x7d, 0x01, 0xd2, 0x87, 0x86, 0xc2, 0x11, 0x58, 0x19, 0xff,
+ 0xae, 0xbb, 0x17, 0x41, 0xe4, 0xe4, 0xd7, 0x15, 0x5f, 0x89, 0x0d, 0xf5, 0x12, 0x73, 0x6d, 0xb8,
+ 0x35, 0xf6, 0x11, 0x56, 0x8c, 0x15, 0xe4, 0x48, 0x1f, 0xf0, 0x0f, 0xe2, 0x22, 0x23, 0xb2, 0x39,
+ 0xa7, 0xac, 0x78, 0xd9, 0xaa, 0xa8, 0x15, 0x33, 0x5b, 0xe0, 0xc4, 0xc9, 0x9d, 0x02, 0x17, 0x72,
+ 0xda, 0xbc, 0x1f, 0x2b, 0x0e, 0x43, 0xf3, 0xfb, 0xd3, 0xa0, 0xfd, 0xcc, 0x3a, 0xac, 0x4e, 0x1c,
+ 0xcb, 0xde, 0xbc, 0x34, 0x92, 0x8f, 0xe5, 0xf7, 0xe2, 0x63, 0xfd, 0x9c, 0xdf, 0x42, 0x2e, 0xec,
+ 0x38, 0xb5, 0x1b, 0x2f, 0x94, 0xb7, 0xdf, 0x77, 0xa6, 0x82, 0xfb, 0xc9, 0xbf, 0x02, 0x08, 0x9c,
+ 0x3a, 0xde, 0x88, 0x0e, 0x32, 0x44, 0xf1, 0xf7, 0xe3, 0xa0, 0xfc, 0x0c, 0x5f, 0xc0, 0xbc, 0xf7,
+ 0x9e, 0xbe, 0x1b, 0xbd, 0x90, 0x41, 0xf8, 0x7b, 0x97, 0x42, 0x82, 0x3d, 0x39, 0xf6, 0x26, 0x2a,
+ 0x5e, 0xba, 0x98, 0x21, 0x2f, 0xea, 0xc9, 0xf0, 0xe9, 0xe9, 0x3c, 0xe0, 0xe3, 0x93, 0xf3, 0x82,
+ 0x5a, 0xc7, 0xa0, 0x17, 0x3d, 0xe0, 0x11, 0x93, 0xa5, 0xf6, 0xe4, 0xc5, 0x99, 0x90, 0x7c, 0x79,
+ 0x26, 0x24, 0xff, 0x3e, 0x13, 0x92, 0xdf, 0x9f, 0x0b, 0x89, 0x97, 0xe7, 0x42, 0xe2, 0x8f, 0x73,
+ 0x21, 0xf1, 0xf4, 0x5d, 0x45, 0x35, 0x4f, 0x7a, 0xc7, 0x25, 0x44, 0x3a, 0x65, 0x44, 0x8c, 0x0e,
+ 0x31, 0xca, 0xea, 0x31, 0xda, 0x55, 0x48, 0xb9, 0x43, 0x9a, 0xbd, 0x36, 0x36, 0xdc, 0xff, 0x50,
+ 0x0f, 0xf6, 0x77, 0xbd, 0x5f, 0x51, 0xe6, 0xa0, 0x8b, 0x8d, 0xe3, 0x2c, 0xfd, 0x0d, 0xf5, 0xf6,
+ 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x4d, 0xb3, 0x23, 0x46, 0x1d, 0x13, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
diff --git a/core/05-port/keeper/keeper.go b/modules/core/05-port/keeper/keeper.go
similarity index 95%
rename from core/05-port/keeper/keeper.go
rename to modules/core/05-port/keeper/keeper.go
index 31ba4c91..1bac7622 100644
--- a/core/05-port/keeper/keeper.go
+++ b/modules/core/05-port/keeper/keeper.go
@@ -8,8 +8,8 @@ import (
sdk "github.com/cosmos/cosmos-sdk/types"
capabilitykeeper "github.com/cosmos/cosmos-sdk/x/capability/keeper"
capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"
- "github.com/cosmos/ibc-go/core/05-port/types"
- host "github.com/cosmos/ibc-go/core/24-host"
+ "github.com/cosmos/ibc-go/modules/core/05-port/types"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
)
// Keeper defines the IBC connection keeper
diff --git a/core/05-port/keeper/keeper_test.go b/modules/core/05-port/keeper/keeper_test.go
similarity index 97%
rename from core/05-port/keeper/keeper_test.go
rename to modules/core/05-port/keeper/keeper_test.go
index 7081978f..75d1064f 100644
--- a/core/05-port/keeper/keeper_test.go
+++ b/modules/core/05-port/keeper/keeper_test.go
@@ -9,7 +9,7 @@ import (
"github.com/cosmos/ibc-go/testing/simapp"
sdk "github.com/cosmos/cosmos-sdk/types"
- "github.com/cosmos/ibc-go/core/05-port/keeper"
+ "github.com/cosmos/ibc-go/modules/core/05-port/keeper"
)
var (
diff --git a/core/05-port/types/errors.go b/modules/core/05-port/types/errors.go
similarity index 100%
rename from core/05-port/types/errors.go
rename to modules/core/05-port/types/errors.go
diff --git a/core/05-port/types/keys.go b/modules/core/05-port/types/keys.go
similarity index 100%
rename from core/05-port/types/keys.go
rename to modules/core/05-port/types/keys.go
diff --git a/core/05-port/types/module.go b/modules/core/05-port/types/module.go
similarity index 95%
rename from core/05-port/types/module.go
rename to modules/core/05-port/types/module.go
index 40a737e3..91ee642f 100644
--- a/core/05-port/types/module.go
+++ b/modules/core/05-port/types/module.go
@@ -4,7 +4,7 @@ import (
sdk "github.com/cosmos/cosmos-sdk/types"
capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"
- channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
+ channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
)
// IBCModule defines an interface that implements all the callbacks
diff --git a/core/05-port/types/router.go b/modules/core/05-port/types/router.go
similarity index 100%
rename from core/05-port/types/router.go
rename to modules/core/05-port/types/router.go
diff --git a/core/05-port/types/utils.go b/modules/core/05-port/types/utils.go
similarity index 100%
rename from core/05-port/types/utils.go
rename to modules/core/05-port/types/utils.go
diff --git a/core/23-commitment/types/bench_test.go b/modules/core/23-commitment/types/bench_test.go
similarity index 100%
rename from core/23-commitment/types/bench_test.go
rename to modules/core/23-commitment/types/bench_test.go
diff --git a/core/23-commitment/types/codec.go b/modules/core/23-commitment/types/codec.go
similarity index 94%
rename from core/23-commitment/types/codec.go
rename to modules/core/23-commitment/types/codec.go
index ed96411d..931e629f 100644
--- a/core/23-commitment/types/codec.go
+++ b/modules/core/23-commitment/types/codec.go
@@ -2,7 +2,7 @@ package types
import (
codectypes "github.com/cosmos/cosmos-sdk/codec/types"
- "github.com/cosmos/ibc-go/core/exported"
+ "github.com/cosmos/ibc-go/modules/core/exported"
)
// RegisterInterfaces registers the commitment interfaces to protobuf Any.
diff --git a/core/23-commitment/types/commitment.pb.go b/modules/core/23-commitment/types/commitment.pb.go
similarity index 90%
rename from core/23-commitment/types/commitment.pb.go
rename to modules/core/23-commitment/types/commitment.pb.go
index ac4201c4..0c88037f 100644
--- a/core/23-commitment/types/commitment.pb.go
+++ b/modules/core/23-commitment/types/commitment.pb.go
@@ -217,28 +217,29 @@ func init() {
}
var fileDescriptor_eb23d5444771a147 = []byte{
- // 329 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0x91, 0xbf, 0x4e, 0xc3, 0x30,
- 0x10, 0xc6, 0x13, 0x51, 0x15, 0xea, 0x56, 0x42, 0xa4, 0x80, 0xaa, 0x0e, 0x29, 0xca, 0x80, 0xca,
- 0x50, 0x5b, 0x6d, 0x99, 0x2a, 0xb1, 0x04, 0x36, 0x84, 0x54, 0x65, 0x64, 0x41, 0x89, 0xe5, 0x24,
- 0x56, 0x9b, 0x5e, 0x14, 0x9b, 0x8a, 0xbc, 0x01, 0x23, 0x23, 0x23, 0x8f, 0xc3, 0xd8, 0x91, 0xa9,
- 0x42, 0xed, 0x1b, 0xf4, 0x09, 0x90, 0x6d, 0x0a, 0xd9, 0xee, 0x7c, 0xbf, 0xfb, 0xe3, 0xef, 0x43,
- 0x57, 0x3c, 0xa2, 0x09, 0x10, 0x0a, 0x05, 0x23, 0x14, 0xb2, 0x8c, 0xcb, 0x8c, 0x2d, 0x24, 0x59,
- 0x0e, 0x2b, 0x19, 0xce, 0x0b, 0x90, 0xe0, 0x74, 0x34, 0x8a, 0x15, 0x8a, 0x2b, 0xc5, 0xe5, 0xb0,
- 0x7b, 0x9a, 0x40, 0x02, 0x1a, 0x22, 0x2a, 0x32, 0x7c, 0xb7, 0x4d, 0x61, 0x11, 0x73, 0x20, 0x79,
- 0x01, 0x10, 0x0b, 0xf3, 0xe8, 0x5d, 0x22, 0xf4, 0xc0, 0x8a, 0xd9, 0x9c, 0x05, 0x00, 0xd2, 0x71,
- 0x50, 0x2d, 0x0d, 0x45, 0xda, 0xb1, 0x2f, 0xec, 0x7e, 0x2b, 0xd0, 0xf1, 0xa4, 0xf6, 0xfa, 0xd1,
- 0xb3, 0xbc, 0x3b, 0xd4, 0x32, 0xdc, 0xb4, 0x60, 0x31, 0x7f, 0x71, 0xae, 0x11, 0x9a, 0xb1, 0xf2,
- 0x29, 0xd7, 0x99, 0xe1, 0xfd, 0xb3, 0xdd, 0xba, 0x77, 0x52, 0x86, 0xd9, 0x7c, 0xe2, 0xfd, 0xd7,
- 0xbc, 0xa0, 0x31, 0x63, 0xa5, 0xe9, 0xf2, 0xfc, 0xfd, 0xb6, 0x69, 0x28, 0x53, 0x07, 0xa3, 0x23,
- 0xcd, 0x85, 0x52, 0x6d, 0x3c, 0xe8, 0x37, 0xfc, 0xf6, 0x6e, 0xdd, 0x3b, 0xae, 0x4c, 0x08, 0x65,
- 0xea, 0x05, 0x87, 0xaa, 0x3f, 0x94, 0xe9, 0xa4, 0xf6, 0xae, 0x2e, 0xb9, 0x41, 0xcd, 0xfd, 0x25,
- 0x00, 0xb1, 0x83, 0x51, 0xdd, 0x7c, 0x48, 0x8f, 0x68, 0x8e, 0xce, 0x31, 0xa7, 0x62, 0x34, 0xc6,
- 0xb7, 0x7f, 0x8a, 0x68, 0x2e, 0xf8, 0xa5, 0xfc, 0xfb, 0xcf, 0x8d, 0x6b, 0xaf, 0x36, 0xae, 0xfd,
- 0xbd, 0x71, 0xed, 0xb7, 0xad, 0x6b, 0xad, 0xb6, 0xae, 0xf5, 0xb5, 0x75, 0xad, 0xc7, 0x61, 0xc2,
- 0x65, 0xfa, 0x1c, 0x29, 0x2d, 0x09, 0x05, 0x91, 0x81, 0x20, 0x3c, 0xa2, 0x83, 0xbd, 0x1b, 0xa3,
- 0xf1, 0xa0, 0x62, 0x88, 0x2c, 0x73, 0x26, 0xa2, 0xba, 0x16, 0x71, 0xfc, 0x13, 0x00, 0x00, 0xff,
- 0xff, 0xe6, 0x8b, 0xf4, 0x8a, 0xb6, 0x01, 0x00, 0x00,
+ // 338 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0x91, 0xbf, 0x4e, 0xeb, 0x30,
+ 0x14, 0xc6, 0x13, 0xdd, 0xaa, 0x97, 0xba, 0x95, 0x10, 0x29, 0xa0, 0xaa, 0x43, 0x8a, 0x32, 0xa0,
+ 0x32, 0xd4, 0x56, 0x5b, 0x16, 0x2a, 0xb1, 0x04, 0x56, 0xa4, 0x2a, 0x6c, 0x2c, 0x28, 0x31, 0x4e,
+ 0x62, 0xb5, 0xe9, 0x89, 0x62, 0xb7, 0x22, 0x6f, 0xc0, 0xc8, 0xc8, 0xc8, 0xe3, 0x30, 0x76, 0x64,
+ 0xaa, 0x50, 0xf3, 0x06, 0x7d, 0x02, 0x14, 0x9b, 0x42, 0xb6, 0x73, 0x7c, 0x7e, 0xe7, 0x8f, 0xbf,
+ 0x0f, 0x5d, 0xf0, 0x80, 0x46, 0x40, 0x28, 0x64, 0x8c, 0x50, 0x48, 0x12, 0x2e, 0x13, 0xb6, 0x90,
+ 0x64, 0x35, 0xac, 0x64, 0x38, 0xcd, 0x40, 0x82, 0xd5, 0x51, 0x28, 0x2e, 0x51, 0x5c, 0x29, 0xae,
+ 0x86, 0xdd, 0xe3, 0x08, 0x22, 0x50, 0x10, 0x29, 0x23, 0xcd, 0x77, 0xdb, 0x14, 0x16, 0x21, 0x07,
+ 0x92, 0x66, 0x00, 0xa1, 0xd0, 0x8f, 0xce, 0x39, 0x42, 0x77, 0x2c, 0x9b, 0xcd, 0x99, 0x07, 0x20,
+ 0x2d, 0x0b, 0xd5, 0x62, 0x5f, 0xc4, 0x1d, 0xf3, 0xcc, 0xec, 0xb7, 0x3c, 0x15, 0x4f, 0x6a, 0x2f,
+ 0xef, 0x3d, 0xc3, 0xb9, 0x45, 0x2d, 0xcd, 0x4d, 0x33, 0x16, 0xf2, 0x67, 0xeb, 0x12, 0xa1, 0x19,
+ 0xcb, 0x1f, 0x53, 0x95, 0x69, 0xde, 0x3d, 0xd9, 0x6d, 0x7a, 0x47, 0xb9, 0x9f, 0xcc, 0x27, 0xce,
+ 0x5f, 0xcd, 0xf1, 0x1a, 0x33, 0x96, 0xeb, 0x2e, 0xc7, 0xdd, 0x6f, 0x9b, 0xfa, 0x32, 0xb6, 0x30,
+ 0x3a, 0x50, 0x9c, 0x2f, 0xcb, 0x8d, 0xff, 0xfa, 0x0d, 0xb7, 0xbd, 0xdb, 0xf4, 0x0e, 0x2b, 0x13,
+ 0x7c, 0x19, 0x3b, 0xde, 0xff, 0xb2, 0xdf, 0x97, 0xf1, 0xa4, 0xf6, 0x56, 0x5e, 0x72, 0x8d, 0x9a,
+ 0xfb, 0x4b, 0x00, 0x42, 0x0b, 0xa3, 0xba, 0xfe, 0x90, 0x1a, 0xd1, 0x1c, 0x9d, 0x62, 0x4e, 0xc5,
+ 0x68, 0x8c, 0x6f, 0x7e, 0x15, 0x51, 0x9c, 0xf7, 0x43, 0xb9, 0xf7, 0x1f, 0x5b, 0xdb, 0x5c, 0x6f,
+ 0x6d, 0xf3, 0x6b, 0x6b, 0x9b, 0xaf, 0x85, 0x6d, 0xac, 0x0b, 0xdb, 0xf8, 0x2c, 0x6c, 0xe3, 0xe1,
+ 0x2a, 0xe2, 0x32, 0x5e, 0x06, 0xa5, 0x96, 0x84, 0x82, 0x48, 0x40, 0x10, 0x1e, 0xd0, 0x41, 0x04,
+ 0x24, 0x81, 0xa7, 0xe5, 0x9c, 0x09, 0xed, 0xca, 0x68, 0x3c, 0xa8, 0x18, 0x23, 0xf3, 0x94, 0x89,
+ 0xa0, 0xae, 0xc4, 0x1c, 0x7f, 0x07, 0x00, 0x00, 0xff, 0xff, 0x44, 0xcf, 0xe4, 0xd6, 0xbe, 0x01,
+ 0x00, 0x00,
}
func (m *MerkleRoot) Marshal() (dAtA []byte, err error) {
diff --git a/core/23-commitment/types/commitment_test.go b/modules/core/23-commitment/types/commitment_test.go
similarity index 100%
rename from core/23-commitment/types/commitment_test.go
rename to modules/core/23-commitment/types/commitment_test.go
diff --git a/core/23-commitment/types/errors.go b/modules/core/23-commitment/types/errors.go
similarity index 100%
rename from core/23-commitment/types/errors.go
rename to modules/core/23-commitment/types/errors.go
diff --git a/core/23-commitment/types/merkle.go b/modules/core/23-commitment/types/merkle.go
similarity index 99%
rename from core/23-commitment/types/merkle.go
rename to modules/core/23-commitment/types/merkle.go
index 706ba7df..597a1ac9 100644
--- a/core/23-commitment/types/merkle.go
+++ b/modules/core/23-commitment/types/merkle.go
@@ -10,7 +10,7 @@ import (
tmcrypto "github.com/tendermint/tendermint/proto/tendermint/crypto"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- "github.com/cosmos/ibc-go/core/exported"
+ "github.com/cosmos/ibc-go/modules/core/exported"
)
// var representing the proofspecs for a SDK chain
diff --git a/core/23-commitment/types/merkle_test.go b/modules/core/23-commitment/types/merkle_test.go
similarity index 99%
rename from core/23-commitment/types/merkle_test.go
rename to modules/core/23-commitment/types/merkle_test.go
index 54016dd7..db2d58f8 100644
--- a/core/23-commitment/types/merkle_test.go
+++ b/modules/core/23-commitment/types/merkle_test.go
@@ -7,7 +7,7 @@ import (
"github.com/stretchr/testify/require"
abci "github.com/tendermint/tendermint/abci/types"
- "github.com/cosmos/ibc-go/core/23-commitment/types"
+ "github.com/cosmos/ibc-go/modules/core/23-commitment/types"
)
func (suite *MerkleTestSuite) TestVerifyMembership() {
diff --git a/core/23-commitment/types/utils.go b/modules/core/23-commitment/types/utils.go
similarity index 100%
rename from core/23-commitment/types/utils.go
rename to modules/core/23-commitment/types/utils.go
diff --git a/core/23-commitment/types/utils_test.go b/modules/core/23-commitment/types/utils_test.go
similarity index 97%
rename from core/23-commitment/types/utils_test.go
rename to modules/core/23-commitment/types/utils_test.go
index 44513ac9..e06d1705 100644
--- a/core/23-commitment/types/utils_test.go
+++ b/modules/core/23-commitment/types/utils_test.go
@@ -7,7 +7,7 @@ import (
abci "github.com/tendermint/tendermint/abci/types"
crypto "github.com/tendermint/tendermint/proto/tendermint/crypto"
- "github.com/cosmos/ibc-go/core/23-commitment/types"
+ "github.com/cosmos/ibc-go/modules/core/23-commitment/types"
)
func (suite *MerkleTestSuite) TestConvertProofs() {
diff --git a/core/24-host/errors.go b/modules/core/24-host/errors.go
similarity index 100%
rename from core/24-host/errors.go
rename to modules/core/24-host/errors.go
diff --git a/core/24-host/keys.go b/modules/core/24-host/keys.go
similarity index 99%
rename from core/24-host/keys.go
rename to modules/core/24-host/keys.go
index 81a4999b..ec07af54 100644
--- a/core/24-host/keys.go
+++ b/modules/core/24-host/keys.go
@@ -3,7 +3,7 @@ package host
import (
"fmt"
- "github.com/cosmos/ibc-go/core/exported"
+ "github.com/cosmos/ibc-go/modules/core/exported"
)
const (
diff --git a/core/24-host/parse.go b/modules/core/24-host/parse.go
similarity index 100%
rename from core/24-host/parse.go
rename to modules/core/24-host/parse.go
diff --git a/core/24-host/parse_test.go b/modules/core/24-host/parse_test.go
similarity index 91%
rename from core/24-host/parse_test.go
rename to modules/core/24-host/parse_test.go
index 83c2a864..1c7244cc 100644
--- a/core/24-host/parse_test.go
+++ b/modules/core/24-host/parse_test.go
@@ -6,8 +6,8 @@ import (
"github.com/stretchr/testify/require"
- connectiontypes "github.com/cosmos/ibc-go/core/03-connection/types"
- host "github.com/cosmos/ibc-go/core/24-host"
+ connectiontypes "github.com/cosmos/ibc-go/modules/core/03-connection/types"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
)
func TestParseIdentifier(t *testing.T) {
diff --git a/core/24-host/validate.go b/modules/core/24-host/validate.go
similarity index 100%
rename from core/24-host/validate.go
rename to modules/core/24-host/validate.go
diff --git a/core/24-host/validate_test.go b/modules/core/24-host/validate_test.go
similarity index 100%
rename from core/24-host/validate_test.go
rename to modules/core/24-host/validate_test.go
diff --git a/core/client/cli/cli.go b/modules/core/client/cli/cli.go
similarity index 82%
rename from core/client/cli/cli.go
rename to modules/core/client/cli/cli.go
index b1fced08..4a7054fb 100644
--- a/core/client/cli/cli.go
+++ b/modules/core/client/cli/cli.go
@@ -4,10 +4,10 @@ import (
"github.com/spf13/cobra"
"github.com/cosmos/cosmos-sdk/client"
- ibcclient "github.com/cosmos/ibc-go/core/02-client"
- connection "github.com/cosmos/ibc-go/core/03-connection"
- channel "github.com/cosmos/ibc-go/core/04-channel"
- host "github.com/cosmos/ibc-go/core/24-host"
+ ibcclient "github.com/cosmos/ibc-go/modules/core/02-client"
+ connection "github.com/cosmos/ibc-go/modules/core/03-connection"
+ channel "github.com/cosmos/ibc-go/modules/core/04-channel"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
)
// GetTxCmd returns the transaction commands for this module
diff --git a/core/client/query.go b/modules/core/client/query.go
similarity index 91%
rename from core/client/query.go
rename to modules/core/client/query.go
index 72923d71..fbfeae04 100644
--- a/core/client/query.go
+++ b/modules/core/client/query.go
@@ -7,9 +7,9 @@ import (
"github.com/cosmos/cosmos-sdk/client"
"github.com/cosmos/cosmos-sdk/codec"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
- host "github.com/cosmos/ibc-go/core/24-host"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ commitmenttypes "github.com/cosmos/ibc-go/modules/core/23-commitment/types"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
)
// QueryTendermintProof performs an ABCI query with the given key and returns
diff --git a/core/exported/channel.go b/modules/core/exported/channel.go
similarity index 100%
rename from core/exported/channel.go
rename to modules/core/exported/channel.go
diff --git a/core/exported/client.go b/modules/core/exported/client.go
similarity index 100%
rename from core/exported/client.go
rename to modules/core/exported/client.go
diff --git a/core/exported/commitment.go b/modules/core/exported/commitment.go
similarity index 100%
rename from core/exported/commitment.go
rename to modules/core/exported/commitment.go
diff --git a/core/exported/connection.go b/modules/core/exported/connection.go
similarity index 100%
rename from core/exported/connection.go
rename to modules/core/exported/connection.go
diff --git a/core/genesis.go b/modules/core/genesis.go
similarity index 74%
rename from core/genesis.go
rename to modules/core/genesis.go
index c7fa47cd..71777eff 100644
--- a/core/genesis.go
+++ b/modules/core/genesis.go
@@ -2,11 +2,11 @@ package ibc
import (
sdk "github.com/cosmos/cosmos-sdk/types"
- client "github.com/cosmos/ibc-go/core/02-client"
- connection "github.com/cosmos/ibc-go/core/03-connection"
- channel "github.com/cosmos/ibc-go/core/04-channel"
- "github.com/cosmos/ibc-go/core/keeper"
- "github.com/cosmos/ibc-go/core/types"
+ client "github.com/cosmos/ibc-go/modules/core/02-client"
+ connection "github.com/cosmos/ibc-go/modules/core/03-connection"
+ channel "github.com/cosmos/ibc-go/modules/core/04-channel"
+ "github.com/cosmos/ibc-go/modules/core/keeper"
+ "github.com/cosmos/ibc-go/modules/core/types"
)
// InitGenesis initializes the ibc state from a provided genesis
diff --git a/core/genesis_test.go b/modules/core/genesis_test.go
similarity index 95%
rename from core/genesis_test.go
rename to modules/core/genesis_test.go
index fb968921..0e17f6cd 100644
--- a/core/genesis_test.go
+++ b/modules/core/genesis_test.go
@@ -9,15 +9,15 @@ import (
"github.com/cosmos/cosmos-sdk/codec"
"github.com/cosmos/ibc-go/testing/simapp"
- ibc "github.com/cosmos/ibc-go/core"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- connectiontypes "github.com/cosmos/ibc-go/core/03-connection/types"
- channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
- commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
- "github.com/cosmos/ibc-go/core/exported"
- "github.com/cosmos/ibc-go/core/types"
- ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
- localhosttypes "github.com/cosmos/ibc-go/light-clients/09-localhost/types"
+ ibc "github.com/cosmos/ibc-go/modules/core"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ connectiontypes "github.com/cosmos/ibc-go/modules/core/03-connection/types"
+ channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
+ commitmenttypes "github.com/cosmos/ibc-go/modules/core/23-commitment/types"
+ "github.com/cosmos/ibc-go/modules/core/exported"
+ "github.com/cosmos/ibc-go/modules/core/types"
+ ibctmtypes "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types"
+ localhosttypes "github.com/cosmos/ibc-go/modules/light-clients/09-localhost/types"
ibctesting "github.com/cosmos/ibc-go/testing"
)
diff --git a/core/handler.go b/modules/core/handler.go
similarity index 92%
rename from core/handler.go
rename to modules/core/handler.go
index 040d9065..3384bbcf 100644
--- a/core/handler.go
+++ b/modules/core/handler.go
@@ -3,10 +3,10 @@ package ibc
import (
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- connectiontypes "github.com/cosmos/ibc-go/core/03-connection/types"
- channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
- "github.com/cosmos/ibc-go/core/keeper"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ connectiontypes "github.com/cosmos/ibc-go/modules/core/03-connection/types"
+ channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
+ "github.com/cosmos/ibc-go/modules/core/keeper"
)
// NewHandler defines the IBC handler
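The handler.go hunk above only adjusts import paths, so for orientation here is a minimal sketch of the handler pattern that NewHandler follows, assuming the SDK v0.4x-era sdk.Handler signature and the clienttypes.MsgServer methods implemented by the core Keeper. This is illustrative wiring under those assumptions, not the code carried by this patch.

package ibc

import (
	sdk "github.com/cosmos/cosmos-sdk/types"
	sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"

	clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
	"github.com/cosmos/ibc-go/modules/core/keeper"
)

// NewHandlerSketch dispatches on the concrete message type and delegates to
// the keeper's msg-server methods, mirroring the usual SDK handler shape.
func NewHandlerSketch(k keeper.Keeper) sdk.Handler {
	return func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) {
		ctx = ctx.WithEventManager(sdk.NewEventManager())

		switch msg := msg.(type) {
		case *clienttypes.MsgCreateClient:
			res, err := k.CreateClient(sdk.WrapSDKContext(ctx), msg)
			return sdk.WrapServiceResult(ctx, res, err)
		default:
			return nil, sdkerrors.Wrapf(sdkerrors.ErrUnknownRequest, "unrecognized IBC message type: %T", msg)
		}
	}
}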
diff --git a/core/keeper/grpc_query.go b/modules/core/keeper/grpc_query.go
similarity index 96%
rename from core/keeper/grpc_query.go
rename to modules/core/keeper/grpc_query.go
index 21361b3b..365cae03 100644
--- a/core/keeper/grpc_query.go
+++ b/modules/core/keeper/grpc_query.go
@@ -3,9 +3,9 @@ package keeper
import (
"context"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- connectiontypes "github.com/cosmos/ibc-go/core/03-connection/types"
- channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ connectiontypes "github.com/cosmos/ibc-go/modules/core/03-connection/types"
+ channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
)
// ClientState implements the IBC QueryServer interface
diff --git a/core/keeper/keeper.go b/modules/core/keeper/keeper.go
similarity index 79%
rename from core/keeper/keeper.go
rename to modules/core/keeper/keeper.go
index 5c105eb5..df83f1ff 100644
--- a/core/keeper/keeper.go
+++ b/modules/core/keeper/keeper.go
@@ -4,13 +4,13 @@ import (
"github.com/cosmos/cosmos-sdk/codec"
sdk "github.com/cosmos/cosmos-sdk/types"
capabilitykeeper "github.com/cosmos/cosmos-sdk/x/capability/keeper"
- clientkeeper "github.com/cosmos/ibc-go/core/02-client/keeper"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- connectionkeeper "github.com/cosmos/ibc-go/core/03-connection/keeper"
- channelkeeper "github.com/cosmos/ibc-go/core/04-channel/keeper"
- portkeeper "github.com/cosmos/ibc-go/core/05-port/keeper"
- porttypes "github.com/cosmos/ibc-go/core/05-port/types"
- "github.com/cosmos/ibc-go/core/types"
+ clientkeeper "github.com/cosmos/ibc-go/modules/core/02-client/keeper"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ connectionkeeper "github.com/cosmos/ibc-go/modules/core/03-connection/keeper"
+ channelkeeper "github.com/cosmos/ibc-go/modules/core/04-channel/keeper"
+ portkeeper "github.com/cosmos/ibc-go/modules/core/05-port/keeper"
+ porttypes "github.com/cosmos/ibc-go/modules/core/05-port/types"
+ "github.com/cosmos/ibc-go/modules/core/types"
paramtypes "github.com/cosmos/cosmos-sdk/x/params/types"
)
diff --git a/core/keeper/msg_server.go b/modules/core/keeper/msg_server.go
similarity index 98%
rename from core/keeper/msg_server.go
rename to modules/core/keeper/msg_server.go
index c2a8912c..d931abed 100644
--- a/core/keeper/msg_server.go
+++ b/modules/core/keeper/msg_server.go
@@ -8,11 +8,11 @@ import (
"github.com/cosmos/cosmos-sdk/telemetry"
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- connectiontypes "github.com/cosmos/ibc-go/core/03-connection/types"
- channel "github.com/cosmos/ibc-go/core/04-channel"
- channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
- porttypes "github.com/cosmos/ibc-go/core/05-port/types"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ connectiontypes "github.com/cosmos/ibc-go/modules/core/03-connection/types"
+ channel "github.com/cosmos/ibc-go/modules/core/04-channel"
+ channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
+ porttypes "github.com/cosmos/ibc-go/modules/core/05-port/types"
)
var _ clienttypes.MsgServer = Keeper{}
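The context line above is a compile-time assertion that Keeper satisfies clienttypes.MsgServer. A tiny self-contained illustration of the idiom, with hypothetical types:

package main

// Greeter and Impl are hypothetical; they exist only to demonstrate the idiom.
type Greeter interface{ Greet() string }

type Impl struct{}

func (Impl) Greet() string { return "hello" }

// The assignment compiles only while Impl satisfies Greeter, so an interface
// change is caught at build time rather than at runtime.
var _ Greeter = Impl{}

func main() {}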
diff --git a/core/keeper/msg_server_test.go b/modules/core/keeper/msg_server_test.go
similarity index 98%
rename from core/keeper/msg_server_test.go
rename to modules/core/keeper/msg_server_test.go
index 461e2917..18830d79 100644
--- a/core/keeper/msg_server_test.go
+++ b/modules/core/keeper/msg_server_test.go
@@ -6,13 +6,13 @@ import (
"github.com/stretchr/testify/suite"
sdk "github.com/cosmos/cosmos-sdk/types"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
- commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
- host "github.com/cosmos/ibc-go/core/24-host"
- "github.com/cosmos/ibc-go/core/exported"
- "github.com/cosmos/ibc-go/core/keeper"
- ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
+ commitmenttypes "github.com/cosmos/ibc-go/modules/core/23-commitment/types"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
+ "github.com/cosmos/ibc-go/modules/core/exported"
+ "github.com/cosmos/ibc-go/modules/core/keeper"
+ ibctmtypes "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types"
ibctesting "github.com/cosmos/ibc-go/testing"
ibcmock "github.com/cosmos/ibc-go/testing/mock"
upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
diff --git a/core/module.go b/modules/core/module.go
similarity index 92%
rename from core/module.go
rename to modules/core/module.go
index 097f7791..45c53abb 100644
--- a/core/module.go
+++ b/modules/core/module.go
@@ -18,15 +18,15 @@ import (
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/cosmos/cosmos-sdk/types/module"
simtypes "github.com/cosmos/cosmos-sdk/types/simulation"
- ibcclient "github.com/cosmos/ibc-go/core/02-client"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- connectiontypes "github.com/cosmos/ibc-go/core/03-connection/types"
- channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
- host "github.com/cosmos/ibc-go/core/24-host"
- "github.com/cosmos/ibc-go/core/client/cli"
- "github.com/cosmos/ibc-go/core/keeper"
- "github.com/cosmos/ibc-go/core/simulation"
- "github.com/cosmos/ibc-go/core/types"
+ ibcclient "github.com/cosmos/ibc-go/modules/core/02-client"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ connectiontypes "github.com/cosmos/ibc-go/modules/core/03-connection/types"
+ channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
+ "github.com/cosmos/ibc-go/modules/core/client/cli"
+ "github.com/cosmos/ibc-go/modules/core/keeper"
+ "github.com/cosmos/ibc-go/modules/core/simulation"
+ "github.com/cosmos/ibc-go/modules/core/types"
)
var (
diff --git a/core/simulation/decoder.go b/modules/core/simulation/decoder.go
similarity index 68%
rename from core/simulation/decoder.go
rename to modules/core/simulation/decoder.go
index 8b4e3074..d553528e 100644
--- a/core/simulation/decoder.go
+++ b/modules/core/simulation/decoder.go
@@ -4,11 +4,11 @@ import (
"fmt"
"github.com/cosmos/cosmos-sdk/types/kv"
- clientsim "github.com/cosmos/ibc-go/core/02-client/simulation"
- connectionsim "github.com/cosmos/ibc-go/core/03-connection/simulation"
- channelsim "github.com/cosmos/ibc-go/core/04-channel/simulation"
- host "github.com/cosmos/ibc-go/core/24-host"
- "github.com/cosmos/ibc-go/core/keeper"
+ clientsim "github.com/cosmos/ibc-go/modules/core/02-client/simulation"
+ connectionsim "github.com/cosmos/ibc-go/modules/core/03-connection/simulation"
+ channelsim "github.com/cosmos/ibc-go/modules/core/04-channel/simulation"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
+ "github.com/cosmos/ibc-go/modules/core/keeper"
)
// NewDecodeStore returns a decoder function closure that unmarshals the KVPair's
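For orientation, a generic sketch of the "decoder function closure" shape that the comment above refers to, using only the kv.Pair type imported in this hunk; the key prefix and output formatting are hypothetical placeholders, not the patched implementation.

package simulation

import (
	"bytes"
	"fmt"

	"github.com/cosmos/cosmos-sdk/types/kv"
)

// clientPrefix is a hypothetical key prefix used only for illustration.
var clientPrefix = []byte("clients/")

// NewDecodeStoreSketch returns a closure that renders two KV pairs from the
// same key space as a comparison string, the shape simulations expect.
func NewDecodeStoreSketch() func(kvA, kvB kv.Pair) string {
	return func(kvA, kvB kv.Pair) string {
		switch {
		case bytes.HasPrefix(kvA.Key, clientPrefix):
			return fmt.Sprintf("Client A: %X\nClient B: %X", kvA.Value, kvB.Value)
		default:
			panic(fmt.Sprintf("invalid key prefix %X", kvA.Key))
		}
	}
}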
diff --git a/core/simulation/decoder_test.go b/modules/core/simulation/decoder_test.go
similarity index 82%
rename from core/simulation/decoder_test.go
rename to modules/core/simulation/decoder_test.go
index 827d94cf..0817d8f2 100644
--- a/core/simulation/decoder_test.go
+++ b/modules/core/simulation/decoder_test.go
@@ -8,12 +8,12 @@ import (
"github.com/cosmos/ibc-go/testing/simapp"
"github.com/cosmos/cosmos-sdk/types/kv"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- connectiontypes "github.com/cosmos/ibc-go/core/03-connection/types"
- channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
- host "github.com/cosmos/ibc-go/core/24-host"
- "github.com/cosmos/ibc-go/core/simulation"
- ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ connectiontypes "github.com/cosmos/ibc-go/modules/core/03-connection/types"
+ channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
+ "github.com/cosmos/ibc-go/modules/core/simulation"
+ ibctmtypes "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types"
)
func TestDecodeStore(t *testing.T) {
diff --git a/core/simulation/genesis.go b/modules/core/simulation/genesis.go
similarity index 75%
rename from core/simulation/genesis.go
rename to modules/core/simulation/genesis.go
index 7944e275..d7f9d732 100644
--- a/core/simulation/genesis.go
+++ b/modules/core/simulation/genesis.go
@@ -8,14 +8,14 @@ import (
"math/rand"
"github.com/cosmos/cosmos-sdk/types/module"
- clientsims "github.com/cosmos/ibc-go/core/02-client/simulation"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- connectionsims "github.com/cosmos/ibc-go/core/03-connection/simulation"
- connectiontypes "github.com/cosmos/ibc-go/core/03-connection/types"
- channelsims "github.com/cosmos/ibc-go/core/04-channel/simulation"
- channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
- host "github.com/cosmos/ibc-go/core/24-host"
- "github.com/cosmos/ibc-go/core/types"
+ clientsims "github.com/cosmos/ibc-go/modules/core/02-client/simulation"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ connectionsims "github.com/cosmos/ibc-go/modules/core/03-connection/simulation"
+ connectiontypes "github.com/cosmos/ibc-go/modules/core/03-connection/types"
+ channelsims "github.com/cosmos/ibc-go/modules/core/04-channel/simulation"
+ channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
+ "github.com/cosmos/ibc-go/modules/core/types"
)
// Simulation parameter constants
diff --git a/core/simulation/genesis_test.go b/modules/core/simulation/genesis_test.go
similarity index 89%
rename from core/simulation/genesis_test.go
rename to modules/core/simulation/genesis_test.go
index 44b5549d..de7a9f34 100644
--- a/core/simulation/genesis_test.go
+++ b/modules/core/simulation/genesis_test.go
@@ -11,9 +11,9 @@ import (
codectypes "github.com/cosmos/cosmos-sdk/codec/types"
"github.com/cosmos/cosmos-sdk/types/module"
simtypes "github.com/cosmos/cosmos-sdk/types/simulation"
- host "github.com/cosmos/ibc-go/core/24-host"
- "github.com/cosmos/ibc-go/core/simulation"
- "github.com/cosmos/ibc-go/core/types"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
+ "github.com/cosmos/ibc-go/modules/core/simulation"
+ "github.com/cosmos/ibc-go/modules/core/types"
)
// TestRandomizedGenState tests the normal scenario of applying RandomizedGenState.
diff --git a/core/spec/01_concepts.md b/modules/core/spec/01_concepts.md
similarity index 100%
rename from core/spec/01_concepts.md
rename to modules/core/spec/01_concepts.md
diff --git a/core/spec/02_state.md b/modules/core/spec/02_state.md
similarity index 100%
rename from core/spec/02_state.md
rename to modules/core/spec/02_state.md
diff --git a/core/spec/03_state_transitions.md b/modules/core/spec/03_state_transitions.md
similarity index 100%
rename from core/spec/03_state_transitions.md
rename to modules/core/spec/03_state_transitions.md
diff --git a/core/spec/04_messages.md b/modules/core/spec/04_messages.md
similarity index 100%
rename from core/spec/04_messages.md
rename to modules/core/spec/04_messages.md
diff --git a/core/spec/05_callbacks.md b/modules/core/spec/05_callbacks.md
similarity index 100%
rename from core/spec/05_callbacks.md
rename to modules/core/spec/05_callbacks.md
diff --git a/core/spec/06_events.md b/modules/core/spec/06_events.md
similarity index 100%
rename from core/spec/06_events.md
rename to modules/core/spec/06_events.md
diff --git a/core/spec/07_params.md b/modules/core/spec/07_params.md
similarity index 100%
rename from core/spec/07_params.md
rename to modules/core/spec/07_params.md
diff --git a/core/spec/README.md b/modules/core/spec/README.md
similarity index 100%
rename from core/spec/README.md
rename to modules/core/spec/README.md
diff --git a/modules/core/types/codec.go b/modules/core/types/codec.go
new file mode 100644
index 00000000..83aa034d
--- /dev/null
+++ b/modules/core/types/codec.go
@@ -0,0 +1,23 @@
+package types
+
+import (
+ codectypes "github.com/cosmos/cosmos-sdk/codec/types"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ connectiontypes "github.com/cosmos/ibc-go/modules/core/03-connection/types"
+ channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
+ commitmenttypes "github.com/cosmos/ibc-go/modules/core/23-commitment/types"
+ solomachinetypes "github.com/cosmos/ibc-go/modules/light-clients/06-solomachine/types"
+ ibctmtypes "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types"
+ localhosttypes "github.com/cosmos/ibc-go/modules/light-clients/09-localhost/types"
+)
+
+// RegisterInterfaces registers x/ibc interfaces into protobuf Any.
+func RegisterInterfaces(registry codectypes.InterfaceRegistry) {
+ clienttypes.RegisterInterfaces(registry)
+ connectiontypes.RegisterInterfaces(registry)
+ channeltypes.RegisterInterfaces(registry)
+ solomachinetypes.RegisterInterfaces(registry)
+ ibctmtypes.RegisterInterfaces(registry)
+ localhosttypes.RegisterInterfaces(registry)
+ commitmenttypes.RegisterInterfaces(registry)
+}
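The new modules/core/types/codec.go above is shown in full, so a small application-side sketch of how it would typically be consumed may be useful. This wiring is assumed rather than part of the patch, and the module path is taken as the canonical github.com/cosmos/ibc-go.

package main

import (
	"github.com/cosmos/cosmos-sdk/codec"
	codectypes "github.com/cosmos/cosmos-sdk/codec/types"

	ibctypes "github.com/cosmos/ibc-go/modules/core/types"
)

func main() {
	// Build the interface registry and register all IBC interfaces into it.
	registry := codectypes.NewInterfaceRegistry()
	ibctypes.RegisterInterfaces(registry)

	// The registry then backs the application's proto codec.
	cdc := codec.NewProtoCodec(registry)
	_ = cdc
}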
diff --git a/core/types/genesis.go b/modules/core/types/genesis.go
similarity index 82%
rename from core/types/genesis.go
rename to modules/core/types/genesis.go
index cd8051af..4694e75c 100644
--- a/core/types/genesis.go
+++ b/modules/core/types/genesis.go
@@ -2,9 +2,9 @@ package types
import (
codectypes "github.com/cosmos/cosmos-sdk/codec/types"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- connectiontypes "github.com/cosmos/ibc-go/core/03-connection/types"
- channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ connectiontypes "github.com/cosmos/ibc-go/modules/core/03-connection/types"
+ channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
)
var _ codectypes.UnpackInterfacesMessage = GenesisState{}
diff --git a/core/types/genesis.pb.go b/modules/core/types/genesis.pb.go
similarity index 82%
rename from core/types/genesis.pb.go
rename to modules/core/types/genesis.pb.go
index fc52b6f1..8318bd26 100644
--- a/core/types/genesis.pb.go
+++ b/modules/core/types/genesis.pb.go
@@ -5,9 +5,9 @@ package types
import (
fmt "fmt"
- types "github.com/cosmos/ibc-go/core/02-client/types"
- types1 "github.com/cosmos/ibc-go/core/03-connection/types"
- types2 "github.com/cosmos/ibc-go/core/04-channel/types"
+ types "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ types1 "github.com/cosmos/ibc-go/modules/core/03-connection/types"
+ types2 "github.com/cosmos/ibc-go/modules/core/04-channel/types"
_ "github.com/gogo/protobuf/gogoproto"
proto "github.com/gogo/protobuf/proto"
io "io"
@@ -97,27 +97,28 @@ func init() {
func init() { proto.RegisterFile("ibcgo/core/types/v1/genesis.proto", fileDescriptor_f0cf35a95987cc01) }
var fileDescriptor_f0cf35a95987cc01 = []byte{
- // 313 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x92, 0xcf, 0x4a, 0xc3, 0x30,
- 0x1c, 0xc7, 0xdb, 0x09, 0x1e, 0xaa, 0x4e, 0xac, 0x7f, 0xd0, 0x81, 0x99, 0x4b, 0x61, 0x78, 0x31,
- 0xa1, 0x7a, 0x13, 0xbc, 0xec, 0xe2, 0xbd, 0xde, 0xbc, 0x48, 0x1b, 0x42, 0x1b, 0x68, 0x93, 0xb1,
- 0xc6, 0xe2, 0xde, 0xc2, 0xc7, 0xda, 0x71, 0x47, 0x4f, 0x63, 0xb4, 0x6f, 0xe0, 0x13, 0xc8, 0x92,
- 0xd8, 0xb5, 0xe4, 0x56, 0xbe, 0xfd, 0xfc, 0xbe, 0x9f, 0xfc, 0xf3, 0x26, 0x2c, 0x21, 0xa9, 0xc0,
- 0x44, 0x2c, 0x28, 0x96, 0xcb, 0x39, 0x2d, 0x71, 0x15, 0xe2, 0x94, 0x72, 0x5a, 0xb2, 0x12, 0xcd,
- 0x17, 0x42, 0x0a, 0xff, 0x5c, 0x21, 0x68, 0x87, 0x20, 0x85, 0xa0, 0x2a, 0x1c, 0x5d, 0xa4, 0x22,
- 0x15, 0xea, 0x3f, 0xde, 0x7d, 0x69, 0x74, 0x04, 0x3b, 0x6d, 0x24, 0x67, 0x94, 0x4b, 0xab, 0x6e,
- 0x34, 0xed, 0x32, 0x82, 0x73, 0x4a, 0x24, 0x13, 0xdc, 0xe6, 0x82, 0x2e, 0x97, 0xc5, 0x9c, 0xd3,
- 0xdc, 0x82, 0xe0, 0x76, 0xe0, 0x1d, 0xbf, 0xea, 0xe4, 0x4d, 0xc6, 0x92, 0xfa, 0x99, 0x37, 0xd4,
- 0xe2, 0x0f, 0x03, 0x5e, 0xbb, 0x77, 0xee, 0xfd, 0xd1, 0x23, 0x44, 0x9d, 0x5d, 0x68, 0x02, 0x55,
- 0x21, 0xea, 0xce, 0xce, 0x6e, 0x57, 0x9b, 0xb1, 0xf3, 0xbb, 0x19, 0x5f, 0x2e, 0xe3, 0x22, 0x7f,
- 0x86, 0xfd, 0x1e, 0x18, 0x9d, 0xe8, 0xc0, 0x8c, 0xf8, 0x5f, 0x9e, 0xbf, 0x5f, 0x7e, 0x6b, 0x1b,
- 0x28, 0xdb, 0xb4, 0x67, 0x6b, 0x29, 0xcb, 0x38, 0x31, 0xc6, 0x1b, 0x63, 0xb4, 0xfa, 0x60, 0x74,
- 0xb6, 0x0f, 0xff, 0xcd, 0xb9, 0x77, 0x6a, 0x0e, 0xa4, 0xd5, 0x1e, 0x28, 0x6d, 0xd0, 0xd3, 0x6a,
- 0xc4, 0x72, 0x02, 0xe3, 0xbc, 0x32, 0xce, 0x7e, 0x13, 0x8c, 0x86, 0x26, 0x31, 0x43, 0xb3, 0x97,
- 0x55, 0x0d, 0xdc, 0x75, 0x0d, 0xdc, 0x6d, 0x0d, 0xdc, 0xef, 0x06, 0x38, 0xeb, 0x06, 0x38, 0x3f,
- 0x0d, 0x70, 0xde, 0x83, 0x94, 0xc9, 0xec, 0x33, 0x41, 0x44, 0x14, 0x98, 0x88, 0xb2, 0x10, 0x25,
- 0x66, 0x09, 0x79, 0xe8, 0x3d, 0xa7, 0xe4, 0x50, 0x5d, 0xd4, 0xd3, 0x5f, 0x00, 0x00, 0x00, 0xff,
- 0xff, 0x44, 0x1f, 0x35, 0xd8, 0x69, 0x02, 0x00, 0x00,
+ // 322 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x92, 0xcb, 0x4a, 0x33, 0x31,
+ 0x18, 0x86, 0x67, 0xfa, 0xc3, 0xbf, 0x18, 0xb5, 0xe2, 0x78, 0x40, 0x0b, 0xa6, 0x36, 0x85, 0x22,
+ 0x88, 0x09, 0xd5, 0x9d, 0xcb, 0x82, 0xb8, 0xaf, 0x3b, 0x37, 0xd2, 0x89, 0x61, 0x1a, 0x98, 0xc9,
+ 0x57, 0x9a, 0xb4, 0xd8, 0xbb, 0xf0, 0xb2, 0xba, 0xec, 0xd2, 0x55, 0x29, 0xed, 0x1d, 0x78, 0x05,
+ 0xd2, 0x24, 0xb6, 0x19, 0xb2, 0x1b, 0xde, 0x79, 0xbe, 0xf7, 0xc9, 0x29, 0x69, 0x89, 0x8c, 0xe5,
+ 0x40, 0x19, 0x8c, 0x39, 0xd5, 0xb3, 0x11, 0x57, 0x74, 0xda, 0xa5, 0x39, 0x97, 0x5c, 0x09, 0x45,
+ 0x46, 0x63, 0xd0, 0x90, 0x9e, 0x1a, 0x84, 0x6c, 0x11, 0x62, 0x10, 0x32, 0xed, 0x36, 0xce, 0x72,
+ 0xc8, 0xc1, 0xfc, 0xa7, 0xdb, 0x2f, 0x8b, 0x36, 0xb0, 0xd7, 0xc6, 0x0a, 0xc1, 0xa5, 0x0e, 0xea,
+ 0x1a, 0x1d, 0x9f, 0x01, 0x29, 0x39, 0xd3, 0x02, 0x64, 0xc8, 0xb5, 0x7d, 0x6e, 0x38, 0x90, 0x92,
+ 0x17, 0x01, 0x84, 0x57, 0xb5, 0xe4, 0xf0, 0xc5, 0x26, 0xaf, 0x7a, 0xa0, 0x79, 0x3a, 0x4c, 0xea,
+ 0x56, 0xfc, 0xee, 0xc0, 0xcb, 0xf8, 0x26, 0xbe, 0x3d, 0x78, 0xc0, 0xc4, 0xdb, 0x85, 0x25, 0xc8,
+ 0xb4, 0x4b, 0xfc, 0xd9, 0xde, 0xf5, 0x7c, 0xd9, 0x8c, 0x7e, 0x96, 0xcd, 0xf3, 0xd9, 0xa0, 0x2c,
+ 0x9e, 0x70, 0xb5, 0x07, 0xf7, 0x8f, 0x6c, 0xe0, 0x46, 0xd2, 0xcf, 0x24, 0xdd, 0x2f, 0x7f, 0x67,
+ 0xab, 0x19, 0x5b, 0xa7, 0x62, 0xdb, 0x51, 0x81, 0xb1, 0xe5, 0x8c, 0x57, 0xce, 0x18, 0xf4, 0xe1,
+ 0xfe, 0xc9, 0x3e, 0xfc, 0x33, 0x17, 0xc9, 0xb1, 0x3b, 0x90, 0x9d, 0xf6, 0x9f, 0xd1, 0xb6, 0x2b,
+ 0x5a, 0x8b, 0x04, 0x4e, 0xe4, 0x9c, 0x17, 0xce, 0x59, 0x6d, 0xc2, 0xfd, 0xba, 0x4b, 0xdc, 0x50,
+ 0xef, 0x79, 0xbe, 0x46, 0xf1, 0x62, 0x8d, 0xe2, 0xd5, 0x1a, 0xc5, 0x5f, 0x1b, 0x14, 0x2d, 0x36,
+ 0x28, 0xfa, 0xde, 0xa0, 0xe8, 0xed, 0x2e, 0x17, 0x7a, 0x38, 0xc9, 0x08, 0x83, 0x92, 0x32, 0x50,
+ 0x25, 0x28, 0x2a, 0x32, 0x76, 0x9f, 0x03, 0x2d, 0xe1, 0x63, 0x52, 0x70, 0xe5, 0x3d, 0xab, 0xec,
+ 0xbf, 0xb9, 0xb0, 0xc7, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xe0, 0x5d, 0xb8, 0x0a, 0x71, 0x02,
+ 0x00, 0x00,
}
func (m *GenesisState) Marshal() (dAtA []byte, err error) {
diff --git a/core/types/query.go b/modules/core/types/query.go
similarity index 59%
rename from core/types/query.go
rename to modules/core/types/query.go
index bd7d2e83..a4a4bd54 100644
--- a/core/types/query.go
+++ b/modules/core/types/query.go
@@ -3,12 +3,12 @@ package types
import (
"github.com/gogo/protobuf/grpc"
- client "github.com/cosmos/ibc-go/core/02-client"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- connection "github.com/cosmos/ibc-go/core/03-connection"
- connectiontypes "github.com/cosmos/ibc-go/core/03-connection/types"
- channel "github.com/cosmos/ibc-go/core/04-channel"
- channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
+ client "github.com/cosmos/ibc-go/modules/core/02-client"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ connection "github.com/cosmos/ibc-go/modules/core/03-connection"
+ connectiontypes "github.com/cosmos/ibc-go/modules/core/03-connection/types"
+ channel "github.com/cosmos/ibc-go/modules/core/04-channel"
+ channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
)
// QueryServer defines the IBC interfaces that the gRPC query server must implement
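The hunk ends at the QueryServer doc comment, so the interface body itself is not visible here. As a sketch of how such a combined query server can be expressed through interface embedding, assuming only the submodule packages imported above:

package types

import (
	clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
	connectiontypes "github.com/cosmos/ibc-go/modules/core/03-connection/types"
	channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
)

// QueryServerSketch embeds the per-submodule gRPC query services so that one
// implementation can serve every IBC query route.
type QueryServerSketch interface {
	clienttypes.QueryServer
	connectiontypes.QueryServer
	channeltypes.QueryServer
}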
diff --git a/light-clients/06-solomachine/doc.go b/modules/light-clients/06-solomachine/doc.go
similarity index 100%
rename from light-clients/06-solomachine/doc.go
rename to modules/light-clients/06-solomachine/doc.go
diff --git a/light-clients/06-solomachine/module.go b/modules/light-clients/06-solomachine/module.go
similarity index 64%
rename from light-clients/06-solomachine/module.go
rename to modules/light-clients/06-solomachine/module.go
index bfc820b8..facdf529 100644
--- a/light-clients/06-solomachine/module.go
+++ b/modules/light-clients/06-solomachine/module.go
@@ -1,7 +1,7 @@
package solomachine
import (
- "github.com/cosmos/ibc-go/light-clients/06-solomachine/types"
+ "github.com/cosmos/ibc-go/modules/light-clients/06-solomachine/types"
)
// Name returns the solo machine client name.
diff --git a/light-clients/06-solomachine/spec/01_concepts.md b/modules/light-clients/06-solomachine/spec/01_concepts.md
similarity index 100%
rename from light-clients/06-solomachine/spec/01_concepts.md
rename to modules/light-clients/06-solomachine/spec/01_concepts.md
diff --git a/light-clients/06-solomachine/spec/02_state.md b/modules/light-clients/06-solomachine/spec/02_state.md
similarity index 100%
rename from light-clients/06-solomachine/spec/02_state.md
rename to modules/light-clients/06-solomachine/spec/02_state.md
diff --git a/light-clients/06-solomachine/spec/03_state_transitions.md b/modules/light-clients/06-solomachine/spec/03_state_transitions.md
similarity index 100%
rename from light-clients/06-solomachine/spec/03_state_transitions.md
rename to modules/light-clients/06-solomachine/spec/03_state_transitions.md
diff --git a/light-clients/06-solomachine/spec/04_messages.md b/modules/light-clients/06-solomachine/spec/04_messages.md
similarity index 100%
rename from light-clients/06-solomachine/spec/04_messages.md
rename to modules/light-clients/06-solomachine/spec/04_messages.md
diff --git a/light-clients/06-solomachine/spec/README.md b/modules/light-clients/06-solomachine/spec/README.md
similarity index 100%
rename from light-clients/06-solomachine/spec/README.md
rename to modules/light-clients/06-solomachine/spec/README.md
diff --git a/light-clients/06-solomachine/types/client_state.go b/modules/light-clients/06-solomachine/types/client_state.go
similarity index 98%
rename from light-clients/06-solomachine/types/client_state.go
rename to modules/light-clients/06-solomachine/types/client_state.go
index 5dfadd25..d008ac81 100644
--- a/light-clients/06-solomachine/types/client_state.go
+++ b/modules/light-clients/06-solomachine/types/client_state.go
@@ -10,10 +10,10 @@ import (
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
"github.com/cosmos/cosmos-sdk/types/tx/signing"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
- host "github.com/cosmos/ibc-go/core/24-host"
- "github.com/cosmos/ibc-go/core/exported"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ commitmenttypes "github.com/cosmos/ibc-go/modules/core/23-commitment/types"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
+ "github.com/cosmos/ibc-go/modules/core/exported"
)
var _ exported.ClientState = (*ClientState)(nil)
diff --git a/light-clients/06-solomachine/types/client_state_test.go b/modules/light-clients/06-solomachine/types/client_state_test.go
similarity index 97%
rename from light-clients/06-solomachine/types/client_state_test.go
rename to modules/light-clients/06-solomachine/types/client_state_test.go
index 88931bc5..6666f2d4 100644
--- a/light-clients/06-solomachine/types/client_state_test.go
+++ b/modules/light-clients/06-solomachine/types/client_state_test.go
@@ -1,13 +1,13 @@
package types_test
import (
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- connectiontypes "github.com/cosmos/ibc-go/core/03-connection/types"
- channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
- commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
- "github.com/cosmos/ibc-go/core/exported"
- "github.com/cosmos/ibc-go/light-clients/06-solomachine/types"
- ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ connectiontypes "github.com/cosmos/ibc-go/modules/core/03-connection/types"
+ channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
+ commitmenttypes "github.com/cosmos/ibc-go/modules/core/23-commitment/types"
+ "github.com/cosmos/ibc-go/modules/core/exported"
+ "github.com/cosmos/ibc-go/modules/light-clients/06-solomachine/types"
+ ibctmtypes "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types"
ibctesting "github.com/cosmos/ibc-go/testing"
)
diff --git a/light-clients/06-solomachine/types/codec.go b/modules/light-clients/06-solomachine/types/codec.go
similarity index 96%
rename from light-clients/06-solomachine/types/codec.go
rename to modules/light-clients/06-solomachine/types/codec.go
index 5b82081f..833c9c37 100644
--- a/light-clients/06-solomachine/types/codec.go
+++ b/modules/light-clients/06-solomachine/types/codec.go
@@ -5,8 +5,8 @@ import (
codectypes "github.com/cosmos/cosmos-sdk/codec/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
"github.com/cosmos/cosmos-sdk/types/tx/signing"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- "github.com/cosmos/ibc-go/core/exported"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ "github.com/cosmos/ibc-go/modules/core/exported"
)
// RegisterInterfaces register the ibc channel submodule interfaces to protobuf
diff --git a/light-clients/06-solomachine/types/codec_test.go b/modules/light-clients/06-solomachine/types/codec_test.go
similarity index 95%
rename from light-clients/06-solomachine/types/codec_test.go
rename to modules/light-clients/06-solomachine/types/codec_test.go
index 68539aa8..d4589be6 100644
--- a/light-clients/06-solomachine/types/codec_test.go
+++ b/modules/light-clients/06-solomachine/types/codec_test.go
@@ -1,10 +1,10 @@
package types_test
import (
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- connectiontypes "github.com/cosmos/ibc-go/core/03-connection/types"
- channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
- "github.com/cosmos/ibc-go/light-clients/06-solomachine/types"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ connectiontypes "github.com/cosmos/ibc-go/modules/core/03-connection/types"
+ channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
+ "github.com/cosmos/ibc-go/modules/light-clients/06-solomachine/types"
ibctesting "github.com/cosmos/ibc-go/testing"
)
diff --git a/light-clients/06-solomachine/types/consensus_state.go b/modules/light-clients/06-solomachine/types/consensus_state.go
similarity index 93%
rename from light-clients/06-solomachine/types/consensus_state.go
rename to modules/light-clients/06-solomachine/types/consensus_state.go
index 72efd980..31aaa084 100644
--- a/light-clients/06-solomachine/types/consensus_state.go
+++ b/modules/light-clients/06-solomachine/types/consensus_state.go
@@ -5,8 +5,8 @@ import (
cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- "github.com/cosmos/ibc-go/core/exported"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ "github.com/cosmos/ibc-go/modules/core/exported"
)
var _ exported.ConsensusState = &ConsensusState{}
diff --git a/light-clients/06-solomachine/types/consensus_state_test.go b/modules/light-clients/06-solomachine/types/consensus_state_test.go
similarity index 93%
rename from light-clients/06-solomachine/types/consensus_state_test.go
rename to modules/light-clients/06-solomachine/types/consensus_state_test.go
index d943b6ee..ff801aa9 100644
--- a/light-clients/06-solomachine/types/consensus_state_test.go
+++ b/modules/light-clients/06-solomachine/types/consensus_state_test.go
@@ -1,8 +1,8 @@
package types_test
import (
- "github.com/cosmos/ibc-go/core/exported"
- "github.com/cosmos/ibc-go/light-clients/06-solomachine/types"
+ "github.com/cosmos/ibc-go/modules/core/exported"
+ "github.com/cosmos/ibc-go/modules/light-clients/06-solomachine/types"
ibctesting "github.com/cosmos/ibc-go/testing"
)
diff --git a/light-clients/06-solomachine/types/errors.go b/modules/light-clients/06-solomachine/types/errors.go
similarity index 100%
rename from light-clients/06-solomachine/types/errors.go
rename to modules/light-clients/06-solomachine/types/errors.go
diff --git a/light-clients/06-solomachine/types/header.go b/modules/light-clients/06-solomachine/types/header.go
similarity index 94%
rename from light-clients/06-solomachine/types/header.go
rename to modules/light-clients/06-solomachine/types/header.go
index 384193cf..62a1eda1 100644
--- a/light-clients/06-solomachine/types/header.go
+++ b/modules/light-clients/06-solomachine/types/header.go
@@ -5,8 +5,8 @@ import (
cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- "github.com/cosmos/ibc-go/core/exported"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ "github.com/cosmos/ibc-go/modules/core/exported"
)
var _ exported.Header = &Header{}
diff --git a/light-clients/06-solomachine/types/header_test.go b/modules/light-clients/06-solomachine/types/header_test.go
similarity index 94%
rename from light-clients/06-solomachine/types/header_test.go
rename to modules/light-clients/06-solomachine/types/header_test.go
index 65ca94ad..c4391cae 100644
--- a/light-clients/06-solomachine/types/header_test.go
+++ b/modules/light-clients/06-solomachine/types/header_test.go
@@ -1,8 +1,8 @@
package types_test
import (
- "github.com/cosmos/ibc-go/core/exported"
- "github.com/cosmos/ibc-go/light-clients/06-solomachine/types"
+ "github.com/cosmos/ibc-go/modules/core/exported"
+ "github.com/cosmos/ibc-go/modules/light-clients/06-solomachine/types"
ibctesting "github.com/cosmos/ibc-go/testing"
)
diff --git a/light-clients/06-solomachine/types/misbehaviour.go b/modules/light-clients/06-solomachine/types/misbehaviour.go
similarity index 93%
rename from light-clients/06-solomachine/types/misbehaviour.go
rename to modules/light-clients/06-solomachine/types/misbehaviour.go
index d0d9bfe3..7a870ad4 100644
--- a/light-clients/06-solomachine/types/misbehaviour.go
+++ b/modules/light-clients/06-solomachine/types/misbehaviour.go
@@ -4,9 +4,9 @@ import (
"bytes"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- host "github.com/cosmos/ibc-go/core/24-host"
- "github.com/cosmos/ibc-go/core/exported"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
+ "github.com/cosmos/ibc-go/modules/core/exported"
)
var _ exported.Misbehaviour = &Misbehaviour{}
diff --git a/light-clients/06-solomachine/types/misbehaviour_handle.go b/modules/light-clients/06-solomachine/types/misbehaviour_handle.go
similarity index 96%
rename from light-clients/06-solomachine/types/misbehaviour_handle.go
rename to modules/light-clients/06-solomachine/types/misbehaviour_handle.go
index 2306c47f..2597e5e3 100644
--- a/light-clients/06-solomachine/types/misbehaviour_handle.go
+++ b/modules/light-clients/06-solomachine/types/misbehaviour_handle.go
@@ -4,8 +4,8 @@ import (
"github.com/cosmos/cosmos-sdk/codec"
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- "github.com/cosmos/ibc-go/core/exported"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ "github.com/cosmos/ibc-go/modules/core/exported"
)
// CheckMisbehaviourAndUpdateState determines whether or not the currently registered
diff --git a/light-clients/06-solomachine/types/misbehaviour_handle_test.go b/modules/light-clients/06-solomachine/types/misbehaviour_handle_test.go
similarity index 97%
rename from light-clients/06-solomachine/types/misbehaviour_handle_test.go
rename to modules/light-clients/06-solomachine/types/misbehaviour_handle_test.go
index 50b7523a..efdd0722 100644
--- a/light-clients/06-solomachine/types/misbehaviour_handle_test.go
+++ b/modules/light-clients/06-solomachine/types/misbehaviour_handle_test.go
@@ -1,9 +1,9 @@
package types_test
import (
- "github.com/cosmos/ibc-go/core/exported"
- "github.com/cosmos/ibc-go/light-clients/06-solomachine/types"
- ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
+ "github.com/cosmos/ibc-go/modules/core/exported"
+ "github.com/cosmos/ibc-go/modules/light-clients/06-solomachine/types"
+ ibctmtypes "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types"
ibctesting "github.com/cosmos/ibc-go/testing"
)
diff --git a/light-clients/06-solomachine/types/misbehaviour_test.go b/modules/light-clients/06-solomachine/types/misbehaviour_test.go
similarity index 96%
rename from light-clients/06-solomachine/types/misbehaviour_test.go
rename to modules/light-clients/06-solomachine/types/misbehaviour_test.go
index e8fc4d4f..00f97219 100644
--- a/light-clients/06-solomachine/types/misbehaviour_test.go
+++ b/modules/light-clients/06-solomachine/types/misbehaviour_test.go
@@ -1,8 +1,8 @@
package types_test
import (
- "github.com/cosmos/ibc-go/core/exported"
- "github.com/cosmos/ibc-go/light-clients/06-solomachine/types"
+ "github.com/cosmos/ibc-go/modules/core/exported"
+ "github.com/cosmos/ibc-go/modules/light-clients/06-solomachine/types"
ibctesting "github.com/cosmos/ibc-go/testing"
)
diff --git a/light-clients/06-solomachine/types/proof.go b/modules/light-clients/06-solomachine/types/proof.go
similarity index 97%
rename from light-clients/06-solomachine/types/proof.go
rename to modules/light-clients/06-solomachine/types/proof.go
index e4e1032e..785a7b99 100644
--- a/light-clients/06-solomachine/types/proof.go
+++ b/modules/light-clients/06-solomachine/types/proof.go
@@ -6,11 +6,11 @@ import (
"github.com/cosmos/cosmos-sdk/crypto/types/multisig"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
"github.com/cosmos/cosmos-sdk/types/tx/signing"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- connectiontypes "github.com/cosmos/ibc-go/core/03-connection/types"
- channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
- commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
- "github.com/cosmos/ibc-go/core/exported"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ connectiontypes "github.com/cosmos/ibc-go/modules/core/03-connection/types"
+ channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
+ commitmenttypes "github.com/cosmos/ibc-go/modules/core/23-commitment/types"
+ "github.com/cosmos/ibc-go/modules/core/exported"
)
// VerifySignature verifies if the provided public key generated the signature
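The proof.go hunk above touches only import paths. As a rough, single-key illustration of signature verification with the SDK crypto types (the patched VerifySignature additionally handles multisig SignatureData, which is not shown here):

package main

import (
	"fmt"

	"github.com/cosmos/cosmos-sdk/crypto/keys/secp256k1"
)

func main() {
	priv := secp256k1.GenPrivKey()
	msg := []byte("bytes to sign")

	sig, err := priv.Sign(msg)
	if err != nil {
		panic(err)
	}

	// VerifySignature reports whether sig was produced over msg by this key.
	ok := priv.PubKey().VerifySignature(msg, sig)
	fmt.Println("signature valid:", ok)
}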
diff --git a/light-clients/06-solomachine/types/proof_test.go b/modules/light-clients/06-solomachine/types/proof_test.go
similarity index 94%
rename from light-clients/06-solomachine/types/proof_test.go
rename to modules/light-clients/06-solomachine/types/proof_test.go
index 43e06b15..811929f1 100644
--- a/light-clients/06-solomachine/types/proof_test.go
+++ b/modules/light-clients/06-solomachine/types/proof_test.go
@@ -3,8 +3,8 @@ package types_test
import (
cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types"
"github.com/cosmos/cosmos-sdk/types/tx/signing"
- "github.com/cosmos/ibc-go/light-clients/06-solomachine/types"
- solomachinetypes "github.com/cosmos/ibc-go/light-clients/06-solomachine/types"
+ "github.com/cosmos/ibc-go/modules/light-clients/06-solomachine/types"
+ solomachinetypes "github.com/cosmos/ibc-go/modules/light-clients/06-solomachine/types"
ibctesting "github.com/cosmos/ibc-go/testing"
)
diff --git a/light-clients/06-solomachine/types/proposal_handle.go b/modules/light-clients/06-solomachine/types/proposal_handle.go
similarity index 94%
rename from light-clients/06-solomachine/types/proposal_handle.go
rename to modules/light-clients/06-solomachine/types/proposal_handle.go
index 269a914a..da96673c 100644
--- a/light-clients/06-solomachine/types/proposal_handle.go
+++ b/modules/light-clients/06-solomachine/types/proposal_handle.go
@@ -6,8 +6,8 @@ import (
"github.com/cosmos/cosmos-sdk/codec"
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- "github.com/cosmos/ibc-go/core/exported"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ "github.com/cosmos/ibc-go/modules/core/exported"
)
// CheckSubstituteAndUpdateState verifies that the subject is allowed to be updated by
diff --git a/light-clients/06-solomachine/types/proposal_handle_test.go b/modules/light-clients/06-solomachine/types/proposal_handle_test.go
similarity index 93%
rename from light-clients/06-solomachine/types/proposal_handle_test.go
rename to modules/light-clients/06-solomachine/types/proposal_handle_test.go
index 94f44c88..4b797e6d 100644
--- a/light-clients/06-solomachine/types/proposal_handle_test.go
+++ b/modules/light-clients/06-solomachine/types/proposal_handle_test.go
@@ -1,9 +1,9 @@
package types_test
import (
- "github.com/cosmos/ibc-go/core/exported"
- "github.com/cosmos/ibc-go/light-clients/06-solomachine/types"
- ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
+ "github.com/cosmos/ibc-go/modules/core/exported"
+ "github.com/cosmos/ibc-go/modules/light-clients/06-solomachine/types"
+ ibctmtypes "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types"
ibctesting "github.com/cosmos/ibc-go/testing"
)
diff --git a/light-clients/06-solomachine/types/solomachine.go b/modules/light-clients/06-solomachine/types/solomachine.go
similarity index 97%
rename from light-clients/06-solomachine/types/solomachine.go
rename to modules/light-clients/06-solomachine/types/solomachine.go
index a49953a1..d80b17b2 100644
--- a/light-clients/06-solomachine/types/solomachine.go
+++ b/modules/light-clients/06-solomachine/types/solomachine.go
@@ -3,7 +3,7 @@ package types
import (
codectypes "github.com/cosmos/cosmos-sdk/codec/types"
cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types"
- "github.com/cosmos/ibc-go/core/exported"
+ "github.com/cosmos/ibc-go/modules/core/exported"
)
// Interface implementation checks.
diff --git a/light-clients/06-solomachine/types/solomachine.pb.go b/modules/light-clients/06-solomachine/types/solomachine.pb.go
similarity index 92%
rename from light-clients/06-solomachine/types/solomachine.pb.go
rename to modules/light-clients/06-solomachine/types/solomachine.pb.go
index 90c4110d..3374af0d 100644
--- a/light-clients/06-solomachine/types/solomachine.pb.go
+++ b/modules/light-clients/06-solomachine/types/solomachine.pb.go
@@ -6,8 +6,8 @@ package types
import (
fmt "fmt"
types "github.com/cosmos/cosmos-sdk/codec/types"
- types1 "github.com/cosmos/ibc-go/core/03-connection/types"
- types2 "github.com/cosmos/ibc-go/core/04-channel/types"
+ types1 "github.com/cosmos/ibc-go/modules/core/03-connection/types"
+ types2 "github.com/cosmos/ibc-go/modules/core/04-channel/types"
_ "github.com/gogo/protobuf/gogoproto"
proto "github.com/gogo/protobuf/proto"
io "io"
@@ -823,93 +823,93 @@ func init() {
}
var fileDescriptor_39862ff634781870 = []byte{
- // 1361 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x57, 0xdd, 0x8e, 0xda, 0xd6,
- 0x13, 0x5f, 0x13, 0xb2, 0x59, 0x86, 0x0d, 0xcb, 0xdf, 0x21, 0x09, 0xeb, 0x44, 0xe0, 0xbf, 0x23,
- 0xa5, 0xdb, 0x8f, 0x40, 0x37, 0x51, 0xa3, 0x28, 0xad, 0xda, 0x1a, 0xe3, 0x26, 0x24, 0xbb, 0x5e,
- 0x6a, 0x4c, 0xdb, 0xe4, 0xa2, 0x96, 0x31, 0x67, 0xc1, 0x0a, 0xd8, 0x14, 0x1b, 0x08, 0x95, 0x2a,
- 0x55, 0xbd, 0x4a, 0x51, 0x2f, 0xfa, 0x02, 0x48, 0x55, 0xab, 0xbe, 0x4b, 0xa4, 0xde, 0x44, 0xea,
- 0x4d, 0xaf, 0x50, 0x9b, 0xbc, 0x01, 0x4f, 0x50, 0xd9, 0xe7, 0x18, 0xdb, 0xec, 0x2e, 0x49, 0xbf,
- 0xee, 0xce, 0x99, 0xf9, 0xcd, 0x6f, 0xe6, 0xcc, 0x8c, 0xe7, 0x1c, 0xc3, 0x0d, 0xa3, 0xa1, 0xb7,
- 0xac, 0x62, 0xc7, 0x68, 0xb5, 0x1d, 0xbd, 0x63, 0x20, 0xd3, 0xb1, 0x8b, 0xb6, 0xd5, 0xb1, 0xba,
- 0x9a, 0xde, 0x36, 0x4c, 0x54, 0x1c, 0xee, 0x86, 0xb7, 0x85, 0x5e, 0xdf, 0x72, 0x2c, 0xfa, 0xff,
- 0x9e, 0x51, 0x21, 0x6c, 0x54, 0x08, 0xa3, 0x86, 0xbb, 0xcc, 0xeb, 0x98, 0x57, 0xb7, 0xfa, 0xa8,
- 0xa8, 0x5b, 0xa6, 0x89, 0x74, 0xc7, 0xb0, 0x4c, 0x97, 0x2e, 0xd8, 0x61, 0x36, 0xe6, 0x4a, 0x18,
- 0xda, 0xd6, 0x4c, 0x13, 0x75, 0x3c, 0x1c, 0x5e, 0x12, 0x50, 0xa6, 0x65, 0xb5, 0x2c, 0x6f, 0x59,
- 0x74, 0x57, 0x44, 0xba, 0xdd, 0xb2, 0xac, 0x56, 0x07, 0x15, 0xbd, 0x5d, 0x63, 0x70, 0x58, 0xd4,
- 0xcc, 0x31, 0x56, 0x71, 0xbf, 0xc6, 0x20, 0x29, 0x78, 0xb1, 0xd5, 0x1c, 0xcd, 0x41, 0x34, 0x03,
- 0x1b, 0x36, 0xfa, 0x62, 0x80, 0x4c, 0x1d, 0x65, 0x29, 0x96, 0xda, 0x89, 0xcb, 0x8b, 0x3d, 0x2d,
- 0xc0, 0xd6, 0x61, 0xdf, 0xfa, 0x12, 0x99, 0xea, 0x02, 0x12, 0x73, 0x21, 0x25, 0x66, 0x3e, 0xcb,
- 0x5f, 0x18, 0x6b, 0xdd, 0xce, 0x6d, 0x6e, 0x09, 0xc0, 0xc9, 0x29, 0x2c, 0xa9, 0xf9, 0x24, 0x43,
- 0xd8, 0xd2, 0x2d, 0xd3, 0x46, 0xa6, 0x3d, 0xb0, 0x55, 0xdb, 0xf5, 0x99, 0x3d, 0xc5, 0x52, 0x3b,
- 0xc9, 0xeb, 0xbb, 0x85, 0x97, 0xa6, 0xab, 0x20, 0xf8, 0x96, 0x5e, 0xb0, 0x61, 0xbf, 0x4b, 0x9c,
- 0x9c, 0x9c, 0xd2, 0x23, 0x58, 0x1a, 0xc1, 0x25, 0xad, 0xd3, 0xb1, 0x46, 0xea, 0xa0, 0xd7, 0xd4,
- 0x1c, 0xa4, 0x6a, 0x87, 0x0e, 0xea, 0xab, 0xbd, 0xbe, 0xd5, 0xb3, 0x6c, 0xad, 0x93, 0x8d, 0xb3,
- 0xd4, 0xce, 0x46, 0xe9, 0xea, 0x7c, 0x96, 0xe7, 0x30, 0xe1, 0x0a, 0x30, 0x27, 0x67, 0x3d, 0x6d,
- 0xdd, 0x53, 0xf2, 0xae, 0xae, 0x4a, 0x54, 0xb7, 0xe3, 0x4f, 0x7e, 0xc8, 0xaf, 0x71, 0x3f, 0x52,
- 0x90, 0x8a, 0xc6, 0x4a, 0xdf, 0x03, 0xe8, 0x0d, 0x1a, 0x1d, 0x43, 0x57, 0x1f, 0xa1, 0xb1, 0x97,
- 0xda, 0xe4, 0xf5, 0x4c, 0x01, 0x17, 0xa6, 0xe0, 0x17, 0xa6, 0xc0, 0x9b, 0xe3, 0xd2, 0xf9, 0xf9,
- 0x2c, 0xff, 0x3f, 0x1c, 0x44, 0x60, 0xc1, 0xc9, 0x09, 0xbc, 0xb9, 0x8f, 0xc6, 0x34, 0x0b, 0xc9,
- 0xa6, 0x31, 0x44, 0x7d, 0xdb, 0x38, 0x34, 0x50, 0xdf, 0x2b, 0x42, 0x42, 0x0e, 0x8b, 0xe8, 0xcb,
- 0x90, 0x70, 0x8c, 0x2e, 0xb2, 0x1d, 0xad, 0xdb, 0xf3, 0xf2, 0x1b, 0x97, 0x03, 0x01, 0x09, 0xf2,
- 0x9b, 0x18, 0xac, 0xdf, 0x45, 0x5a, 0x13, 0xf5, 0x57, 0x56, 0x3d, 0x42, 0x15, 0x5b, 0xa2, 0x72,
- 0xb5, 0xb6, 0xd1, 0x32, 0x35, 0x67, 0xd0, 0xc7, 0x85, 0xdc, 0x94, 0x03, 0x01, 0x5d, 0x87, 0x94,
- 0x89, 0x46, 0x6a, 0xe8, 0xe0, 0xf1, 0x15, 0x07, 0xdf, 0x9e, 0xcf, 0xf2, 0xe7, 0xf1, 0xc1, 0xa3,
- 0x56, 0x9c, 0xbc, 0x69, 0xa2, 0x51, 0x75, 0x71, 0x7e, 0x01, 0xb6, 0x5c, 0x40, 0x38, 0x07, 0xa7,
- 0xdd, 0x1c, 0x84, 0x1b, 0x62, 0x09, 0xc0, 0xc9, 0x6e, 0x24, 0xe5, 0x40, 0x40, 0x92, 0xf0, 0x4b,
- 0x0c, 0x36, 0xf7, 0x0d, 0xbb, 0x81, 0xda, 0xda, 0xd0, 0xb0, 0x06, 0x7d, 0x7a, 0x17, 0x12, 0xb8,
- 0xf9, 0x54, 0xa3, 0xe9, 0xe5, 0x22, 0x51, 0xca, 0xcc, 0x67, 0xf9, 0x34, 0x69, 0x33, 0x5f, 0xc5,
- 0xc9, 0x1b, 0x78, 0x5d, 0x69, 0x46, 0xb2, 0x17, 0x5b, 0xca, 0x5e, 0x1f, 0xce, 0x2e, 0xd2, 0xa1,
- 0x5a, 0xa6, 0xdf, 0xec, 0x37, 0x5e, 0xa1, 0xd9, 0x6b, 0xbe, 0x1d, 0x6f, 0x36, 0xcb, 0x9a, 0xa3,
- 0x95, 0xb2, 0xf3, 0x59, 0x3e, 0x83, 0xe3, 0x88, 0x70, 0x72, 0xf2, 0xe6, 0x62, 0x7f, 0x60, 0x2e,
- 0xf9, 0x74, 0x46, 0x16, 0x49, 0xfa, 0xbf, 0xe7, 0xd3, 0x19, 0x59, 0x61, 0x9f, 0xca, 0xc8, 0x22,
- 0xd9, 0x7c, 0x4a, 0x41, 0x7a, 0x99, 0x22, 0xda, 0x22, 0xd4, 0x72, 0x8b, 0x7c, 0x0e, 0x89, 0xa6,
- 0xe6, 0x68, 0xaa, 0x33, 0xee, 0xe1, 0xec, 0xa5, 0xae, 0xbf, 0xf9, 0x0a, 0x81, 0xba, 0xcc, 0xca,
- 0xb8, 0x87, 0xc2, 0xc5, 0x59, 0xf0, 0x70, 0xf2, 0x46, 0x93, 0xe8, 0x69, 0x1a, 0xe2, 0xee, 0x9a,
- 0xf4, 0xa6, 0xb7, 0x8e, 0xb6, 0x74, 0xfc, 0xf8, 0xaf, 0xe3, 0x6b, 0x0a, 0xb2, 0x8a, 0x2f, 0x43,
- 0xcd, 0xc5, 0xa9, 0xbc, 0x23, 0x7d, 0x08, 0xa9, 0x20, 0x1b, 0x1e, 0xbd, 0x77, 0xae, 0x70, 0x07,
- 0x47, 0xf5, 0x9c, 0x1c, 0x94, 0xa4, 0x7c, 0x24, 0x84, 0xd8, 0xf1, 0x21, 0xfc, 0x41, 0x41, 0xc2,
- 0xf5, 0x5b, 0x1a, 0x3b, 0xc8, 0xfe, 0x07, 0xdf, 0xe8, 0xd2, 0xb8, 0x38, 0x75, 0x74, 0x5c, 0x44,
- 0x8a, 0x10, 0xff, 0xef, 0x8a, 0x70, 0x3a, 0x28, 0x02, 0x39, 0xe3, 0xcf, 0x14, 0x00, 0x1e, 0x42,
- 0x5e, 0x5a, 0xf6, 0x20, 0x49, 0x3e, 0xfd, 0x97, 0x8e, 0xc9, 0x0b, 0xf3, 0x59, 0x9e, 0x8e, 0x4c,
- 0x0b, 0x32, 0x27, 0xf1, 0xa8, 0x38, 0x61, 0x4e, 0xc4, 0xfe, 0xe6, 0x9c, 0xf8, 0x0a, 0xb6, 0x42,
- 0xd7, 0xa4, 0x17, 0x2b, 0x0d, 0xf1, 0x9e, 0xe6, 0xb4, 0x49, 0x4b, 0x7b, 0x6b, 0xba, 0x0a, 0x9b,
- 0x64, 0x44, 0xe0, 0xab, 0x2d, 0xb6, 0xe2, 0x00, 0x17, 0xe7, 0xb3, 0xfc, 0xb9, 0xc8, 0x58, 0x21,
- 0x57, 0x57, 0x52, 0x0f, 0x3c, 0x11, 0xf7, 0xdf, 0x52, 0x40, 0x47, 0x2f, 0x94, 0x13, 0x43, 0x78,
- 0x70, 0xf4, 0x82, 0x5d, 0x15, 0xc5, 0x5f, 0xb8, 0x43, 0x49, 0x2c, 0x8f, 0xe1, 0x9c, 0xb0, 0x78,
- 0x9c, 0xac, 0x8e, 0xe5, 0x0e, 0x40, 0xf0, 0x8e, 0x21, 0x61, 0xbc, 0x46, 0x1a, 0xcb, 0x7d, 0xc8,
- 0x14, 0x42, 0xaf, 0x1c, 0x7c, 0xbd, 0x93, 0x9d, 0x68, 0x36, 0xe5, 0x90, 0x29, 0xf1, 0x7c, 0x08,
- 0x69, 0x01, 0x3f, 0x77, 0x56, 0xbb, 0xbd, 0x05, 0x67, 0xc8, 0xb3, 0x88, 0xf8, 0xcc, 0x45, 0x7c,
- 0x92, 0x17, 0x93, 0xeb, 0x10, 0x2f, 0x65, 0x1f, 0x4e, 0xfc, 0xdc, 0x83, 0x4c, 0x55, 0xd3, 0x1f,
- 0x21, 0x47, 0xb0, 0xba, 0x5d, 0xc3, 0xe9, 0x22, 0xd3, 0x39, 0xd1, 0x57, 0xce, 0x3d, 0xa2, 0x8f,
- 0xf2, 0xdc, 0x6d, 0xca, 0x21, 0x09, 0xf7, 0x00, 0xb6, 0x31, 0x17, 0xaf, 0x3f, 0x32, 0xad, 0x51,
- 0x07, 0x35, 0x5b, 0x68, 0x25, 0xe1, 0x0e, 0x6c, 0x69, 0x51, 0x28, 0x61, 0x5d, 0x16, 0x73, 0x05,
- 0xc8, 0x62, 0x6a, 0x19, 0xe9, 0xc8, 0xe8, 0x39, 0x7c, 0xc3, 0x76, 0xa7, 0xc1, 0x49, 0xcc, 0x5c,
- 0x1b, 0x32, 0x12, 0x7a, 0xec, 0xf8, 0x4f, 0x31, 0x19, 0xe9, 0xc3, 0x13, 0xa3, 0x78, 0x0f, 0xce,
- 0x9a, 0xe8, 0xb1, 0xe3, 0x3e, 0xe4, 0xd4, 0x3e, 0xd2, 0x87, 0xe4, 0xa5, 0x17, 0xba, 0x0e, 0x22,
- 0x6a, 0x4e, 0x4e, 0x9a, 0x98, 0xda, 0x65, 0x7d, 0xe3, 0xbb, 0x38, 0x6c, 0xf8, 0xc3, 0x81, 0xbe,
- 0x05, 0x57, 0xca, 0xbc, 0xc2, 0xab, 0xca, 0x83, 0xaa, 0xa8, 0xd6, 0xa5, 0x8a, 0x54, 0x51, 0x2a,
- 0xfc, 0x5e, 0xe5, 0xa1, 0x58, 0x56, 0xeb, 0x52, 0xad, 0x2a, 0x0a, 0x95, 0x8f, 0x2a, 0x62, 0x39,
- 0xbd, 0xc6, 0x6c, 0x4d, 0xa6, 0x6c, 0x32, 0x24, 0xa2, 0xaf, 0xc2, 0x85, 0xc0, 0x52, 0xd8, 0xab,
- 0x88, 0x92, 0xa2, 0xd6, 0x14, 0x5e, 0x11, 0xd3, 0x14, 0x03, 0x93, 0x29, 0xbb, 0x8e, 0x65, 0xf4,
- 0x5b, 0xb0, 0x1d, 0xc2, 0x1d, 0x48, 0x35, 0x51, 0xaa, 0xd5, 0x6b, 0x04, 0x1a, 0x63, 0xce, 0x4e,
- 0xa6, 0x6c, 0x62, 0x21, 0xa6, 0x0b, 0xc0, 0x44, 0xd0, 0x92, 0x28, 0x28, 0x95, 0x03, 0x89, 0xc0,
- 0x4f, 0x31, 0xa9, 0xc9, 0x94, 0x85, 0x40, 0x4e, 0xef, 0xc0, 0xc5, 0x10, 0xfe, 0x2e, 0x2f, 0x49,
- 0xe2, 0x1e, 0x01, 0xc7, 0x99, 0xe4, 0x64, 0xca, 0x9e, 0x21, 0x42, 0xfa, 0x1d, 0xb8, 0x14, 0x20,
- 0xab, 0xbc, 0x70, 0x5f, 0x54, 0x54, 0xe1, 0x60, 0x7f, 0xbf, 0xa2, 0xec, 0x8b, 0x92, 0x92, 0x3e,
- 0xcd, 0x64, 0x26, 0x53, 0x36, 0x8d, 0x15, 0x81, 0x9c, 0xfe, 0x00, 0xd8, 0x23, 0x66, 0xbc, 0x70,
- 0x5f, 0x3a, 0xf8, 0x74, 0x4f, 0x2c, 0xdf, 0x11, 0x3d, 0xdb, 0x75, 0x66, 0x7b, 0x32, 0x65, 0xcf,
- 0x63, 0xed, 0x92, 0x92, 0x7e, 0xff, 0x18, 0x02, 0x59, 0x14, 0xc4, 0x4a, 0x55, 0x51, 0xf9, 0x52,
- 0x4d, 0x94, 0x04, 0x31, 0x7d, 0x86, 0xc9, 0x4e, 0xa6, 0x6c, 0x06, 0x6b, 0x89, 0x92, 0xe8, 0xe8,
- 0x9b, 0x70, 0x39, 0xb0, 0x97, 0xc4, 0xcf, 0x14, 0xb5, 0x26, 0x7e, 0x5c, 0x77, 0x55, 0x2e, 0xcd,
- 0x27, 0xe9, 0x0d, 0x1c, 0xb8, 0xab, 0xf1, 0x15, 0xae, 0x9c, 0x66, 0x21, 0x1d, 0xd8, 0xdd, 0x15,
- 0xf9, 0xb2, 0x28, 0xa7, 0x13, 0xb8, 0x32, 0x78, 0xc7, 0xc4, 0x9f, 0xfc, 0x94, 0x5b, 0x2b, 0xd5,
- 0x9f, 0x3e, 0xcf, 0x51, 0xcf, 0x9e, 0xe7, 0xa8, 0xdf, 0x9f, 0xe7, 0xa8, 0xef, 0x5f, 0xe4, 0xd6,
- 0x9e, 0xbd, 0xc8, 0xad, 0xfd, 0xf6, 0x22, 0xb7, 0xf6, 0xf0, 0xdd, 0x96, 0xe1, 0xb4, 0x07, 0x8d,
- 0x82, 0x6e, 0x75, 0x8b, 0xba, 0x65, 0x77, 0x2d, 0xbb, 0x68, 0x34, 0xf4, 0x6b, 0xfe, 0xaf, 0xd6,
- 0x35, 0xff, 0x5f, 0xeb, 0xed, 0x9b, 0xd7, 0xc2, 0xbf, 0x5b, 0xee, 0xfd, 0x62, 0x37, 0xd6, 0xbd,
- 0x41, 0x76, 0xe3, 0xcf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xb9, 0xb1, 0x1d, 0x54, 0x9d, 0x0d, 0x00,
- 0x00,
+ // 1370 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x57, 0x5f, 0x8f, 0xda, 0xd6,
+ 0x12, 0x5f, 0x13, 0xb2, 0x59, 0x86, 0x0d, 0xcb, 0x75, 0x48, 0xc2, 0x3a, 0x11, 0xf8, 0x3a, 0x52,
+ 0xee, 0xde, 0x7b, 0x1b, 0xe8, 0x26, 0x6a, 0x14, 0x45, 0x55, 0x5b, 0x63, 0xdc, 0x84, 0x64, 0xd7,
+ 0x4b, 0x8d, 0xb7, 0x6d, 0x22, 0xb5, 0x96, 0x31, 0x67, 0xc1, 0x0a, 0xf8, 0x50, 0x6c, 0x20, 0x54,
+ 0xaa, 0x54, 0xf5, 0x29, 0x45, 0x7d, 0xe8, 0x17, 0x40, 0xaa, 0x5a, 0xf5, 0xbb, 0x44, 0xea, 0x4b,
+ 0xa4, 0xbe, 0xf4, 0x09, 0xb5, 0xc9, 0x37, 0xe0, 0x13, 0x54, 0xf6, 0x39, 0x60, 0x9b, 0xcd, 0x92,
+ 0xf4, 0xdf, 0xdb, 0x39, 0x33, 0xbf, 0xf9, 0xcd, 0x9c, 0x99, 0xf1, 0x9c, 0x63, 0xb8, 0x61, 0xd5,
+ 0xcd, 0x26, 0x2e, 0xb6, 0xad, 0x66, 0xcb, 0x35, 0xdb, 0x16, 0xb2, 0x5d, 0xa7, 0xe8, 0xe0, 0x36,
+ 0xee, 0x18, 0x66, 0xcb, 0xb2, 0x51, 0x71, 0xb0, 0x1b, 0xde, 0x16, 0xba, 0x3d, 0xec, 0x62, 0xf6,
+ 0xdf, 0xbe, 0x51, 0x21, 0x6c, 0x54, 0x08, 0xa3, 0x06, 0xbb, 0xdc, 0x7f, 0x09, 0xaf, 0x89, 0x7b,
+ 0xa8, 0x68, 0x62, 0xdb, 0x46, 0xa6, 0x6b, 0x61, 0xdb, 0xa3, 0x0b, 0x76, 0x84, 0x8d, 0xbb, 0x12,
+ 0x86, 0xb6, 0x0c, 0xdb, 0x46, 0x6d, 0x1f, 0x47, 0x96, 0x14, 0x94, 0x69, 0xe2, 0x26, 0xf6, 0x97,
+ 0x45, 0x6f, 0x45, 0xa5, 0xdb, 0x4d, 0x8c, 0x9b, 0x6d, 0x54, 0xf4, 0x77, 0xf5, 0xfe, 0x51, 0xd1,
+ 0xb0, 0x47, 0x44, 0x25, 0xfc, 0x1c, 0x83, 0xa4, 0xe4, 0xc7, 0x56, 0x73, 0x0d, 0x17, 0xb1, 0x1c,
+ 0x6c, 0x38, 0xe8, 0xb3, 0x3e, 0xb2, 0x4d, 0x94, 0x65, 0x78, 0x66, 0x27, 0xae, 0x2e, 0xf6, 0xac,
+ 0x04, 0x5b, 0x47, 0x3d, 0xfc, 0x39, 0xb2, 0xf5, 0x05, 0x24, 0xe6, 0x41, 0x4a, 0xdc, 0x6c, 0x9a,
+ 0xbf, 0x30, 0x32, 0x3a, 0xed, 0xdb, 0xc2, 0x12, 0x40, 0x50, 0x53, 0x44, 0x52, 0x9b, 0x93, 0x0c,
+ 0x60, 0xcb, 0xc4, 0xb6, 0x83, 0x6c, 0xa7, 0xef, 0xe8, 0x8e, 0xe7, 0x33, 0x7b, 0x8a, 0x67, 0x76,
+ 0x92, 0xd7, 0x77, 0x0b, 0xaf, 0x4c, 0x57, 0x41, 0x9a, 0x5b, 0xfa, 0xc1, 0x86, 0xfd, 0x2e, 0x71,
+ 0x0a, 0x6a, 0xca, 0x8c, 0x60, 0x59, 0x04, 0x97, 0x8c, 0x76, 0x1b, 0x0f, 0xf5, 0x7e, 0xb7, 0x61,
+ 0xb8, 0x48, 0x37, 0x8e, 0x5c, 0xd4, 0xd3, 0xbb, 0x3d, 0xdc, 0xc5, 0x8e, 0xd1, 0xce, 0xc6, 0x79,
+ 0x66, 0x67, 0xa3, 0x74, 0x75, 0x36, 0xcd, 0x0b, 0x84, 0x70, 0x05, 0x58, 0x50, 0xb3, 0xbe, 0xf6,
+ 0xd0, 0x57, 0x8a, 0x9e, 0xae, 0x4a, 0x55, 0xb7, 0xe3, 0x4f, 0xbe, 0xcb, 0xaf, 0x09, 0xdf, 0x33,
+ 0x90, 0x8a, 0xc6, 0xca, 0xde, 0x03, 0xe8, 0xf6, 0xeb, 0x6d, 0xcb, 0xd4, 0x1f, 0xa1, 0x91, 0x9f,
+ 0xda, 0xe4, 0xf5, 0x4c, 0x81, 0x14, 0xa6, 0x30, 0x2f, 0x4c, 0x41, 0xb4, 0x47, 0xa5, 0xf3, 0xb3,
+ 0x69, 0xfe, 0x5f, 0x24, 0x88, 0xc0, 0x42, 0x50, 0x13, 0x64, 0x73, 0x1f, 0x8d, 0x58, 0x1e, 0x92,
+ 0x0d, 0x6b, 0x80, 0x7a, 0x8e, 0x75, 0x64, 0xa1, 0x9e, 0x5f, 0x84, 0x84, 0x1a, 0x16, 0xb1, 0x97,
+ 0x21, 0xe1, 0x5a, 0x1d, 0xe4, 0xb8, 0x46, 0xa7, 0xeb, 0xe7, 0x37, 0xae, 0x06, 0x02, 0x1a, 0xe4,
+ 0x57, 0x31, 0x58, 0xbf, 0x8b, 0x8c, 0x06, 0xea, 0xad, 0xac, 0x7a, 0x84, 0x2a, 0xb6, 0x44, 0xe5,
+ 0x69, 0x1d, 0xab, 0x69, 0x1b, 0x6e, 0xbf, 0x47, 0x0a, 0xb9, 0xa9, 0x06, 0x02, 0xf6, 0x10, 0x52,
+ 0x36, 0x1a, 0xea, 0xa1, 0x83, 0xc7, 0x57, 0x1c, 0x7c, 0x7b, 0x36, 0xcd, 0x9f, 0x27, 0x07, 0x8f,
+ 0x5a, 0x09, 0xea, 0xa6, 0x8d, 0x86, 0xd5, 0xc5, 0xf9, 0x25, 0xd8, 0xf2, 0x00, 0xe1, 0x1c, 0x9c,
+ 0xf6, 0x72, 0x10, 0x6e, 0x88, 0x25, 0x80, 0xa0, 0x7a, 0x91, 0x94, 0x03, 0x01, 0x4d, 0xc2, 0x4f,
+ 0x31, 0xd8, 0xdc, 0xb7, 0x9c, 0x3a, 0x6a, 0x19, 0x03, 0x0b, 0xf7, 0x7b, 0xec, 0x2e, 0x24, 0x48,
+ 0xf3, 0xe9, 0x56, 0xc3, 0xcf, 0x45, 0xa2, 0x94, 0x99, 0x4d, 0xf3, 0x69, 0xda, 0x66, 0x73, 0x95,
+ 0xa0, 0x6e, 0x90, 0x75, 0xa5, 0x11, 0xc9, 0x5e, 0x6c, 0x29, 0x7b, 0x3d, 0x38, 0xbb, 0x48, 0x87,
+ 0x8e, 0xed, 0x79, 0xb3, 0xdf, 0x78, 0x8d, 0x66, 0xaf, 0xcd, 0xed, 0x44, 0xbb, 0x51, 0x36, 0x5c,
+ 0xa3, 0x94, 0x9d, 0x4d, 0xf3, 0x19, 0x12, 0x47, 0x84, 0x53, 0x50, 0x37, 0x17, 0xfb, 0x03, 0x7b,
+ 0xc9, 0xa7, 0x3b, 0xc4, 0x34, 0xe9, 0x7f, 0x9f, 0x4f, 0x77, 0x88, 0xc3, 0x3e, 0xb5, 0x21, 0xa6,
+ 0xd9, 0x7c, 0xca, 0x40, 0x7a, 0x99, 0x22, 0xda, 0x22, 0xcc, 0x72, 0x8b, 0x7c, 0x0a, 0x89, 0x86,
+ 0xe1, 0x1a, 0xba, 0x3b, 0xea, 0x92, 0xec, 0xa5, 0xae, 0xff, 0xff, 0x35, 0x02, 0xf5, 0x98, 0xb5,
+ 0x51, 0x17, 0x85, 0x8b, 0xb3, 0xe0, 0x11, 0xd4, 0x8d, 0x06, 0xd5, 0xb3, 0x2c, 0xc4, 0xbd, 0x35,
+ 0xed, 0x4d, 0x7f, 0x1d, 0x6d, 0xe9, 0xf8, 0xcb, 0xbf, 0x8e, 0x2f, 0x19, 0xc8, 0x6a, 0x73, 0x19,
+ 0x6a, 0x2c, 0x4e, 0xe5, 0x1f, 0xe9, 0x3d, 0x48, 0x05, 0xd9, 0xf0, 0xe9, 0xfd, 0x73, 0x85, 0x3b,
+ 0x38, 0xaa, 0x17, 0xd4, 0xa0, 0x24, 0xe5, 0x63, 0x21, 0xc4, 0x5e, 0x1e, 0xc2, 0x6f, 0x0c, 0x24,
+ 0x3c, 0xbf, 0xa5, 0x91, 0x8b, 0x9c, 0xbf, 0xf0, 0x8d, 0x2e, 0x8d, 0x8b, 0x53, 0xc7, 0xc7, 0x45,
+ 0xa4, 0x08, 0xf1, 0x7f, 0xae, 0x08, 0xa7, 0x83, 0x22, 0xd0, 0x33, 0xfe, 0xc8, 0x00, 0x90, 0x21,
+ 0xe4, 0xa7, 0x65, 0x0f, 0x92, 0xf4, 0xd3, 0x7f, 0xe5, 0x98, 0xbc, 0x30, 0x9b, 0xe6, 0xd9, 0xc8,
+ 0xb4, 0xa0, 0x73, 0x92, 0x8c, 0x8a, 0x13, 0xe6, 0x44, 0xec, 0x4f, 0xce, 0x89, 0x2f, 0x60, 0x2b,
+ 0x74, 0x4d, 0xfa, 0xb1, 0xb2, 0x10, 0xef, 0x1a, 0x6e, 0x8b, 0xb6, 0xb4, 0xbf, 0x66, 0xab, 0xb0,
+ 0x49, 0x47, 0x04, 0xb9, 0xda, 0x62, 0x2b, 0x0e, 0x70, 0x71, 0x36, 0xcd, 0x9f, 0x8b, 0x8c, 0x15,
+ 0x7a, 0x75, 0x25, 0xcd, 0xc0, 0x13, 0x75, 0xff, 0x35, 0x03, 0x6c, 0xf4, 0x42, 0x39, 0x31, 0x84,
+ 0x07, 0xc7, 0x2f, 0xd8, 0x55, 0x51, 0xfc, 0x81, 0x3b, 0x94, 0xc6, 0xf2, 0x18, 0xce, 0x49, 0x8b,
+ 0xc7, 0xc9, 0xea, 0x58, 0xee, 0x00, 0x04, 0xef, 0x18, 0x1a, 0xc6, 0x7f, 0x68, 0x63, 0x79, 0x0f,
+ 0x99, 0x42, 0xe8, 0x95, 0x43, 0xae, 0x77, 0xba, 0x93, 0xed, 0x86, 0x1a, 0x32, 0xa5, 0x9e, 0x8f,
+ 0x20, 0x2d, 0x91, 0xe7, 0xce, 0x6a, 0xb7, 0xb7, 0xe0, 0x0c, 0x7d, 0x16, 0x51, 0x9f, 0xb9, 0x88,
+ 0x4f, 0xfa, 0x62, 0xf2, 0x1c, 0x92, 0xa5, 0x3a, 0x87, 0x53, 0x3f, 0xf7, 0x20, 0x53, 0x35, 0xcc,
+ 0x47, 0xc8, 0x95, 0x70, 0xa7, 0x63, 0xb9, 0x1d, 0x64, 0xbb, 0x27, 0xfa, 0xca, 0x79, 0x47, 0x9c,
+ 0xa3, 0x7c, 0x77, 0x9b, 0x6a, 0x48, 0x22, 0x3c, 0x80, 0x6d, 0xc2, 0x25, 0x9a, 0x8f, 0x6c, 0x3c,
+ 0x6c, 0xa3, 0x46, 0x13, 0xad, 0x24, 0xdc, 0x81, 0x2d, 0x23, 0x0a, 0xa5, 0xac, 0xcb, 0x62, 0xa1,
+ 0x00, 0x59, 0x42, 0xad, 0x22, 0x13, 0x59, 0x5d, 0x57, 0xac, 0x3b, 0xde, 0x34, 0x38, 0x89, 0x59,
+ 0x68, 0x41, 0x46, 0x41, 0x8f, 0xdd, 0xf9, 0x53, 0x4c, 0x45, 0xe6, 0xe0, 0xc4, 0x28, 0xde, 0x86,
+ 0xb3, 0x36, 0x7a, 0xec, 0x7a, 0x0f, 0x39, 0xbd, 0x87, 0xcc, 0x01, 0x7d, 0xe9, 0x85, 0xae, 0x83,
+ 0x88, 0x5a, 0x50, 0x93, 0x36, 0xa1, 0xf6, 0x58, 0xff, 0xf7, 0x4d, 0x1c, 0x36, 0xe6, 0xc3, 0x81,
+ 0xbd, 0x05, 0x57, 0xca, 0xa2, 0x26, 0xea, 0xda, 0x83, 0xaa, 0xac, 0x1f, 0x2a, 0x15, 0xa5, 0xa2,
+ 0x55, 0xc4, 0xbd, 0xca, 0x43, 0xb9, 0xac, 0x1f, 0x2a, 0xb5, 0xaa, 0x2c, 0x55, 0xde, 0xaf, 0xc8,
+ 0xe5, 0xf4, 0x1a, 0xb7, 0x35, 0x9e, 0xf0, 0xc9, 0x90, 0x88, 0xbd, 0x0a, 0x17, 0x02, 0x4b, 0x69,
+ 0xaf, 0x22, 0x2b, 0x9a, 0x5e, 0xd3, 0x44, 0x4d, 0x4e, 0x33, 0x1c, 0x8c, 0x27, 0xfc, 0x3a, 0x91,
+ 0xb1, 0x6f, 0xc0, 0x76, 0x08, 0x77, 0xa0, 0xd4, 0x64, 0xa5, 0x76, 0x58, 0xa3, 0xd0, 0x18, 0x77,
+ 0x76, 0x3c, 0xe1, 0x13, 0x0b, 0x31, 0x5b, 0x00, 0x2e, 0x82, 0x56, 0x64, 0x49, 0xab, 0x1c, 0x28,
+ 0x14, 0x7e, 0x8a, 0x4b, 0x8d, 0x27, 0x3c, 0x04, 0x72, 0x76, 0x07, 0x2e, 0x86, 0xf0, 0x77, 0x45,
+ 0x45, 0x91, 0xf7, 0x28, 0x38, 0xce, 0x25, 0xc7, 0x13, 0xfe, 0x0c, 0x15, 0xb2, 0x6f, 0xc1, 0xa5,
+ 0x00, 0x59, 0x15, 0xa5, 0xfb, 0xb2, 0xa6, 0x4b, 0x07, 0xfb, 0xfb, 0x15, 0x6d, 0x5f, 0x56, 0xb4,
+ 0xf4, 0x69, 0x2e, 0x33, 0x9e, 0xf0, 0x69, 0xa2, 0x08, 0xe4, 0xec, 0xbb, 0xc0, 0x1f, 0x33, 0x13,
+ 0xa5, 0xfb, 0xca, 0xc1, 0x47, 0x7b, 0x72, 0xf9, 0x8e, 0xec, 0xdb, 0xae, 0x73, 0xdb, 0xe3, 0x09,
+ 0x7f, 0x9e, 0x68, 0x97, 0x94, 0xec, 0x3b, 0x2f, 0x21, 0x50, 0x65, 0x49, 0xae, 0x54, 0x35, 0x5d,
+ 0x2c, 0xd5, 0x64, 0x45, 0x92, 0xd3, 0x67, 0xb8, 0xec, 0x78, 0xc2, 0x67, 0x88, 0x96, 0x2a, 0xa9,
+ 0x8e, 0xbd, 0x09, 0x97, 0x03, 0x7b, 0x45, 0xfe, 0x58, 0xd3, 0x6b, 0xf2, 0x07, 0x87, 0x9e, 0xca,
+ 0xa3, 0xf9, 0x30, 0xbd, 0x41, 0x02, 0xf7, 0x34, 0x73, 0x85, 0x27, 0x67, 0x79, 0x48, 0x07, 0x76,
+ 0x77, 0x65, 0xb1, 0x2c, 0xab, 0xe9, 0x04, 0xa9, 0x0c, 0xd9, 0x71, 0xf1, 0x27, 0x3f, 0xe4, 0xd6,
+ 0x4a, 0x9f, 0x3c, 0x7d, 0x9e, 0x63, 0x9e, 0x3d, 0xcf, 0x31, 0xbf, 0x3e, 0xcf, 0x31, 0xdf, 0xbe,
+ 0xc8, 0xad, 0x3d, 0x7b, 0x91, 0x5b, 0xfb, 0xe5, 0x45, 0x6e, 0xed, 0xa1, 0xd4, 0xb4, 0xdc, 0x56,
+ 0xbf, 0x5e, 0x30, 0x71, 0xa7, 0x68, 0x62, 0xa7, 0x83, 0x9d, 0xa2, 0x55, 0x37, 0xaf, 0x35, 0x71,
+ 0xb1, 0x83, 0x1b, 0xfd, 0x36, 0x72, 0xc8, 0x2f, 0xd7, 0xb5, 0xf9, 0x3f, 0xd7, 0x9b, 0x37, 0xaf,
+ 0x85, 0x7f, 0xbb, 0xbc, 0x7b, 0xc6, 0xa9, 0xaf, 0xfb, 0x03, 0xed, 0xc6, 0xef, 0x01, 0x00, 0x00,
+ 0xff, 0xff, 0x36, 0x5b, 0x28, 0xf2, 0xa5, 0x0d, 0x00, 0x00,
}
func (m *ClientState) Marshal() (dAtA []byte, err error) {
diff --git a/light-clients/06-solomachine/types/solomachine_test.go b/modules/light-clients/06-solomachine/types/solomachine_test.go
similarity index 94%
rename from light-clients/06-solomachine/types/solomachine_test.go
rename to modules/light-clients/06-solomachine/types/solomachine_test.go
index deec20be..42d1a384 100644
--- a/light-clients/06-solomachine/types/solomachine_test.go
+++ b/modules/light-clients/06-solomachine/types/solomachine_test.go
@@ -12,9 +12,9 @@ import (
cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types"
"github.com/cosmos/cosmos-sdk/testutil/testdata"
sdk "github.com/cosmos/cosmos-sdk/types"
- host "github.com/cosmos/ibc-go/core/24-host"
- "github.com/cosmos/ibc-go/core/exported"
- "github.com/cosmos/ibc-go/light-clients/06-solomachine/types"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
+ "github.com/cosmos/ibc-go/modules/core/exported"
+ "github.com/cosmos/ibc-go/modules/light-clients/06-solomachine/types"
ibctesting "github.com/cosmos/ibc-go/testing"
)
diff --git a/light-clients/06-solomachine/types/update.go b/modules/light-clients/06-solomachine/types/update.go
similarity index 96%
rename from light-clients/06-solomachine/types/update.go
rename to modules/light-clients/06-solomachine/types/update.go
index 5072d3b9..fcd54954 100644
--- a/light-clients/06-solomachine/types/update.go
+++ b/modules/light-clients/06-solomachine/types/update.go
@@ -4,8 +4,8 @@ import (
"github.com/cosmos/cosmos-sdk/codec"
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- "github.com/cosmos/ibc-go/core/exported"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ "github.com/cosmos/ibc-go/modules/core/exported"
)
// CheckHeaderAndUpdateState checks if the provided header is valid and updates
diff --git a/light-clients/06-solomachine/types/update_test.go b/modules/light-clients/06-solomachine/types/update_test.go
similarity index 95%
rename from light-clients/06-solomachine/types/update_test.go
rename to modules/light-clients/06-solomachine/types/update_test.go
index e6170351..e9f3db3a 100644
--- a/light-clients/06-solomachine/types/update_test.go
+++ b/modules/light-clients/06-solomachine/types/update_test.go
@@ -3,9 +3,9 @@ package types_test
import (
codectypes "github.com/cosmos/cosmos-sdk/codec/types"
sdk "github.com/cosmos/cosmos-sdk/types"
- "github.com/cosmos/ibc-go/core/exported"
- "github.com/cosmos/ibc-go/light-clients/06-solomachine/types"
- ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
+ "github.com/cosmos/ibc-go/modules/core/exported"
+ "github.com/cosmos/ibc-go/modules/light-clients/06-solomachine/types"
+ ibctmtypes "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types"
ibctesting "github.com/cosmos/ibc-go/testing"
)
diff --git a/light-clients/07-tendermint/doc.go b/modules/light-clients/07-tendermint/doc.go
similarity index 100%
rename from light-clients/07-tendermint/doc.go
rename to modules/light-clients/07-tendermint/doc.go
diff --git a/light-clients/07-tendermint/module.go b/modules/light-clients/07-tendermint/module.go
similarity index 62%
rename from light-clients/07-tendermint/module.go
rename to modules/light-clients/07-tendermint/module.go
index 0fe57fa9..07a13a7a 100644
--- a/light-clients/07-tendermint/module.go
+++ b/modules/light-clients/07-tendermint/module.go
@@ -1,7 +1,7 @@
package tendermint
import (
- "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
+ "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types"
)
// Name returns the IBC client name
diff --git a/light-clients/07-tendermint/types/client_state.go b/modules/light-clients/07-tendermint/types/client_state.go
similarity index 97%
rename from light-clients/07-tendermint/types/client_state.go
rename to modules/light-clients/07-tendermint/types/client_state.go
index 75503454..8a21ef9a 100644
--- a/light-clients/07-tendermint/types/client_state.go
+++ b/modules/light-clients/07-tendermint/types/client_state.go
@@ -10,12 +10,12 @@ import (
"github.com/cosmos/cosmos-sdk/codec"
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- connectiontypes "github.com/cosmos/ibc-go/core/03-connection/types"
- channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
- commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
- host "github.com/cosmos/ibc-go/core/24-host"
- "github.com/cosmos/ibc-go/core/exported"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ connectiontypes "github.com/cosmos/ibc-go/modules/core/03-connection/types"
+ channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
+ commitmenttypes "github.com/cosmos/ibc-go/modules/core/23-commitment/types"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
+ "github.com/cosmos/ibc-go/modules/core/exported"
)
var _ exported.ClientState = (*ClientState)(nil)
diff --git a/light-clients/07-tendermint/types/client_state_test.go b/modules/light-clients/07-tendermint/types/client_state_test.go
similarity index 98%
rename from light-clients/07-tendermint/types/client_state_test.go
rename to modules/light-clients/07-tendermint/types/client_state_test.go
index 84f98551..feb1e7db 100644
--- a/light-clients/07-tendermint/types/client_state_test.go
+++ b/modules/light-clients/07-tendermint/types/client_state_test.go
@@ -5,12 +5,12 @@ import (
ics23 "github.com/confio/ics23/go"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
- commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
- host "github.com/cosmos/ibc-go/core/24-host"
- "github.com/cosmos/ibc-go/core/exported"
- "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
+ commitmenttypes "github.com/cosmos/ibc-go/modules/core/23-commitment/types"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
+ "github.com/cosmos/ibc-go/modules/core/exported"
+ "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types"
ibctesting "github.com/cosmos/ibc-go/testing"
ibcmock "github.com/cosmos/ibc-go/testing/mock"
)
diff --git a/light-clients/07-tendermint/types/codec.go b/modules/light-clients/07-tendermint/types/codec.go
similarity index 92%
rename from light-clients/07-tendermint/types/codec.go
rename to modules/light-clients/07-tendermint/types/codec.go
index 33911b81..8b30076c 100644
--- a/light-clients/07-tendermint/types/codec.go
+++ b/modules/light-clients/07-tendermint/types/codec.go
@@ -2,7 +2,7 @@ package types
import (
codectypes "github.com/cosmos/cosmos-sdk/codec/types"
- "github.com/cosmos/ibc-go/core/exported"
+ "github.com/cosmos/ibc-go/modules/core/exported"
)
// RegisterInterfaces registers the tendermint concrete client-related
diff --git a/light-clients/07-tendermint/types/consensus_state.go b/modules/light-clients/07-tendermint/types/consensus_state.go
similarity index 89%
rename from light-clients/07-tendermint/types/consensus_state.go
rename to modules/light-clients/07-tendermint/types/consensus_state.go
index 775b0785..c4a92fed 100644
--- a/light-clients/07-tendermint/types/consensus_state.go
+++ b/modules/light-clients/07-tendermint/types/consensus_state.go
@@ -7,9 +7,9 @@ import (
tmtypes "github.com/tendermint/tendermint/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
- "github.com/cosmos/ibc-go/core/exported"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ commitmenttypes "github.com/cosmos/ibc-go/modules/core/23-commitment/types"
+ "github.com/cosmos/ibc-go/modules/core/exported"
)
// NewConsensusState creates a new ConsensusState instance.
diff --git a/light-clients/07-tendermint/types/consensus_state_test.go b/modules/light-clients/07-tendermint/types/consensus_state_test.go
similarity index 89%
rename from light-clients/07-tendermint/types/consensus_state_test.go
rename to modules/light-clients/07-tendermint/types/consensus_state_test.go
index 5bcf8ec5..2664071d 100644
--- a/light-clients/07-tendermint/types/consensus_state_test.go
+++ b/modules/light-clients/07-tendermint/types/consensus_state_test.go
@@ -3,9 +3,9 @@ package types_test
import (
"time"
- commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
- "github.com/cosmos/ibc-go/core/exported"
- "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
+ commitmenttypes "github.com/cosmos/ibc-go/modules/core/23-commitment/types"
+ "github.com/cosmos/ibc-go/modules/core/exported"
+ "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types"
)
func (suite *TendermintTestSuite) TestConsensusStateValidateBasic() {
diff --git a/light-clients/07-tendermint/types/errors.go b/modules/light-clients/07-tendermint/types/errors.go
similarity index 100%
rename from light-clients/07-tendermint/types/errors.go
rename to modules/light-clients/07-tendermint/types/errors.go
diff --git a/light-clients/07-tendermint/types/fraction.go b/modules/light-clients/07-tendermint/types/fraction.go
similarity index 100%
rename from light-clients/07-tendermint/types/fraction.go
rename to modules/light-clients/07-tendermint/types/fraction.go
diff --git a/light-clients/07-tendermint/types/genesis.go b/modules/light-clients/07-tendermint/types/genesis.go
similarity index 81%
rename from light-clients/07-tendermint/types/genesis.go
rename to modules/light-clients/07-tendermint/types/genesis.go
index 2c69d35d..9661b53e 100644
--- a/light-clients/07-tendermint/types/genesis.go
+++ b/modules/light-clients/07-tendermint/types/genesis.go
@@ -2,8 +2,8 @@ package types
import (
sdk "github.com/cosmos/cosmos-sdk/types"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- "github.com/cosmos/ibc-go/core/exported"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ "github.com/cosmos/ibc-go/modules/core/exported"
)
// ExportMetadata exports all the processed times in the client store so they can be included in clients genesis
diff --git a/light-clients/07-tendermint/types/genesis_test.go b/modules/light-clients/07-tendermint/types/genesis_test.go
similarity index 89%
rename from light-clients/07-tendermint/types/genesis_test.go
rename to modules/light-clients/07-tendermint/types/genesis_test.go
index de7ce828..9699c669 100644
--- a/light-clients/07-tendermint/types/genesis_test.go
+++ b/modules/light-clients/07-tendermint/types/genesis_test.go
@@ -4,9 +4,9 @@ import (
"time"
sdk "github.com/cosmos/cosmos-sdk/types"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
- "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ commitmenttypes "github.com/cosmos/ibc-go/modules/core/23-commitment/types"
+ "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types"
)
func (suite *TendermintTestSuite) TestExportMetadata() {
diff --git a/light-clients/07-tendermint/types/header.go b/modules/light-clients/07-tendermint/types/header.go
similarity index 93%
rename from light-clients/07-tendermint/types/header.go
rename to modules/light-clients/07-tendermint/types/header.go
index b346e6b2..9bd59708 100644
--- a/light-clients/07-tendermint/types/header.go
+++ b/modules/light-clients/07-tendermint/types/header.go
@@ -7,9 +7,9 @@ import (
tmtypes "github.com/tendermint/tendermint/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
- "github.com/cosmos/ibc-go/core/exported"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ commitmenttypes "github.com/cosmos/ibc-go/modules/core/23-commitment/types"
+ "github.com/cosmos/ibc-go/modules/core/exported"
)
var _ exported.Header = &Header{}
diff --git a/light-clients/07-tendermint/types/header_test.go b/modules/light-clients/07-tendermint/types/header_test.go
similarity index 91%
rename from light-clients/07-tendermint/types/header_test.go
rename to modules/light-clients/07-tendermint/types/header_test.go
index a1a3222d..487b2794 100644
--- a/light-clients/07-tendermint/types/header_test.go
+++ b/modules/light-clients/07-tendermint/types/header_test.go
@@ -5,9 +5,9 @@ import (
tmprotocrypto "github.com/tendermint/tendermint/proto/tendermint/crypto"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- "github.com/cosmos/ibc-go/core/exported"
- "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ "github.com/cosmos/ibc-go/modules/core/exported"
+ "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types"
)
func (suite *TendermintTestSuite) TestGetHeight() {
diff --git a/light-clients/07-tendermint/types/misbehaviour.go b/modules/light-clients/07-tendermint/types/misbehaviour.go
similarity index 96%
rename from light-clients/07-tendermint/types/misbehaviour.go
rename to modules/light-clients/07-tendermint/types/misbehaviour.go
index cc6c86b3..51e59612 100644
--- a/light-clients/07-tendermint/types/misbehaviour.go
+++ b/modules/light-clients/07-tendermint/types/misbehaviour.go
@@ -8,9 +8,9 @@ import (
tmtypes "github.com/tendermint/tendermint/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- host "github.com/cosmos/ibc-go/core/24-host"
- "github.com/cosmos/ibc-go/core/exported"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
+ "github.com/cosmos/ibc-go/modules/core/exported"
)
var _ exported.Misbehaviour = &Misbehaviour{}
diff --git a/light-clients/07-tendermint/types/misbehaviour_handle.go b/modules/light-clients/07-tendermint/types/misbehaviour_handle.go
similarity index 97%
rename from light-clients/07-tendermint/types/misbehaviour_handle.go
rename to modules/light-clients/07-tendermint/types/misbehaviour_handle.go
index c5380527..0622372a 100644
--- a/light-clients/07-tendermint/types/misbehaviour_handle.go
+++ b/modules/light-clients/07-tendermint/types/misbehaviour_handle.go
@@ -8,8 +8,8 @@ import (
"github.com/cosmos/cosmos-sdk/codec"
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- "github.com/cosmos/ibc-go/core/exported"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ "github.com/cosmos/ibc-go/modules/core/exported"
)
// CheckMisbehaviourAndUpdateState determines whether or not two conflicting
diff --git a/light-clients/07-tendermint/types/misbehaviour_handle_test.go b/modules/light-clients/07-tendermint/types/misbehaviour_handle_test.go
similarity index 98%
rename from light-clients/07-tendermint/types/misbehaviour_handle_test.go
rename to modules/light-clients/07-tendermint/types/misbehaviour_handle_test.go
index e5b94da3..da1cd6fb 100644
--- a/light-clients/07-tendermint/types/misbehaviour_handle_test.go
+++ b/modules/light-clients/07-tendermint/types/misbehaviour_handle_test.go
@@ -7,10 +7,10 @@ import (
"github.com/tendermint/tendermint/crypto/tmhash"
tmtypes "github.com/tendermint/tendermint/types"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
- "github.com/cosmos/ibc-go/core/exported"
- "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ commitmenttypes "github.com/cosmos/ibc-go/modules/core/23-commitment/types"
+ "github.com/cosmos/ibc-go/modules/core/exported"
+ "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types"
ibctesting "github.com/cosmos/ibc-go/testing"
ibctestingmock "github.com/cosmos/ibc-go/testing/mock"
)
diff --git a/light-clients/07-tendermint/types/misbehaviour_test.go b/modules/light-clients/07-tendermint/types/misbehaviour_test.go
similarity index 97%
rename from light-clients/07-tendermint/types/misbehaviour_test.go
rename to modules/light-clients/07-tendermint/types/misbehaviour_test.go
index 4acb085e..1b67b729 100644
--- a/light-clients/07-tendermint/types/misbehaviour_test.go
+++ b/modules/light-clients/07-tendermint/types/misbehaviour_test.go
@@ -7,9 +7,9 @@ import (
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
tmtypes "github.com/tendermint/tendermint/types"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- "github.com/cosmos/ibc-go/core/exported"
- "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ "github.com/cosmos/ibc-go/modules/core/exported"
+ "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types"
ibctesting "github.com/cosmos/ibc-go/testing"
ibctestingmock "github.com/cosmos/ibc-go/testing/mock"
)
diff --git a/light-clients/07-tendermint/types/proposal_handle.go b/modules/light-clients/07-tendermint/types/proposal_handle.go
similarity index 97%
rename from light-clients/07-tendermint/types/proposal_handle.go
rename to modules/light-clients/07-tendermint/types/proposal_handle.go
index 080ee4c2..a4ccaea9 100644
--- a/light-clients/07-tendermint/types/proposal_handle.go
+++ b/modules/light-clients/07-tendermint/types/proposal_handle.go
@@ -6,8 +6,8 @@ import (
"github.com/cosmos/cosmos-sdk/codec"
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- "github.com/cosmos/ibc-go/core/exported"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ "github.com/cosmos/ibc-go/modules/core/exported"
)
// CheckSubstituteAndUpdateState will try to update the client with the state of the
diff --git a/light-clients/07-tendermint/types/proposal_handle_test.go b/modules/light-clients/07-tendermint/types/proposal_handle_test.go
similarity index 98%
rename from light-clients/07-tendermint/types/proposal_handle_test.go
rename to modules/light-clients/07-tendermint/types/proposal_handle_test.go
index 5baf621a..c9d3e71e 100644
--- a/light-clients/07-tendermint/types/proposal_handle_test.go
+++ b/modules/light-clients/07-tendermint/types/proposal_handle_test.go
@@ -3,9 +3,9 @@ package types_test
import (
"time"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- "github.com/cosmos/ibc-go/core/exported"
- "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ "github.com/cosmos/ibc-go/modules/core/exported"
+ "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types"
ibctesting "github.com/cosmos/ibc-go/testing"
)
diff --git a/light-clients/07-tendermint/types/store.go b/modules/light-clients/07-tendermint/types/store.go
similarity index 95%
rename from light-clients/07-tendermint/types/store.go
rename to modules/light-clients/07-tendermint/types/store.go
index 4c62eb95..ea2fce2c 100644
--- a/light-clients/07-tendermint/types/store.go
+++ b/modules/light-clients/07-tendermint/types/store.go
@@ -6,9 +6,9 @@ import (
"github.com/cosmos/cosmos-sdk/codec"
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- host "github.com/cosmos/ibc-go/core/24-host"
- "github.com/cosmos/ibc-go/core/exported"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
+ "github.com/cosmos/ibc-go/modules/core/exported"
)
// KeyProcessedTime is appended to consensus state key to store the processed time
diff --git a/light-clients/07-tendermint/types/store_test.go b/modules/light-clients/07-tendermint/types/store_test.go
similarity index 90%
rename from light-clients/07-tendermint/types/store_test.go
rename to modules/light-clients/07-tendermint/types/store_test.go
index 3bf23500..ed9dc5d9 100644
--- a/light-clients/07-tendermint/types/store_test.go
+++ b/modules/light-clients/07-tendermint/types/store_test.go
@@ -1,12 +1,12 @@
package types_test
import (
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
- host "github.com/cosmos/ibc-go/core/24-host"
- "github.com/cosmos/ibc-go/core/exported"
- solomachinetypes "github.com/cosmos/ibc-go/light-clients/06-solomachine/types"
- "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
+ "github.com/cosmos/ibc-go/modules/core/exported"
+ solomachinetypes "github.com/cosmos/ibc-go/modules/light-clients/06-solomachine/types"
+ "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types"
ibctesting "github.com/cosmos/ibc-go/testing"
)
diff --git a/light-clients/07-tendermint/types/tendermint.pb.go b/modules/light-clients/07-tendermint/types/tendermint.pb.go
similarity index 87%
rename from light-clients/07-tendermint/types/tendermint.pb.go
rename to modules/light-clients/07-tendermint/types/tendermint.pb.go
index aa53fb70..fac455d6 100644
--- a/light-clients/07-tendermint/types/tendermint.pb.go
+++ b/modules/light-clients/07-tendermint/types/tendermint.pb.go
@@ -6,8 +6,8 @@ package types
import (
fmt "fmt"
_go "github.com/confio/ics23/go"
- types "github.com/cosmos/ibc-go/core/02-client/types"
- types1 "github.com/cosmos/ibc-go/core/23-commitment/types"
+ types "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ types1 "github.com/cosmos/ibc-go/modules/core/23-commitment/types"
_ "github.com/gogo/protobuf/gogoproto"
proto "github.com/gogo/protobuf/proto"
github_com_gogo_protobuf_types "github.com/gogo/protobuf/types"
@@ -324,75 +324,75 @@ func init() {
}
var fileDescriptor_868940ee8c1cf959 = []byte{
- // 1080 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x56, 0x41, 0x6f, 0xe3, 0x44,
+ // 1088 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x56, 0xcf, 0x6f, 0xe3, 0x44,
0x14, 0x6e, 0xda, 0xb2, 0x4d, 0x26, 0xe9, 0xb6, 0x78, 0x4b, 0x37, 0x2d, 0xd9, 0x38, 0x98, 0x15,
- 0x0a, 0x2b, 0xd5, 0x26, 0x59, 0x24, 0xa4, 0x1e, 0x90, 0x70, 0x17, 0xd4, 0x22, 0x56, 0xaa, 0xdc,
- 0x05, 0x24, 0x24, 0x64, 0x4d, 0xec, 0x89, 0x3d, 0x5a, 0xdb, 0x63, 0x3c, 0x93, 0xd0, 0xf2, 0x0b,
- 0xe0, 0xb6, 0xdc, 0x38, 0x70, 0xe0, 0xc4, 0x6f, 0xd9, 0x63, 0x8f, 0x9c, 0x0c, 0x6a, 0xef, 0x1c,
- 0x72, 0xe4, 0x84, 0x3c, 0x33, 0x76, 0x26, 0x6d, 0x57, 0x65, 0xb9, 0x44, 0xf3, 0xde, 0xfb, 0xde,
- 0xf7, 0x65, 0xde, 0xbc, 0x79, 0x63, 0x30, 0xc0, 0x23, 0x2f, 0x20, 0x56, 0x84, 0x83, 0x90, 0x79,
- 0x11, 0x46, 0x09, 0xa3, 0x16, 0x43, 0x89, 0x8f, 0xb2, 0x18, 0x27, 0xcc, 0x9a, 0x0e, 0x14, 0xcb,
- 0x4c, 0x33, 0xc2, 0x88, 0xd6, 0xe3, 0x29, 0xa6, 0x9a, 0x62, 0x2a, 0xa0, 0xe9, 0x60, 0xb7, 0xa7,
- 0x30, 0xb0, 0xb3, 0x14, 0x51, 0x6b, 0x0a, 0x23, 0xec, 0x43, 0x46, 0x32, 0xc1, 0xb1, 0xdb, 0xb9,
- 0x86, 0xe0, 0xbf, 0x32, 0x7a, 0xcf, 0x23, 0xc9, 0x18, 0x13, 0x2b, 0xcd, 0x08, 0x19, 0x97, 0xce,
- 0x6e, 0x40, 0x48, 0x10, 0x21, 0x8b, 0x5b, 0xa3, 0xc9, 0xd8, 0xf2, 0x27, 0x19, 0x64, 0x98, 0x24,
- 0x32, 0xae, 0x5f, 0x8d, 0x33, 0x1c, 0x23, 0xca, 0x60, 0x9c, 0x4a, 0xc0, 0x3b, 0x62, 0xab, 0x1e,
- 0xc9, 0x90, 0x25, 0xfe, 0x77, 0xb1, 0x3d, 0xb1, 0x92, 0x90, 0xf7, 0x55, 0x08, 0x89, 0x63, 0xcc,
- 0xe2, 0x12, 0x56, 0x59, 0x12, 0xba, 0x15, 0x90, 0x80, 0xf0, 0xa5, 0x55, 0xac, 0x84, 0xd7, 0xf8,
- 0x7b, 0x0d, 0x34, 0x0f, 0x38, 0xe3, 0x09, 0x83, 0x0c, 0x69, 0x3b, 0xa0, 0xee, 0x85, 0x10, 0x27,
- 0x2e, 0xf6, 0xdb, 0xb5, 0x5e, 0xad, 0xdf, 0x70, 0xd6, 0xb8, 0x7d, 0xe4, 0x6b, 0x01, 0x68, 0xb2,
- 0x6c, 0x42, 0x99, 0x1b, 0xa1, 0x29, 0x8a, 0xda, 0xcb, 0xbd, 0x5a, 0xbf, 0x39, 0x7c, 0x64, 0xde,
- 0x56, 0x5c, 0xf3, 0xb3, 0x0c, 0x7a, 0xc5, 0xb6, 0xed, 0xdd, 0x97, 0xb9, 0xbe, 0x34, 0xcb, 0x75,
- 0xed, 0x0c, 0xc6, 0xd1, 0xbe, 0xa1, 0x90, 0x19, 0x0e, 0xe0, 0xd6, 0x17, 0x85, 0xa1, 0x8d, 0xc1,
- 0x06, 0xb7, 0x70, 0x12, 0xb8, 0x29, 0xca, 0x30, 0xf1, 0xdb, 0x2b, 0x5c, 0x6c, 0xc7, 0x14, 0x25,
- 0x33, 0xcb, 0x92, 0x99, 0x4f, 0x64, 0x49, 0x6d, 0x43, 0x72, 0x6f, 0x2b, 0xdc, 0xf3, 0x7c, 0xe3,
- 0x97, 0x3f, 0xf5, 0x9a, 0x73, 0xb7, 0xf4, 0x1e, 0x73, 0xa7, 0x86, 0xc1, 0xe6, 0x24, 0x19, 0x91,
- 0xc4, 0x57, 0x84, 0x56, 0x6f, 0x13, 0x7a, 0x57, 0x0a, 0xdd, 0x17, 0x42, 0x57, 0x09, 0x84, 0xd2,
- 0x46, 0xe5, 0x96, 0x52, 0x08, 0x6c, 0xc4, 0xf0, 0xd4, 0xf5, 0x22, 0xe2, 0x3d, 0x77, 0xfd, 0x0c,
- 0x8f, 0x59, 0xfb, 0x8d, 0xd7, 0xdc, 0xd2, 0x95, 0x7c, 0x21, 0xb4, 0x1e, 0xc3, 0xd3, 0x83, 0xc2,
- 0xf9, 0xa4, 0xf0, 0x69, 0x2e, 0x58, 0x1f, 0x67, 0xe4, 0x07, 0x94, 0xb8, 0x21, 0x2a, 0x0e, 0xa4,
- 0x7d, 0x87, 0x8b, 0x74, 0xe4, 0x21, 0x15, 0x6d, 0x62, 0xca, 0xfe, 0x99, 0x0e, 0xcc, 0x43, 0x8e,
- 0xb1, 0x3b, 0x52, 0x67, 0x4b, 0xe8, 0x2c, 0x10, 0x18, 0x4e, 0x4b, 0xd8, 0x02, 0x5b, 0x08, 0x44,
- 0x90, 0x21, 0xca, 0x4a, 0x81, 0xb5, 0xd7, 0x17, 0x58, 0x20, 0x30, 0x9c, 0x96, 0xb0, 0xa5, 0xc0,
- 0x11, 0x68, 0xf2, 0x4b, 0xe4, 0xd2, 0x14, 0x79, 0xb4, 0x5d, 0xef, 0xad, 0xf4, 0x9b, 0xc3, 0x4d,
- 0x13, 0x7b, 0x74, 0xf8, 0xd8, 0x3c, 0x2e, 0x22, 0x27, 0x29, 0xf2, 0xec, 0xed, 0x79, 0x1b, 0x29,
- 0x70, 0xc3, 0x01, 0x69, 0x09, 0xa1, 0xda, 0x3e, 0x68, 0x4d, 0xd2, 0x20, 0x83, 0x3e, 0x72, 0x53,
- 0xc8, 0xc2, 0x76, 0xa3, 0xb7, 0xd2, 0x6f, 0xd8, 0xf7, 0x67, 0xb9, 0x7e, 0x4f, 0x9e, 0x9d, 0x12,
- 0x35, 0x9c, 0xa6, 0x34, 0x8f, 0x21, 0x0b, 0x35, 0x17, 0xec, 0xc0, 0x28, 0x22, 0xdf, 0xbb, 0x93,
- 0xd4, 0x87, 0x0c, 0xb9, 0x70, 0xcc, 0x50, 0xe6, 0xa2, 0xd3, 0x14, 0x67, 0x67, 0x6d, 0xd0, 0xab,
- 0xf5, 0xeb, 0xf6, 0xc3, 0x59, 0xae, 0xf7, 0x04, 0xd1, 0x2b, 0xa1, 0x86, 0xb3, 0xcd, 0x63, 0x5f,
- 0xf2, 0xd0, 0x27, 0x45, 0xe4, 0x53, 0x1e, 0xd0, 0xbe, 0x03, 0xfa, 0x0d, 0x59, 0x31, 0xa6, 0x23,
- 0x14, 0xc2, 0x29, 0x26, 0x93, 0xac, 0xdd, 0xe4, 0x32, 0x8f, 0x66, 0xb9, 0xfe, 0xde, 0x2b, 0x65,
- 0xd4, 0x04, 0xc3, 0xe9, 0x5c, 0x15, 0x7b, 0xaa, 0x84, 0xf7, 0x57, 0x7f, 0xfc, 0x4d, 0x5f, 0x32,
- 0x7e, 0x5f, 0x06, 0x77, 0x0f, 0x48, 0x42, 0x51, 0x42, 0x27, 0x54, 0xdc, 0x79, 0x1b, 0x34, 0xaa,
- 0xd1, 0xc3, 0x2f, 0x7d, 0x73, 0xb8, 0x7b, 0xad, 0x2d, 0x9f, 0x95, 0x08, 0xbb, 0x5e, 0x1c, 0xe7,
- 0x8b, 0xa2, 0xfb, 0xe6, 0x69, 0xda, 0xc7, 0x60, 0x35, 0x23, 0x84, 0xc9, 0xa9, 0xf0, 0x70, 0xa1,
- 0x1f, 0xe6, 0x93, 0x68, 0x3a, 0x30, 0x9f, 0xa2, 0xec, 0x79, 0x84, 0x1c, 0x42, 0x98, 0xbd, 0x5a,
- 0x10, 0x39, 0x3c, 0x4f, 0xfb, 0xa9, 0x06, 0xb6, 0x12, 0x74, 0xca, 0xdc, 0x6a, 0xf0, 0x52, 0x37,
- 0x84, 0x34, 0xe4, 0x37, 0xbf, 0x65, 0x7f, 0x3d, 0xcb, 0xf5, 0xb7, 0x45, 0x15, 0x6e, 0x42, 0x19,
- 0xff, 0xe4, 0xfa, 0x87, 0x01, 0x66, 0xe1, 0x64, 0x54, 0xc8, 0xa9, 0x0f, 0x82, 0xb2, 0x8c, 0xf0,
- 0x88, 0x5a, 0xa3, 0x33, 0x86, 0xa8, 0x79, 0x88, 0x4e, 0xed, 0x62, 0xe1, 0x68, 0x05, 0xdd, 0x57,
- 0x15, 0xdb, 0x21, 0xa4, 0xa1, 0x2c, 0xd4, 0xcf, 0xcb, 0xa0, 0xa5, 0xd6, 0x4f, 0x1b, 0x80, 0x86,
- 0x68, 0xed, 0x6a, 0x36, 0xda, 0x5b, 0xb3, 0x5c, 0xdf, 0x14, 0x7f, 0xab, 0x0a, 0x19, 0x4e, 0x5d,
- 0xac, 0x8f, 0x7c, 0xcd, 0x03, 0xf5, 0x10, 0x41, 0x1f, 0x65, 0xee, 0x40, 0x56, 0xa6, 0x7f, 0xfb,
- 0xbc, 0x3c, 0xe4, 0x19, 0x76, 0xf7, 0x22, 0xd7, 0xd7, 0xc4, 0x7a, 0x30, 0xcb, 0xf5, 0x0d, 0x21,
- 0x53, 0xd2, 0x19, 0xce, 0x9a, 0x58, 0x0e, 0x14, 0x91, 0xa1, 0x9c, 0x93, 0xff, 0x4b, 0x64, 0x78,
- 0x4d, 0x64, 0x58, 0x89, 0x0c, 0x65, 0x4d, 0x7e, 0x5d, 0x01, 0x77, 0x04, 0x5a, 0x83, 0x60, 0x9d,
- 0xe2, 0x20, 0x41, 0xbe, 0x2b, 0x20, 0xb2, 0x71, 0xba, 0xaa, 0x8e, 0x78, 0x22, 0x4f, 0x38, 0x4c,
- 0x0a, 0x76, 0xce, 0x73, 0xbd, 0x36, 0x9f, 0x05, 0x0b, 0x14, 0x86, 0xd3, 0xa2, 0x0a, 0x56, 0xfb,
- 0x16, 0xac, 0x57, 0xe7, 0xec, 0x52, 0x54, 0x36, 0xd7, 0x0d, 0x12, 0xd5, 0x01, 0x9e, 0x20, 0x66,
- 0xb7, 0xe7, 0xf4, 0x0b, 0xe9, 0x86, 0xd3, 0x9a, 0x2a, 0x38, 0x6d, 0x04, 0xc4, 0x83, 0xc0, 0xf5,
- 0xf9, 0x30, 0x5b, 0xf9, 0x0f, 0xc3, 0xec, 0x81, 0x1c, 0x66, 0x6f, 0x29, 0x0f, 0x4d, 0xc5, 0x60,
- 0x38, 0xeb, 0xd2, 0x21, 0xc7, 0x59, 0x04, 0xb4, 0x12, 0x31, 0x6f, 0x59, 0xf9, 0xc8, 0xdc, 0xb6,
- 0x8f, 0x07, 0xb3, 0x5c, 0xdf, 0x59, 0x54, 0x99, 0x73, 0x18, 0xce, 0x9b, 0xd2, 0x39, 0x6f, 0x5e,
- 0xe3, 0x73, 0x50, 0x2f, 0x1f, 0x5b, 0xad, 0x03, 0x1a, 0xc9, 0x24, 0x46, 0x59, 0x11, 0xe1, 0x67,
- 0xb3, 0xea, 0xcc, 0x1d, 0x5a, 0x0f, 0x34, 0x7d, 0x94, 0x90, 0x18, 0x27, 0x3c, 0xbe, 0xcc, 0xe3,
- 0xaa, 0xcb, 0x7e, 0xf6, 0xf2, 0xa2, 0x5b, 0x3b, 0xbf, 0xe8, 0xd6, 0xfe, 0xba, 0xe8, 0xd6, 0x5e,
- 0x5c, 0x76, 0x97, 0xce, 0x2f, 0xbb, 0x4b, 0x7f, 0x5c, 0x76, 0x97, 0xbe, 0xd9, 0x57, 0x2e, 0x9a,
- 0x47, 0x68, 0x4c, 0xa8, 0x85, 0x47, 0xde, 0x5e, 0xf9, 0x51, 0xb6, 0x57, 0x7e, 0x95, 0x7d, 0xf0,
- 0xd1, 0xde, 0xd5, 0x8f, 0xa6, 0xd1, 0x1d, 0x3e, 0x4f, 0x1e, 0xff, 0x1b, 0x00, 0x00, 0xff, 0xff,
- 0xf8, 0x0c, 0x1b, 0x17, 0xc6, 0x09, 0x00, 0x00,
+ 0x0a, 0x2b, 0xd5, 0x26, 0x59, 0x24, 0xa4, 0x1e, 0x90, 0x70, 0x17, 0xd4, 0x22, 0x56, 0xaa, 0x5c,
+ 0x7e, 0x48, 0x08, 0x64, 0x4d, 0xec, 0x89, 0x33, 0x5a, 0xdb, 0x63, 0x3c, 0xe3, 0xd0, 0xf2, 0x17,
+ 0xc0, 0x6d, 0xb9, 0x71, 0xe0, 0xc0, 0x89, 0xbf, 0x65, 0x8f, 0x3d, 0x72, 0x32, 0xa8, 0xbd, 0x73,
+ 0xc8, 0x91, 0x13, 0xf2, 0xcc, 0xd8, 0x99, 0xb4, 0x5d, 0x95, 0xe5, 0x12, 0xcd, 0x7b, 0xef, 0x7b,
+ 0xdf, 0x97, 0x79, 0xf3, 0xe6, 0x8d, 0xc1, 0x00, 0x8f, 0xbc, 0x80, 0x58, 0x21, 0x0e, 0x26, 0xcc,
+ 0x0b, 0x31, 0x8a, 0x19, 0xb5, 0x18, 0x8a, 0x7d, 0x94, 0x46, 0x38, 0x66, 0xd6, 0x74, 0xa0, 0x58,
+ 0x66, 0x92, 0x12, 0x46, 0xb4, 0x1e, 0x4f, 0x31, 0xd5, 0x14, 0x53, 0x01, 0x4d, 0x07, 0xbb, 0x3d,
+ 0x85, 0x81, 0x9d, 0x25, 0x88, 0x5a, 0x53, 0x18, 0x62, 0x1f, 0x32, 0x92, 0x0a, 0x8e, 0xdd, 0xce,
+ 0x35, 0x04, 0xff, 0x95, 0xd1, 0x7b, 0x1e, 0x89, 0xc7, 0x98, 0x58, 0x49, 0x4a, 0xc8, 0xb8, 0x74,
+ 0x76, 0x03, 0x42, 0x82, 0x10, 0x59, 0xdc, 0x1a, 0x65, 0x63, 0xcb, 0xcf, 0x52, 0xc8, 0x30, 0x89,
+ 0x65, 0x5c, 0xbf, 0x1a, 0x67, 0x38, 0x42, 0x94, 0xc1, 0x28, 0x91, 0x80, 0xb7, 0xc4, 0x56, 0x3d,
+ 0x92, 0x22, 0x4b, 0xfc, 0xef, 0x62, 0x7b, 0x62, 0x25, 0x21, 0xef, 0xaa, 0x10, 0x12, 0x45, 0x98,
+ 0x45, 0x25, 0xac, 0xb2, 0x24, 0x74, 0x2b, 0x20, 0x01, 0xe1, 0x4b, 0xab, 0x58, 0x09, 0xaf, 0xf1,
+ 0xf7, 0x1a, 0x68, 0x1e, 0x70, 0xc6, 0x13, 0x06, 0x19, 0xd2, 0x76, 0x40, 0xdd, 0x9b, 0x40, 0x1c,
+ 0xbb, 0xd8, 0x6f, 0xd7, 0x7a, 0xb5, 0x7e, 0xc3, 0x59, 0xe3, 0xf6, 0x91, 0xaf, 0x05, 0xa0, 0xc9,
+ 0xd2, 0x8c, 0x32, 0x37, 0x44, 0x53, 0x14, 0xb6, 0x97, 0x7b, 0xb5, 0x7e, 0x73, 0xf8, 0xc8, 0xbc,
+ 0xad, 0xb8, 0xe6, 0x27, 0x29, 0xf4, 0x8a, 0x6d, 0xdb, 0xbb, 0x2f, 0x72, 0x7d, 0x69, 0x96, 0xeb,
+ 0xda, 0x19, 0x8c, 0xc2, 0x7d, 0x43, 0x21, 0x33, 0x1c, 0xc0, 0xad, 0xcf, 0x0a, 0x43, 0x1b, 0x83,
+ 0x0d, 0x6e, 0xe1, 0x38, 0x70, 0x13, 0x94, 0x62, 0xe2, 0xb7, 0x57, 0xb8, 0xd8, 0x8e, 0x29, 0x4a,
+ 0x66, 0x96, 0x25, 0x33, 0x9f, 0xc8, 0x92, 0xda, 0x86, 0xe4, 0xde, 0x56, 0xb8, 0xe7, 0xf9, 0xc6,
+ 0x2f, 0x7f, 0xea, 0x35, 0xe7, 0x6e, 0xe9, 0x3d, 0xe6, 0x4e, 0x0d, 0x83, 0xcd, 0x2c, 0x1e, 0x91,
+ 0xd8, 0x57, 0x84, 0x56, 0x6f, 0x13, 0x7a, 0x5b, 0x0a, 0xdd, 0x17, 0x42, 0x57, 0x09, 0x84, 0xd2,
+ 0x46, 0xe5, 0x96, 0x52, 0x08, 0x6c, 0x44, 0xf0, 0xd4, 0xf5, 0x42, 0xe2, 0x3d, 0x73, 0xfd, 0x14,
+ 0x8f, 0x59, 0xfb, 0xb5, 0x57, 0xdc, 0xd2, 0x95, 0x7c, 0x21, 0xb4, 0x1e, 0xc1, 0xd3, 0x83, 0xc2,
+ 0xf9, 0xa4, 0xf0, 0x69, 0x2e, 0x58, 0x1f, 0xa7, 0xe4, 0x07, 0x14, 0xbb, 0x13, 0x54, 0x1c, 0x48,
+ 0xfb, 0x0e, 0x17, 0xe9, 0xc8, 0x43, 0x2a, 0xda, 0xc4, 0x94, 0xfd, 0x33, 0x1d, 0x98, 0x87, 0x1c,
+ 0x63, 0x77, 0xa4, 0xce, 0x96, 0xd0, 0x59, 0x20, 0x30, 0x9c, 0x96, 0xb0, 0x05, 0xb6, 0x10, 0x08,
+ 0x21, 0x43, 0x94, 0x95, 0x02, 0x6b, 0xaf, 0x2e, 0xb0, 0x40, 0x60, 0x38, 0x2d, 0x61, 0x4b, 0x81,
+ 0x23, 0xd0, 0xe4, 0x97, 0xc8, 0xa5, 0x09, 0xf2, 0x68, 0xbb, 0xde, 0x5b, 0xe9, 0x37, 0x87, 0x9b,
+ 0x26, 0xf6, 0xe8, 0xf0, 0xb1, 0x79, 0x5c, 0x44, 0x4e, 0x12, 0xe4, 0xd9, 0xdb, 0xf3, 0x36, 0x52,
+ 0xe0, 0x86, 0x03, 0x92, 0x12, 0x42, 0xb5, 0x7d, 0xd0, 0xca, 0x92, 0x20, 0x85, 0x3e, 0x72, 0x13,
+ 0xc8, 0x26, 0xed, 0x46, 0x6f, 0xa5, 0xdf, 0xb0, 0xef, 0xcf, 0x72, 0xfd, 0x9e, 0x3c, 0x3b, 0x25,
+ 0x6a, 0x38, 0x4d, 0x69, 0x1e, 0x43, 0x36, 0xd1, 0x5c, 0xb0, 0x03, 0xc3, 0x90, 0x7c, 0xef, 0x66,
+ 0x89, 0x0f, 0x19, 0x72, 0xe1, 0x98, 0xa1, 0xd4, 0x45, 0xa7, 0x09, 0x4e, 0xcf, 0xda, 0xa0, 0x57,
+ 0xeb, 0xd7, 0xed, 0x87, 0xb3, 0x5c, 0xef, 0x09, 0xa2, 0x97, 0x42, 0x0d, 0x67, 0x9b, 0xc7, 0xbe,
+ 0xe0, 0xa1, 0x8f, 0x8a, 0xc8, 0xc7, 0x3c, 0xa0, 0x7d, 0x07, 0xf4, 0x1b, 0xb2, 0x22, 0x4c, 0x47,
+ 0x68, 0x02, 0xa7, 0x98, 0x64, 0x69, 0xbb, 0xc9, 0x65, 0x1e, 0xcd, 0x72, 0xfd, 0x9d, 0x97, 0xca,
+ 0xa8, 0x09, 0x86, 0xd3, 0xb9, 0x2a, 0xf6, 0x54, 0x09, 0xef, 0xaf, 0xfe, 0xf8, 0x9b, 0xbe, 0x64,
+ 0xfc, 0xbe, 0x0c, 0xee, 0x1e, 0x90, 0x98, 0xa2, 0x98, 0x66, 0x54, 0xdc, 0x79, 0x1b, 0x34, 0xaa,
+ 0xd1, 0xc3, 0x2f, 0x7d, 0x73, 0xb8, 0x7b, 0xad, 0x2d, 0x3f, 0x2f, 0x11, 0x76, 0xbd, 0x38, 0xce,
+ 0xe7, 0x45, 0xf7, 0xcd, 0xd3, 0xb4, 0x0f, 0xc1, 0x6a, 0x4a, 0x08, 0x93, 0x53, 0xe1, 0xe1, 0x42,
+ 0x3f, 0xcc, 0x27, 0xd1, 0x74, 0x60, 0x3e, 0x45, 0xe9, 0xb3, 0x10, 0x39, 0x84, 0x30, 0x7b, 0xb5,
+ 0x20, 0x72, 0x78, 0x9e, 0xf6, 0x53, 0x0d, 0x6c, 0xc5, 0xe8, 0x94, 0xb9, 0xd5, 0xe0, 0xa5, 0xee,
+ 0x04, 0xd2, 0x09, 0xbf, 0xf9, 0x2d, 0xfb, 0xab, 0x59, 0xae, 0xbf, 0x29, 0xaa, 0x70, 0x13, 0xca,
+ 0xf8, 0x27, 0xd7, 0xdf, 0x0f, 0x30, 0x9b, 0x64, 0xa3, 0x42, 0x4e, 0x7d, 0x10, 0x94, 0x65, 0x88,
+ 0x47, 0xd4, 0x1a, 0x9d, 0x31, 0x44, 0xcd, 0x43, 0x74, 0x6a, 0x17, 0x0b, 0x47, 0x2b, 0xe8, 0xbe,
+ 0xac, 0xd8, 0x0e, 0x21, 0x9d, 0xc8, 0x42, 0xfd, 0xbc, 0x0c, 0x5a, 0x6a, 0xfd, 0xb4, 0x01, 0x68,
+ 0x88, 0xd6, 0xae, 0x66, 0xa3, 0xbd, 0x35, 0xcb, 0xf5, 0x4d, 0xf1, 0xb7, 0xaa, 0x90, 0xe1, 0xd4,
+ 0xc5, 0xfa, 0xc8, 0xd7, 0x3c, 0x50, 0x9f, 0x20, 0xe8, 0xa3, 0xd4, 0x1d, 0xc8, 0xca, 0xf4, 0x6f,
+ 0x9f, 0x97, 0x87, 0x3c, 0xc3, 0xee, 0x5e, 0xe4, 0xfa, 0x9a, 0x58, 0x0f, 0x66, 0xb9, 0xbe, 0x21,
+ 0x64, 0x4a, 0x3a, 0xc3, 0x59, 0x13, 0xcb, 0x81, 0x22, 0x32, 0x94, 0x73, 0xf2, 0x7f, 0x89, 0x0c,
+ 0xaf, 0x89, 0x0c, 0x2b, 0x91, 0xa1, 0xac, 0xc9, 0xaf, 0x2b, 0xe0, 0x8e, 0x40, 0x6b, 0x10, 0xac,
+ 0x53, 0x1c, 0xc4, 0xc8, 0x77, 0x05, 0x44, 0x36, 0x4e, 0x57, 0xd5, 0x11, 0x4f, 0xe4, 0x09, 0x87,
+ 0x49, 0xc1, 0xce, 0x79, 0xae, 0xd7, 0xe6, 0xb3, 0x60, 0x81, 0xc2, 0x70, 0x5a, 0x54, 0xc1, 0x6a,
+ 0xdf, 0x82, 0xf5, 0xea, 0x9c, 0x5d, 0x8a, 0xca, 0xe6, 0xba, 0x41, 0xa2, 0x3a, 0xc0, 0x13, 0xc4,
+ 0xec, 0xf6, 0x9c, 0x7e, 0x21, 0xdd, 0x70, 0x5a, 0x53, 0x05, 0xa7, 0x8d, 0x80, 0x78, 0x10, 0xb8,
+ 0x3e, 0x1f, 0x66, 0x2b, 0xff, 0x61, 0x98, 0x3d, 0x90, 0xc3, 0xec, 0x0d, 0xe5, 0xa1, 0xa9, 0x18,
+ 0x0c, 0x67, 0x5d, 0x3a, 0xe4, 0x38, 0x0b, 0x81, 0x56, 0x22, 0xe6, 0x2d, 0x2b, 0x1f, 0x99, 0xdb,
+ 0xf6, 0xf1, 0x60, 0x96, 0xeb, 0x3b, 0x8b, 0x2a, 0x73, 0x0e, 0xc3, 0x79, 0x5d, 0x3a, 0xe7, 0xcd,
+ 0x6b, 0x7c, 0x0a, 0xea, 0xe5, 0x63, 0xab, 0x75, 0x40, 0x23, 0xce, 0x22, 0x94, 0x16, 0x11, 0x7e,
+ 0x36, 0xab, 0xce, 0xdc, 0xa1, 0xf5, 0x40, 0xd3, 0x47, 0x31, 0x89, 0x70, 0xcc, 0xe3, 0xcb, 0x3c,
+ 0xae, 0xba, 0xec, 0x6f, 0x5e, 0x5c, 0x74, 0x6b, 0xe7, 0x17, 0xdd, 0xda, 0x5f, 0x17, 0xdd, 0xda,
+ 0xf3, 0xcb, 0xee, 0xd2, 0xf9, 0x65, 0x77, 0xe9, 0x8f, 0xcb, 0xee, 0xd2, 0xd7, 0xb6, 0x72, 0xd1,
+ 0x3c, 0x42, 0x23, 0x42, 0x2d, 0x3c, 0xf2, 0xf6, 0x02, 0x62, 0x45, 0xc4, 0xcf, 0x42, 0x44, 0xc5,
+ 0xc7, 0xd9, 0x5e, 0xf9, 0x75, 0xf6, 0xde, 0x07, 0x7b, 0x57, 0x3f, 0x9e, 0x46, 0x77, 0xf8, 0x5c,
+ 0x79, 0xfc, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xf7, 0xd6, 0x80, 0x90, 0xce, 0x09, 0x00, 0x00,
}
func (m *ClientState) Marshal() (dAtA []byte, err error) {
diff --git a/light-clients/07-tendermint/types/tendermint_test.go b/modules/light-clients/07-tendermint/types/tendermint_test.go
similarity index 95%
rename from light-clients/07-tendermint/types/tendermint_test.go
rename to modules/light-clients/07-tendermint/types/tendermint_test.go
index cb939548..b42f564c 100644
--- a/light-clients/07-tendermint/types/tendermint_test.go
+++ b/modules/light-clients/07-tendermint/types/tendermint_test.go
@@ -12,8 +12,8 @@ import (
"github.com/cosmos/cosmos-sdk/codec"
"github.com/cosmos/ibc-go/testing/simapp"
sdk "github.com/cosmos/cosmos-sdk/types"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ ibctmtypes "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types"
ibctesting "github.com/cosmos/ibc-go/testing"
ibctestingmock "github.com/cosmos/ibc-go/testing/mock"
)
diff --git a/light-clients/07-tendermint/types/update.go b/modules/light-clients/07-tendermint/types/update.go
similarity index 97%
rename from light-clients/07-tendermint/types/update.go
rename to modules/light-clients/07-tendermint/types/update.go
index da64ef87..f1183cdc 100644
--- a/light-clients/07-tendermint/types/update.go
+++ b/modules/light-clients/07-tendermint/types/update.go
@@ -10,9 +10,9 @@ import (
"github.com/cosmos/cosmos-sdk/codec"
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
- "github.com/cosmos/ibc-go/core/exported"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ commitmenttypes "github.com/cosmos/ibc-go/modules/core/23-commitment/types"
+ "github.com/cosmos/ibc-go/modules/core/exported"
)
// CheckHeaderAndUpdateState checks if the provided header is valid, and if valid it will:
diff --git a/light-clients/07-tendermint/types/update_test.go b/modules/light-clients/07-tendermint/types/update_test.go
similarity index 98%
rename from light-clients/07-tendermint/types/update_test.go
rename to modules/light-clients/07-tendermint/types/update_test.go
index 9f89a0fb..f72d6fba 100644
--- a/light-clients/07-tendermint/types/update_test.go
+++ b/modules/light-clients/07-tendermint/types/update_test.go
@@ -5,9 +5,9 @@ import (
tmtypes "github.com/tendermint/tendermint/types"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
- types "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ commitmenttypes "github.com/cosmos/ibc-go/modules/core/23-commitment/types"
+ types "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types"
ibctesting "github.com/cosmos/ibc-go/testing"
ibctestingmock "github.com/cosmos/ibc-go/testing/mock"
)
diff --git a/light-clients/07-tendermint/types/upgrade.go b/modules/light-clients/07-tendermint/types/upgrade.go
similarity index 97%
rename from light-clients/07-tendermint/types/upgrade.go
rename to modules/light-clients/07-tendermint/types/upgrade.go
index ce408325..788a4a80 100644
--- a/light-clients/07-tendermint/types/upgrade.go
+++ b/modules/light-clients/07-tendermint/types/upgrade.go
@@ -6,9 +6,9 @@ import (
"github.com/cosmos/cosmos-sdk/codec"
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
- "github.com/cosmos/ibc-go/core/exported"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ commitmenttypes "github.com/cosmos/ibc-go/modules/core/23-commitment/types"
+ "github.com/cosmos/ibc-go/modules/core/exported"
upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
)
diff --git a/light-clients/07-tendermint/types/upgrade_test.go b/modules/light-clients/07-tendermint/types/upgrade_test.go
similarity index 98%
rename from light-clients/07-tendermint/types/upgrade_test.go
rename to modules/light-clients/07-tendermint/types/upgrade_test.go
index ffafdfcb..263c11cd 100644
--- a/light-clients/07-tendermint/types/upgrade_test.go
+++ b/modules/light-clients/07-tendermint/types/upgrade_test.go
@@ -1,10 +1,10 @@
package types_test
import (
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
- "github.com/cosmos/ibc-go/core/exported"
- "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ commitmenttypes "github.com/cosmos/ibc-go/modules/core/23-commitment/types"
+ "github.com/cosmos/ibc-go/modules/core/exported"
+ "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types"
upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
)
diff --git a/light-clients/09-localhost/doc.go b/modules/light-clients/09-localhost/doc.go
similarity index 100%
rename from light-clients/09-localhost/doc.go
rename to modules/light-clients/09-localhost/doc.go
diff --git a/light-clients/09-localhost/module.go b/modules/light-clients/09-localhost/module.go
similarity index 63%
rename from light-clients/09-localhost/module.go
rename to modules/light-clients/09-localhost/module.go
index 68a59226..6e6fd317 100644
--- a/light-clients/09-localhost/module.go
+++ b/modules/light-clients/09-localhost/module.go
@@ -1,7 +1,7 @@
package localhost
import (
- "github.com/cosmos/ibc-go/light-clients/09-localhost/types"
+ "github.com/cosmos/ibc-go/modules/light-clients/09-localhost/types"
)
// Name returns the IBC client name
diff --git a/light-clients/09-localhost/types/client_state.go b/modules/light-clients/09-localhost/types/client_state.go
similarity index 96%
rename from light-clients/09-localhost/types/client_state.go
rename to modules/light-clients/09-localhost/types/client_state.go
index fdfc7a41..6336a213 100644
--- a/light-clients/09-localhost/types/client_state.go
+++ b/modules/light-clients/09-localhost/types/client_state.go
@@ -11,11 +11,11 @@ import (
"github.com/cosmos/cosmos-sdk/codec"
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- connectiontypes "github.com/cosmos/ibc-go/core/03-connection/types"
- channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
- host "github.com/cosmos/ibc-go/core/24-host"
- "github.com/cosmos/ibc-go/core/exported"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ connectiontypes "github.com/cosmos/ibc-go/modules/core/03-connection/types"
+ channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
+ "github.com/cosmos/ibc-go/modules/core/exported"
)
var _ exported.ClientState = (*ClientState)(nil)
diff --git a/light-clients/09-localhost/types/client_state_test.go b/modules/light-clients/09-localhost/types/client_state_test.go
similarity index 96%
rename from light-clients/09-localhost/types/client_state_test.go
rename to modules/light-clients/09-localhost/types/client_state_test.go
index d46e63a8..658824df 100644
--- a/light-clients/09-localhost/types/client_state_test.go
+++ b/modules/light-clients/09-localhost/types/client_state_test.go
@@ -2,14 +2,14 @@ package types_test
import (
sdk "github.com/cosmos/cosmos-sdk/types"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- connectiontypes "github.com/cosmos/ibc-go/core/03-connection/types"
- channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
- commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
- host "github.com/cosmos/ibc-go/core/24-host"
- "github.com/cosmos/ibc-go/core/exported"
- ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
- "github.com/cosmos/ibc-go/light-clients/09-localhost/types"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ connectiontypes "github.com/cosmos/ibc-go/modules/core/03-connection/types"
+ channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
+ commitmenttypes "github.com/cosmos/ibc-go/modules/core/23-commitment/types"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
+ "github.com/cosmos/ibc-go/modules/core/exported"
+ ibctmtypes "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types"
+ "github.com/cosmos/ibc-go/modules/light-clients/09-localhost/types"
)
const (
diff --git a/light-clients/09-localhost/types/codec.go b/modules/light-clients/09-localhost/types/codec.go
similarity index 86%
rename from light-clients/09-localhost/types/codec.go
rename to modules/light-clients/09-localhost/types/codec.go
index a672323a..d45e21c4 100644
--- a/light-clients/09-localhost/types/codec.go
+++ b/modules/light-clients/09-localhost/types/codec.go
@@ -2,7 +2,7 @@ package types
import (
codectypes "github.com/cosmos/cosmos-sdk/codec/types"
- "github.com/cosmos/ibc-go/core/exported"
+ "github.com/cosmos/ibc-go/modules/core/exported"
)
// RegisterInterfaces register the ibc interfaces submodule implementations to protobuf
diff --git a/light-clients/09-localhost/types/errors.go b/modules/light-clients/09-localhost/types/errors.go
similarity index 100%
rename from light-clients/09-localhost/types/errors.go
rename to modules/light-clients/09-localhost/types/errors.go
diff --git a/light-clients/09-localhost/types/keys.go b/modules/light-clients/09-localhost/types/keys.go
similarity index 100%
rename from light-clients/09-localhost/types/keys.go
rename to modules/light-clients/09-localhost/types/keys.go
diff --git a/light-clients/09-localhost/types/localhost.pb.go b/modules/light-clients/09-localhost/types/localhost.pb.go
similarity index 94%
rename from light-clients/09-localhost/types/localhost.pb.go
rename to modules/light-clients/09-localhost/types/localhost.pb.go
index bf2ec3a5..c8793d62 100644
--- a/light-clients/09-localhost/types/localhost.pb.go
+++ b/modules/light-clients/09-localhost/types/localhost.pb.go
@@ -5,7 +5,7 @@ package types
import (
fmt "fmt"
- types "github.com/cosmos/ibc-go/core/02-client/types"
+ types "github.com/cosmos/ibc-go/modules/core/02-client/types"
_ "github.com/gogo/protobuf/gogoproto"
proto "github.com/gogo/protobuf/proto"
io "io"
@@ -75,7 +75,7 @@ func init() {
}
var fileDescriptor_1a6dbd867337bf2e = []byte{
- // 275 bytes of a gzipped FileDescriptorProto
+ // 285 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0xcf, 0x4c, 0x4a, 0x4e,
0xcf, 0xd7, 0xcf, 0xc9, 0x4c, 0xcf, 0x28, 0x49, 0xce, 0xc9, 0x4c, 0xcd, 0x2b, 0x29, 0xd6, 0xcf,
0xc9, 0x4f, 0x4e, 0xcc, 0xc9, 0xc8, 0x2f, 0x2e, 0xd1, 0x2f, 0x33, 0x44, 0x70, 0xf4, 0x0a, 0x8a,
@@ -87,13 +87,13 @@ var fileDescriptor_1a6dbd867337bf2e = []byte{
0xfc, 0x95, 0x89, 0xb9, 0x39, 0x56, 0x4a, 0x30, 0x19, 0xa5, 0x20, 0x76, 0x30, 0xd3, 0x33, 0x45,
0xc8, 0x8a, 0x8b, 0x2d, 0x23, 0x15, 0xe4, 0x2a, 0x09, 0x26, 0x05, 0x46, 0x0d, 0x6e, 0x23, 0x19,
0x3d, 0x88, 0x4b, 0x41, 0x56, 0xea, 0x41, 0x2d, 0x2a, 0x33, 0xd4, 0xf3, 0x00, 0xab, 0x71, 0x62,
- 0x39, 0x71, 0x4f, 0x9e, 0x21, 0x08, 0xaa, 0xc3, 0x8a, 0xa5, 0x63, 0x81, 0x3c, 0x83, 0x53, 0xf0,
+ 0x39, 0x71, 0x4f, 0x9e, 0x21, 0x08, 0xaa, 0xc3, 0x8a, 0xa5, 0x63, 0x81, 0x3c, 0x83, 0x53, 0xf4,
0x89, 0x47, 0x72, 0x8c, 0x17, 0x1e, 0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, 0x38, 0xe1, 0xb1, 0x1c,
- 0xc3, 0x85, 0xc7, 0x72, 0x0c, 0x37, 0x1e, 0xcb, 0x31, 0x44, 0x59, 0xa6, 0x67, 0x96, 0x64, 0x94,
- 0x26, 0xe9, 0x25, 0xe7, 0xe7, 0xea, 0x27, 0xe7, 0x17, 0xe7, 0xe6, 0x17, 0x83, 0xc2, 0x4d, 0x17,
- 0x16, 0x70, 0xba, 0xb0, 0x90, 0x33, 0xb0, 0xd4, 0x45, 0x04, 0x5e, 0x49, 0x65, 0x41, 0x6a, 0x71,
- 0x12, 0x1b, 0xd8, 0x73, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x65, 0xa2, 0xe9, 0xaa, 0x69,
- 0x01, 0x00, 0x00,
+ 0xc3, 0x85, 0xc7, 0x72, 0x0c, 0x37, 0x1e, 0xcb, 0x31, 0x44, 0x39, 0xa6, 0x67, 0x96, 0x64, 0x94,
+ 0x26, 0xe9, 0x25, 0xe7, 0xe7, 0xea, 0x27, 0xe7, 0x17, 0xe7, 0xe6, 0x17, 0x83, 0xc2, 0x4d, 0x37,
+ 0x3d, 0x5f, 0x3f, 0x37, 0x3f, 0xa5, 0x34, 0x27, 0xb5, 0x18, 0x12, 0x80, 0xba, 0xb0, 0x10, 0x34,
+ 0xb0, 0xd4, 0x45, 0x04, 0x62, 0x49, 0x65, 0x41, 0x6a, 0x71, 0x12, 0x1b, 0xd8, 0x93, 0xc6, 0x80,
+ 0x00, 0x00, 0x00, 0xff, 0xff, 0x35, 0xf3, 0x24, 0x25, 0x71, 0x01, 0x00, 0x00,
}
func (m *ClientState) Marshal() (dAtA []byte, err error) {
diff --git a/light-clients/09-localhost/types/localhost_test.go b/modules/light-clients/09-localhost/types/localhost_test.go
similarity index 87%
rename from light-clients/09-localhost/types/localhost_test.go
rename to modules/light-clients/09-localhost/types/localhost_test.go
index 69d5c1cf..b4c267b6 100644
--- a/light-clients/09-localhost/types/localhost_test.go
+++ b/modules/light-clients/09-localhost/types/localhost_test.go
@@ -9,8 +9,8 @@ import (
"github.com/cosmos/cosmos-sdk/codec"
"github.com/cosmos/ibc-go/testing/simapp"
sdk "github.com/cosmos/cosmos-sdk/types"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- "github.com/cosmos/ibc-go/core/exported"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ "github.com/cosmos/ibc-go/modules/core/exported"
)
const (
diff --git a/proto/ibcgo/apps/transfer/v1/genesis.proto b/proto/ibcgo/apps/transfer/v1/genesis.proto
index 50a68179..70b6438c 100644
--- a/proto/ibcgo/apps/transfer/v1/genesis.proto
+++ b/proto/ibcgo/apps/transfer/v1/genesis.proto
@@ -2,7 +2,7 @@ syntax = "proto3";
package ibcgo.apps.transfer.v1;
-option go_package = "github.com/cosmos/ibc-go/apps/transfer/types";
+option go_package = "github.com/cosmos/ibc-go/modules/apps/transfer/types";
import "ibcgo/apps/transfer/v1/transfer.proto";
import "gogoproto/gogo.proto";
diff --git a/proto/ibcgo/apps/transfer/v1/query.proto b/proto/ibcgo/apps/transfer/v1/query.proto
index f7dcb5f8..d8be3728 100644
--- a/proto/ibcgo/apps/transfer/v1/query.proto
+++ b/proto/ibcgo/apps/transfer/v1/query.proto
@@ -7,7 +7,7 @@ import "cosmos/base/query/v1beta1/pagination.proto";
import "ibcgo/apps/transfer/v1/transfer.proto";
import "google/api/annotations.proto";
-option go_package = "github.com/cosmos/ibc-go/apps/transfer/types";
+option go_package = "github.com/cosmos/ibc-go/modules/apps/transfer/types";
// Query provides defines the gRPC querier service.
service Query {
diff --git a/proto/ibcgo/apps/transfer/v1/transfer.proto b/proto/ibcgo/apps/transfer/v1/transfer.proto
index 78c9ed91..b3dda2f9 100644
--- a/proto/ibcgo/apps/transfer/v1/transfer.proto
+++ b/proto/ibcgo/apps/transfer/v1/transfer.proto
@@ -2,7 +2,7 @@ syntax = "proto3";
package ibcgo.apps.transfer.v1;
-option go_package = "github.com/cosmos/ibc-go/apps/transfer/types";
+option go_package = "github.com/cosmos/ibc-go/modules/apps/transfer/types";
import "gogoproto/gogo.proto";
diff --git a/proto/ibcgo/apps/transfer/v1/tx.proto b/proto/ibcgo/apps/transfer/v1/tx.proto
index a6b6a5d6..383f6e67 100644
--- a/proto/ibcgo/apps/transfer/v1/tx.proto
+++ b/proto/ibcgo/apps/transfer/v1/tx.proto
@@ -2,7 +2,7 @@ syntax = "proto3";
package ibcgo.apps.transfer.v1;
-option go_package = "github.com/cosmos/ibc-go/apps/transfer/types";
+option go_package = "github.com/cosmos/ibc-go/modules/apps/transfer/types";
import "gogoproto/gogo.proto";
import "cosmos/base/v1beta1/coin.proto";
diff --git a/proto/ibcgo/core/channel/v1/channel.proto b/proto/ibcgo/core/channel/v1/channel.proto
index 459e852d..eb1b627c 100644
--- a/proto/ibcgo/core/channel/v1/channel.proto
+++ b/proto/ibcgo/core/channel/v1/channel.proto
@@ -2,7 +2,7 @@ syntax = "proto3";
package ibcgo.core.channel.v1;
-option go_package = "github.com/cosmos/ibc-go/core/04-channel/types";
+option go_package = "github.com/cosmos/ibc-go/modules/core/04-channel/types";
import "gogoproto/gogo.proto";
import "ibcgo/core/client/v1/client.proto";
diff --git a/proto/ibcgo/core/channel/v1/genesis.proto b/proto/ibcgo/core/channel/v1/genesis.proto
index 12f67486..c4d29781 100644
--- a/proto/ibcgo/core/channel/v1/genesis.proto
+++ b/proto/ibcgo/core/channel/v1/genesis.proto
@@ -2,7 +2,7 @@ syntax = "proto3";
package ibcgo.core.channel.v1;
-option go_package = "github.com/cosmos/ibc-go/core/04-channel/types";
+option go_package = "github.com/cosmos/ibc-go/modules/core/04-channel/types";
import "gogoproto/gogo.proto";
import "ibcgo/core/channel/v1/channel.proto";
diff --git a/proto/ibcgo/core/channel/v1/query.proto b/proto/ibcgo/core/channel/v1/query.proto
index a989b2ad..e8628ff5 100644
--- a/proto/ibcgo/core/channel/v1/query.proto
+++ b/proto/ibcgo/core/channel/v1/query.proto
@@ -2,7 +2,7 @@ syntax = "proto3";
package ibcgo.core.channel.v1;
-option go_package = "github.com/cosmos/ibc-go/core/04-channel/types";
+option go_package = "github.com/cosmos/ibc-go/modules/core/04-channel/types";
import "ibcgo/core/client/v1/client.proto";
import "cosmos/base/query/v1beta1/pagination.proto";
diff --git a/proto/ibcgo/core/channel/v1/tx.proto b/proto/ibcgo/core/channel/v1/tx.proto
index 290c3a94..9ee96e63 100644
--- a/proto/ibcgo/core/channel/v1/tx.proto
+++ b/proto/ibcgo/core/channel/v1/tx.proto
@@ -2,7 +2,7 @@ syntax = "proto3";
package ibcgo.core.channel.v1;
-option go_package = "github.com/cosmos/ibc-go/core/04-channel/types";
+option go_package = "github.com/cosmos/ibc-go/modules/core/04-channel/types";
import "gogoproto/gogo.proto";
import "ibcgo/core/client/v1/client.proto";
diff --git a/proto/ibcgo/core/client/v1/client.proto b/proto/ibcgo/core/client/v1/client.proto
index 36ef478c..84b0fd18 100644
--- a/proto/ibcgo/core/client/v1/client.proto
+++ b/proto/ibcgo/core/client/v1/client.proto
@@ -2,7 +2,7 @@ syntax = "proto3";
package ibcgo.core.client.v1;
-option go_package = "github.com/cosmos/ibc-go/core/02-client/types";
+option go_package = "github.com/cosmos/ibc-go/modules/core/02-client/types";
import "gogoproto/gogo.proto";
import "google/protobuf/any.proto";
diff --git a/proto/ibcgo/core/client/v1/genesis.proto b/proto/ibcgo/core/client/v1/genesis.proto
index fc1c5d2d..f34d1208 100644
--- a/proto/ibcgo/core/client/v1/genesis.proto
+++ b/proto/ibcgo/core/client/v1/genesis.proto
@@ -2,7 +2,7 @@ syntax = "proto3";
package ibcgo.core.client.v1;
-option go_package = "github.com/cosmos/ibc-go/core/02-client/types";
+option go_package = "github.com/cosmos/ibc-go/modules/core/02-client/types";
import "ibcgo/core/client/v1/client.proto";
import "gogoproto/gogo.proto";
diff --git a/proto/ibcgo/core/client/v1/query.proto b/proto/ibcgo/core/client/v1/query.proto
index 22c61ea3..5672653d 100644
--- a/proto/ibcgo/core/client/v1/query.proto
+++ b/proto/ibcgo/core/client/v1/query.proto
@@ -2,7 +2,7 @@ syntax = "proto3";
package ibcgo.core.client.v1;
-option go_package = "github.com/cosmos/ibc-go/core/02-client/types";
+option go_package = "github.com/cosmos/ibc-go/modules/core/02-client/types";
import "cosmos/base/query/v1beta1/pagination.proto";
import "ibcgo/core/client/v1/client.proto";
diff --git a/proto/ibcgo/core/client/v1/tx.proto b/proto/ibcgo/core/client/v1/tx.proto
index 722f6b49..cde8e9f2 100644
--- a/proto/ibcgo/core/client/v1/tx.proto
+++ b/proto/ibcgo/core/client/v1/tx.proto
@@ -2,7 +2,7 @@ syntax = "proto3";
package ibcgo.core.client.v1;
-option go_package = "github.com/cosmos/ibc-go/core/02-client/types";
+option go_package = "github.com/cosmos/ibc-go/modules/core/02-client/types";
import "gogoproto/gogo.proto";
import "google/protobuf/any.proto";
diff --git a/proto/ibcgo/core/commitment/v1/commitment.proto b/proto/ibcgo/core/commitment/v1/commitment.proto
index 373a77ff..229ac212 100644
--- a/proto/ibcgo/core/commitment/v1/commitment.proto
+++ b/proto/ibcgo/core/commitment/v1/commitment.proto
@@ -2,7 +2,7 @@ syntax = "proto3";
package ibcgo.core.commitment.v1;
-option go_package = "github.com/cosmos/ibc-go/core/23-commitment/types";
+option go_package = "github.com/cosmos/ibc-go/modules/core/23-commitment/types";
import "gogoproto/gogo.proto";
import "confio/proofs.proto";
diff --git a/proto/ibcgo/core/connection/v1/connection.proto b/proto/ibcgo/core/connection/v1/connection.proto
index 39f3925c..191d5515 100644
--- a/proto/ibcgo/core/connection/v1/connection.proto
+++ b/proto/ibcgo/core/connection/v1/connection.proto
@@ -2,7 +2,7 @@ syntax = "proto3";
package ibcgo.core.connection.v1;
-option go_package = "github.com/cosmos/ibc-go/core/03-connection/types";
+option go_package = "github.com/cosmos/ibc-go/modules/core/03-connection/types";
import "gogoproto/gogo.proto";
import "ibcgo/core/commitment/v1/commitment.proto";
diff --git a/proto/ibcgo/core/connection/v1/genesis.proto b/proto/ibcgo/core/connection/v1/genesis.proto
index 3e693c84..cbb5b0ae 100644
--- a/proto/ibcgo/core/connection/v1/genesis.proto
+++ b/proto/ibcgo/core/connection/v1/genesis.proto
@@ -2,7 +2,7 @@ syntax = "proto3";
package ibcgo.core.connection.v1;
-option go_package = "github.com/cosmos/ibc-go/core/03-connection/types";
+option go_package = "github.com/cosmos/ibc-go/modules/core/03-connection/types";
import "gogoproto/gogo.proto";
import "ibcgo/core/connection/v1/connection.proto";
diff --git a/proto/ibcgo/core/connection/v1/query.proto b/proto/ibcgo/core/connection/v1/query.proto
index c4ff165a..ec84f831 100644
--- a/proto/ibcgo/core/connection/v1/query.proto
+++ b/proto/ibcgo/core/connection/v1/query.proto
@@ -2,7 +2,7 @@ syntax = "proto3";
package ibcgo.core.connection.v1;
-option go_package = "github.com/cosmos/ibc-go/core/03-connection/types";
+option go_package = "github.com/cosmos/ibc-go/modules/core/03-connection/types";
import "gogoproto/gogo.proto";
import "cosmos/base/query/v1beta1/pagination.proto";
diff --git a/proto/ibcgo/core/connection/v1/tx.proto b/proto/ibcgo/core/connection/v1/tx.proto
index a371633c..46375618 100644
--- a/proto/ibcgo/core/connection/v1/tx.proto
+++ b/proto/ibcgo/core/connection/v1/tx.proto
@@ -2,7 +2,7 @@ syntax = "proto3";
package ibcgo.core.connection.v1;
-option go_package = "github.com/cosmos/ibc-go/core/03-connection/types";
+option go_package = "github.com/cosmos/ibc-go/modules/core/03-connection/types";
import "gogoproto/gogo.proto";
import "google/protobuf/any.proto";
diff --git a/proto/ibcgo/core/types/v1/genesis.proto b/proto/ibcgo/core/types/v1/genesis.proto
index fd73a2b0..a2c7845c 100644
--- a/proto/ibcgo/core/types/v1/genesis.proto
+++ b/proto/ibcgo/core/types/v1/genesis.proto
@@ -2,7 +2,7 @@ syntax = "proto3";
package ibcgo.core.types.v1;
-option go_package = "github.com/cosmos/ibc-go/core/types";
+option go_package = "github.com/cosmos/ibc-go/modules/core/types";
import "gogoproto/gogo.proto";
import "ibcgo/core/client/v1/genesis.proto";
diff --git a/proto/ibcgo/lightclients/localhost/v1/localhost.proto b/proto/ibcgo/lightclients/localhost/v1/localhost.proto
index 110a81b7..ff37c7a1 100644
--- a/proto/ibcgo/lightclients/localhost/v1/localhost.proto
+++ b/proto/ibcgo/lightclients/localhost/v1/localhost.proto
@@ -2,7 +2,7 @@ syntax = "proto3";
package ibcgo.lightclients.localhost.v1;
-option go_package = "github.com/cosmos/ibc-go/light-clients/09-localhost/types";
+option go_package = "github.com/cosmos/ibc-go/modules/light-clients/09-localhost/types";
import "gogoproto/gogo.proto";
import "ibcgo/core/client/v1/client.proto";
diff --git a/proto/ibcgo/lightclients/solomachine/v1/solomachine.proto b/proto/ibcgo/lightclients/solomachine/v1/solomachine.proto
index d4d22848..965eb382 100644
--- a/proto/ibcgo/lightclients/solomachine/v1/solomachine.proto
+++ b/proto/ibcgo/lightclients/solomachine/v1/solomachine.proto
@@ -2,7 +2,7 @@ syntax = "proto3";
package ibcgo.lightclients.solomachine.v1;
-option go_package = "github.com/cosmos/ibc-go/light-clients/06-solomachine/types";
+option go_package = "github.com/cosmos/ibc-go/modules/light-clients/06-solomachine/types";
import "ibcgo/core/connection/v1/connection.proto";
import "ibcgo/core/channel/v1/channel.proto";
diff --git a/proto/ibcgo/lightclients/tendermint/v1/tendermint.proto b/proto/ibcgo/lightclients/tendermint/v1/tendermint.proto
index d6a408b6..b58561ac 100644
--- a/proto/ibcgo/lightclients/tendermint/v1/tendermint.proto
+++ b/proto/ibcgo/lightclients/tendermint/v1/tendermint.proto
@@ -2,7 +2,7 @@ syntax = "proto3";
package ibcgo.lightclients.tendermint.v1;
-option go_package = "github.com/cosmos/ibc-go/light-clients/07-tendermint/types";
+option go_package = "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types";
import "tendermint/types/validator.proto";
import "tendermint/types/types.proto";
diff --git a/testing/chain.go b/testing/chain.go
index be337f4f..6f1f4e78 100644
--- a/testing/chain.go
+++ b/testing/chain.go
@@ -26,15 +26,15 @@ import (
capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"
"github.com/cosmos/cosmos-sdk/x/staking/teststaking"
stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types"
- ibctransfertypes "github.com/cosmos/ibc-go/apps/transfer/types"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- connectiontypes "github.com/cosmos/ibc-go/core/03-connection/types"
- channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
- commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
- host "github.com/cosmos/ibc-go/core/24-host"
- "github.com/cosmos/ibc-go/core/exported"
- "github.com/cosmos/ibc-go/core/types"
- ibctmtypes "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"
+ ibctransfertypes "github.com/cosmos/ibc-go/modules/apps/transfer/types"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ connectiontypes "github.com/cosmos/ibc-go/modules/core/03-connection/types"
+ channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
+ commitmenttypes "github.com/cosmos/ibc-go/modules/core/23-commitment/types"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
+ "github.com/cosmos/ibc-go/modules/core/exported"
+ "github.com/cosmos/ibc-go/modules/core/types"
+ ibctmtypes "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types"
"github.com/cosmos/ibc-go/testing/mock"
"github.com/cosmos/ibc-go/testing/simapp"
)
diff --git a/testing/coordinator.go b/testing/coordinator.go
index 282416ab..80f45157 100644
--- a/testing/coordinator.go
+++ b/testing/coordinator.go
@@ -10,9 +10,9 @@ import (
abci "github.com/tendermint/tendermint/abci/types"
sdk "github.com/cosmos/cosmos-sdk/types"
- channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
- host "github.com/cosmos/ibc-go/core/24-host"
- "github.com/cosmos/ibc-go/core/exported"
+ channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
+ "github.com/cosmos/ibc-go/modules/core/exported"
)
var (
diff --git a/testing/mock/mock.go b/testing/mock/mock.go
index 5df4d542..8a709fba 100644
--- a/testing/mock/mock.go
+++ b/testing/mock/mock.go
@@ -18,8 +18,8 @@ import (
sdk "github.com/cosmos/cosmos-sdk/types"
capabilitykeeper "github.com/cosmos/cosmos-sdk/x/capability/keeper"
capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"
- channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
- host "github.com/cosmos/ibc-go/core/24-host"
+ channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
)
const (
diff --git a/testing/sdk_test.go b/testing/sdk_test.go
index c71c47a2..41bd7066 100644
--- a/testing/sdk_test.go
+++ b/testing/sdk_test.go
@@ -28,8 +28,8 @@ import (
tmrand "github.com/tendermint/tendermint/libs/rand"
dbm "github.com/tendermint/tm-db"
- ibcclientcli "github.com/cosmos/ibc-go/core/02-client/client/cli"
- ibccli "github.com/cosmos/ibc-go/core/04-channel/client/cli"
+ ibcclientcli "github.com/cosmos/ibc-go/modules/core/02-client/client/cli"
+ ibccli "github.com/cosmos/ibc-go/modules/core/04-channel/client/cli"
"github.com/cosmos/ibc-go/testing/simapp"
)
diff --git a/testing/simapp/app.go b/testing/simapp/app.go
index 872d1609..7e4ba042 100644
--- a/testing/simapp/app.go
+++ b/testing/simapp/app.go
@@ -82,16 +82,16 @@ import (
upgradeclient "github.com/cosmos/cosmos-sdk/x/upgrade/client"
upgradekeeper "github.com/cosmos/cosmos-sdk/x/upgrade/keeper"
upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
- transfer "github.com/cosmos/ibc-go/apps/transfer"
- ibctransferkeeper "github.com/cosmos/ibc-go/apps/transfer/keeper"
- ibctransfertypes "github.com/cosmos/ibc-go/apps/transfer/types"
- ibc "github.com/cosmos/ibc-go/core"
- ibcclient "github.com/cosmos/ibc-go/core/02-client"
- ibcclientclient "github.com/cosmos/ibc-go/core/02-client/client"
- ibcclienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- porttypes "github.com/cosmos/ibc-go/core/05-port/types"
- ibchost "github.com/cosmos/ibc-go/core/24-host"
- ibckeeper "github.com/cosmos/ibc-go/core/keeper"
+ transfer "github.com/cosmos/ibc-go/modules/apps/transfer"
+ ibctransferkeeper "github.com/cosmos/ibc-go/modules/apps/transfer/keeper"
+ ibctransfertypes "github.com/cosmos/ibc-go/modules/apps/transfer/types"
+ ibc "github.com/cosmos/ibc-go/modules/core"
+ ibcclient "github.com/cosmos/ibc-go/modules/core/02-client"
+ ibcclientclient "github.com/cosmos/ibc-go/modules/core/02-client/client"
+ ibcclienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ porttypes "github.com/cosmos/ibc-go/modules/core/05-port/types"
+ ibchost "github.com/cosmos/ibc-go/modules/core/24-host"
+ ibckeeper "github.com/cosmos/ibc-go/modules/core/keeper"
ibcmock "github.com/cosmos/ibc-go/testing/mock"
authz "github.com/cosmos/cosmos-sdk/x/authz"
diff --git a/testing/simapp/app_test.go b/testing/simapp/app_test.go
index 38adb14a..0e6adcc1 100644
--- a/testing/simapp/app_test.go
+++ b/testing/simapp/app_test.go
@@ -30,8 +30,8 @@ import (
"github.com/cosmos/cosmos-sdk/x/slashing"
"github.com/cosmos/cosmos-sdk/x/staking"
"github.com/cosmos/cosmos-sdk/x/upgrade"
- transfer "github.com/cosmos/ibc-go/apps/transfer"
- ibc "github.com/cosmos/ibc-go/core"
+ transfer "github.com/cosmos/ibc-go/modules/apps/transfer"
+ ibc "github.com/cosmos/ibc-go/modules/core"
)
func TestSimAppExportAndBlockedAddrs(t *testing.T) {
diff --git a/testing/simapp/sim_test.go b/testing/simapp/sim_test.go
index c1423655..acac8731 100644
--- a/testing/simapp/sim_test.go
+++ b/testing/simapp/sim_test.go
@@ -29,8 +29,8 @@ import (
"github.com/cosmos/cosmos-sdk/x/simulation"
slashingtypes "github.com/cosmos/cosmos-sdk/x/slashing/types"
stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types"
- ibctransfertypes "github.com/cosmos/ibc-go/apps/transfer/types"
- ibchost "github.com/cosmos/ibc-go/core/24-host"
+ ibctransfertypes "github.com/cosmos/ibc-go/modules/apps/transfer/types"
+ ibchost "github.com/cosmos/ibc-go/modules/core/24-host"
"github.com/cosmos/ibc-go/testing/simapp/helpers"
)
diff --git a/testing/solomachine.go b/testing/solomachine.go
index 2d04a904..c418a15a 100644
--- a/testing/solomachine.go
+++ b/testing/solomachine.go
@@ -12,11 +12,11 @@ import (
cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types"
"github.com/cosmos/cosmos-sdk/crypto/types/multisig"
"github.com/cosmos/cosmos-sdk/types/tx/signing"
- clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
- commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
- host "github.com/cosmos/ibc-go/core/24-host"
- "github.com/cosmos/ibc-go/core/exported"
- solomachinetypes "github.com/cosmos/ibc-go/light-clients/06-solomachine/types"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ commitmenttypes "github.com/cosmos/ibc-go/modules/core/23-commitment/types"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
+ "github.com/cosmos/ibc-go/modules/core/exported"
+ solomachinetypes "github.com/cosmos/ibc-go/modules/light-clients/06-solomachine/types"
)
var prefix = commitmenttypes.NewMerklePrefix([]byte("ibc"))
diff --git a/testing/types.go b/testing/types.go
index 78231352..9712a951 100644
--- a/testing/types.go
+++ b/testing/types.go
@@ -1,7 +1,7 @@
package ibctesting
import (
- channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
+ channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
)
// TestConnection is a testing helper struct to keep track of the connectionID, source clientID,
From 4d1ac73df53743668ac537a47f43b5042531c5dc Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Colin=20Axn=C3=A9r?=
<25233464+colin-axner@users.noreply.github.com>
Date: Mon, 8 Mar 2021 11:56:27 +0100
Subject: [PATCH 011/393] update 'modules' in import changes; add link to
reference for import command
---
.../{ibc-migration-042.md => ibc-migration-043.md} | 8 +++++---
1 file changed, 5 insertions(+), 3 deletions(-)
rename docs/migrations/{ibc-migration-042.md => ibc-migration-043.md} (90%)
diff --git a/docs/migrations/ibc-migration-042.md b/docs/migrations/ibc-migration-043.md
similarity index 90%
rename from docs/migrations/ibc-migration-042.md
rename to docs/migrations/ibc-migration-043.md
index 06e56d59..c68af39c 100644
--- a/docs/migrations/ibc-migration-042.md
+++ b/docs/migrations/ibc-migration-043.md
@@ -1,6 +1,6 @@
# Migrating to ibc-go
-This file contains information on how to migrate from the IBC module contained in the SDK 0.41.x line to the IBC module in the ibc-go repository based on the 0.42 SDK version.
+This file contains information on how to migrate from the IBC module contained in the SDK 0.41.x line to the IBC module in the ibc-go repository based on the 0.43 SDK version.
## Import Changes
@@ -10,9 +10,11 @@ The most obvious changes is import name changes. We need to change:
On my GNU/Linux based machine I used the following commands, executed in order:
-`grep -RiIl 'cosmos-sdk\/x\/ibc\/applications' | xargs sed -i 's/cosmos-sdk\/x\/ibc\/applications/ibc-go\/apps/g'`
+`grep -RiIl 'cosmos-sdk\/x\/ibc\/applications' | xargs sed -i 's/cosmos-sdk\/x\/ibc\/applications/ibc-go\/modules\/apps/g'`
-`grep -RiIl 'cosmos-sdk\/x\/ibc' | xargs sed -i 's/cosmos-sdk\/x\/ibc/ibc-go/g'`
+`grep -RiIl 'cosmos-sdk\/x\/ibc' | xargs sed -i 's/cosmos-sdk\/x\/ibc/ibc-go\/modules/g'`
+
+ref: [explanation of the above commands](https://www.internalpointers.com/post/linux-find-and-replace-text-multiple-files)
Executing these commands out of order will cause issues.
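For illustration, a typical import block before and after running the commands above looks roughly as follows (a sketch only; the aliases shown are the ones used elsewhere in this repository and are not prescribed by the migration guide):

```go
// Before: IBC packages imported from the SDK (0.41.x layout).
import (
	ibctransfertypes "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types"
	clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
	channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
)

// After: the same packages now live under ibc-go/modules.
import (
	ibctransfertypes "github.com/cosmos/ibc-go/modules/apps/transfer/types"
	clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
	channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
)
```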
From 7bc871d57393d3b0f4e2b900f061ef2d4a4a4ad2 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Colin=20Axn=C3=A9r?=
<25233464+colin-axner@users.noreply.github.com>
Date: Tue, 9 Mar 2021 13:31:27 +0100
Subject: [PATCH 012/393] update migration guide based on experience
---
docs/migrations/ibc-migration-043.md | 68 ++++++++++++++++++++++------
1 file changed, 54 insertions(+), 14 deletions(-)
diff --git a/docs/migrations/ibc-migration-043.md b/docs/migrations/ibc-migration-043.md
index c68af39c..72ae695d 100644
--- a/docs/migrations/ibc-migration-043.md
+++ b/docs/migrations/ibc-migration-043.md
@@ -20,37 +20,77 @@ Executing these commands out of order will cause issues.
Feel free to use your own method for modifying import names.
-## Proto file changes
+NOTE: Updating to the `v0.43.0` SDK release and then running `go mod tidy` will cause a downgrade to `v0.42.0` in order to support the old IBC import paths.
+Update the import paths before running `go mod tidy`.
-The protobuf files have change package naming.
-The new package naming begins with `ibcgo` instead of `ibc`.
+## IBC Keeper Changes
-The gRPC querier service endpoints have changed slightly. The previous files used `v1beta1`, this has been updated to `v1`.
+The IBC Keeper now takes in the Upgrade Keeper. Please add the chain's Upgrade Keeper after the Staking Keeper:
+
+```diff
+ // Create IBC Keeper
+ app.IBCKeeper = ibckeeper.NewKeeper(
+- appCodec, keys[ibchost.StoreKey], app.GetSubspace(ibchost.ModuleName), app.StakingKeeper, scopedIBCKeeper,
++ appCodec, keys[ibchost.StoreKey], app.GetSubspace(ibchost.ModuleName), app.StakingKeeper, app.UpgradeKeeper, scopedIBCKeeper,
+ )
+
+```
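Written out as plain Go (the same call as in the diff above, assuming the standard SimApp field and key names), the constructed keeper reads:

```go
// app.UpgradeKeeper must already be constructed before this call.
app.IBCKeeper = ibckeeper.NewKeeper(
	appCodec, keys[ibchost.StoreKey], app.GetSubspace(ibchost.ModuleName),
	app.StakingKeeper, app.UpgradeKeeper, scopedIBCKeeper,
)
```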
## Proposals
### UpdateClientProposal
-The `UpdateClient` has been modified to take in two client-identifiers and one initial height. Please see the [documentation](..//proposals.md) for more information.
-Simapp registration was incorrect in the 0.41.x releases. The `UpdateClient` proposal should be registered with the router key belonging to `ibc-go/core/02-client/types`.
-See this [commit](https://github.com/cosmos/cosmos-sdk/pull/8405/commits/9fae3ce6a335a6e2137aee09f7359c45957fb6fc#diff-8d1ca8086ee74e8f0490825ba21e7435be4753922192ff691311483aa3e71a0aL312)
+The `UpdateClient` proposal has been modified to take in two client identifiers and one initial height. Please see the [documentation](../proposals.md) for more information.
### UpgradeProposal
-A new IBC proposal type has been added, `UpgradeProposal`. This handles an IBC (breaking) Upgrade. The previous `UpgradedClientState` field in an Upgrade `Plan` has been deprecated in favor of this new proposal type.
+A new IBC proposal type has been added, `UpgradeProposal`. This handles an IBC (breaking) Upgrade.
+The previous `UpgradedClientState` field in an Upgrade `Plan` has been deprecated in favor of this new proposal type.
+
+### Proposal Handler Registration
+
+The `ClientUpdateProposalHandler` has been renamed to `ClientProposalHandler`.
+It handles both `UpdateClientProposal`s and `UpgradeProposal`s.
+
+Add this import:
+
+```diff
++ ibcclienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+```
+
+Please ensure the governance module adds the correct route:
+
+```diff
+- AddRoute(ibchost.RouterKey, ibcclient.NewClientUpdateProposalHandler(app.IBCKeeper.ClientKeeper))
++ AddRoute(ibcclienttypes.RouterKey, ibcclient.NewClientProposalHandler(app.IBCKeeper.ClientKeeper))
+```
+
+NOTE: Simapp registration was incorrect in the 0.41.x releases. The `UpdateClient` proposal handler should be registered with the router key belonging to `ibc-go/modules/core/02-client/types`
+as shown in the diffs above.
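Putting the import and the route together, the governance router wiring ends up looking roughly like this (a sketch using the usual SimApp variable names; only the IBC route shown is mandated by this migration):

```go
govRouter := govtypes.NewRouter()
govRouter.AddRoute(govtypes.RouterKey, govtypes.ProposalHandler).
	AddRoute(ibcclienttypes.RouterKey, ibcclient.NewClientProposalHandler(app.IBCKeeper.ClientKeeper))
```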
### Proposal CLI Registration
Please ensure both proposal type CLI commands are registered on the governance module by adding the following arguments to `gov.NewAppModuleBasic()`:
-`ibcclientclient.UpdateClientProposalHandler, ibcclientclient.UpgradeProposalHandler`
+Add the following import:
+```diff
++ ibcclientclient "github.com/cosmos/ibc-go/modules/core/02-client/client"
+```
-REST routes are not supported for these proposals.
+Register the CLI commands:
-### Proposal Handler Registration
+```diff
+ gov.NewAppModuleBasic(
+ paramsclient.ProposalHandler, distrclient.ProposalHandler, upgradeclient.ProposalHandler, upgradeclient.CancelProposalHandler,
++ ibcclientclient.UpdateClientProposalHandler, ibcclientclient.UpgradeProposalHandler,
+ ),
+```
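For convenience, the resulting registration in plain Go (the non-diff form of the block above) is:

```go
gov.NewAppModuleBasic(
	paramsclient.ProposalHandler, distrclient.ProposalHandler,
	upgradeclient.ProposalHandler, upgradeclient.CancelProposalHandler,
	ibcclientclient.UpdateClientProposalHandler, ibcclientclient.UpgradeProposalHandler,
),
```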
-The `ClientUpdateProposalHandler` has been renamed to `ClientProposalHandler`. It handles both `UpdateClientProposal`s and `UpgradeProposal`s.
+REST routes are not supported for these proposals.
-Please ensure the governance module adds the following route:
+## Proto file changes
-`AddRoute(ibcclienttypes.RouterKey, ibcclient.NewClientProposalHandler(app.IBCKeeper.ClientKeeper))`
+The protobuf files have changed package naming.
+The new package naming begins with `ibcgo` instead of `ibc`.
+
+The gRPC querier service endpoints have changed slightly. The previous files used `v1beta1`; this has been updated to `v1`.
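As a concrete illustration of the renaming (the service and method names below are assumed for illustration only, not taken from this diff), a fully qualified query path changes both its package prefix and its version segment:

```go
// Old (per the note above): /ibc.core.channel.v1beta1.Query/Channels
// New (this repository):    /ibcgo.core.channel.v1.Query/Channels
const channelsQueryPath = "/ibcgo.core.channel.v1.Query/Channels"
```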
From cb363330897cc4ad467d9f1aa801e4e2d152fb0d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?colin=20axn=C3=A9r?=
<25233464+colin-axner@users.noreply.github.com>
Date: Thu, 11 Mar 2021 11:48:27 +0100
Subject: [PATCH 013/393] add pr and issue templates (#78)
Co-authored-by: Federico Kunze <31522760+fedekunze@users.noreply.github.com>
---
.github/ISSUE_TEMPLATE/bug-report.md | 37 +++++++++++++++++++++++
.github/ISSUE_TEMPLATE/feature-request.md | 35 +++++++++++++++++++++
.github/PULL_REQUEST_TEMPLATE.md | 29 ++++++++++++++++++
3 files changed, 101 insertions(+)
create mode 100644 .github/ISSUE_TEMPLATE/bug-report.md
create mode 100644 .github/ISSUE_TEMPLATE/feature-request.md
create mode 100644 .github/PULL_REQUEST_TEMPLATE.md
diff --git a/.github/ISSUE_TEMPLATE/bug-report.md b/.github/ISSUE_TEMPLATE/bug-report.md
new file mode 100644
index 00000000..5bdfad7c
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/bug-report.md
@@ -0,0 +1,37 @@
+---
+name: Bug Report
+about: Create a report to help us squash bugs!
+
+---
+
+
+
+
+
+## Summary of Bug
+
+
+
+## Version
+
+
+
+## Steps to Reproduce
+
+
+
+____
+
+#### For Admin Use
+
+- [ ] Not duplicate issue
+- [ ] Appropriate labels applied
+- [ ] Appropriate contributors tagged/assigned
diff --git a/.github/ISSUE_TEMPLATE/feature-request.md b/.github/ISSUE_TEMPLATE/feature-request.md
new file mode 100644
index 00000000..714d70a2
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/feature-request.md
@@ -0,0 +1,35 @@
+---
+name: Feature Request
+about: Create a proposal to request a feature
+
+---
+
+
+
+## Summary
+
+
+
+## Problem Definition
+
+
+
+## Proposal
+
+
+
+____
+
+#### For Admin Use
+
+- [ ] Not duplicate issue
+- [ ] Appropriate labels applied
+- [ ] Appropriate contributors tagged/assigned
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
new file mode 100644
index 00000000..435e6a89
--- /dev/null
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -0,0 +1,29 @@
+
+
+## Description
+
+
+
+closes: #XXXX
+
+---
+
+Before we can merge this PR, please make sure that all the following items have been
+checked off. If any of the checklist items are not applicable, please leave them but
+write a little note why.
+
+- [ ] Targeted PR against correct branch (see [CONTRIBUTING.md](https://github.com/cosmos/ibc-go/blob/master/CONTRIBUTING.md#pr-targeting))
+- [ ] Linked to Github issue with discussion and accepted design OR link to spec that describes this work.
+- [ ] Code follows the [module structure standards](https://github.com/cosmos/cosmos-sdk/blob/master/docs/building-modules/structure.md).
+- [ ] Wrote unit and integration [tests](https://github.com/cosmos/ibc-go/blob/master/CONTRIBUTING.md#testing)
+- [ ] Updated relevant documentation (`docs/`) or specification (`x//spec/`)
+- [ ] Added relevant `godoc` [comments](https://blog.golang.org/godoc-documenting-go-code).
+- [ ] Added a relevant changelog entry to the `Unreleased` section in `CHANGELOG.md`
+- [ ] Re-reviewed `Files changed` in the Github PR explorer
+- [ ] Review `Codecov Report` in the comment section below once CI passes
From 05d1e4ac4afc0ddb32a4f2e11e21259fa6bdee0f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Colin=20Axn=C3=A9r?=
<25233464+colin-axner@users.noreply.github.com>
Date: Thu, 11 Mar 2021 17:27:39 +0100
Subject: [PATCH 014/393] revert proto package naming to 'ibc'
---
docs/ibc/proto-docs.md | 1130 ++++++++---------
docs/migrations/ibc-migration-043.md | 3 -
modules/apps/transfer/types/genesis.pb.go | 52 +-
modules/apps/transfer/types/query.pb.go | 122 +-
modules/apps/transfer/types/query.pb.gw.go | 2 +-
modules/apps/transfer/types/transfer.pb.go | 64 +-
modules/apps/transfer/types/tx.pb.go | 92 +-
modules/core/02-client/types/client.pb.go | 136 +-
modules/core/02-client/types/codec.go | 10 +-
modules/core/02-client/types/genesis.pb.go | 90 +-
modules/core/02-client/types/query.pb.go | 202 +--
modules/core/02-client/types/query.pb.gw.go | 2 +-
modules/core/02-client/types/tx.pb.go | 142 +--
modules/core/03-connection/types/codec.go | 6 +-
.../core/03-connection/types/connection.pb.go | 127 +-
.../core/03-connection/types/genesis.pb.go | 54 +-
modules/core/03-connection/types/query.pb.go | 188 +--
.../core/03-connection/types/query.pb.gw.go | 2 +-
modules/core/03-connection/types/tx.pb.go | 182 +--
modules/core/04-channel/types/channel.pb.go | 171 ++-
modules/core/04-channel/types/codec.go | 6 +-
modules/core/04-channel/types/genesis.pb.go | 80 +-
modules/core/04-channel/types/query.pb.go | 361 +++---
modules/core/04-channel/types/query.pb.gw.go | 2 +-
modules/core/04-channel/types/tx.pb.go | 280 ++--
modules/core/23-commitment/types/codec.go | 8 +-
.../core/23-commitment/types/commitment.pb.go | 66 +-
modules/core/types/genesis.pb.go | 50 +-
.../06-solomachine/types/solomachine.pb.go | 256 ++--
.../07-tendermint/types/tendermint.pb.go | 164 +--
.../09-localhost/types/localhost.pb.go | 46 +-
.../apps/transfer/v1/genesis.proto | 4 +-
.../apps/transfer/v1/query.proto | 4 +-
.../apps/transfer/v1/transfer.proto | 2 +-
.../{ibcgo => ibc}/apps/transfer/v1/tx.proto | 6 +-
.../core/channel/v1/channel.proto | 6 +-
.../core/channel/v1/genesis.proto | 4 +-
.../core/channel/v1/query.proto | 44 +-
proto/{ibcgo => ibc}/core/channel/v1/tx.proto | 22 +-
.../core/client/v1/client.proto | 2 +-
.../core/client/v1/genesis.proto | 4 +-
.../{ibcgo => ibc}/core/client/v1/query.proto | 8 +-
proto/{ibcgo => ibc}/core/client/v1/tx.proto | 4 +-
.../core/commitment/v1/commitment.proto | 2 +-
.../core/connection/v1/connection.proto | 6 +-
.../core/connection/v1/genesis.proto | 4 +-
.../core/connection/v1/query.proto | 22 +-
.../core/connection/v1/tx.proto | 16 +-
.../core/types/v1/genesis.proto | 14 +-
.../lightclients/localhost/v1/localhost.proto | 6 +-
.../solomachine/v1/solomachine.proto | 10 +-
.../tendermint/v1/tendermint.proto | 14 +-
52 files changed, 2144 insertions(+), 2156 deletions(-)
rename proto/{ibcgo => ibc}/apps/transfer/v1/genesis.proto (86%)
rename proto/{ibcgo => ibc}/apps/transfer/v1/query.proto (96%)
rename proto/{ibcgo => ibc}/apps/transfer/v1/transfer.proto (98%)
rename proto/{ibcgo => ibc}/apps/transfer/v1/tx.proto (92%)
rename proto/{ibcgo => ibc}/core/channel/v1/channel.proto (98%)
rename proto/{ibcgo => ibc}/core/channel/v1/genesis.proto (94%)
rename proto/{ibcgo => ibc}/core/channel/v1/query.proto (89%)
rename proto/{ibcgo => ibc}/core/channel/v1/tx.proto (94%)
rename proto/{ibcgo => ibc}/core/client/v1/client.proto (99%)
rename proto/{ibcgo => ibc}/core/client/v1/genesis.proto (96%)
rename proto/{ibcgo => ibc}/core/client/v1/query.proto (96%)
rename proto/{ibcgo => ibc}/core/client/v1/tx.proto (98%)
rename proto/{ibcgo => ibc}/core/commitment/v1/commitment.proto (97%)
rename proto/{ibcgo => ibc}/core/connection/v1/connection.proto (96%)
rename proto/{ibcgo => ibc}/core/connection/v1/genesis.proto (88%)
rename proto/{ibcgo => ibc}/core/connection/v1/query.proto (86%)
rename proto/{ibcgo => ibc}/core/connection/v1/tx.proto (93%)
rename proto/{ibcgo => ibc}/core/types/v1/genesis.proto (62%)
rename proto/{ibcgo => ibc}/lightclients/localhost/v1/localhost.proto (73%)
rename proto/{ibcgo => ibc}/lightclients/solomachine/v1/solomachine.proto (96%)
rename proto/{ibcgo => ibc}/lightclients/tendermint/v1/tendermint.proto (93%)
diff --git a/docs/ibc/proto-docs.md b/docs/ibc/proto-docs.md
index 504b4d1f..28c9be8e 100644
--- a/docs/ibc/proto-docs.md
+++ b/docs/ibc/proto-docs.md
@@ -175,19 +175,19 @@
- ibcgo/apps/transfer/v1/transfer.proto
+ ibc/apps/transfer/v1/transfer.proto
- M DenomTrace
+ M DenomTrace
- M FungibleTokenPacketData
+ M FungibleTokenPacketData
- M Params
+ M Params
@@ -198,11 +198,11 @@
- ibcgo/apps/transfer/v1/genesis.proto
+ ibc/apps/transfer/v1/genesis.proto
- M GenesisState
+ M GenesisState
@@ -213,38 +213,38 @@
- ibcgo/apps/transfer/v1/query.proto
+ ibc/apps/transfer/v1/query.proto
@@ -252,35 +252,35 @@
- ibcgo/core/client/v1/client.proto
+ ibc/core/client/v1/client.proto
- M ClientConsensusStates
+ M ClientConsensusStates
- M ClientUpdateProposal
+ M ClientUpdateProposal
- M ConsensusStateWithHeight
+ M ConsensusStateWithHeight
- M Height
+ M Height
- M IdentifiedClientState
+ M IdentifiedClientState
- M Params
+ M Params
- M UpgradeProposal
+ M UpgradeProposal
@@ -291,22 +291,22 @@
- ibcgo/apps/transfer/v1/tx.proto
+ ibc/apps/transfer/v1/tx.proto
@@ -314,40 +314,40 @@
- ibcgo/core/channel/v1/channel.proto
+ ibc/core/channel/v1/channel.proto
- M Acknowledgement
+ M Acknowledgement
- M Channel
+ M Channel
- M Counterparty
+ M Counterparty
- M IdentifiedChannel
+ M IdentifiedChannel
- M Packet
+ M Packet
- M PacketState
+ M PacketState
- E Order
+ E Order
- E State
+ E State
@@ -357,15 +357,15 @@
- ibcgo/core/channel/v1/genesis.proto
+ ibc/core/channel/v1/genesis.proto
- M GenesisState
+ M GenesisState
- M PacketSequence
+ M PacketSequence
@@ -376,118 +376,118 @@
- ibcgo/core/channel/v1/query.proto
+ ibc/core/channel/v1/query.proto
@@ -495,94 +495,94 @@
- ibcgo/core/channel/v1/tx.proto
+ ibc/core/channel/v1/tx.proto
@@ -590,19 +590,19 @@
- ibcgo/core/client/v1/genesis.proto
+ ibc/core/client/v1/genesis.proto
- M GenesisMetadata
+ M GenesisMetadata
- M GenesisState
+ M GenesisState
- M IdentifiedGenesisMetadata
+ M IdentifiedGenesisMetadata
@@ -613,62 +613,62 @@
- ibcgo/core/client/v1/query.proto
+ ibc/core/client/v1/query.proto
@@ -676,46 +676,46 @@
- ibcgo/core/client/v1/tx.proto
+ ibc/core/client/v1/tx.proto
@@ -723,23 +723,23 @@
- ibcgo/core/commitment/v1/commitment.proto
+ ibc/core/commitment/v1/commitment.proto
- M MerklePath
+ M MerklePath
- M MerklePrefix
+ M MerklePrefix
- M MerkleProof
+ M MerkleProof
- M MerkleRoot
+ M MerkleRoot
@@ -750,36 +750,36 @@
- ibcgo/core/connection/v1/connection.proto
+ ibc/core/connection/v1/connection.proto
- M ClientPaths
+ M ClientPaths
- M ConnectionEnd
+ M ConnectionEnd
- M ConnectionPaths
+ M ConnectionPaths
- M Counterparty
+ M Counterparty
- M IdentifiedConnection
+ M IdentifiedConnection
- M Version
+ M Version
- E State
+ E State
@@ -789,11 +789,11 @@
- ibcgo/core/connection/v1/genesis.proto
+ ibc/core/connection/v1/genesis.proto
- M GenesisState
+ M GenesisState
@@ -804,54 +804,54 @@
- ibcgo/core/connection/v1/query.proto
+ ibc/core/connection/v1/query.proto
@@ -859,46 +859,46 @@
- ibcgo/core/connection/v1/tx.proto
+ ibc/core/connection/v1/tx.proto
@@ -906,11 +906,11 @@
- ibcgo/core/types/v1/genesis.proto
+ ibc/core/types/v1/genesis.proto
- M GenesisState
+ M GenesisState
@@ -921,11 +921,11 @@
- ibcgo/lightclients/localhost/v1/localhost.proto
+ ibc/lightclients/localhost/v1/localhost.proto
- M ClientState
+ M ClientState
@@ -936,76 +936,76 @@
- ibcgo/lightclients/solomachine/v1/solomachine.proto
+ ibc/lightclients/solomachine/v1/solomachine.proto
- M ChannelStateData
+ M ChannelStateData
- M ClientState
+ M ClientState
- M ClientStateData
+ M ClientStateData
- M ConnectionStateData
+ M ConnectionStateData
- M ConsensusState
+ M ConsensusState
- M ConsensusStateData
+ M ConsensusStateData
- M Header
+ M Header
- M HeaderData
+ M HeaderData
- M Misbehaviour
+ M Misbehaviour
- M NextSequenceRecvData
+ M NextSequenceRecvData
- M PacketAcknowledgementData
+ M PacketAcknowledgementData
- M PacketCommitmentData
+ M PacketCommitmentData
- M PacketReceiptAbsenceData
+ M PacketReceiptAbsenceData
- M SignBytes
+ M SignBytes
- M SignatureAndData
+ M SignatureAndData
- M TimestampedSignatureData
+ M TimestampedSignatureData
- E DataType
+ E DataType
@@ -1015,27 +1015,27 @@
- ibcgo/lightclients/tendermint/v1/tendermint.proto
+ ibc/lightclients/tendermint/v1/tendermint.proto
- M ClientState
+ M ClientState
- M ConsensusState
+ M ConsensusState
- M Fraction
+ M Fraction
- M Header
+ M Header
- M Misbehaviour
+ M Misbehaviour
@@ -1051,12 +1051,12 @@
-
ibcgo/apps/transfer/v1/transfer.proto Top
+
ibc/apps/transfer/v1/transfer.proto Top
- DenomTrace
+ DenomTrace
DenomTrace contains the base denomination for ICS20 fungible tokens and the
source tracing information path.
@@ -1088,7 +1088,7 @@ source of the fungible token.
- FungibleTokenPacketData
+ FungibleTokenPacketData
FungibleTokenPacketData defines a struct for the packet payload
See FungibleTokenPacketData spec:
https://github.com/cosmos/ics/tree/master/spec/ics-020-fungible-token-transfer#data-structures
@@ -1133,7 +1133,7 @@ source of the fungible token.
- Params
+ Params
Params defines the set of IBC transfer parameters.
NOTE: To prevent a single token from being transferred, set the
TransfersEnabled parameter to true and then set the bank module's SendEnabled
parameter for the denomination to false.
@@ -1175,12 +1175,12 @@ chain.
-
ibcgo/apps/transfer/v1/genesis.proto Top
+
ibc/apps/transfer/v1/genesis.proto Top
- GenesisState
+ GenesisState
GenesisState defines the ibc-transfer genesis state
@@ -1199,14 +1199,14 @@ chain.
denom_traces
- DenomTrace
+ DenomTrace
repeated
params
- Params
+ Params
@@ -1227,12 +1227,12 @@ chain.
-
ibcgo/apps/transfer/v1/query.proto Top
+
ibc/apps/transfer/v1/query.proto Top
- QueryDenomTraceRequest
+ QueryDenomTraceRequest
QueryDenomTraceRequest is the request type for the Query/DenomTrace RPC
method
@@ -1256,7 +1256,7 @@ chain.
- QueryDenomTraceResponse
+ QueryDenomTraceResponse
QueryDenomTraceResponse is the response type for the Query/DenomTrace RPC
method.
@@ -1268,7 +1268,7 @@ chain.
denom_trace
- DenomTrace
+ DenomTrace
denom_trace returns the requested denomination trace information.
@@ -1280,7 +1280,7 @@ chain.
- QueryDenomTracesRequest
+ QueryDenomTracesRequest
QueryConnectionsRequest is the request type for the Query/DenomTraces RPC
method
@@ -1304,7 +1304,7 @@ chain.
- QueryDenomTracesResponse
+ QueryDenomTracesResponse
QueryConnectionsResponse is the response type for the Query/DenomTraces RPC
method.
@@ -1316,7 +1316,7 @@ chain.
denom_traces
- DenomTrace
+ DenomTrace
repeated
denom_traces returns all denominations trace information.
@@ -1335,14 +1335,14 @@ chain.
- QueryParamsRequest
+ QueryParamsRequest
QueryParamsRequest is the request type for the Query/Params RPC method.
- QueryParamsResponse
+ QueryParamsResponse
QueryParamsResponse is the response type for the Query/Params RPC method.
@@ -1354,7 +1354,7 @@ chain.
params
- Params
+ Params
params defines the parameters of the module.
@@ -1372,7 +1372,7 @@ chain.
- Query
+ Query
Query provides defines the gRPC querier service.
@@ -1382,22 +1382,22 @@ chain.
DenomTrace
- QueryDenomTraceRequest
- QueryDenomTraceResponse
+ QueryDenomTraceRequest
+ QueryDenomTraceResponse
DenomTrace queries a denomination trace information.
DenomTraces
- QueryDenomTracesRequest
- QueryDenomTracesResponse
+ QueryDenomTracesRequest
+ QueryDenomTracesResponse
DenomTraces queries all denomination traces.
Params
- QueryParamsRequest
- QueryParamsResponse
+ QueryParamsRequest
+ QueryParamsResponse
Params queries all parameters of the ibc-transfer module.
@@ -1456,12 +1456,12 @@ chain.
-
ibcgo/core/client/v1/client.proto Top
+
ibc/core/client/v1/client.proto Top
- ClientConsensusStates
+ ClientConsensusStates
ClientConsensusStates defines all the stored consensus states for a given
client.
@@ -1480,7 +1480,7 @@ chain.
consensus_states
- ConsensusStateWithHeight
+ ConsensusStateWithHeight
repeated
consensus states and their heights associated with the client
@@ -1492,7 +1492,7 @@ chain.
- ClientUpdateProposal
+ ClientUpdateProposal
ClientUpdateProposal is a governance proposal. If it passes, the substitute
client's consensus states starting from the 'initial height' are copied over
to the subjects client state. The proposal handler may fail if the subject
and the substitute do not match in client and chain parameters (with
exception to latest height, frozen height, and chain-id). The updated client
must also be valid (cannot be expired).
@@ -1533,7 +1533,7 @@ client
initial_height
- Height
+ Height
the intital height to copy consensus states from the substitute to the
subject
@@ -1546,7 +1546,7 @@ subject
- ConsensusStateWithHeight
+ ConsensusStateWithHeight
ConsensusStateWithHeight defines a consensus state with an additional height
field.
@@ -1558,7 +1558,7 @@ subject
height
- Height
+ Height
consensus state height
@@ -1577,7 +1577,7 @@ subject
- Height
+ Height
Height is a monotonically increasing data type
that can be compared against another Height for the purposes of updating and
freezing clients
Normally the RevisionHeight is incremented at each height while keeping
RevisionNumber the same. However some consensus algorithms may choose to
reset the height in certain conditions e.g. hard forks, state-machine
breaking changes In these cases, the RevisionNumber is incremented so that
height continues to be monitonically increasing even as the RevisionHeight
gets reset
@@ -1608,7 +1608,7 @@ subject
- IdentifiedClientState
+ IdentifiedClientState
IdentifiedClientState defines a client state with an additional client
identifier field.
@@ -1639,7 +1639,7 @@ subject
- Params
+ Params
Params defines the set of IBC light client parameters.
@@ -1663,7 +1663,7 @@ subject
- UpgradeProposal
+ UpgradeProposal
UpgradeProposal is a gov Content type for initiating an IBC breaking
upgrade.
@@ -1722,12 +1722,12 @@ planned chain upgrades
-
ibcgo/apps/transfer/v1/tx.proto Top
+
ibc/apps/transfer/v1/tx.proto Top
- MsgTransfer
+ MsgTransfer
MsgTransfer defines a msg to transfer fungible tokens (i.e Coins) between
ICS20 enabled chains. See ICS Spec here:
https://github.com/cosmos/ics/tree/master/spec/ics-020-fungible-token-transfer#data-structures
@@ -1774,7 +1774,7 @@ planned chain upgrades
timeout_height
- ibcgo.core.client.v1.Height
+ ibc.core.client.v1.Height
Timeout height relative to the current block height.
The timeout is disabled when set to 0.
@@ -1795,7 +1795,7 @@ The timeout is disabled when set to 0.
- MsgTransferResponse
+ MsgTransferResponse
MsgTransferResponse defines the Msg/Transfer response type.
@@ -1808,7 +1808,7 @@ The timeout is disabled when set to 0.
- Msg
+ Msg
Msg defines the ibc/transfer Msg service.
@@ -1818,8 +1818,8 @@ The timeout is disabled when set to 0.
Transfer
- MsgTransfer
- MsgTransferResponse
+ MsgTransfer
+ MsgTransferResponse
Transfer defines a rpc handler method for MsgTransfer.
@@ -1830,12 +1830,12 @@ The timeout is disabled when set to 0.
-
ibcgo/core/channel/v1/channel.proto Top
+
ibc/core/channel/v1/channel.proto Top
- Acknowledgement
+ Acknowledgement
Acknowledgement is the recommended acknowledgement format to be used by
app-specific protocols.
NOTE: The field numbers 21 and 22 were explicitly chosen to avoid accidental
conflicts with other protobuf message formats used for acknowledgements.
The first byte of any message with this format will be the non-ASCII values
`0xaa` (result) or `0xb2` (error). Implemented as defined by ICS:
https://github.com/cosmos/ics/tree/master/spec/ics-004-channel-and-packet-semantics#acknowledgement-envelope
@@ -1866,7 +1866,7 @@ The timeout is disabled when set to 0.
- Channel
+ Channel
Channel defines pipeline for exactly-once packet delivery between specific
modules on separate blockchains, which has at least one end capable of
sending packets and one end capable of receiving packets.
@@ -1878,21 +1878,21 @@ The timeout is disabled when set to 0.
state
- State
+ State
current state of the channel end
ordering
- Order
+ Order
whether the channel is ordered or unordered
counterparty
- Counterparty
+ Counterparty
counterparty channel end
@@ -1919,7 +1919,7 @@ this channel will travel
- Counterparty
+ Counterparty
Counterparty defines a channel end counterparty
@@ -1950,7 +1950,7 @@ this channel will travel
- IdentifiedChannel
+ IdentifiedChannel
IdentifiedChannel defines a channel with additional port and channel
identifier fields.
@@ -1962,21 +1962,21 @@ this channel will travel
state
- State
+ State
current state of the channel end
ordering
- Order
+ Order
whether the channel is ordered or unordered
counterparty
- Counterparty
+ Counterparty
counterparty channel end
@@ -2017,7 +2017,7 @@ this channel will travel
- Packet
+ Packet
Packet defines a type that carries data across different chains through IBC
@@ -2073,7 +2073,7 @@ with a later sequence number.
timeout_height
- ibcgo.core.client.v1.Height
+ ibc.core.client.v1.Height
block height after which the packet times out
@@ -2092,7 +2092,7 @@ with a later sequence number.
- PacketState
+ PacketState
PacketState defines the generic type necessary to retrieve and store
packet commitments, acknowledgements, and receipts.
Caller is responsible for knowing the context necessary to interpret this
state as a commitment, acknowledgement, or a receipt.
@@ -2139,7 +2139,7 @@ with a later sequence number.
- Order
+ Order
Order defines if a channel is ORDERED or UNORDERED
@@ -2169,7 +2169,7 @@ which they were sent.
- State
+ State
State defines if a channel is in one of the following states:
CLOSED, INIT, TRYOPEN, OPEN or UNINITIALIZED.
@@ -2219,12 +2219,12 @@ packets.
-
ibcgo/core/channel/v1/genesis.proto Top
+
ibc/core/channel/v1/genesis.proto Top
- GenesisState
+ GenesisState
GenesisState defines the ibc channel submodule's genesis state.
@@ -2236,49 +2236,49 @@ packets.
channels
- IdentifiedChannel
+ IdentifiedChannel
repeated
acknowledgements
- PacketState
+ PacketState
repeated
commitments
- PacketState
+ PacketState
repeated
receipts
- PacketState
+ PacketState
repeated
send_sequences
- PacketSequence
+ PacketSequence
repeated
recv_sequences
- PacketSequence
+ PacketSequence
repeated
ack_sequences
- PacketSequence
+ PacketSequence
repeated
@@ -2297,7 +2297,7 @@ packets.
- PacketSequence
+ PacketSequence
PacketSequence defines the genesis type necessary to retrieve and store
next send and receive sequences.
@@ -2344,12 +2344,12 @@ packets.
-
ibcgo/core/channel/v1/query.proto Top
+
ibc/core/channel/v1/query.proto Top
- QueryChannelClientStateRequest
+ QueryChannelClientStateRequest
QueryChannelClientStateRequest is the request type for the Query/ClientState
RPC method
@@ -2380,7 +2380,7 @@ packets.
- QueryChannelClientStateResponse
+ QueryChannelClientStateResponse
QueryChannelClientStateResponse is the Response type for the
Query/QueryChannelClientState RPC method
@@ -2392,7 +2392,7 @@ packets.
identified_client_state
- ibcgo.core.client.v1.IdentifiedClientState
+ ibc.core.client.v1.IdentifiedClientState
client state associated with the channel
@@ -2406,7 +2406,7 @@ packets.
proof_height
- ibcgo.core.client.v1.Height
+ ibc.core.client.v1.Height
height at which the proof was retrieved
@@ -2418,7 +2418,7 @@ packets.
- QueryChannelConsensusStateRequest
+ QueryChannelConsensusStateRequest
QueryChannelConsensusStateRequest is the request type for the
Query/ConsensusState RPC method
@@ -2463,7 +2463,7 @@ packets.
- QueryChannelConsensusStateResponse
+ QueryChannelConsensusStateResponse
QueryChannelClientStateResponse is the Response type for the
Query/QueryChannelClientState RPC method
@@ -2496,7 +2496,7 @@ packets.
proof_height
- ibcgo.core.client.v1.Height
+ ibc.core.client.v1.Height
height at which the proof was retrieved
@@ -2508,7 +2508,7 @@ packets.
- QueryChannelRequest
+ QueryChannelRequest
QueryChannelRequest is the request type for the Query/Channel RPC method
@@ -2539,7 +2539,7 @@ packets.
- QueryChannelResponse
+ QueryChannelResponse
QueryChannelResponse is the response type for the Query/Channel RPC method.
Besides the Channel end, it includes a proof and the height from which the
proof was retrieved.
@@ -2551,7 +2551,7 @@ packets.
channel
- Channel
+ Channel
channel associated with the request identifiers
@@ -2565,7 +2565,7 @@ packets.
proof_height
- ibcgo.core.client.v1.Height
+ ibc.core.client.v1.Height
height at which the proof was retrieved
@@ -2577,7 +2577,7 @@ packets.
- QueryChannelsRequest
+ QueryChannelsRequest
QueryChannelsRequest is the request type for the Query/Channels RPC method
@@ -2601,7 +2601,7 @@ packets.
- QueryChannelsResponse
+ QueryChannelsResponse
QueryChannelsResponse is the response type for the Query/Channels RPC method.
@@ -2613,7 +2613,7 @@ packets.
channels
- IdentifiedChannel
+ IdentifiedChannel
repeated
list of stored channels of the chain.
@@ -2627,7 +2627,7 @@ packets.
height
- ibcgo.core.client.v1.Height
+ ibc.core.client.v1.Height
query block height
@@ -2639,7 +2639,7 @@ packets.
- QueryConnectionChannelsRequest
+ QueryConnectionChannelsRequest
QueryConnectionChannelsRequest is the request type for the
Query/QueryConnectionChannels RPC method
@@ -2670,7 +2670,7 @@ packets.
- QueryConnectionChannelsResponse
+ QueryConnectionChannelsResponse
QueryConnectionChannelsResponse is the Response type for the
Query/QueryConnectionChannels RPC method
@@ -2682,7 +2682,7 @@ packets.
channels
- IdentifiedChannel
+ IdentifiedChannel
repeated
list of channels associated with a connection.
@@ -2696,7 +2696,7 @@ packets.
height
- ibcgo.core.client.v1.Height
+ ibc.core.client.v1.Height
query block height
@@ -2708,7 +2708,7 @@ packets.
- QueryNextSequenceReceiveRequest
+ QueryNextSequenceReceiveRequest
QueryNextSequenceReceiveRequest is the request type for the
Query/QueryNextSequenceReceiveRequest RPC method
@@ -2739,7 +2739,7 @@ packets.
- QueryNextSequenceReceiveResponse
+ QueryNextSequenceReceiveResponse
QuerySequenceResponse is the request type for the
Query/QueryNextSequenceReceiveResponse RPC method
@@ -2765,7 +2765,7 @@ packets.
proof_height
- ibcgo.core.client.v1.Height
+ ibc.core.client.v1.Height
height at which the proof was retrieved
@@ -2777,7 +2777,7 @@ packets.
- QueryPacketAcknowledgementRequest
+ QueryPacketAcknowledgementRequest
QueryPacketAcknowledgementRequest is the request type for the
Query/PacketAcknowledgement RPC method
@@ -2815,7 +2815,7 @@ packets.
- QueryPacketAcknowledgementResponse
+ QueryPacketAcknowledgementResponse
QueryPacketAcknowledgementResponse defines the client query response for a
packet which also includes a proof and the height from which the
proof was retrieved
@@ -2841,7 +2841,7 @@ packets.
proof_height
- ibcgo.core.client.v1.Height
+ ibc.core.client.v1.Height
height at which the proof was retrieved
@@ -2853,7 +2853,7 @@ packets.
- QueryPacketAcknowledgementsRequest
+ QueryPacketAcknowledgementsRequest
QueryPacketAcknowledgementsRequest is the request type for the
Query/QueryPacketCommitments RPC method
@@ -2891,7 +2891,7 @@ packets.
- QueryPacketAcknowledgementsResponse
+ QueryPacketAcknowledgementsResponse
QueryPacketAcknowledgemetsResponse is the request type for the
Query/QueryPacketAcknowledgements RPC method
@@ -2903,7 +2903,7 @@ packets.
acknowledgements
- PacketState
+ PacketState
repeated
@@ -2917,7 +2917,7 @@ packets.
height
- ibcgo.core.client.v1.Height
+ ibc.core.client.v1.Height
query block height
@@ -2929,7 +2929,7 @@ packets.
- QueryPacketCommitmentRequest
+ QueryPacketCommitmentRequest
QueryPacketCommitmentRequest is the request type for the
Query/PacketCommitment RPC method
@@ -2967,7 +2967,7 @@ packets.
- QueryPacketCommitmentResponse
+ QueryPacketCommitmentResponse
QueryPacketCommitmentResponse defines the client query response for a packet
which also includes a proof and the height from which the proof was
retrieved
@@ -2993,7 +2993,7 @@ packets.
proof_height
- ibcgo.core.client.v1.Height
+ ibc.core.client.v1.Height
height at which the proof was retrieved
@@ -3005,7 +3005,7 @@ packets.
- QueryPacketCommitmentsRequest
+ QueryPacketCommitmentsRequest
QueryPacketCommitmentsRequest is the request type for the
Query/QueryPacketCommitments RPC method
@@ -3043,7 +3043,7 @@ packets.
- QueryPacketCommitmentsResponse
+ QueryPacketCommitmentsResponse
QueryPacketCommitmentsResponse is the request type for the
Query/QueryPacketCommitments RPC method
@@ -3055,7 +3055,7 @@ packets.
commitments
- PacketState
+ PacketState
repeated
@@ -3069,7 +3069,7 @@ packets.
height
- ibcgo.core.client.v1.Height
+ ibc.core.client.v1.Height
query block height
@@ -3081,7 +3081,7 @@ packets.
- QueryPacketReceiptRequest
+ QueryPacketReceiptRequest
QueryPacketReceiptRequest is the request type for the
Query/PacketReceipt RPC method
@@ -3119,7 +3119,7 @@ packets.
- QueryPacketReceiptResponse
+ QueryPacketReceiptResponse
QueryPacketReceiptResponse defines the client query response for a packet
receipt which also includes a proof, and the height from which the proof was
retrieved
@@ -3145,7 +3145,7 @@ packets.
proof_height
- ibcgo.core.client.v1.Height
+ ibc.core.client.v1.Height
height at which the proof was retrieved
@@ -3157,7 +3157,7 @@ packets.
- QueryUnreceivedAcksRequest
+ QueryUnreceivedAcksRequest
QueryUnreceivedAcks is the request type for the
Query/UnreceivedAcks RPC method
@@ -3195,7 +3195,7 @@ packets.
- QueryUnreceivedAcksResponse
+ QueryUnreceivedAcksResponse
QueryUnreceivedAcksResponse is the response type for the
Query/UnreceivedAcks RPC method
@@ -3214,7 +3214,7 @@ packets.
height
- ibcgo.core.client.v1.Height
+ ibc.core.client.v1.Height
query block height
@@ -3226,7 +3226,7 @@ packets.
- QueryUnreceivedPacketsRequest
+ QueryUnreceivedPacketsRequest
QueryUnreceivedPacketsRequest is the request type for the
Query/UnreceivedPackets RPC method
@@ -3264,7 +3264,7 @@ packets.
- QueryUnreceivedPacketsResponse
+ QueryUnreceivedPacketsResponse
QueryUnreceivedPacketsResponse is the response type for the
Query/UnreceivedPacketCommitments RPC method
@@ -3283,7 +3283,7 @@ packets.
height
- ibcgo.core.client.v1.Height
+ ibc.core.client.v1.Height
query block height
@@ -3301,7 +3301,7 @@ packets.
- Query
+ Query
Query provides defines the gRPC querier service
@@ -3311,100 +3311,100 @@ packets.
Channel
- QueryChannelRequest
- QueryChannelResponse
+ QueryChannelRequest
+ QueryChannelResponse
Channel queries an IBC Channel.
Channels
- QueryChannelsRequest
- QueryChannelsResponse
+ QueryChannelsRequest
+ QueryChannelsResponse
Channels queries all the IBC channels of a chain.
ConnectionChannels
- QueryConnectionChannelsRequest
- QueryConnectionChannelsResponse
+ QueryConnectionChannelsRequest
+ QueryConnectionChannelsResponse
ConnectionChannels queries all the channels associated with a connection
end.
ChannelClientState
- QueryChannelClientStateRequest
- QueryChannelClientStateResponse
+ QueryChannelClientStateRequest
+ QueryChannelClientStateResponse
ChannelClientState queries for the client state for the channel associated
with the provided channel identifiers.
ChannelConsensusState
- QueryChannelConsensusStateRequest
- QueryChannelConsensusStateResponse
+ QueryChannelConsensusStateRequest
+ QueryChannelConsensusStateResponse
ChannelConsensusState queries for the consensus state for the channel
associated with the provided channel identifiers.
PacketCommitment
- QueryPacketCommitmentRequest
- QueryPacketCommitmentResponse
+ QueryPacketCommitmentRequest
+ QueryPacketCommitmentResponse
PacketCommitment queries a stored packet commitment hash.
PacketCommitments
- QueryPacketCommitmentsRequest
- QueryPacketCommitmentsResponse
+ QueryPacketCommitmentsRequest
+ QueryPacketCommitmentsResponse
PacketCommitments returns all the packet commitments hashes associated
with a channel.
PacketReceipt
- QueryPacketReceiptRequest
- QueryPacketReceiptResponse
+ QueryPacketReceiptRequest
+ QueryPacketReceiptResponse
PacketReceipt queries if a given packet sequence has been received on the
queried chain
PacketAcknowledgement
- QueryPacketAcknowledgementRequest
- QueryPacketAcknowledgementResponse
+ QueryPacketAcknowledgementRequest
+ QueryPacketAcknowledgementResponse
PacketAcknowledgement queries a stored packet acknowledgement hash.
PacketAcknowledgements
- QueryPacketAcknowledgementsRequest
- QueryPacketAcknowledgementsResponse
+ QueryPacketAcknowledgementsRequest
+ QueryPacketAcknowledgementsResponse
PacketAcknowledgements returns all the packet acknowledgements associated
with a channel.
UnreceivedPackets
- QueryUnreceivedPacketsRequest
- QueryUnreceivedPacketsResponse
+ QueryUnreceivedPacketsRequest
+ QueryUnreceivedPacketsResponse
UnreceivedPackets returns all the unreceived IBC packets associated with a
channel and sequences.
UnreceivedAcks
- QueryUnreceivedAcksRequest
- QueryUnreceivedAcksResponse
+ QueryUnreceivedAcksRequest
+ QueryUnreceivedAcksResponse
UnreceivedAcks returns all the unreceived IBC acknowledgements associated
with a channel and sequences.
NextSequenceReceive
- QueryNextSequenceReceiveRequest
- QueryNextSequenceReceiveResponse
+ QueryNextSequenceReceiveRequest
+ QueryNextSequenceReceiveResponse
NextSequenceReceive returns the next receive sequence for a given channel.
@@ -3563,12 +3563,12 @@ with a channel and sequences.
-
ibcgo/core/channel/v1/tx.proto Top
+
ibc/core/channel/v1/tx.proto Top
- MsgAcknowledgement
+ MsgAcknowledgement
MsgAcknowledgement receives incoming IBC acknowledgement
@@ -3580,7 +3580,7 @@ with a channel and sequences.
packet
- Packet
+ Packet
@@ -3601,7 +3601,7 @@ with a channel and sequences.
proof_height
- ibcgo.core.client.v1.Height
+ ibc.core.client.v1.Height
@@ -3620,14 +3620,14 @@ with a channel and sequences.
- MsgAcknowledgementResponse
+ MsgAcknowledgementResponse
MsgAcknowledgementResponse defines the Msg/Acknowledgement response type.
- MsgChannelCloseConfirm
+ MsgChannelCloseConfirm
MsgChannelCloseConfirm defines a msg sent by a Relayer to Chain B
to acknowledge the change of channel state to CLOSED on Chain A.
@@ -3660,7 +3660,7 @@ with a channel and sequences.
proof_height
- ibcgo.core.client.v1.Height
+ ibc.core.client.v1.Height
@@ -3679,14 +3679,14 @@ with a channel and sequences.
- MsgChannelCloseConfirmResponse
+ MsgChannelCloseConfirmResponse
MsgChannelCloseConfirmResponse defines the Msg/ChannelCloseConfirm response
type.
- MsgChannelCloseInit
+ MsgChannelCloseInit
MsgChannelCloseInit defines a msg sent by a Relayer to Chain A
to close a channel with Chain B.
@@ -3724,14 +3724,14 @@ with a channel and sequences.
- MsgChannelCloseInitResponse
+ MsgChannelCloseInitResponse
MsgChannelCloseInitResponse defines the Msg/ChannelCloseInit response type.
- MsgChannelOpenAck
+ MsgChannelOpenAck
MsgChannelOpenAck defines a msg sent by a Relayer to Chain A to acknowledge
the change of channel state to TRYOPEN on Chain B.
@@ -3778,7 +3778,7 @@ with a channel and sequences.
proof_height
- ibcgo.core.client.v1.Height
+ ibc.core.client.v1.Height
@@ -3797,14 +3797,14 @@ with a channel and sequences.
- MsgChannelOpenAckResponse
+ MsgChannelOpenAckResponse
MsgChannelOpenAckResponse defines the Msg/ChannelOpenAck response type.
- MsgChannelOpenConfirm
+ MsgChannelOpenConfirm
MsgChannelOpenConfirm defines a msg sent by a Relayer to Chain B to
acknowledge the change of channel state to OPEN on Chain A.
@@ -3837,7 +3837,7 @@ with a channel and sequences.
proof_height
- ibcgo.core.client.v1.Height
+ ibc.core.client.v1.Height
@@ -3856,14 +3856,14 @@ with a channel and sequences.
- MsgChannelOpenConfirmResponse
+ MsgChannelOpenConfirmResponse
MsgChannelOpenConfirmResponse defines the Msg/ChannelOpenConfirm response
type.
- MsgChannelOpenInit
+ MsgChannelOpenInit
MsgChannelOpenInit defines an sdk.Msg to initialize a channel handshake. It
is called by a relayer on Chain A.
@@ -3882,7 +3882,7 @@ with a channel and sequences.
channel
- Channel
+ Channel
@@ -3901,14 +3901,14 @@ with a channel and sequences.
- MsgChannelOpenInitResponse
+ MsgChannelOpenInitResponse
MsgChannelOpenInitResponse defines the Msg/ChannelOpenInit response type.
- MsgChannelOpenTry
+ MsgChannelOpenTry
MsgChannelOpenTry defines a msg sent by a Relayer to try to open a channel
on Chain B.
@@ -3935,7 +3935,7 @@ the channel identifier of the previous channel in state INIT
channel
- Channel
+ Channel
@@ -3956,7 +3956,7 @@ the channel identifier of the previous channel in state INIT
proof_height
- ibcgo.core.client.v1.Height
+ ibc.core.client.v1.Height
@@ -3975,14 +3975,14 @@ the channel identifier of the previous channel in state INIT
- MsgChannelOpenTryResponse
+ MsgChannelOpenTryResponse
MsgChannelOpenTryResponse defines the Msg/ChannelOpenTry response type.
- MsgRecvPacket
+ MsgRecvPacket
MsgRecvPacket receives incoming IBC packet
@@ -3994,7 +3994,7 @@ the channel identifier of the previous channel in state INIT
packet
- Packet
+ Packet
@@ -4008,7 +4008,7 @@ the channel identifier of the previous channel in state INIT
proof_height
- ibcgo.core.client.v1.Height
+ ibc.core.client.v1.Height
@@ -4027,14 +4027,14 @@ the channel identifier of the previous channel in state INIT
- MsgRecvPacketResponse
+ MsgRecvPacketResponse
MsgRecvPacketResponse defines the Msg/RecvPacket response type.
- MsgTimeout
+ MsgTimeout
MsgTimeout receives timed-out packet
@@ -4046,7 +4046,7 @@ the channel identifier of the previous channel in state INIT
packet
- Packet
+ Packet
@@ -4060,7 +4060,7 @@ the channel identifier of the previous channel in state INIT
proof_height
- ibcgo.core.client.v1.Height
+ ibc.core.client.v1.Height
@@ -4086,7 +4086,7 @@ the channel identifier of the previous channel in state INIT
- MsgTimeoutOnClose
+ MsgTimeoutOnClose
MsgTimeoutOnClose handles a timed-out packet upon counterparty channel closure.
@@ -4098,7 +4098,7 @@ the channel identifier of the previous channel in state INIT
packet
- Packet
+ Packet
@@ -4119,7 +4119,7 @@ the channel identifier of the previous channel in state INIT
proof_height
- ibcgo.core.client.v1.Height
+ ibc.core.client.v1.Height
@@ -4145,14 +4145,14 @@ the channel identifier of the previous channel in state INIT
- MsgTimeoutOnCloseResponse
+ MsgTimeoutOnCloseResponse
MsgTimeoutOnCloseResponse defines the Msg/TimeoutOnClose response type.
- MsgTimeoutResponse
+ MsgTimeoutResponse
MsgTimeoutResponse defines the Msg/Timeout response type.
@@ -4165,7 +4165,7 @@ the channel identifier of the previous channel in state INIT
- Msg
+ Msg
Msg defines the ibc/channel Msg service.
@@ -4175,72 +4175,72 @@ the channel identifier of the previous channel in state INIT
ChannelOpenInit
- MsgChannelOpenInit
- MsgChannelOpenInitResponse
+ MsgChannelOpenInit
+ MsgChannelOpenInitResponse
ChannelOpenInit defines a rpc handler method for MsgChannelOpenInit.
ChannelOpenTry
- MsgChannelOpenTry
- MsgChannelOpenTryResponse
+ MsgChannelOpenTry
+ MsgChannelOpenTryResponse
ChannelOpenTry defines a rpc handler method for MsgChannelOpenTry.
ChannelOpenAck
- MsgChannelOpenAck
- MsgChannelOpenAckResponse
+ MsgChannelOpenAck
+ MsgChannelOpenAckResponse
ChannelOpenAck defines a rpc handler method for MsgChannelOpenAck.
ChannelOpenConfirm
- MsgChannelOpenConfirm
- MsgChannelOpenConfirmResponse
+ MsgChannelOpenConfirm
+ MsgChannelOpenConfirmResponse
ChannelOpenConfirm defines a rpc handler method for MsgChannelOpenConfirm.
ChannelCloseInit
- MsgChannelCloseInit
- MsgChannelCloseInitResponse
+ MsgChannelCloseInit
+ MsgChannelCloseInitResponse
ChannelCloseInit defines a rpc handler method for MsgChannelCloseInit.
ChannelCloseConfirm
- MsgChannelCloseConfirm
- MsgChannelCloseConfirmResponse
+ MsgChannelCloseConfirm
+ MsgChannelCloseConfirmResponse
ChannelCloseConfirm defines a rpc handler method for
MsgChannelCloseConfirm.
RecvPacket
- MsgRecvPacket
- MsgRecvPacketResponse
+ MsgRecvPacket
+ MsgRecvPacketResponse
RecvPacket defines a rpc handler method for MsgRecvPacket.
Timeout
- MsgTimeout
- MsgTimeoutResponse
+ MsgTimeout
+ MsgTimeoutResponse
Timeout defines a rpc handler method for MsgTimeout.
TimeoutOnClose
- MsgTimeoutOnClose
- MsgTimeoutOnCloseResponse
+ MsgTimeoutOnClose
+ MsgTimeoutOnCloseResponse
TimeoutOnClose defines a rpc handler method for MsgTimeoutOnClose.
Acknowledgement
- MsgAcknowledgement
- MsgAcknowledgementResponse
+ MsgAcknowledgement
+ MsgAcknowledgementResponse
Acknowledgement defines a rpc handler method for MsgAcknowledgement.
@@ -4251,12 +4251,12 @@ MsgChannelCloseConfirm.
-
ibcgo/core/client/v1/genesis.proto Top
+
ibc/core/client/v1/genesis.proto Top
-
+
GenesisMetadata defines the genesis type for metadata that clients may return
with ExportMetadata
@@ -4287,7 +4287,7 @@ MsgChannelCloseConfirm.
- GenesisState
+ GenesisState
GenesisState defines the ibc client submodule's genesis state.
@@ -4299,28 +4299,28 @@ MsgChannelCloseConfirm.
clients
- IdentifiedClientState
+ IdentifiedClientState
repeated
client states with their corresponding identifiers
clients_consensus
- ClientConsensusStates
+ ClientConsensusStates
repeated
consensus states from each client
clients_metadata
- IdentifiedGenesisMetadata
+ IdentifiedGenesisMetadata
repeated
metadata from each client
params
- Params
+ Params
@@ -4346,7 +4346,7 @@ MsgChannelCloseConfirm.
-
+
IdentifiedGenesisMetadata has the client metadata with the corresponding
client id.
@@ -4365,7 +4365,7 @@ MsgChannelCloseConfirm.
client_metadata
- GenesisMetadata
+ GenesisMetadata
repeated
@@ -4386,19 +4386,19 @@ MsgChannelCloseConfirm.
-
ibcgo/core/client/v1/query.proto Top
+
ibc/core/client/v1/query.proto Top
- QueryClientParamsRequest
+ QueryClientParamsRequest
QueryClientParamsRequest is the request type for the Query/ClientParams RPC
method.
- QueryClientParamsResponse
+ QueryClientParamsResponse
QueryClientParamsResponse is the response type for the Query/ClientParams RPC
method.
@@ -4410,7 +4410,7 @@ MsgChannelCloseConfirm.
params
- Params
+ Params
params defines the parameters of the module.
@@ -4422,7 +4422,7 @@ MsgChannelCloseConfirm.
- QueryClientStateRequest
+ QueryClientStateRequest
QueryClientStateRequest is the request type for the Query/ClientState RPC
method
@@ -4446,7 +4446,7 @@ MsgChannelCloseConfirm.
- QueryClientStateResponse
+ QueryClientStateResponse
QueryClientStateResponse is the response type for the Query/ClientState RPC
method. Besides the client state, it includes a proof and the height from
which the proof was retrieved.
@@ -4472,7 +4472,7 @@ MsgChannelCloseConfirm.
proof_height
- Height
+ Height
height at which the proof was retrieved
@@ -4484,7 +4484,7 @@ MsgChannelCloseConfirm.
- QueryClientStatesRequest
+ QueryClientStatesRequest
QueryClientStatesRequest is the request type for the Query/ClientStates RPC
method
@@ -4508,7 +4508,7 @@ MsgChannelCloseConfirm.
- QueryClientStatesResponse
+ QueryClientStatesResponse
QueryClientStatesResponse is the response type for the Query/ClientStates RPC
method.
@@ -4520,7 +4520,7 @@ MsgChannelCloseConfirm.
client_states
- IdentifiedClientState
+ IdentifiedClientState
repeated
list of stored ClientStates of the chain.
@@ -4539,7 +4539,7 @@ MsgChannelCloseConfirm.
- QueryConsensusStateRequest
+ QueryConsensusStateRequest
QueryConsensusStateRequest is the request type for the Query/ConsensusState
RPC method. Besides the consensus state, it includes a proof and the height
from which the proof was retrieved.
@@ -4585,7 +4585,7 @@ ConsensusState
- QueryConsensusStateResponse
+ QueryConsensusStateResponse
QueryConsensusStateResponse is the response type for the Query/ConsensusState
RPC method
@@ -4611,7 +4611,7 @@ ConsensusState
proof_height
- Height
+ Height
height at which the proof was retrieved
@@ -4623,7 +4623,7 @@ ConsensusState
- QueryConsensusStatesRequest
+ QueryConsensusStatesRequest
QueryConsensusStatesRequest is the request type for the Query/ConsensusStates
RPC method.
@@ -4654,7 +4654,7 @@ ConsensusState
- QueryConsensusStatesResponse
+ QueryConsensusStatesResponse
QueryConsensusStatesResponse is the response type for the
Query/ConsensusStates RPC method
@@ -4666,7 +4666,7 @@ ConsensusState
consensus_states
- ConsensusStateWithHeight
+ ConsensusStateWithHeight
repeated
consensus states associated with the identifier
@@ -4685,7 +4685,7 @@ ConsensusState
- QueryUpgradedClientStateRequest
+ QueryUpgradedClientStateRequest
QueryUpgradedClientStateRequest is the request type for the
Query/UpgradedClientState RPC method
@@ -4717,7 +4717,7 @@ as this is the height under which upgraded client state is stored
- QueryUpgradedClientStateResponse
+ QueryUpgradedClientStateResponse
QueryUpgradedClientStateResponse is the response type for the
Query/UpgradedClientState RPC method.
@@ -4747,7 +4747,7 @@ as this is the height under which upgraded client state is stored
- Query
+ Query
Query defines the gRPC querier service
@@ -4757,45 +4757,45 @@ as this is the height under which upgraded client state is stored
ClientState
- QueryClientStateRequest
- QueryClientStateResponse
+ QueryClientStateRequest
+ QueryClientStateResponse
ClientState queries an IBC light client.
ClientStates
- QueryClientStatesRequest
- QueryClientStatesResponse
+ QueryClientStatesRequest
+ QueryClientStatesResponse
ClientStates queries all the IBC light clients of a chain.
ConsensusState
- QueryConsensusStateRequest
- QueryConsensusStateResponse
+ QueryConsensusStateRequest
+ QueryConsensusStateResponse
ConsensusState queries a consensus state associated with a client state at
a given height.
ConsensusStates
- QueryConsensusStatesRequest
- QueryConsensusStatesResponse
+ QueryConsensusStatesRequest
+ QueryConsensusStatesResponse
ConsensusStates queries all the consensus state associated with a given
client.
ClientParams
- QueryClientParamsRequest
- QueryClientParamsResponse
+ QueryClientParamsRequest
+ QueryClientParamsResponse
ClientParams queries all parameters of the ibc client.
UpgradedClientState
- QueryUpgradedClientStateRequest
- QueryUpgradedClientStateResponse
+ QueryUpgradedClientStateRequest
+ QueryUpgradedClientStateResponse
UpgradedClientState queries an Upgraded IBC light client.
@@ -4884,12 +4884,12 @@ client.
-
ibcgo/core/client/v1/tx.proto Top
+
ibc/core/client/v1/tx.proto Top
- MsgCreateClient
+ MsgCreateClient
MsgCreateClient defines a message to create an IBC client
@@ -4928,14 +4928,14 @@ height.
- MsgCreateClientResponse
+ MsgCreateClientResponse
MsgCreateClientResponse defines the Msg/CreateClient response type.
- MsgSubmitMisbehaviour
+ MsgSubmitMisbehaviour
MsgSubmitMisbehaviour defines an sdk.Msg type that submits Evidence for
light client misbehaviour.
@@ -4973,14 +4973,14 @@ height.
- MsgSubmitMisbehaviourResponse
+ MsgSubmitMisbehaviourResponse
MsgSubmitMisbehaviourResponse defines the Msg/SubmitMisbehaviour response
type.
- MsgUpdateClient
+ MsgUpdateClient
MsgUpdateClient defines an sdk.Msg to update an IBC client state using
the given header.
@@ -5018,14 +5018,14 @@ height.
- MsgUpdateClientResponse
+ MsgUpdateClientResponse
MsgUpdateClientResponse defines the Msg/UpdateClient response type.
- MsgUpgradeClient
+ MsgUpgradeClient
MsgUpgradeClient defines an sdk.Msg to upgrade an IBC client to a new client
state
@@ -5085,7 +5085,7 @@ basis of trust in update logic
- MsgUpgradeClientResponse
+ MsgUpgradeClientResponse
MsgUpgradeClientResponse defines the Msg/UpgradeClient response type.
@@ -5098,7 +5098,7 @@ basis of trust in update logic
- Msg
+ Msg
Msg defines the ibc/client Msg service.
@@ -5108,29 +5108,29 @@ basis of trust in update logic
CreateClient
- MsgCreateClient
- MsgCreateClientResponse
+ MsgCreateClient
+ MsgCreateClientResponse
CreateClient defines a rpc handler method for MsgCreateClient.
UpdateClient
- MsgUpdateClient
- MsgUpdateClientResponse
+ MsgUpdateClient
+ MsgUpdateClientResponse
UpdateClient defines a rpc handler method for MsgUpdateClient.
UpgradeClient
- MsgUpgradeClient
- MsgUpgradeClientResponse
+ MsgUpgradeClient
+ MsgUpgradeClientResponse
UpgradeClient defines a rpc handler method for MsgUpgradeClient.
SubmitMisbehaviour
- MsgSubmitMisbehaviour
- MsgSubmitMisbehaviourResponse
+ MsgSubmitMisbehaviour
+ MsgSubmitMisbehaviourResponse
SubmitMisbehaviour defines a rpc handler method for MsgSubmitMisbehaviour.
@@ -5141,12 +5141,12 @@ basis of trust in update logic
-
ibcgo/core/commitment/v1/commitment.proto Top
+
ibc/core/commitment/v1/commitment.proto Top
- MerklePath
+ MerklePath
MerklePath is the path used to verify commitment proofs, which can be an
arbitrary structured object (defined by a commitment type).
MerklePath is represented from root-to-leaf
@@ -5170,7 +5170,7 @@ basis of trust in update logic
- MerklePrefix
+ MerklePrefix
MerklePrefix is a merkle path prefixed to the key.
The constructed key from the Path and the key will be append(Path.KeyPath,
append(Path.KeyPrefix, key...))
@@ -5194,7 +5194,7 @@ basis of trust in update logic
- MerkleProof
+ MerkleProof
MerkleProof is a wrapper type over a chain of CommitmentProofs.
It demonstrates membership or non-membership for an element or set of
elements, verifiable in conjunction with a known commitment root. Proofs
should be succinct.
MerkleProofs are ordered from leaf-to-root
@@ -5218,7 +5218,7 @@ basis of trust in update logic
- MerkleRoot
+ MerkleRoot
MerkleRoot defines a merkle root hash.
In the Cosmos SDK, the AppHash of a block header becomes the root.
@@ -5251,12 +5251,12 @@ basis of trust in update logic
-
ibcgo/core/connection/v1/connection.proto Top
+
ibc/core/connection/v1/connection.proto Top
- ClientPaths
+ ClientPaths
ClientPaths define all the connection paths for a client state.
@@ -5280,7 +5280,7 @@ basis of trust in update logic
- ConnectionEnd
+ ConnectionEnd
ConnectionEnd defines a stateful object on a chain connected to another
separate one.
NOTE: there must only be 2 defined ConnectionEnds to establish
a connection between two chains.
@@ -5299,7 +5299,7 @@ basis of trust in update logic
versions
- Version
+ Version
repeated
IBC version which can be utilised to determine encodings or protocols for
channels or packets utilising this connection.
@@ -5307,14 +5307,14 @@ channels or packets utilising this connection.
state
- State
+ State
current state of the connection end.
counterparty
- Counterparty
+ Counterparty
counterparty chain associated with this connection.
@@ -5335,7 +5335,7 @@ clients.
- ConnectionPaths
+ ConnectionPaths
ConnectionPaths define all the connection paths for a given client state.
@@ -5366,7 +5366,7 @@ clients.
- Counterparty
+ Counterparty
Counterparty defines the counterparty chain associated with a connection end.
@@ -5394,7 +5394,7 @@ given connection.
prefix
- ibcgo.core.commitment.v1.MerklePrefix
+ ibc.core.commitment.v1.MerklePrefix
commitment merkle prefix of the counterparty chain.
@@ -5406,7 +5406,7 @@ given connection.
- IdentifiedConnection
+ IdentifiedConnection
IdentifiedConnection defines a connection with additional connection
identifier field.
@@ -5432,7 +5432,7 @@ given connection.
versions
- Version
+ Version
repeated
IBC version which can be utilised to determine encodings or protocols for
channels or packets utilising this connection
@@ -5440,14 +5440,14 @@ channels or packets utilising this connection
state
- State
+ State
current state of the connection end.
counterparty
- Counterparty
+ Counterparty
counterparty chain associated with this connection.
@@ -5466,7 +5466,7 @@ channels or packets utilising this connection
- Version
+ Version
Version defines the versioning scheme used to negotiate the IBC version in
the connection handshake.
@@ -5499,7 +5499,7 @@ channels or packets utilising this connection
- State
+ State
State defines if a connection is in one of the following states:
INIT, TRYOPEN, OPEN or UNINITIALIZED.
@@ -5542,12 +5542,12 @@ chain.
-
ibcgo/core/connection/v1/genesis.proto Top
+
ibc/core/connection/v1/genesis.proto Top
- GenesisState
+ GenesisState
GenesisState defines the ibc connection submodule's genesis state.
@@ -5559,14 +5559,14 @@ chain.
connections
- IdentifiedConnection
+ IdentifiedConnection
repeated
client_connection_paths
- ConnectionPaths
+ ConnectionPaths
repeated
@@ -5594,12 +5594,12 @@ chain.
-
ibcgo/core/connection/v1/query.proto Top
+
ibc/core/connection/v1/query.proto Top
- QueryClientConnectionsRequest
+ QueryClientConnectionsRequest
QueryClientConnectionsRequest is the request type for the
Query/ClientConnections RPC method
@@ -5623,7 +5623,7 @@ chain.
- QueryClientConnectionsResponse
+ QueryClientConnectionsResponse
QueryClientConnectionsResponse is the response type for the
Query/ClientConnections RPC method
@@ -5649,7 +5649,7 @@ chain.
proof_height
- ibcgo.core.client.v1.Height
+ ibc.core.client.v1.Height
height at which the proof was generated
@@ -5661,7 +5661,7 @@ chain.
- QueryConnectionClientStateRequest
+ QueryConnectionClientStateRequest
QueryConnectionClientStateRequest is the request type for the
Query/ConnectionClientState RPC method
@@ -5685,7 +5685,7 @@ chain.
- QueryConnectionClientStateResponse
+ QueryConnectionClientStateResponse
QueryConnectionClientStateResponse is the response type for the
Query/ConnectionClientState RPC method
@@ -5697,7 +5697,7 @@ chain.
identified_client_state
- ibcgo.core.client.v1.IdentifiedClientState
+ ibc.core.client.v1.IdentifiedClientState
client state associated with the channel
@@ -5711,7 +5711,7 @@ chain.
proof_height
- ibcgo.core.client.v1.Height
+ ibc.core.client.v1.Height
height at which the proof was retrieved
@@ -5723,7 +5723,7 @@ chain.
- QueryConnectionConsensusStateRequest
+ QueryConnectionConsensusStateRequest
QueryConnectionConsensusStateRequest is the request type for the
Query/ConnectionConsensusState RPC method
@@ -5761,7 +5761,7 @@ chain.
- QueryConnectionConsensusStateResponse
+ QueryConnectionConsensusStateResponse
QueryConnectionConsensusStateResponse is the response type for the
Query/ConnectionConsensusState RPC method
@@ -5794,7 +5794,7 @@ chain.
proof_height
- ibcgo.core.client.v1.Height
+ ibc.core.client.v1.Height
height at which the proof was retrieved
@@ -5806,7 +5806,7 @@ chain.
- QueryConnectionRequest
+ QueryConnectionRequest
QueryConnectionRequest is the request type for the Query/Connection RPC
method
@@ -5830,7 +5830,7 @@ chain.
- QueryConnectionResponse
+ QueryConnectionResponse
QueryConnectionResponse is the response type for the Query/Connection RPC
method. Besides the connection end, it includes a proof and the height from
which the proof was retrieved.
@@ -5842,7 +5842,7 @@ chain.
connection
- ConnectionEnd
+ ConnectionEnd
connection associated with the request identifier
@@ -5856,7 +5856,7 @@ chain.
proof_height
- ibcgo.core.client.v1.Height
+ ibc.core.client.v1.Height
height at which the proof was retrieved
@@ -5868,7 +5868,7 @@ chain.
- QueryConnectionsRequest
+ QueryConnectionsRequest
QueryConnectionsRequest is the request type for the Query/Connections RPC
method
@@ -5892,7 +5892,7 @@ chain.
- QueryConnectionsResponse
+ QueryConnectionsResponse
QueryConnectionsResponse is the response type for the Query/Connections RPC
method.
@@ -5904,7 +5904,7 @@ chain.
connections
- IdentifiedConnection
+ IdentifiedConnection
repeated
list of stored connections of the chain.
@@ -5918,7 +5918,7 @@ chain.
height
- ibcgo.core.client.v1.Height
+ ibc.core.client.v1.Height
query block height
@@ -5936,7 +5936,7 @@ chain.
- Query
+ Query
Query defines the gRPC querier service
@@ -5946,38 +5946,38 @@ chain.
Connection
- QueryConnectionRequest
- QueryConnectionResponse
+ QueryConnectionRequest
+ QueryConnectionResponse
Connection queries an IBC connection end.
Connections
- QueryConnectionsRequest
- QueryConnectionsResponse
+ QueryConnectionsRequest
+ QueryConnectionsResponse
Connections queries all the IBC connections of a chain.
ClientConnections
- QueryClientConnectionsRequest
- QueryClientConnectionsResponse
+ QueryClientConnectionsRequest
+ QueryClientConnectionsResponse
ClientConnections queries the connection paths associated with a client
state.
ConnectionClientState
- QueryConnectionClientStateRequest
- QueryConnectionClientStateResponse
+ QueryConnectionClientStateRequest
+ QueryConnectionClientStateResponse
ConnectionClientState queries the client state associated with the
connection.
ConnectionConsensusState
- QueryConnectionConsensusStateRequest
- QueryConnectionConsensusStateResponse
+ QueryConnectionConsensusStateRequest
+ QueryConnectionConsensusStateResponse
ConnectionConsensusState queries the consensus state associated with the
connection.
@@ -6057,12 +6057,12 @@ connection.
-
ibcgo/core/connection/v1/tx.proto Top
+
ibc/core/connection/v1/tx.proto Top
- MsgConnectionOpenAck
+ MsgConnectionOpenAck
MsgConnectionOpenAck defines a msg sent by a Relayer to Chain A to
acknowledge the change of connection state to TRYOPEN on Chain B.
@@ -6088,7 +6088,7 @@ connection.
version
- Version
+ Version
@@ -6102,7 +6102,7 @@ connection.
proof_height
- ibcgo.core.client.v1.Height
+ ibc.core.client.v1.Height
@@ -6131,7 +6131,7 @@ TRYOPEN`
consensus_height
- ibcgo.core.client.v1.Height
+ ibc.core.client.v1.Height
@@ -6150,14 +6150,14 @@ TRYOPEN`
- MsgConnectionOpenAckResponse
+ MsgConnectionOpenAckResponse
MsgConnectionOpenAckResponse defines the Msg/ConnectionOpenAck response type.
- MsgConnectionOpenConfirm
+ MsgConnectionOpenConfirm
MsgConnectionOpenConfirm defines a msg sent by a Relayer to Chain B to
acknowledge the change of connection state to OPEN on Chain A.
@@ -6183,7 +6183,7 @@ TRYOPEN`
proof_height
- ibcgo.core.client.v1.Height
+ ibc.core.client.v1.Height
@@ -6202,14 +6202,14 @@ TRYOPEN`
- MsgConnectionOpenConfirmResponse
+ MsgConnectionOpenConfirmResponse
MsgConnectionOpenConfirmResponse defines the Msg/ConnectionOpenConfirm
response type.
- MsgConnectionOpenInit
+ MsgConnectionOpenInit
MsgConnectionOpenInit defines the msg sent by an account on Chain A to
initialize a connection with Chain B.
@@ -6228,14 +6228,14 @@ TRYOPEN`
counterparty
- Counterparty
+ Counterparty
version
- Version
+ Version
@@ -6261,14 +6261,14 @@ TRYOPEN`
- MsgConnectionOpenInitResponse
+ MsgConnectionOpenInitResponse
MsgConnectionOpenInitResponse defines the Msg/ConnectionOpenInit response
type.
- MsgConnectionOpenTry
+ MsgConnectionOpenTry
MsgConnectionOpenTry defines a msg sent by a Relayer to try to open a
connection on Chain B.
@@ -6302,7 +6302,7 @@ the connection identifier of the previous connection in state INIT
counterparty
- Counterparty
+ Counterparty
@@ -6316,14 +6316,14 @@ the connection identifier of the previous connection in state INIT
counterparty_versions
- Version
+ Version
repeated
proof_height
- ibcgo.core.client.v1.Height
+ ibc.core.client.v1.Height
@@ -6352,7 +6352,7 @@ INIT`
consensus_height
- ibcgo.core.client.v1.Height
+ ibc.core.client.v1.Height
@@ -6371,7 +6371,7 @@ INIT`
- MsgConnectionOpenTryResponse
+ MsgConnectionOpenTryResponse
MsgConnectionOpenTryResponse defines the Msg/ConnectionOpenTry response type.
@@ -6384,7 +6384,7 @@ INIT`
- Msg
+ Msg
Msg defines the ibc/connection Msg service.
@@ -6394,29 +6394,29 @@ INIT`
ConnectionOpenInit
- MsgConnectionOpenInit
- MsgConnectionOpenInitResponse
+ MsgConnectionOpenInit
+ MsgConnectionOpenInitResponse
ConnectionOpenInit defines a rpc handler method for MsgConnectionOpenInit.
ConnectionOpenTry
- MsgConnectionOpenTry
- MsgConnectionOpenTryResponse
+ MsgConnectionOpenTry
+ MsgConnectionOpenTryResponse
ConnectionOpenTry defines a rpc handler method for MsgConnectionOpenTry.
ConnectionOpenAck
- MsgConnectionOpenAck
- MsgConnectionOpenAckResponse
+ MsgConnectionOpenAck
+ MsgConnectionOpenAckResponse
ConnectionOpenAck defines a rpc handler method for MsgConnectionOpenAck.
ConnectionOpenConfirm
- MsgConnectionOpenConfirm
- MsgConnectionOpenConfirmResponse
+ MsgConnectionOpenConfirm
+ MsgConnectionOpenConfirmResponse
ConnectionOpenConfirm defines a rpc handler method for
MsgConnectionOpenConfirm.
@@ -6428,12 +6428,12 @@ MsgConnectionOpenConfirm.
-
ibcgo/core/types/v1/genesis.proto Top
+
ibc/core/types/v1/genesis.proto Top
- GenesisState
+ GenesisState
GenesisState defines the ibc module's genesis state.
@@ -6445,21 +6445,21 @@ MsgConnectionOpenConfirm.
client_genesis
- ibcgo.core.client.v1.GenesisState
+ ibc.core.client.v1.GenesisState
ICS002 - Clients genesis state
connection_genesis
- ibcgo.core.connection.v1.GenesisState
+ ibc.core.connection.v1.GenesisState
ICS003 - Connections genesis state
channel_genesis
- ibcgo.core.channel.v1.GenesisState
+ ibc.core.channel.v1.GenesisState
ICS004 - Channel genesis state
@@ -6480,12 +6480,12 @@ MsgConnectionOpenConfirm.
-
ibcgo/lightclients/localhost/v1/localhost.proto Top
+
ibc/lightclients/localhost/v1/localhost.proto Top
- ClientState
+ ClientState
ClientState defines a loopback (localhost) client. It requires (read-only)
access to keys outside the client prefix.
@@ -6504,7 +6504,7 @@ MsgConnectionOpenConfirm.
height
- ibcgo.core.client.v1.Height
+ ibc.core.client.v1.Height
self latest block height
@@ -6525,12 +6525,12 @@ MsgConnectionOpenConfirm.
-
ibcgo/lightclients/solomachine/v1/solomachine.proto Top
+
ibc/lightclients/solomachine/v1/solomachine.proto Top
- ChannelStateData
+ ChannelStateData
ChannelStateData returns the SignBytes data for channel state
verification.
@@ -6549,7 +6549,7 @@ MsgConnectionOpenConfirm.
channel
- ibcgo.core.channel.v1.Channel
+ ibc.core.channel.v1.Channel
@@ -6561,7 +6561,7 @@ MsgConnectionOpenConfirm.
- ClientState
+ ClientState
ClientState defines a solo machine client that tracks the current consensus
state and if the client is frozen.
@@ -6587,7 +6587,7 @@ MsgConnectionOpenConfirm.
consensus_state
- ConsensusState
+ ConsensusState
@@ -6607,7 +6607,7 @@ The client will be unfrozen if it is frozen.
- ClientStateData
+ ClientStateData
ClientStateData returns the SignBytes data for client state verification.
@@ -6638,7 +6638,7 @@ The client will be unfrozen if it is frozen.
- ConnectionStateData
+ ConnectionStateData
ConnectionStateData returns the SignBytes data for connection state
verification.
@@ -6657,7 +6657,7 @@ The client will be unfrozen if it is frozen.
connection
- ibcgo.core.connection.v1.ConnectionEnd
+ ibc.core.connection.v1.ConnectionEnd
@@ -6669,7 +6669,7 @@ The client will be unfrozen if it is frozen.
- ConsensusState
+ ConsensusState
ConsensusState defines a solo machine consensus state. The sequence of a
consensus state is contained in the "height" key used in storing the
consensus state.
@@ -6709,7 +6709,7 @@ misbehaviour.
- ConsensusStateData
+ ConsensusStateData
ConsensusStateData returns the SignBytes data for consensus state
verification.
@@ -6740,7 +6740,7 @@ misbehaviour.
-
+
Header defines a solo machine consensus header
@@ -6792,7 +6792,7 @@ misbehaviour.
-
+
HeaderData returns the SignBytes data for update verification.
@@ -6823,7 +6823,7 @@ misbehaviour.
- Misbehaviour
+ Misbehaviour
Misbehaviour defines misbehaviour for a solo machine which consists
of a sequence and two signatures over different messages at that sequence.
@@ -6849,14 +6849,14 @@ misbehaviour.
signature_one
- SignatureAndData
+ SignatureAndData
signature_two
- SignatureAndData
+ SignatureAndData
@@ -6868,7 +6868,7 @@ misbehaviour.
- NextSequenceRecvData
+ NextSequenceRecvData
NextSequenceRecvData returns the SignBytes data for verification of the next
sequence to be received.
@@ -6899,7 +6899,7 @@ misbehaviour.
- PacketAcknowledgementData
+ PacketAcknowledgementData
PacketAcknowledgementData returns the SignBytes data for acknowledgement
verification.
@@ -6930,7 +6930,7 @@ misbehaviour.
- PacketCommitmentData
+ PacketCommitmentData
PacketCommitmentData returns the SignBytes data for packet commitment
verification.
@@ -6961,7 +6961,7 @@ misbehaviour.
- PacketReceiptAbsenceData
+ PacketReceiptAbsenceData
PacketReceiptAbsenceData returns the SignBytes data for
packet receipt absence verification.
@@ -6985,7 +6985,7 @@ misbehaviour.
- SignBytes
+ SignBytes
SignBytes defines the signed bytes used for signature verification.
@@ -7018,7 +7018,7 @@ misbehaviour.
data_type
- DataType
+ DataType
type of the data used
@@ -7037,7 +7037,7 @@ misbehaviour.
- SignatureAndData
+ SignatureAndData
SignatureAndData contains a signature and the data signed over to create that
signature.
@@ -7056,7 +7056,7 @@ misbehaviour.
data_type
- DataType
+ DataType
@@ -7082,7 +7082,7 @@ misbehaviour.
- TimestampedSignatureData
+ TimestampedSignatureData
TimestampedSignatureData contains the signature data and the timestamp of the
signature.
@@ -7115,7 +7115,7 @@ misbehaviour.
- DataType
+ DataType
DataType defines the type of solo machine proof being created. This is done
to preserve uniqueness of different data sign byte encodings.
@@ -7193,12 +7193,12 @@ misbehaviour.
-
ibcgo/lightclients/tendermint/v1/tendermint.proto Top
+
ibc/lightclients/tendermint/v1/tendermint.proto Top
- ClientState
+ ClientState
ClientState from Tendermint tracks the current validator set, latest height,
and a possible frozen height.
@@ -7217,7 +7217,7 @@ misbehaviour.
trust_level
- Fraction
+ Fraction
@@ -7246,14 +7246,14 @@ submitted headers are valid for upgrade
frozen_height
- ibcgo.core.client.v1.Height
+ ibc.core.client.v1.Height
Block height when the client was frozen due to a misbehaviour
latest_height
- ibcgo.core.client.v1.Height
+ ibc.core.client.v1.Height
Latest height the client was updated to
@@ -7301,7 +7301,7 @@ whose chain has experienced a misbehaviour event
- ConsensusState
+ ConsensusState
ConsensusState defines the consensus state from Tendermint.
@@ -7321,7 +7321,7 @@ was stored.
root
- ibcgo.core.commitment.v1.MerkleRoot
+ ibc.core.commitment.v1.MerkleRoot
commitment root (i.e app hash)
@@ -7340,7 +7340,7 @@ was stored.
- Fraction
+ Fraction
Fraction defines the protobuf message type for tmmath.Fraction that only
supports positive values.
@@ -7371,7 +7371,7 @@ was stored.
-
+
Header defines the Tendermint client consensus Header.
It encapsulates all the information necessary to update from a trusted
Tendermint ConsensusState. The inclusion of TrustedHeight and
TrustedValidators allows this update to process correctly, so long as the
ConsensusState for the TrustedHeight exists; this removes race conditions
among relayers. The SignedHeader and ValidatorSet are the new untrusted update
fields for the client. The TrustedHeight is the height of a stored
ConsensusState on the client that will be used to verify the new untrusted
header. The Trusted ConsensusState must be within the unbonding period of
current time in order to correctly verify, and the TrustedValidators must
hash to TrustedConsensusState.NextValidatorsHash since that is the last
trusted validator set at the TrustedHeight.
@@ -7397,7 +7397,7 @@ was stored.
trusted_height
- ibcgo.core.client.v1.Height
+ ibc.core.client.v1.Height
@@ -7416,7 +7416,7 @@ was stored.
- Misbehaviour
+ Misbehaviour
Misbehaviour is a wrapper over two conflicting Headers
that implements the Misbehaviour interface expected by ICS-02
@@ -7435,14 +7435,14 @@ was stored.
header_1
- Header
+ Header
header_2
- Header
+ Header
diff --git a/docs/migrations/ibc-migration-043.md b/docs/migrations/ibc-migration-043.md
index 72ae695d..93afc79e 100644
--- a/docs/migrations/ibc-migration-043.md
+++ b/docs/migrations/ibc-migration-043.md
@@ -90,7 +90,4 @@ REST routes are not supported for these proposals.
## Proto file changes
-The protobuf files have change package naming.
-The new package naming begins with `ibcgo` instead of `ibc`.
-
The gRPC querier service endpoints have changed slightly. The previous files used `v1beta1`; this has been updated to `v1`.
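
Not part of the patch: a minimal Go sketch of what a client-side query against the renamed `v1` route looks like. The generated client below resolves to `/ibc.apps.transfer.v1.Query/DenomTrace`, the path shown later in this diff; the module import path, the local gRPC address, and the example hash are assumptions.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"google.golang.org/grpc"

	// Import path assumed for this stage of the repository.
	transfertypes "github.com/cosmos/ibc-go/modules/apps/transfer/types"
)

func main() {
	// Assumes a node exposing its gRPC server on the default port.
	conn, err := grpc.Dial("localhost:9090", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// The generated client invokes "/ibc.apps.transfer.v1.Query/DenomTrace",
	// i.e. the v1 route rather than the former v1beta1 one.
	queryClient := transfertypes.NewQueryClient(conn)
	res, err := queryClient.DenomTrace(context.Background(), &transfertypes.QueryDenomTraceRequest{
		Hash: "27A6394C3F9FF9C9DCF5DFFADF9BB5FE9A37C7E92B006199894CF1824DF9AC7C", // hypothetical trace hash
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(res.DenomTrace)
}
```
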
diff --git a/modules/apps/transfer/types/genesis.pb.go b/modules/apps/transfer/types/genesis.pb.go
index f06c2fb8..b0a9942e 100644
--- a/modules/apps/transfer/types/genesis.pb.go
+++ b/modules/apps/transfer/types/genesis.pb.go
@@ -1,5 +1,5 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: ibcgo/apps/transfer/v1/genesis.proto
+// source: ibc/apps/transfer/v1/genesis.proto
package types
@@ -34,7 +34,7 @@ func (m *GenesisState) Reset() { *m = GenesisState{} }
func (m *GenesisState) String() string { return proto.CompactTextString(m) }
func (*GenesisState) ProtoMessage() {}
func (*GenesisState) Descriptor() ([]byte, []int) {
- return fileDescriptor_19e19f3d07c11479, []int{0}
+ return fileDescriptor_33776620329d43dd, []int{0}
}
func (m *GenesisState) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -85,35 +85,35 @@ func (m *GenesisState) GetParams() Params {
}
func init() {
- proto.RegisterType((*GenesisState)(nil), "ibcgo.apps.transfer.v1.GenesisState")
+ proto.RegisterType((*GenesisState)(nil), "ibc.apps.transfer.v1.GenesisState")
}
func init() {
- proto.RegisterFile("ibcgo/apps/transfer/v1/genesis.proto", fileDescriptor_19e19f3d07c11479)
+ proto.RegisterFile("ibc/apps/transfer/v1/genesis.proto", fileDescriptor_33776620329d43dd)
}
-var fileDescriptor_19e19f3d07c11479 = []byte{
- // 315 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0xc9, 0x4c, 0x4a, 0x4e,
- 0xcf, 0xd7, 0x4f, 0x2c, 0x28, 0x28, 0xd6, 0x2f, 0x29, 0x4a, 0xcc, 0x2b, 0x4e, 0x4b, 0x2d, 0xd2,
- 0x2f, 0x33, 0xd4, 0x4f, 0x4f, 0xcd, 0x4b, 0x2d, 0xce, 0x2c, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9,
- 0x17, 0x12, 0x03, 0xab, 0xd2, 0x03, 0xa9, 0xd2, 0x83, 0xa9, 0xd2, 0x2b, 0x33, 0x94, 0x52, 0xc5,
- 0xa1, 0x1b, 0xae, 0x06, 0xac, 0x5d, 0x4a, 0x24, 0x3d, 0x3f, 0x3d, 0x1f, 0xcc, 0xd4, 0x07, 0xb1,
- 0x20, 0xa2, 0x4a, 0xcf, 0x19, 0xb9, 0x78, 0xdc, 0x21, 0xd6, 0x04, 0x97, 0x24, 0x96, 0xa4, 0x0a,
- 0x69, 0x73, 0xb1, 0x17, 0xe4, 0x17, 0x95, 0xc4, 0x67, 0xa6, 0x48, 0x30, 0x2a, 0x30, 0x6a, 0x70,
- 0x3a, 0x09, 0x7d, 0xba, 0x27, 0xcf, 0x57, 0x99, 0x98, 0x9b, 0x63, 0xa5, 0x04, 0x95, 0x50, 0x0a,
- 0x62, 0x03, 0xb1, 0x3c, 0x53, 0x84, 0x72, 0xb8, 0x78, 0x52, 0x52, 0xf3, 0xf2, 0x73, 0xe3, 0x4b,
- 0x8a, 0x12, 0x93, 0x53, 0x8b, 0x25, 0x98, 0x14, 0x98, 0x35, 0xb8, 0x8d, 0x94, 0xf4, 0xb0, 0xbb,
- 0x54, 0xcf, 0x05, 0xa4, 0x36, 0x04, 0xa4, 0xd4, 0x49, 0xf5, 0xc4, 0x3d, 0x79, 0x86, 0x4f, 0xf7,
- 0xe4, 0x85, 0x21, 0x26, 0x23, 0x9b, 0xa2, 0xb4, 0xea, 0xbe, 0x3c, 0x1b, 0x58, 0x55, 0x71, 0x10,
- 0x77, 0x0a, 0x5c, 0x4b, 0xb1, 0x90, 0x0d, 0x17, 0x5b, 0x41, 0x62, 0x51, 0x62, 0x6e, 0xb1, 0x04,
- 0xb3, 0x02, 0xa3, 0x06, 0xb7, 0x91, 0x1c, 0x2e, 0x7b, 0x02, 0xc0, 0xaa, 0x9c, 0x58, 0x40, 0x76,
- 0x04, 0x41, 0xf5, 0x38, 0xf9, 0x9d, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47,
- 0x72, 0x8c, 0x13, 0x1e, 0xcb, 0x31, 0x5c, 0x78, 0x2c, 0xc7, 0x70, 0xe3, 0xb1, 0x1c, 0x43, 0x94,
- 0x49, 0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x7e, 0x72, 0x7e, 0x71, 0x6e,
- 0x7e, 0xb1, 0x7e, 0x66, 0x52, 0xb2, 0x6e, 0x7a, 0xbe, 0x7e, 0x6e, 0x7e, 0x4a, 0x69, 0x4e, 0x6a,
- 0x31, 0x5a, 0xd8, 0x96, 0x54, 0x16, 0xa4, 0x16, 0x27, 0xb1, 0x81, 0x03, 0xd0, 0x18, 0x10, 0x00,
- 0x00, 0xff, 0xff, 0xfe, 0xa6, 0xd7, 0x5d, 0xbd, 0x01, 0x00, 0x00,
+var fileDescriptor_33776620329d43dd = []byte{
+ // 314 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0xca, 0x4c, 0x4a, 0xd6,
+ 0x4f, 0x2c, 0x28, 0x28, 0xd6, 0x2f, 0x29, 0x4a, 0xcc, 0x2b, 0x4e, 0x4b, 0x2d, 0xd2, 0x2f, 0x33,
+ 0xd4, 0x4f, 0x4f, 0xcd, 0x4b, 0x2d, 0xce, 0x2c, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12,
+ 0xc9, 0x4c, 0x4a, 0xd6, 0x03, 0xa9, 0xd1, 0x83, 0xa9, 0xd1, 0x2b, 0x33, 0x94, 0x52, 0xc6, 0xaa,
+ 0x13, 0xae, 0x02, 0xac, 0x55, 0x4a, 0x24, 0x3d, 0x3f, 0x3d, 0x1f, 0xcc, 0xd4, 0x07, 0xb1, 0x20,
+ 0xa2, 0x4a, 0x8f, 0x19, 0xb9, 0x78, 0xdc, 0x21, 0x56, 0x04, 0x97, 0x24, 0x96, 0xa4, 0x0a, 0x69,
+ 0x73, 0xb1, 0x17, 0xe4, 0x17, 0x95, 0xc4, 0x67, 0xa6, 0x48, 0x30, 0x2a, 0x30, 0x6a, 0x70, 0x3a,
+ 0x09, 0x7d, 0xba, 0x27, 0xcf, 0x57, 0x99, 0x98, 0x9b, 0x63, 0xa5, 0x04, 0x95, 0x50, 0x0a, 0x62,
+ 0x03, 0xb1, 0x3c, 0x53, 0x84, 0xb2, 0xb8, 0x78, 0x52, 0x52, 0xf3, 0xf2, 0x73, 0xe3, 0x4b, 0x8a,
+ 0x12, 0x93, 0x53, 0x8b, 0x25, 0x98, 0x14, 0x98, 0x35, 0xb8, 0x8d, 0x14, 0xf4, 0xb0, 0xb9, 0x52,
+ 0xcf, 0x05, 0xa4, 0x32, 0x04, 0xa4, 0xd0, 0x49, 0xf5, 0xc4, 0x3d, 0x79, 0x86, 0x4f, 0xf7, 0xe4,
+ 0x85, 0x21, 0xe6, 0x22, 0x9b, 0xa1, 0xb4, 0xea, 0xbe, 0x3c, 0x1b, 0x58, 0x55, 0x71, 0x10, 0x77,
+ 0x0a, 0x5c, 0x4b, 0xb1, 0x90, 0x15, 0x17, 0x5b, 0x41, 0x62, 0x51, 0x62, 0x6e, 0xb1, 0x04, 0xb3,
+ 0x02, 0xa3, 0x06, 0xb7, 0x91, 0x0c, 0x76, 0x5b, 0x02, 0xc0, 0x6a, 0x9c, 0x58, 0x40, 0x36, 0x04,
+ 0x41, 0x75, 0x38, 0xf9, 0x9d, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, 0x72,
+ 0x8c, 0x13, 0x1e, 0xcb, 0x31, 0x5c, 0x78, 0x2c, 0xc7, 0x70, 0xe3, 0xb1, 0x1c, 0x43, 0x94, 0x49,
+ 0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x7e, 0x72, 0x7e, 0x71, 0x6e, 0x7e,
+ 0xb1, 0x7e, 0x66, 0x52, 0xb2, 0x6e, 0x7a, 0xbe, 0x7e, 0x6e, 0x7e, 0x4a, 0x69, 0x4e, 0x6a, 0x31,
+ 0x5a, 0xb8, 0x96, 0x54, 0x16, 0xa4, 0x16, 0x27, 0xb1, 0x81, 0x03, 0xcf, 0x18, 0x10, 0x00, 0x00,
+ 0xff, 0xff, 0x48, 0x6d, 0x4a, 0x3d, 0xb3, 0x01, 0x00, 0x00,
}
func (m *GenesisState) Marshal() (dAtA []byte, err error) {
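
As an aside (not in the patch), the effect of the `init()` change above is observable through gogoproto's type registry: the message is now registered under the `ibc.` prefix instead of `ibcgo.`. A minimal sketch, with the import path assumed:

```go
package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"

	// Blank import runs the generated init() registrations; path assumed.
	_ "github.com/cosmos/ibc-go/modules/apps/transfer/types"
)

func main() {
	// Resolvable under the new fully-qualified name after this change...
	fmt.Println(proto.MessageType("ibc.apps.transfer.v1.GenesisState")) // *types.GenesisState
	// ...and no longer resolvable under the old ibcgo.* name.
	fmt.Println(proto.MessageType("ibcgo.apps.transfer.v1.GenesisState")) // <nil>
}
```
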
diff --git a/modules/apps/transfer/types/query.pb.go b/modules/apps/transfer/types/query.pb.go
index bf77c5e7..2b99d429 100644
--- a/modules/apps/transfer/types/query.pb.go
+++ b/modules/apps/transfer/types/query.pb.go
@@ -1,5 +1,5 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: ibcgo/apps/transfer/v1/query.proto
+// source: ibc/apps/transfer/v1/query.proto
package types
@@ -41,7 +41,7 @@ func (m *QueryDenomTraceRequest) Reset() { *m = QueryDenomTraceRequest{}
func (m *QueryDenomTraceRequest) String() string { return proto.CompactTextString(m) }
func (*QueryDenomTraceRequest) ProtoMessage() {}
func (*QueryDenomTraceRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_956e6703e65895ef, []int{0}
+ return fileDescriptor_8bfda59865efaa24, []int{0}
}
func (m *QueryDenomTraceRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -88,7 +88,7 @@ func (m *QueryDenomTraceResponse) Reset() { *m = QueryDenomTraceResponse
func (m *QueryDenomTraceResponse) String() string { return proto.CompactTextString(m) }
func (*QueryDenomTraceResponse) ProtoMessage() {}
func (*QueryDenomTraceResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_956e6703e65895ef, []int{1}
+ return fileDescriptor_8bfda59865efaa24, []int{1}
}
func (m *QueryDenomTraceResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -135,7 +135,7 @@ func (m *QueryDenomTracesRequest) Reset() { *m = QueryDenomTracesRequest
func (m *QueryDenomTracesRequest) String() string { return proto.CompactTextString(m) }
func (*QueryDenomTracesRequest) ProtoMessage() {}
func (*QueryDenomTracesRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_956e6703e65895ef, []int{2}
+ return fileDescriptor_8bfda59865efaa24, []int{2}
}
func (m *QueryDenomTracesRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -184,7 +184,7 @@ func (m *QueryDenomTracesResponse) Reset() { *m = QueryDenomTracesRespon
func (m *QueryDenomTracesResponse) String() string { return proto.CompactTextString(m) }
func (*QueryDenomTracesResponse) ProtoMessage() {}
func (*QueryDenomTracesResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_956e6703e65895ef, []int{3}
+ return fileDescriptor_8bfda59865efaa24, []int{3}
}
func (m *QueryDenomTracesResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -235,7 +235,7 @@ func (m *QueryParamsRequest) Reset() { *m = QueryParamsRequest{} }
func (m *QueryParamsRequest) String() string { return proto.CompactTextString(m) }
func (*QueryParamsRequest) ProtoMessage() {}
func (*QueryParamsRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_956e6703e65895ef, []int{4}
+ return fileDescriptor_8bfda59865efaa24, []int{4}
}
func (m *QueryParamsRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -274,7 +274,7 @@ func (m *QueryParamsResponse) Reset() { *m = QueryParamsResponse{} }
func (m *QueryParamsResponse) String() string { return proto.CompactTextString(m) }
func (*QueryParamsResponse) ProtoMessage() {}
func (*QueryParamsResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_956e6703e65895ef, []int{5}
+ return fileDescriptor_8bfda59865efaa24, []int{5}
}
func (m *QueryParamsResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -311,53 +311,51 @@ func (m *QueryParamsResponse) GetParams() *Params {
}
func init() {
- proto.RegisterType((*QueryDenomTraceRequest)(nil), "ibcgo.apps.transfer.v1.QueryDenomTraceRequest")
- proto.RegisterType((*QueryDenomTraceResponse)(nil), "ibcgo.apps.transfer.v1.QueryDenomTraceResponse")
- proto.RegisterType((*QueryDenomTracesRequest)(nil), "ibcgo.apps.transfer.v1.QueryDenomTracesRequest")
- proto.RegisterType((*QueryDenomTracesResponse)(nil), "ibcgo.apps.transfer.v1.QueryDenomTracesResponse")
- proto.RegisterType((*QueryParamsRequest)(nil), "ibcgo.apps.transfer.v1.QueryParamsRequest")
- proto.RegisterType((*QueryParamsResponse)(nil), "ibcgo.apps.transfer.v1.QueryParamsResponse")
-}
-
-func init() {
- proto.RegisterFile("ibcgo/apps/transfer/v1/query.proto", fileDescriptor_956e6703e65895ef)
-}
-
-var fileDescriptor_956e6703e65895ef = []byte{
- // 525 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0x4d, 0x6f, 0xd3, 0x40,
- 0x10, 0xcd, 0xb6, 0x10, 0x89, 0x09, 0xe2, 0xb0, 0x54, 0x25, 0xb2, 0x2a, 0xb7, 0xb2, 0xca, 0x57,
- 0x0a, 0xbb, 0xb8, 0x20, 0x7e, 0x40, 0x41, 0x70, 0x02, 0x95, 0x08, 0x2e, 0x1c, 0x40, 0x6b, 0x67,
- 0x71, 0x2c, 0xd5, 0x5e, 0xd7, 0xbb, 0x89, 0x54, 0x21, 0x2e, 0x5c, 0xb8, 0x22, 0x71, 0xe5, 0xc0,
- 0x99, 0x9f, 0xc0, 0x2f, 0xe8, 0xb1, 0x52, 0x2f, 0x9c, 0x00, 0x25, 0xfc, 0x10, 0xe4, 0xdd, 0x75,
- 0xe3, 0x90, 0xa4, 0xf1, 0x6d, 0x35, 0x7e, 0xf3, 0xe6, 0xbd, 0x37, 0x23, 0x83, 0x17, 0x07, 0x61,
- 0x24, 0x28, 0xcb, 0x32, 0x49, 0x55, 0xce, 0x52, 0xf9, 0x8e, 0xe7, 0x74, 0xe8, 0xd3, 0xc3, 0x01,
- 0xcf, 0x8f, 0x48, 0x96, 0x0b, 0x25, 0xf0, 0xba, 0xc6, 0x90, 0x02, 0x43, 0x4a, 0x0c, 0x19, 0xfa,
- 0xce, 0x5a, 0x24, 0x22, 0xa1, 0x21, 0xb4, 0x78, 0x19, 0xb4, 0xd3, 0x09, 0x85, 0x4c, 0x84, 0xa4,
- 0x01, 0x93, 0xdc, 0xd0, 0xd0, 0xa1, 0x1f, 0x70, 0xc5, 0x7c, 0x9a, 0xb1, 0x28, 0x4e, 0x99, 0x8a,
- 0x45, 0x6a, 0xb1, 0xd7, 0x17, 0x4c, 0x3f, 0x9b, 0x62, 0x60, 0x1b, 0x91, 0x10, 0xd1, 0x01, 0xa7,
- 0x2c, 0x8b, 0x29, 0x4b, 0x53, 0xa1, 0x34, 0x87, 0x34, 0x5f, 0xbd, 0x3b, 0xb0, 0xfe, 0xa2, 0x18,
- 0xf3, 0x98, 0xa7, 0x22, 0x79, 0x99, 0xb3, 0x90, 0x77, 0xf9, 0xe1, 0x80, 0x4b, 0x85, 0x31, 0x5c,
- 0xe8, 0x33, 0xd9, 0x6f, 0xa3, 0x2d, 0x74, 0xeb, 0x52, 0x57, 0xbf, 0xbd, 0x37, 0x70, 0x6d, 0x06,
- 0x2d, 0x33, 0x91, 0x4a, 0x8e, 0x1f, 0x41, 0xab, 0x57, 0x54, 0xdf, 0xaa, 0xa2, 0xac, 0xbb, 0x5a,
- 0xbb, 0x1e, 0x99, 0xef, 0x9e, 0x54, 0x08, 0xa0, 0x77, 0xf6, 0xf6, 0xd8, 0x0c, 0xbf, 0x2c, 0xe5,
- 0x3c, 0x01, 0x98, 0x24, 0x60, 0xe9, 0x6f, 0x10, 0x13, 0x17, 0x29, 0xe2, 0x22, 0x26, 0x75, 0x1b,
- 0x17, 0xd9, 0x67, 0x51, 0x69, 0xa5, 0x5b, 0xe9, 0xf4, 0x7e, 0x20, 0x68, 0xcf, 0xce, 0xb0, 0x26,
- 0x5e, 0xc1, 0xe5, 0x8a, 0x09, 0xd9, 0x46, 0x5b, 0xab, 0xf5, 0x5c, 0xec, 0x5d, 0x39, 0xfe, 0xb5,
- 0xd9, 0xf8, 0xfe, 0x7b, 0xb3, 0x69, 0x19, 0x5b, 0x13, 0x57, 0x12, 0x3f, 0x9d, 0xd2, 0xbe, 0xa2,
- 0xb5, 0xdf, 0x5c, 0xaa, 0xdd, 0x68, 0x9a, 0x12, 0xbf, 0x06, 0x58, 0x6b, 0xdf, 0x67, 0x39, 0x4b,
- 0xca, 0x68, 0xbc, 0x67, 0x70, 0x75, 0xaa, 0x6a, 0xcd, 0x3c, 0x84, 0x66, 0xa6, 0x2b, 0x36, 0x2d,
- 0x77, 0x91, 0x0d, 0xdb, 0x67, 0xd1, 0xbb, 0xa7, 0xab, 0x70, 0x51, 0xf3, 0xe1, 0x6f, 0x08, 0x60,
- 0xe2, 0x11, 0x93, 0x45, 0x04, 0xf3, 0x2f, 0xc8, 0xa1, 0xb5, 0xf1, 0x46, 0xb1, 0xe7, 0x7f, 0x3c,
- 0xfd, 0xfb, 0x65, 0x65, 0x07, 0xdf, 0xa6, 0x71, 0x10, 0xce, 0x1e, 0x76, 0x75, 0x35, 0xf4, 0x7d,
- 0x71, 0x90, 0x1f, 0xf0, 0x57, 0x04, 0xad, 0xca, 0x26, 0x71, 0xdd, 0x99, 0x65, 0x78, 0xce, 0xbd,
- 0xfa, 0x0d, 0x56, 0x65, 0x47, 0xab, 0xdc, 0xc6, 0xde, 0x72, 0x95, 0xf8, 0x13, 0x82, 0xa6, 0x89,
- 0x17, 0x77, 0xce, 0x1d, 0x34, 0xb5, 0x51, 0x67, 0xa7, 0x16, 0xd6, 0xea, 0xd9, 0xd6, 0x7a, 0x5c,
- 0xbc, 0x31, 0x5f, 0x8f, 0xd9, 0xea, 0xde, 0xf3, 0xe3, 0x91, 0x8b, 0x4e, 0x46, 0x2e, 0xfa, 0x33,
- 0x72, 0xd1, 0xe7, 0xb1, 0xdb, 0x38, 0x19, 0xbb, 0x8d, 0x9f, 0x63, 0xb7, 0xf1, 0xfa, 0x41, 0x14,
- 0xab, 0xfe, 0x20, 0x20, 0xa1, 0x48, 0xa8, 0xfd, 0xfd, 0xc4, 0x41, 0x78, 0x37, 0x12, 0x34, 0x11,
- 0xbd, 0xc1, 0x01, 0x97, 0xff, 0x71, 0xaa, 0xa3, 0x8c, 0xcb, 0xa0, 0xa9, 0xff, 0x1f, 0xf7, 0xff,
- 0x05, 0x00, 0x00, 0xff, 0xff, 0x9c, 0xaf, 0x96, 0x10, 0x04, 0x05, 0x00, 0x00,
+ proto.RegisterType((*QueryDenomTraceRequest)(nil), "ibc.apps.transfer.v1.QueryDenomTraceRequest")
+ proto.RegisterType((*QueryDenomTraceResponse)(nil), "ibc.apps.transfer.v1.QueryDenomTraceResponse")
+ proto.RegisterType((*QueryDenomTracesRequest)(nil), "ibc.apps.transfer.v1.QueryDenomTracesRequest")
+ proto.RegisterType((*QueryDenomTracesResponse)(nil), "ibc.apps.transfer.v1.QueryDenomTracesResponse")
+ proto.RegisterType((*QueryParamsRequest)(nil), "ibc.apps.transfer.v1.QueryParamsRequest")
+ proto.RegisterType((*QueryParamsResponse)(nil), "ibc.apps.transfer.v1.QueryParamsResponse")
+}
+
+func init() { proto.RegisterFile("ibc/apps/transfer/v1/query.proto", fileDescriptor_8bfda59865efaa24) }
+
+var fileDescriptor_8bfda59865efaa24 = []byte{
+ // 523 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x94, 0xc1, 0x6a, 0xd4, 0x40,
+ 0x18, 0xc7, 0x77, 0x5a, 0x5d, 0xf0, 0x5b, 0xf1, 0x30, 0x16, 0x5d, 0xc2, 0x92, 0x2e, 0xb1, 0x68,
+ 0x5b, 0xdb, 0x19, 0x52, 0xfb, 0x02, 0x16, 0xd1, 0x83, 0x20, 0x75, 0xf5, 0x24, 0x82, 0x4c, 0xb2,
+ 0x63, 0x36, 0xd0, 0x64, 0xd2, 0xcc, 0xec, 0x42, 0x11, 0x2f, 0xfa, 0x02, 0x82, 0x17, 0x0f, 0x3e,
+ 0x81, 0x4f, 0xe0, 0x1b, 0xd8, 0x63, 0xc1, 0x8b, 0x27, 0x95, 0x5d, 0x1f, 0x44, 0x32, 0x33, 0xe9,
+ 0x66, 0xdd, 0x60, 0x73, 0x1b, 0x26, 0xff, 0xef, 0xfb, 0x7e, 0xff, 0xff, 0x37, 0x04, 0xfa, 0x71,
+ 0x10, 0x52, 0x96, 0x65, 0x92, 0xaa, 0x9c, 0xa5, 0xf2, 0x35, 0xcf, 0xe9, 0xc4, 0xa7, 0xc7, 0x63,
+ 0x9e, 0x9f, 0x90, 0x2c, 0x17, 0x4a, 0xe0, 0xb5, 0x38, 0x08, 0x49, 0xa1, 0x20, 0xa5, 0x82, 0x4c,
+ 0x7c, 0x67, 0x2d, 0x12, 0x91, 0xd0, 0x02, 0x5a, 0x9c, 0x8c, 0xd6, 0xd9, 0x0e, 0x85, 0x4c, 0x84,
+ 0xa4, 0x01, 0x93, 0xdc, 0x34, 0xa1, 0x13, 0x3f, 0xe0, 0x8a, 0xf9, 0x34, 0x63, 0x51, 0x9c, 0x32,
+ 0x15, 0x8b, 0xd4, 0x6a, 0x6f, 0xd5, 0x4e, 0x3e, 0x9f, 0x61, 0x44, 0xbd, 0x48, 0x88, 0xe8, 0x88,
+ 0x53, 0x96, 0xc5, 0x94, 0xa5, 0xa9, 0x50, 0xba, 0x83, 0x34, 0x5f, 0xbd, 0x1d, 0xb8, 0xf1, 0xb4,
+ 0x18, 0xf2, 0x80, 0xa7, 0x22, 0x79, 0x9e, 0xb3, 0x90, 0x0f, 0xf8, 0xf1, 0x98, 0x4b, 0x85, 0x31,
+ 0x5c, 0x1a, 0x31, 0x39, 0xea, 0xa2, 0x3e, 0xda, 0xbc, 0x32, 0xd0, 0x67, 0xef, 0x25, 0xdc, 0x5c,
+ 0x52, 0xcb, 0x4c, 0xa4, 0x92, 0xe3, 0xfb, 0xd0, 0x19, 0x16, 0xb7, 0xaf, 0x54, 0x71, 0xad, 0xab,
+ 0x3a, 0x7b, 0x7d, 0x52, 0xe7, 0x9c, 0x54, 0xca, 0x61, 0x78, 0x7e, 0xf6, 0xd8, 0x52, 0x77, 0x59,
+ 0xc2, 0x3c, 0x04, 0x98, 0xbb, 0xb7, 0xcd, 0x6f, 0x13, 0x13, 0x15, 0x29, 0xa2, 0x22, 0x26, 0x6f,
+ 0x1b, 0x15, 0x39, 0x64, 0x51, 0x69, 0x64, 0x50, 0xa9, 0xf4, 0xbe, 0x22, 0xe8, 0x2e, 0xcf, 0xb0,
+ 0x16, 0x9e, 0xc1, 0xd5, 0x8a, 0x05, 0xd9, 0x45, 0xfd, 0xd5, 0x26, 0x1e, 0x0e, 0xae, 0x9d, 0xfe,
+ 0x5c, 0x6f, 0x7d, 0xf9, 0xb5, 0xde, 0xb6, 0xfd, 0x3a, 0x73, 0x4f, 0x12, 0x3f, 0x5a, 0x20, 0x5f,
+ 0xd1, 0xe4, 0x77, 0x2e, 0x24, 0x37, 0x44, 0x0b, 0xe8, 0x6b, 0x80, 0x35, 0xf9, 0x21, 0xcb, 0x59,
+ 0x52, 0x06, 0xe3, 0x3d, 0x86, 0xeb, 0x0b, 0xb7, 0xd6, 0xca, 0x3e, 0xb4, 0x33, 0x7d, 0x63, 0xb3,
+ 0xea, 0xd5, 0x9b, 0xb0, 0x55, 0x56, 0xbb, 0xf7, 0x6d, 0x15, 0x2e, 0xeb, 0x6e, 0xf8, 0x33, 0x02,
+ 0x98, 0x3b, 0xc4, 0x3b, 0xf5, 0xe5, 0xf5, 0x2f, 0xc7, 0xd9, 0x6d, 0xa8, 0x36, 0xac, 0x9e, 0xff,
+ 0xee, 0xfb, 0x9f, 0x8f, 0x2b, 0x77, 0xf1, 0x16, 0xad, 0x7d, 0xce, 0xd5, 0x95, 0xd0, 0x37, 0xc5,
+ 0x33, 0x7c, 0x8b, 0x3f, 0x21, 0xe8, 0x54, 0x36, 0x88, 0x9b, 0x4d, 0x2c, 0x43, 0x73, 0x48, 0x53,
+ 0xb9, 0x25, 0xdc, 0xd6, 0x84, 0x1b, 0xd8, 0xbb, 0x98, 0x10, 0xbf, 0x47, 0xd0, 0x36, 0xb1, 0xe2,
+ 0xcd, 0xff, 0x8c, 0x59, 0xd8, 0xa2, 0xb3, 0xd5, 0x40, 0x69, 0x59, 0x36, 0x34, 0x8b, 0x8b, 0x7b,
+ 0xf5, 0x2c, 0x66, 0x93, 0x07, 0x4f, 0x4e, 0xa7, 0x2e, 0x3a, 0x9b, 0xba, 0xe8, 0xf7, 0xd4, 0x45,
+ 0x1f, 0x66, 0x6e, 0xeb, 0x6c, 0xe6, 0xb6, 0x7e, 0xcc, 0xdc, 0xd6, 0x8b, 0xfd, 0x28, 0x56, 0xa3,
+ 0x71, 0x40, 0x42, 0x91, 0x50, 0xfb, 0xab, 0x89, 0x83, 0x70, 0x37, 0x12, 0x34, 0x11, 0xc3, 0xf1,
+ 0x11, 0x97, 0xff, 0xf4, 0x54, 0x27, 0x19, 0x97, 0x41, 0x5b, 0xff, 0x2d, 0xee, 0xfd, 0x0d, 0x00,
+ 0x00, 0xff, 0xff, 0xf9, 0xce, 0xc4, 0xab, 0xec, 0x04, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
@@ -390,7 +388,7 @@ func NewQueryClient(cc grpc1.ClientConn) QueryClient {
func (c *queryClient) DenomTrace(ctx context.Context, in *QueryDenomTraceRequest, opts ...grpc.CallOption) (*QueryDenomTraceResponse, error) {
out := new(QueryDenomTraceResponse)
- err := c.cc.Invoke(ctx, "/ibcgo.apps.transfer.v1.Query/DenomTrace", in, out, opts...)
+ err := c.cc.Invoke(ctx, "/ibc.apps.transfer.v1.Query/DenomTrace", in, out, opts...)
if err != nil {
return nil, err
}
@@ -399,7 +397,7 @@ func (c *queryClient) DenomTrace(ctx context.Context, in *QueryDenomTraceRequest
func (c *queryClient) DenomTraces(ctx context.Context, in *QueryDenomTracesRequest, opts ...grpc.CallOption) (*QueryDenomTracesResponse, error) {
out := new(QueryDenomTracesResponse)
- err := c.cc.Invoke(ctx, "/ibcgo.apps.transfer.v1.Query/DenomTraces", in, out, opts...)
+ err := c.cc.Invoke(ctx, "/ibc.apps.transfer.v1.Query/DenomTraces", in, out, opts...)
if err != nil {
return nil, err
}
@@ -408,7 +406,7 @@ func (c *queryClient) DenomTraces(ctx context.Context, in *QueryDenomTracesReque
func (c *queryClient) Params(ctx context.Context, in *QueryParamsRequest, opts ...grpc.CallOption) (*QueryParamsResponse, error) {
out := new(QueryParamsResponse)
- err := c.cc.Invoke(ctx, "/ibcgo.apps.transfer.v1.Query/Params", in, out, opts...)
+ err := c.cc.Invoke(ctx, "/ibc.apps.transfer.v1.Query/Params", in, out, opts...)
if err != nil {
return nil, err
}
@@ -453,7 +451,7 @@ func _Query_DenomTrace_Handler(srv interface{}, ctx context.Context, dec func(in
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/ibcgo.apps.transfer.v1.Query/DenomTrace",
+ FullMethod: "/ibc.apps.transfer.v1.Query/DenomTrace",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(QueryServer).DenomTrace(ctx, req.(*QueryDenomTraceRequest))
@@ -471,7 +469,7 @@ func _Query_DenomTraces_Handler(srv interface{}, ctx context.Context, dec func(i
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/ibcgo.apps.transfer.v1.Query/DenomTraces",
+ FullMethod: "/ibc.apps.transfer.v1.Query/DenomTraces",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(QueryServer).DenomTraces(ctx, req.(*QueryDenomTracesRequest))
@@ -489,7 +487,7 @@ func _Query_Params_Handler(srv interface{}, ctx context.Context, dec func(interf
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/ibcgo.apps.transfer.v1.Query/Params",
+ FullMethod: "/ibc.apps.transfer.v1.Query/Params",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(QueryServer).Params(ctx, req.(*QueryParamsRequest))
@@ -498,7 +496,7 @@ func _Query_Params_Handler(srv interface{}, ctx context.Context, dec func(interf
}
var _Query_serviceDesc = grpc.ServiceDesc{
- ServiceName: "ibcgo.apps.transfer.v1.Query",
+ ServiceName: "ibc.apps.transfer.v1.Query",
HandlerType: (*QueryServer)(nil),
Methods: []grpc.MethodDesc{
{
@@ -515,7 +513,7 @@ var _Query_serviceDesc = grpc.ServiceDesc{
},
},
Streams: []grpc.StreamDesc{},
- Metadata: "ibcgo/apps/transfer/v1/query.proto",
+ Metadata: "ibc/apps/transfer/v1/query.proto",
}
func (m *QueryDenomTraceRequest) Marshal() (dAtA []byte, err error) {
diff --git a/modules/apps/transfer/types/query.pb.gw.go b/modules/apps/transfer/types/query.pb.gw.go
index 4333649f..b8d2eb74 100644
--- a/modules/apps/transfer/types/query.pb.gw.go
+++ b/modules/apps/transfer/types/query.pb.gw.go
@@ -1,5 +1,5 @@
// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
-// source: ibcgo/apps/transfer/v1/query.proto
+// source: ibc/apps/transfer/v1/query.proto
/*
Package types is a reverse proxy.
diff --git a/modules/apps/transfer/types/transfer.pb.go b/modules/apps/transfer/types/transfer.pb.go
index 64bfef49..7bd2109e 100644
--- a/modules/apps/transfer/types/transfer.pb.go
+++ b/modules/apps/transfer/types/transfer.pb.go
@@ -1,5 +1,5 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: ibcgo/apps/transfer/v1/transfer.proto
+// source: ibc/apps/transfer/v1/transfer.proto
package types
@@ -41,7 +41,7 @@ func (m *FungibleTokenPacketData) Reset() { *m = FungibleTokenPacketData
func (m *FungibleTokenPacketData) String() string { return proto.CompactTextString(m) }
func (*FungibleTokenPacketData) ProtoMessage() {}
func (*FungibleTokenPacketData) Descriptor() ([]byte, []int) {
- return fileDescriptor_0cd9e010e90bbec6, []int{0}
+ return fileDescriptor_1df6ef24f87610d6, []int{0}
}
func (m *FungibleTokenPacketData) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -112,7 +112,7 @@ func (m *DenomTrace) Reset() { *m = DenomTrace{} }
func (m *DenomTrace) String() string { return proto.CompactTextString(m) }
func (*DenomTrace) ProtoMessage() {}
func (*DenomTrace) Descriptor() ([]byte, []int) {
- return fileDescriptor_0cd9e010e90bbec6, []int{1}
+ return fileDescriptor_1df6ef24f87610d6, []int{1}
}
func (m *DenomTrace) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -172,7 +172,7 @@ func (m *Params) Reset() { *m = Params{} }
func (m *Params) String() string { return proto.CompactTextString(m) }
func (*Params) ProtoMessage() {}
func (*Params) Descriptor() ([]byte, []int) {
- return fileDescriptor_0cd9e010e90bbec6, []int{2}
+ return fileDescriptor_1df6ef24f87610d6, []int{2}
}
func (m *Params) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -216,40 +216,40 @@ func (m *Params) GetReceiveEnabled() bool {
}
func init() {
- proto.RegisterType((*FungibleTokenPacketData)(nil), "ibcgo.apps.transfer.v1.FungibleTokenPacketData")
- proto.RegisterType((*DenomTrace)(nil), "ibcgo.apps.transfer.v1.DenomTrace")
- proto.RegisterType((*Params)(nil), "ibcgo.apps.transfer.v1.Params")
+ proto.RegisterType((*FungibleTokenPacketData)(nil), "ibc.apps.transfer.v1.FungibleTokenPacketData")
+ proto.RegisterType((*DenomTrace)(nil), "ibc.apps.transfer.v1.DenomTrace")
+ proto.RegisterType((*Params)(nil), "ibc.apps.transfer.v1.Params")
}
func init() {
- proto.RegisterFile("ibcgo/apps/transfer/v1/transfer.proto", fileDescriptor_0cd9e010e90bbec6)
+ proto.RegisterFile("ibc/apps/transfer/v1/transfer.proto", fileDescriptor_1df6ef24f87610d6)
}
-var fileDescriptor_0cd9e010e90bbec6 = []byte{
- // 358 bytes of a gzipped FileDescriptorProto
+var fileDescriptor_1df6ef24f87610d6 = []byte{
+ // 357 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0x91, 0xc1, 0x6a, 0xea, 0x40,
- 0x14, 0x86, 0x8d, 0xd7, 0x2b, 0x3a, 0xf7, 0x72, 0x2f, 0x4c, 0x45, 0x83, 0xd0, 0x28, 0x81, 0x82,
- 0x9b, 0x26, 0x48, 0xbb, 0x72, 0x53, 0xb0, 0xb6, 0xcb, 0x22, 0xc1, 0x55, 0x37, 0x32, 0x49, 0x4e,
- 0x63, 0x30, 0x99, 0x09, 0x33, 0x13, 0x41, 0xfa, 0x04, 0xdd, 0xf5, 0xb1, 0xba, 0x74, 0xd9, 0x95,
- 0x14, 0x7d, 0x03, 0x9f, 0xa0, 0xcc, 0x24, 0x84, 0xe2, 0xee, 0xfc, 0xe7, 0xff, 0xfe, 0x73, 0x0e,
- 0x1c, 0x74, 0x15, 0xfb, 0x41, 0xc4, 0x5c, 0x92, 0x65, 0xc2, 0x95, 0x9c, 0x50, 0xf1, 0x02, 0xdc,
- 0xdd, 0x8c, 0xab, 0xda, 0xc9, 0x38, 0x93, 0x0c, 0x77, 0x35, 0xe6, 0x28, 0xcc, 0xa9, 0xac, 0xcd,
- 0xb8, 0xdf, 0x89, 0x58, 0xc4, 0x34, 0xe2, 0xaa, 0xaa, 0xa0, 0xed, 0x57, 0xd4, 0x7b, 0xcc, 0x69,
- 0x14, 0xfb, 0x09, 0x2c, 0xd8, 0x1a, 0xe8, 0x9c, 0x04, 0x6b, 0x90, 0x33, 0x22, 0x09, 0xee, 0xa0,
- 0xdf, 0x21, 0x50, 0x96, 0x9a, 0xc6, 0xd0, 0x18, 0xb5, 0xbd, 0x42, 0xe0, 0x2e, 0x6a, 0x92, 0x94,
- 0xe5, 0x54, 0x9a, 0xf5, 0xa1, 0x31, 0x6a, 0x78, 0xa5, 0x52, 0x7d, 0x01, 0x34, 0x04, 0x6e, 0xfe,
- 0xd2, 0x78, 0xa9, 0x70, 0x1f, 0xb5, 0x38, 0x04, 0x10, 0x6f, 0x80, 0x9b, 0x0d, 0xed, 0x54, 0xda,
- 0xbe, 0x43, 0x68, 0xa6, 0x86, 0x2e, 0x38, 0x09, 0x00, 0x63, 0xd4, 0xc8, 0x88, 0x5c, 0x95, 0xeb,
- 0x74, 0x8d, 0x2f, 0x11, 0xf2, 0x89, 0x80, 0x65, 0x71, 0x48, 0x5d, 0x3b, 0x6d, 0xd5, 0xd1, 0x39,
- 0xfb, 0xcd, 0x40, 0xcd, 0x39, 0xe1, 0x24, 0x15, 0x78, 0x82, 0xfe, 0xaa, 0x8d, 0x4b, 0xa0, 0xc4,
- 0x4f, 0x20, 0xd4, 0x53, 0x5a, 0xd3, 0xde, 0x69, 0x3f, 0xb8, 0xd8, 0x92, 0x34, 0x99, 0xd8, 0x3f,
- 0x5d, 0xdb, 0xfb, 0xa3, 0xe4, 0x43, 0xa1, 0xf0, 0x3d, 0xfa, 0x5f, 0xde, 0x54, 0xc5, 0xeb, 0x3a,
- 0xde, 0x3f, 0xed, 0x07, 0xdd, 0x22, 0x7e, 0x06, 0xd8, 0xde, 0xbf, 0xb2, 0x53, 0x0e, 0x99, 0x3e,
- 0x7d, 0x1c, 0x2c, 0x63, 0x77, 0xb0, 0x8c, 0xaf, 0x83, 0x65, 0xbc, 0x1f, 0xad, 0xda, 0xee, 0x68,
- 0xd5, 0x3e, 0x8f, 0x56, 0xed, 0xf9, 0x36, 0x8a, 0xe5, 0x2a, 0xf7, 0x9d, 0x80, 0xa5, 0x6e, 0xc0,
- 0x44, 0xca, 0x84, 0x1b, 0xfb, 0xc1, 0x75, 0xc4, 0xdc, 0x94, 0x85, 0x79, 0x02, 0xe2, 0xec, 0xa7,
- 0x72, 0x9b, 0x81, 0xf0, 0x9b, 0xfa, 0x41, 0x37, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x9b, 0x74,
- 0xa8, 0xf3, 0xf7, 0x01, 0x00, 0x00,
+ 0x14, 0x86, 0x8d, 0xd7, 0x2b, 0x3a, 0xf7, 0x72, 0x2f, 0x4c, 0x45, 0x83, 0xd0, 0x28, 0xe9, 0xc6,
+ 0x4d, 0x13, 0xa4, 0x5d, 0xb9, 0x29, 0x58, 0xdb, 0x65, 0x91, 0xe0, 0xaa, 0x1b, 0x99, 0x99, 0x9c,
+ 0xc6, 0x60, 0x92, 0x09, 0x33, 0x13, 0x41, 0xfa, 0x04, 0xdd, 0xf5, 0xb1, 0xba, 0x74, 0xd9, 0x95,
+ 0x14, 0x7d, 0x03, 0x9f, 0xa0, 0x64, 0x12, 0x42, 0x71, 0x77, 0xbe, 0xf3, 0xff, 0xff, 0x39, 0x07,
+ 0x0e, 0xba, 0x0a, 0x29, 0x73, 0x49, 0x9a, 0x4a, 0x57, 0x09, 0x92, 0xc8, 0x17, 0x10, 0xee, 0x66,
+ 0x5c, 0xd5, 0x4e, 0x2a, 0xb8, 0xe2, 0xb8, 0x13, 0x52, 0xe6, 0xe4, 0x26, 0xa7, 0x12, 0x36, 0xe3,
+ 0x7e, 0x27, 0xe0, 0x01, 0xd7, 0x06, 0x37, 0xaf, 0x0a, 0xaf, 0xfd, 0x8a, 0x7a, 0x8f, 0x59, 0x12,
+ 0x84, 0x34, 0x82, 0x05, 0x5f, 0x43, 0x32, 0x27, 0x6c, 0x0d, 0x6a, 0x46, 0x14, 0xc1, 0x1d, 0xf4,
+ 0xdb, 0x87, 0x84, 0xc7, 0xa6, 0x31, 0x34, 0x46, 0x6d, 0xaf, 0x00, 0xdc, 0x45, 0x4d, 0x12, 0xf3,
+ 0x2c, 0x51, 0x66, 0x7d, 0x68, 0x8c, 0x1a, 0x5e, 0x49, 0x79, 0x5f, 0x42, 0xe2, 0x83, 0x30, 0x7f,
+ 0x69, 0x7b, 0x49, 0xb8, 0x8f, 0x5a, 0x02, 0x18, 0x84, 0x1b, 0x10, 0x66, 0x43, 0x2b, 0x15, 0xdb,
+ 0x77, 0x08, 0xcd, 0xf2, 0xa1, 0x0b, 0x41, 0x18, 0x60, 0x8c, 0x1a, 0x29, 0x51, 0xab, 0x72, 0x9d,
+ 0xae, 0xf1, 0x25, 0x42, 0x94, 0x48, 0x58, 0x16, 0x87, 0xd4, 0xb5, 0xd2, 0xce, 0x3b, 0x3a, 0x67,
+ 0xbf, 0x19, 0xa8, 0x39, 0x27, 0x82, 0xc4, 0x12, 0x4f, 0xd0, 0xdf, 0x7c, 0xe3, 0x12, 0x12, 0x42,
+ 0x23, 0xf0, 0xf5, 0x94, 0xd6, 0xb4, 0x77, 0xda, 0x0f, 0x2e, 0xb6, 0x24, 0x8e, 0x26, 0xf6, 0x4f,
+ 0xd5, 0xf6, 0xfe, 0xe4, 0xf8, 0x50, 0x10, 0xbe, 0x47, 0xff, 0xcb, 0x9b, 0xaa, 0x78, 0x5d, 0xc7,
+ 0xfb, 0xa7, 0xfd, 0xa0, 0x5b, 0xc4, 0xcf, 0x0c, 0xb6, 0xf7, 0xaf, 0xec, 0x94, 0x43, 0xa6, 0x4f,
+ 0x1f, 0x07, 0xcb, 0xd8, 0x1d, 0x2c, 0xe3, 0xeb, 0x60, 0x19, 0xef, 0x47, 0xab, 0xb6, 0x3b, 0x5a,
+ 0xb5, 0xcf, 0xa3, 0x55, 0x7b, 0xbe, 0x0d, 0x42, 0xb5, 0xca, 0xa8, 0xc3, 0x78, 0xec, 0x32, 0x2e,
+ 0x63, 0x2e, 0xdd, 0x90, 0xb2, 0xeb, 0x80, 0xbb, 0x31, 0xf7, 0xb3, 0x08, 0xe4, 0xd9, 0x47, 0xd5,
+ 0x36, 0x05, 0x49, 0x9b, 0xfa, 0x41, 0x37, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xc3, 0x53, 0x27,
+ 0x2c, 0xf3, 0x01, 0x00, 0x00,
}
func (m *FungibleTokenPacketData) Marshal() (dAtA []byte, err error) {
diff --git a/modules/apps/transfer/types/tx.pb.go b/modules/apps/transfer/types/tx.pb.go
index 8388059f..2cfb9f6e 100644
--- a/modules/apps/transfer/types/tx.pb.go
+++ b/modules/apps/transfer/types/tx.pb.go
@@ -1,5 +1,5 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: ibcgo/apps/transfer/v1/tx.proto
+// source: ibc/apps/transfer/v1/tx.proto
package types
@@ -56,7 +56,7 @@ func (m *MsgTransfer) Reset() { *m = MsgTransfer{} }
func (m *MsgTransfer) String() string { return proto.CompactTextString(m) }
func (*MsgTransfer) ProtoMessage() {}
func (*MsgTransfer) Descriptor() ([]byte, []int) {
- return fileDescriptor_4ca3945bed527d36, []int{0}
+ return fileDescriptor_05d96e007505da4e, []int{0}
}
func (m *MsgTransfer) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -93,7 +93,7 @@ func (m *MsgTransferResponse) Reset() { *m = MsgTransferResponse{} }
func (m *MsgTransferResponse) String() string { return proto.CompactTextString(m) }
func (*MsgTransferResponse) ProtoMessage() {}
func (*MsgTransferResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_4ca3945bed527d36, []int{1}
+ return fileDescriptor_05d96e007505da4e, []int{1}
}
func (m *MsgTransferResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -123,45 +123,45 @@ func (m *MsgTransferResponse) XXX_DiscardUnknown() {
var xxx_messageInfo_MsgTransferResponse proto.InternalMessageInfo
func init() {
- proto.RegisterType((*MsgTransfer)(nil), "ibcgo.apps.transfer.v1.MsgTransfer")
- proto.RegisterType((*MsgTransferResponse)(nil), "ibcgo.apps.transfer.v1.MsgTransferResponse")
-}
-
-func init() { proto.RegisterFile("ibcgo/apps/transfer/v1/tx.proto", fileDescriptor_4ca3945bed527d36) }
-
-var fileDescriptor_4ca3945bed527d36 = []byte{
- // 485 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0x41, 0x6f, 0xd3, 0x30,
- 0x14, 0xc7, 0x13, 0xda, 0x95, 0xe2, 0x6a, 0x13, 0x18, 0x56, 0x65, 0xd5, 0x48, 0x4a, 0xb8, 0x54,
- 0x42, 0xd8, 0xca, 0x00, 0x21, 0xed, 0x84, 0xb2, 0x0b, 0x1c, 0x86, 0x50, 0xb4, 0x13, 0x42, 0x9a,
- 0x12, 0xcf, 0xa4, 0x11, 0x4d, 0x5e, 0x64, 0xbb, 0x11, 0xfb, 0x06, 0x1c, 0xf9, 0x08, 0xfb, 0x00,
- 0x7c, 0x90, 0x1d, 0x77, 0xe4, 0x54, 0xa1, 0xf6, 0xc2, 0xb9, 0x9f, 0x00, 0x25, 0x76, 0x4b, 0x8b,
- 0x38, 0xec, 0x64, 0xbf, 0xf7, 0xff, 0x3d, 0xff, 0xf5, 0x9e, 0x1f, 0xf2, 0xb2, 0x84, 0xa5, 0x40,
- 0xe3, 0xb2, 0x94, 0x54, 0x89, 0xb8, 0x90, 0x9f, 0xb9, 0xa0, 0x55, 0x40, 0xd5, 0x57, 0x52, 0x0a,
- 0x50, 0x80, 0xfb, 0x0d, 0x40, 0x6a, 0x80, 0xac, 0x00, 0x52, 0x05, 0x83, 0x47, 0x29, 0xa4, 0xd0,
- 0x20, 0xb4, 0xbe, 0x69, 0x7a, 0xe0, 0x32, 0x90, 0x39, 0x48, 0x9a, 0xc4, 0x92, 0xd3, 0x2a, 0x48,
- 0xb8, 0x8a, 0x03, 0xca, 0x20, 0x2b, 0x8c, 0xfe, 0x44, 0xdb, 0x31, 0x10, 0x9c, 0xb2, 0x49, 0xc6,
- 0x0b, 0x55, 0x9b, 0xe9, 0x9b, 0x46, 0xfc, 0x1f, 0x2d, 0xd4, 0x3b, 0x95, 0xe9, 0x99, 0xf1, 0xc2,
- 0xaf, 0x51, 0x4f, 0xc2, 0x54, 0x30, 0x7e, 0x5e, 0x82, 0x50, 0x8e, 0x3d, 0xb4, 0x47, 0xf7, 0xc2,
- 0xfe, 0x72, 0xe6, 0xe1, 0xcb, 0x38, 0x9f, 0x1c, 0xfb, 0x1b, 0xa2, 0x1f, 0x21, 0x1d, 0x7d, 0x00,
- 0xa1, 0xf0, 0x1b, 0xb4, 0x67, 0x34, 0x36, 0x8e, 0x8b, 0x82, 0x4f, 0x9c, 0x3b, 0x4d, 0xed, 0xc1,
- 0x72, 0xe6, 0xed, 0x6f, 0xd5, 0x1a, 0xdd, 0x8f, 0x76, 0x75, 0xe2, 0x44, 0xc7, 0xf8, 0x15, 0xda,
- 0x51, 0xf0, 0x85, 0x17, 0x4e, 0x6b, 0x68, 0x8f, 0x7a, 0x47, 0x07, 0x44, 0x77, 0x47, 0xea, 0xee,
- 0x88, 0xe9, 0x8e, 0x9c, 0x40, 0x56, 0x84, 0xed, 0xeb, 0x99, 0x67, 0x45, 0x9a, 0xc6, 0x7d, 0xd4,
- 0x91, 0xbc, 0xb8, 0xe0, 0xc2, 0x69, 0xd7, 0x86, 0x91, 0x89, 0xf0, 0x00, 0x75, 0x05, 0x67, 0x3c,
- 0xab, 0xb8, 0x70, 0x76, 0x1a, 0x65, 0x1d, 0xe3, 0x04, 0xed, 0xa9, 0x2c, 0xe7, 0x30, 0x55, 0xe7,
- 0x63, 0x9e, 0xa5, 0x63, 0xe5, 0x74, 0x1a, 0xcf, 0x43, 0xa2, 0xe7, 0x5f, 0x4f, 0x8c, 0x98, 0x39,
- 0x55, 0x01, 0x79, 0xdb, 0x30, 0xe1, 0xe3, 0xda, 0xf6, 0x6f, 0x3b, 0xdb, 0x2f, 0xf8, 0xd1, 0xae,
- 0x49, 0x68, 0x1a, 0xbf, 0x43, 0x0f, 0x56, 0x44, 0x7d, 0x4a, 0x15, 0xe7, 0xa5, 0x73, 0x77, 0x68,
- 0x8f, 0xda, 0xe1, 0xe1, 0x72, 0xe6, 0x39, 0xdb, 0x8f, 0xac, 0x11, 0x3f, 0xba, 0x6f, 0x72, 0x67,
- 0xab, 0xd4, 0x71, 0xf7, 0xdb, 0x95, 0x67, 0xfd, 0xbe, 0xf2, 0x2c, 0x7f, 0x1f, 0x3d, 0xdc, 0xf8,
- 0xad, 0x88, 0xcb, 0x12, 0x0a, 0xc9, 0x8f, 0x18, 0x6a, 0x9d, 0xca, 0x14, 0x7f, 0x42, 0xdd, 0xf5,
- 0x47, 0x3e, 0x25, 0xff, 0x5f, 0x25, 0xb2, 0x51, 0x3f, 0x78, 0x76, 0x0b, 0x68, 0x65, 0x12, 0xbe,
- 0xbf, 0x9e, 0xbb, 0xf6, 0xcd, 0xdc, 0xb5, 0x7f, 0xcd, 0x5d, 0xfb, 0xfb, 0xc2, 0xb5, 0x6e, 0x16,
- 0xae, 0xf5, 0x73, 0xe1, 0x5a, 0x1f, 0x5f, 0xa6, 0x99, 0x1a, 0x4f, 0x13, 0xc2, 0x20, 0xa7, 0x66,
- 0x25, 0xb3, 0x84, 0x3d, 0x4f, 0x81, 0xe6, 0x70, 0x31, 0x9d, 0x70, 0xf9, 0xcf, 0xc6, 0xab, 0xcb,
- 0x92, 0xcb, 0xa4, 0xd3, 0x6c, 0xe0, 0x8b, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xb0, 0x7e, 0x3d,
- 0x25, 0x15, 0x03, 0x00, 0x00,
+ proto.RegisterType((*MsgTransfer)(nil), "ibc.apps.transfer.v1.MsgTransfer")
+ proto.RegisterType((*MsgTransferResponse)(nil), "ibc.apps.transfer.v1.MsgTransferResponse")
+}
+
+func init() { proto.RegisterFile("ibc/apps/transfer/v1/tx.proto", fileDescriptor_05d96e007505da4e) }
+
+var fileDescriptor_05d96e007505da4e = []byte{
+ // 483 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x92, 0x3f, 0x6f, 0xd3, 0x40,
+ 0x18, 0xc6, 0x6d, 0x92, 0x86, 0x70, 0x51, 0x2b, 0x38, 0xda, 0xca, 0x8d, 0xa8, 0x1d, 0x3c, 0x85,
+ 0x81, 0x3b, 0xa5, 0x80, 0x90, 0x3a, 0xa1, 0x74, 0x81, 0xa1, 0x08, 0x59, 0x1d, 0x10, 0x4b, 0xb0,
+ 0xaf, 0x2f, 0xce, 0x89, 0xd8, 0x67, 0xdd, 0x5d, 0x2c, 0xfa, 0x0d, 0x18, 0xf9, 0x08, 0x9d, 0xf9,
+ 0x24, 0x1d, 0x3b, 0x32, 0x45, 0x28, 0x59, 0x98, 0xf3, 0x09, 0xd0, 0xd9, 0x97, 0x90, 0x20, 0x24,
+ 0x26, 0xdf, 0xfb, 0x3e, 0xbf, 0xd7, 0x8f, 0xde, 0x3f, 0xe8, 0x98, 0x27, 0x8c, 0xc6, 0x45, 0xa1,
+ 0xa8, 0x96, 0x71, 0xae, 0x3e, 0x81, 0xa4, 0xe5, 0x80, 0xea, 0x2f, 0xa4, 0x90, 0x42, 0x0b, 0xbc,
+ 0xcf, 0x13, 0x46, 0x8c, 0x4c, 0x56, 0x32, 0x29, 0x07, 0xdd, 0xfd, 0x54, 0xa4, 0xa2, 0x02, 0xa8,
+ 0x79, 0xd5, 0x6c, 0xd7, 0x67, 0x42, 0x65, 0x42, 0xd1, 0x24, 0x56, 0x40, 0xcb, 0x41, 0x02, 0x3a,
+ 0x1e, 0x50, 0x26, 0x78, 0x6e, 0xf5, 0xc0, 0x58, 0x31, 0x21, 0x81, 0xb2, 0x09, 0x87, 0x5c, 0x1b,
+ 0xa3, 0xfa, 0x55, 0x03, 0xe1, 0xf7, 0x06, 0xea, 0x9c, 0xab, 0xf4, 0xc2, 0x3a, 0xe1, 0x97, 0xa8,
+ 0xa3, 0xc4, 0x54, 0x32, 0x18, 0x15, 0x42, 0x6a, 0xcf, 0xed, 0xb9, 0xfd, 0x7b, 0xc3, 0xc3, 0xe5,
+ 0x2c, 0xc0, 0x57, 0x71, 0x36, 0x39, 0x0d, 0x37, 0xc4, 0x30, 0x42, 0x75, 0xf4, 0x4e, 0x48, 0x8d,
+ 0x5f, 0xa1, 0x3d, 0xab, 0xb1, 0x71, 0x9c, 0xe7, 0x30, 0xf1, 0xee, 0x54, 0xb5, 0x47, 0xcb, 0x59,
+ 0x70, 0xb0, 0x55, 0x6b, 0xf5, 0x30, 0xda, 0xad, 0x13, 0x67, 0x75, 0x8c, 0x5f, 0xa0, 0x1d, 0x2d,
+ 0x3e, 0x43, 0xee, 0x35, 0x7a, 0x6e, 0xbf, 0x73, 0x72, 0x44, 0xea, 0xde, 0x88, 0xe9, 0x8d, 0xd8,
+ 0xde, 0xc8, 0x99, 0xe0, 0xf9, 0xb0, 0x79, 0x33, 0x0b, 0x9c, 0xa8, 0xa6, 0xf1, 0x21, 0x6a, 0x29,
+ 0xc8, 0x2f, 0x41, 0x7a, 0x4d, 0x63, 0x18, 0xd9, 0x08, 0x77, 0x51, 0x5b, 0x02, 0x03, 0x5e, 0x82,
+ 0xf4, 0x76, 0x2a, 0x65, 0x1d, 0xe3, 0x8f, 0x68, 0x4f, 0xf3, 0x0c, 0xc4, 0x54, 0x8f, 0xc6, 0xc0,
+ 0xd3, 0xb1, 0xf6, 0x5a, 0x95, 0x67, 0x97, 0x98, 0xd9, 0x9b, 0x79, 0x11, 0x3b, 0xa5, 0x72, 0x40,
+ 0x5e, 0x57, 0xc4, 0xf0, 0xd8, 0x98, 0xfe, 0x69, 0x66, 0xbb, 0x3e, 0x8c, 0x76, 0x6d, 0xa2, 0xa6,
+ 0xf1, 0x1b, 0xf4, 0x60, 0x45, 0x98, 0xaf, 0xd2, 0x71, 0x56, 0x78, 0x77, 0x7b, 0x6e, 0xbf, 0x39,
+ 0x7c, 0xb4, 0x9c, 0x05, 0xde, 0xf6, 0x4f, 0xd6, 0x48, 0x18, 0xdd, 0xb7, 0xb9, 0x8b, 0x55, 0xea,
+ 0xb4, 0xfd, 0xf5, 0x3a, 0x70, 0x7e, 0x5d, 0x07, 0x4e, 0x78, 0x80, 0x1e, 0x6e, 0xec, 0x2a, 0x02,
+ 0x55, 0x88, 0x5c, 0xc1, 0xc9, 0x08, 0x35, 0xce, 0x55, 0x8a, 0xdf, 0xa3, 0xf6, 0x7a, 0x8d, 0x8f,
+ 0xc9, 0xbf, 0x8e, 0x88, 0x6c, 0x54, 0x77, 0x9f, 0xfc, 0x17, 0x59, 0x19, 0x0c, 0xdf, 0xde, 0xcc,
+ 0x7d, 0xf7, 0x76, 0xee, 0xbb, 0x3f, 0xe7, 0xbe, 0xfb, 0x6d, 0xe1, 0x3b, 0xb7, 0x0b, 0xdf, 0xf9,
+ 0xb1, 0xf0, 0x9d, 0x0f, 0xcf, 0x53, 0xae, 0xc7, 0xd3, 0x84, 0x30, 0x91, 0x51, 0x7b, 0x8a, 0x3c,
+ 0x61, 0x4f, 0x53, 0x41, 0x33, 0x71, 0x39, 0x9d, 0x80, 0xfa, 0xeb, 0xce, 0xf5, 0x55, 0x01, 0x2a,
+ 0x69, 0x55, 0xb7, 0xf7, 0xec, 0x77, 0x00, 0x00, 0x00, 0xff, 0xff, 0xe7, 0x82, 0x6b, 0x1c, 0x09,
+ 0x03, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
@@ -190,7 +190,7 @@ func NewMsgClient(cc grpc1.ClientConn) MsgClient {
func (c *msgClient) Transfer(ctx context.Context, in *MsgTransfer, opts ...grpc.CallOption) (*MsgTransferResponse, error) {
out := new(MsgTransferResponse)
- err := c.cc.Invoke(ctx, "/ibcgo.apps.transfer.v1.Msg/Transfer", in, out, opts...)
+ err := c.cc.Invoke(ctx, "/ibc.apps.transfer.v1.Msg/Transfer", in, out, opts...)
if err != nil {
return nil, err
}
@@ -225,7 +225,7 @@ func _Msg_Transfer_Handler(srv interface{}, ctx context.Context, dec func(interf
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/ibcgo.apps.transfer.v1.Msg/Transfer",
+ FullMethod: "/ibc.apps.transfer.v1.Msg/Transfer",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(MsgServer).Transfer(ctx, req.(*MsgTransfer))
@@ -234,7 +234,7 @@ func _Msg_Transfer_Handler(srv interface{}, ctx context.Context, dec func(interf
}
var _Msg_serviceDesc = grpc.ServiceDesc{
- ServiceName: "ibcgo.apps.transfer.v1.Msg",
+ ServiceName: "ibc.apps.transfer.v1.Msg",
HandlerType: (*MsgServer)(nil),
Methods: []grpc.MethodDesc{
{
@@ -243,7 +243,7 @@ var _Msg_serviceDesc = grpc.ServiceDesc{
},
},
Streams: []grpc.StreamDesc{},
- Metadata: "ibcgo/apps/transfer/v1/tx.proto",
+ Metadata: "ibc/apps/transfer/v1/tx.proto",
}
func (m *MsgTransfer) Marshal() (dAtA []byte, err error) {
diff --git a/modules/core/02-client/types/client.pb.go b/modules/core/02-client/types/client.pb.go
index 06d6b285..99f5fe17 100644
--- a/modules/core/02-client/types/client.pb.go
+++ b/modules/core/02-client/types/client.pb.go
@@ -1,5 +1,5 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: ibcgo/core/client/v1/client.proto
+// source: ibc/core/client/v1/client.proto
package types
@@ -38,7 +38,7 @@ func (m *IdentifiedClientState) Reset() { *m = IdentifiedClientState{} }
func (m *IdentifiedClientState) String() string { return proto.CompactTextString(m) }
func (*IdentifiedClientState) ProtoMessage() {}
func (*IdentifiedClientState) Descriptor() ([]byte, []int) {
- return fileDescriptor_3cc2cf764ecc47af, []int{0}
+ return fileDescriptor_b6bc4c8185546947, []int{0}
}
func (m *IdentifiedClientState) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -94,7 +94,7 @@ func (m *ConsensusStateWithHeight) Reset() { *m = ConsensusStateWithHeig
func (m *ConsensusStateWithHeight) String() string { return proto.CompactTextString(m) }
func (*ConsensusStateWithHeight) ProtoMessage() {}
func (*ConsensusStateWithHeight) Descriptor() ([]byte, []int) {
- return fileDescriptor_3cc2cf764ecc47af, []int{1}
+ return fileDescriptor_b6bc4c8185546947, []int{1}
}
func (m *ConsensusStateWithHeight) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -150,7 +150,7 @@ func (m *ClientConsensusStates) Reset() { *m = ClientConsensusStates{} }
func (m *ClientConsensusStates) String() string { return proto.CompactTextString(m) }
func (*ClientConsensusStates) ProtoMessage() {}
func (*ClientConsensusStates) Descriptor() ([]byte, []int) {
- return fileDescriptor_3cc2cf764ecc47af, []int{2}
+ return fileDescriptor_b6bc4c8185546947, []int{2}
}
func (m *ClientConsensusStates) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -218,7 +218,7 @@ func (m *ClientUpdateProposal) Reset() { *m = ClientUpdateProposal{} }
func (m *ClientUpdateProposal) String() string { return proto.CompactTextString(m) }
func (*ClientUpdateProposal) ProtoMessage() {}
func (*ClientUpdateProposal) Descriptor() ([]byte, []int) {
- return fileDescriptor_3cc2cf764ecc47af, []int{3}
+ return fileDescriptor_b6bc4c8185546947, []int{3}
}
func (m *ClientUpdateProposal) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -265,7 +265,7 @@ type UpgradeProposal struct {
func (m *UpgradeProposal) Reset() { *m = UpgradeProposal{} }
func (*UpgradeProposal) ProtoMessage() {}
func (*UpgradeProposal) Descriptor() ([]byte, []int) {
- return fileDescriptor_3cc2cf764ecc47af, []int{4}
+ return fileDescriptor_b6bc4c8185546947, []int{4}
}
func (m *UpgradeProposal) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -314,7 +314,7 @@ type Height struct {
func (m *Height) Reset() { *m = Height{} }
func (*Height) ProtoMessage() {}
func (*Height) Descriptor() ([]byte, []int) {
- return fileDescriptor_3cc2cf764ecc47af, []int{5}
+ return fileDescriptor_b6bc4c8185546947, []int{5}
}
func (m *Height) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -353,7 +353,7 @@ func (m *Params) Reset() { *m = Params{} }
func (m *Params) String() string { return proto.CompactTextString(m) }
func (*Params) ProtoMessage() {}
func (*Params) Descriptor() ([]byte, []int) {
- return fileDescriptor_3cc2cf764ecc47af, []int{6}
+ return fileDescriptor_b6bc4c8185546947, []int{6}
}
func (m *Params) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -390,66 +390,66 @@ func (m *Params) GetAllowedClients() []string {
}
func init() {
- proto.RegisterType((*IdentifiedClientState)(nil), "ibcgo.core.client.v1.IdentifiedClientState")
- proto.RegisterType((*ConsensusStateWithHeight)(nil), "ibcgo.core.client.v1.ConsensusStateWithHeight")
- proto.RegisterType((*ClientConsensusStates)(nil), "ibcgo.core.client.v1.ClientConsensusStates")
- proto.RegisterType((*ClientUpdateProposal)(nil), "ibcgo.core.client.v1.ClientUpdateProposal")
- proto.RegisterType((*UpgradeProposal)(nil), "ibcgo.core.client.v1.UpgradeProposal")
- proto.RegisterType((*Height)(nil), "ibcgo.core.client.v1.Height")
- proto.RegisterType((*Params)(nil), "ibcgo.core.client.v1.Params")
-}
-
-func init() { proto.RegisterFile("ibcgo/core/client/v1/client.proto", fileDescriptor_3cc2cf764ecc47af) }
-
-var fileDescriptor_3cc2cf764ecc47af = []byte{
- // 743 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0xbf, 0x6f, 0xdb, 0x46,
- 0x14, 0x16, 0x65, 0x59, 0xb0, 0x4e, 0xae, 0xe4, 0xd2, 0x52, 0x2d, 0xbb, 0xae, 0xa8, 0x1e, 0x3a,
- 0x68, 0x31, 0x59, 0xa9, 0x68, 0x07, 0x6d, 0x95, 0x16, 0x7b, 0x68, 0xab, 0x32, 0x30, 0x12, 0x64,
- 0x11, 0xf8, 0xe3, 0x4c, 0x9d, 0x41, 0xf1, 0x04, 0xde, 0x51, 0x81, 0xf2, 0x17, 0x64, 0xcc, 0x98,
- 0x21, 0x83, 0xff, 0x84, 0xfc, 0x15, 0x81, 0x47, 0x2f, 0x01, 0x32, 0x11, 0x81, 0xbd, 0x64, 0xe6,
- 0x9a, 0x25, 0xe0, 0xdd, 0x51, 0x96, 0x14, 0x3b, 0x30, 0x92, 0xed, 0xee, 0xdd, 0xf7, 0xbe, 0xf7,
- 0xbd, 0xef, 0xf8, 0x8e, 0xe0, 0x57, 0x6c, 0x3b, 0x1e, 0x31, 0x1c, 0x12, 0x22, 0xc3, 0xf1, 0x31,
- 0x0a, 0x98, 0x31, 0xeb, 0xc8, 0x95, 0x3e, 0x0d, 0x09, 0x23, 0x6a, 0x8d, 0x43, 0xf4, 0x14, 0xa2,
- 0xcb, 0x83, 0x59, 0xe7, 0xa0, 0xe6, 0x11, 0x8f, 0x70, 0x80, 0x91, 0xae, 0x04, 0xf6, 0x60, 0xdf,
- 0x23, 0xc4, 0xf3, 0x91, 0xc1, 0x77, 0x76, 0x74, 0x66, 0x58, 0xc1, 0x5c, 0x1e, 0xfd, 0xe6, 0x10,
- 0x3a, 0x21, 0xd4, 0x88, 0xa6, 0x5e, 0x68, 0xb9, 0xc8, 0x98, 0x75, 0x6c, 0xc4, 0xac, 0x4e, 0xb6,
- 0x17, 0x28, 0xf8, 0x5a, 0x01, 0xf5, 0x13, 0x17, 0x05, 0x0c, 0x9f, 0x61, 0xe4, 0x0e, 0x78, 0xb9,
- 0x47, 0xcc, 0x62, 0x48, 0xed, 0x80, 0x92, 0xa8, 0x3e, 0xc2, 0x6e, 0x43, 0x69, 0x29, 0xed, 0x52,
- 0xbf, 0x96, 0xc4, 0xda, 0xce, 0xdc, 0x9a, 0xf8, 0x3d, 0xb8, 0x38, 0x82, 0xe6, 0x96, 0x58, 0x9f,
- 0xb8, 0xea, 0x10, 0x6c, 0xcb, 0x38, 0x4d, 0x29, 0x1a, 0xf9, 0x96, 0xd2, 0x2e, 0x77, 0x6b, 0xba,
- 0x10, 0xa9, 0x67, 0x22, 0xf5, 0xbf, 0x83, 0x79, 0x7f, 0x2f, 0x89, 0xb5, 0xdd, 0x15, 0x2e, 0x9e,
- 0x03, 0xcd, 0xb2, 0x73, 0x2b, 0x02, 0xbe, 0x51, 0x40, 0x63, 0x40, 0x02, 0x8a, 0x02, 0x1a, 0x51,
- 0x1e, 0x7a, 0x8c, 0xd9, 0xf8, 0x18, 0x61, 0x6f, 0xcc, 0xd4, 0x1e, 0x28, 0x8e, 0xf9, 0x8a, 0xcb,
- 0x2b, 0x77, 0x0f, 0xf5, 0xbb, 0x9c, 0xd3, 0x05, 0xba, 0x5f, 0xb8, 0x8c, 0xb5, 0x9c, 0x29, 0x33,
- 0xd4, 0x27, 0xa0, 0xea, 0x64, 0xbc, 0x0f, 0x50, 0xbb, 0x9f, 0xc4, 0x5a, 0x3d, 0x55, 0x0b, 0xd7,
- 0xb2, 0xa0, 0x59, 0x71, 0x56, 0xf4, 0xc1, 0xb7, 0x0a, 0xa8, 0x0b, 0x1f, 0x57, 0x85, 0xd3, 0x6f,
- 0x71, 0xf4, 0x39, 0xd8, 0x59, 0x2b, 0x48, 0x1b, 0xf9, 0xd6, 0x46, 0xbb, 0xdc, 0xd5, 0xef, 0x6e,
- 0xf6, 0x3e, 0xb3, 0xfa, 0x5a, 0xda, 0x7e, 0x12, 0x6b, 0x7b, 0xb2, 0xda, 0x1a, 0x2b, 0x34, 0xab,
- 0xab, 0x7d, 0x50, 0xf8, 0x2e, 0x0f, 0x6a, 0xa2, 0x91, 0xd3, 0xa9, 0x6b, 0x31, 0x34, 0x0c, 0xc9,
- 0x94, 0x50, 0xcb, 0x57, 0x6b, 0x60, 0x93, 0x61, 0xe6, 0x23, 0xd1, 0x83, 0x29, 0x36, 0x6a, 0x0b,
- 0x94, 0x5d, 0x44, 0x9d, 0x10, 0x4f, 0x19, 0x26, 0x01, 0x77, 0xb3, 0x64, 0x2e, 0x87, 0xd4, 0x63,
- 0xf0, 0x23, 0x8d, 0xec, 0x73, 0xe4, 0xb0, 0xd1, 0xad, 0x0f, 0x1b, 0xdc, 0x87, 0xc3, 0x24, 0xd6,
- 0x1a, 0x42, 0xd9, 0x17, 0x10, 0x68, 0x56, 0x65, 0x6c, 0x90, 0xd9, 0xf2, 0x3f, 0xa8, 0xd1, 0xc8,
- 0xa6, 0x0c, 0xb3, 0x88, 0xa1, 0x25, 0xb2, 0x02, 0x27, 0xd3, 0x92, 0x58, 0xfb, 0x39, 0x23, 0xa3,
- 0xf6, 0x3a, 0x0a, 0x9a, 0xea, 0x6d, 0xf2, 0x82, 0xd2, 0x06, 0x15, 0x1c, 0x60, 0x86, 0x2d, 0x7f,
- 0x24, 0x3f, 0xaa, 0xcd, 0x07, 0x7c, 0x54, 0xbf, 0x48, 0x57, 0xeb, 0xa2, 0xdc, 0x2a, 0x03, 0x34,
- 0x7f, 0x90, 0x01, 0x81, 0xee, 0x15, 0x5e, 0x5c, 0x68, 0x39, 0xf8, 0x49, 0x01, 0xd5, 0x53, 0x31,
- 0x84, 0xdf, 0x6d, 0xe9, 0x5f, 0xa0, 0x30, 0xf5, 0xad, 0x80, 0xbb, 0x98, 0x6a, 0x15, 0x33, 0xaf,
- 0x67, 0x33, 0x2e, 0x67, 0x5e, 0x1f, 0xfa, 0x56, 0x20, 0x07, 0x80, 0xe3, 0xd5, 0x73, 0x50, 0x97,
- 0x18, 0x77, 0xb4, 0x32, 0xb2, 0x85, 0xaf, 0x0c, 0x41, 0x2b, 0x89, 0xb5, 0x43, 0xd1, 0xe8, 0x9d,
- 0xc9, 0xd0, 0xdc, 0xcd, 0xe2, 0x4b, 0x0f, 0x49, 0x6f, 0x3b, 0xed, 0xfa, 0xd5, 0x85, 0x96, 0xfb,
- 0x78, 0xa1, 0x29, 0xe9, 0x83, 0x53, 0x94, 0xf3, 0x3b, 0x00, 0xd5, 0x10, 0xcd, 0x30, 0xc5, 0x24,
- 0x18, 0x05, 0xd1, 0xc4, 0x46, 0x21, 0x6f, 0xbf, 0xd0, 0x3f, 0x48, 0x62, 0xed, 0x27, 0x51, 0x68,
- 0x0d, 0x00, 0xcd, 0x4a, 0x16, 0xf9, 0x97, 0x07, 0x56, 0x48, 0xe4, 0xc5, 0xe5, 0xef, 0x25, 0xc9,
- 0xee, 0x65, 0x41, 0x22, 0x2f, 0x66, 0x2b, 0x93, 0x08, 0xff, 0x01, 0xc5, 0xa1, 0x15, 0x5a, 0x13,
- 0x9a, 0x12, 0x5b, 0xbe, 0x4f, 0x9e, 0x2d, 0x9a, 0xa4, 0x0d, 0xa5, 0xb5, 0xd1, 0x2e, 0x2d, 0x13,
- 0xaf, 0x01, 0xa0, 0x59, 0x91, 0x11, 0xd1, 0x3f, 0xed, 0xff, 0x77, 0x79, 0xdd, 0x54, 0xae, 0xae,
- 0x9b, 0xca, 0x87, 0xeb, 0xa6, 0xf2, 0xf2, 0xa6, 0x99, 0xbb, 0xba, 0x69, 0xe6, 0xde, 0xdf, 0x34,
- 0x73, 0x4f, 0xff, 0xf4, 0x30, 0x1b, 0x47, 0xb6, 0xee, 0x90, 0x89, 0x21, 0x5f, 0x6a, 0x6c, 0x3b,
- 0x47, 0x1e, 0x31, 0x26, 0xc4, 0x8d, 0x7c, 0x44, 0xc5, 0x3f, 0xe2, 0xf7, 0xee, 0x91, 0xfc, 0x4d,
- 0xb0, 0xf9, 0x14, 0x51, 0xbb, 0xc8, 0x6f, 0xe4, 0x8f, 0xcf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x18,
- 0x9f, 0x9f, 0xd0, 0x48, 0x06, 0x00, 0x00,
+ proto.RegisterType((*IdentifiedClientState)(nil), "ibc.core.client.v1.IdentifiedClientState")
+ proto.RegisterType((*ConsensusStateWithHeight)(nil), "ibc.core.client.v1.ConsensusStateWithHeight")
+ proto.RegisterType((*ClientConsensusStates)(nil), "ibc.core.client.v1.ClientConsensusStates")
+ proto.RegisterType((*ClientUpdateProposal)(nil), "ibc.core.client.v1.ClientUpdateProposal")
+ proto.RegisterType((*UpgradeProposal)(nil), "ibc.core.client.v1.UpgradeProposal")
+ proto.RegisterType((*Height)(nil), "ibc.core.client.v1.Height")
+ proto.RegisterType((*Params)(nil), "ibc.core.client.v1.Params")
+}
+
+func init() { proto.RegisterFile("ibc/core/client/v1/client.proto", fileDescriptor_b6bc4c8185546947) }
+
+var fileDescriptor_b6bc4c8185546947 = []byte{
+ // 744 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0x3d, 0x73, 0xd3, 0x4a,
+ 0x14, 0xb5, 0x1c, 0xc7, 0x13, 0xaf, 0xf3, 0xec, 0x3c, 0xc5, 0x7e, 0x71, 0xfc, 0xf2, 0x2c, 0xcf,
+ 0xce, 0x2b, 0x5c, 0x10, 0x09, 0x9b, 0x81, 0x61, 0xd2, 0x61, 0x37, 0x49, 0x01, 0x18, 0x31, 0x19,
+ 0x18, 0x1a, 0xa3, 0x8f, 0x8d, 0xbc, 0x19, 0x59, 0xeb, 0xd1, 0xae, 0x0c, 0xfe, 0x07, 0x94, 0x94,
+ 0x14, 0x14, 0xf9, 0x05, 0xfc, 0x0a, 0x8a, 0x94, 0x99, 0xa1, 0xa1, 0xd2, 0x30, 0x49, 0x43, 0xad,
+ 0x96, 0x86, 0x91, 0x76, 0xe5, 0xd8, 0x4e, 0x02, 0x0c, 0x74, 0xab, 0xbb, 0xe7, 0x9e, 0x7b, 0xcf,
+ 0xd9, 0xbd, 0x2b, 0xa0, 0x60, 0xd3, 0xd2, 0x2c, 0xe2, 0x23, 0xcd, 0x72, 0x31, 0xf2, 0x98, 0x36,
+ 0x69, 0x8b, 0x95, 0x3a, 0xf6, 0x09, 0x23, 0xb2, 0x8c, 0x4d, 0x4b, 0x8d, 0x01, 0xaa, 0x08, 0x4f,
+ 0xda, 0xf5, 0x8a, 0x43, 0x1c, 0x92, 0x6c, 0x6b, 0xf1, 0x8a, 0x23, 0xeb, 0xdb, 0x0e, 0x21, 0x8e,
+ 0x8b, 0xb4, 0xe4, 0xcb, 0x0c, 0x8e, 0x34, 0xc3, 0x9b, 0x8a, 0xad, 0xff, 0x2d, 0x42, 0x47, 0x84,
+ 0x6a, 0xc1, 0xd8, 0xf1, 0x0d, 0x1b, 0x69, 0x93, 0xb6, 0x89, 0x98, 0xd1, 0x4e, 0xbf, 0x39, 0x0a,
+ 0xbe, 0x97, 0x40, 0xf5, 0xc0, 0x46, 0x1e, 0xc3, 0x47, 0x18, 0xd9, 0xbd, 0xa4, 0xdc, 0x53, 0x66,
+ 0x30, 0x24, 0xb7, 0x41, 0x81, 0x57, 0x1f, 0x60, 0xbb, 0x26, 0x35, 0xa5, 0x56, 0xa1, 0x5b, 0x89,
+ 0x42, 0x65, 0x63, 0x6a, 0x8c, 0xdc, 0x3d, 0x38, 0xdb, 0x82, 0xfa, 0x1a, 0x5f, 0x1f, 0xd8, 0x72,
+ 0x1f, 0xac, 0x8b, 0x38, 0x8d, 0x29, 0x6a, 0xd9, 0xa6, 0xd4, 0x2a, 0x76, 0x2a, 0x2a, 0x6f, 0x52,
+ 0x4d, 0x9b, 0x54, 0x1f, 0x78, 0xd3, 0xee, 0x56, 0x14, 0x2a, 0x9b, 0x0b, 0x5c, 0x49, 0x0e, 0xd4,
+ 0x8b, 0xd6, 0x65, 0x13, 0xf0, 0x83, 0x04, 0x6a, 0x3d, 0xe2, 0x51, 0xe4, 0xd1, 0x80, 0x26, 0xa1,
+ 0x67, 0x98, 0x0d, 0xf7, 0x11, 0x76, 0x86, 0x4c, 0xbe, 0x0f, 0xf2, 0xc3, 0x64, 0x95, 0xb4, 0x57,
+ 0xec, 0xd4, 0xd5, 0xab, 0xbe, 0xa9, 0x1c, 0xdb, 0xcd, 0x9d, 0x86, 0x4a, 0x46, 0x17, 0x78, 0xf9,
+ 0x39, 0x28, 0x5b, 0x29, 0xeb, 0x2f, 0xf4, 0xba, 0x1d, 0x85, 0x4a, 0x35, 0xee, 0x15, 0x2e, 0x65,
+ 0x41, 0xbd, 0x64, 0x2d, 0x74, 0x07, 0x3f, 0x4a, 0xa0, 0xca, 0x5d, 0x5c, 0x6c, 0x9b, 0xfe, 0x8e,
+ 0x9f, 0xaf, 0xc1, 0xc6, 0x52, 0x41, 0x5a, 0xcb, 0x36, 0x57, 0x5a, 0xc5, 0xce, 0xad, 0xeb, 0xa4,
+ 0xde, 0x64, 0x54, 0x57, 0x89, 0xc5, 0x47, 0xa1, 0xb2, 0x25, 0x6a, 0x2d, 0x71, 0x42, 0xbd, 0xbc,
+ 0xa8, 0x82, 0xc2, 0x4f, 0x59, 0x50, 0xe1, 0x32, 0x0e, 0xc7, 0xb6, 0xc1, 0x50, 0xdf, 0x27, 0x63,
+ 0x42, 0x0d, 0x57, 0xae, 0x80, 0x55, 0x86, 0x99, 0x8b, 0xb8, 0x02, 0x9d, 0x7f, 0xc8, 0x4d, 0x50,
+ 0xb4, 0x11, 0xb5, 0x7c, 0x3c, 0x66, 0x98, 0x78, 0x89, 0x97, 0x05, 0x7d, 0x3e, 0x24, 0xef, 0x83,
+ 0xbf, 0x69, 0x60, 0x1e, 0x23, 0x8b, 0x0d, 0x2e, 0x5d, 0x58, 0x49, 0x5c, 0xd8, 0x89, 0x42, 0xa5,
+ 0xc6, 0x3b, 0xbb, 0x02, 0x81, 0x7a, 0x59, 0xc4, 0x7a, 0xa9, 0x29, 0x4f, 0x40, 0x85, 0x06, 0x26,
+ 0x65, 0x98, 0x05, 0x0c, 0xcd, 0x91, 0xe5, 0x12, 0x32, 0x25, 0x0a, 0x95, 0x7f, 0x53, 0x32, 0x6a,
+ 0x2e, 0xa3, 0xa0, 0x2e, 0x5f, 0x26, 0xcf, 0x28, 0x5f, 0x82, 0x12, 0xf6, 0x30, 0xc3, 0x86, 0x3b,
+ 0x10, 0x17, 0x6a, 0xf5, 0xa7, 0x17, 0xea, 0x3f, 0xe1, 0x69, 0x95, 0x17, 0x5b, 0xcc, 0x87, 0xfa,
+ 0x5f, 0x22, 0xc0, 0xd1, 0x7b, 0xb9, 0x37, 0x27, 0x4a, 0x06, 0x7e, 0x93, 0x40, 0xf9, 0x90, 0x8f,
+ 0xdf, 0x1f, 0x1b, 0x7a, 0x0f, 0xe4, 0xc6, 0xae, 0xe1, 0x25, 0x1e, 0x16, 0x3b, 0x3b, 0x2a, 0x9f,
+ 0x76, 0x35, 0x9d, 0x6e, 0x31, 0xed, 0x6a, 0xdf, 0x35, 0x3c, 0x71, 0xf9, 0x13, 0xbc, 0x7c, 0x0c,
+ 0xaa, 0x02, 0x63, 0x0f, 0x16, 0x86, 0x35, 0xf7, 0x83, 0x01, 0x68, 0x46, 0xa1, 0xb2, 0xc3, 0x85,
+ 0x5e, 0x9b, 0x0c, 0xf5, 0xcd, 0x34, 0x3e, 0xf7, 0x84, 0xec, 0xad, 0xc7, 0xaa, 0xdf, 0x9d, 0x28,
+ 0x99, 0xaf, 0x27, 0x8a, 0x14, 0x3f, 0x35, 0x79, 0x31, 0xb9, 0x3d, 0x50, 0xf6, 0xd1, 0x04, 0x53,
+ 0x4c, 0xbc, 0x81, 0x17, 0x8c, 0x4c, 0xe4, 0x27, 0xf2, 0x73, 0xdd, 0x7a, 0x14, 0x2a, 0xff, 0xf0,
+ 0x42, 0x4b, 0x00, 0xa8, 0x97, 0xd2, 0xc8, 0xa3, 0x24, 0xb0, 0x40, 0x22, 0x8e, 0x2d, 0x7b, 0x23,
+ 0x49, 0x7a, 0x2e, 0x33, 0x12, 0x71, 0x30, 0x6b, 0x69, 0x8b, 0xf0, 0x21, 0xc8, 0xf7, 0x0d, 0xdf,
+ 0x18, 0xd1, 0x98, 0xd8, 0x70, 0x5d, 0xf2, 0x6a, 0x26, 0x92, 0xd6, 0xa4, 0xe6, 0x4a, 0xab, 0x30,
+ 0x4f, 0xbc, 0x04, 0x80, 0x7a, 0x49, 0x44, 0xb8, 0x7e, 0xda, 0x7d, 0x7c, 0x7a, 0xde, 0x90, 0xce,
+ 0xce, 0x1b, 0xd2, 0x97, 0xf3, 0x86, 0xf4, 0xf6, 0xa2, 0x91, 0x39, 0xbb, 0x68, 0x64, 0x3e, 0x5f,
+ 0x34, 0x32, 0x2f, 0xee, 0x3a, 0x98, 0x0d, 0x03, 0x53, 0xb5, 0xc8, 0x48, 0x13, 0x6f, 0x34, 0x36,
+ 0xad, 0x5d, 0x87, 0x68, 0x23, 0x62, 0x07, 0x2e, 0xa2, 0xfc, 0xdf, 0x70, 0xbb, 0xb3, 0x2b, 0x7e,
+ 0x0f, 0x6c, 0x3a, 0x46, 0xd4, 0xcc, 0x27, 0x27, 0x72, 0xe7, 0x7b, 0x00, 0x00, 0x00, 0xff, 0xff,
+ 0xca, 0x6e, 0xea, 0xc6, 0x3e, 0x06, 0x00, 0x00,
}
func (this *UpgradeProposal) Equal(that interface{}) bool {
diff --git a/modules/core/02-client/types/codec.go b/modules/core/02-client/types/codec.go
index 41afe9c7..dfc05c94 100644
--- a/modules/core/02-client/types/codec.go
+++ b/modules/core/02-client/types/codec.go
@@ -14,24 +14,24 @@ import (
// RegisterInterfaces registers the client interfaces to protobuf Any.
func RegisterInterfaces(registry codectypes.InterfaceRegistry) {
registry.RegisterInterface(
- "ibcgo.core.client.v1.ClientState",
+ "ibc.core.client.v1.ClientState",
(*exported.ClientState)(nil),
)
registry.RegisterInterface(
- "ibcgo.core.client.v1.ConsensusState",
+ "ibc.core.client.v1.ConsensusState",
(*exported.ConsensusState)(nil),
)
registry.RegisterInterface(
- "ibcgo.core.client.v1.Header",
+ "ibc.core.client.v1.Header",
(*exported.Header)(nil),
)
registry.RegisterInterface(
- "ibcgo.core.client.v1.Height",
+ "ibc.core.client.v1.Height",
(*exported.Height)(nil),
&Height{},
)
registry.RegisterInterface(
- "ibcgo.core.client.v1.Misbehaviour",
+ "ibc.core.client.v1.Misbehaviour",
(*exported.Misbehaviour)(nil),
)
registry.RegisterImplementations(
diff --git a/modules/core/02-client/types/genesis.pb.go b/modules/core/02-client/types/genesis.pb.go
index 71b9b2b3..c428a524 100644
--- a/modules/core/02-client/types/genesis.pb.go
+++ b/modules/core/02-client/types/genesis.pb.go
@@ -1,5 +1,5 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: ibcgo/core/client/v1/genesis.proto
+// source: ibc/core/client/v1/genesis.proto
package types
@@ -42,7 +42,7 @@ func (m *GenesisState) Reset() { *m = GenesisState{} }
func (m *GenesisState) String() string { return proto.CompactTextString(m) }
func (*GenesisState) ProtoMessage() {}
func (*GenesisState) Descriptor() ([]byte, []int) {
- return fileDescriptor_a1110e97fc5e4abf, []int{0}
+ return fileDescriptor_bcd0c0f1f2e6a91a, []int{0}
}
func (m *GenesisState) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -126,7 +126,7 @@ func (m *GenesisMetadata) Reset() { *m = GenesisMetadata{} }
func (m *GenesisMetadata) String() string { return proto.CompactTextString(m) }
func (*GenesisMetadata) ProtoMessage() {}
func (*GenesisMetadata) Descriptor() ([]byte, []int) {
- return fileDescriptor_a1110e97fc5e4abf, []int{1}
+ return fileDescriptor_bcd0c0f1f2e6a91a, []int{1}
}
func (m *GenesisMetadata) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -166,7 +166,7 @@ func (m *IdentifiedGenesisMetadata) Reset() { *m = IdentifiedGenesisMeta
func (m *IdentifiedGenesisMetadata) String() string { return proto.CompactTextString(m) }
func (*IdentifiedGenesisMetadata) ProtoMessage() {}
func (*IdentifiedGenesisMetadata) Descriptor() ([]byte, []int) {
- return fileDescriptor_a1110e97fc5e4abf, []int{2}
+ return fileDescriptor_bcd0c0f1f2e6a91a, []int{2}
}
func (m *IdentifiedGenesisMetadata) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -210,51 +210,49 @@ func (m *IdentifiedGenesisMetadata) GetClientMetadata() []GenesisMetadata {
}
func init() {
- proto.RegisterType((*GenesisState)(nil), "ibcgo.core.client.v1.GenesisState")
- proto.RegisterType((*GenesisMetadata)(nil), "ibcgo.core.client.v1.GenesisMetadata")
- proto.RegisterType((*IdentifiedGenesisMetadata)(nil), "ibcgo.core.client.v1.IdentifiedGenesisMetadata")
+ proto.RegisterType((*GenesisState)(nil), "ibc.core.client.v1.GenesisState")
+ proto.RegisterType((*GenesisMetadata)(nil), "ibc.core.client.v1.GenesisMetadata")
+ proto.RegisterType((*IdentifiedGenesisMetadata)(nil), "ibc.core.client.v1.IdentifiedGenesisMetadata")
}
-func init() {
- proto.RegisterFile("ibcgo/core/client/v1/genesis.proto", fileDescriptor_a1110e97fc5e4abf)
-}
+func init() { proto.RegisterFile("ibc/core/client/v1/genesis.proto", fileDescriptor_bcd0c0f1f2e6a91a) }
-var fileDescriptor_a1110e97fc5e4abf = []byte{
- // 541 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x53, 0xc1, 0x6e, 0xd3, 0x4c,
- 0x18, 0xcc, 0x36, 0x69, 0xfe, 0x76, 0x5b, 0xfd, 0x0d, 0xab, 0xa8, 0x98, 0x16, 0xd9, 0xc1, 0x12,
- 0x52, 0x24, 0x54, 0x9b, 0x04, 0x71, 0xc9, 0x05, 0xc9, 0x95, 0x40, 0x95, 0x40, 0x80, 0xb9, 0x71,
- 0xb1, 0x9c, 0xf5, 0x87, 0x6b, 0x61, 0x7b, 0x43, 0x76, 0x13, 0x11, 0xf1, 0x02, 0x1c, 0x39, 0xf0,
- 0x00, 0x9c, 0x79, 0x08, 0xce, 0x3d, 0xf6, 0xc8, 0x29, 0x54, 0xc9, 0x1b, 0xe4, 0x09, 0x90, 0x77,
- 0xd7, 0xb4, 0x0d, 0x06, 0x6e, 0x5f, 0xc6, 0x33, 0xf3, 0x8d, 0xe6, 0xcb, 0x62, 0x3b, 0x19, 0xd2,
- 0x98, 0xb9, 0x94, 0x8d, 0xc1, 0xa5, 0x69, 0x02, 0xb9, 0x70, 0xa7, 0x3d, 0x37, 0x86, 0x1c, 0x78,
- 0xc2, 0x9d, 0xd1, 0x98, 0x09, 0x46, 0xda, 0x92, 0xe3, 0x14, 0x1c, 0x47, 0x71, 0x9c, 0x69, 0xef,
- 0xe0, 0x4e, 0xa5, 0x52, 0x7f, 0x97, 0xc2, 0x83, 0x76, 0xcc, 0x62, 0x26, 0x47, 0xb7, 0x98, 0x14,
- 0x6a, 0x5f, 0x34, 0xf0, 0xee, 0x13, 0xb5, 0xe0, 0x95, 0x08, 0x05, 0x10, 0xc0, 0xff, 0x29, 0x19,
- 0x37, 0x50, 0xa7, 0xde, 0xdd, 0xe9, 0xdf, 0x73, 0xaa, 0x36, 0x3a, 0x27, 0x11, 0xe4, 0x22, 0x79,
- 0x93, 0x40, 0x74, 0x2c, 0x31, 0xa9, 0xf6, 0xcc, 0xb3, 0xb9, 0x55, 0xfb, 0xfa, 0xc3, 0xda, 0xaf,
- 0xfc, 0xcc, 0xfd, 0xd2, 0x9b, 0x7c, 0x46, 0xf8, 0x86, 0x9e, 0x03, 0xca, 0x72, 0x0e, 0x39, 0x9f,
- 0x70, 0x63, 0xe3, 0x6f, 0x1b, 0x95, 0xd1, 0x71, 0x49, 0x56, 0x8e, 0xde, 0xa0, 0xd8, 0xb8, 0x9a,
- 0x5b, 0xc6, 0x2c, 0xcc, 0xd2, 0x81, 0xfd, 0x9b, 0xa7, 0x5d, 0xa4, 0x51, 0x52, 0xbe, 0xa6, 0xf5,
- 0x5b, 0x74, 0x0d, 0x27, 0x1f, 0x70, 0x89, 0x05, 0x19, 0x88, 0x30, 0x0a, 0x45, 0x68, 0xd4, 0x65,
- 0x28, 0xf7, 0x5f, 0x35, 0xe8, 0x16, 0x9f, 0x69, 0x99, 0x67, 0xe9, 0x60, 0x37, 0xaf, 0x07, 0x2b,
- 0x6d, 0x6d, 0x7f, 0x4f, 0x43, 0xa5, 0x82, 0x0c, 0x70, 0x73, 0x14, 0x8e, 0xc3, 0x8c, 0x1b, 0x8d,
- 0x0e, 0xea, 0xee, 0xf4, 0x6f, 0x57, 0xaf, 0x7c, 0x21, 0x39, 0x5e, 0xa3, 0xf0, 0xf7, 0xb5, 0x82,
- 0x3c, 0xc6, 0x2d, 0x3a, 0x86, 0x50, 0x40, 0x90, 0x32, 0x1a, 0xa6, 0xa7, 0x8c, 0x0b, 0x63, 0xb3,
- 0x83, 0xba, 0x5b, 0xde, 0xe1, 0x95, 0x0c, 0x6b, 0x8c, 0x22, 0x83, 0x84, 0x9e, 0x96, 0x08, 0x79,
- 0x89, 0xdb, 0x39, 0xbc, 0x17, 0x81, 0x5a, 0x17, 0x70, 0x78, 0x37, 0x81, 0x9c, 0x82, 0xd1, 0xec,
- 0xa0, 0x6e, 0xc3, 0xb3, 0x56, 0x73, 0xeb, 0x50, 0x79, 0x55, 0xb1, 0x6c, 0x9f, 0x14, 0xb0, 0xbe,
- 0x78, 0x09, 0x3e, 0xc2, 0x7b, 0x6b, 0xdd, 0x90, 0x16, 0xae, 0xbf, 0x85, 0x99, 0x81, 0x3a, 0xa8,
- 0xbb, 0xeb, 0x17, 0x23, 0x69, 0xe3, 0xcd, 0x69, 0x98, 0x4e, 0xc0, 0xd8, 0x90, 0x98, 0xfa, 0x31,
- 0x68, 0x7c, 0xfc, 0x62, 0xd5, 0xec, 0x6f, 0x08, 0xdf, 0xfa, 0x63, 0xcf, 0xa4, 0x87, 0xb7, 0x75,
- 0x8c, 0x24, 0x92, 0x8e, 0xdb, 0x5e, 0x7b, 0x35, 0xb7, 0x5a, 0x57, 0x6b, 0x0f, 0x92, 0xc8, 0xf6,
- 0xb7, 0xd4, 0x7c, 0x12, 0x91, 0x1c, 0xeb, 0xee, 0x2f, 0x8f, 0xac, 0xfe, 0x79, 0x77, 0xab, 0x1b,
- 0x5f, 0x3f, 0xad, 0xa9, 0x4f, 0xbb, 0x7f, 0x6d, 0xc7, 0xe5, 0x65, 0xff, 0x57, 0xc8, 0x2f, 0xfe,
- 0xf3, 0xb3, 0x85, 0x89, 0xce, 0x17, 0x26, 0xba, 0x58, 0x98, 0xe8, 0xd3, 0xd2, 0xac, 0x9d, 0x2f,
- 0xcd, 0xda, 0xf7, 0xa5, 0x59, 0x7b, 0xfd, 0x30, 0x4e, 0xc4, 0xe9, 0x64, 0xe8, 0x50, 0x96, 0xb9,
- 0x94, 0xf1, 0x8c, 0x71, 0x37, 0x19, 0xd2, 0xa3, 0x98, 0xb9, 0x19, 0x8b, 0x26, 0x29, 0x70, 0xf5,
- 0xa4, 0xef, 0xf7, 0x8f, 0xf4, 0xab, 0x16, 0xb3, 0x11, 0xf0, 0x61, 0x53, 0x3e, 0xde, 0x07, 0x3f,
- 0x03, 0x00, 0x00, 0xff, 0xff, 0xd0, 0x5b, 0x1b, 0x7c, 0x31, 0x04, 0x00, 0x00,
+var fileDescriptor_bcd0c0f1f2e6a91a = []byte{
+ // 537 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x53, 0x41, 0x6e, 0xd3, 0x40,
+ 0x14, 0xcd, 0x34, 0x69, 0x68, 0xa7, 0x15, 0x0d, 0xa3, 0xa8, 0x98, 0x54, 0xb2, 0x2d, 0xb3, 0x09,
+ 0x8b, 0xd8, 0x24, 0x08, 0x09, 0x65, 0x83, 0xe4, 0x4a, 0xa0, 0x4a, 0x20, 0xc0, 0xec, 0xd8, 0x58,
+ 0x93, 0xf1, 0x90, 0x8e, 0xb0, 0x3d, 0x21, 0x33, 0x89, 0xc8, 0x0d, 0x58, 0x22, 0x4e, 0xc0, 0x9a,
+ 0x33, 0x70, 0x80, 0x2e, 0xbb, 0xec, 0x2a, 0xa0, 0xe4, 0x06, 0x39, 0x01, 0xf2, 0xcc, 0x98, 0xb6,
+ 0x69, 0xca, 0xee, 0xe7, 0xf9, 0xbd, 0xf7, 0x9f, 0xde, 0xcf, 0x40, 0x97, 0x0d, 0x48, 0x40, 0xf8,
+ 0x98, 0x06, 0x24, 0x65, 0x34, 0x97, 0xc1, 0xb4, 0x1b, 0x0c, 0x69, 0x4e, 0x05, 0x13, 0xfe, 0x68,
+ 0xcc, 0x25, 0x47, 0x88, 0x0d, 0x88, 0x5f, 0x30, 0x7c, 0xcd, 0xf0, 0xa7, 0xdd, 0x96, 0xb3, 0x41,
+ 0x65, 0xbe, 0x2a, 0x51, 0xab, 0x39, 0xe4, 0x43, 0xae, 0xc6, 0xa0, 0x98, 0x34, 0xea, 0x5d, 0xd4,
+ 0xe0, 0xfe, 0x4b, 0x6d, 0xfe, 0x5e, 0x62, 0x49, 0x11, 0x81, 0x77, 0xb4, 0x4c, 0x58, 0xc0, 0xad,
+ 0xb6, 0xf7, 0x7a, 0x8f, 0xfc, 0x9b, 0xdb, 0xfc, 0x93, 0x84, 0xe6, 0x92, 0x7d, 0x64, 0x34, 0x39,
+ 0x56, 0x98, 0xd2, 0x86, 0xf6, 0xd9, 0xdc, 0xa9, 0xfc, 0xfc, 0xed, 0x1c, 0x6e, 0xfc, 0x2c, 0xa2,
+ 0xd2, 0x19, 0x7d, 0x07, 0xf0, 0x9e, 0x99, 0x63, 0xc2, 0x73, 0x41, 0x73, 0x31, 0x11, 0xd6, 0xd6,
+ 0xed, 0xfb, 0xb4, 0xcd, 0x71, 0x49, 0xd5, 0x7e, 0x61, 0xbf, 0xd8, 0xb7, 0x9a, 0x3b, 0xd6, 0x0c,
+ 0x67, 0x69, 0xdf, 0xbb, 0xe1, 0xe8, 0x15, 0x59, 0xb4, 0x54, 0xac, 0x69, 0xa3, 0x06, 0x59, 0xc3,
+ 0xd1, 0x0c, 0x96, 0x58, 0x9c, 0x51, 0x89, 0x13, 0x2c, 0xb1, 0x55, 0x55, 0x91, 0x3a, 0xff, 0xaf,
+ 0xc0, 0xf4, 0xf7, 0xda, 0x88, 0x42, 0xc7, 0xc4, 0xba, 0x7f, 0x3d, 0x56, 0x69, 0xea, 0x45, 0x07,
+ 0x06, 0x2a, 0x15, 0xe8, 0x19, 0xac, 0x8f, 0xf0, 0x18, 0x67, 0xc2, 0xaa, 0xb9, 0xa0, 0xbd, 0xd7,
+ 0x6b, 0x6d, 0x5a, 0xf8, 0x56, 0x31, 0xc2, 0x5a, 0xe1, 0x1e, 0x19, 0x3e, 0x7a, 0x01, 0x1b, 0x64,
+ 0x4c, 0xb1, 0xa4, 0x71, 0xca, 0x09, 0x4e, 0x4f, 0xb9, 0x90, 0xd6, 0xb6, 0x0b, 0xda, 0x3b, 0xe1,
+ 0xd1, 0x95, 0x04, 0x6b, 0x8c, 0x22, 0x81, 0x82, 0x5e, 0x95, 0x08, 0x7a, 0x07, 0x9b, 0x39, 0xfd,
+ 0x22, 0x63, 0xbd, 0x2e, 0x16, 0xf4, 0xf3, 0x84, 0xe6, 0x84, 0x5a, 0x75, 0x17, 0xb4, 0x6b, 0xa1,
+ 0xb3, 0x9a, 0x3b, 0x47, 0xda, 0x6b, 0x13, 0xcb, 0x8b, 0x50, 0x01, 0x9b, 0x5b, 0x97, 0xe0, 0x73,
+ 0x78, 0xb0, 0xd6, 0x0c, 0x6a, 0xc0, 0xea, 0x27, 0x3a, 0xb3, 0x80, 0x0b, 0xda, 0xfb, 0x51, 0x31,
+ 0xa2, 0x26, 0xdc, 0x9e, 0xe2, 0x74, 0x42, 0xad, 0x2d, 0x85, 0xe9, 0x1f, 0xfd, 0xda, 0xd7, 0x1f,
+ 0x4e, 0xc5, 0xfb, 0x05, 0xe0, 0x83, 0x5b, 0x5b, 0x46, 0x5d, 0xb8, 0x6b, 0x62, 0xb0, 0x44, 0x39,
+ 0xee, 0x86, 0xcd, 0xd5, 0xdc, 0x69, 0x5c, 0x2d, 0x3d, 0x66, 0x89, 0x17, 0xed, 0xe8, 0xf9, 0x24,
+ 0x41, 0x29, 0x34, 0xcd, 0x5f, 0x1e, 0x58, 0xff, 0xe7, 0x1e, 0x6e, 0xea, 0x7b, 0xfd, 0xac, 0xb6,
+ 0x39, 0xeb, 0xe1, 0xb5, 0x0d, 0x97, 0x57, 0xbd, 0xab, 0x91, 0x7f, 0xfc, 0x37, 0x67, 0x0b, 0x1b,
+ 0x9c, 0x2f, 0x6c, 0xf0, 0x67, 0x61, 0x83, 0x6f, 0x4b, 0xbb, 0x72, 0xbe, 0xb4, 0x2b, 0x17, 0x4b,
+ 0xbb, 0xf2, 0xe1, 0xe9, 0x90, 0xc9, 0xd3, 0xc9, 0xc0, 0x27, 0x3c, 0x0b, 0x08, 0x17, 0x19, 0x17,
+ 0x01, 0x1b, 0x90, 0xce, 0x90, 0x07, 0x19, 0x4f, 0x26, 0x29, 0x15, 0xfa, 0x21, 0x3f, 0xee, 0x75,
+ 0xcc, 0x5b, 0x96, 0xb3, 0x11, 0x15, 0x83, 0xba, 0x7a, 0xb2, 0x4f, 0xfe, 0x06, 0x00, 0x00, 0xff,
+ 0xff, 0xf5, 0x0e, 0x89, 0x7e, 0x21, 0x04, 0x00, 0x00,
}
func (m *GenesisState) Marshal() (dAtA []byte, err error) {
diff --git a/modules/core/02-client/types/query.pb.go b/modules/core/02-client/types/query.pb.go
index 9709353c..b89b1c2e 100644
--- a/modules/core/02-client/types/query.pb.go
+++ b/modules/core/02-client/types/query.pb.go
@@ -1,5 +1,5 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: ibcgo/core/client/v1/query.proto
+// source: ibc/core/client/v1/query.proto
package types
@@ -42,7 +42,7 @@ func (m *QueryClientStateRequest) Reset() { *m = QueryClientStateRequest
func (m *QueryClientStateRequest) String() string { return proto.CompactTextString(m) }
func (*QueryClientStateRequest) ProtoMessage() {}
func (*QueryClientStateRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_833c7bc6da1addd1, []int{0}
+ return fileDescriptor_dc42cdfd1d52d76e, []int{0}
}
func (m *QueryClientStateRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -94,7 +94,7 @@ func (m *QueryClientStateResponse) Reset() { *m = QueryClientStateRespon
func (m *QueryClientStateResponse) String() string { return proto.CompactTextString(m) }
func (*QueryClientStateResponse) ProtoMessage() {}
func (*QueryClientStateResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_833c7bc6da1addd1, []int{1}
+ return fileDescriptor_dc42cdfd1d52d76e, []int{1}
}
func (m *QueryClientStateResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -155,7 +155,7 @@ func (m *QueryClientStatesRequest) Reset() { *m = QueryClientStatesReque
func (m *QueryClientStatesRequest) String() string { return proto.CompactTextString(m) }
func (*QueryClientStatesRequest) ProtoMessage() {}
func (*QueryClientStatesRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_833c7bc6da1addd1, []int{2}
+ return fileDescriptor_dc42cdfd1d52d76e, []int{2}
}
func (m *QueryClientStatesRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -204,7 +204,7 @@ func (m *QueryClientStatesResponse) Reset() { *m = QueryClientStatesResp
func (m *QueryClientStatesResponse) String() string { return proto.CompactTextString(m) }
func (*QueryClientStatesResponse) ProtoMessage() {}
func (*QueryClientStatesResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_833c7bc6da1addd1, []int{3}
+ return fileDescriptor_dc42cdfd1d52d76e, []int{3}
}
func (m *QueryClientStatesResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -266,7 +266,7 @@ func (m *QueryConsensusStateRequest) Reset() { *m = QueryConsensusStateR
func (m *QueryConsensusStateRequest) String() string { return proto.CompactTextString(m) }
func (*QueryConsensusStateRequest) ProtoMessage() {}
func (*QueryConsensusStateRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_833c7bc6da1addd1, []int{4}
+ return fileDescriptor_dc42cdfd1d52d76e, []int{4}
}
func (m *QueryConsensusStateRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -338,7 +338,7 @@ func (m *QueryConsensusStateResponse) Reset() { *m = QueryConsensusState
func (m *QueryConsensusStateResponse) String() string { return proto.CompactTextString(m) }
func (*QueryConsensusStateResponse) ProtoMessage() {}
func (*QueryConsensusStateResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_833c7bc6da1addd1, []int{5}
+ return fileDescriptor_dc42cdfd1d52d76e, []int{5}
}
func (m *QueryConsensusStateResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -401,7 +401,7 @@ func (m *QueryConsensusStatesRequest) Reset() { *m = QueryConsensusState
func (m *QueryConsensusStatesRequest) String() string { return proto.CompactTextString(m) }
func (*QueryConsensusStatesRequest) ProtoMessage() {}
func (*QueryConsensusStatesRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_833c7bc6da1addd1, []int{6}
+ return fileDescriptor_dc42cdfd1d52d76e, []int{6}
}
func (m *QueryConsensusStatesRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -457,7 +457,7 @@ func (m *QueryConsensusStatesResponse) Reset() { *m = QueryConsensusStat
func (m *QueryConsensusStatesResponse) String() string { return proto.CompactTextString(m) }
func (*QueryConsensusStatesResponse) ProtoMessage() {}
func (*QueryConsensusStatesResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_833c7bc6da1addd1, []int{7}
+ return fileDescriptor_dc42cdfd1d52d76e, []int{7}
}
func (m *QueryConsensusStatesResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -509,7 +509,7 @@ func (m *QueryClientParamsRequest) Reset() { *m = QueryClientParamsReque
func (m *QueryClientParamsRequest) String() string { return proto.CompactTextString(m) }
func (*QueryClientParamsRequest) ProtoMessage() {}
func (*QueryClientParamsRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_833c7bc6da1addd1, []int{8}
+ return fileDescriptor_dc42cdfd1d52d76e, []int{8}
}
func (m *QueryClientParamsRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -549,7 +549,7 @@ func (m *QueryClientParamsResponse) Reset() { *m = QueryClientParamsResp
func (m *QueryClientParamsResponse) String() string { return proto.CompactTextString(m) }
func (*QueryClientParamsResponse) ProtoMessage() {}
func (*QueryClientParamsResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_833c7bc6da1addd1, []int{9}
+ return fileDescriptor_dc42cdfd1d52d76e, []int{9}
}
func (m *QueryClientParamsResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -599,7 +599,7 @@ func (m *QueryUpgradedClientStateRequest) Reset() { *m = QueryUpgradedCl
func (m *QueryUpgradedClientStateRequest) String() string { return proto.CompactTextString(m) }
func (*QueryUpgradedClientStateRequest) ProtoMessage() {}
func (*QueryUpgradedClientStateRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_833c7bc6da1addd1, []int{10}
+ return fileDescriptor_dc42cdfd1d52d76e, []int{10}
}
func (m *QueryUpgradedClientStateRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -653,7 +653,7 @@ func (m *QueryUpgradedClientStateResponse) Reset() { *m = QueryUpgradedC
func (m *QueryUpgradedClientStateResponse) String() string { return proto.CompactTextString(m) }
func (*QueryUpgradedClientStateResponse) ProtoMessage() {}
func (*QueryUpgradedClientStateResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_833c7bc6da1addd1, []int{11}
+ return fileDescriptor_dc42cdfd1d52d76e, []int{11}
}
func (m *QueryUpgradedClientStateResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -690,81 +690,81 @@ func (m *QueryUpgradedClientStateResponse) GetUpgradedClientState() *types.Any {
}
func init() {
- proto.RegisterType((*QueryClientStateRequest)(nil), "ibcgo.core.client.v1.QueryClientStateRequest")
- proto.RegisterType((*QueryClientStateResponse)(nil), "ibcgo.core.client.v1.QueryClientStateResponse")
- proto.RegisterType((*QueryClientStatesRequest)(nil), "ibcgo.core.client.v1.QueryClientStatesRequest")
- proto.RegisterType((*QueryClientStatesResponse)(nil), "ibcgo.core.client.v1.QueryClientStatesResponse")
- proto.RegisterType((*QueryConsensusStateRequest)(nil), "ibcgo.core.client.v1.QueryConsensusStateRequest")
- proto.RegisterType((*QueryConsensusStateResponse)(nil), "ibcgo.core.client.v1.QueryConsensusStateResponse")
- proto.RegisterType((*QueryConsensusStatesRequest)(nil), "ibcgo.core.client.v1.QueryConsensusStatesRequest")
- proto.RegisterType((*QueryConsensusStatesResponse)(nil), "ibcgo.core.client.v1.QueryConsensusStatesResponse")
- proto.RegisterType((*QueryClientParamsRequest)(nil), "ibcgo.core.client.v1.QueryClientParamsRequest")
- proto.RegisterType((*QueryClientParamsResponse)(nil), "ibcgo.core.client.v1.QueryClientParamsResponse")
- proto.RegisterType((*QueryUpgradedClientStateRequest)(nil), "ibcgo.core.client.v1.QueryUpgradedClientStateRequest")
- proto.RegisterType((*QueryUpgradedClientStateResponse)(nil), "ibcgo.core.client.v1.QueryUpgradedClientStateResponse")
-}
-
-func init() { proto.RegisterFile("ibcgo/core/client/v1/query.proto", fileDescriptor_833c7bc6da1addd1) }
-
-var fileDescriptor_833c7bc6da1addd1 = []byte{
- // 911 bytes of a gzipped FileDescriptorProto
+ proto.RegisterType((*QueryClientStateRequest)(nil), "ibc.core.client.v1.QueryClientStateRequest")
+ proto.RegisterType((*QueryClientStateResponse)(nil), "ibc.core.client.v1.QueryClientStateResponse")
+ proto.RegisterType((*QueryClientStatesRequest)(nil), "ibc.core.client.v1.QueryClientStatesRequest")
+ proto.RegisterType((*QueryClientStatesResponse)(nil), "ibc.core.client.v1.QueryClientStatesResponse")
+ proto.RegisterType((*QueryConsensusStateRequest)(nil), "ibc.core.client.v1.QueryConsensusStateRequest")
+ proto.RegisterType((*QueryConsensusStateResponse)(nil), "ibc.core.client.v1.QueryConsensusStateResponse")
+ proto.RegisterType((*QueryConsensusStatesRequest)(nil), "ibc.core.client.v1.QueryConsensusStatesRequest")
+ proto.RegisterType((*QueryConsensusStatesResponse)(nil), "ibc.core.client.v1.QueryConsensusStatesResponse")
+ proto.RegisterType((*QueryClientParamsRequest)(nil), "ibc.core.client.v1.QueryClientParamsRequest")
+ proto.RegisterType((*QueryClientParamsResponse)(nil), "ibc.core.client.v1.QueryClientParamsResponse")
+ proto.RegisterType((*QueryUpgradedClientStateRequest)(nil), "ibc.core.client.v1.QueryUpgradedClientStateRequest")
+ proto.RegisterType((*QueryUpgradedClientStateResponse)(nil), "ibc.core.client.v1.QueryUpgradedClientStateResponse")
+}
+
+func init() { proto.RegisterFile("ibc/core/client/v1/query.proto", fileDescriptor_dc42cdfd1d52d76e) }
+
+var fileDescriptor_dc42cdfd1d52d76e = []byte{
+ // 909 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0xcf, 0x8f, 0xdb, 0x44,
- 0x14, 0xde, 0xd9, 0xdd, 0x56, 0xdb, 0x49, 0xba, 0x41, 0xd3, 0x94, 0xa6, 0xee, 0x92, 0xa4, 0xae,
- 0x44, 0x03, 0x28, 0x33, 0x9b, 0xc0, 0x2e, 0xbd, 0x80, 0xc4, 0x22, 0x4a, 0x7b, 0x81, 0xd6, 0x08,
- 0x21, 0x71, 0x89, 0x6c, 0x67, 0xe2, 0x58, 0x4a, 0x3c, 0xae, 0xc7, 0x8e, 0xb4, 0xaa, 0xf6, 0xd2,
- 0x3f, 0x00, 0x21, 0x71, 0xe3, 0xca, 0x8d, 0x53, 0xc5, 0x8d, 0x03, 0x37, 0x0e, 0x7b, 0xac, 0x84,
- 0x84, 0x38, 0x01, 0xda, 0xf0, 0x87, 0x20, 0xcf, 0x8c, 0x53, 0x3b, 0x3b, 0x69, 0x1d, 0x44, 0x6f,
- 0xde, 0xf7, 0xf3, 0xfb, 0xbe, 0xf7, 0xe6, 0x65, 0x61, 0xdb, 0x77, 0x5c, 0x8f, 0x11, 0x97, 0x45,
- 0x94, 0xb8, 0x13, 0x9f, 0x06, 0x31, 0x99, 0xf5, 0xc8, 0xa3, 0x84, 0x46, 0xc7, 0x38, 0x8c, 0x58,
- 0xcc, 0x50, 0x5d, 0x44, 0xe0, 0x34, 0x02, 0xcb, 0x08, 0x3c, 0xeb, 0x19, 0x6f, 0xbb, 0x8c, 0x4f,
- 0x19, 0x27, 0x8e, 0xcd, 0xa9, 0x0c, 0x27, 0xb3, 0x9e, 0x43, 0x63, 0xbb, 0x47, 0x42, 0xdb, 0xf3,
- 0x03, 0x3b, 0xf6, 0x59, 0x20, 0x2b, 0x18, 0x37, 0xb5, 0x3d, 0x54, 0x2d, 0x19, 0x72, 0xdd, 0x63,
- 0xcc, 0x9b, 0x50, 0x22, 0xfe, 0x72, 0x92, 0x11, 0xb1, 0x03, 0xd5, 0xdf, 0xd8, 0x53, 0x2e, 0x3b,
- 0xf4, 0x89, 0x1d, 0x04, 0x2c, 0x16, 0xa5, 0xb9, 0xf2, 0xd6, 0x3d, 0xe6, 0x31, 0xf1, 0x49, 0xd2,
- 0x2f, 0x69, 0x35, 0x0f, 0xe1, 0xb5, 0x87, 0x29, 0xa6, 0x8f, 0x45, 0x8f, 0x2f, 0x62, 0x3b, 0xa6,
- 0x16, 0x7d, 0x94, 0x50, 0x1e, 0xa3, 0x1b, 0xf0, 0x92, 0xec, 0x3c, 0xf0, 0x87, 0x0d, 0xd0, 0x06,
- 0x9d, 0x4b, 0xd6, 0x8e, 0x34, 0xdc, 0x1f, 0x9a, 0x4f, 0x01, 0x6c, 0x9c, 0x4f, 0xe4, 0x21, 0x0b,
- 0x38, 0x45, 0xef, 0xc3, 0xaa, 0xca, 0xe4, 0xa9, 0x5d, 0x24, 0x57, 0xfa, 0x75, 0x2c, 0xf1, 0xe1,
- 0x0c, 0x3a, 0xfe, 0x28, 0x38, 0xb6, 0x2a, 0xee, 0xf3, 0x02, 0xa8, 0x0e, 0x2f, 0x84, 0x11, 0x63,
- 0xa3, 0xc6, 0x66, 0x1b, 0x74, 0xaa, 0x96, 0xfc, 0x03, 0x7d, 0x02, 0xab, 0xe2, 0x63, 0x30, 0xa6,
- 0xbe, 0x37, 0x8e, 0x1b, 0x5b, 0xa2, 0xdc, 0x1e, 0xd6, 0xc9, 0x8d, 0xef, 0x89, 0x98, 0xa3, 0xed,
- 0xd3, 0x3f, 0x5b, 0x1b, 0x56, 0x45, 0xe4, 0x49, 0x93, 0xe9, 0x9c, 0x47, 0xcc, 0x33, 0xae, 0x77,
- 0x21, 0x7c, 0x3e, 0x0c, 0x85, 0xf7, 0x4d, 0x2c, 0x27, 0x87, 0xd3, 0xc9, 0x61, 0x39, 0x68, 0x35,
- 0x39, 0xfc, 0xc0, 0xf6, 0x32, 0x9d, 0xac, 0x5c, 0xa6, 0xf9, 0x3b, 0x80, 0xd7, 0x35, 0x4d, 0x94,
- 0x2e, 0x21, 0xbc, 0x9c, 0xd7, 0x85, 0x37, 0x40, 0x7b, 0xab, 0x53, 0xe9, 0xbf, 0xa3, 0x67, 0x72,
- 0x7f, 0x48, 0x83, 0xd8, 0x1f, 0xf9, 0x74, 0x98, 0x2b, 0x76, 0xd4, 0x4c, 0x89, 0xfd, 0xf8, 0x57,
- 0xeb, 0x75, 0xad, 0x9b, 0x5b, 0xd5, 0x9c, 0x9e, 0x1c, 0x7d, 0x5a, 0xe0, 0xb5, 0x29, 0x78, 0xdd,
- 0x7e, 0x29, 0x2f, 0x09, 0xb7, 0x40, 0xec, 0x29, 0x80, 0x86, 0x24, 0x96, 0xba, 0x02, 0x9e, 0xf0,
- 0xd2, 0xbb, 0x82, 0x6e, 0xc3, 0x5a, 0x44, 0x67, 0x3e, 0xf7, 0x59, 0x30, 0x08, 0x92, 0xa9, 0x43,
- 0x23, 0x81, 0x64, 0xdb, 0xda, 0xcd, 0xcc, 0x9f, 0x09, 0x6b, 0x21, 0x30, 0x37, 0xeb, 0x5c, 0xa0,
- 0x1c, 0x25, 0xba, 0x05, 0x2f, 0x4f, 0x52, 0x7e, 0x71, 0x16, 0xb6, 0xdd, 0x06, 0x9d, 0x1d, 0xab,
- 0x2a, 0x8d, 0x6a, 0xde, 0x3f, 0x03, 0x78, 0x43, 0x0b, 0x59, 0x4d, 0xe3, 0x03, 0x58, 0x73, 0x33,
- 0x4f, 0x89, 0x45, 0xdd, 0x75, 0x0b, 0x65, 0x5e, 0xed, 0xae, 0x3e, 0xd1, 0x63, 0xe7, 0xa5, 0xf4,
- 0xbe, 0xab, 0x19, 0xfa, 0x7f, 0x59, 0xe6, 0x53, 0x00, 0xf7, 0xf4, 0x20, 0x94, 0x82, 0x03, 0xf8,
- 0xda, 0x92, 0x82, 0xd9, 0x4a, 0x63, 0x3d, 0xe1, 0x62, 0xa1, 0xaf, 0xfc, 0x78, 0x5c, 0x90, 0xa0,
- 0x56, 0x94, 0xf8, 0x7f, 0x5c, 0x5f, 0xa3, 0xf0, 0xf6, 0x1f, 0xd8, 0x91, 0x3d, 0xcd, 0xb4, 0x34,
- 0x1f, 0x16, 0x9e, 0x6c, 0xe6, 0x53, 0x14, 0xdf, 0x83, 0x17, 0x43, 0x61, 0x51, 0xbb, 0xb1, 0x62,
- 0x92, 0x2a, 0x4b, 0xc5, 0x9a, 0x03, 0xd8, 0x12, 0x25, 0xbf, 0x0c, 0xbd, 0xc8, 0x1e, 0x16, 0x5e,
- 0x68, 0xa9, 0x09, 0xb6, 0x60, 0x25, 0x9c, 0xd8, 0x8b, 0x47, 0x90, 0x12, 0xdf, 0xb2, 0x60, 0x6a,
- 0x52, 0xfb, 0x31, 0x81, 0xed, 0xd5, 0x0d, 0x14, 0xf4, 0x7b, 0xf0, 0x6a, 0xa2, 0xdc, 0x83, 0xd2,
- 0xe7, 0xf8, 0x4a, 0x72, 0xbe, 0x62, 0xff, 0x97, 0x1d, 0x78, 0x41, 0xb4, 0x43, 0x3f, 0x00, 0x58,
- 0xc9, 0x79, 0x50, 0x57, 0x2f, 0xc7, 0x8a, 0x9f, 0x14, 0x03, 0x97, 0x0d, 0x97, 0x14, 0xcc, 0x83,
- 0x27, 0xbf, 0xfd, 0xf3, 0xdd, 0x26, 0x41, 0x5d, 0xe2, 0x3b, 0xae, 0xfe, 0x67, 0x51, 0xed, 0x1d,
- 0x79, 0xbc, 0x50, 0xf3, 0x04, 0x7d, 0x0f, 0x60, 0x35, 0x7f, 0x14, 0x51, 0xc9, 0xbe, 0xd9, 0x4a,
- 0x18, 0xa4, 0x74, 0xbc, 0x02, 0xfa, 0x96, 0x00, 0x7a, 0x0b, 0xdd, 0x7c, 0x29, 0x50, 0x34, 0x07,
- 0x70, 0xb7, 0xf8, 0x0e, 0xd0, 0xfe, 0x8b, 0xda, 0xe9, 0xee, 0xad, 0xd1, 0x5b, 0x23, 0x43, 0x41,
- 0x9c, 0x08, 0x88, 0x23, 0x34, 0xd4, 0x42, 0x5c, 0x7a, 0xc6, 0x79, 0x39, 0x49, 0x76, 0x7c, 0xc9,
- 0xe3, 0xa5, 0x33, 0x7e, 0x42, 0xe4, 0x82, 0xe6, 0x1c, 0xd2, 0x70, 0x82, 0x7e, 0x02, 0xb0, 0xb6,
- 0x74, 0x36, 0x50, 0x79, 0xd0, 0x8b, 0x41, 0xf4, 0xd7, 0x49, 0x51, 0x44, 0xef, 0x08, 0xa2, 0x7d,
- 0xb4, 0xbf, 0x2e, 0x51, 0xf4, 0xcd, 0x62, 0x6f, 0xe4, 0x7b, 0x2e, 0xb1, 0x37, 0x85, 0x53, 0x52,
- 0x62, 0x6f, 0x8a, 0xe7, 0xc5, 0x7c, 0x43, 0x60, 0xbd, 0x86, 0xae, 0x4a, 0xac, 0x0b, 0x98, 0xf2,
- 0x8e, 0xa0, 0x5f, 0x01, 0xbc, 0xa2, 0x79, 0xe2, 0xe8, 0xe0, 0x05, 0x7d, 0x56, 0xdf, 0x1c, 0xe3,
- 0x70, 0xdd, 0x34, 0x85, 0xf2, 0x43, 0x81, 0xf2, 0x0e, 0x3a, 0xd4, 0x29, 0xaa, 0xbd, 0x31, 0x05,
- 0x5d, 0x8f, 0x3e, 0x3f, 0x3d, 0x6b, 0x82, 0x67, 0x67, 0x4d, 0xf0, 0xf7, 0x59, 0x13, 0x7c, 0x3b,
- 0x6f, 0x6e, 0x3c, 0x9b, 0x37, 0x37, 0xfe, 0x98, 0x37, 0x37, 0xbe, 0x3e, 0xf0, 0xfc, 0x78, 0x9c,
- 0x38, 0xd8, 0x65, 0x53, 0xa2, 0xfe, 0x4f, 0xf6, 0x1d, 0xb7, 0xeb, 0x31, 0x32, 0x65, 0xc3, 0x64,
- 0x42, 0xb9, 0xec, 0xb6, 0xdf, 0xef, 0xaa, 0x86, 0xf1, 0x71, 0x48, 0xb9, 0x73, 0x51, 0xdc, 0xac,
- 0x77, 0xff, 0x0d, 0x00, 0x00, 0xff, 0xff, 0x42, 0xbb, 0x44, 0xc2, 0x94, 0x0b, 0x00, 0x00,
+ 0x14, 0xce, 0xec, 0x6e, 0xab, 0xed, 0x24, 0xdd, 0xa0, 0xe9, 0x96, 0xa6, 0x6e, 0x71, 0x82, 0x2b,
+ 0xd1, 0x14, 0x1a, 0xcf, 0x26, 0xa5, 0xa5, 0x17, 0x90, 0xd8, 0x4a, 0xa5, 0xbd, 0xd0, 0x62, 0x84,
+ 0x90, 0x90, 0x50, 0x64, 0x3b, 0x13, 0xc7, 0x52, 0xe2, 0x71, 0x3d, 0x76, 0xa4, 0x55, 0xb5, 0x97,
+ 0x1e, 0x39, 0x21, 0x21, 0x71, 0xe5, 0xc4, 0x85, 0xc3, 0x8a, 0x1b, 0x57, 0x4e, 0x68, 0x8f, 0x2b,
+ 0xc1, 0x81, 0x13, 0x8b, 0x76, 0xf9, 0x43, 0x90, 0x67, 0xc6, 0x59, 0x3b, 0x99, 0x28, 0x5e, 0x44,
+ 0x6f, 0xde, 0xf7, 0xf3, 0x7b, 0xdf, 0xfb, 0xe6, 0x65, 0xa1, 0xee, 0x3b, 0x2e, 0x76, 0x69, 0x44,
+ 0xb0, 0x3b, 0xf6, 0x49, 0x10, 0xe3, 0x69, 0x17, 0xbf, 0x48, 0x48, 0xb4, 0x67, 0x86, 0x11, 0x8d,
+ 0x29, 0x42, 0xbe, 0xe3, 0x9a, 0xa9, 0xdf, 0x14, 0x7e, 0x73, 0xda, 0xd5, 0xde, 0x75, 0x29, 0x9b,
+ 0x50, 0x86, 0x1d, 0x9b, 0x11, 0x11, 0x8c, 0xa7, 0x5d, 0x87, 0xc4, 0x76, 0x17, 0x87, 0xb6, 0xe7,
+ 0x07, 0x76, 0xec, 0xd3, 0x40, 0xe4, 0x6b, 0x4d, 0x45, 0x7d, 0x59, 0x49, 0x04, 0x5c, 0xf7, 0x28,
+ 0xf5, 0xc6, 0x04, 0xf3, 0xbf, 0x9c, 0x64, 0x88, 0xed, 0x40, 0xf6, 0xd6, 0x6e, 0x4a, 0x97, 0x1d,
+ 0xfa, 0xd8, 0x0e, 0x02, 0x1a, 0xf3, 0xc2, 0x4c, 0x7a, 0xb7, 0x3d, 0xea, 0x51, 0xfe, 0x89, 0xd3,
+ 0x2f, 0x61, 0x35, 0x1e, 0xc0, 0x6b, 0x9f, 0xa5, 0x88, 0x1e, 0xf1, 0x1e, 0x9f, 0xc7, 0x76, 0x4c,
+ 0x2c, 0xf2, 0x22, 0x21, 0x2c, 0x46, 0x37, 0xe0, 0x25, 0xd1, 0xb9, 0xef, 0x0f, 0x1a, 0xa0, 0x05,
+ 0xda, 0x97, 0xac, 0x4d, 0x61, 0x78, 0x3a, 0x30, 0x0e, 0x00, 0x6c, 0x2c, 0x26, 0xb2, 0x90, 0x06,
+ 0x8c, 0xa0, 0x0f, 0x60, 0x4d, 0x66, 0xb2, 0xd4, 0xce, 0x93, 0xab, 0xbd, 0x6d, 0x53, 0xe0, 0x33,
+ 0x33, 0xe8, 0xe6, 0xc7, 0xc1, 0x9e, 0x55, 0x75, 0xcf, 0x0a, 0xa0, 0x6d, 0x78, 0x21, 0x8c, 0x28,
+ 0x1d, 0x36, 0xd6, 0x5a, 0xa0, 0x5d, 0xb3, 0xc4, 0x1f, 0xe8, 0x11, 0xac, 0xf1, 0x8f, 0xfe, 0x88,
+ 0xf8, 0xde, 0x28, 0x6e, 0xac, 0xf3, 0x72, 0x9a, 0xb9, 0x48, 0xb5, 0xf9, 0x84, 0x47, 0xec, 0x6e,
+ 0x1c, 0xfe, 0xd5, 0xac, 0x58, 0x55, 0x9e, 0x25, 0x4c, 0x86, 0xb3, 0x88, 0x97, 0x65, 0x93, 0x3e,
+ 0x86, 0xf0, 0x6c, 0x11, 0x12, 0xed, 0x3b, 0xa6, 0xd8, 0x9a, 0x99, 0x6e, 0xcd, 0x14, 0x2b, 0x96,
+ 0x5b, 0x33, 0x9f, 0xdb, 0x5e, 0xc6, 0x92, 0x95, 0xcb, 0x34, 0xfe, 0x00, 0xf0, 0xba, 0xa2, 0x89,
+ 0x64, 0x25, 0x80, 0x97, 0xf3, 0xac, 0xb0, 0x06, 0x68, 0xad, 0xb7, 0xab, 0xbd, 0x3b, 0xaa, 0x39,
+ 0x9e, 0x0e, 0x48, 0x10, 0xfb, 0x43, 0x9f, 0x0c, 0x72, 0xa5, 0x76, 0xf5, 0x74, 0xac, 0x9f, 0x8e,
+ 0x9b, 0x6f, 0x2a, 0xdd, 0xcc, 0xaa, 0xe5, 0xb8, 0x64, 0xe8, 0x93, 0xc2, 0x54, 0x6b, 0x7c, 0xaa,
+ 0xdb, 0x2b, 0xa7, 0x12, 0x60, 0x0b, 0x63, 0xfd, 0x0c, 0xa0, 0x26, 0xc6, 0x4a, 0x5d, 0x01, 0x4b,
+ 0x58, 0x69, 0x9d, 0xa0, 0xdb, 0xb0, 0x1e, 0x91, 0xa9, 0xcf, 0x7c, 0x1a, 0xf4, 0x83, 0x64, 0xe2,
+ 0x90, 0x88, 0x23, 0xd9, 0xb0, 0xb6, 0x32, 0xf3, 0xa7, 0xdc, 0x5a, 0x08, 0xcc, 0xed, 0x39, 0x17,
+ 0x28, 0x16, 0x89, 0x6e, 0xc1, 0xcb, 0xe3, 0x74, 0xbe, 0x38, 0x0b, 0xdb, 0x68, 0x81, 0xf6, 0xa6,
+ 0x55, 0x13, 0x46, 0xb9, 0xed, 0x5f, 0x00, 0xbc, 0xa1, 0x84, 0x2c, 0x77, 0xf1, 0x21, 0xac, 0xbb,
+ 0x99, 0xa7, 0x84, 0x48, 0xb7, 0xdc, 0x42, 0x99, 0xd7, 0xa9, 0xd3, 0x57, 0x6a, 0xe4, 0xac, 0x14,
+ 0xdb, 0x8f, 0x15, 0x2b, 0xff, 0x2f, 0x42, 0xfe, 0x0d, 0xc0, 0x9b, 0x6a, 0x10, 0x92, 0xbf, 0xaf,
+ 0xe1, 0x1b, 0x73, 0xfc, 0x65, 0x72, 0xbe, 0xab, 0x1a, 0xb7, 0x58, 0xe6, 0x4b, 0x3f, 0x1e, 0x15,
+ 0x08, 0xa8, 0x17, 0xe9, 0xfd, 0x1f, 0xa5, 0xab, 0x15, 0x5e, 0xfd, 0x73, 0x3b, 0xb2, 0x27, 0x19,
+ 0x93, 0xc6, 0xb3, 0xc2, 0x63, 0xcd, 0x7c, 0x72, 0xc0, 0x1e, 0xbc, 0x18, 0x72, 0x8b, 0xd4, 0x85,
+ 0x72, 0x8b, 0x32, 0x47, 0x46, 0x1a, 0x7d, 0xd8, 0xe4, 0x05, 0xbf, 0x08, 0xbd, 0xc8, 0x1e, 0x14,
+ 0xde, 0x66, 0xa9, 0xed, 0x35, 0x61, 0x35, 0x1c, 0xdb, 0x33, 0xf9, 0xa7, 0x63, 0xaf, 0x5b, 0x30,
+ 0x35, 0x49, 0x6d, 0x8c, 0x61, 0x6b, 0x79, 0x03, 0x09, 0xfc, 0x09, 0xbc, 0x9a, 0x48, 0x77, 0xbf,
+ 0xf4, 0x11, 0xbe, 0x92, 0x2c, 0x56, 0xec, 0xfd, 0xb8, 0x09, 0x2f, 0xf0, 0x76, 0xe8, 0x07, 0x00,
+ 0xab, 0x39, 0x0f, 0x7a, 0x4f, 0x45, 0xc6, 0x92, 0x9f, 0x11, 0xed, 0x6e, 0xb9, 0x60, 0x01, 0xdf,
+ 0xb8, 0xff, 0xea, 0xf7, 0x7f, 0xbe, 0x5b, 0xc3, 0xa8, 0x83, 0x97, 0xfe, 0x10, 0x4a, 0xbd, 0xe1,
+ 0x97, 0x33, 0x26, 0xf7, 0xd1, 0xf7, 0x00, 0xd6, 0xf2, 0xa7, 0x10, 0x95, 0xea, 0x9a, 0x49, 0x41,
+ 0xeb, 0x94, 0x8c, 0x96, 0x20, 0xef, 0x70, 0x90, 0xb7, 0xd0, 0xdb, 0x2b, 0x41, 0xa2, 0x63, 0x00,
+ 0xb7, 0x8a, 0xea, 0x47, 0xe6, 0xf2, 0x66, 0xaa, 0xfb, 0xaa, 0xe1, 0xd2, 0xf1, 0x12, 0xde, 0x98,
+ 0xc3, 0x1b, 0xa2, 0x81, 0x12, 0xde, 0xdc, 0xb3, 0xcd, 0xd3, 0x88, 0xb3, 0x53, 0x8b, 0x5f, 0xce,
+ 0x1d, 0xed, 0x7d, 0x2c, 0x44, 0x99, 0x73, 0x08, 0xc3, 0x3e, 0x3a, 0x00, 0xb0, 0x3e, 0x77, 0x26,
+ 0x50, 0x59, 0xc8, 0xb3, 0x05, 0xec, 0x94, 0x4f, 0x90, 0x43, 0x3e, 0xe4, 0x43, 0xf6, 0xd0, 0xce,
+ 0x79, 0x87, 0x44, 0xdf, 0xcc, 0xb4, 0x22, 0xde, 0xef, 0x4a, 0xad, 0x14, 0xce, 0xc6, 0x4a, 0xad,
+ 0x14, 0x0f, 0x89, 0xf1, 0x16, 0xc7, 0x79, 0x0d, 0x5d, 0x15, 0x38, 0x67, 0x10, 0xc5, 0xcd, 0x40,
+ 0xbf, 0x02, 0x78, 0x45, 0xf1, 0x9c, 0xd1, 0xbd, 0xa5, 0x5d, 0x96, 0x5f, 0x17, 0xed, 0xfd, 0xf3,
+ 0x25, 0x49, 0x84, 0x1f, 0x71, 0x84, 0x0f, 0xd1, 0x03, 0x15, 0x93, 0xca, 0x5b, 0x52, 0xe0, 0x73,
+ 0xf7, 0xd9, 0xe1, 0x89, 0x0e, 0x8e, 0x4e, 0x74, 0xf0, 0xf7, 0x89, 0x0e, 0xbe, 0x3d, 0xd5, 0x2b,
+ 0x47, 0xa7, 0x7a, 0xe5, 0xcf, 0x53, 0xbd, 0xf2, 0xd5, 0x7d, 0xcf, 0x8f, 0x47, 0x89, 0x63, 0xba,
+ 0x74, 0x82, 0xe5, 0xff, 0xc0, 0xbe, 0xe3, 0x76, 0x3c, 0x8a, 0x27, 0x74, 0x90, 0x8c, 0x09, 0x13,
+ 0xdd, 0x76, 0x7a, 0x1d, 0xd9, 0x30, 0xde, 0x0b, 0x09, 0x73, 0x2e, 0xf2, 0xdb, 0x74, 0xef, 0xdf,
+ 0x00, 0x00, 0x00, 0xff, 0xff, 0xf6, 0x94, 0x03, 0xe8, 0x6c, 0x0b, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
@@ -805,7 +805,7 @@ func NewQueryClient(cc grpc1.ClientConn) QueryClient {
func (c *queryClient) ClientState(ctx context.Context, in *QueryClientStateRequest, opts ...grpc.CallOption) (*QueryClientStateResponse, error) {
out := new(QueryClientStateResponse)
- err := c.cc.Invoke(ctx, "/ibcgo.core.client.v1.Query/ClientState", in, out, opts...)
+ err := c.cc.Invoke(ctx, "/ibc.core.client.v1.Query/ClientState", in, out, opts...)
if err != nil {
return nil, err
}
@@ -814,7 +814,7 @@ func (c *queryClient) ClientState(ctx context.Context, in *QueryClientStateReque
func (c *queryClient) ClientStates(ctx context.Context, in *QueryClientStatesRequest, opts ...grpc.CallOption) (*QueryClientStatesResponse, error) {
out := new(QueryClientStatesResponse)
- err := c.cc.Invoke(ctx, "/ibcgo.core.client.v1.Query/ClientStates", in, out, opts...)
+ err := c.cc.Invoke(ctx, "/ibc.core.client.v1.Query/ClientStates", in, out, opts...)
if err != nil {
return nil, err
}
@@ -823,7 +823,7 @@ func (c *queryClient) ClientStates(ctx context.Context, in *QueryClientStatesReq
func (c *queryClient) ConsensusState(ctx context.Context, in *QueryConsensusStateRequest, opts ...grpc.CallOption) (*QueryConsensusStateResponse, error) {
out := new(QueryConsensusStateResponse)
- err := c.cc.Invoke(ctx, "/ibcgo.core.client.v1.Query/ConsensusState", in, out, opts...)
+ err := c.cc.Invoke(ctx, "/ibc.core.client.v1.Query/ConsensusState", in, out, opts...)
if err != nil {
return nil, err
}
@@ -832,7 +832,7 @@ func (c *queryClient) ConsensusState(ctx context.Context, in *QueryConsensusStat
func (c *queryClient) ConsensusStates(ctx context.Context, in *QueryConsensusStatesRequest, opts ...grpc.CallOption) (*QueryConsensusStatesResponse, error) {
out := new(QueryConsensusStatesResponse)
- err := c.cc.Invoke(ctx, "/ibcgo.core.client.v1.Query/ConsensusStates", in, out, opts...)
+ err := c.cc.Invoke(ctx, "/ibc.core.client.v1.Query/ConsensusStates", in, out, opts...)
if err != nil {
return nil, err
}
@@ -841,7 +841,7 @@ func (c *queryClient) ConsensusStates(ctx context.Context, in *QueryConsensusSta
func (c *queryClient) ClientParams(ctx context.Context, in *QueryClientParamsRequest, opts ...grpc.CallOption) (*QueryClientParamsResponse, error) {
out := new(QueryClientParamsResponse)
- err := c.cc.Invoke(ctx, "/ibcgo.core.client.v1.Query/ClientParams", in, out, opts...)
+ err := c.cc.Invoke(ctx, "/ibc.core.client.v1.Query/ClientParams", in, out, opts...)
if err != nil {
return nil, err
}
@@ -850,7 +850,7 @@ func (c *queryClient) ClientParams(ctx context.Context, in *QueryClientParamsReq
func (c *queryClient) UpgradedClientState(ctx context.Context, in *QueryUpgradedClientStateRequest, opts ...grpc.CallOption) (*QueryUpgradedClientStateResponse, error) {
out := new(QueryUpgradedClientStateResponse)
- err := c.cc.Invoke(ctx, "/ibcgo.core.client.v1.Query/UpgradedClientState", in, out, opts...)
+ err := c.cc.Invoke(ctx, "/ibc.core.client.v1.Query/UpgradedClientState", in, out, opts...)
if err != nil {
return nil, err
}
@@ -912,7 +912,7 @@ func _Query_ClientState_Handler(srv interface{}, ctx context.Context, dec func(i
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/ibcgo.core.client.v1.Query/ClientState",
+ FullMethod: "/ibc.core.client.v1.Query/ClientState",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(QueryServer).ClientState(ctx, req.(*QueryClientStateRequest))
@@ -930,7 +930,7 @@ func _Query_ClientStates_Handler(srv interface{}, ctx context.Context, dec func(
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/ibcgo.core.client.v1.Query/ClientStates",
+ FullMethod: "/ibc.core.client.v1.Query/ClientStates",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(QueryServer).ClientStates(ctx, req.(*QueryClientStatesRequest))
@@ -948,7 +948,7 @@ func _Query_ConsensusState_Handler(srv interface{}, ctx context.Context, dec fun
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/ibcgo.core.client.v1.Query/ConsensusState",
+ FullMethod: "/ibc.core.client.v1.Query/ConsensusState",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(QueryServer).ConsensusState(ctx, req.(*QueryConsensusStateRequest))
@@ -966,7 +966,7 @@ func _Query_ConsensusStates_Handler(srv interface{}, ctx context.Context, dec fu
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/ibcgo.core.client.v1.Query/ConsensusStates",
+ FullMethod: "/ibc.core.client.v1.Query/ConsensusStates",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(QueryServer).ConsensusStates(ctx, req.(*QueryConsensusStatesRequest))
@@ -984,7 +984,7 @@ func _Query_ClientParams_Handler(srv interface{}, ctx context.Context, dec func(
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/ibcgo.core.client.v1.Query/ClientParams",
+ FullMethod: "/ibc.core.client.v1.Query/ClientParams",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(QueryServer).ClientParams(ctx, req.(*QueryClientParamsRequest))
@@ -1002,7 +1002,7 @@ func _Query_UpgradedClientState_Handler(srv interface{}, ctx context.Context, de
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/ibcgo.core.client.v1.Query/UpgradedClientState",
+ FullMethod: "/ibc.core.client.v1.Query/UpgradedClientState",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(QueryServer).UpgradedClientState(ctx, req.(*QueryUpgradedClientStateRequest))
@@ -1011,7 +1011,7 @@ func _Query_UpgradedClientState_Handler(srv interface{}, ctx context.Context, de
}
var _Query_serviceDesc = grpc.ServiceDesc{
- ServiceName: "ibcgo.core.client.v1.Query",
+ ServiceName: "ibc.core.client.v1.Query",
HandlerType: (*QueryServer)(nil),
Methods: []grpc.MethodDesc{
{
@@ -1040,7 +1040,7 @@ var _Query_serviceDesc = grpc.ServiceDesc{
},
},
Streams: []grpc.StreamDesc{},
- Metadata: "ibcgo/core/client/v1/query.proto",
+ Metadata: "ibc/core/client/v1/query.proto",
}
func (m *QueryClientStateRequest) Marshal() (dAtA []byte, err error) {
diff --git a/modules/core/02-client/types/query.pb.gw.go b/modules/core/02-client/types/query.pb.gw.go
index 24c1e508..f29a4fea 100644
--- a/modules/core/02-client/types/query.pb.gw.go
+++ b/modules/core/02-client/types/query.pb.gw.go
@@ -1,5 +1,5 @@
// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
-// source: ibcgo/core/client/v1/query.proto
+// source: ibc/core/client/v1/query.proto
/*
Package types is a reverse proxy.
diff --git a/modules/core/02-client/types/tx.pb.go b/modules/core/02-client/types/tx.pb.go
index 1067063e..e19d4e63 100644
--- a/modules/core/02-client/types/tx.pb.go
+++ b/modules/core/02-client/types/tx.pb.go
@@ -1,5 +1,5 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: ibcgo/core/client/v1/tx.proto
+// source: ibc/core/client/v1/tx.proto
package types
@@ -44,7 +44,7 @@ func (m *MsgCreateClient) Reset() { *m = MsgCreateClient{} }
func (m *MsgCreateClient) String() string { return proto.CompactTextString(m) }
func (*MsgCreateClient) ProtoMessage() {}
func (*MsgCreateClient) Descriptor() ([]byte, []int) {
- return fileDescriptor_3848774a44f81317, []int{0}
+ return fileDescriptor_cb5dc4651eb49a04, []int{0}
}
func (m *MsgCreateClient) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -81,7 +81,7 @@ func (m *MsgCreateClientResponse) Reset() { *m = MsgCreateClientResponse
func (m *MsgCreateClientResponse) String() string { return proto.CompactTextString(m) }
func (*MsgCreateClientResponse) ProtoMessage() {}
func (*MsgCreateClientResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_3848774a44f81317, []int{1}
+ return fileDescriptor_cb5dc4651eb49a04, []int{1}
}
func (m *MsgCreateClientResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -125,7 +125,7 @@ func (m *MsgUpdateClient) Reset() { *m = MsgUpdateClient{} }
func (m *MsgUpdateClient) String() string { return proto.CompactTextString(m) }
func (*MsgUpdateClient) ProtoMessage() {}
func (*MsgUpdateClient) Descriptor() ([]byte, []int) {
- return fileDescriptor_3848774a44f81317, []int{2}
+ return fileDescriptor_cb5dc4651eb49a04, []int{2}
}
func (m *MsgUpdateClient) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -162,7 +162,7 @@ func (m *MsgUpdateClientResponse) Reset() { *m = MsgUpdateClientResponse
func (m *MsgUpdateClientResponse) String() string { return proto.CompactTextString(m) }
func (*MsgUpdateClientResponse) ProtoMessage() {}
func (*MsgUpdateClientResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_3848774a44f81317, []int{3}
+ return fileDescriptor_cb5dc4651eb49a04, []int{3}
}
func (m *MsgUpdateClientResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -213,7 +213,7 @@ func (m *MsgUpgradeClient) Reset() { *m = MsgUpgradeClient{} }
func (m *MsgUpgradeClient) String() string { return proto.CompactTextString(m) }
func (*MsgUpgradeClient) ProtoMessage() {}
func (*MsgUpgradeClient) Descriptor() ([]byte, []int) {
- return fileDescriptor_3848774a44f81317, []int{4}
+ return fileDescriptor_cb5dc4651eb49a04, []int{4}
}
func (m *MsgUpgradeClient) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -250,7 +250,7 @@ func (m *MsgUpgradeClientResponse) Reset() { *m = MsgUpgradeClientRespon
func (m *MsgUpgradeClientResponse) String() string { return proto.CompactTextString(m) }
func (*MsgUpgradeClientResponse) ProtoMessage() {}
func (*MsgUpgradeClientResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_3848774a44f81317, []int{5}
+ return fileDescriptor_cb5dc4651eb49a04, []int{5}
}
func (m *MsgUpgradeClientResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -294,7 +294,7 @@ func (m *MsgSubmitMisbehaviour) Reset() { *m = MsgSubmitMisbehaviour{} }
func (m *MsgSubmitMisbehaviour) String() string { return proto.CompactTextString(m) }
func (*MsgSubmitMisbehaviour) ProtoMessage() {}
func (*MsgSubmitMisbehaviour) Descriptor() ([]byte, []int) {
- return fileDescriptor_3848774a44f81317, []int{6}
+ return fileDescriptor_cb5dc4651eb49a04, []int{6}
}
func (m *MsgSubmitMisbehaviour) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -332,7 +332,7 @@ func (m *MsgSubmitMisbehaviourResponse) Reset() { *m = MsgSubmitMisbehav
func (m *MsgSubmitMisbehaviourResponse) String() string { return proto.CompactTextString(m) }
func (*MsgSubmitMisbehaviourResponse) ProtoMessage() {}
func (*MsgSubmitMisbehaviourResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_3848774a44f81317, []int{7}
+ return fileDescriptor_cb5dc4651eb49a04, []int{7}
}
func (m *MsgSubmitMisbehaviourResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -362,58 +362,58 @@ func (m *MsgSubmitMisbehaviourResponse) XXX_DiscardUnknown() {
var xxx_messageInfo_MsgSubmitMisbehaviourResponse proto.InternalMessageInfo
func init() {
- proto.RegisterType((*MsgCreateClient)(nil), "ibcgo.core.client.v1.MsgCreateClient")
- proto.RegisterType((*MsgCreateClientResponse)(nil), "ibcgo.core.client.v1.MsgCreateClientResponse")
- proto.RegisterType((*MsgUpdateClient)(nil), "ibcgo.core.client.v1.MsgUpdateClient")
- proto.RegisterType((*MsgUpdateClientResponse)(nil), "ibcgo.core.client.v1.MsgUpdateClientResponse")
- proto.RegisterType((*MsgUpgradeClient)(nil), "ibcgo.core.client.v1.MsgUpgradeClient")
- proto.RegisterType((*MsgUpgradeClientResponse)(nil), "ibcgo.core.client.v1.MsgUpgradeClientResponse")
- proto.RegisterType((*MsgSubmitMisbehaviour)(nil), "ibcgo.core.client.v1.MsgSubmitMisbehaviour")
- proto.RegisterType((*MsgSubmitMisbehaviourResponse)(nil), "ibcgo.core.client.v1.MsgSubmitMisbehaviourResponse")
-}
-
-func init() { proto.RegisterFile("ibcgo/core/client/v1/tx.proto", fileDescriptor_3848774a44f81317) }
-
-var fileDescriptor_3848774a44f81317 = []byte{
- // 607 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x55, 0xcb, 0x6e, 0xd3, 0x40,
- 0x14, 0x8d, 0x1b, 0x88, 0x9a, 0x69, 0xa0, 0x95, 0x09, 0x6d, 0x6a, 0x14, 0x3b, 0x58, 0x80, 0x22,
- 0x41, 0xc6, 0x24, 0x15, 0x12, 0xea, 0x8e, 0x74, 0xc5, 0x22, 0x02, 0x5c, 0xb1, 0x80, 0x4d, 0xe4,
- 0xc7, 0x74, 0x62, 0x11, 0x7b, 0x22, 0x8f, 0x1d, 0x91, 0x3f, 0x60, 0xc9, 0x82, 0x0f, 0xe8, 0x8a,
- 0x0f, 0xe0, 0x2b, 0x58, 0x76, 0xc1, 0x82, 0x55, 0x54, 0x25, 0x1b, 0xd6, 0xf9, 0x02, 0x94, 0x19,
- 0x27, 0xc4, 0xc6, 0xb1, 0xc2, 0xa3, 0x3b, 0xdf, 0xb9, 0x67, 0xce, 0xb9, 0xc7, 0xf7, 0xce, 0x0c,
- 0xa8, 0x3a, 0xa6, 0x85, 0x89, 0x66, 0x11, 0x1f, 0x69, 0x56, 0xdf, 0x41, 0x5e, 0xa0, 0x0d, 0x9b,
- 0x5a, 0xf0, 0x1e, 0x0e, 0x7c, 0x12, 0x10, 0xb1, 0xcc, 0xd2, 0x70, 0x9e, 0x86, 0x3c, 0x0d, 0x87,
- 0x4d, 0xa9, 0x8c, 0x09, 0x26, 0x0c, 0xa0, 0xcd, 0xbf, 0x38, 0x56, 0x3a, 0xc4, 0x84, 0xe0, 0x3e,
- 0xd2, 0x58, 0x64, 0x86, 0x67, 0x9a, 0xe1, 0x8d, 0xa2, 0xd4, 0xdd, 0x54, 0x95, 0x88, 0x90, 0x41,
- 0xd4, 0x4b, 0x01, 0xec, 0x76, 0x28, 0x3e, 0xf1, 0x91, 0x11, 0xa0, 0x13, 0x96, 0x11, 0x5f, 0x82,
- 0x12, 0xc7, 0x74, 0x69, 0x60, 0x04, 0xa8, 0x22, 0xd4, 0x84, 0xfa, 0x4e, 0xab, 0x0c, 0xb9, 0x10,
- 0x5c, 0x08, 0xc1, 0x67, 0xde, 0xa8, 0x7d, 0x30, 0x1b, 0x2b, 0xb7, 0x46, 0x86, 0xdb, 0x3f, 0x56,
- 0x57, 0xf7, 0xa8, 0xfa, 0x0e, 0x0f, 0x4f, 0xe7, 0x91, 0xf8, 0x06, 0xec, 0x5a, 0xc4, 0xa3, 0xc8,
- 0xa3, 0x21, 0x8d, 0x48, 0xb7, 0x32, 0x48, 0xa5, 0xd9, 0x58, 0xd9, 0x8f, 0x48, 0xe3, 0xdb, 0x54,
- 0xfd, 0xe6, 0x72, 0x85, 0x53, 0xef, 0x83, 0x02, 0x75, 0xb0, 0x87, 0xfc, 0x4a, 0xbe, 0x26, 0xd4,
- 0x8b, 0x7a, 0x14, 0x1d, 0x6f, 0x7f, 0x38, 0x57, 0x72, 0x3f, 0xce, 0x95, 0x9c, 0x7a, 0x08, 0x0e,
- 0x12, 0x0e, 0x75, 0x44, 0x07, 0x73, 0x16, 0xf5, 0x13, 0x77, 0xff, 0x7a, 0x60, 0xff, 0x72, 0xdf,
- 0x04, 0xc5, 0xc8, 0x89, 0x63, 0x33, 0xeb, 0xc5, 0x76, 0x79, 0x36, 0x56, 0xf6, 0x62, 0x26, 0x1d,
- 0x5b, 0xd5, 0xb7, 0xf9, 0xf7, 0x73, 0x5b, 0x7c, 0x04, 0x0a, 0x3d, 0x64, 0xd8, 0xc8, 0xcf, 0x72,
- 0xa5, 0x47, 0x98, 0x8d, 0x2b, 0x5e, 0xad, 0x6a, 0x59, 0xf1, 0xb7, 0x3c, 0xd8, 0x63, 0x39, 0xec,
- 0x1b, 0xf6, 0x3f, 0x94, 0x9c, 0xec, 0xf1, 0xd6, 0x55, 0xf4, 0x38, 0xff, 0x9f, 0x7a, 0xfc, 0x0a,
- 0x94, 0x07, 0x3e, 0x21, 0x67, 0xdd, 0x90, 0xdb, 0xee, 0x72, 0xdd, 0xca, 0xb5, 0x9a, 0x50, 0x2f,
- 0xb5, 0x95, 0xd9, 0x58, 0xb9, 0xc3, 0x99, 0xd2, 0x50, 0xaa, 0x2e, 0xb2, 0xe5, 0xf8, 0x2f, 0x7b,
- 0x07, 0xaa, 0x09, 0x70, 0xa2, 0xf6, 0xeb, 0x8c, 0xbb, 0x3e, 0x1b, 0x2b, 0xf7, 0x52, 0xb9, 0x93,
- 0x35, 0x4b, 0x31, 0x91, 0x75, 0x33, 0x5a, 0x58, 0xd3, 0x71, 0x09, 0x54, 0x92, 0x5d, 0x5d, 0xb6,
- 0xfc, 0xb3, 0x00, 0x6e, 0x77, 0x28, 0x3e, 0x0d, 0x4d, 0xd7, 0x09, 0x3a, 0x0e, 0x35, 0x51, 0xcf,
- 0x18, 0x3a, 0x24, 0xf4, 0xff, 0xa6, 0xef, 0x4f, 0x41, 0xc9, 0x5d, 0xa1, 0xc8, 0x1c, 0xd8, 0x18,
- 0x72, 0x83, 0xb1, 0x55, 0x40, 0x35, 0xb5, 0xce, 0x85, 0x93, 0xd6, 0x97, 0x3c, 0xc8, 0x77, 0x28,
- 0x16, 0x6d, 0x50, 0x8a, 0x5d, 0x38, 0xf7, 0x61, 0xda, 0x7d, 0x07, 0x13, 0xa7, 0x56, 0x6a, 0x6c,
- 0x04, 0x5b, 0xa8, 0xcd, 0x55, 0x62, 0x07, 0x7b, 0xbd, 0xca, 0x2a, 0x2c, 0x43, 0x25, 0xed, 0x40,
- 0x8a, 0x18, 0xdc, 0x88, 0x4f, 0xd6, 0x83, 0x8c, 0xfd, 0x2b, 0x38, 0x09, 0x6e, 0x86, 0x5b, 0x0a,
- 0x0d, 0x81, 0x98, 0x32, 0x02, 0x0f, 0xd7, 0xb2, 0xfc, 0x0e, 0x96, 0x8e, 0xfe, 0x00, 0xbc, 0xd0,
- 0x6d, 0xbf, 0xf8, 0x3a, 0x91, 0x85, 0x8b, 0x89, 0x2c, 0x5c, 0x4e, 0x64, 0xe1, 0xe3, 0x54, 0xce,
- 0x5d, 0x4c, 0xe5, 0xdc, 0xf7, 0xa9, 0x9c, 0x7b, 0xfb, 0x04, 0x3b, 0x41, 0x2f, 0x34, 0xa1, 0x45,
- 0x5c, 0xcd, 0x22, 0xd4, 0x25, 0x54, 0x73, 0x4c, 0xab, 0x81, 0x89, 0xe6, 0x12, 0x3b, 0xec, 0x23,
- 0xca, 0x5f, 0x9e, 0xc7, 0xad, 0x46, 0xf4, 0xf8, 0x04, 0xa3, 0x01, 0xa2, 0x66, 0x81, 0x0d, 0xd9,
- 0xd1, 0xcf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x51, 0xb7, 0x53, 0x8d, 0x04, 0x07, 0x00, 0x00,
+ proto.RegisterType((*MsgCreateClient)(nil), "ibc.core.client.v1.MsgCreateClient")
+ proto.RegisterType((*MsgCreateClientResponse)(nil), "ibc.core.client.v1.MsgCreateClientResponse")
+ proto.RegisterType((*MsgUpdateClient)(nil), "ibc.core.client.v1.MsgUpdateClient")
+ proto.RegisterType((*MsgUpdateClientResponse)(nil), "ibc.core.client.v1.MsgUpdateClientResponse")
+ proto.RegisterType((*MsgUpgradeClient)(nil), "ibc.core.client.v1.MsgUpgradeClient")
+ proto.RegisterType((*MsgUpgradeClientResponse)(nil), "ibc.core.client.v1.MsgUpgradeClientResponse")
+ proto.RegisterType((*MsgSubmitMisbehaviour)(nil), "ibc.core.client.v1.MsgSubmitMisbehaviour")
+ proto.RegisterType((*MsgSubmitMisbehaviourResponse)(nil), "ibc.core.client.v1.MsgSubmitMisbehaviourResponse")
+}
+
+func init() { proto.RegisterFile("ibc/core/client/v1/tx.proto", fileDescriptor_cb5dc4651eb49a04) }
+
+var fileDescriptor_cb5dc4651eb49a04 = []byte{
+ // 606 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x55, 0x3f, 0x6f, 0xd3, 0x4e,
+ 0x18, 0x8e, 0x9b, 0xdf, 0x2f, 0x6a, 0xae, 0x81, 0x56, 0x26, 0xb4, 0xa9, 0xab, 0xda, 0x91, 0xe9,
+ 0x10, 0x44, 0xeb, 0x23, 0x41, 0x48, 0xa8, 0x1b, 0xe9, 0xc4, 0x10, 0x01, 0xae, 0x18, 0x60, 0x09,
+ 0xfe, 0x73, 0xbd, 0x9c, 0x88, 0x7d, 0x91, 0xcf, 0x8e, 0xc8, 0x37, 0x60, 0x64, 0xe0, 0x03, 0x54,
+ 0x0c, 0x7c, 0x16, 0xc6, 0x0e, 0x0c, 0x4c, 0x51, 0x95, 0x2c, 0xcc, 0xf9, 0x04, 0x28, 0x3e, 0x27,
+ 0xc4, 0xae, 0x1d, 0x59, 0xfc, 0xd9, 0x7c, 0x7e, 0x9f, 0x7b, 0x9e, 0xf7, 0xf1, 0xf3, 0x9e, 0x0f,
+ 0x1c, 0x10, 0xd3, 0x82, 0x16, 0xf5, 0x10, 0xb4, 0xfa, 0x04, 0xb9, 0x3e, 0x1c, 0x36, 0xa1, 0xff,
+ 0x5e, 0x1b, 0x78, 0xd4, 0xa7, 0xa2, 0x48, 0x4c, 0x4b, 0x9b, 0x17, 0x35, 0x5e, 0xd4, 0x86, 0x4d,
+ 0xa9, 0x8a, 0x29, 0xa6, 0x61, 0x19, 0xce, 0x9f, 0x38, 0x52, 0xda, 0xc7, 0x94, 0xe2, 0x3e, 0x82,
+ 0xe1, 0xca, 0x0c, 0x2e, 0xa0, 0xe1, 0x8e, 0xa2, 0x92, 0x92, 0xa2, 0x10, 0xd1, 0x85, 0x00, 0xf5,
+ 0x5a, 0x00, 0xdb, 0x1d, 0x86, 0xcf, 0x3c, 0x64, 0xf8, 0xe8, 0x2c, 0xac, 0x88, 0x2f, 0x40, 0x85,
+ 0x63, 0xba, 0xcc, 0x37, 0x7c, 0x54, 0x13, 0xea, 0x42, 0x63, 0xab, 0x55, 0xd5, 0xb8, 0x8c, 0xb6,
+ 0x90, 0xd1, 0x9e, 0xba, 0xa3, 0xf6, 0xde, 0x6c, 0xac, 0xdc, 0x19, 0x19, 0x4e, 0xff, 0x54, 0x5d,
+ 0xdd, 0xa3, 0xea, 0x5b, 0x7c, 0x79, 0x3e, 0x5f, 0x89, 0xaf, 0xc1, 0xb6, 0x45, 0x5d, 0x86, 0x5c,
+ 0x16, 0xb0, 0x88, 0x74, 0x63, 0x0d, 0xa9, 0x34, 0x1b, 0x2b, 0xbb, 0x11, 0x69, 0x7c, 0x9b, 0xaa,
+ 0xdf, 0x5e, 0xbe, 0xe1, 0xd4, 0xbb, 0xa0, 0xc4, 0x08, 0x76, 0x91, 0x57, 0x2b, 0xd6, 0x85, 0x46,
+ 0x59, 0x8f, 0x56, 0xa7, 0x9b, 0x1f, 0x2e, 0x95, 0xc2, 0x8f, 0x4b, 0xa5, 0xa0, 0xee, 0x83, 0xbd,
+ 0x84, 0x43, 0x1d, 0xb1, 0xc1, 0x9c, 0x45, 0xfd, 0xc4, 0xdd, 0xbf, 0x1a, 0xd8, 0xbf, 0xdc, 0x37,
+ 0x41, 0x39, 0x72, 0x42, 0xec, 0xd0, 0x7a, 0xb9, 0x5d, 0x9d, 0x8d, 0x95, 0x9d, 0x98, 0x49, 0x62,
+ 0xab, 0xfa, 0x26, 0x7f, 0x7e, 0x66, 0x8b, 0xc7, 0xa0, 0xd4, 0x43, 0x86, 0x8d, 0xbc, 0x75, 0xae,
+ 0xf4, 0x08, 0x93, 0xbb, 0xe3, 0xd5, 0xae, 0x96, 0x1d, 0x7f, 0x2b, 0x82, 0x9d, 0xb0, 0x86, 0x3d,
+ 0xc3, 0xfe, 0x83, 0x96, 0x93, 0x19, 0x6f, 0xfc, 0x8b, 0x8c, 0x8b, 0x7f, 0x29, 0xe3, 0x97, 0xa0,
+ 0x3a, 0xf0, 0x28, 0xbd, 0xe8, 0x06, 0xdc, 0x76, 0x97, 0xeb, 0xd6, 0xfe, 0xab, 0x0b, 0x8d, 0x4a,
+ 0x5b, 0x99, 0x8d, 0x95, 0x03, 0xce, 0x94, 0x86, 0x52, 0x75, 0x31, 0x7c, 0x1d, 0xff, 0x64, 0xef,
+ 0xc0, 0x61, 0x02, 0x9c, 0xe8, 0xfd, 0xff, 0x90, 0xbb, 0x31, 0x1b, 0x2b, 0x47, 0xa9, 0xdc, 0xc9,
+ 0x9e, 0xa5, 0x98, 0x48, 0xd6, 0x8c, 0x96, 0x32, 0x12, 0x97, 0x40, 0x2d, 0x99, 0xea, 0x32, 0xf2,
+ 0x2f, 0x02, 0xb8, 0xdb, 0x61, 0xf8, 0x3c, 0x30, 0x1d, 0xe2, 0x77, 0x08, 0x33, 0x51, 0xcf, 0x18,
+ 0x12, 0x1a, 0x78, 0xbf, 0x93, 0xfb, 0x13, 0x50, 0x71, 0x56, 0x28, 0xd6, 0x0e, 0x6c, 0x0c, 0x99,
+ 0x63, 0x6c, 0x15, 0x70, 0x98, 0xda, 0xe7, 0xc2, 0x49, 0xeb, 0x73, 0x11, 0x14, 0x3b, 0x0c, 0x8b,
+ 0x6f, 0x41, 0x25, 0xf6, 0xc3, 0xb9, 0xa7, 0xdd, 0xfc, 0xd7, 0x69, 0x89, 0x33, 0x2b, 0x3d, 0xc8,
+ 0x01, 0x5a, 0x28, 0xcd, 0x15, 0x62, 0x87, 0x3a, 0x4b, 0x61, 0x15, 0x94, 0xa9, 0x90, 0x76, 0x10,
+ 0x45, 0x0b, 0xdc, 0x8a, 0x4f, 0xd4, 0x51, 0xe6, 0xee, 0x15, 0x94, 0x74, 0x9c, 0x07, 0xb5, 0x14,
+ 0xf1, 0x80, 0x98, 0x12, 0xfb, 0xfd, 0x0c, 0x8e, 0x9b, 0x50, 0xa9, 0x99, 0x1b, 0xba, 0xd0, 0x6c,
+ 0x3f, 0xff, 0x3a, 0x91, 0x85, 0xab, 0x89, 0x2c, 0x5c, 0x4f, 0x64, 0xe1, 0xe3, 0x54, 0x2e, 0x5c,
+ 0x4d, 0xe5, 0xc2, 0xf7, 0xa9, 0x5c, 0x78, 0xf3, 0x18, 0x13, 0xbf, 0x17, 0x98, 0x9a, 0x45, 0x1d,
+ 0x68, 0x51, 0xe6, 0x50, 0x06, 0x89, 0x69, 0x9d, 0x60, 0x0a, 0x1d, 0x6a, 0x07, 0x7d, 0xc4, 0xf8,
+ 0x4d, 0xf3, 0xb0, 0x75, 0x12, 0x5d, 0x36, 0xfe, 0x68, 0x80, 0x98, 0x59, 0x0a, 0x87, 0xea, 0xd1,
+ 0xcf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x9c, 0xf3, 0x53, 0xbe, 0xee, 0x06, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
@@ -448,7 +448,7 @@ func NewMsgClient(cc grpc1.ClientConn) MsgClient {
func (c *msgClient) CreateClient(ctx context.Context, in *MsgCreateClient, opts ...grpc.CallOption) (*MsgCreateClientResponse, error) {
out := new(MsgCreateClientResponse)
- err := c.cc.Invoke(ctx, "/ibcgo.core.client.v1.Msg/CreateClient", in, out, opts...)
+ err := c.cc.Invoke(ctx, "/ibc.core.client.v1.Msg/CreateClient", in, out, opts...)
if err != nil {
return nil, err
}
@@ -457,7 +457,7 @@ func (c *msgClient) CreateClient(ctx context.Context, in *MsgCreateClient, opts
func (c *msgClient) UpdateClient(ctx context.Context, in *MsgUpdateClient, opts ...grpc.CallOption) (*MsgUpdateClientResponse, error) {
out := new(MsgUpdateClientResponse)
- err := c.cc.Invoke(ctx, "/ibcgo.core.client.v1.Msg/UpdateClient", in, out, opts...)
+ err := c.cc.Invoke(ctx, "/ibc.core.client.v1.Msg/UpdateClient", in, out, opts...)
if err != nil {
return nil, err
}
@@ -466,7 +466,7 @@ func (c *msgClient) UpdateClient(ctx context.Context, in *MsgUpdateClient, opts
func (c *msgClient) UpgradeClient(ctx context.Context, in *MsgUpgradeClient, opts ...grpc.CallOption) (*MsgUpgradeClientResponse, error) {
out := new(MsgUpgradeClientResponse)
- err := c.cc.Invoke(ctx, "/ibcgo.core.client.v1.Msg/UpgradeClient", in, out, opts...)
+ err := c.cc.Invoke(ctx, "/ibc.core.client.v1.Msg/UpgradeClient", in, out, opts...)
if err != nil {
return nil, err
}
@@ -475,7 +475,7 @@ func (c *msgClient) UpgradeClient(ctx context.Context, in *MsgUpgradeClient, opt
func (c *msgClient) SubmitMisbehaviour(ctx context.Context, in *MsgSubmitMisbehaviour, opts ...grpc.CallOption) (*MsgSubmitMisbehaviourResponse, error) {
out := new(MsgSubmitMisbehaviourResponse)
- err := c.cc.Invoke(ctx, "/ibcgo.core.client.v1.Msg/SubmitMisbehaviour", in, out, opts...)
+ err := c.cc.Invoke(ctx, "/ibc.core.client.v1.Msg/SubmitMisbehaviour", in, out, opts...)
if err != nil {
return nil, err
}
@@ -525,7 +525,7 @@ func _Msg_CreateClient_Handler(srv interface{}, ctx context.Context, dec func(in
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/ibcgo.core.client.v1.Msg/CreateClient",
+ FullMethod: "/ibc.core.client.v1.Msg/CreateClient",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(MsgServer).CreateClient(ctx, req.(*MsgCreateClient))
@@ -543,7 +543,7 @@ func _Msg_UpdateClient_Handler(srv interface{}, ctx context.Context, dec func(in
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/ibcgo.core.client.v1.Msg/UpdateClient",
+ FullMethod: "/ibc.core.client.v1.Msg/UpdateClient",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(MsgServer).UpdateClient(ctx, req.(*MsgUpdateClient))
@@ -561,7 +561,7 @@ func _Msg_UpgradeClient_Handler(srv interface{}, ctx context.Context, dec func(i
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/ibcgo.core.client.v1.Msg/UpgradeClient",
+ FullMethod: "/ibc.core.client.v1.Msg/UpgradeClient",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(MsgServer).UpgradeClient(ctx, req.(*MsgUpgradeClient))
@@ -579,7 +579,7 @@ func _Msg_SubmitMisbehaviour_Handler(srv interface{}, ctx context.Context, dec f
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/ibcgo.core.client.v1.Msg/SubmitMisbehaviour",
+ FullMethod: "/ibc.core.client.v1.Msg/SubmitMisbehaviour",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(MsgServer).SubmitMisbehaviour(ctx, req.(*MsgSubmitMisbehaviour))
@@ -588,7 +588,7 @@ func _Msg_SubmitMisbehaviour_Handler(srv interface{}, ctx context.Context, dec f
}
var _Msg_serviceDesc = grpc.ServiceDesc{
- ServiceName: "ibcgo.core.client.v1.Msg",
+ ServiceName: "ibc.core.client.v1.Msg",
HandlerType: (*MsgServer)(nil),
Methods: []grpc.MethodDesc{
{
@@ -609,7 +609,7 @@ var _Msg_serviceDesc = grpc.ServiceDesc{
},
},
Streams: []grpc.StreamDesc{},
- Metadata: "ibcgo/core/client/v1/tx.proto",
+ Metadata: "ibc/core/client/v1/tx.proto",
}
func (m *MsgCreateClient) Marshal() (dAtA []byte, err error) {
diff --git a/modules/core/03-connection/types/codec.go b/modules/core/03-connection/types/codec.go
index 9a93e96a..cf98be87 100644
--- a/modules/core/03-connection/types/codec.go
+++ b/modules/core/03-connection/types/codec.go
@@ -12,17 +12,17 @@ import (
// Any.
func RegisterInterfaces(registry codectypes.InterfaceRegistry) {
registry.RegisterInterface(
- "ibcgo.core.connection.v1.ConnectionI",
+ "ibc.core.connection.v1.ConnectionI",
(*exported.ConnectionI)(nil),
&ConnectionEnd{},
)
registry.RegisterInterface(
- "ibcgo.core.connection.v1.CounterpartyConnectionI",
+ "ibc.core.connection.v1.CounterpartyConnectionI",
(*exported.CounterpartyConnectionI)(nil),
&Counterparty{},
)
registry.RegisterInterface(
- "ibcgo.core.connection.v1.Version",
+ "ibc.core.connection.v1.Version",
(*exported.Version)(nil),
&Version{},
)
diff --git a/modules/core/03-connection/types/connection.pb.go b/modules/core/03-connection/types/connection.pb.go
index c5586069..16389778 100644
--- a/modules/core/03-connection/types/connection.pb.go
+++ b/modules/core/03-connection/types/connection.pb.go
@@ -1,5 +1,5 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: ibcgo/core/connection/v1/connection.proto
+// source: ibc/core/connection/v1/connection.proto
package types
@@ -59,7 +59,7 @@ func (x State) String() string {
}
func (State) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_278e9c8044b4f86b, []int{0}
+ return fileDescriptor_90572467c054e43a, []int{0}
}
// ConnectionEnd defines a stateful object on a chain connected to another
@@ -73,7 +73,7 @@ type ConnectionEnd struct {
// channels or packets utilising this connection.
Versions []*Version `protobuf:"bytes,2,rep,name=versions,proto3" json:"versions,omitempty"`
// current state of the connection end.
- State State `protobuf:"varint,3,opt,name=state,proto3,enum=ibcgo.core.connection.v1.State" json:"state,omitempty"`
+ State State `protobuf:"varint,3,opt,name=state,proto3,enum=ibc.core.connection.v1.State" json:"state,omitempty"`
// counterparty chain associated with this connection.
Counterparty Counterparty `protobuf:"bytes,4,opt,name=counterparty,proto3" json:"counterparty"`
// delay period that must pass before a consensus state can be used for
@@ -86,7 +86,7 @@ func (m *ConnectionEnd) Reset() { *m = ConnectionEnd{} }
func (m *ConnectionEnd) String() string { return proto.CompactTextString(m) }
func (*ConnectionEnd) ProtoMessage() {}
func (*ConnectionEnd) Descriptor() ([]byte, []int) {
- return fileDescriptor_278e9c8044b4f86b, []int{0}
+ return fileDescriptor_90572467c054e43a, []int{0}
}
func (m *ConnectionEnd) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -126,7 +126,7 @@ type IdentifiedConnection struct {
// channels or packets utilising this connection
Versions []*Version `protobuf:"bytes,3,rep,name=versions,proto3" json:"versions,omitempty"`
// current state of the connection end.
- State State `protobuf:"varint,4,opt,name=state,proto3,enum=ibcgo.core.connection.v1.State" json:"state,omitempty"`
+ State State `protobuf:"varint,4,opt,name=state,proto3,enum=ibc.core.connection.v1.State" json:"state,omitempty"`
// counterparty chain associated with this connection.
Counterparty Counterparty `protobuf:"bytes,5,opt,name=counterparty,proto3" json:"counterparty"`
// delay period associated with this connection.
@@ -137,7 +137,7 @@ func (m *IdentifiedConnection) Reset() { *m = IdentifiedConnection{} }
func (m *IdentifiedConnection) String() string { return proto.CompactTextString(m) }
func (*IdentifiedConnection) ProtoMessage() {}
func (*IdentifiedConnection) Descriptor() ([]byte, []int) {
- return fileDescriptor_278e9c8044b4f86b, []int{1}
+ return fileDescriptor_90572467c054e43a, []int{1}
}
func (m *IdentifiedConnection) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -182,7 +182,7 @@ func (m *Counterparty) Reset() { *m = Counterparty{} }
func (m *Counterparty) String() string { return proto.CompactTextString(m) }
func (*Counterparty) ProtoMessage() {}
func (*Counterparty) Descriptor() ([]byte, []int) {
- return fileDescriptor_278e9c8044b4f86b, []int{2}
+ return fileDescriptor_90572467c054e43a, []int{2}
}
func (m *Counterparty) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -221,7 +221,7 @@ func (m *ClientPaths) Reset() { *m = ClientPaths{} }
func (m *ClientPaths) String() string { return proto.CompactTextString(m) }
func (*ClientPaths) ProtoMessage() {}
func (*ClientPaths) Descriptor() ([]byte, []int) {
- return fileDescriptor_278e9c8044b4f86b, []int{3}
+ return fileDescriptor_90572467c054e43a, []int{3}
}
func (m *ClientPaths) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -269,7 +269,7 @@ func (m *ConnectionPaths) Reset() { *m = ConnectionPaths{} }
func (m *ConnectionPaths) String() string { return proto.CompactTextString(m) }
func (*ConnectionPaths) ProtoMessage() {}
func (*ConnectionPaths) Descriptor() ([]byte, []int) {
- return fileDescriptor_278e9c8044b4f86b, []int{4}
+ return fileDescriptor_90572467c054e43a, []int{4}
}
func (m *ConnectionPaths) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -325,7 +325,7 @@ func (m *Version) Reset() { *m = Version{} }
func (m *Version) String() string { return proto.CompactTextString(m) }
func (*Version) ProtoMessage() {}
func (*Version) Descriptor() ([]byte, []int) {
- return fileDescriptor_278e9c8044b4f86b, []int{5}
+ return fileDescriptor_90572467c054e43a, []int{5}
}
func (m *Version) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -355,62 +355,63 @@ func (m *Version) XXX_DiscardUnknown() {
var xxx_messageInfo_Version proto.InternalMessageInfo
func init() {
- proto.RegisterEnum("ibcgo.core.connection.v1.State", State_name, State_value)
- proto.RegisterType((*ConnectionEnd)(nil), "ibcgo.core.connection.v1.ConnectionEnd")
- proto.RegisterType((*IdentifiedConnection)(nil), "ibcgo.core.connection.v1.IdentifiedConnection")
- proto.RegisterType((*Counterparty)(nil), "ibcgo.core.connection.v1.Counterparty")
- proto.RegisterType((*ClientPaths)(nil), "ibcgo.core.connection.v1.ClientPaths")
- proto.RegisterType((*ConnectionPaths)(nil), "ibcgo.core.connection.v1.ConnectionPaths")
- proto.RegisterType((*Version)(nil), "ibcgo.core.connection.v1.Version")
+ proto.RegisterEnum("ibc.core.connection.v1.State", State_name, State_value)
+ proto.RegisterType((*ConnectionEnd)(nil), "ibc.core.connection.v1.ConnectionEnd")
+ proto.RegisterType((*IdentifiedConnection)(nil), "ibc.core.connection.v1.IdentifiedConnection")
+ proto.RegisterType((*Counterparty)(nil), "ibc.core.connection.v1.Counterparty")
+ proto.RegisterType((*ClientPaths)(nil), "ibc.core.connection.v1.ClientPaths")
+ proto.RegisterType((*ConnectionPaths)(nil), "ibc.core.connection.v1.ConnectionPaths")
+ proto.RegisterType((*Version)(nil), "ibc.core.connection.v1.Version")
}
func init() {
- proto.RegisterFile("ibcgo/core/connection/v1/connection.proto", fileDescriptor_278e9c8044b4f86b)
-}
-
-var fileDescriptor_278e9c8044b4f86b = []byte{
- // 656 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0xc1, 0x6e, 0xda, 0x4a,
- 0x14, 0xb5, 0x8d, 0x49, 0x60, 0x08, 0xef, 0xd1, 0x29, 0x52, 0x2d, 0x4b, 0xb5, 0x5d, 0x57, 0xaa,
- 0x68, 0xa5, 0xe0, 0x92, 0xa8, 0x95, 0x1a, 0xa9, 0x8b, 0x40, 0xa8, 0x64, 0xb5, 0xa5, 0xc8, 0x90,
- 0x4a, 0xcd, 0x06, 0x81, 0x3d, 0x21, 0xa3, 0x82, 0x07, 0xd9, 0x03, 0x2a, 0x7f, 0x10, 0x65, 0xd5,
- 0x6d, 0x17, 0x91, 0x2a, 0xf5, 0x67, 0xb2, 0xc8, 0x22, 0xcb, 0xae, 0x50, 0x95, 0xfc, 0x01, 0x5f,
- 0x50, 0xd9, 0x63, 0x8c, 0xd3, 0x8a, 0x45, 0x92, 0xee, 0xee, 0xf1, 0x3d, 0xe7, 0x30, 0xf7, 0xcc,
- 0x65, 0xc0, 0x53, 0xdc, 0xb3, 0xfb, 0xc4, 0xb0, 0x89, 0x87, 0x0c, 0x9b, 0xb8, 0x2e, 0xb2, 0x29,
- 0x26, 0xae, 0x31, 0xa9, 0x24, 0x50, 0x79, 0xe4, 0x11, 0x4a, 0xa0, 0x14, 0x52, 0xcb, 0x01, 0xb5,
- 0x9c, 0x68, 0x4e, 0x2a, 0x72, 0xb1, 0x4f, 0xfa, 0x24, 0x24, 0x19, 0x41, 0xc5, 0xf8, 0xf2, 0x75,
- 0xeb, 0xe1, 0x10, 0xd3, 0x21, 0x72, 0x29, 0xb3, 0x5e, 0x20, 0x46, 0xd5, 0xcf, 0x05, 0x90, 0xaf,
- 0xc5, 0x96, 0x75, 0xd7, 0x81, 0x15, 0x90, 0xb5, 0x07, 0x18, 0xb9, 0xb4, 0x83, 0x1d, 0x89, 0xd7,
- 0xf8, 0x52, 0xb6, 0x5a, 0x9c, 0xcf, 0xd4, 0xc2, 0xb4, 0x3b, 0x1c, 0xec, 0xe8, 0x71, 0x4b, 0xb7,
- 0x32, 0xac, 0x36, 0x1d, 0xf8, 0x1a, 0x64, 0x26, 0xc8, 0xf3, 0x31, 0x71, 0x7d, 0x49, 0xd0, 0x52,
- 0xa5, 0xdc, 0xd6, 0xa3, 0xf2, 0xaa, 0x23, 0x97, 0x3f, 0x32, 0xa6, 0x15, 0x4b, 0xe0, 0x0b, 0x90,
- 0xf6, 0x69, 0x97, 0x22, 0x29, 0xa5, 0xf1, 0xa5, 0xff, 0xb6, 0xd4, 0xd5, 0xda, 0x56, 0x40, 0xb3,
- 0x18, 0x1b, 0x36, 0xc1, 0x86, 0x4d, 0xc6, 0x2e, 0x45, 0xde, 0xa8, 0xeb, 0xd1, 0xa9, 0x24, 0x6a,
- 0x7c, 0x29, 0xb7, 0xf5, 0x64, 0xb5, 0xba, 0x96, 0x60, 0x57, 0xc5, 0xb3, 0x99, 0xca, 0x59, 0xd7,
- 0x1c, 0xe0, 0x0e, 0xd8, 0x70, 0xd0, 0xa0, 0x3b, 0xed, 0x8c, 0x90, 0x87, 0x89, 0x23, 0xa5, 0x35,
- 0xbe, 0x24, 0x56, 0x1f, 0xcc, 0x67, 0xea, 0x7d, 0x36, 0x7d, 0xb2, 0xab, 0x5b, 0xb9, 0x10, 0x36,
- 0x43, 0xb4, 0x23, 0x1e, 0x7f, 0x57, 0x39, 0x7d, 0x2e, 0x80, 0xa2, 0xe9, 0x20, 0x97, 0xe2, 0x43,
- 0x8c, 0x9c, 0x65, 0xb0, 0xf0, 0x21, 0x10, 0xe2, 0x38, 0xf3, 0xf3, 0x99, 0x9a, 0x65, 0x86, 0x41,
- 0x8e, 0x02, 0xfe, 0x23, 0x74, 0xe1, 0xc6, 0xa1, 0xa7, 0xee, 0x10, 0xba, 0x78, 0xa7, 0xd0, 0xd3,
- 0xff, 0x3c, 0xf4, 0xb5, 0x1b, 0x87, 0x7e, 0xce, 0x83, 0x8d, 0xe4, 0xcf, 0xdc, 0x6e, 0x85, 0xf3,
- 0xcb, 0x73, 0x2f, 0x2f, 0x41, 0x9a, 0xcf, 0xd4, 0x62, 0x24, 0x4b, 0xb6, 0xf5, 0x60, 0x88, 0x05,
- 0x36, 0x1d, 0xb8, 0x07, 0xd6, 0x46, 0x1e, 0x3a, 0xc4, 0x5f, 0xc2, 0x1d, 0xfe, 0x2b, 0x90, 0xf8,
- 0x4f, 0x37, 0xa9, 0x94, 0xdf, 0x23, 0xef, 0xf3, 0x00, 0x35, 0x43, 0x76, 0x14, 0x48, 0xa4, 0x8d,
- 0xc6, 0x79, 0x0c, 0x72, 0xb5, 0xf0, 0x58, 0xcd, 0x2e, 0x3d, 0xf2, 0x61, 0x11, 0xa4, 0x47, 0x41,
- 0x21, 0xf1, 0x5a, 0xaa, 0x94, 0xb5, 0x18, 0xd0, 0x0f, 0xc0, 0xff, 0xcb, 0xed, 0x62, 0xc4, 0x5b,
- 0x4c, 0x1d, 0x7b, 0x0b, 0x49, 0xef, 0xb7, 0x60, 0x3d, 0xda, 0x17, 0xa8, 0x00, 0x80, 0x17, 0xeb,
- 0xec, 0x31, 0x53, 0x2b, 0xf1, 0x05, 0xca, 0x20, 0x73, 0x88, 0xba, 0x74, 0xec, 0xa1, 0x85, 0x47,
- 0x8c, 0xd9, 0x34, 0xcf, 0xbe, 0xf1, 0x20, 0x1d, 0x6e, 0x10, 0x7c, 0x09, 0xd4, 0x56, 0x7b, 0xb7,
- 0x5d, 0xef, 0xec, 0x37, 0xcc, 0x86, 0xd9, 0x36, 0x77, 0xdf, 0x99, 0x07, 0xf5, 0xbd, 0xce, 0x7e,
- 0xa3, 0xd5, 0xac, 0xd7, 0xcc, 0x37, 0x66, 0x7d, 0xaf, 0xc0, 0xc9, 0xf7, 0x4e, 0x4e, 0xb5, 0xfc,
- 0x35, 0x02, 0x94, 0x00, 0x60, 0xba, 0xe0, 0x63, 0x81, 0x97, 0x33, 0x27, 0xa7, 0x9a, 0x18, 0xd4,
- 0x50, 0x01, 0x79, 0xd6, 0x69, 0x5b, 0x9f, 0x3e, 0x34, 0xeb, 0x8d, 0x82, 0x20, 0xe7, 0x4e, 0x4e,
- 0xb5, 0xf5, 0x08, 0x2e, 0x95, 0x61, 0x33, 0xc5, 0x94, 0x41, 0x2d, 0x8b, 0xc7, 0x3f, 0x14, 0xae,
- 0xda, 0x3a, 0xbb, 0x54, 0xf8, 0x8b, 0x4b, 0x85, 0xff, 0x75, 0xa9, 0xf0, 0x5f, 0xaf, 0x14, 0xee,
- 0xe2, 0x4a, 0xe1, 0x7e, 0x5e, 0x29, 0xdc, 0xc1, 0xab, 0x3e, 0xa6, 0x47, 0xe3, 0x5e, 0x70, 0x75,
- 0x86, 0x4d, 0xfc, 0x21, 0xf1, 0x0d, 0xdc, 0xb3, 0x37, 0xfb, 0xc4, 0x18, 0x12, 0x67, 0x3c, 0x40,
- 0x3e, 0x7b, 0x5c, 0x9f, 0x6f, 0x6f, 0x26, 0x9e, 0x6e, 0x3a, 0x1d, 0x21, 0xbf, 0xb7, 0x16, 0x3e,
- 0xac, 0xdb, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0x84, 0x44, 0x1f, 0x0f, 0xe0, 0x05, 0x00, 0x00,
+ proto.RegisterFile("ibc/core/connection/v1/connection.proto", fileDescriptor_90572467c054e43a)
+}
+
+var fileDescriptor_90572467c054e43a = []byte{
+ // 658 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0x41, 0x6b, 0xdb, 0x4c,
+ 0x14, 0x94, 0x64, 0x39, 0xb1, 0xd7, 0xf1, 0xf7, 0xb9, 0x5b, 0xd3, 0x0a, 0x41, 0x24, 0xa1, 0x16,
+ 0x6a, 0x0a, 0xb1, 0xea, 0x04, 0x0a, 0x4d, 0xe9, 0x21, 0x76, 0x5c, 0x10, 0x6d, 0x5d, 0x23, 0x3b,
+ 0x85, 0xe6, 0x62, 0x6c, 0x69, 0xe3, 0x2c, 0xb5, 0xb5, 0x46, 0x5a, 0x9b, 0xfa, 0x1f, 0x84, 0x9c,
+ 0x7a, 0xed, 0x21, 0x50, 0xe8, 0x7f, 0x29, 0xa1, 0xa7, 0x1c, 0x7b, 0x32, 0x25, 0xb9, 0xf6, 0xe4,
+ 0x5f, 0x50, 0xa4, 0x95, 0x65, 0x25, 0x34, 0x87, 0xa4, 0xbd, 0xbd, 0xd9, 0x37, 0x33, 0xde, 0x37,
+ 0x7e, 0x5a, 0xf0, 0x08, 0xf7, 0x6c, 0xc3, 0x26, 0x1e, 0x32, 0x6c, 0xe2, 0xba, 0xc8, 0xa6, 0x98,
+ 0xb8, 0xc6, 0xa4, 0x92, 0x40, 0xe5, 0x91, 0x47, 0x28, 0x81, 0xf7, 0x70, 0xcf, 0x2e, 0x07, 0xc4,
+ 0x72, 0xa2, 0x35, 0xa9, 0xc8, 0xc5, 0x3e, 0xe9, 0x93, 0x90, 0x62, 0x04, 0x15, 0x63, 0xcb, 0x49,
+ 0xdb, 0xe1, 0x10, 0xd3, 0x21, 0x72, 0x29, 0xb3, 0x5d, 0x20, 0x46, 0xd4, 0xbf, 0x09, 0x20, 0x5f,
+ 0x8b, 0x0d, 0xeb, 0xae, 0x03, 0x2b, 0x20, 0x6b, 0x0f, 0x30, 0x72, 0x69, 0x07, 0x3b, 0x12, 0xaf,
+ 0xf1, 0xa5, 0x6c, 0xb5, 0x38, 0x9f, 0xa9, 0x85, 0x69, 0x77, 0x38, 0xd8, 0xd6, 0xe3, 0x96, 0x6e,
+ 0x65, 0x58, 0x6d, 0x3a, 0xf0, 0x39, 0xc8, 0x4c, 0x90, 0xe7, 0x63, 0xe2, 0xfa, 0x92, 0xa0, 0xa5,
+ 0x4a, 0xb9, 0x4d, 0xb5, 0xfc, 0xe7, 0xeb, 0x96, 0xdf, 0x31, 0x9e, 0x15, 0x0b, 0xe0, 0x16, 0x48,
+ 0xfb, 0xb4, 0x4b, 0x91, 0x94, 0xd2, 0xf8, 0xd2, 0x7f, 0x9b, 0xeb, 0xd7, 0x29, 0x5b, 0x01, 0xc9,
+ 0x62, 0x5c, 0xd8, 0x00, 0x6b, 0x36, 0x19, 0xbb, 0x14, 0x79, 0xa3, 0xae, 0x47, 0xa7, 0x92, 0xa8,
+ 0xf1, 0xa5, 0xdc, 0xe6, 0xc3, 0xeb, 0xb4, 0xb5, 0x04, 0xb7, 0x2a, 0x9e, 0xce, 0x54, 0xce, 0xba,
+ 0xa4, 0x87, 0xdb, 0x60, 0xcd, 0x41, 0x83, 0xee, 0xb4, 0x33, 0x42, 0x1e, 0x26, 0x8e, 0x94, 0xd6,
+ 0xf8, 0x92, 0x58, 0xbd, 0x3f, 0x9f, 0xa9, 0x77, 0xd9, 0xdc, 0xc9, 0xae, 0x6e, 0xe5, 0x42, 0xd8,
+ 0x0c, 0xd1, 0xb6, 0x78, 0xf4, 0x45, 0xe5, 0xf4, 0x5f, 0x02, 0x28, 0x9a, 0x0e, 0x72, 0x29, 0x3e,
+ 0xc0, 0xc8, 0x59, 0x46, 0x0a, 0xd7, 0x81, 0x10, 0x07, 0x99, 0x9f, 0xcf, 0xd4, 0x2c, 0x33, 0x0c,
+ 0x12, 0x14, 0xf0, 0x95, 0xb8, 0x85, 0x1b, 0xc7, 0x9d, 0xba, 0x75, 0xdc, 0xe2, 0x5f, 0xc4, 0x9d,
+ 0xfe, 0xc7, 0x71, 0xaf, 0xdc, 0x38, 0xee, 0xef, 0x3c, 0x58, 0x4b, 0xfe, 0xcc, 0x6d, 0xd6, 0xf6,
+ 0x05, 0xc8, 0x2f, 0xef, 0xbd, 0x8c, 0x5f, 0x9a, 0xcf, 0xd4, 0x62, 0x24, 0x4b, 0xb6, 0xf5, 0x60,
+ 0x88, 0x05, 0x36, 0x1d, 0x58, 0x05, 0x2b, 0x23, 0x0f, 0x1d, 0xe0, 0x8f, 0xe1, 0xe6, 0x5e, 0x89,
+ 0x23, 0xfe, 0xcc, 0x26, 0x95, 0xf2, 0x1b, 0xe4, 0x7d, 0x18, 0xa0, 0x66, 0xc8, 0x8d, 0xe2, 0x88,
+ 0x94, 0xd1, 0x30, 0x0f, 0x40, 0xae, 0x16, 0x5e, 0xaa, 0xd9, 0xa5, 0x87, 0x3e, 0x2c, 0x82, 0xf4,
+ 0x28, 0x28, 0x24, 0x5e, 0x4b, 0x95, 0xb2, 0x16, 0x03, 0xfa, 0x3e, 0xf8, 0x7f, 0xb9, 0x55, 0x8c,
+ 0x78, 0x8b, 0x99, 0x63, 0x6f, 0x21, 0xe9, 0xfd, 0x0a, 0xac, 0x46, 0x9b, 0x02, 0x15, 0x00, 0xf0,
+ 0x62, 0x8d, 0x3d, 0x66, 0x6a, 0x25, 0x4e, 0xa0, 0x0c, 0x32, 0x07, 0xa8, 0x4b, 0xc7, 0x1e, 0x5a,
+ 0x78, 0xc4, 0x98, 0x4d, 0xf3, 0xf8, 0x33, 0x0f, 0xd2, 0xe1, 0xf6, 0xc0, 0xa7, 0x40, 0x6d, 0xb5,
+ 0x77, 0xda, 0xf5, 0xce, 0x5e, 0xc3, 0x6c, 0x98, 0x6d, 0x73, 0xe7, 0xb5, 0xb9, 0x5f, 0xdf, 0xed,
+ 0xec, 0x35, 0x5a, 0xcd, 0x7a, 0xcd, 0x7c, 0x69, 0xd6, 0x77, 0x0b, 0x9c, 0x7c, 0xe7, 0xf8, 0x44,
+ 0xcb, 0x5f, 0x22, 0x40, 0x09, 0x00, 0xa6, 0x0b, 0x0e, 0x0b, 0xbc, 0x9c, 0x39, 0x3e, 0xd1, 0xc4,
+ 0xa0, 0x86, 0x0a, 0xc8, 0xb3, 0x4e, 0xdb, 0x7a, 0xff, 0xb6, 0x59, 0x6f, 0x14, 0x04, 0x39, 0x77,
+ 0x7c, 0xa2, 0xad, 0x46, 0x70, 0xa9, 0x0c, 0x9b, 0x29, 0xa6, 0x0c, 0x6a, 0x59, 0x3c, 0xfa, 0xaa,
+ 0x70, 0xd5, 0xd6, 0xe9, 0xb9, 0xc2, 0x9f, 0x9d, 0x2b, 0xfc, 0xcf, 0x73, 0x85, 0xff, 0x74, 0xa1,
+ 0x70, 0x67, 0x17, 0x0a, 0xf7, 0xe3, 0x42, 0xe1, 0xf6, 0x9f, 0xf5, 0x31, 0x3d, 0x1c, 0xf7, 0x82,
+ 0xbf, 0xce, 0xb0, 0x89, 0x3f, 0x24, 0xbe, 0x81, 0x7b, 0xf6, 0x46, 0x9f, 0x18, 0x43, 0xe2, 0x8c,
+ 0x07, 0xc8, 0x67, 0xcf, 0xe9, 0x93, 0xad, 0x8d, 0xc4, 0x43, 0x4d, 0xa7, 0x23, 0xe4, 0xf7, 0x56,
+ 0xc2, 0xa7, 0x74, 0xeb, 0x77, 0x00, 0x00, 0x00, 0xff, 0xff, 0x7b, 0x1f, 0x87, 0x8e, 0xcc, 0x05,
+ 0x00, 0x00,
}
func (m *ConnectionEnd) Marshal() (dAtA []byte, err error) {
diff --git a/modules/core/03-connection/types/genesis.pb.go b/modules/core/03-connection/types/genesis.pb.go
index 514fea5e..c538d3e3 100644
--- a/modules/core/03-connection/types/genesis.pb.go
+++ b/modules/core/03-connection/types/genesis.pb.go
@@ -1,5 +1,5 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: ibcgo/core/connection/v1/genesis.proto
+// source: ibc/core/connection/v1/genesis.proto
package types
@@ -35,7 +35,7 @@ func (m *GenesisState) Reset() { *m = GenesisState{} }
func (m *GenesisState) String() string { return proto.CompactTextString(m) }
func (*GenesisState) ProtoMessage() {}
func (*GenesisState) Descriptor() ([]byte, []int) {
- return fileDescriptor_1d3565a164ba596e, []int{0}
+ return fileDescriptor_1879d34bc6ac3cd7, []int{0}
}
func (m *GenesisState) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -86,36 +86,36 @@ func (m *GenesisState) GetNextConnectionSequence() uint64 {
}
func init() {
- proto.RegisterType((*GenesisState)(nil), "ibcgo.core.connection.v1.GenesisState")
+ proto.RegisterType((*GenesisState)(nil), "ibc.core.connection.v1.GenesisState")
}
func init() {
- proto.RegisterFile("ibcgo/core/connection/v1/genesis.proto", fileDescriptor_1d3565a164ba596e)
+ proto.RegisterFile("ibc/core/connection/v1/genesis.proto", fileDescriptor_1879d34bc6ac3cd7)
}
-var fileDescriptor_1d3565a164ba596e = []byte{
- // 330 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0xcd, 0x6a, 0xf2, 0x40,
- 0x14, 0x86, 0x13, 0x95, 0x6f, 0x11, 0xbf, 0x55, 0xe8, 0x4f, 0x70, 0x31, 0x91, 0x14, 0x44, 0x17,
- 0xce, 0xd4, 0xba, 0x6a, 0x97, 0xe9, 0xa2, 0x74, 0x57, 0x14, 0xba, 0x28, 0x14, 0x31, 0xe3, 0xe9,
- 0x38, 0x60, 0xe6, 0x58, 0x67, 0x94, 0x7a, 0x09, 0xdd, 0xf5, 0xb2, 0x5c, 0xda, 0x5d, 0x57, 0x52,
- 0xf4, 0x0e, 0xbc, 0x82, 0x92, 0xa4, 0x34, 0xb6, 0x90, 0xdd, 0x70, 0xce, 0xf3, 0x3e, 0x67, 0xe0,
- 0x75, 0x1a, 0x32, 0xe2, 0x02, 0x19, 0xc7, 0x19, 0x30, 0x8e, 0x4a, 0x01, 0x37, 0x12, 0x15, 0x5b,
- 0x74, 0x98, 0x00, 0x05, 0x5a, 0x6a, 0x3a, 0x9d, 0xa1, 0x41, 0xd7, 0x4b, 0x39, 0x9a, 0x70, 0x34,
- 0xe7, 0xe8, 0xa2, 0x53, 0x3b, 0x12, 0x28, 0x30, 0x85, 0x58, 0xf2, 0xca, 0xf8, 0x5a, 0xab, 0xd0,
- 0x7b, 0x90, 0x4e, 0xd1, 0xe0, 0xbd, 0xe4, 0xfc, 0xbf, 0xc9, 0x8e, 0xf5, 0xcd, 0xd0, 0x80, 0x7b,
- 0xef, 0x54, 0x73, 0x48, 0x7b, 0x76, 0xbd, 0xdc, 0xac, 0x5e, 0x50, 0x5a, 0xf4, 0x03, 0x7a, 0x3b,
- 0x02, 0x65, 0xe4, 0x93, 0x84, 0xd1, 0xf5, 0xcf, 0x3c, 0xac, 0xac, 0x36, 0xbe, 0xd5, 0x3b, 0x14,
- 0xb9, 0xaf, 0xb6, 0x73, 0xca, 0x27, 0x12, 0x94, 0x19, 0xe4, 0xe3, 0xc1, 0x74, 0x68, 0xc6, 0xda,
- 0x2b, 0xa5, 0x47, 0x5a, 0xc5, 0x47, 0x72, 0xf5, 0x5d, 0x12, 0x08, 0x1b, 0x89, 0x7f, 0xbf, 0xf1,
- 0xc9, 0x72, 0x18, 0x4f, 0xae, 0x82, 0x02, 0x6f, 0xd0, 0x3b, 0xce, 0x36, 0x7f, 0xe2, 0xee, 0xa3,
- 0xe3, 0x29, 0x78, 0xf9, 0x15, 0xd0, 0xf0, 0x3c, 0x07, 0xc5, 0xc1, 0x2b, 0xd7, 0xed, 0x66, 0x25,
- 0x3c, 0xdb, 0x6f, 0x7c, 0x3f, 0x93, 0x17, 0x91, 0x41, 0xef, 0x24, 0x59, 0xe5, 0xee, 0xfe, 0xf7,
- 0x22, 0xec, 0xaf, 0xb6, 0xc4, 0x5e, 0x6f, 0x89, 0xfd, 0xb9, 0x25, 0xf6, 0xdb, 0x8e, 0x58, 0xeb,
- 0x1d, 0xb1, 0x3e, 0x76, 0xc4, 0x7a, 0xb8, 0x14, 0xd2, 0x8c, 0xe7, 0x11, 0xe5, 0x18, 0x33, 0x8e,
- 0x3a, 0x46, 0xcd, 0x64, 0xc4, 0xdb, 0x02, 0x59, 0x8c, 0xa3, 0xf9, 0x04, 0x74, 0xd6, 0xd9, 0x79,
- 0xb7, 0x7d, 0x50, 0x9b, 0x59, 0x4e, 0x41, 0x47, 0xff, 0xd2, 0xbe, 0xba, 0x5f, 0x01, 0x00, 0x00,
- 0xff, 0xff, 0x1e, 0x94, 0x88, 0x7d, 0x34, 0x02, 0x00, 0x00,
+var fileDescriptor_1879d34bc6ac3cd7 = []byte{
+ // 327 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0x31, 0x4f, 0xc2, 0x40,
+ 0x18, 0x86, 0x5b, 0x20, 0x0e, 0xc5, 0xa9, 0x51, 0x6c, 0x18, 0xae, 0xa4, 0x1a, 0x61, 0x90, 0x3b,
+ 0x91, 0x49, 0xc7, 0x3a, 0x18, 0x37, 0x03, 0x4e, 0x26, 0x86, 0xd0, 0xe3, 0xb3, 0x5c, 0x42, 0xef,
+ 0x43, 0xee, 0x20, 0xf2, 0x0b, 0x5c, 0xfd, 0x59, 0x2c, 0x26, 0x8c, 0x4e, 0xc4, 0xc0, 0x3f, 0xe0,
+ 0x17, 0x98, 0xb6, 0xc6, 0xa2, 0xb1, 0xdb, 0xe5, 0xbe, 0xe7, 0x7d, 0xde, 0xe1, 0xb5, 0x4e, 0x44,
+ 0xc0, 0x19, 0xc7, 0x09, 0x30, 0x8e, 0x52, 0x02, 0xd7, 0x02, 0x25, 0x9b, 0xb5, 0x58, 0x08, 0x12,
+ 0x94, 0x50, 0x74, 0x3c, 0x41, 0x8d, 0x76, 0x45, 0x04, 0x9c, 0xc6, 0x14, 0xcd, 0x28, 0x3a, 0x6b,
+ 0x55, 0x0f, 0x42, 0x0c, 0x31, 0x41, 0x58, 0xfc, 0x4a, 0xe9, 0x6a, 0x3d, 0xc7, 0xb9, 0x93, 0x4d,
+ 0x40, 0xef, 0xbd, 0x60, 0xed, 0xdf, 0xa4, 0x45, 0x5d, 0xdd, 0xd7, 0x60, 0xdf, 0x5b, 0xe5, 0x0c,
+ 0x52, 0x8e, 0x59, 0x2b, 0x36, 0xca, 0x17, 0x67, 0xf4, 0xff, 0x76, 0x7a, 0x3b, 0x00, 0xa9, 0xc5,
+ 0x93, 0x80, 0xc1, 0xf5, 0xcf, 0xbf, 0x5f, 0x5a, 0xac, 0x5c, 0xa3, 0xb3, 0xab, 0xb1, 0x5f, 0x4d,
+ 0xeb, 0x88, 0x8f, 0x04, 0x48, 0xdd, 0xcb, 0xbe, 0x7b, 0xe3, 0xbe, 0x1e, 0x2a, 0xa7, 0x90, 0x54,
+ 0xd4, 0xf3, 0x2a, 0x32, 0xf1, 0x5d, 0x8c, 0xfb, 0xa7, 0xb1, 0x7d, 0xbb, 0x72, 0xc9, 0xbc, 0x1f,
+ 0x8d, 0xae, 0xbc, 0x1c, 0xab, 0xd7, 0x39, 0x4c, 0x2f, 0x7f, 0xe2, 0xf6, 0xa3, 0xe5, 0x48, 0x78,
+ 0xf9, 0x15, 0x50, 0xf0, 0x3c, 0x05, 0xc9, 0xc1, 0x29, 0xd6, 0xcc, 0x46, 0xc9, 0x3f, 0xde, 0xae,
+ 0x5c, 0x37, 0x95, 0xe7, 0x91, 0x5e, 0xa7, 0x12, 0x9f, 0x32, 0x77, 0xf7, 0xfb, 0xe0, 0x77, 0x17,
+ 0x6b, 0x62, 0x2e, 0xd7, 0xc4, 0xfc, 0x5c, 0x13, 0xf3, 0x6d, 0x43, 0x8c, 0xe5, 0x86, 0x18, 0x1f,
+ 0x1b, 0x62, 0x3c, 0x5c, 0x86, 0x42, 0x0f, 0xa7, 0x01, 0xe5, 0x18, 0x31, 0x8e, 0x2a, 0x42, 0xc5,
+ 0x44, 0xc0, 0x9b, 0x21, 0xb2, 0x08, 0x07, 0xd3, 0x11, 0xa8, 0x74, 0xaf, 0xf3, 0x76, 0x73, 0x67,
+ 0x32, 0x3d, 0x1f, 0x83, 0x0a, 0xf6, 0x92, 0xad, 0xda, 0x5f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x85,
+ 0x66, 0x6f, 0x29, 0x2a, 0x02, 0x00, 0x00,
}
func (m *GenesisState) Marshal() (dAtA []byte, err error) {
diff --git a/modules/core/03-connection/types/query.pb.go b/modules/core/03-connection/types/query.pb.go
index 49e8f074..32ad340b 100644
--- a/modules/core/03-connection/types/query.pb.go
+++ b/modules/core/03-connection/types/query.pb.go
@@ -1,5 +1,5 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: ibcgo/core/connection/v1/query.proto
+// source: ibc/core/connection/v1/query.proto
package types
@@ -43,7 +43,7 @@ func (m *QueryConnectionRequest) Reset() { *m = QueryConnectionRequest{}
func (m *QueryConnectionRequest) String() string { return proto.CompactTextString(m) }
func (*QueryConnectionRequest) ProtoMessage() {}
func (*QueryConnectionRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_eaccf9805ea75291, []int{0}
+ return fileDescriptor_cd8d529f8c7cd06b, []int{0}
}
func (m *QueryConnectionRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -95,7 +95,7 @@ func (m *QueryConnectionResponse) Reset() { *m = QueryConnectionResponse
func (m *QueryConnectionResponse) String() string { return proto.CompactTextString(m) }
func (*QueryConnectionResponse) ProtoMessage() {}
func (*QueryConnectionResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_eaccf9805ea75291, []int{1}
+ return fileDescriptor_cd8d529f8c7cd06b, []int{1}
}
func (m *QueryConnectionResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -155,7 +155,7 @@ func (m *QueryConnectionsRequest) Reset() { *m = QueryConnectionsRequest
func (m *QueryConnectionsRequest) String() string { return proto.CompactTextString(m) }
func (*QueryConnectionsRequest) ProtoMessage() {}
func (*QueryConnectionsRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_eaccf9805ea75291, []int{2}
+ return fileDescriptor_cd8d529f8c7cd06b, []int{2}
}
func (m *QueryConnectionsRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -206,7 +206,7 @@ func (m *QueryConnectionsResponse) Reset() { *m = QueryConnectionsRespon
func (m *QueryConnectionsResponse) String() string { return proto.CompactTextString(m) }
func (*QueryConnectionsResponse) ProtoMessage() {}
func (*QueryConnectionsResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_eaccf9805ea75291, []int{3}
+ return fileDescriptor_cd8d529f8c7cd06b, []int{3}
}
func (m *QueryConnectionsResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -267,7 +267,7 @@ func (m *QueryClientConnectionsRequest) Reset() { *m = QueryClientConnec
func (m *QueryClientConnectionsRequest) String() string { return proto.CompactTextString(m) }
func (*QueryClientConnectionsRequest) ProtoMessage() {}
func (*QueryClientConnectionsRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_eaccf9805ea75291, []int{4}
+ return fileDescriptor_cd8d529f8c7cd06b, []int{4}
}
func (m *QueryClientConnectionsRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -318,7 +318,7 @@ func (m *QueryClientConnectionsResponse) Reset() { *m = QueryClientConne
func (m *QueryClientConnectionsResponse) String() string { return proto.CompactTextString(m) }
func (*QueryClientConnectionsResponse) ProtoMessage() {}
func (*QueryClientConnectionsResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_eaccf9805ea75291, []int{5}
+ return fileDescriptor_cd8d529f8c7cd06b, []int{5}
}
func (m *QueryClientConnectionsResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -379,7 +379,7 @@ func (m *QueryConnectionClientStateRequest) Reset() { *m = QueryConnecti
func (m *QueryConnectionClientStateRequest) String() string { return proto.CompactTextString(m) }
func (*QueryConnectionClientStateRequest) ProtoMessage() {}
func (*QueryConnectionClientStateRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_eaccf9805ea75291, []int{6}
+ return fileDescriptor_cd8d529f8c7cd06b, []int{6}
}
func (m *QueryConnectionClientStateRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -430,7 +430,7 @@ func (m *QueryConnectionClientStateResponse) Reset() { *m = QueryConnect
func (m *QueryConnectionClientStateResponse) String() string { return proto.CompactTextString(m) }
func (*QueryConnectionClientStateResponse) ProtoMessage() {}
func (*QueryConnectionClientStateResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_eaccf9805ea75291, []int{7}
+ return fileDescriptor_cd8d529f8c7cd06b, []int{7}
}
func (m *QueryConnectionClientStateResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -493,7 +493,7 @@ func (m *QueryConnectionConsensusStateRequest) Reset() { *m = QueryConne
func (m *QueryConnectionConsensusStateRequest) String() string { return proto.CompactTextString(m) }
func (*QueryConnectionConsensusStateRequest) ProtoMessage() {}
func (*QueryConnectionConsensusStateRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_eaccf9805ea75291, []int{8}
+ return fileDescriptor_cd8d529f8c7cd06b, []int{8}
}
func (m *QueryConnectionConsensusStateRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -560,7 +560,7 @@ func (m *QueryConnectionConsensusStateResponse) Reset() { *m = QueryConn
func (m *QueryConnectionConsensusStateResponse) String() string { return proto.CompactTextString(m) }
func (*QueryConnectionConsensusStateResponse) ProtoMessage() {}
func (*QueryConnectionConsensusStateResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_eaccf9805ea75291, []int{9}
+ return fileDescriptor_cd8d529f8c7cd06b, []int{9}
}
func (m *QueryConnectionConsensusStateResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -618,80 +618,80 @@ func (m *QueryConnectionConsensusStateResponse) GetProofHeight() types.Height {
}
func init() {
- proto.RegisterType((*QueryConnectionRequest)(nil), "ibcgo.core.connection.v1.QueryConnectionRequest")
- proto.RegisterType((*QueryConnectionResponse)(nil), "ibcgo.core.connection.v1.QueryConnectionResponse")
- proto.RegisterType((*QueryConnectionsRequest)(nil), "ibcgo.core.connection.v1.QueryConnectionsRequest")
- proto.RegisterType((*QueryConnectionsResponse)(nil), "ibcgo.core.connection.v1.QueryConnectionsResponse")
- proto.RegisterType((*QueryClientConnectionsRequest)(nil), "ibcgo.core.connection.v1.QueryClientConnectionsRequest")
- proto.RegisterType((*QueryClientConnectionsResponse)(nil), "ibcgo.core.connection.v1.QueryClientConnectionsResponse")
- proto.RegisterType((*QueryConnectionClientStateRequest)(nil), "ibcgo.core.connection.v1.QueryConnectionClientStateRequest")
- proto.RegisterType((*QueryConnectionClientStateResponse)(nil), "ibcgo.core.connection.v1.QueryConnectionClientStateResponse")
- proto.RegisterType((*QueryConnectionConsensusStateRequest)(nil), "ibcgo.core.connection.v1.QueryConnectionConsensusStateRequest")
- proto.RegisterType((*QueryConnectionConsensusStateResponse)(nil), "ibcgo.core.connection.v1.QueryConnectionConsensusStateResponse")
+ proto.RegisterType((*QueryConnectionRequest)(nil), "ibc.core.connection.v1.QueryConnectionRequest")
+ proto.RegisterType((*QueryConnectionResponse)(nil), "ibc.core.connection.v1.QueryConnectionResponse")
+ proto.RegisterType((*QueryConnectionsRequest)(nil), "ibc.core.connection.v1.QueryConnectionsRequest")
+ proto.RegisterType((*QueryConnectionsResponse)(nil), "ibc.core.connection.v1.QueryConnectionsResponse")
+ proto.RegisterType((*QueryClientConnectionsRequest)(nil), "ibc.core.connection.v1.QueryClientConnectionsRequest")
+ proto.RegisterType((*QueryClientConnectionsResponse)(nil), "ibc.core.connection.v1.QueryClientConnectionsResponse")
+ proto.RegisterType((*QueryConnectionClientStateRequest)(nil), "ibc.core.connection.v1.QueryConnectionClientStateRequest")
+ proto.RegisterType((*QueryConnectionClientStateResponse)(nil), "ibc.core.connection.v1.QueryConnectionClientStateResponse")
+ proto.RegisterType((*QueryConnectionConsensusStateRequest)(nil), "ibc.core.connection.v1.QueryConnectionConsensusStateRequest")
+ proto.RegisterType((*QueryConnectionConsensusStateResponse)(nil), "ibc.core.connection.v1.QueryConnectionConsensusStateResponse")
}
func init() {
- proto.RegisterFile("ibcgo/core/connection/v1/query.proto", fileDescriptor_eaccf9805ea75291)
-}
-
-var fileDescriptor_eaccf9805ea75291 = []byte{
- // 895 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0xcf, 0x6f, 0x1b, 0x45,
- 0x14, 0xf6, 0x38, 0x69, 0xd5, 0x8c, 0x43, 0x0b, 0x23, 0x97, 0x2e, 0xa6, 0xb8, 0xe9, 0xb6, 0xa5,
- 0x29, 0x55, 0x67, 0x6a, 0x47, 0x40, 0xc8, 0x2f, 0xc0, 0x28, 0x90, 0x5c, 0x50, 0xd8, 0xdc, 0xb8,
- 0x44, 0xbb, 0xeb, 0xc9, 0x7a, 0x25, 0x7b, 0xc7, 0xf1, 0xac, 0x8d, 0xac, 0xc8, 0x42, 0xe2, 0x2f,
- 0x40, 0xe2, 0xca, 0x05, 0x71, 0xe1, 0xc4, 0x95, 0x23, 0x37, 0x94, 0x63, 0x24, 0x2e, 0x1c, 0x50,
- 0x14, 0x25, 0x88, 0x3b, 0xfc, 0x05, 0x68, 0x67, 0xc6, 0xd9, 0x59, 0xdb, 0x9b, 0xd8, 0x16, 0xb9,
- 0x6d, 0xde, 0xbc, 0x37, 0xef, 0xfb, 0xbe, 0xf7, 0xe6, 0x73, 0xe0, 0x63, 0xdf, 0x71, 0x3d, 0x46,
- 0x5c, 0xd6, 0xa2, 0xc4, 0x65, 0x41, 0x40, 0xdd, 0xd0, 0x67, 0x01, 0xe9, 0x94, 0xc8, 0x41, 0x9b,
- 0xb6, 0xba, 0xb8, 0xd9, 0x62, 0x21, 0x43, 0x86, 0xc8, 0xc2, 0x51, 0x16, 0x8e, 0xb3, 0x70, 0xa7,
- 0x54, 0xc8, 0x7b, 0xcc, 0x63, 0x22, 0x89, 0x44, 0x5f, 0x32, 0xbf, 0xf0, 0x8e, 0xcb, 0x78, 0x83,
- 0x71, 0xe2, 0xd8, 0x9c, 0xca, 0x8b, 0x48, 0xa7, 0xe4, 0xd0, 0xd0, 0x2e, 0x91, 0xa6, 0xed, 0xf9,
- 0x81, 0x2d, 0xca, 0x65, 0xee, 0x43, 0x1d, 0x41, 0xdd, 0xa7, 0x41, 0x18, 0x75, 0x97, 0x5f, 0x2a,
- 0xe5, 0x59, 0x2a, 0x48, 0x0d, 0x8c, 0x4c, 0xbd, 0xef, 0x31, 0xe6, 0xd5, 0x29, 0xb1, 0x9b, 0x3e,
- 0xb1, 0x83, 0x80, 0x85, 0xa2, 0x15, 0x57, 0xa7, 0x6f, 0xa8, 0x53, 0xf1, 0x97, 0xd3, 0xde, 0x27,
- 0x76, 0xa0, 0x28, 0x9a, 0xeb, 0xf0, 0xf5, 0x2f, 0x22, 0xa0, 0x9f, 0x5c, 0xdc, 0x68, 0xd1, 0x83,
- 0x36, 0xe5, 0x21, 0x7a, 0x04, 0x5f, 0x89, 0xdb, 0xec, 0xf9, 0x55, 0x03, 0x2c, 0x80, 0xc5, 0x39,
- 0x6b, 0x3e, 0x0e, 0x6e, 0x57, 0xcd, 0x5f, 0x01, 0xbc, 0x37, 0x54, 0xcf, 0x9b, 0x2c, 0xe0, 0x14,
- 0x7d, 0x06, 0x61, 0x9c, 0x2b, 0xaa, 0x73, 0xe5, 0xa7, 0x38, 0x4d, 0x52, 0x1c, 0xdf, 0xb0, 0x19,
- 0x54, 0x2d, 0xad, 0x14, 0xe5, 0xe1, 0x8d, 0x66, 0x8b, 0xb1, 0x7d, 0x23, 0xbb, 0x00, 0x16, 0xe7,
- 0x2d, 0xf9, 0x07, 0xda, 0x84, 0xf3, 0xe2, 0x63, 0xaf, 0x46, 0x7d, 0xaf, 0x16, 0x1a, 0x33, 0xa2,
- 0xc1, 0xfd, 0x44, 0x03, 0xa9, 0x66, 0xa7, 0x84, 0xb7, 0x44, 0x4e, 0x65, 0xf6, 0xe8, 0xe4, 0x41,
- 0xc6, 0xca, 0x89, 0x3a, 0x19, 0x32, 0xed, 0x21, 0x02, 0xbc, 0xaf, 0xc0, 0xa7, 0x10, 0xc6, 0x63,
- 0x53, 0x04, 0xde, 0xc6, 0x72, 0xc6, 0x38, 0x9a, 0x31, 0x96, 0xcb, 0xa2, 0x66, 0x8c, 0x77, 0x6c,
- 0x8f, 0xaa, 0x5a, 0x4b, 0xab, 0x34, 0xff, 0x01, 0xd0, 0x18, 0xee, 0xa1, 0x54, 0xda, 0x81, 0xb9,
- 0x98, 0x2a, 0x37, 0xc0, 0xc2, 0xcc, 0x62, 0xae, 0x8c, 0xd3, 0x65, 0xda, 0xae, 0xd2, 0x20, 0xf4,
- 0xf7, 0x7d, 0x5a, 0xd5, 0x24, 0xd7, 0xaf, 0x88, 0x74, 0xd7, 0x60, 0x67, 0x95, 0xee, 0x57, 0xc1,
- 0x96, 0x70, 0x74, 0xdc, 0x68, 0x05, 0xde, 0x9c, 0x58, 0x5b, 0x55, 0x61, 0xae, 0xc1, 0xb7, 0x24,
- 0x65, 0x91, 0x36, 0x42, 0xdc, 0x37, 0xe1, 0x9c, 0xbc, 0x22, 0x5e, 0xad, 0x5b, 0x32, 0xb0, 0x5d,
- 0x35, 0x7f, 0x02, 0xb0, 0x98, 0x56, 0xae, 0x74, 0x7b, 0x06, 0x5f, 0xd5, 0xd6, 0xb3, 0x69, 0x87,
- 0x35, 0x29, 0xde, 0x9c, 0x75, 0x27, 0x8e, 0xef, 0x44, 0xe1, 0xeb, 0xdd, 0x1f, 0x07, 0x3e, 0x1c,
- 0x98, 0xad, 0xc4, 0xbc, 0x1b, 0xda, 0x61, 0x7f, 0x1b, 0xd0, 0xfa, 0xc8, 0xb7, 0x54, 0x31, 0xfe,
- 0x3d, 0x79, 0x90, 0xef, 0xda, 0x8d, 0xfa, 0x8a, 0x99, 0x38, 0x36, 0x07, 0x5e, 0xd9, 0xdf, 0x00,
- 0x9a, 0x97, 0x35, 0x51, 0x92, 0xb8, 0xf0, 0x9e, 0x7f, 0xb1, 0x1d, 0x7b, 0x4a, 0x5d, 0x1e, 0xa5,
- 0xa8, 0xe5, 0x7d, 0x3e, 0x9a, 0x9c, 0xb6, 0x52, 0xda, 0xad, 0x77, 0xfd, 0x51, 0xe1, 0xeb, 0x15,
- 0xf3, 0x17, 0x00, 0x1f, 0x0f, 0x12, 0x8d, 0xa8, 0x05, 0xbc, 0xcd, 0xff, 0x47, 0x41, 0xd1, 0x53,
- 0x78, 0xa7, 0x45, 0x3b, 0x3e, 0x8f, 0x4e, 0x83, 0x76, 0xc3, 0xa1, 0x2d, 0x41, 0x67, 0xd6, 0xba,
- 0xdd, 0x0f, 0x7f, 0x2e, 0xa2, 0x89, 0x44, 0x8d, 0x9a, 0x96, 0xa8, 0x90, 0x9f, 0x02, 0xf8, 0xe4,
- 0x0a, 0xe4, 0x6a, 0x4a, 0xeb, 0x30, 0x5a, 0x50, 0x79, 0x92, 0x98, 0x4e, 0x1e, 0x4b, 0x9b, 0xc6,
- 0x7d, 0x9b, 0xc6, 0x1f, 0x07, 0x5d, 0xeb, 0xb6, 0x9b, 0xb8, 0x26, 0xf9, 0x6e, 0xb2, 0xc9, 0x77,
- 0x13, 0x0f, 0x67, 0xe6, 0xb2, 0xe1, 0xcc, 0x4e, 0x35, 0x9c, 0xf2, 0x8f, 0xb7, 0xe0, 0x0d, 0x41,
- 0x11, 0xfd, 0x0c, 0x20, 0x8c, 0x79, 0xa2, 0x97, 0xe9, 0x6e, 0x35, 0xfa, 0xb7, 0xa5, 0x50, 0x9a,
- 0xa0, 0x42, 0xca, 0x66, 0xae, 0x7e, 0xf3, 0xfb, 0x5f, 0xdf, 0x65, 0xdf, 0x45, 0x4b, 0xc4, 0x77,
- 0xdc, 0xcb, 0x7f, 0x13, 0x39, 0x39, 0x4c, 0x4c, 0xbf, 0x87, 0x7e, 0x00, 0x30, 0xa7, 0x99, 0x08,
- 0x1a, 0xbf, 0x7f, 0xdf, 0xaf, 0x0a, 0xe5, 0x49, 0x4a, 0x14, 0xe6, 0xe7, 0x02, 0xf3, 0x13, 0xf4,
- 0x68, 0x0c, 0xcc, 0xe8, 0x37, 0x00, 0x5f, 0x1b, 0xb2, 0x3b, 0xf4, 0xfe, 0x55, 0x6d, 0x53, 0xfc,
- 0xb5, 0xb0, 0x3c, 0x79, 0xa1, 0x42, 0xbd, 0x21, 0x50, 0x2f, 0xa3, 0xf7, 0x52, 0x51, 0xcb, 0xfd,
- 0x4b, 0x0a, 0xde, 0xdf, 0xc9, 0x1e, 0xfa, 0x13, 0xc0, 0xbb, 0x23, 0x8d, 0x0a, 0xad, 0x8e, 0xad,
- 0xe1, 0xb0, 0x87, 0x16, 0xd6, 0xa6, 0x2b, 0x56, 0xa4, 0xb6, 0x04, 0xa9, 0x0a, 0xfa, 0x68, 0x8a,
- 0xf5, 0x21, 0xba, 0x95, 0xa2, 0xef, 0xb3, 0xd0, 0x48, 0x7b, 0xe4, 0x68, 0x63, 0x7c, 0x90, 0xa3,
- 0x7c, 0xad, 0xf0, 0xe1, 0xd4, 0xf5, 0x8a, 0xe7, 0xd7, 0x82, 0x67, 0x17, 0x7d, 0x35, 0x15, 0xcf,
- 0xa4, 0x2f, 0x91, 0xbe, 0xc7, 0x91, 0xc3, 0x01, 0xb7, 0xec, 0x11, 0x69, 0x25, 0xda, 0x81, 0x0c,
- 0xf4, 0x2a, 0xbb, 0x47, 0x67, 0x45, 0x70, 0x7c, 0x56, 0x04, 0xa7, 0x67, 0x45, 0xf0, 0xed, 0x79,
- 0x31, 0x73, 0x7c, 0x5e, 0xcc, 0xfc, 0x71, 0x5e, 0xcc, 0x7c, 0xf9, 0x81, 0xe7, 0x87, 0xb5, 0xb6,
- 0x83, 0x5d, 0xd6, 0x20, 0xea, 0x1f, 0x65, 0xdf, 0x71, 0x5f, 0x78, 0x8c, 0x34, 0x58, 0xb5, 0x5d,
- 0xa7, 0x5c, 0xc2, 0x7d, 0xb9, 0xf4, 0x42, 0x43, 0x1c, 0x76, 0x9b, 0x94, 0x3b, 0x37, 0x85, 0x25,
- 0x2e, 0xfd, 0x17, 0x00, 0x00, 0xff, 0xff, 0x44, 0x76, 0x5a, 0x8b, 0xb7, 0x0b, 0x00, 0x00,
+ proto.RegisterFile("ibc/core/connection/v1/query.proto", fileDescriptor_cd8d529f8c7cd06b)
+}
+
+var fileDescriptor_cd8d529f8c7cd06b = []byte{
+ // 892 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0x41, 0x6f, 0x1b, 0x45,
+ 0x14, 0xf6, 0x38, 0x69, 0xd5, 0x8c, 0x43, 0x0b, 0x23, 0xb7, 0x35, 0x0b, 0x38, 0x61, 0x4b, 0x48,
+ 0x0a, 0x74, 0xa6, 0x4e, 0xd4, 0xaa, 0x2d, 0x31, 0x02, 0x47, 0x81, 0xe4, 0x12, 0x85, 0xcd, 0x8d,
+ 0x4b, 0xb4, 0xbb, 0x9e, 0xac, 0x57, 0xb2, 0x77, 0x1c, 0xcf, 0xda, 0xc8, 0x8a, 0x2c, 0x24, 0xfe,
+ 0x00, 0x48, 0x5c, 0xb8, 0x70, 0xe5, 0xc0, 0x1f, 0xe0, 0xc0, 0x8d, 0x53, 0x8e, 0x91, 0xb8, 0xe4,
+ 0x14, 0x21, 0x87, 0x2b, 0x17, 0x7e, 0x01, 0xda, 0x99, 0x71, 0x76, 0xd6, 0x5e, 0x27, 0x8e, 0xd5,
+ 0xdc, 0x36, 0x6f, 0xde, 0x9b, 0xf7, 0x7d, 0xdf, 0x7b, 0xf3, 0x39, 0xd0, 0xf4, 0x1d, 0x97, 0xb8,
+ 0xac, 0x45, 0x89, 0xcb, 0x82, 0x80, 0xba, 0xa1, 0xcf, 0x02, 0xd2, 0x29, 0x91, 0xc3, 0x36, 0x6d,
+ 0x75, 0x71, 0xb3, 0xc5, 0x42, 0x86, 0x1e, 0xf8, 0x8e, 0x8b, 0xa3, 0x1c, 0x1c, 0xe7, 0xe0, 0x4e,
+ 0xc9, 0xc8, 0x7b, 0xcc, 0x63, 0x22, 0x85, 0x44, 0x5f, 0x32, 0xdb, 0xf8, 0xc8, 0x65, 0xbc, 0xc1,
+ 0x38, 0x71, 0x6c, 0x4e, 0xe5, 0x35, 0xa4, 0x53, 0x72, 0x68, 0x68, 0x97, 0x48, 0xd3, 0xf6, 0xfc,
+ 0xc0, 0x16, 0xe5, 0x32, 0x77, 0x21, 0xee, 0x5e, 0xf7, 0x69, 0x10, 0x46, 0x9d, 0xe5, 0x97, 0x4a,
+ 0x58, 0x1e, 0x03, 0x4f, 0x03, 0x22, 0x13, 0xdf, 0xf5, 0x18, 0xf3, 0xea, 0x94, 0xd8, 0x4d, 0x9f,
+ 0xd8, 0x41, 0xc0, 0x42, 0xd1, 0x86, 0xab, 0xd3, 0xb7, 0xd5, 0xa9, 0xf8, 0xcb, 0x69, 0x1f, 0x10,
+ 0x3b, 0x50, 0xe4, 0xcc, 0x32, 0x7c, 0xf0, 0x75, 0x04, 0x72, 0xe3, 0xe2, 0x46, 0x8b, 0x1e, 0xb6,
+ 0x29, 0x0f, 0xd1, 0x23, 0xf8, 0x46, 0xdc, 0x66, 0xdf, 0xaf, 0x16, 0xc0, 0x22, 0x58, 0x99, 0xb3,
+ 0xe6, 0xe3, 0xe0, 0x76, 0xd5, 0xfc, 0x03, 0xc0, 0x87, 0x23, 0xf5, 0xbc, 0xc9, 0x02, 0x4e, 0xd1,
+ 0x26, 0x84, 0x71, 0xae, 0xa8, 0xce, 0xad, 0x2e, 0xe1, 0x74, 0x31, 0x71, 0x5c, 0xbf, 0x19, 0x54,
+ 0x2d, 0xad, 0x10, 0xe5, 0xe1, 0xad, 0x66, 0x8b, 0xb1, 0x83, 0x42, 0x76, 0x11, 0xac, 0xcc, 0x5b,
+ 0xf2, 0x0f, 0xb4, 0x01, 0xe7, 0xc5, 0xc7, 0x7e, 0x8d, 0xfa, 0x5e, 0x2d, 0x2c, 0xcc, 0x88, 0xeb,
+ 0x0d, 0xed, 0x7a, 0xa9, 0x63, 0xa7, 0x84, 0xb7, 0x44, 0x46, 0x65, 0xf6, 0xf8, 0x6c, 0x21, 0x63,
+ 0xe5, 0x44, 0x95, 0x0c, 0x99, 0xf6, 0x08, 0x78, 0x3e, 0x60, 0xff, 0x25, 0x84, 0xf1, 0xb8, 0x14,
+ 0xf8, 0x0f, 0xb1, 0x9c, 0x2d, 0x8e, 0x66, 0x8b, 0xe5, 0x8a, 0xa8, 0xd9, 0xe2, 0x5d, 0xdb, 0xa3,
+ 0xaa, 0xd6, 0xd2, 0x2a, 0xcd, 0x7f, 0x01, 0x2c, 0x8c, 0xf6, 0x50, 0x0a, 0xed, 0xc0, 0x5c, 0x4c,
+ 0x94, 0x17, 0xc0, 0xe2, 0xcc, 0x4a, 0x6e, 0xf5, 0x93, 0x71, 0x12, 0x6d, 0x57, 0x69, 0x10, 0xfa,
+ 0x07, 0x3e, 0xad, 0x6a, 0x62, 0xeb, 0x17, 0xa0, 0xaf, 0x12, 0xa0, 0xb3, 0x02, 0xf4, 0xf2, 0x95,
+ 0xa0, 0x25, 0x18, 0x1d, 0x35, 0x7a, 0x01, 0x6f, 0x5f, 0x53, 0x57, 0x95, 0x6f, 0xae, 0xc3, 0xf7,
+ 0x24, 0x5d, 0x91, 0x96, 0x22, 0xec, 0x3b, 0x70, 0x4e, 0x5e, 0x11, 0xaf, 0xd4, 0x1d, 0x19, 0xd8,
+ 0xae, 0x9a, 0xbf, 0x02, 0x58, 0x1c, 0x57, 0xae, 0x34, 0x7b, 0x0c, 0xdf, 0xd4, 0xd6, 0xb2, 0x69,
+ 0x87, 0x35, 0x29, 0xdc, 0x9c, 0x75, 0x2f, 0x8e, 0xef, 0x46, 0xe1, 0x9b, 0xdc, 0x1c, 0x07, 0xbe,
+ 0x3f, 0x34, 0x55, 0x89, 0x78, 0x2f, 0xb4, 0xc3, 0xc1, 0x1e, 0xa0, 0x72, 0xea, 0x0b, 0xaa, 0x14,
+ 0xfe, 0x3b, 0x5b, 0xc8, 0x77, 0xed, 0x46, 0xfd, 0x95, 0x99, 0x38, 0x36, 0x87, 0xde, 0x56, 0x1f,
+ 0x40, 0xf3, 0xb2, 0x26, 0x4a, 0x10, 0x1b, 0x3e, 0xf4, 0x2f, 0x36, 0x63, 0x5f, 0x69, 0xcb, 0xa3,
+ 0x14, 0xb5, 0xb6, 0x8f, 0xd3, 0xa8, 0x69, 0xcb, 0xa4, 0xdd, 0x79, 0xdf, 0x4f, 0x0b, 0xdf, 0xa4,
+ 0x90, 0xbf, 0x03, 0xf8, 0xc1, 0x30, 0xc9, 0x88, 0x56, 0xc0, 0xdb, 0xfc, 0x35, 0x8a, 0x89, 0x96,
+ 0xe1, 0xbd, 0x16, 0xed, 0xf8, 0x3c, 0x3a, 0x0d, 0xda, 0x0d, 0x87, 0xb6, 0x04, 0x99, 0x59, 0xeb,
+ 0xee, 0x20, 0xbc, 0x23, 0xa2, 0x89, 0x44, 0x8d, 0x98, 0x96, 0xa8, 0x90, 0x9f, 0x01, 0xb8, 0x74,
+ 0x05, 0x72, 0x35, 0xa1, 0x32, 0x8c, 0x56, 0x53, 0x9e, 0x24, 0x26, 0x93, 0xc7, 0xd2, 0x98, 0xf1,
+ 0xc0, 0x98, 0xf1, 0x17, 0x41, 0xd7, 0xba, 0xeb, 0x26, 0xae, 0x49, 0xbe, 0x98, 0x6c, 0xf2, 0xc5,
+ 0xc4, 0xa3, 0x99, 0xb9, 0x6c, 0x34, 0xb3, 0x53, 0x8c, 0x66, 0xf5, 0x87, 0x3b, 0xf0, 0x96, 0x20,
+ 0x88, 0x7e, 0x03, 0x10, 0xc6, 0x2c, 0x11, 0x1e, 0xe7, 0x50, 0xe9, 0xbf, 0x24, 0x06, 0x99, 0x38,
+ 0x5f, 0x0a, 0x66, 0x7e, 0xfa, 0xfd, 0x5f, 0xff, 0xfc, 0x94, 0x7d, 0x86, 0xd6, 0xc8, 0x95, 0xbf,
+ 0x7f, 0x9c, 0x1c, 0x25, 0xe6, 0xde, 0x43, 0xbf, 0x00, 0x98, 0xd3, 0x8c, 0x03, 0x4d, 0xda, 0x7d,
+ 0xe0, 0x50, 0xc6, 0xd3, 0xc9, 0x0b, 0x14, 0xde, 0x8f, 0x05, 0xde, 0x25, 0xf4, 0x68, 0x02, 0xbc,
+ 0xe8, 0x4f, 0x00, 0xdf, 0x1a, 0xb1, 0x37, 0xf4, 0xec, 0xf2, 0xa6, 0x63, 0xdc, 0xd4, 0x78, 0x7e,
+ 0xdd, 0x32, 0x85, 0xf8, 0x33, 0x81, 0xf8, 0x05, 0x7a, 0x3e, 0x16, 0xb1, 0xdc, 0xb8, 0xa4, 0xd0,
+ 0x83, 0x2d, 0xec, 0xa1, 0x53, 0x00, 0xef, 0xa7, 0xda, 0x12, 0x7a, 0x39, 0xa1, 0x7a, 0xa3, 0x7e,
+ 0x69, 0xbc, 0x9a, 0xa6, 0x54, 0x11, 0xda, 0x12, 0x84, 0x2a, 0xe8, 0xf3, 0x29, 0x56, 0x86, 0xe8,
+ 0xa6, 0x89, 0x7e, 0xce, 0xc2, 0xc2, 0xb8, 0x27, 0x8d, 0xd6, 0x27, 0x85, 0x98, 0xe6, 0x61, 0x46,
+ 0x79, 0xca, 0x6a, 0xc5, 0xf1, 0x3b, 0xc1, 0xb1, 0x8b, 0xbe, 0x9d, 0x8a, 0x63, 0xd2, 0x81, 0xc8,
+ 0xc0, 0xcd, 0xc8, 0xd1, 0x90, 0x2f, 0xf6, 0x88, 0x34, 0x0d, 0xed, 0x40, 0x06, 0x7a, 0x95, 0xbd,
+ 0xe3, 0x7e, 0x11, 0x9c, 0xf4, 0x8b, 0xe0, 0xef, 0x7e, 0x11, 0xfc, 0x78, 0x5e, 0xcc, 0x9c, 0x9c,
+ 0x17, 0x33, 0xa7, 0xe7, 0xc5, 0xcc, 0x37, 0x2f, 0x3d, 0x3f, 0xac, 0xb5, 0x1d, 0xec, 0xb2, 0x06,
+ 0x51, 0xff, 0x00, 0xfb, 0x8e, 0xfb, 0xc4, 0x63, 0xa4, 0xc1, 0xaa, 0xed, 0x3a, 0xe5, 0x12, 0xee,
+ 0xd3, 0xb5, 0x27, 0x1a, 0xe2, 0xb0, 0xdb, 0xa4, 0xdc, 0xb9, 0x2d, 0xcc, 0x6f, 0xed, 0xff, 0x00,
+ 0x00, 0x00, 0xff, 0xff, 0xc7, 0x37, 0x60, 0x53, 0x8b, 0x0b, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
@@ -731,7 +731,7 @@ func NewQueryClient(cc grpc1.ClientConn) QueryClient {
func (c *queryClient) Connection(ctx context.Context, in *QueryConnectionRequest, opts ...grpc.CallOption) (*QueryConnectionResponse, error) {
out := new(QueryConnectionResponse)
- err := c.cc.Invoke(ctx, "/ibcgo.core.connection.v1.Query/Connection", in, out, opts...)
+ err := c.cc.Invoke(ctx, "/ibc.core.connection.v1.Query/Connection", in, out, opts...)
if err != nil {
return nil, err
}
@@ -740,7 +740,7 @@ func (c *queryClient) Connection(ctx context.Context, in *QueryConnectionRequest
func (c *queryClient) Connections(ctx context.Context, in *QueryConnectionsRequest, opts ...grpc.CallOption) (*QueryConnectionsResponse, error) {
out := new(QueryConnectionsResponse)
- err := c.cc.Invoke(ctx, "/ibcgo.core.connection.v1.Query/Connections", in, out, opts...)
+ err := c.cc.Invoke(ctx, "/ibc.core.connection.v1.Query/Connections", in, out, opts...)
if err != nil {
return nil, err
}
@@ -749,7 +749,7 @@ func (c *queryClient) Connections(ctx context.Context, in *QueryConnectionsReque
func (c *queryClient) ClientConnections(ctx context.Context, in *QueryClientConnectionsRequest, opts ...grpc.CallOption) (*QueryClientConnectionsResponse, error) {
out := new(QueryClientConnectionsResponse)
- err := c.cc.Invoke(ctx, "/ibcgo.core.connection.v1.Query/ClientConnections", in, out, opts...)
+ err := c.cc.Invoke(ctx, "/ibc.core.connection.v1.Query/ClientConnections", in, out, opts...)
if err != nil {
return nil, err
}
@@ -758,7 +758,7 @@ func (c *queryClient) ClientConnections(ctx context.Context, in *QueryClientConn
func (c *queryClient) ConnectionClientState(ctx context.Context, in *QueryConnectionClientStateRequest, opts ...grpc.CallOption) (*QueryConnectionClientStateResponse, error) {
out := new(QueryConnectionClientStateResponse)
- err := c.cc.Invoke(ctx, "/ibcgo.core.connection.v1.Query/ConnectionClientState", in, out, opts...)
+ err := c.cc.Invoke(ctx, "/ibc.core.connection.v1.Query/ConnectionClientState", in, out, opts...)
if err != nil {
return nil, err
}
@@ -767,7 +767,7 @@ func (c *queryClient) ConnectionClientState(ctx context.Context, in *QueryConnec
func (c *queryClient) ConnectionConsensusState(ctx context.Context, in *QueryConnectionConsensusStateRequest, opts ...grpc.CallOption) (*QueryConnectionConsensusStateResponse, error) {
out := new(QueryConnectionConsensusStateResponse)
- err := c.cc.Invoke(ctx, "/ibcgo.core.connection.v1.Query/ConnectionConsensusState", in, out, opts...)
+ err := c.cc.Invoke(ctx, "/ibc.core.connection.v1.Query/ConnectionConsensusState", in, out, opts...)
if err != nil {
return nil, err
}
@@ -825,7 +825,7 @@ func _Query_Connection_Handler(srv interface{}, ctx context.Context, dec func(in
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/ibcgo.core.connection.v1.Query/Connection",
+ FullMethod: "/ibc.core.connection.v1.Query/Connection",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(QueryServer).Connection(ctx, req.(*QueryConnectionRequest))
@@ -843,7 +843,7 @@ func _Query_Connections_Handler(srv interface{}, ctx context.Context, dec func(i
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/ibcgo.core.connection.v1.Query/Connections",
+ FullMethod: "/ibc.core.connection.v1.Query/Connections",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(QueryServer).Connections(ctx, req.(*QueryConnectionsRequest))
@@ -861,7 +861,7 @@ func _Query_ClientConnections_Handler(srv interface{}, ctx context.Context, dec
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/ibcgo.core.connection.v1.Query/ClientConnections",
+ FullMethod: "/ibc.core.connection.v1.Query/ClientConnections",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(QueryServer).ClientConnections(ctx, req.(*QueryClientConnectionsRequest))
@@ -879,7 +879,7 @@ func _Query_ConnectionClientState_Handler(srv interface{}, ctx context.Context,
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/ibcgo.core.connection.v1.Query/ConnectionClientState",
+ FullMethod: "/ibc.core.connection.v1.Query/ConnectionClientState",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(QueryServer).ConnectionClientState(ctx, req.(*QueryConnectionClientStateRequest))
@@ -897,7 +897,7 @@ func _Query_ConnectionConsensusState_Handler(srv interface{}, ctx context.Contex
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/ibcgo.core.connection.v1.Query/ConnectionConsensusState",
+ FullMethod: "/ibc.core.connection.v1.Query/ConnectionConsensusState",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(QueryServer).ConnectionConsensusState(ctx, req.(*QueryConnectionConsensusStateRequest))
@@ -906,7 +906,7 @@ func _Query_ConnectionConsensusState_Handler(srv interface{}, ctx context.Contex
}
var _Query_serviceDesc = grpc.ServiceDesc{
- ServiceName: "ibcgo.core.connection.v1.Query",
+ ServiceName: "ibc.core.connection.v1.Query",
HandlerType: (*QueryServer)(nil),
Methods: []grpc.MethodDesc{
{
@@ -931,7 +931,7 @@ var _Query_serviceDesc = grpc.ServiceDesc{
},
},
Streams: []grpc.StreamDesc{},
- Metadata: "ibcgo/core/connection/v1/query.proto",
+ Metadata: "ibc/core/connection/v1/query.proto",
}
func (m *QueryConnectionRequest) Marshal() (dAtA []byte, err error) {
diff --git a/modules/core/03-connection/types/query.pb.gw.go b/modules/core/03-connection/types/query.pb.gw.go
index e597cbeb..2de52353 100644
--- a/modules/core/03-connection/types/query.pb.gw.go
+++ b/modules/core/03-connection/types/query.pb.gw.go
@@ -1,5 +1,5 @@
// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
-// source: ibcgo/core/connection/v1/query.proto
+// source: ibc/core/connection/v1/query.proto
/*
Package types is a reverse proxy.
diff --git a/modules/core/03-connection/types/tx.pb.go b/modules/core/03-connection/types/tx.pb.go
index 3e849954..f544f50f 100644
--- a/modules/core/03-connection/types/tx.pb.go
+++ b/modules/core/03-connection/types/tx.pb.go
@@ -1,5 +1,5 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: ibcgo/core/connection/v1/tx.proto
+// source: ibc/core/connection/v1/tx.proto
package types
@@ -44,7 +44,7 @@ func (m *MsgConnectionOpenInit) Reset() { *m = MsgConnectionOpenInit{} }
func (m *MsgConnectionOpenInit) String() string { return proto.CompactTextString(m) }
func (*MsgConnectionOpenInit) ProtoMessage() {}
func (*MsgConnectionOpenInit) Descriptor() ([]byte, []int) {
- return fileDescriptor_296ab31199620d78, []int{0}
+ return fileDescriptor_5d00fde5fc97399e, []int{0}
}
func (m *MsgConnectionOpenInit) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -82,7 +82,7 @@ func (m *MsgConnectionOpenInitResponse) Reset() { *m = MsgConnectionOpen
func (m *MsgConnectionOpenInitResponse) String() string { return proto.CompactTextString(m) }
func (*MsgConnectionOpenInitResponse) ProtoMessage() {}
func (*MsgConnectionOpenInitResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_296ab31199620d78, []int{1}
+ return fileDescriptor_5d00fde5fc97399e, []int{1}
}
func (m *MsgConnectionOpenInitResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -138,7 +138,7 @@ func (m *MsgConnectionOpenTry) Reset() { *m = MsgConnectionOpenTry{} }
func (m *MsgConnectionOpenTry) String() string { return proto.CompactTextString(m) }
func (*MsgConnectionOpenTry) ProtoMessage() {}
func (*MsgConnectionOpenTry) Descriptor() ([]byte, []int) {
- return fileDescriptor_296ab31199620d78, []int{2}
+ return fileDescriptor_5d00fde5fc97399e, []int{2}
}
func (m *MsgConnectionOpenTry) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -175,7 +175,7 @@ func (m *MsgConnectionOpenTryResponse) Reset() { *m = MsgConnectionOpenT
func (m *MsgConnectionOpenTryResponse) String() string { return proto.CompactTextString(m) }
func (*MsgConnectionOpenTryResponse) ProtoMessage() {}
func (*MsgConnectionOpenTryResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_296ab31199620d78, []int{3}
+ return fileDescriptor_5d00fde5fc97399e, []int{3}
}
func (m *MsgConnectionOpenTryResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -227,7 +227,7 @@ func (m *MsgConnectionOpenAck) Reset() { *m = MsgConnectionOpenAck{} }
func (m *MsgConnectionOpenAck) String() string { return proto.CompactTextString(m) }
func (*MsgConnectionOpenAck) ProtoMessage() {}
func (*MsgConnectionOpenAck) Descriptor() ([]byte, []int) {
- return fileDescriptor_296ab31199620d78, []int{4}
+ return fileDescriptor_5d00fde5fc97399e, []int{4}
}
func (m *MsgConnectionOpenAck) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -264,7 +264,7 @@ func (m *MsgConnectionOpenAckResponse) Reset() { *m = MsgConnectionOpenA
func (m *MsgConnectionOpenAckResponse) String() string { return proto.CompactTextString(m) }
func (*MsgConnectionOpenAckResponse) ProtoMessage() {}
func (*MsgConnectionOpenAckResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_296ab31199620d78, []int{5}
+ return fileDescriptor_5d00fde5fc97399e, []int{5}
}
func (m *MsgConnectionOpenAckResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -307,7 +307,7 @@ func (m *MsgConnectionOpenConfirm) Reset() { *m = MsgConnectionOpenConfi
func (m *MsgConnectionOpenConfirm) String() string { return proto.CompactTextString(m) }
func (*MsgConnectionOpenConfirm) ProtoMessage() {}
func (*MsgConnectionOpenConfirm) Descriptor() ([]byte, []int) {
- return fileDescriptor_296ab31199620d78, []int{6}
+ return fileDescriptor_5d00fde5fc97399e, []int{6}
}
func (m *MsgConnectionOpenConfirm) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -345,7 +345,7 @@ func (m *MsgConnectionOpenConfirmResponse) Reset() { *m = MsgConnectionO
func (m *MsgConnectionOpenConfirmResponse) String() string { return proto.CompactTextString(m) }
func (*MsgConnectionOpenConfirmResponse) ProtoMessage() {}
func (*MsgConnectionOpenConfirmResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_296ab31199620d78, []int{7}
+ return fileDescriptor_5d00fde5fc97399e, []int{7}
}
func (m *MsgConnectionOpenConfirmResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -375,78 +375,78 @@ func (m *MsgConnectionOpenConfirmResponse) XXX_DiscardUnknown() {
var xxx_messageInfo_MsgConnectionOpenConfirmResponse proto.InternalMessageInfo
func init() {
- proto.RegisterType((*MsgConnectionOpenInit)(nil), "ibcgo.core.connection.v1.MsgConnectionOpenInit")
- proto.RegisterType((*MsgConnectionOpenInitResponse)(nil), "ibcgo.core.connection.v1.MsgConnectionOpenInitResponse")
- proto.RegisterType((*MsgConnectionOpenTry)(nil), "ibcgo.core.connection.v1.MsgConnectionOpenTry")
- proto.RegisterType((*MsgConnectionOpenTryResponse)(nil), "ibcgo.core.connection.v1.MsgConnectionOpenTryResponse")
- proto.RegisterType((*MsgConnectionOpenAck)(nil), "ibcgo.core.connection.v1.MsgConnectionOpenAck")
- proto.RegisterType((*MsgConnectionOpenAckResponse)(nil), "ibcgo.core.connection.v1.MsgConnectionOpenAckResponse")
- proto.RegisterType((*MsgConnectionOpenConfirm)(nil), "ibcgo.core.connection.v1.MsgConnectionOpenConfirm")
- proto.RegisterType((*MsgConnectionOpenConfirmResponse)(nil), "ibcgo.core.connection.v1.MsgConnectionOpenConfirmResponse")
-}
-
-func init() { proto.RegisterFile("ibcgo/core/connection/v1/tx.proto", fileDescriptor_296ab31199620d78) }
-
-var fileDescriptor_296ab31199620d78 = []byte{
- // 922 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0x4f, 0x8f, 0xdb, 0x44,
- 0x14, 0x8f, 0xf3, 0x3f, 0x93, 0x40, 0x5b, 0x93, 0xec, 0x9a, 0xb0, 0x8d, 0x13, 0x4b, 0xa0, 0x70,
- 0xa8, 0x4d, 0xb6, 0x08, 0x44, 0x10, 0x87, 0x24, 0x17, 0x7a, 0xa8, 0xa8, 0xdc, 0x0a, 0x24, 0x84,
- 0x14, 0x25, 0xce, 0xac, 0x63, 0x25, 0xf1, 0x58, 0x1e, 0x27, 0xaa, 0x85, 0xc4, 0x15, 0x90, 0x38,
- 0xf0, 0x05, 0x90, 0xfa, 0x2d, 0xf8, 0x0a, 0x3d, 0xf6, 0xc8, 0xc9, 0x42, 0xbb, 0x07, 0x38, 0xfb,
- 0x13, 0x20, 0xcf, 0xd8, 0x8e, 0x9d, 0xd8, 0xd2, 0x9a, 0x6c, 0x6f, 0xf3, 0xe6, 0xfd, 0xde, 0x7b,
- 0x33, 0xef, 0xfd, 0x7e, 0xa3, 0x01, 0x3d, 0x6d, 0xae, 0xa8, 0x48, 0x52, 0x90, 0x09, 0x25, 0x05,
- 0xe9, 0x3a, 0x54, 0x2c, 0x0d, 0xe9, 0xd2, 0x6e, 0x20, 0x59, 0x2f, 0x45, 0xc3, 0x44, 0x16, 0x62,
- 0x39, 0x02, 0x11, 0x3d, 0x88, 0xb8, 0x87, 0x88, 0xbb, 0x41, 0xbb, 0xa9, 0x22, 0x15, 0x11, 0x90,
- 0xe4, 0xad, 0x28, 0xbe, 0xfd, 0xbe, 0x8a, 0x90, 0xba, 0x86, 0x12, 0xb1, 0xe6, 0xdb, 0x2b, 0x69,
- 0xa6, 0xdb, 0xbe, 0x2b, 0x56, 0x6d, 0xad, 0x41, 0xdd, 0xf2, 0x2a, 0xd1, 0x95, 0x0f, 0xf9, 0x38,
- 0xf5, 0x40, 0x91, 0xda, 0x04, 0x2a, 0xfc, 0x99, 0x07, 0xad, 0xa7, 0x58, 0x9d, 0x84, 0xfb, 0xdf,
- 0x18, 0x50, 0x7f, 0xa2, 0x6b, 0x16, 0x3b, 0x00, 0x35, 0x9a, 0x74, 0xaa, 0x2d, 0x38, 0xa6, 0xcb,
- 0xf4, 0x6b, 0xe3, 0xa6, 0xeb, 0xf0, 0xf7, 0xed, 0xd9, 0x66, 0x3d, 0x14, 0x42, 0x97, 0x20, 0x57,
- 0xe9, 0xfa, 0xc9, 0x82, 0x7d, 0x06, 0x1a, 0x0a, 0xda, 0xea, 0x16, 0x34, 0x8d, 0x99, 0x69, 0xd9,
- 0x5c, 0xbe, 0xcb, 0xf4, 0xeb, 0x97, 0x1f, 0x89, 0x69, 0x97, 0x17, 0x27, 0x11, 0xf4, 0xb8, 0xf8,
- 0xda, 0xe1, 0x73, 0x72, 0x2c, 0x03, 0xfb, 0x25, 0xa8, 0xec, 0xa0, 0x89, 0x35, 0xa4, 0x73, 0x05,
- 0x92, 0xac, 0x97, 0x9e, 0xec, 0x5b, 0x0a, 0x94, 0x83, 0x08, 0x76, 0x08, 0x1a, 0x0b, 0xb8, 0x9e,
- 0xd9, 0x53, 0x03, 0x9a, 0x1a, 0x5a, 0x70, 0xc5, 0x2e, 0xd3, 0x2f, 0x8e, 0xcf, 0x5d, 0x87, 0x7f,
- 0x8f, 0x5e, 0x22, 0xea, 0x15, 0xe4, 0x3a, 0x31, 0x9f, 0x11, 0x8b, 0x3d, 0x03, 0x65, 0xac, 0xa9,
- 0x3a, 0x34, 0xb9, 0x92, 0x77, 0x75, 0xd9, 0xb7, 0x86, 0xd5, 0x5f, 0x5e, 0xf1, 0xb9, 0x7f, 0x5f,
- 0xf1, 0x39, 0x81, 0x07, 0x0f, 0x13, 0x1b, 0x27, 0x43, 0x6c, 0x20, 0x1d, 0x43, 0xe1, 0x8f, 0x0a,
- 0x68, 0x1e, 0x21, 0x5e, 0x98, 0xf6, 0xff, 0xe9, 0xec, 0x77, 0xe0, 0xcc, 0x30, 0xe1, 0x4e, 0x43,
- 0x5b, 0x3c, 0xdd, 0xdf, 0xda, 0x8b, 0xcf, 0x93, 0xf8, 0x9e, 0xeb, 0xf0, 0x0f, 0x69, 0x7c, 0x32,
- 0x4e, 0x90, 0x9b, 0x81, 0x63, 0x7f, 0x20, 0x7f, 0x64, 0xb4, 0x20, 0xb6, 0x66, 0x16, 0xf4, 0xbb,
- 0xdc, 0x14, 0x29, 0xff, 0xc4, 0x80, 0x7f, 0xe2, 0x48, 0xb7, 0xa3, 0x9d, 0x8b, 0xc6, 0x08, 0x72,
- 0x9d, 0x9a, 0xcf, 0x3d, 0xeb, 0x88, 0x04, 0xc5, 0x93, 0x49, 0x70, 0x38, 0xc7, 0x52, 0x86, 0x39,
- 0xbe, 0x04, 0xad, 0x68, 0xae, 0xa9, 0xcf, 0x0d, 0xcc, 0x95, 0xbb, 0x85, 0x5b, 0xd1, 0x69, 0xdc,
- 0x75, 0x1d, 0xfe, 0xc2, 0xbf, 0x75, 0x52, 0x26, 0x41, 0x6e, 0x46, 0xf7, 0xfd, 0x30, 0xcc, 0xfe,
- 0x00, 0x1a, 0x86, 0x89, 0xd0, 0xd5, 0x74, 0x09, 0x35, 0x75, 0x69, 0x71, 0x15, 0xd2, 0x87, 0x8b,
- 0x58, 0x41, 0x2a, 0xda, 0xdd, 0x40, 0xfc, 0x9a, 0x60, 0xc6, 0x1f, 0x78, 0xb7, 0xdf, 0xdf, 0x2b,
- 0x1a, 0x2f, 0xc8, 0x75, 0x62, 0x52, 0x24, 0xfb, 0x29, 0x00, 0xd4, 0xab, 0xe9, 0x9a, 0xc5, 0x55,
- 0xbb, 0x4c, 0xbf, 0x31, 0x6e, 0xb9, 0x0e, 0xff, 0x20, 0x1a, 0xe9, 0xf9, 0x04, 0xb9, 0x46, 0x0c,
- 0xa2, 0xe9, 0x61, 0x70, 0x26, 0x5a, 0x99, 0xab, 0x91, 0xb8, 0xf3, 0xc3, 0x8a, 0xd4, 0x1b, 0x54,
- 0x9c, 0x10, 0x8b, 0x9d, 0x80, 0x7b, 0xbe, 0xd7, 0x63, 0xb7, 0x8e, 0xb7, 0x98, 0x03, 0x24, 0xbc,
- 0xed, 0x3a, 0xfc, 0x59, 0x2c, 0x3c, 0x00, 0x08, 0xf2, 0xbb, 0x34, 0x43, 0xb0, 0xc1, 0x2e, 0xc1,
- 0xfd, 0xd0, 0x1b, 0x34, 0xa6, 0x7e, 0x8b, 0xc6, 0xf0, 0x7e, 0x63, 0xce, 0x83, 0x41, 0xc4, 0x73,
- 0x08, 0xf2, 0xbd, 0x70, 0xcb, 0x6f, 0xd0, 0x5e, 0xc0, 0x8d, 0x14, 0x01, 0x77, 0xc0, 0x45, 0x92,
- 0x3c, 0x43, 0xfd, 0xfe, 0x53, 0x4a, 0xd0, 0xef, 0x48, 0x59, 0xb1, 0x5f, 0x81, 0x77, 0xe2, 0x1a,
- 0xa4, 0x1a, 0xe6, 0x5c, 0x87, 0x6f, 0x86, 0xe7, 0x8b, 0x4a, 0xaf, 0xa1, 0x44, 0x25, 0xa7, 0x80,
- 0x76, 0x8c, 0x48, 0x49, 0x7a, 0xfe, 0xd0, 0x75, 0xf8, 0x5e, 0x02, 0xe9, 0x0e, 0x12, 0x73, 0x51,
- 0x67, 0x4c, 0xd7, 0x27, 0x3d, 0x9c, 0x87, 0x8f, 0x42, 0xf1, 0xe4, 0x47, 0xe1, 0x50, 0x0c, 0xa5,
- 0x3b, 0x15, 0xc3, 0x00, 0x50, 0x8e, 0x4f, 0x2d, 0xd3, 0xe6, 0xca, 0x84, 0x94, 0x91, 0x07, 0x35,
- 0x74, 0x09, 0x72, 0x95, 0xac, 0xbd, 0x37, 0xf8, 0x50, 0x09, 0x95, 0xd3, 0x94, 0x50, 0xbd, 0x13,
- 0x25, 0xd4, 0xde, 0xb2, 0x12, 0x40, 0x06, 0x25, 0x8c, 0x94, 0x55, 0xa8, 0x84, 0xdf, 0xf2, 0x80,
- 0x3b, 0x02, 0x4c, 0x90, 0x7e, 0xa5, 0x99, 0x9b, 0x53, 0xd5, 0x10, 0xce, 0x6e, 0xa6, 0xac, 0x08,
- 0xf9, 0x13, 0x66, 0x37, 0x53, 0x56, 0xc1, 0xec, 0x3c, 0xfd, 0x1d, 0x92, 0xa9, 0x70, 0xa7, 0x64,
- 0xda, 0xb7, 0xab, 0x98, 0xd2, 0x2e, 0x01, 0x74, 0xd3, 0xba, 0x11, 0xb4, 0xec, 0xf2, 0xd7, 0x22,
- 0x28, 0x3c, 0xc5, 0x2a, 0xfb, 0x13, 0x60, 0x13, 0xfe, 0x56, 0x52, 0xba, 0x18, 0x13, 0xff, 0x14,
- 0xed, 0xcf, 0x33, 0x06, 0x04, 0xe7, 0x60, 0x7f, 0x04, 0x0f, 0x8e, 0x3f, 0x20, 0x62, 0x86, 0x6c,
- 0x2f, 0x4c, 0xbb, 0xfd, 0x59, 0x36, 0x7c, 0x7a, 0x71, 0x6f, 0x7a, 0x59, 0x8a, 0x8f, 0x94, 0x55,
- 0xa6, 0xe2, 0x11, 0xd2, 0xb2, 0x3f, 0x33, 0xa0, 0x95, 0xcc, 0xd8, 0xcb, 0x0c, 0x19, 0xfd, 0x98,
- 0xf6, 0x30, 0x7b, 0x4c, 0x70, 0x92, 0xf1, 0xf3, 0xd7, 0xd7, 0x1d, 0xe6, 0xcd, 0x75, 0x87, 0xf9,
- 0xfb, 0xba, 0xc3, 0xfc, 0x7e, 0xd3, 0xc9, 0xbd, 0xb9, 0xe9, 0xe4, 0xfe, 0xba, 0xe9, 0xe4, 0xbe,
- 0xff, 0x42, 0xd5, 0xac, 0xe5, 0x76, 0x2e, 0x2a, 0x68, 0x23, 0x29, 0x08, 0x6f, 0x10, 0x96, 0xb4,
- 0xb9, 0xf2, 0x48, 0x45, 0xd2, 0x06, 0x2d, 0xb6, 0x6b, 0x88, 0xe9, 0x1f, 0xfe, 0x93, 0xc7, 0x8f,
- 0x22, 0xdf, 0x78, 0xcb, 0x36, 0x20, 0x9e, 0x97, 0xc9, 0x2b, 0xfc, 0xf8, 0xbf, 0x00, 0x00, 0x00,
- 0xff, 0xff, 0x49, 0xfc, 0x07, 0xd9, 0x7d, 0x0c, 0x00, 0x00,
+ proto.RegisterType((*MsgConnectionOpenInit)(nil), "ibc.core.connection.v1.MsgConnectionOpenInit")
+ proto.RegisterType((*MsgConnectionOpenInitResponse)(nil), "ibc.core.connection.v1.MsgConnectionOpenInitResponse")
+ proto.RegisterType((*MsgConnectionOpenTry)(nil), "ibc.core.connection.v1.MsgConnectionOpenTry")
+ proto.RegisterType((*MsgConnectionOpenTryResponse)(nil), "ibc.core.connection.v1.MsgConnectionOpenTryResponse")
+ proto.RegisterType((*MsgConnectionOpenAck)(nil), "ibc.core.connection.v1.MsgConnectionOpenAck")
+ proto.RegisterType((*MsgConnectionOpenAckResponse)(nil), "ibc.core.connection.v1.MsgConnectionOpenAckResponse")
+ proto.RegisterType((*MsgConnectionOpenConfirm)(nil), "ibc.core.connection.v1.MsgConnectionOpenConfirm")
+ proto.RegisterType((*MsgConnectionOpenConfirmResponse)(nil), "ibc.core.connection.v1.MsgConnectionOpenConfirmResponse")
+}
+
+func init() { proto.RegisterFile("ibc/core/connection/v1/tx.proto", fileDescriptor_5d00fde5fc97399e) }
+
+var fileDescriptor_5d00fde5fc97399e = []byte{
+ // 925 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0x31, 0x6f, 0xdb, 0x46,
+ 0x14, 0x16, 0x2d, 0xd9, 0x96, 0x4e, 0x6a, 0x93, 0x5c, 0x65, 0x9b, 0x55, 0x13, 0x51, 0x21, 0x5a,
+ 0xd4, 0x43, 0x4d, 0x46, 0x49, 0x0a, 0x34, 0x06, 0x3a, 0x58, 0x5a, 0xea, 0x21, 0x6d, 0xc0, 0x04,
+ 0x2d, 0x90, 0x45, 0x90, 0x4e, 0x67, 0xfa, 0x20, 0x89, 0x47, 0xf0, 0x28, 0xb5, 0xec, 0xda, 0xa5,
+ 0xe8, 0xd4, 0xa5, 0x7b, 0xfe, 0x43, 0xff, 0x44, 0x46, 0x8f, 0x9d, 0x88, 0xd6, 0x5e, 0x3a, 0x73,
+ 0xeb, 0x56, 0xf0, 0x8e, 0xa4, 0x4e, 0x32, 0x85, 0x5a, 0x95, 0xb3, 0xdd, 0xbb, 0xf7, 0xbd, 0xf7,
+ 0xee, 0xde, 0xfb, 0xbe, 0xc3, 0x01, 0x8d, 0x0c, 0x90, 0x89, 0xa8, 0x87, 0x4d, 0x44, 0x1d, 0x07,
+ 0x23, 0x9f, 0x50, 0xc7, 0x9c, 0xb5, 0x4d, 0xff, 0x07, 0xc3, 0xf5, 0xa8, 0x4f, 0xe1, 0x3e, 0x19,
+ 0x20, 0x23, 0x06, 0x18, 0x73, 0x80, 0x31, 0x6b, 0x37, 0xea, 0x36, 0xb5, 0x29, 0x87, 0x98, 0xf1,
+ 0x4a, 0xa0, 0x1b, 0x1f, 0xda, 0x94, 0xda, 0x63, 0x6c, 0x72, 0x6b, 0x30, 0x3d, 0x33, 0xfb, 0x4e,
+ 0x90, 0xb8, 0xa4, 0x4a, 0x63, 0x82, 0x1d, 0x3f, 0xae, 0x22, 0x56, 0x09, 0xe0, 0xd3, 0x15, 0x47,
+ 0x91, 0xea, 0x72, 0xa0, 0xfe, 0xfb, 0x16, 0xd8, 0x7b, 0xce, 0xec, 0x6e, 0xb6, 0xff, 0x8d, 0x8b,
+ 0x9d, 0x53, 0x87, 0xf8, 0xb0, 0x0d, 0x2a, 0x22, 0x65, 0x8f, 0x0c, 0x55, 0xa5, 0xa5, 0x1c, 0x56,
+ 0x3a, 0xf5, 0x28, 0xd4, 0xee, 0x06, 0xfd, 0xc9, 0xf8, 0x58, 0xcf, 0x5c, 0xba, 0x55, 0x16, 0xeb,
+ 0xd3, 0x21, 0xfc, 0x1a, 0xd4, 0x10, 0x9d, 0x3a, 0x3e, 0xf6, 0xdc, 0xbe, 0xe7, 0x07, 0xea, 0x56,
+ 0x4b, 0x39, 0xac, 0x3e, 0xfe, 0xd8, 0xc8, 0xbf, 0xb6, 0xd1, 0x95, 0xb0, 0x9d, 0xd2, 0xdb, 0x50,
+ 0x2b, 0x58, 0x0b, 0xf1, 0xf0, 0x19, 0xd8, 0x9d, 0x61, 0x8f, 0x11, 0xea, 0xa8, 0x45, 0x9e, 0x4a,
+ 0x5b, 0x95, 0xea, 0x5b, 0x01, 0xb3, 0x52, 0x3c, 0x3c, 0x06, 0xb5, 0x21, 0x1e, 0xf7, 0x83, 0x9e,
+ 0x8b, 0x3d, 0x42, 0x87, 0x6a, 0xa9, 0xa5, 0x1c, 0x96, 0x3a, 0x07, 0x51, 0xa8, 0x7d, 0x20, 0x2e,
+ 0x20, 0x7b, 0x75, 0xab, 0xca, 0xcd, 0x17, 0xdc, 0x82, 0xfb, 0x60, 0x87, 0x11, 0xdb, 0xc1, 0x9e,
+ 0xba, 0x1d, 0x5f, 0xdb, 0x4a, 0xac, 0xe3, 0xf2, 0xcf, 0x6f, 0xb4, 0xc2, 0xdf, 0x6f, 0xb4, 0x82,
+ 0xae, 0x81, 0x07, 0xb9, 0x4d, 0xb3, 0x30, 0x73, 0xa9, 0xc3, 0xb0, 0xfe, 0xdb, 0x2e, 0xa8, 0x5f,
+ 0x43, 0xbc, 0xf2, 0x82, 0xff, 0xd3, 0xd5, 0xef, 0xc0, 0xbe, 0xeb, 0xe1, 0x19, 0xa1, 0x53, 0xd6,
+ 0x9b, 0xdf, 0x3a, 0x8e, 0xdf, 0xe2, 0xf1, 0x0f, 0xa3, 0x50, 0x7b, 0x20, 0xe2, 0xf3, 0x71, 0xba,
+ 0x55, 0x4f, 0x1d, 0xf3, 0x03, 0x9d, 0x0e, 0xe1, 0x0b, 0x50, 0x4b, 0x0a, 0x32, 0xbf, 0xef, 0xe3,
+ 0xa4, 0xc7, 0x75, 0x43, 0xf0, 0xce, 0x48, 0x79, 0x67, 0x9c, 0x38, 0x81, 0xdc, 0x39, 0x39, 0x46,
+ 0xb7, 0xaa, 0xc2, 0x7c, 0x19, 0x5b, 0xd7, 0x08, 0x50, 0xda, 0x90, 0x00, 0xcb, 0x53, 0xdc, 0x5e,
+ 0x63, 0x8a, 0x33, 0xb0, 0x27, 0xe7, 0xea, 0x25, 0xcc, 0x60, 0xea, 0x4e, 0xab, 0x78, 0x03, 0x2a,
+ 0x75, 0x5a, 0x51, 0xa8, 0xdd, 0x4f, 0x6e, 0x9c, 0x97, 0x47, 0xb7, 0xea, 0xf2, 0x7e, 0x12, 0xc6,
+ 0xe0, 0x6b, 0x50, 0x73, 0x3d, 0x4a, 0xcf, 0x7a, 0xe7, 0x98, 0xd8, 0xe7, 0xbe, 0xba, 0xcb, 0x7b,
+ 0xd0, 0x90, 0xca, 0x09, 0xa1, 0xce, 0xda, 0xc6, 0x57, 0x1c, 0xd1, 0xf9, 0x28, 0xbe, 0xf9, 0xfc,
+ 0x4e, 0x72, 0xb4, 0x6e, 0x55, 0xb9, 0x29, 0x90, 0xf0, 0x29, 0x00, 0xc2, 0x4b, 0x1c, 0xe2, 0xab,
+ 0xe5, 0x96, 0x72, 0x58, 0xeb, 0xec, 0x45, 0xa1, 0x76, 0x4f, 0x8e, 0x8c, 0x7d, 0xba, 0x55, 0xe1,
+ 0x06, 0x57, 0xf2, 0x71, 0x7a, 0x22, 0x51, 0x59, 0xad, 0xf0, 0xb8, 0x83, 0xe5, 0x8a, 0xc2, 0x9b,
+ 0x56, 0xec, 0x72, 0x0b, 0x76, 0xc1, 0x9d, 0xc4, 0x1b, 0xf3, 0xda, 0x61, 0x53, 0xa6, 0x02, 0x1e,
+ 0xde, 0x88, 0x42, 0x6d, 0x7f, 0x21, 0x3c, 0x05, 0xe8, 0xd6, 0xfb, 0x22, 0x43, 0xba, 0x01, 0xcf,
+ 0xc0, 0xdd, 0xcc, 0x9b, 0xb6, 0xa5, 0xfa, 0x9f, 0x6d, 0xd1, 0x92, 0xb6, 0x1c, 0xa4, 0x43, 0x58,
+ 0xcc, 0xa0, 0x5b, 0x77, 0xb2, 0xad, 0xa4, 0x3d, 0x73, 0xe1, 0xd6, 0x56, 0x08, 0xb7, 0x09, 0xee,
+ 0xe7, 0xc9, 0x32, 0xd3, 0xed, 0x5f, 0xdb, 0x39, 0xba, 0x3d, 0x41, 0x23, 0xf8, 0x25, 0x78, 0x6f,
+ 0x51, 0x7b, 0x42, 0xbb, 0x6a, 0x14, 0x6a, 0xf5, 0xec, 0x7c, 0xb2, 0xe4, 0x6a, 0x48, 0x96, 0x1a,
+ 0x02, 0x8d, 0x05, 0x12, 0xe5, 0xe9, 0xf8, 0x93, 0x28, 0xd4, 0x1e, 0xe6, 0x10, 0x6e, 0x29, 0xb1,
+ 0x2a, 0x3b, 0x17, 0xf4, 0xbc, 0xc1, 0x73, 0xb9, 0xfc, 0x14, 0x94, 0x36, 0x7e, 0x0a, 0x96, 0x65,
+ 0xb0, 0x7d, 0x8b, 0x32, 0x68, 0x03, 0xc1, 0xee, 0x9e, 0xef, 0x05, 0xea, 0x0e, 0xa7, 0xa3, 0xf4,
+ 0x88, 0x66, 0x2e, 0xdd, 0x2a, 0xf3, 0x75, 0xfc, 0xee, 0x2e, 0x6b, 0x60, 0x77, 0x33, 0x0d, 0x94,
+ 0x6f, 0x45, 0x03, 0x95, 0x77, 0xaa, 0x01, 0xb0, 0x86, 0x06, 0x4e, 0xd0, 0x28, 0xd3, 0xc0, 0x2f,
+ 0x5b, 0x40, 0xbd, 0x06, 0xe8, 0x52, 0xe7, 0x8c, 0x78, 0x93, 0x4d, 0x75, 0x90, 0x4d, 0xae, 0x8f,
+ 0x46, 0x9c, 0xf6, 0x39, 0x93, 0xeb, 0xa3, 0x51, 0x3a, 0xb9, 0x58, 0x79, 0xcb, 0x44, 0x2a, 0xde,
+ 0x22, 0x91, 0xe6, 0xcd, 0x2a, 0xad, 0x68, 0x96, 0x0e, 0x5a, 0xab, 0x7a, 0x91, 0x36, 0xec, 0xf1,
+ 0x3f, 0x45, 0x50, 0x7c, 0xce, 0x6c, 0xf8, 0x23, 0x80, 0x39, 0xff, 0xa8, 0xa3, 0x55, 0x22, 0xcc,
+ 0xfd, 0x41, 0x34, 0x3e, 0x5f, 0x0b, 0x9e, 0x9e, 0x01, 0x7e, 0x0f, 0xee, 0x5d, 0xff, 0x6c, 0x7c,
+ 0x76, 0xe3, 0x5c, 0xaf, 0xbc, 0xa0, 0xf1, 0x74, 0x1d, 0xf4, 0xea, 0xc2, 0xf1, 0xcc, 0x6e, 0x5e,
+ 0xf8, 0x04, 0x8d, 0xd6, 0x28, 0x2c, 0xd1, 0x14, 0xfe, 0xa4, 0x80, 0xbd, 0x7c, 0x8e, 0x3e, 0xba,
+ 0x71, 0xbe, 0x24, 0xa2, 0xf1, 0xc5, 0xba, 0x11, 0xe9, 0x29, 0x3a, 0x2f, 0xdf, 0x5e, 0x36, 0x95,
+ 0x8b, 0xcb, 0xa6, 0xf2, 0xe7, 0x65, 0x53, 0xf9, 0xf5, 0xaa, 0x59, 0xb8, 0xb8, 0x6a, 0x16, 0xfe,
+ 0xb8, 0x6a, 0x16, 0x5e, 0x3f, 0xb3, 0x89, 0x7f, 0x3e, 0x1d, 0x18, 0x88, 0x4e, 0x4c, 0x44, 0xd9,
+ 0x84, 0x32, 0x93, 0x0c, 0xd0, 0x91, 0x4d, 0xcd, 0x09, 0x1d, 0x4e, 0xc7, 0x98, 0x89, 0xff, 0xf9,
+ 0xa3, 0x27, 0x47, 0xd2, 0x17, 0xdd, 0x0f, 0x5c, 0xcc, 0x06, 0x3b, 0xfc, 0xbd, 0x7d, 0xf2, 0x6f,
+ 0x00, 0x00, 0x00, 0xff, 0xff, 0xcc, 0x63, 0x63, 0xc9, 0x51, 0x0c, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
@@ -482,7 +482,7 @@ func NewMsgClient(cc grpc1.ClientConn) MsgClient {
func (c *msgClient) ConnectionOpenInit(ctx context.Context, in *MsgConnectionOpenInit, opts ...grpc.CallOption) (*MsgConnectionOpenInitResponse, error) {
out := new(MsgConnectionOpenInitResponse)
- err := c.cc.Invoke(ctx, "/ibcgo.core.connection.v1.Msg/ConnectionOpenInit", in, out, opts...)
+ err := c.cc.Invoke(ctx, "/ibc.core.connection.v1.Msg/ConnectionOpenInit", in, out, opts...)
if err != nil {
return nil, err
}
@@ -491,7 +491,7 @@ func (c *msgClient) ConnectionOpenInit(ctx context.Context, in *MsgConnectionOpe
func (c *msgClient) ConnectionOpenTry(ctx context.Context, in *MsgConnectionOpenTry, opts ...grpc.CallOption) (*MsgConnectionOpenTryResponse, error) {
out := new(MsgConnectionOpenTryResponse)
- err := c.cc.Invoke(ctx, "/ibcgo.core.connection.v1.Msg/ConnectionOpenTry", in, out, opts...)
+ err := c.cc.Invoke(ctx, "/ibc.core.connection.v1.Msg/ConnectionOpenTry", in, out, opts...)
if err != nil {
return nil, err
}
@@ -500,7 +500,7 @@ func (c *msgClient) ConnectionOpenTry(ctx context.Context, in *MsgConnectionOpen
func (c *msgClient) ConnectionOpenAck(ctx context.Context, in *MsgConnectionOpenAck, opts ...grpc.CallOption) (*MsgConnectionOpenAckResponse, error) {
out := new(MsgConnectionOpenAckResponse)
- err := c.cc.Invoke(ctx, "/ibcgo.core.connection.v1.Msg/ConnectionOpenAck", in, out, opts...)
+ err := c.cc.Invoke(ctx, "/ibc.core.connection.v1.Msg/ConnectionOpenAck", in, out, opts...)
if err != nil {
return nil, err
}
@@ -509,7 +509,7 @@ func (c *msgClient) ConnectionOpenAck(ctx context.Context, in *MsgConnectionOpen
func (c *msgClient) ConnectionOpenConfirm(ctx context.Context, in *MsgConnectionOpenConfirm, opts ...grpc.CallOption) (*MsgConnectionOpenConfirmResponse, error) {
out := new(MsgConnectionOpenConfirmResponse)
- err := c.cc.Invoke(ctx, "/ibcgo.core.connection.v1.Msg/ConnectionOpenConfirm", in, out, opts...)
+ err := c.cc.Invoke(ctx, "/ibc.core.connection.v1.Msg/ConnectionOpenConfirm", in, out, opts...)
if err != nil {
return nil, err
}
@@ -560,7 +560,7 @@ func _Msg_ConnectionOpenInit_Handler(srv interface{}, ctx context.Context, dec f
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/ibcgo.core.connection.v1.Msg/ConnectionOpenInit",
+ FullMethod: "/ibc.core.connection.v1.Msg/ConnectionOpenInit",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(MsgServer).ConnectionOpenInit(ctx, req.(*MsgConnectionOpenInit))
@@ -578,7 +578,7 @@ func _Msg_ConnectionOpenTry_Handler(srv interface{}, ctx context.Context, dec fu
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/ibcgo.core.connection.v1.Msg/ConnectionOpenTry",
+ FullMethod: "/ibc.core.connection.v1.Msg/ConnectionOpenTry",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(MsgServer).ConnectionOpenTry(ctx, req.(*MsgConnectionOpenTry))
@@ -596,7 +596,7 @@ func _Msg_ConnectionOpenAck_Handler(srv interface{}, ctx context.Context, dec fu
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/ibcgo.core.connection.v1.Msg/ConnectionOpenAck",
+ FullMethod: "/ibc.core.connection.v1.Msg/ConnectionOpenAck",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(MsgServer).ConnectionOpenAck(ctx, req.(*MsgConnectionOpenAck))
@@ -614,7 +614,7 @@ func _Msg_ConnectionOpenConfirm_Handler(srv interface{}, ctx context.Context, de
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/ibcgo.core.connection.v1.Msg/ConnectionOpenConfirm",
+ FullMethod: "/ibc.core.connection.v1.Msg/ConnectionOpenConfirm",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(MsgServer).ConnectionOpenConfirm(ctx, req.(*MsgConnectionOpenConfirm))
@@ -623,7 +623,7 @@ func _Msg_ConnectionOpenConfirm_Handler(srv interface{}, ctx context.Context, de
}
var _Msg_serviceDesc = grpc.ServiceDesc{
- ServiceName: "ibcgo.core.connection.v1.Msg",
+ ServiceName: "ibc.core.connection.v1.Msg",
HandlerType: (*MsgServer)(nil),
Methods: []grpc.MethodDesc{
{
@@ -644,7 +644,7 @@ var _Msg_serviceDesc = grpc.ServiceDesc{
},
},
Streams: []grpc.StreamDesc{},
- Metadata: "ibcgo/core/connection/v1/tx.proto",
+ Metadata: "ibc/core/connection/v1/tx.proto",
}
func (m *MsgConnectionOpenInit) Marshal() (dAtA []byte, err error) {
diff --git a/modules/core/04-channel/types/channel.pb.go b/modules/core/04-channel/types/channel.pb.go
index 88da8ebd..166d836c 100644
--- a/modules/core/04-channel/types/channel.pb.go
+++ b/modules/core/04-channel/types/channel.pb.go
@@ -1,5 +1,5 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: ibcgo/core/channel/v1/channel.proto
+// source: ibc/core/channel/v1/channel.proto
package types
@@ -64,7 +64,7 @@ func (x State) String() string {
}
func (State) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_3a7a8797f9808eee, []int{0}
+ return fileDescriptor_c3a07336710636a0, []int{0}
}
// Order defines if a channel is ORDERED or UNORDERED
@@ -97,7 +97,7 @@ func (x Order) String() string {
}
func (Order) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_3a7a8797f9808eee, []int{1}
+ return fileDescriptor_c3a07336710636a0, []int{1}
}
// Channel defines pipeline for exactly-once packet delivery between specific
@@ -105,9 +105,9 @@ func (Order) EnumDescriptor() ([]byte, []int) {
// sending packets and one end capable of receiving packets.
type Channel struct {
// current state of the channel end
- State State `protobuf:"varint,1,opt,name=state,proto3,enum=ibcgo.core.channel.v1.State" json:"state,omitempty"`
+ State State `protobuf:"varint,1,opt,name=state,proto3,enum=ibc.core.channel.v1.State" json:"state,omitempty"`
// whether the channel is ordered or unordered
- Ordering Order `protobuf:"varint,2,opt,name=ordering,proto3,enum=ibcgo.core.channel.v1.Order" json:"ordering,omitempty"`
+ Ordering Order `protobuf:"varint,2,opt,name=ordering,proto3,enum=ibc.core.channel.v1.Order" json:"ordering,omitempty"`
// counterparty channel end
Counterparty Counterparty `protobuf:"bytes,3,opt,name=counterparty,proto3" json:"counterparty"`
// list of connection identifiers, in order, along which packets sent on
@@ -121,7 +121,7 @@ func (m *Channel) Reset() { *m = Channel{} }
func (m *Channel) String() string { return proto.CompactTextString(m) }
func (*Channel) ProtoMessage() {}
func (*Channel) Descriptor() ([]byte, []int) {
- return fileDescriptor_3a7a8797f9808eee, []int{0}
+ return fileDescriptor_c3a07336710636a0, []int{0}
}
func (m *Channel) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -154,9 +154,9 @@ var xxx_messageInfo_Channel proto.InternalMessageInfo
// identifier fields.
type IdentifiedChannel struct {
// current state of the channel end
- State State `protobuf:"varint,1,opt,name=state,proto3,enum=ibcgo.core.channel.v1.State" json:"state,omitempty"`
+ State State `protobuf:"varint,1,opt,name=state,proto3,enum=ibc.core.channel.v1.State" json:"state,omitempty"`
// whether the channel is ordered or unordered
- Ordering Order `protobuf:"varint,2,opt,name=ordering,proto3,enum=ibcgo.core.channel.v1.Order" json:"ordering,omitempty"`
+ Ordering Order `protobuf:"varint,2,opt,name=ordering,proto3,enum=ibc.core.channel.v1.Order" json:"ordering,omitempty"`
// counterparty channel end
Counterparty Counterparty `protobuf:"bytes,3,opt,name=counterparty,proto3" json:"counterparty"`
// list of connection identifiers, in order, along which packets sent on
@@ -174,7 +174,7 @@ func (m *IdentifiedChannel) Reset() { *m = IdentifiedChannel{} }
func (m *IdentifiedChannel) String() string { return proto.CompactTextString(m) }
func (*IdentifiedChannel) ProtoMessage() {}
func (*IdentifiedChannel) Descriptor() ([]byte, []int) {
- return fileDescriptor_3a7a8797f9808eee, []int{1}
+ return fileDescriptor_c3a07336710636a0, []int{1}
}
func (m *IdentifiedChannel) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -215,7 +215,7 @@ func (m *Counterparty) Reset() { *m = Counterparty{} }
func (m *Counterparty) String() string { return proto.CompactTextString(m) }
func (*Counterparty) ProtoMessage() {}
func (*Counterparty) Descriptor() ([]byte, []int) {
- return fileDescriptor_3a7a8797f9808eee, []int{2}
+ return fileDescriptor_c3a07336710636a0, []int{2}
}
func (m *Counterparty) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -270,7 +270,7 @@ func (m *Packet) Reset() { *m = Packet{} }
func (m *Packet) String() string { return proto.CompactTextString(m) }
func (*Packet) ProtoMessage() {}
func (*Packet) Descriptor() ([]byte, []int) {
- return fileDescriptor_3a7a8797f9808eee, []int{3}
+ return fileDescriptor_c3a07336710636a0, []int{3}
}
func (m *Packet) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -318,7 +318,7 @@ func (m *PacketState) Reset() { *m = PacketState{} }
func (m *PacketState) String() string { return proto.CompactTextString(m) }
func (*PacketState) ProtoMessage() {}
func (*PacketState) Descriptor() ([]byte, []int) {
- return fileDescriptor_3a7a8797f9808eee, []int{4}
+ return fileDescriptor_c3a07336710636a0, []int{4}
}
func (m *PacketState) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -367,7 +367,7 @@ func (m *Acknowledgement) Reset() { *m = Acknowledgement{} }
func (m *Acknowledgement) String() string { return proto.CompactTextString(m) }
func (*Acknowledgement) ProtoMessage() {}
func (*Acknowledgement) Descriptor() ([]byte, []int) {
- return fileDescriptor_3a7a8797f9808eee, []int{5}
+ return fileDescriptor_c3a07336710636a0, []int{5}
}
func (m *Acknowledgement) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -442,80 +442,77 @@ func (*Acknowledgement) XXX_OneofWrappers() []interface{} {
}
func init() {
- proto.RegisterEnum("ibcgo.core.channel.v1.State", State_name, State_value)
- proto.RegisterEnum("ibcgo.core.channel.v1.Order", Order_name, Order_value)
- proto.RegisterType((*Channel)(nil), "ibcgo.core.channel.v1.Channel")
- proto.RegisterType((*IdentifiedChannel)(nil), "ibcgo.core.channel.v1.IdentifiedChannel")
- proto.RegisterType((*Counterparty)(nil), "ibcgo.core.channel.v1.Counterparty")
- proto.RegisterType((*Packet)(nil), "ibcgo.core.channel.v1.Packet")
- proto.RegisterType((*PacketState)(nil), "ibcgo.core.channel.v1.PacketState")
- proto.RegisterType((*Acknowledgement)(nil), "ibcgo.core.channel.v1.Acknowledgement")
-}
-
-func init() {
- proto.RegisterFile("ibcgo/core/channel/v1/channel.proto", fileDescriptor_3a7a8797f9808eee)
-}
-
-var fileDescriptor_3a7a8797f9808eee = []byte{
- // 913 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x55, 0x3d, 0x6f, 0xe3, 0x46,
- 0x13, 0x16, 0x65, 0xea, 0x6b, 0x64, 0xc9, 0xf2, 0xbe, 0xaf, 0x75, 0x0c, 0xe3, 0x13, 0x15, 0x5e,
- 0x0a, 0xe3, 0x82, 0x93, 0x62, 0xe7, 0x70, 0x09, 0xae, 0x8a, 0x25, 0xf1, 0x60, 0x02, 0x17, 0x49,
- 0xa0, 0xe5, 0x22, 0xd7, 0x08, 0x14, 0xb9, 0x91, 0x88, 0x93, 0xb8, 0x0a, 0xb9, 0xb2, 0xe1, 0x32,
- 0xdd, 0x41, 0x55, 0xfe, 0x80, 0x80, 0x00, 0x01, 0xd2, 0xe4, 0x07, 0xe4, 0x2f, 0x5c, 0x79, 0x65,
- 0x2a, 0x22, 0xb0, 0xeb, 0x34, 0xfa, 0x05, 0x01, 0x77, 0x49, 0x7d, 0x5d, 0x3e, 0xba, 0x54, 0xa9,
- 0xb4, 0x33, 0xcf, 0x33, 0x33, 0xcf, 0xce, 0x8c, 0xb8, 0xf0, 0xc8, 0x19, 0x58, 0x43, 0x52, 0xb7,
- 0x88, 0x87, 0xeb, 0xd6, 0xc8, 0x74, 0x5d, 0x3c, 0xae, 0x5f, 0x9f, 0xc6, 0xc7, 0xda, 0xd4, 0x23,
- 0x94, 0xa0, 0x23, 0x46, 0xaa, 0x85, 0xa4, 0x5a, 0x8c, 0x5c, 0x9f, 0xca, 0xff, 0x1f, 0x92, 0x21,
- 0x61, 0x8c, 0x7a, 0x78, 0xe2, 0x64, 0xf9, 0xa3, 0xcd, 0x8c, 0x63, 0x07, 0xbb, 0x94, 0x25, 0x64,
- 0x27, 0x4e, 0x51, 0x7f, 0x4e, 0x42, 0xa6, 0xc9, 0xf3, 0xa0, 0x33, 0x48, 0xf9, 0xd4, 0xa4, 0x58,
- 0x12, 0xaa, 0xc2, 0x49, 0xf1, 0xec, 0xb8, 0xf6, 0xa7, 0xb5, 0x6a, 0x97, 0x21, 0xc7, 0xe0, 0x54,
- 0xf4, 0x05, 0x64, 0x89, 0x67, 0x63, 0xcf, 0x71, 0x87, 0x52, 0xf2, 0x6f, 0xc3, 0x3a, 0x21, 0xcd,
- 0x58, 0xb1, 0xd1, 0x57, 0xb0, 0x6f, 0x91, 0x99, 0x4b, 0xb1, 0x37, 0x35, 0x3d, 0x7a, 0x2b, 0xed,
- 0x55, 0x85, 0x93, 0xfc, 0xd9, 0xa3, 0xbf, 0x88, 0x6e, 0x6e, 0x50, 0x1b, 0xe2, 0xdb, 0x40, 0x49,
- 0x18, 0x5b, 0xe1, 0xa8, 0x09, 0x07, 0x16, 0x71, 0x5d, 0x6c, 0x51, 0x87, 0xb8, 0xfd, 0x11, 0x99,
- 0xfa, 0x92, 0x58, 0xdd, 0x3b, 0xc9, 0x35, 0xe4, 0x65, 0xa0, 0x94, 0x6f, 0xcd, 0xc9, 0xf8, 0xb9,
- 0xba, 0x43, 0x50, 0x8d, 0xe2, 0xda, 0x73, 0x41, 0xa6, 0x3e, 0x92, 0x20, 0x73, 0x8d, 0x3d, 0xdf,
- 0x21, 0xae, 0x94, 0xaa, 0x0a, 0x27, 0x39, 0x23, 0x36, 0x9f, 0x8b, 0x6f, 0x7e, 0x50, 0x12, 0xea,
- 0xef, 0x49, 0x38, 0xd4, 0x6d, 0xec, 0x52, 0xe7, 0x1b, 0x07, 0xdb, 0xff, 0xf5, 0xed, 0x1f, 0xfb,
- 0x86, 0x1e, 0x40, 0x66, 0x4a, 0x3c, 0xda, 0x77, 0x6c, 0x29, 0xcd, 0x90, 0x74, 0x68, 0xea, 0x36,
- 0x7a, 0x08, 0x10, 0xc9, 0x0c, 0xb1, 0x0c, 0xc3, 0x72, 0x91, 0x47, 0xb7, 0xa3, 0x7e, 0xdf, 0xc0,
- 0xfe, 0xe6, 0x05, 0xd0, 0x27, 0xeb, 0x6c, 0x61, 0xaf, 0x73, 0x0d, 0xb4, 0x0c, 0x94, 0x22, 0x17,
- 0x19, 0x01, 0xea, 0xaa, 0xc2, 0xd3, 0xad, 0x0a, 0x49, 0xc6, 0x3f, 0x5a, 0x06, 0xca, 0x61, 0x74,
- 0xa9, 0x15, 0xa6, 0xbe, 0x5f, 0xf8, 0x3b, 0x11, 0xd2, 0x5d, 0xd3, 0x7a, 0x8d, 0x29, 0x92, 0x21,
- 0xeb, 0xe3, 0x6f, 0x67, 0xd8, 0xb5, 0xf8, 0x80, 0x45, 0x63, 0x65, 0xa3, 0xcf, 0x21, 0xef, 0x93,
- 0x99, 0x67, 0xe1, 0x7e, 0x58, 0x33, 0xaa, 0x51, 0x5e, 0x06, 0x0a, 0xe2, 0x35, 0x36, 0x40, 0xd5,
- 0x00, 0x6e, 0x75, 0x89, 0x47, 0xd1, 0x97, 0x50, 0x8c, 0xb0, 0xa8, 0x32, 0x1b, 0x63, 0xae, 0xf1,
- 0xc1, 0x32, 0x50, 0x8e, 0xb6, 0x62, 0x23, 0x5c, 0x35, 0x0a, 0xdc, 0x11, 0x2f, 0xdd, 0x0b, 0x28,
- 0xd9, 0xd8, 0xa7, 0x8e, 0x6b, 0xb2, 0xb9, 0xb0, 0xfa, 0x22, 0xcb, 0xf1, 0xe1, 0x32, 0x50, 0x1e,
- 0xf0, 0x1c, 0xbb, 0x0c, 0xd5, 0x38, 0xd8, 0x70, 0x31, 0x25, 0x1d, 0xf8, 0xdf, 0x26, 0x2b, 0x96,
- 0xc3, 0xc6, 0xd8, 0xa8, 0x2c, 0x03, 0x45, 0x7e, 0x3f, 0xd5, 0x4a, 0x13, 0xda, 0xf0, 0xc6, 0xc2,
- 0x10, 0x88, 0xb6, 0x49, 0x4d, 0x36, 0xee, 0x7d, 0x83, 0x9d, 0xd1, 0x00, 0x8a, 0xd4, 0x99, 0x60,
- 0x32, 0xa3, 0xfd, 0x11, 0x76, 0x86, 0x23, 0xca, 0x06, 0x9e, 0xdf, 0xd9, 0x79, 0xfe, 0x5d, 0xba,
- 0x3e, 0xad, 0x5d, 0x30, 0x4e, 0xe3, 0x61, 0xb8, 0xae, 0xeb, 0x86, 0x6c, 0x67, 0x50, 0x8d, 0x42,
- 0xe4, 0xe0, 0x6c, 0xa4, 0xc3, 0x61, 0xcc, 0x08, 0x7f, 0x7d, 0x6a, 0x4e, 0xa6, 0x52, 0x36, 0x1c,
- 0x58, 0xe3, 0x78, 0x19, 0x28, 0xd2, 0x76, 0x92, 0x15, 0x45, 0x35, 0x4a, 0x91, 0xaf, 0x17, 0xbb,
- 0xa2, 0x1d, 0xf8, 0x49, 0x80, 0x3c, 0xdf, 0x01, 0xf6, 0xcf, 0xfd, 0x17, 0x96, 0x6f, 0x6b, 0xd7,
- 0xf6, 0x76, 0x76, 0x2d, 0xee, 0xab, 0xb8, 0xee, 0x6b, 0x24, 0xb4, 0x03, 0x07, 0xe7, 0xd6, 0x6b,
- 0x97, 0xdc, 0x8c, 0xb1, 0x3d, 0xc4, 0x13, 0xec, 0x52, 0x24, 0x41, 0xda, 0xc3, 0xfe, 0x6c, 0x4c,
- 0xa5, 0xa3, 0x90, 0x7e, 0x91, 0x30, 0x22, 0x1b, 0x95, 0x21, 0x85, 0x3d, 0x8f, 0x78, 0x52, 0x39,
- 0xd4, 0x74, 0x91, 0x30, 0xb8, 0xd9, 0x00, 0xc8, 0x7a, 0xd8, 0x9f, 0x12, 0xd7, 0xc7, 0x8f, 0x7f,
- 0x11, 0x20, 0xc5, 0xef, 0xfc, 0x0c, 0x94, 0xcb, 0xde, 0x79, 0x4f, 0xeb, 0x5f, 0xb5, 0xf5, 0xb6,
- 0xde, 0xd3, 0xcf, 0x5f, 0xea, 0xaf, 0xb4, 0x56, 0xff, 0xaa, 0x7d, 0xd9, 0xd5, 0x9a, 0xfa, 0x0b,
- 0x5d, 0x6b, 0x95, 0x12, 0xf2, 0xe1, 0x7c, 0x51, 0x2d, 0x6c, 0x11, 0x90, 0x04, 0xc0, 0xe3, 0x42,
- 0x67, 0x49, 0x90, 0xb3, 0xf3, 0x45, 0x55, 0x0c, 0xcf, 0xa8, 0x02, 0x05, 0x8e, 0xf4, 0x8c, 0xaf,
- 0x3b, 0x5d, 0xad, 0x5d, 0x4a, 0xca, 0xf9, 0xf9, 0xa2, 0x9a, 0x89, 0xcc, 0x75, 0x24, 0x03, 0xf7,
- 0x78, 0x24, 0x43, 0x8e, 0x61, 0x9f, 0x23, 0xcd, 0x97, 0x9d, 0x4b, 0xad, 0x55, 0x12, 0x65, 0x98,
- 0x2f, 0xaa, 0x69, 0x6e, 0xc9, 0xe2, 0x9b, 0x1f, 0x2b, 0x89, 0xc7, 0x37, 0x90, 0x62, 0xdf, 0x4b,
- 0xf4, 0x31, 0x94, 0x3b, 0x46, 0x4b, 0x33, 0xfa, 0xed, 0x4e, 0x5b, 0xdb, 0xd1, 0xcb, 0x52, 0x86,
- 0x7e, 0xa4, 0xc2, 0x01, 0x67, 0x5d, 0xb5, 0xd9, 0xaf, 0xd6, 0x2a, 0x09, 0x72, 0x61, 0xbe, 0xa8,
- 0xe6, 0x56, 0x8e, 0x50, 0x30, 0xe7, 0xc4, 0x8c, 0x48, 0x70, 0x64, 0xf2, 0xc2, 0x8d, 0xee, 0xdb,
- 0xbb, 0x8a, 0xf0, 0xee, 0xae, 0x22, 0xfc, 0x76, 0x57, 0x11, 0xbe, 0xbf, 0xaf, 0x24, 0xde, 0xdd,
- 0x57, 0x12, 0xbf, 0xde, 0x57, 0x12, 0xaf, 0x9e, 0x0d, 0x1d, 0x3a, 0x9a, 0x0d, 0x6a, 0x16, 0x99,
- 0xd4, 0x2d, 0xe2, 0x4f, 0x88, 0x5f, 0x77, 0x06, 0xd6, 0x93, 0x21, 0xa9, 0x4f, 0x88, 0x3d, 0x1b,
- 0x63, 0x9f, 0xbf, 0xcf, 0x9f, 0x3e, 0x7d, 0x12, 0x3f, 0xfa, 0xf4, 0x76, 0x8a, 0xfd, 0x41, 0x9a,
- 0x3d, 0xd0, 0x9f, 0xfd, 0x11, 0x00, 0x00, 0xff, 0xff, 0xe8, 0x56, 0x2b, 0x23, 0x17, 0x08, 0x00,
- 0x00,
+ proto.RegisterEnum("ibc.core.channel.v1.State", State_name, State_value)
+ proto.RegisterEnum("ibc.core.channel.v1.Order", Order_name, Order_value)
+ proto.RegisterType((*Channel)(nil), "ibc.core.channel.v1.Channel")
+ proto.RegisterType((*IdentifiedChannel)(nil), "ibc.core.channel.v1.IdentifiedChannel")
+ proto.RegisterType((*Counterparty)(nil), "ibc.core.channel.v1.Counterparty")
+ proto.RegisterType((*Packet)(nil), "ibc.core.channel.v1.Packet")
+ proto.RegisterType((*PacketState)(nil), "ibc.core.channel.v1.PacketState")
+ proto.RegisterType((*Acknowledgement)(nil), "ibc.core.channel.v1.Acknowledgement")
+}
+
+func init() { proto.RegisterFile("ibc/core/channel/v1/channel.proto", fileDescriptor_c3a07336710636a0) }
+
+var fileDescriptor_c3a07336710636a0 = []byte{
+ // 908 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x55, 0xcd, 0x8e, 0xda, 0x56,
+ 0x14, 0xc6, 0x60, 0xfe, 0x0e, 0x03, 0xc3, 0xdc, 0x34, 0xc4, 0x75, 0x13, 0x4c, 0xac, 0x2e, 0x46,
+ 0xa9, 0x02, 0x99, 0x34, 0x4a, 0xa5, 0xac, 0x3a, 0xfc, 0x44, 0x63, 0x35, 0x02, 0x64, 0x98, 0x45,
+ 0xb3, 0xa1, 0x60, 0xdf, 0x82, 0x15, 0xf0, 0xa5, 0xf6, 0x65, 0x46, 0xf3, 0x06, 0x11, 0xab, 0xbe,
+ 0x00, 0x52, 0xa5, 0xaa, 0x7d, 0x85, 0xbe, 0x42, 0x96, 0x59, 0x76, 0x65, 0x55, 0x33, 0x8b, 0xee,
+ 0x79, 0x81, 0x56, 0xbe, 0xf7, 0x9a, 0x9f, 0x49, 0x94, 0x65, 0x57, 0x5d, 0x71, 0xcf, 0xf7, 0x7d,
+ 0xe7, 0xc7, 0xe7, 0x1c, 0xee, 0x85, 0x87, 0xce, 0xc8, 0xaa, 0x59, 0xc4, 0xc3, 0x35, 0x6b, 0x32,
+ 0x74, 0x5d, 0x3c, 0xad, 0x5d, 0x9c, 0x44, 0xc7, 0xea, 0xdc, 0x23, 0x94, 0xa0, 0x3b, 0xce, 0xc8,
+ 0xaa, 0x86, 0x92, 0x6a, 0x84, 0x5f, 0x9c, 0xa8, 0x9f, 0x8d, 0xc9, 0x98, 0x30, 0xbe, 0x16, 0x9e,
+ 0xb8, 0x54, 0xd5, 0xb6, 0xd1, 0xa6, 0x0e, 0x76, 0x29, 0x0b, 0xc6, 0x4e, 0x5c, 0xa0, 0xff, 0x16,
+ 0x87, 0x74, 0x83, 0x47, 0x41, 0x4f, 0x20, 0xe9, 0xd3, 0x21, 0xc5, 0x8a, 0x54, 0x91, 0x8e, 0x0b,
+ 0x4f, 0xd5, 0xea, 0x47, 0xf2, 0x54, 0x7b, 0xa1, 0xc2, 0xe4, 0x42, 0xf4, 0x1c, 0x32, 0xc4, 0xb3,
+ 0xb1, 0xe7, 0xb8, 0x63, 0x25, 0xfe, 0x09, 0xa7, 0x4e, 0x28, 0x32, 0x37, 0x5a, 0xf4, 0x1d, 0x1c,
+ 0x58, 0x64, 0xe1, 0x52, 0xec, 0xcd, 0x87, 0x1e, 0xbd, 0x52, 0x12, 0x15, 0xe9, 0x38, 0xf7, 0xf4,
+ 0xe1, 0x47, 0x7d, 0x1b, 0x3b, 0xc2, 0xba, 0xfc, 0x2e, 0xd0, 0x62, 0xe6, 0x9e, 0x33, 0x6a, 0xc0,
+ 0xa1, 0x45, 0x5c, 0x17, 0x5b, 0xd4, 0x21, 0xee, 0x60, 0x42, 0xe6, 0xbe, 0x22, 0x57, 0x12, 0xc7,
+ 0xd9, 0xba, 0xba, 0x0e, 0xb4, 0xd2, 0xd5, 0x70, 0x36, 0x7d, 0xa1, 0xdf, 0x12, 0xe8, 0x66, 0x61,
+ 0x8b, 0x9c, 0x91, 0xb9, 0x8f, 0x14, 0x48, 0x5f, 0x60, 0xcf, 0x77, 0x88, 0xab, 0x24, 0x2b, 0xd2,
+ 0x71, 0xd6, 0x8c, 0xcc, 0x17, 0xf2, 0xdb, 0x5f, 0xb4, 0x98, 0xfe, 0x77, 0x1c, 0x8e, 0x0c, 0x1b,
+ 0xbb, 0xd4, 0xf9, 0xd1, 0xc1, 0xf6, 0xff, 0x1d, 0xfb, 0x44, 0xc7, 0xd0, 0x3d, 0x48, 0xcf, 0x89,
+ 0x47, 0x07, 0x8e, 0xad, 0xa4, 0x18, 0x93, 0x0a, 0x4d, 0xc3, 0x46, 0x0f, 0x00, 0x44, 0x99, 0x21,
+ 0x97, 0x66, 0x5c, 0x56, 0x20, 0x86, 0x2d, 0x3a, 0x7d, 0x09, 0x07, 0xbb, 0x1f, 0x80, 0xbe, 0xda,
+ 0x46, 0x0b, 0xbb, 0x9c, 0xad, 0xa3, 0x75, 0xa0, 0x15, 0x78, 0x91, 0x82, 0xd0, 0x37, 0x19, 0x9e,
+ 0xed, 0x65, 0x88, 0x33, 0xfd, 0xdd, 0x75, 0xa0, 0x1d, 0x89, 0x8f, 0xda, 0x70, 0xfa, 0x87, 0x89,
+ 0xff, 0x49, 0x40, 0xaa, 0x3b, 0xb4, 0xde, 0x60, 0x8a, 0x54, 0xc8, 0xf8, 0xf8, 0xa7, 0x05, 0x76,
+ 0x2d, 0x3e, 0x5a, 0xd9, 0xdc, 0xd8, 0xe8, 0x1b, 0xc8, 0xf9, 0x64, 0xe1, 0x59, 0x78, 0x10, 0xe6,
+ 0x14, 0x39, 0x4a, 0xeb, 0x40, 0x43, 0x3c, 0xc7, 0x0e, 0xa9, 0x9b, 0xc0, 0xad, 0x2e, 0xf1, 0x28,
+ 0xfa, 0x16, 0x0a, 0x82, 0x13, 0x99, 0xd9, 0x10, 0xb3, 0xf5, 0xcf, 0xd7, 0x81, 0x76, 0x77, 0xcf,
+ 0x57, 0xf0, 0xba, 0x99, 0xe7, 0x40, 0xb4, 0x6e, 0x2f, 0xa1, 0x68, 0x63, 0x9f, 0x3a, 0xee, 0x90,
+ 0xcd, 0x85, 0xe5, 0x97, 0x59, 0x8c, 0x2f, 0xd6, 0x81, 0x76, 0x8f, 0xc7, 0xb8, 0xad, 0xd0, 0xcd,
+ 0xc3, 0x1d, 0x88, 0x55, 0xd2, 0x81, 0x3b, 0xbb, 0xaa, 0xa8, 0x1c, 0x36, 0xc6, 0x7a, 0x79, 0x1d,
+ 0x68, 0xea, 0x87, 0xa1, 0x36, 0x35, 0xa1, 0x1d, 0x34, 0x2a, 0x0c, 0x81, 0x6c, 0x0f, 0xe9, 0x90,
+ 0x8d, 0xfb, 0xc0, 0x64, 0x67, 0xf4, 0x03, 0x14, 0xa8, 0x33, 0xc3, 0x64, 0x41, 0x07, 0x13, 0xec,
+ 0x8c, 0x27, 0x94, 0x0d, 0x3c, 0xb7, 0xb7, 0xef, 0xfc, 0x26, 0xba, 0x38, 0xa9, 0x9e, 0x31, 0x45,
+ 0xfd, 0x41, 0xb8, 0xac, 0xdb, 0x76, 0xec, 0xfb, 0xeb, 0x66, 0x5e, 0x00, 0x5c, 0x8d, 0x0c, 0x38,
+ 0x8a, 0x14, 0xe1, 0xaf, 0x4f, 0x87, 0xb3, 0xb9, 0x92, 0x09, 0xc7, 0x55, 0xbf, 0xbf, 0x0e, 0x34,
+ 0x65, 0x3f, 0xc8, 0x46, 0xa2, 0x9b, 0x45, 0x81, 0xf5, 0x23, 0x48, 0x6c, 0xc0, 0xef, 0x12, 0xe4,
+ 0xf8, 0x06, 0xb0, 0xff, 0xec, 0x7f, 0xb0, 0x7a, 0x7b, 0x9b, 0x96, 0xb8, 0xb5, 0x69, 0x51, 0x57,
+ 0xe5, 0x6d, 0x57, 0x45, 0xa1, 0x1d, 0x38, 0x3c, 0xb5, 0xde, 0xb8, 0xe4, 0x72, 0x8a, 0xed, 0x31,
+ 0x9e, 0x61, 0x97, 0x22, 0x05, 0x52, 0x1e, 0xf6, 0x17, 0x53, 0xaa, 0xdc, 0x0d, 0xe5, 0x67, 0x31,
+ 0x53, 0xd8, 0xa8, 0x04, 0x49, 0xec, 0x79, 0xc4, 0x53, 0x4a, 0x61, 0x4d, 0x67, 0x31, 0x93, 0x9b,
+ 0x75, 0x80, 0x8c, 0x87, 0xfd, 0x39, 0x71, 0x7d, 0xfc, 0xe8, 0x0f, 0x09, 0x92, 0x3d, 0x71, 0x41,
+ 0x69, 0xbd, 0xfe, 0x69, 0xbf, 0x35, 0x38, 0x6f, 0x1b, 0x6d, 0xa3, 0x6f, 0x9c, 0xbe, 0x32, 0x5e,
+ 0xb7, 0x9a, 0x83, 0xf3, 0x76, 0xaf, 0xdb, 0x6a, 0x18, 0x2f, 0x8d, 0x56, 0xb3, 0x18, 0x53, 0x8f,
+ 0x96, 0xab, 0x4a, 0x7e, 0x4f, 0x80, 0x14, 0x00, 0xee, 0x17, 0x82, 0x45, 0x49, 0xcd, 0x2c, 0x57,
+ 0x15, 0x39, 0x3c, 0xa3, 0x32, 0xe4, 0x39, 0xd3, 0x37, 0xbf, 0xef, 0x74, 0x5b, 0xed, 0x62, 0x5c,
+ 0xcd, 0x2d, 0x57, 0x95, 0xb4, 0x30, 0xb7, 0x9e, 0x8c, 0x4c, 0x70, 0x4f, 0xc6, 0xdc, 0x87, 0x03,
+ 0xce, 0x34, 0x5e, 0x75, 0x7a, 0xad, 0x66, 0x51, 0x56, 0x61, 0xb9, 0xaa, 0xa4, 0xb8, 0xa5, 0xca,
+ 0x6f, 0x7f, 0x2d, 0xc7, 0x1e, 0x5d, 0x42, 0x92, 0xdd, 0x95, 0xe8, 0x4b, 0x28, 0x75, 0xcc, 0x66,
+ 0xcb, 0x1c, 0xb4, 0x3b, 0xed, 0xd6, 0xad, 0x7a, 0x59, 0xc8, 0x10, 0x47, 0x3a, 0x1c, 0x72, 0xd5,
+ 0x79, 0x9b, 0xfd, 0xb6, 0x9a, 0x45, 0x49, 0xcd, 0x2f, 0x57, 0x95, 0xec, 0x06, 0x08, 0x0b, 0xe6,
+ 0x9a, 0x48, 0x21, 0x0a, 0x16, 0x26, 0x4f, 0x5c, 0xef, 0xbe, 0xbb, 0x2e, 0x4b, 0xef, 0xaf, 0xcb,
+ 0xd2, 0x5f, 0xd7, 0x65, 0xe9, 0xe7, 0x9b, 0x72, 0xec, 0xfd, 0x4d, 0x39, 0xf6, 0xe7, 0x4d, 0x39,
+ 0xf6, 0xfa, 0xf9, 0xd8, 0xa1, 0x93, 0xc5, 0xa8, 0x6a, 0x91, 0x59, 0xcd, 0x22, 0xfe, 0x8c, 0xf8,
+ 0x35, 0x67, 0x64, 0x3d, 0x1e, 0x93, 0xda, 0x8c, 0xd8, 0x8b, 0x29, 0xf6, 0xf9, 0x8b, 0xfc, 0xe4,
+ 0xd9, 0xe3, 0xe8, 0x89, 0xa7, 0x57, 0x73, 0xec, 0x8f, 0x52, 0xec, 0x49, 0xfe, 0xfa, 0xdf, 0x00,
+ 0x00, 0x00, 0xff, 0xff, 0xf8, 0x7f, 0x8d, 0x61, 0x03, 0x08, 0x00, 0x00,
}
func (m *Channel) Marshal() (dAtA []byte, err error) {
diff --git a/modules/core/04-channel/types/codec.go b/modules/core/04-channel/types/codec.go
index 7f8cc901..9c0ac65a 100644
--- a/modules/core/04-channel/types/codec.go
+++ b/modules/core/04-channel/types/codec.go
@@ -12,17 +12,17 @@ import (
// Any.
func RegisterInterfaces(registry codectypes.InterfaceRegistry) {
registry.RegisterInterface(
- "ibcgo.core.channel.v1.ChannelI",
+ "ibc.core.channel.v1.ChannelI",
(*exported.ChannelI)(nil),
&Channel{},
)
registry.RegisterInterface(
- "ibcgo.core.channel.v1.CounterpartyChannelI",
+ "ibc.core.channel.v1.CounterpartyChannelI",
(*exported.CounterpartyChannelI)(nil),
&Counterparty{},
)
registry.RegisterInterface(
- "ibcgo.core.channel.v1.PacketI",
+ "ibc.core.channel.v1.PacketI",
(*exported.PacketI)(nil),
&Packet{},
)
diff --git a/modules/core/04-channel/types/genesis.pb.go b/modules/core/04-channel/types/genesis.pb.go
index ddd254d7..c5b2e8a5 100644
--- a/modules/core/04-channel/types/genesis.pb.go
+++ b/modules/core/04-channel/types/genesis.pb.go
@@ -1,5 +1,5 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: ibcgo/core/channel/v1/genesis.proto
+// source: ibc/core/channel/v1/genesis.proto
package types
@@ -40,7 +40,7 @@ func (m *GenesisState) Reset() { *m = GenesisState{} }
func (m *GenesisState) String() string { return proto.CompactTextString(m) }
func (*GenesisState) ProtoMessage() {}
func (*GenesisState) Descriptor() ([]byte, []int) {
- return fileDescriptor_c4d4e081eaaab7c3, []int{0}
+ return fileDescriptor_cb06ec201f452595, []int{0}
}
func (m *GenesisState) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -137,7 +137,7 @@ func (m *PacketSequence) Reset() { *m = PacketSequence{} }
func (m *PacketSequence) String() string { return proto.CompactTextString(m) }
func (*PacketSequence) ProtoMessage() {}
func (*PacketSequence) Descriptor() ([]byte, []int) {
- return fileDescriptor_c4d4e081eaaab7c3, []int{1}
+ return fileDescriptor_cb06ec201f452595, []int{1}
}
func (m *PacketSequence) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -188,48 +188,46 @@ func (m *PacketSequence) GetSequence() uint64 {
}
func init() {
- proto.RegisterType((*GenesisState)(nil), "ibcgo.core.channel.v1.GenesisState")
- proto.RegisterType((*PacketSequence)(nil), "ibcgo.core.channel.v1.PacketSequence")
+ proto.RegisterType((*GenesisState)(nil), "ibc.core.channel.v1.GenesisState")
+ proto.RegisterType((*PacketSequence)(nil), "ibc.core.channel.v1.PacketSequence")
}
-func init() {
- proto.RegisterFile("ibcgo/core/channel/v1/genesis.proto", fileDescriptor_c4d4e081eaaab7c3)
-}
+func init() { proto.RegisterFile("ibc/core/channel/v1/genesis.proto", fileDescriptor_cb06ec201f452595) }
-var fileDescriptor_c4d4e081eaaab7c3 = []byte{
+var fileDescriptor_cb06ec201f452595 = []byte{
// 505 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x93, 0xcf, 0x6e, 0xd3, 0x30,
- 0x1c, 0x80, 0x9b, 0xb5, 0xeb, 0x3a, 0x6f, 0xad, 0x98, 0x59, 0xa5, 0x50, 0x8d, 0xa4, 0x32, 0x42,
- 0xaa, 0x84, 0x96, 0x30, 0x98, 0x38, 0x70, 0x0c, 0x48, 0xa8, 0x9c, 0xa6, 0xb0, 0x13, 0x97, 0x2a,
- 0x75, 0x7e, 0xa4, 0x56, 0x9a, 0xb8, 0xc4, 0x6e, 0x61, 0x4f, 0x01, 0x8f, 0xb5, 0xe3, 0x8e, 0x9c,
- 0x22, 0xd4, 0x3e, 0x01, 0x3d, 0x72, 0x42, 0xf9, 0xdb, 0x56, 0xdb, 0x90, 0xaa, 0xdd, 0x62, 0xfb,
- 0xf3, 0xf7, 0xfd, 0x0e, 0x31, 0x7a, 0xc6, 0x86, 0xd4, 0xe3, 0x26, 0xe5, 0x11, 0x98, 0x74, 0xe4,
- 0x84, 0x21, 0x8c, 0xcd, 0xd9, 0x99, 0xe9, 0x41, 0x08, 0x82, 0x09, 0x63, 0x12, 0x71, 0xc9, 0x71,
- 0x3b, 0x85, 0x8c, 0x04, 0x32, 0x72, 0xc8, 0x98, 0x9d, 0x75, 0x8e, 0x3d, 0xee, 0xf1, 0x94, 0x30,
- 0x93, 0xaf, 0x0c, 0xee, 0xdc, 0x63, 0x2c, 0xee, 0xa5, 0x10, 0xf9, 0xb3, 0x8b, 0x0e, 0x3f, 0x64,
- 0x8d, 0x4f, 0xd2, 0x91, 0x80, 0x07, 0xa8, 0x91, 0x13, 0x42, 0x55, 0xba, 0xd5, 0xde, 0xc1, 0xab,
- 0x9e, 0x71, 0x67, 0xd5, 0xe8, 0xbb, 0x10, 0x4a, 0xf6, 0x85, 0x81, 0xfb, 0x2e, 0xdb, 0xb4, 0x9e,
- 0x5c, 0xc7, 0x7a, 0xe5, 0x6f, 0xac, 0x1f, 0xdd, 0x3a, 0xb2, 0x4b, 0x29, 0xbe, 0x44, 0x8f, 0x1c,
- 0xea, 0x87, 0xfc, 0xdb, 0x18, 0x5c, 0x0f, 0x02, 0x08, 0xa5, 0x50, 0x77, 0xd2, 0x10, 0xb9, 0x27,
- 0x74, 0xe1, 0x50, 0x1f, 0x64, 0x3a, 0x9e, 0x55, 0x4b, 0x12, 0xf6, 0x2d, 0x03, 0xfe, 0x88, 0x0e,
- 0x28, 0x0f, 0x02, 0x26, 0x33, 0x61, 0x75, 0x4b, 0xe1, 0xfa, 0x65, 0xfc, 0x1e, 0x35, 0x22, 0xa0,
- 0xc0, 0x26, 0x52, 0xa8, 0xb5, 0x2d, 0x45, 0xe5, 0x4d, 0xec, 0xa3, 0x96, 0x80, 0xd0, 0x1d, 0x08,
- 0xf8, 0x3a, 0x85, 0x90, 0x82, 0x50, 0x77, 0x53, 0xd7, 0xf3, 0xff, 0xbb, 0x72, 0xda, 0x7a, 0x9a,
- 0xe8, 0x96, 0xb1, 0xde, 0xbe, 0x72, 0x82, 0xf1, 0x5b, 0xb2, 0xa9, 0x22, 0x76, 0x33, 0xd9, 0x28,
- 0xe0, 0x34, 0x16, 0x01, 0x9d, 0xad, 0xc5, 0xea, 0x0f, 0x88, 0x6d, 0xaa, 0x88, 0xdd, 0x4c, 0x36,
- 0x56, 0xb1, 0x11, 0x6a, 0x3a, 0xd4, 0x5f, 0x6b, 0xed, 0x6d, 0xd3, 0x3a, 0xc9, 0x5b, 0xc7, 0x59,
- 0x6b, 0xc3, 0x44, 0xec, 0x43, 0x87, 0xfa, 0xab, 0xd2, 0x25, 0x6a, 0x87, 0xf0, 0x5d, 0x0e, 0x72,
- 0x5b, 0x09, 0xaa, 0x8d, 0xae, 0xd2, 0xab, 0x59, 0xdd, 0x65, 0xac, 0x9f, 0x64, 0x9a, 0x3b, 0x31,
- 0x62, 0x3f, 0x4e, 0xf6, 0xf3, 0x7f, 0xb0, 0xd0, 0x92, 0x1f, 0x0a, 0x6a, 0x6d, 0x0e, 0x85, 0x5f,
- 0xa0, 0xbd, 0x09, 0x8f, 0xe4, 0x80, 0xb9, 0xaa, 0xd2, 0x55, 0x7a, 0xfb, 0x16, 0x5e, 0xc6, 0x7a,
- 0x2b, 0x53, 0xe7, 0x07, 0xc4, 0xae, 0x27, 0x5f, 0x7d, 0x17, 0x9f, 0x23, 0x54, 0x94, 0x98, 0xab,
- 0xee, 0xa4, 0x7c, 0x7b, 0x19, 0xeb, 0x47, 0x19, 0xbf, 0x3a, 0x23, 0xf6, 0x7e, 0xbe, 0xe8, 0xbb,
- 0xb8, 0x83, 0x1a, 0xe5, 0xf8, 0xd5, 0x64, 0x7c, 0xbb, 0x5c, 0x5b, 0x17, 0xd7, 0x73, 0x4d, 0xb9,
- 0x99, 0x6b, 0xca, 0xef, 0xb9, 0xa6, 0xfc, 0x5c, 0x68, 0x95, 0x9b, 0x85, 0x56, 0xf9, 0xb5, 0xd0,
- 0x2a, 0x9f, 0xdf, 0x78, 0x4c, 0x8e, 0xa6, 0x43, 0x83, 0xf2, 0xc0, 0xa4, 0x5c, 0x04, 0x5c, 0x98,
- 0x6c, 0x48, 0x4f, 0x3d, 0x6e, 0x06, 0xdc, 0x9d, 0x8e, 0x41, 0x64, 0xef, 0xfb, 0xe5, 0xf9, 0x69,
- 0xf1, 0xc4, 0xe5, 0xd5, 0x04, 0xc4, 0xb0, 0x9e, 0x3e, 0xef, 0xd7, 0xff, 0x02, 0x00, 0x00, 0xff,
- 0xff, 0xd6, 0x46, 0x49, 0x69, 0x57, 0x04, 0x00, 0x00,
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x93, 0x4f, 0x6f, 0xd3, 0x4c,
+ 0x10, 0x87, 0xe3, 0x26, 0x4d, 0xd3, 0x6d, 0x13, 0xbd, 0xdd, 0x36, 0x92, 0xdf, 0xa8, 0xd8, 0xc6,
+ 0x48, 0x28, 0x12, 0xaa, 0x4d, 0xa1, 0xe2, 0xc0, 0xd1, 0x1c, 0x20, 0xb7, 0x6a, 0xe1, 0x84, 0x84,
+ 0x22, 0x7b, 0x3d, 0x75, 0x57, 0x89, 0xbd, 0xc1, 0xbb, 0x09, 0xf4, 0x53, 0xc0, 0xc7, 0xea, 0xb1,
+ 0x47, 0x4e, 0x16, 0x4a, 0xbe, 0x41, 0x8e, 0x9c, 0x90, 0xff, 0x26, 0x51, 0x23, 0x44, 0xb9, 0x79,
+ 0x67, 0x7e, 0xf3, 0x3c, 0x73, 0xf0, 0xa0, 0xc7, 0xcc, 0xa3, 0x36, 0xe5, 0x31, 0xd8, 0xf4, 0xda,
+ 0x8d, 0x22, 0x18, 0xdb, 0xb3, 0x73, 0x3b, 0x80, 0x08, 0x04, 0x13, 0xd6, 0x24, 0xe6, 0x92, 0xe3,
+ 0x63, 0xe6, 0x51, 0x2b, 0x8d, 0x58, 0x45, 0xc4, 0x9a, 0x9d, 0xf7, 0x4e, 0x02, 0x1e, 0xf0, 0xac,
+ 0x6f, 0xa7, 0x5f, 0x79, 0xb4, 0xb7, 0x95, 0x56, 0x4e, 0x65, 0x11, 0x73, 0xb1, 0x8b, 0x0e, 0xdf,
+ 0xe6, 0xfc, 0xf7, 0xd2, 0x95, 0x80, 0x3f, 0xa1, 0x56, 0x91, 0x10, 0xaa, 0x62, 0xd4, 0xfb, 0x07,
+ 0x2f, 0x9e, 0x5a, 0x5b, 0x8c, 0xd6, 0xc0, 0x87, 0x48, 0xb2, 0x2b, 0x06, 0xfe, 0x9b, 0xbc, 0xe8,
+ 0xfc, 0x7f, 0x9b, 0xe8, 0xb5, 0x5f, 0x89, 0x7e, 0x74, 0xaf, 0x45, 0x2a, 0x24, 0x26, 0xe8, 0x3f,
+ 0x97, 0x8e, 0x22, 0xfe, 0x65, 0x0c, 0x7e, 0x00, 0x21, 0x44, 0x52, 0xa8, 0x3b, 0x99, 0xc6, 0xd8,
+ 0xaa, 0xb9, 0x74, 0xe9, 0x08, 0x64, 0xb6, 0x9a, 0xd3, 0x48, 0x05, 0xe4, 0xde, 0x3c, 0x7e, 0x87,
+ 0x0e, 0x28, 0x0f, 0x43, 0x26, 0x73, 0x5c, 0xfd, 0x41, 0xb8, 0xf5, 0x51, 0xec, 0xa0, 0x56, 0x0c,
+ 0x14, 0xd8, 0x44, 0x0a, 0xb5, 0xf1, 0x20, 0x4c, 0x35, 0x87, 0x19, 0xea, 0x08, 0x88, 0xfc, 0xa1,
+ 0x80, 0xcf, 0x53, 0x88, 0x28, 0x08, 0x75, 0x37, 0x23, 0x3d, 0xf9, 0x13, 0xa9, 0xc8, 0x3a, 0x8f,
+ 0x52, 0xd8, 0x32, 0xd1, 0xbb, 0x37, 0x6e, 0x38, 0x7e, 0x6d, 0x6e, 0x82, 0x4c, 0xd2, 0x4e, 0x0b,
+ 0x65, 0x38, 0x53, 0xc5, 0x40, 0x67, 0x6b, 0xaa, 0xe6, 0x3f, 0xab, 0x36, 0x41, 0x26, 0x69, 0xa7,
+ 0x85, 0x95, 0xea, 0x0a, 0xb5, 0x5d, 0x3a, 0x5a, 0x33, 0xed, 0xfd, 0xbd, 0xe9, 0xb4, 0x30, 0x9d,
+ 0xe4, 0xa6, 0x0d, 0x8e, 0x49, 0x0e, 0x5d, 0x3a, 0x5a, 0x79, 0x3e, 0xa0, 0x6e, 0x04, 0x5f, 0xe5,
+ 0xb0, 0xa0, 0x55, 0x41, 0xb5, 0x65, 0x28, 0xfd, 0x86, 0x63, 0x2c, 0x13, 0xfd, 0x34, 0xc7, 0x6c,
+ 0x8d, 0x99, 0xe4, 0x38, 0xad, 0x17, 0xff, 0x5d, 0x89, 0x35, 0xbf, 0x29, 0xa8, 0xb3, 0xb9, 0x14,
+ 0x7e, 0x86, 0xf6, 0x26, 0x3c, 0x96, 0x43, 0xe6, 0xab, 0x8a, 0xa1, 0xf4, 0xf7, 0x1d, 0xbc, 0x4c,
+ 0xf4, 0x4e, 0x8e, 0x2e, 0x1a, 0x26, 0x69, 0xa6, 0x5f, 0x03, 0x1f, 0x5f, 0x20, 0x54, 0x9a, 0x98,
+ 0xaf, 0xee, 0x64, 0xf9, 0xee, 0x32, 0xd1, 0x8f, 0xf2, 0xfc, 0xaa, 0x67, 0x92, 0xfd, 0xe2, 0x31,
+ 0xf0, 0x71, 0x0f, 0xb5, 0xaa, 0xf5, 0xeb, 0xe9, 0xfa, 0xa4, 0x7a, 0x3b, 0x97, 0xb7, 0x73, 0x4d,
+ 0xb9, 0x9b, 0x6b, 0xca, 0xcf, 0xb9, 0xa6, 0x7c, 0x5f, 0x68, 0xb5, 0xbb, 0x85, 0x56, 0xfb, 0xb1,
+ 0xd0, 0x6a, 0x1f, 0x5f, 0x05, 0x4c, 0x5e, 0x4f, 0x3d, 0x8b, 0xf2, 0xd0, 0xa6, 0x5c, 0x84, 0x5c,
+ 0xd8, 0xcc, 0xa3, 0x67, 0x01, 0xb7, 0x43, 0xee, 0x4f, 0xc7, 0x20, 0xf2, 0x8b, 0x7e, 0x7e, 0x71,
+ 0x56, 0x1e, 0xb5, 0xbc, 0x99, 0x80, 0xf0, 0x9a, 0xd9, 0x41, 0xbf, 0xfc, 0x1d, 0x00, 0x00, 0xff,
+ 0xff, 0x14, 0xd3, 0x45, 0x43, 0x43, 0x04, 0x00, 0x00,
}
func (m *GenesisState) Marshal() (dAtA []byte, err error) {
diff --git a/modules/core/04-channel/types/query.pb.go b/modules/core/04-channel/types/query.pb.go
index 07c825a1..a5e23569 100644
--- a/modules/core/04-channel/types/query.pb.go
+++ b/modules/core/04-channel/types/query.pb.go
@@ -1,5 +1,5 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: ibcgo/core/channel/v1/query.proto
+// source: ibc/core/channel/v1/query.proto
package types
@@ -44,7 +44,7 @@ func (m *QueryChannelRequest) Reset() { *m = QueryChannelRequest{} }
func (m *QueryChannelRequest) String() string { return proto.CompactTextString(m) }
func (*QueryChannelRequest) ProtoMessage() {}
func (*QueryChannelRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_3acdacc9aeb4fa50, []int{0}
+ return fileDescriptor_1034a1e9abc4cca1, []int{0}
}
func (m *QueryChannelRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -103,7 +103,7 @@ func (m *QueryChannelResponse) Reset() { *m = QueryChannelResponse{} }
func (m *QueryChannelResponse) String() string { return proto.CompactTextString(m) }
func (*QueryChannelResponse) ProtoMessage() {}
func (*QueryChannelResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_3acdacc9aeb4fa50, []int{1}
+ return fileDescriptor_1034a1e9abc4cca1, []int{1}
}
func (m *QueryChannelResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -163,7 +163,7 @@ func (m *QueryChannelsRequest) Reset() { *m = QueryChannelsRequest{} }
func (m *QueryChannelsRequest) String() string { return proto.CompactTextString(m) }
func (*QueryChannelsRequest) ProtoMessage() {}
func (*QueryChannelsRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_3acdacc9aeb4fa50, []int{2}
+ return fileDescriptor_1034a1e9abc4cca1, []int{2}
}
func (m *QueryChannelsRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -213,7 +213,7 @@ func (m *QueryChannelsResponse) Reset() { *m = QueryChannelsResponse{} }
func (m *QueryChannelsResponse) String() string { return proto.CompactTextString(m) }
func (*QueryChannelsResponse) ProtoMessage() {}
func (*QueryChannelsResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_3acdacc9aeb4fa50, []int{3}
+ return fileDescriptor_1034a1e9abc4cca1, []int{3}
}
func (m *QueryChannelsResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -276,7 +276,7 @@ func (m *QueryConnectionChannelsRequest) Reset() { *m = QueryConnectionC
func (m *QueryConnectionChannelsRequest) String() string { return proto.CompactTextString(m) }
func (*QueryConnectionChannelsRequest) ProtoMessage() {}
func (*QueryConnectionChannelsRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_3acdacc9aeb4fa50, []int{4}
+ return fileDescriptor_1034a1e9abc4cca1, []int{4}
}
func (m *QueryConnectionChannelsRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -334,7 +334,7 @@ func (m *QueryConnectionChannelsResponse) Reset() { *m = QueryConnection
func (m *QueryConnectionChannelsResponse) String() string { return proto.CompactTextString(m) }
func (*QueryConnectionChannelsResponse) ProtoMessage() {}
func (*QueryConnectionChannelsResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_3acdacc9aeb4fa50, []int{5}
+ return fileDescriptor_1034a1e9abc4cca1, []int{5}
}
func (m *QueryConnectionChannelsResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -397,7 +397,7 @@ func (m *QueryChannelClientStateRequest) Reset() { *m = QueryChannelClie
func (m *QueryChannelClientStateRequest) String() string { return proto.CompactTextString(m) }
func (*QueryChannelClientStateRequest) ProtoMessage() {}
func (*QueryChannelClientStateRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_3acdacc9aeb4fa50, []int{6}
+ return fileDescriptor_1034a1e9abc4cca1, []int{6}
}
func (m *QueryChannelClientStateRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -455,7 +455,7 @@ func (m *QueryChannelClientStateResponse) Reset() { *m = QueryChannelCli
func (m *QueryChannelClientStateResponse) String() string { return proto.CompactTextString(m) }
func (*QueryChannelClientStateResponse) ProtoMessage() {}
func (*QueryChannelClientStateResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_3acdacc9aeb4fa50, []int{7}
+ return fileDescriptor_1034a1e9abc4cca1, []int{7}
}
func (m *QueryChannelClientStateResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -522,7 +522,7 @@ func (m *QueryChannelConsensusStateRequest) Reset() { *m = QueryChannelC
func (m *QueryChannelConsensusStateRequest) String() string { return proto.CompactTextString(m) }
func (*QueryChannelConsensusStateRequest) ProtoMessage() {}
func (*QueryChannelConsensusStateRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_3acdacc9aeb4fa50, []int{8}
+ return fileDescriptor_1034a1e9abc4cca1, []int{8}
}
func (m *QueryChannelConsensusStateRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -596,7 +596,7 @@ func (m *QueryChannelConsensusStateResponse) Reset() { *m = QueryChannel
func (m *QueryChannelConsensusStateResponse) String() string { return proto.CompactTextString(m) }
func (*QueryChannelConsensusStateResponse) ProtoMessage() {}
func (*QueryChannelConsensusStateResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_3acdacc9aeb4fa50, []int{9}
+ return fileDescriptor_1034a1e9abc4cca1, []int{9}
}
func (m *QueryChannelConsensusStateResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -668,7 +668,7 @@ func (m *QueryPacketCommitmentRequest) Reset() { *m = QueryPacketCommitm
func (m *QueryPacketCommitmentRequest) String() string { return proto.CompactTextString(m) }
func (*QueryPacketCommitmentRequest) ProtoMessage() {}
func (*QueryPacketCommitmentRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_3acdacc9aeb4fa50, []int{10}
+ return fileDescriptor_1034a1e9abc4cca1, []int{10}
}
func (m *QueryPacketCommitmentRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -734,7 +734,7 @@ func (m *QueryPacketCommitmentResponse) Reset() { *m = QueryPacketCommit
func (m *QueryPacketCommitmentResponse) String() string { return proto.CompactTextString(m) }
func (*QueryPacketCommitmentResponse) ProtoMessage() {}
func (*QueryPacketCommitmentResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_3acdacc9aeb4fa50, []int{11}
+ return fileDescriptor_1034a1e9abc4cca1, []int{11}
}
func (m *QueryPacketCommitmentResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -799,7 +799,7 @@ func (m *QueryPacketCommitmentsRequest) Reset() { *m = QueryPacketCommit
func (m *QueryPacketCommitmentsRequest) String() string { return proto.CompactTextString(m) }
func (*QueryPacketCommitmentsRequest) ProtoMessage() {}
func (*QueryPacketCommitmentsRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_3acdacc9aeb4fa50, []int{12}
+ return fileDescriptor_1034a1e9abc4cca1, []int{12}
}
func (m *QueryPacketCommitmentsRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -863,7 +863,7 @@ func (m *QueryPacketCommitmentsResponse) Reset() { *m = QueryPacketCommi
func (m *QueryPacketCommitmentsResponse) String() string { return proto.CompactTextString(m) }
func (*QueryPacketCommitmentsResponse) ProtoMessage() {}
func (*QueryPacketCommitmentsResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_3acdacc9aeb4fa50, []int{13}
+ return fileDescriptor_1034a1e9abc4cca1, []int{13}
}
func (m *QueryPacketCommitmentsResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -928,7 +928,7 @@ func (m *QueryPacketReceiptRequest) Reset() { *m = QueryPacketReceiptReq
func (m *QueryPacketReceiptRequest) String() string { return proto.CompactTextString(m) }
func (*QueryPacketReceiptRequest) ProtoMessage() {}
func (*QueryPacketReceiptRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_3acdacc9aeb4fa50, []int{14}
+ return fileDescriptor_1034a1e9abc4cca1, []int{14}
}
func (m *QueryPacketReceiptRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -994,7 +994,7 @@ func (m *QueryPacketReceiptResponse) Reset() { *m = QueryPacketReceiptRe
func (m *QueryPacketReceiptResponse) String() string { return proto.CompactTextString(m) }
func (*QueryPacketReceiptResponse) ProtoMessage() {}
func (*QueryPacketReceiptResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_3acdacc9aeb4fa50, []int{15}
+ return fileDescriptor_1034a1e9abc4cca1, []int{15}
}
func (m *QueryPacketReceiptResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1059,7 +1059,7 @@ func (m *QueryPacketAcknowledgementRequest) Reset() { *m = QueryPacketAc
func (m *QueryPacketAcknowledgementRequest) String() string { return proto.CompactTextString(m) }
func (*QueryPacketAcknowledgementRequest) ProtoMessage() {}
func (*QueryPacketAcknowledgementRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_3acdacc9aeb4fa50, []int{16}
+ return fileDescriptor_1034a1e9abc4cca1, []int{16}
}
func (m *QueryPacketAcknowledgementRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1125,7 +1125,7 @@ func (m *QueryPacketAcknowledgementResponse) Reset() { *m = QueryPacketA
func (m *QueryPacketAcknowledgementResponse) String() string { return proto.CompactTextString(m) }
func (*QueryPacketAcknowledgementResponse) ProtoMessage() {}
func (*QueryPacketAcknowledgementResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_3acdacc9aeb4fa50, []int{17}
+ return fileDescriptor_1034a1e9abc4cca1, []int{17}
}
func (m *QueryPacketAcknowledgementResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1190,7 +1190,7 @@ func (m *QueryPacketAcknowledgementsRequest) Reset() { *m = QueryPacketA
func (m *QueryPacketAcknowledgementsRequest) String() string { return proto.CompactTextString(m) }
func (*QueryPacketAcknowledgementsRequest) ProtoMessage() {}
func (*QueryPacketAcknowledgementsRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_3acdacc9aeb4fa50, []int{18}
+ return fileDescriptor_1034a1e9abc4cca1, []int{18}
}
func (m *QueryPacketAcknowledgementsRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1254,7 +1254,7 @@ func (m *QueryPacketAcknowledgementsResponse) Reset() { *m = QueryPacket
func (m *QueryPacketAcknowledgementsResponse) String() string { return proto.CompactTextString(m) }
func (*QueryPacketAcknowledgementsResponse) ProtoMessage() {}
func (*QueryPacketAcknowledgementsResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_3acdacc9aeb4fa50, []int{19}
+ return fileDescriptor_1034a1e9abc4cca1, []int{19}
}
func (m *QueryPacketAcknowledgementsResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1319,7 +1319,7 @@ func (m *QueryUnreceivedPacketsRequest) Reset() { *m = QueryUnreceivedPa
func (m *QueryUnreceivedPacketsRequest) String() string { return proto.CompactTextString(m) }
func (*QueryUnreceivedPacketsRequest) ProtoMessage() {}
func (*QueryUnreceivedPacketsRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_3acdacc9aeb4fa50, []int{20}
+ return fileDescriptor_1034a1e9abc4cca1, []int{20}
}
func (m *QueryUnreceivedPacketsRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1382,7 +1382,7 @@ func (m *QueryUnreceivedPacketsResponse) Reset() { *m = QueryUnreceivedP
func (m *QueryUnreceivedPacketsResponse) String() string { return proto.CompactTextString(m) }
func (*QueryUnreceivedPacketsResponse) ProtoMessage() {}
func (*QueryUnreceivedPacketsResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_3acdacc9aeb4fa50, []int{21}
+ return fileDescriptor_1034a1e9abc4cca1, []int{21}
}
func (m *QueryUnreceivedPacketsResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1440,7 +1440,7 @@ func (m *QueryUnreceivedAcksRequest) Reset() { *m = QueryUnreceivedAcksR
func (m *QueryUnreceivedAcksRequest) String() string { return proto.CompactTextString(m) }
func (*QueryUnreceivedAcksRequest) ProtoMessage() {}
func (*QueryUnreceivedAcksRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_3acdacc9aeb4fa50, []int{22}
+ return fileDescriptor_1034a1e9abc4cca1, []int{22}
}
func (m *QueryUnreceivedAcksRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1503,7 +1503,7 @@ func (m *QueryUnreceivedAcksResponse) Reset() { *m = QueryUnreceivedAcks
func (m *QueryUnreceivedAcksResponse) String() string { return proto.CompactTextString(m) }
func (*QueryUnreceivedAcksResponse) ProtoMessage() {}
func (*QueryUnreceivedAcksResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_3acdacc9aeb4fa50, []int{23}
+ return fileDescriptor_1034a1e9abc4cca1, []int{23}
}
func (m *QueryUnreceivedAcksResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1559,7 +1559,7 @@ func (m *QueryNextSequenceReceiveRequest) Reset() { *m = QueryNextSequen
func (m *QueryNextSequenceReceiveRequest) String() string { return proto.CompactTextString(m) }
func (*QueryNextSequenceReceiveRequest) ProtoMessage() {}
func (*QueryNextSequenceReceiveRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_3acdacc9aeb4fa50, []int{24}
+ return fileDescriptor_1034a1e9abc4cca1, []int{24}
}
func (m *QueryNextSequenceReceiveRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1617,7 +1617,7 @@ func (m *QueryNextSequenceReceiveResponse) Reset() { *m = QueryNextSeque
func (m *QueryNextSequenceReceiveResponse) String() string { return proto.CompactTextString(m) }
func (*QueryNextSequenceReceiveResponse) ProtoMessage() {}
func (*QueryNextSequenceReceiveResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_3acdacc9aeb4fa50, []int{25}
+ return fileDescriptor_1034a1e9abc4cca1, []int{25}
}
func (m *QueryNextSequenceReceiveResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1668,132 +1668,131 @@ func (m *QueryNextSequenceReceiveResponse) GetProofHeight() types.Height {
}
func init() {
- proto.RegisterType((*QueryChannelRequest)(nil), "ibcgo.core.channel.v1.QueryChannelRequest")
- proto.RegisterType((*QueryChannelResponse)(nil), "ibcgo.core.channel.v1.QueryChannelResponse")
- proto.RegisterType((*QueryChannelsRequest)(nil), "ibcgo.core.channel.v1.QueryChannelsRequest")
- proto.RegisterType((*QueryChannelsResponse)(nil), "ibcgo.core.channel.v1.QueryChannelsResponse")
- proto.RegisterType((*QueryConnectionChannelsRequest)(nil), "ibcgo.core.channel.v1.QueryConnectionChannelsRequest")
- proto.RegisterType((*QueryConnectionChannelsResponse)(nil), "ibcgo.core.channel.v1.QueryConnectionChannelsResponse")
- proto.RegisterType((*QueryChannelClientStateRequest)(nil), "ibcgo.core.channel.v1.QueryChannelClientStateRequest")
- proto.RegisterType((*QueryChannelClientStateResponse)(nil), "ibcgo.core.channel.v1.QueryChannelClientStateResponse")
- proto.RegisterType((*QueryChannelConsensusStateRequest)(nil), "ibcgo.core.channel.v1.QueryChannelConsensusStateRequest")
- proto.RegisterType((*QueryChannelConsensusStateResponse)(nil), "ibcgo.core.channel.v1.QueryChannelConsensusStateResponse")
- proto.RegisterType((*QueryPacketCommitmentRequest)(nil), "ibcgo.core.channel.v1.QueryPacketCommitmentRequest")
- proto.RegisterType((*QueryPacketCommitmentResponse)(nil), "ibcgo.core.channel.v1.QueryPacketCommitmentResponse")
- proto.RegisterType((*QueryPacketCommitmentsRequest)(nil), "ibcgo.core.channel.v1.QueryPacketCommitmentsRequest")
- proto.RegisterType((*QueryPacketCommitmentsResponse)(nil), "ibcgo.core.channel.v1.QueryPacketCommitmentsResponse")
- proto.RegisterType((*QueryPacketReceiptRequest)(nil), "ibcgo.core.channel.v1.QueryPacketReceiptRequest")
- proto.RegisterType((*QueryPacketReceiptResponse)(nil), "ibcgo.core.channel.v1.QueryPacketReceiptResponse")
- proto.RegisterType((*QueryPacketAcknowledgementRequest)(nil), "ibcgo.core.channel.v1.QueryPacketAcknowledgementRequest")
- proto.RegisterType((*QueryPacketAcknowledgementResponse)(nil), "ibcgo.core.channel.v1.QueryPacketAcknowledgementResponse")
- proto.RegisterType((*QueryPacketAcknowledgementsRequest)(nil), "ibcgo.core.channel.v1.QueryPacketAcknowledgementsRequest")
- proto.RegisterType((*QueryPacketAcknowledgementsResponse)(nil), "ibcgo.core.channel.v1.QueryPacketAcknowledgementsResponse")
- proto.RegisterType((*QueryUnreceivedPacketsRequest)(nil), "ibcgo.core.channel.v1.QueryUnreceivedPacketsRequest")
- proto.RegisterType((*QueryUnreceivedPacketsResponse)(nil), "ibcgo.core.channel.v1.QueryUnreceivedPacketsResponse")
- proto.RegisterType((*QueryUnreceivedAcksRequest)(nil), "ibcgo.core.channel.v1.QueryUnreceivedAcksRequest")
- proto.RegisterType((*QueryUnreceivedAcksResponse)(nil), "ibcgo.core.channel.v1.QueryUnreceivedAcksResponse")
- proto.RegisterType((*QueryNextSequenceReceiveRequest)(nil), "ibcgo.core.channel.v1.QueryNextSequenceReceiveRequest")
- proto.RegisterType((*QueryNextSequenceReceiveResponse)(nil), "ibcgo.core.channel.v1.QueryNextSequenceReceiveResponse")
-}
-
-func init() { proto.RegisterFile("ibcgo/core/channel/v1/query.proto", fileDescriptor_3acdacc9aeb4fa50) }
-
-var fileDescriptor_3acdacc9aeb4fa50 = []byte{
- // 1495 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x59, 0xdf, 0x6f, 0x14, 0xd5,
- 0x17, 0xef, 0xdd, 0x16, 0x68, 0x4f, 0xf9, 0xf2, 0xe3, 0xd2, 0x42, 0x19, 0xca, 0x52, 0x96, 0x7c,
- 0xa5, 0x01, 0x99, 0x61, 0xcb, 0x0f, 0x91, 0x28, 0x09, 0xa0, 0x42, 0x4d, 0x80, 0x32, 0x48, 0x04,
- 0x12, 0x5d, 0x67, 0x67, 0x2f, 0xdb, 0x49, 0xbb, 0x73, 0x87, 0x9d, 0xd9, 0x05, 0xac, 0x6b, 0x8c,
- 0x0f, 0x6a, 0x8c, 0x0f, 0x26, 0x46, 0x7d, 0x31, 0xf1, 0xc5, 0xf8, 0x62, 0x78, 0xf2, 0x0f, 0xf0,
- 0xc1, 0x17, 0x1e, 0x49, 0x30, 0x91, 0x17, 0x7f, 0xa4, 0x35, 0xf1, 0x01, 0x5f, 0x7d, 0xf1, 0xc9,
- 0xcc, 0xfd, 0x31, 0x3b, 0xb3, 0x3b, 0x33, 0xed, 0x76, 0xbb, 0x49, 0xe3, 0xdb, 0xcc, 0x9d, 0x7b,
- 0xce, 0xfd, 0x7c, 0x3e, 0xe7, 0x9e, 0xd3, 0x73, 0xb6, 0xb0, 0xdf, 0x2a, 0x9a, 0x65, 0xaa, 0x99,
- 0xb4, 0x4a, 0x34, 0x73, 0xd6, 0xb0, 0x6d, 0x32, 0xaf, 0xd5, 0xf3, 0xda, 0x9d, 0x1a, 0xa9, 0xde,
- 0x57, 0x9d, 0x2a, 0xf5, 0x28, 0x1e, 0x65, 0x5b, 0x54, 0x7f, 0x8b, 0x2a, 0xb6, 0xa8, 0xf5, 0xbc,
- 0x12, 0xb1, 0x9c, 0xb7, 0x88, 0xed, 0xf9, 0x86, 0xfc, 0x89, 0x5b, 0x2a, 0x87, 0x4c, 0xea, 0x56,
- 0xa8, 0xab, 0x15, 0x0d, 0x97, 0x70, 0x97, 0x5a, 0x3d, 0x5f, 0x24, 0x9e, 0x91, 0xd7, 0x1c, 0xa3,
- 0x6c, 0xd9, 0x86, 0x67, 0x51, 0x5b, 0xec, 0x3d, 0x10, 0x0f, 0x44, 0x1e, 0xc8, 0x37, 0x8d, 0x97,
- 0x29, 0x2d, 0xcf, 0x13, 0xcd, 0x70, 0x2c, 0xcd, 0xb0, 0x6d, 0xea, 0x31, 0x0f, 0xae, 0xf8, 0xba,
- 0x5b, 0x7c, 0x65, 0x6f, 0xc5, 0xda, 0x6d, 0xcd, 0xb0, 0x05, 0x07, 0x65, 0xa4, 0x4c, 0xcb, 0x94,
- 0x3d, 0x6a, 0xfe, 0x13, 0x5f, 0xcd, 0x5d, 0x82, 0x1d, 0x57, 0x7d, 0x54, 0xe7, 0xf9, 0x21, 0x3a,
- 0xb9, 0x53, 0x23, 0xae, 0x87, 0x77, 0xc1, 0x26, 0x87, 0x56, 0xbd, 0x82, 0x55, 0x1a, 0x43, 0x13,
- 0x68, 0x72, 0x48, 0xdf, 0xe8, 0xbf, 0x4e, 0x97, 0xf0, 0x5e, 0x00, 0x81, 0xc7, 0xff, 0x96, 0x61,
- 0xdf, 0x86, 0xc4, 0xca, 0x74, 0x29, 0xf7, 0x00, 0xc1, 0x48, 0xd4, 0x9f, 0xeb, 0x50, 0xdb, 0x25,
- 0xf8, 0x14, 0x6c, 0x12, 0xbb, 0x98, 0xc3, 0xe1, 0xa9, 0xac, 0x1a, 0xab, 0xa9, 0x2a, 0x0d, 0xe5,
- 0x76, 0x3c, 0x02, 0x1b, 0x9c, 0x2a, 0xa5, 0xb7, 0xd9, 0x61, 0x9b, 0x75, 0xfe, 0x82, 0x5f, 0x86,
- 0xcd, 0xec, 0xa1, 0x30, 0x4b, 0xac, 0xf2, 0xac, 0x37, 0xd6, 0xcf, 0x9c, 0x8e, 0x47, 0x9c, 0xf2,
- 0x38, 0xd4, 0xf3, 0xea, 0x45, 0xb6, 0xe7, 0xdc, 0xc0, 0xc3, 0x5f, 0xf7, 0xf5, 0xe9, 0xc3, 0xcc,
- 0x8e, 0x2f, 0xe5, 0xde, 0x8c, 0xc2, 0x75, 0x25, 0xff, 0x57, 0x00, 0x9a, 0xe1, 0x11, 0x88, 0x9f,
- 0x51, 0x79, 0x2c, 0x55, 0x3f, 0x96, 0x2a, 0xbf, 0x1e, 0x22, 0x96, 0xea, 0x8c, 0x51, 0x26, 0xc2,
- 0x56, 0x0f, 0x59, 0xe6, 0x16, 0x11, 0x8c, 0xb6, 0x1c, 0x20, 0x04, 0x79, 0x09, 0x06, 0x05, 0x43,
- 0x77, 0x0c, 0x4d, 0xf4, 0x4f, 0x0e, 0x4f, 0x4d, 0x26, 0x28, 0x32, 0x5d, 0x22, 0xb6, 0x67, 0xdd,
- 0xb6, 0x48, 0x49, 0x6a, 0x13, 0x58, 0xe2, 0x0b, 0x11, 0x9c, 0x19, 0x86, 0xf3, 0xe0, 0xb2, 0x38,
- 0x39, 0x84, 0x30, 0x50, 0x7c, 0x1a, 0x36, 0x76, 0xac, 0xa4, 0xb0, 0xc8, 0x7d, 0x84, 0x20, 0xcb,
- 0x49, 0x52, 0xdb, 0x26, 0xa6, 0xef, 0xaf, 0x55, 0xcf, 0x2c, 0x80, 0x19, 0x7c, 0x14, 0x57, 0x2a,
- 0xb4, 0xd2, 0xa2, 0x77, 0x66, 0xd5, 0x7a, 0xff, 0x85, 0x60, 0x5f, 0x22, 0x94, 0xff, 0x9e, 0xf2,
- 0x37, 0xa4, 0xf0, 0x1c, 0xd5, 0x79, 0xb6, 0xfb, 0x9a, 0x67, 0x78, 0xa4, 0xdb, 0x44, 0x5e, 0x0a,
- 0x84, 0x8c, 0x71, 0x2d, 0x84, 0x34, 0x61, 0x97, 0x15, 0x28, 0x54, 0xe0, 0x50, 0x0b, 0xae, 0xbf,
- 0x45, 0x64, 0xcc, 0xe1, 0x78, 0x2a, 0x21, 0x59, 0x43, 0x5e, 0x47, 0xad, 0xb8, 0xe5, 0xde, 0xa6,
- 0xff, 0x03, 0x04, 0xfb, 0x23, 0x2c, 0x7d, 0x5e, 0xb6, 0x5b, 0x73, 0xd7, 0x42, 0x43, 0x7c, 0x10,
- 0xb6, 0x56, 0x49, 0xdd, 0x72, 0x2d, 0x6a, 0x17, 0xec, 0x5a, 0xa5, 0x48, 0xaa, 0x0c, 0xe7, 0x80,
- 0xbe, 0x45, 0x2e, 0x5f, 0x66, 0xab, 0x91, 0x8d, 0x82, 0xd0, 0x40, 0x74, 0xa3, 0xc0, 0xfb, 0x0b,
- 0x82, 0x5c, 0x1a, 0x5e, 0x11, 0x98, 0x17, 0x61, 0xab, 0x29, 0xbf, 0x44, 0x02, 0x32, 0xa2, 0xf2,
- 0xbf, 0x0f, 0xaa, 0xfc, 0xfb, 0xa0, 0x9e, 0xb5, 0xef, 0xeb, 0x5b, 0xcc, 0x88, 0x1b, 0xbc, 0x07,
- 0x86, 0x44, 0x30, 0x03, 0x56, 0x83, 0x7c, 0x61, 0xba, 0xd4, 0x8c, 0x47, 0x7f, 0x5a, 0x3c, 0x06,
- 0x56, 0x17, 0x8f, 0x2a, 0x8c, 0x33, 0x7a, 0x33, 0x86, 0x39, 0x47, 0xbc, 0xf3, 0xb4, 0x52, 0xb1,
- 0xbc, 0x0a, 0xb1, 0xbd, 0x6e, 0x23, 0xa1, 0xc0, 0xa0, 0xeb, 0xbb, 0xb0, 0x4d, 0x22, 0x42, 0x10,
- 0xbc, 0xe7, 0xbe, 0x42, 0xb0, 0x37, 0xe1, 0x50, 0x21, 0x27, 0x2b, 0x5e, 0x72, 0x95, 0x1d, 0xbc,
- 0x59, 0x0f, 0xad, 0xf4, 0xf6, 0x8a, 0x7e, 0x9d, 0x04, 0xcf, 0xed, 0x56, 0x94, 0x68, 0xcd, 0xed,
- 0x5f, 0x75, 0xcd, 0x7d, 0x2a, 0xcb, 0x7f, 0x0c, 0xc2, 0xa0, 0xe4, 0x0e, 0x37, 0xf5, 0x92, 0x55,
- 0x37, 0x97, 0x50, 0x75, 0xb9, 0x1b, 0x7e, 0xa3, 0xc3, 0x66, 0xeb, 0xa3, 0xe4, 0x52, 0xd8, 0x1d,
- 0x22, 0xab, 0x13, 0x93, 0x58, 0x4e, 0x4f, 0xef, 0xe7, 0xe7, 0x08, 0x94, 0xb8, 0x13, 0x85, 0xb4,
- 0x0a, 0x0c, 0x56, 0xfd, 0xa5, 0x3a, 0xe1, 0x7e, 0x07, 0xf5, 0xe0, 0xbd, 0xb7, 0xb9, 0x7a, 0x57,
- 0x94, 0x4e, 0x0e, 0xeb, 0xac, 0x39, 0x67, 0xd3, 0xbb, 0xf3, 0xa4, 0x54, 0x26, 0xbd, 0x4e, 0xd8,
- 0xef, 0x64, 0x11, 0x4c, 0x38, 0x59, 0x08, 0x33, 0x09, 0x5b, 0x8d, 0xe8, 0x27, 0x91, 0xba, 0xad,
- 0xcb, 0xbd, 0xcd, 0xdf, 0x6f, 0x52, 0xd1, 0xae, 0x9b, 0x24, 0xfe, 0x07, 0xc1, 0x81, 0x54, 0x98,
- 0x42, 0xd5, 0xcb, 0xb0, 0xad, 0x45, 0xbe, 0x4e, 0xd2, 0xb9, 0xcd, 0x76, 0x7d, 0xe4, 0xf4, 0x97,
- 0xb2, 0xc6, 0x5e, 0xb7, 0x65, 0xee, 0x70, 0xd4, 0x5d, 0x87, 0xe7, 0x0c, 0xec, 0x71, 0x98, 0xa7,
- 0x42, 0xb3, 0x90, 0x15, 0xe4, 0x4d, 0x76, 0xc7, 0xfa, 0x27, 0xfa, 0x27, 0x07, 0xf4, 0xdd, 0x4e,
- 0x4b, 0xe1, 0xbc, 0x26, 0x37, 0xe4, 0xde, 0x16, 0xa5, 0x35, 0x06, 0x98, 0x08, 0xc8, 0x38, 0x0c,
- 0x35, 0xfd, 0x21, 0xe6, 0xaf, 0xb9, 0x10, 0x52, 0x25, 0xd3, 0xb1, 0x2a, 0x1f, 0xc8, 0xc2, 0xd3,
- 0x3c, 0xfc, 0xac, 0x39, 0xd7, 0xb5, 0x24, 0x47, 0x61, 0x44, 0x48, 0x62, 0x98, 0x73, 0x6d, 0x5a,
- 0x60, 0x47, 0xde, 0xbf, 0xa6, 0x08, 0x77, 0x61, 0x4f, 0x2c, 0x8e, 0x9e, 0x2b, 0x70, 0x53, 0xf4,
- 0xc0, 0x97, 0xc9, 0xbd, 0x20, 0x26, 0x3a, 0x87, 0xd0, 0x6d, 0x7f, 0xfd, 0x3d, 0x82, 0x89, 0x64,
- 0xdf, 0x82, 0xd9, 0x14, 0x8c, 0xda, 0xe4, 0x5e, 0xf3, 0xc2, 0x14, 0x04, 0x7f, 0x76, 0xd4, 0x80,
- 0xbe, 0xc3, 0x6e, 0xb7, 0xed, 0x69, 0x31, 0x9b, 0xfa, 0x61, 0x27, 0x6c, 0x60, 0xa8, 0xf1, 0xb7,
- 0x08, 0x36, 0x89, 0x26, 0x14, 0x1f, 0x4a, 0xc8, 0xfc, 0x98, 0x1f, 0x16, 0x94, 0xc3, 0x2b, 0xda,
- 0xcb, 0xf9, 0xe7, 0xce, 0xbd, 0xff, 0xf8, 0x8f, 0xcf, 0x32, 0x2f, 0xe0, 0xd3, 0x9a, 0x55, 0x34,
- 0x93, 0x7e, 0x17, 0x71, 0xb5, 0x85, 0xa6, 0xd0, 0x0d, 0xcd, 0x97, 0xdf, 0xd5, 0x16, 0x44, 0x50,
- 0x1a, 0xf8, 0x13, 0x04, 0x83, 0x72, 0x04, 0xc4, 0x2b, 0x39, 0x5d, 0x5e, 0x70, 0xe5, 0xd9, 0x95,
- 0x6d, 0x16, 0x58, 0xff, 0xcf, 0xb0, 0xee, 0xc3, 0x7b, 0x53, 0xb1, 0xe2, 0x1f, 0x11, 0xe0, 0xf6,
- 0xd9, 0x14, 0x9f, 0x48, 0x3d, 0x2b, 0x69, 0xac, 0x56, 0x4e, 0x76, 0x6a, 0x26, 0xc0, 0x9e, 0x61,
- 0x60, 0x4f, 0xe1, 0x93, 0xf1, 0x60, 0x03, 0x43, 0x5f, 0xdb, 0xe0, 0xa5, 0xd1, 0x64, 0xf1, 0x93,
- 0xcf, 0xa2, 0x6d, 0x30, 0x5c, 0x86, 0x45, 0xd2, 0x8c, 0xba, 0x0c, 0x8b, 0xc4, 0xf9, 0x33, 0x77,
- 0x85, 0xb1, 0x98, 0xc6, 0x17, 0x56, 0x7f, 0x3d, 0xb4, 0xf0, 0xd4, 0x8a, 0xbf, 0xc8, 0xc0, 0x68,
- 0xec, 0x64, 0x85, 0x4f, 0xad, 0x04, 0x62, 0xdc, 0xf0, 0xa8, 0x3c, 0xbf, 0x0a, 0x4b, 0xc1, 0xef,
- 0x43, 0xc4, 0x08, 0xbe, 0x87, 0xf0, 0xbb, 0xdd, 0x30, 0x8c, 0x4e, 0x82, 0x9a, 0x1c, 0x29, 0xb5,
- 0x85, 0x96, 0xe1, 0xb4, 0xa1, 0xf1, 0xe2, 0x10, 0xfa, 0xc0, 0x17, 0x1a, 0xf8, 0x37, 0x04, 0xdb,
- 0x5a, 0xbb, 0x7b, 0x7c, 0x2c, 0x8d, 0x59, 0xc2, 0x04, 0xa7, 0x1c, 0xef, 0xcc, 0x48, 0x28, 0xf1,
- 0x16, 0x13, 0xe2, 0x16, 0xbe, 0xd1, 0x85, 0x0e, 0x6d, 0x7f, 0x87, 0x5d, 0x6d, 0x41, 0x16, 0xd6,
- 0x06, 0xfe, 0x19, 0xc1, 0xf6, 0xb6, 0xf9, 0x05, 0x77, 0x84, 0x36, 0xc8, 0xca, 0x13, 0x1d, 0x5a,
- 0x09, 0x92, 0xd7, 0x19, 0xc9, 0x2b, 0xf8, 0xd2, 0x9a, 0x92, 0xc4, 0x8f, 0x11, 0xfc, 0x2f, 0x32,
- 0x3a, 0xe0, 0xa3, 0xcb, 0xe3, 0x8b, 0xce, 0x35, 0x4a, 0xbe, 0x03, 0x0b, 0xc1, 0xe6, 0x0d, 0xc6,
- 0xe6, 0x75, 0x7c, 0xbd, 0x7b, 0x36, 0x55, 0xee, 0x3a, 0x12, 0xaf, 0x3f, 0x11, 0x8c, 0xc6, 0xb6,
- 0xaa, 0xe9, 0xa9, 0x9a, 0x36, 0xac, 0xa4, 0xa7, 0x6a, 0xea, 0xb0, 0x91, 0xbb, 0xc9, 0xd8, 0x5e,
- 0xc3, 0x57, 0xbb, 0x67, 0x6b, 0x98, 0x73, 0x11, 0xa6, 0x4f, 0x11, 0xec, 0x8c, 0x6f, 0xca, 0x71,
- 0xe7, 0x80, 0x83, 0x3b, 0x7a, 0x7a, 0x35, 0xa6, 0x82, 0xec, 0x2d, 0x46, 0xf6, 0x35, 0xac, 0xaf,
- 0x09, 0xd9, 0x28, 0xa5, 0x8f, 0x33, 0xb0, 0xbd, 0xad, 0xd9, 0x4d, 0xcf, 0xc3, 0xa4, 0xa6, 0x3d,
- 0x3d, 0x0f, 0x13, 0x3b, 0xea, 0x35, 0x2a, 0xbb, 0x71, 0xe5, 0x26, 0x65, 0x14, 0x68, 0x68, 0xb5,
- 0x00, 0x50, 0xc1, 0x11, 0xb4, 0xff, 0x46, 0xb0, 0x25, 0xda, 0xf4, 0xe2, 0xfc, 0xca, 0x38, 0x85,
- 0x1a, 0x75, 0x65, 0xaa, 0x13, 0x13, 0xa1, 0xc1, 0x3b, 0x4c, 0x82, 0x3a, 0xf6, 0x7a, 0xa3, 0x40,
- 0xa4, 0xf3, 0x8f, 0x50, 0xf7, 0x6f, 0x3f, 0x7e, 0x82, 0x60, 0x47, 0x4c, 0x5f, 0x8c, 0x53, 0x1b,
- 0x85, 0xe4, 0x26, 0x5d, 0x79, 0xae, 0x63, 0x3b, 0x21, 0xc3, 0x0c, 0x93, 0xe1, 0x55, 0x7c, 0xb1,
- 0x0b, 0x19, 0x22, 0x1d, 0xfc, 0xb9, 0x99, 0x87, 0x8b, 0x59, 0xf4, 0x68, 0x31, 0x8b, 0x7e, 0x5f,
- 0xcc, 0xa2, 0x4f, 0x97, 0xb2, 0x7d, 0x8f, 0x96, 0xb2, 0x7d, 0x4f, 0x96, 0xb2, 0x7d, 0xb7, 0x4e,
- 0x96, 0x2d, 0x6f, 0xb6, 0x56, 0x54, 0x4d, 0x5a, 0xd1, 0xc4, 0x3f, 0x0d, 0xad, 0xa2, 0x79, 0xa4,
- 0x4c, 0xb5, 0x0a, 0x2d, 0xd5, 0xe6, 0x89, 0xcb, 0xcf, 0x3f, 0x7a, 0xfc, 0x88, 0x84, 0xe0, 0xdd,
- 0x77, 0x88, 0x5b, 0xdc, 0xc8, 0x7e, 0xcb, 0x3d, 0xf6, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x54,
- 0x68, 0xbe, 0xa1, 0xc7, 0x1c, 0x00, 0x00,
+ proto.RegisterType((*QueryChannelRequest)(nil), "ibc.core.channel.v1.QueryChannelRequest")
+ proto.RegisterType((*QueryChannelResponse)(nil), "ibc.core.channel.v1.QueryChannelResponse")
+ proto.RegisterType((*QueryChannelsRequest)(nil), "ibc.core.channel.v1.QueryChannelsRequest")
+ proto.RegisterType((*QueryChannelsResponse)(nil), "ibc.core.channel.v1.QueryChannelsResponse")
+ proto.RegisterType((*QueryConnectionChannelsRequest)(nil), "ibc.core.channel.v1.QueryConnectionChannelsRequest")
+ proto.RegisterType((*QueryConnectionChannelsResponse)(nil), "ibc.core.channel.v1.QueryConnectionChannelsResponse")
+ proto.RegisterType((*QueryChannelClientStateRequest)(nil), "ibc.core.channel.v1.QueryChannelClientStateRequest")
+ proto.RegisterType((*QueryChannelClientStateResponse)(nil), "ibc.core.channel.v1.QueryChannelClientStateResponse")
+ proto.RegisterType((*QueryChannelConsensusStateRequest)(nil), "ibc.core.channel.v1.QueryChannelConsensusStateRequest")
+ proto.RegisterType((*QueryChannelConsensusStateResponse)(nil), "ibc.core.channel.v1.QueryChannelConsensusStateResponse")
+ proto.RegisterType((*QueryPacketCommitmentRequest)(nil), "ibc.core.channel.v1.QueryPacketCommitmentRequest")
+ proto.RegisterType((*QueryPacketCommitmentResponse)(nil), "ibc.core.channel.v1.QueryPacketCommitmentResponse")
+ proto.RegisterType((*QueryPacketCommitmentsRequest)(nil), "ibc.core.channel.v1.QueryPacketCommitmentsRequest")
+ proto.RegisterType((*QueryPacketCommitmentsResponse)(nil), "ibc.core.channel.v1.QueryPacketCommitmentsResponse")
+ proto.RegisterType((*QueryPacketReceiptRequest)(nil), "ibc.core.channel.v1.QueryPacketReceiptRequest")
+ proto.RegisterType((*QueryPacketReceiptResponse)(nil), "ibc.core.channel.v1.QueryPacketReceiptResponse")
+ proto.RegisterType((*QueryPacketAcknowledgementRequest)(nil), "ibc.core.channel.v1.QueryPacketAcknowledgementRequest")
+ proto.RegisterType((*QueryPacketAcknowledgementResponse)(nil), "ibc.core.channel.v1.QueryPacketAcknowledgementResponse")
+ proto.RegisterType((*QueryPacketAcknowledgementsRequest)(nil), "ibc.core.channel.v1.QueryPacketAcknowledgementsRequest")
+ proto.RegisterType((*QueryPacketAcknowledgementsResponse)(nil), "ibc.core.channel.v1.QueryPacketAcknowledgementsResponse")
+ proto.RegisterType((*QueryUnreceivedPacketsRequest)(nil), "ibc.core.channel.v1.QueryUnreceivedPacketsRequest")
+ proto.RegisterType((*QueryUnreceivedPacketsResponse)(nil), "ibc.core.channel.v1.QueryUnreceivedPacketsResponse")
+ proto.RegisterType((*QueryUnreceivedAcksRequest)(nil), "ibc.core.channel.v1.QueryUnreceivedAcksRequest")
+ proto.RegisterType((*QueryUnreceivedAcksResponse)(nil), "ibc.core.channel.v1.QueryUnreceivedAcksResponse")
+ proto.RegisterType((*QueryNextSequenceReceiveRequest)(nil), "ibc.core.channel.v1.QueryNextSequenceReceiveRequest")
+ proto.RegisterType((*QueryNextSequenceReceiveResponse)(nil), "ibc.core.channel.v1.QueryNextSequenceReceiveResponse")
+}
+
+func init() { proto.RegisterFile("ibc/core/channel/v1/query.proto", fileDescriptor_1034a1e9abc4cca1) }
+
+var fileDescriptor_1034a1e9abc4cca1 = []byte{
+ // 1481 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x59, 0xdf, 0x6f, 0x14, 0x55,
+ 0x14, 0xee, 0xdd, 0x16, 0x68, 0x0f, 0xc8, 0x8f, 0xdb, 0x16, 0xca, 0x50, 0xb6, 0x65, 0x8d, 0x52,
+ 0x48, 0x98, 0x4b, 0x0b, 0x56, 0x62, 0x94, 0x84, 0x36, 0x11, 0x6a, 0x04, 0xca, 0x20, 0x11, 0x48,
+ 0x74, 0x9d, 0x9d, 0xbd, 0x6c, 0x27, 0xed, 0xce, 0x0c, 0x3b, 0xb3, 0x0b, 0xa4, 0xae, 0x31, 0x3e,
+ 0x20, 0x89, 0x2f, 0x46, 0x1e, 0x4c, 0x7c, 0x31, 0x31, 0xbe, 0xf0, 0xe0, 0x83, 0x7f, 0x81, 0xaf,
+ 0xbc, 0x49, 0x82, 0x0f, 0x26, 0x24, 0x68, 0xa8, 0x09, 0xbe, 0xfa, 0xe2, 0xb3, 0x99, 0xfb, 0x63,
+ 0x76, 0x66, 0x77, 0x66, 0xba, 0xdb, 0xed, 0x26, 0x8d, 0x6f, 0x33, 0x77, 0xee, 0x39, 0xf7, 0xfb,
+ 0xbe, 0x73, 0xcf, 0xe9, 0x39, 0x5b, 0x98, 0x30, 0x0b, 0x06, 0x31, 0xec, 0x0a, 0x25, 0xc6, 0x92,
+ 0x6e, 0x59, 0x74, 0x85, 0xd4, 0xa6, 0xc9, 0xed, 0x2a, 0xad, 0xdc, 0x53, 0x9d, 0x8a, 0xed, 0xd9,
+ 0x78, 0xd8, 0x2c, 0x18, 0xaa, 0xbf, 0x41, 0x15, 0x1b, 0xd4, 0xda, 0xb4, 0x12, 0xb2, 0x5a, 0x31,
+ 0xa9, 0xe5, 0xf9, 0x46, 0xfc, 0x89, 0x5b, 0x29, 0xc7, 0x0d, 0xdb, 0x2d, 0xdb, 0x2e, 0x29, 0xe8,
+ 0x2e, 0xe5, 0xee, 0x48, 0x6d, 0xba, 0x40, 0x3d, 0x7d, 0x9a, 0x38, 0x7a, 0xc9, 0xb4, 0x74, 0xcf,
+ 0xb4, 0x2d, 0xb1, 0xf7, 0x48, 0x1c, 0x04, 0x79, 0x18, 0xdf, 0x32, 0x5e, 0xb2, 0xed, 0xd2, 0x0a,
+ 0x25, 0xba, 0x63, 0x12, 0xdd, 0xb2, 0x6c, 0x8f, 0xd9, 0xbb, 0xe2, 0xeb, 0x41, 0xf1, 0x95, 0xbd,
+ 0x15, 0xaa, 0xb7, 0x88, 0x6e, 0x09, 0xf4, 0xca, 0x48, 0xc9, 0x2e, 0xd9, 0xec, 0x91, 0xf8, 0x4f,
+ 0x7c, 0x35, 0x77, 0x11, 0x86, 0xaf, 0xf8, 0x98, 0xe6, 0xf9, 0x21, 0x1a, 0xbd, 0x5d, 0xa5, 0xae,
+ 0x87, 0x0f, 0xc0, 0x0e, 0xc7, 0xae, 0x78, 0x79, 0xb3, 0x38, 0x86, 0x26, 0xd1, 0xd4, 0x90, 0xb6,
+ 0xdd, 0x7f, 0x5d, 0x28, 0xe2, 0xc3, 0x00, 0x02, 0x8f, 0xff, 0x2d, 0xc3, 0xbe, 0x0d, 0x89, 0x95,
+ 0x85, 0x62, 0xee, 0x11, 0x82, 0x91, 0xa8, 0x3f, 0xd7, 0xb1, 0x2d, 0x97, 0xe2, 0x59, 0xd8, 0x21,
+ 0x76, 0x31, 0x87, 0x3b, 0x67, 0xc6, 0xd5, 0x18, 0x35, 0x55, 0x69, 0x26, 0x37, 0xe3, 0x11, 0xd8,
+ 0xe6, 0x54, 0x6c, 0xfb, 0x16, 0x3b, 0x6a, 0x97, 0xc6, 0x5f, 0xf0, 0x3c, 0xec, 0x62, 0x0f, 0xf9,
+ 0x25, 0x6a, 0x96, 0x96, 0xbc, 0xb1, 0x7e, 0xe6, 0x52, 0x09, 0xb9, 0xe4, 0x11, 0xa8, 0x4d, 0xab,
+ 0x17, 0xd8, 0x8e, 0xb9, 0x81, 0xc7, 0xcf, 0x27, 0xfa, 0xb4, 0x9d, 0xcc, 0x8a, 0x2f, 0xe5, 0x3e,
+ 0x8e, 0x42, 0x75, 0x25, 0xf7, 0x77, 0x01, 0x1a, 0x81, 0x11, 0x68, 0x5f, 0x57, 0x79, 0x14, 0x55,
+ 0x3f, 0x8a, 0x2a, 0xbf, 0x14, 0x22, 0x8a, 0xea, 0xa2, 0x5e, 0xa2, 0xc2, 0x56, 0x0b, 0x59, 0xe6,
+ 0x9e, 0x23, 0x18, 0x6d, 0x3a, 0x40, 0x88, 0x31, 0x07, 0x83, 0x82, 0x9f, 0x3b, 0x86, 0x26, 0xfb,
+ 0x99, 0xff, 0x38, 0x35, 0x16, 0x8a, 0xd4, 0xf2, 0xcc, 0x5b, 0x26, 0x2d, 0x4a, 0x5d, 0x02, 0x3b,
+ 0x7c, 0x3e, 0x82, 0x32, 0xc3, 0x50, 0x1e, 0x5d, 0x17, 0x25, 0x07, 0x10, 0x86, 0x89, 0xcf, 0xc0,
+ 0xf6, 0x0e, 0x55, 0x14, 0xfb, 0x73, 0x0f, 0x10, 0x64, 0x39, 0x41, 0xdb, 0xb2, 0xa8, 0xe1, 0x7b,
+ 0x6b, 0xd6, 0x32, 0x0b, 0x60, 0x04, 0x1f, 0xc5, 0x55, 0x0a, 0xad, 0x34, 0x69, 0x9d, 0xd9, 0xb0,
+ 0xd6, 0x7f, 0x23, 0x98, 0x48, 0x84, 0xf2, 0xff, 0x52, 0xfd, 0xba, 0x14, 0x9d, 0x63, 0x9a, 0x67,
+ 0xbb, 0xaf, 0x7a, 0xba, 0x47, 0xbb, 0x4d, 0xde, 0x3f, 0x02, 0x11, 0x63, 0x5c, 0x0b, 0x11, 0x75,
+ 0x38, 0x60, 0x06, 0xfa, 0xe4, 0x39, 0xd4, 0xbc, 0xeb, 0x6f, 0x11, 0x99, 0x72, 0x2c, 0x8e, 0x48,
+ 0x48, 0xd2, 0x90, 0xcf, 0x51, 0x33, 0x6e, 0xb9, 0x97, 0x29, 0xff, 0x13, 0x82, 0x23, 0x11, 0x86,
+ 0x3e, 0x27, 0xcb, 0xad, 0xba, 0x9b, 0xa1, 0x1f, 0x3e, 0x0a, 0x7b, 0x2a, 0xb4, 0x66, 0xba, 0xa6,
+ 0x6d, 0xe5, 0xad, 0x6a, 0xb9, 0x40, 0x2b, 0x0c, 0xe5, 0x80, 0xb6, 0x5b, 0x2e, 0x5f, 0x62, 0xab,
+ 0x91, 0x8d, 0x82, 0xce, 0x40, 0x74, 0xa3, 0xc0, 0xfb, 0x0c, 0x41, 0x2e, 0x0d, 0xaf, 0x08, 0xca,
+ 0x3b, 0xb0, 0xc7, 0x90, 0x5f, 0x22, 0xc1, 0x18, 0x51, 0xf9, 0xdf, 0x03, 0x55, 0xfe, 0x3d, 0x50,
+ 0xcf, 0x59, 0xf7, 0xb4, 0xdd, 0x46, 0xc4, 0x0d, 0x3e, 0x04, 0x43, 0x22, 0x90, 0x01, 0xab, 0x41,
+ 0xbe, 0xb0, 0x50, 0x6c, 0x44, 0xa3, 0x3f, 0x2d, 0x1a, 0x03, 0x1b, 0x89, 0x46, 0x05, 0xc6, 0x19,
+ 0xb9, 0x45, 0xdd, 0x58, 0xa6, 0xde, 0xbc, 0x5d, 0x2e, 0x9b, 0x5e, 0x99, 0x5a, 0x5e, 0xb7, 0x71,
+ 0x50, 0x60, 0xd0, 0xf5, 0x5d, 0x58, 0x06, 0x15, 0x01, 0x08, 0xde, 0x73, 0xdf, 0x21, 0x38, 0x9c,
+ 0x70, 0xa8, 0x10, 0x93, 0x95, 0x2c, 0xb9, 0xca, 0x0e, 0xde, 0xa5, 0x85, 0x56, 0x7a, 0x79, 0x3d,
+ 0xbf, 0x4f, 0x02, 0xe7, 0x76, 0x2b, 0x49, 0xb4, 0xce, 0xf6, 0x6f, 0xb8, 0xce, 0xbe, 0x94, 0x25,
+ 0x3f, 0x06, 0x61, 0x50, 0x66, 0x77, 0x36, 0xd4, 0x92, 0x95, 0x76, 0x32, 0xb6, 0xd2, 0x72, 0x27,
+ 0xfc, 0x2e, 0x87, 0x8d, 0xb6, 0x42, 0x99, 0xb5, 0xe1, 0x60, 0x88, 0xa8, 0x46, 0x0d, 0x6a, 0x3a,
+ 0x3d, 0xbd, 0x99, 0x0f, 0x11, 0x28, 0x71, 0x27, 0x0a, 0x59, 0x15, 0x18, 0xac, 0xf8, 0x4b, 0x35,
+ 0xca, 0xfd, 0x0e, 0x6a, 0xc1, 0x7b, 0x2f, 0x73, 0xf4, 0x8e, 0x28, 0x98, 0x1c, 0xd4, 0x39, 0x63,
+ 0xd9, 0xb2, 0xef, 0xac, 0xd0, 0x62, 0x89, 0xf6, 0x3a, 0x51, 0x1f, 0xc9, 0xd2, 0x97, 0x70, 0xb2,
+ 0x90, 0x65, 0x0a, 0xf6, 0xe8, 0xd1, 0x4f, 0x22, 0x65, 0x9b, 0x97, 0x7b, 0x99, 0xb7, 0x3f, 0xa6,
+ 0x62, 0xdd, 0x32, 0xc9, 0xfb, 0x2f, 0x82, 0x57, 0x53, 0x61, 0x0a, 0x4d, 0xdf, 0x87, 0xbd, 0x4d,
+ 0xe2, 0xb5, 0x9f, 0xc6, 0x2d, 0x96, 0x5b, 0x21, 0x97, 0xbf, 0x95, 0x75, 0xf5, 0x9a, 0x25, 0x73,
+ 0x86, 0x63, 0xee, 0x3a, 0x34, 0x67, 0xe1, 0x90, 0xc3, 0x3c, 0xe5, 0x1b, 0xe5, 0x2b, 0x2f, 0xef,
+ 0xb0, 0x3b, 0xd6, 0x3f, 0xd9, 0x3f, 0x35, 0xa0, 0x1d, 0x74, 0x9a, 0x8a, 0xe5, 0x55, 0xb9, 0x21,
+ 0x77, 0x57, 0x94, 0xd3, 0x18, 0x60, 0x22, 0x18, 0xe3, 0x30, 0xd4, 0xf0, 0x87, 0x98, 0xbf, 0xc6,
+ 0x42, 0x48, 0x93, 0x4c, 0x87, 0x9a, 0xdc, 0x97, 0xe5, 0xa6, 0x71, 0xf4, 0x39, 0x63, 0xb9, 0x6b,
+ 0x41, 0x4e, 0xc2, 0x88, 0x10, 0x44, 0x37, 0x96, 0x5b, 0x94, 0xc0, 0x8e, 0xbc, 0x79, 0x0d, 0x09,
+ 0xaa, 0x70, 0x28, 0x16, 0x47, 0x8f, 0xf9, 0xdf, 0x10, 0xbd, 0xee, 0x25, 0x7a, 0x37, 0x88, 0x87,
+ 0xc6, 0x01, 0x74, 0xdb, 0x47, 0xff, 0x8c, 0x60, 0x32, 0xd9, 0xb7, 0xe0, 0x35, 0x03, 0xa3, 0x16,
+ 0xbd, 0xdb, 0xb8, 0x2c, 0x79, 0xc1, 0x9e, 0x1d, 0x35, 0xa0, 0x0d, 0x5b, 0xad, 0xb6, 0x3d, 0x2c,
+ 0x61, 0x33, 0x5f, 0xed, 0x87, 0x6d, 0x0c, 0x33, 0xfe, 0x01, 0xc1, 0x0e, 0xd1, 0x6e, 0xe2, 0xa9,
+ 0xd8, 0x7c, 0x8f, 0xf9, 0xc1, 0x40, 0x39, 0xd6, 0xc6, 0x4e, 0xce, 0x3c, 0x37, 0xf7, 0xc5, 0xd3,
+ 0xbf, 0x1e, 0x66, 0xde, 0xc6, 0x6f, 0x91, 0x94, 0x5f, 0x3b, 0x5c, 0xb2, 0xda, 0x90, 0xb8, 0x4e,
+ 0x7c, 0xe1, 0x5d, 0xb2, 0x2a, 0xc2, 0x51, 0xc7, 0x0f, 0x10, 0x0c, 0xca, 0x01, 0x0f, 0xaf, 0x7f,
+ 0xb6, 0xbc, 0xd6, 0xca, 0xf1, 0x76, 0xb6, 0x0a, 0x9c, 0xaf, 0x31, 0x9c, 0x13, 0xf8, 0x70, 0x2a,
+ 0x4e, 0xfc, 0x0b, 0x02, 0xdc, 0x3a, 0x75, 0xe2, 0x53, 0x29, 0x27, 0x25, 0x8d, 0xcb, 0xca, 0xe9,
+ 0xce, 0x8c, 0x04, 0xd0, 0xb3, 0x0c, 0xe8, 0x19, 0x3c, 0x1b, 0x0f, 0x34, 0x30, 0xf4, 0x35, 0x0d,
+ 0x5e, 0xea, 0x0d, 0x06, 0x4f, 0x7c, 0x06, 0x2d, 0x23, 0x5f, 0x2a, 0x83, 0xa4, 0xd9, 0x33, 0x95,
+ 0x41, 0xe2, 0x54, 0x99, 0xbb, 0xcc, 0x18, 0x2c, 0xe0, 0xf3, 0x1b, 0xbf, 0x12, 0x24, 0x3c, 0x8b,
+ 0xe2, 0x6f, 0x32, 0x30, 0x1a, 0x3b, 0x33, 0xe1, 0xd9, 0xf5, 0x01, 0xc6, 0x0d, 0x85, 0xca, 0x9b,
+ 0x1d, 0xdb, 0x09, 0x6e, 0x5f, 0x22, 0x46, 0xee, 0x73, 0x84, 0x3f, 0xeb, 0x86, 0x5d, 0x74, 0xbe,
+ 0x23, 0x72, 0x50, 0x24, 0xab, 0x4d, 0x23, 0x67, 0x9d, 0xf0, 0x32, 0x10, 0xfa, 0xc0, 0x17, 0xea,
+ 0xf8, 0x19, 0x82, 0xbd, 0xcd, 0x7d, 0x3b, 0x9e, 0x4e, 0xe6, 0x95, 0x30, 0x97, 0x29, 0x33, 0x9d,
+ 0x98, 0x08, 0x15, 0x3e, 0x61, 0x22, 0xdc, 0xc4, 0xd7, 0xbb, 0xd0, 0xa0, 0xe5, 0x2f, 0xad, 0x4b,
+ 0x56, 0x65, 0xf9, 0xac, 0xe3, 0xa7, 0x08, 0xf6, 0xb5, 0x4c, 0x25, 0xb8, 0x03, 0xac, 0x41, 0x16,
+ 0x9e, 0xea, 0xc8, 0x46, 0x10, 0xbc, 0xc6, 0x08, 0x5e, 0xc6, 0x17, 0x37, 0x95, 0x20, 0xfe, 0x15,
+ 0xc1, 0x2b, 0x91, 0x81, 0x00, 0xab, 0xeb, 0xa1, 0x8b, 0xce, 0x2a, 0x0a, 0x69, 0x7b, 0xbf, 0x60,
+ 0xf2, 0x11, 0x63, 0xf2, 0x21, 0xbe, 0xd6, 0x3d, 0x93, 0x0a, 0x77, 0x1d, 0x89, 0xd3, 0x1a, 0x82,
+ 0xd1, 0xd8, 0x06, 0x34, 0x2d, 0x35, 0xd3, 0xc6, 0x8f, 0xb4, 0xd4, 0x4c, 0x1d, 0x1e, 0x72, 0x37,
+ 0x18, 0xd3, 0xab, 0xf8, 0x4a, 0xf7, 0x4c, 0x75, 0x63, 0x39, 0xc2, 0xf2, 0x25, 0x82, 0xfd, 0xf1,
+ 0x6d, 0x36, 0xee, 0x14, 0x6e, 0x70, 0x2f, 0xcf, 0x74, 0x6e, 0x28, 0x88, 0xde, 0x64, 0x44, 0x3f,
+ 0xc0, 0xda, 0xa6, 0x10, 0x8d, 0xd2, 0xb9, 0x9f, 0x81, 0x7d, 0x2d, 0xed, 0x6b, 0x5a, 0xde, 0x25,
+ 0x35, 0xe1, 0x69, 0x79, 0x97, 0xd8, 0x1f, 0x6f, 0x52, 0x79, 0x8d, 0x2b, 0x2d, 0x29, 0x8d, 0x7d,
+ 0x9d, 0x54, 0x03, 0x40, 0x79, 0x47, 0x50, 0xfe, 0x07, 0xc1, 0xee, 0x68, 0x13, 0x8b, 0x49, 0x3b,
+ 0x8c, 0x42, 0x6d, 0xb7, 0x72, 0xb2, 0x7d, 0x03, 0xc1, 0xff, 0x53, 0x46, 0xbf, 0x86, 0xbd, 0xde,
+ 0xb0, 0x8f, 0x74, 0xf1, 0x11, 0xda, 0xfe, 0x8d, 0xc7, 0xbf, 0x21, 0x18, 0x8e, 0xe9, 0x72, 0x71,
+ 0x4a, 0x1b, 0x90, 0xdc, 0x70, 0x2b, 0x6f, 0x74, 0x68, 0x25, 0x24, 0x58, 0x64, 0x12, 0xbc, 0x87,
+ 0x2f, 0x74, 0x21, 0x41, 0xa4, 0x17, 0x9f, 0x5b, 0x7c, 0xfc, 0x22, 0x8b, 0x9e, 0xbc, 0xc8, 0xa2,
+ 0x3f, 0x5f, 0x64, 0xd1, 0xd7, 0x6b, 0xd9, 0xbe, 0x27, 0x6b, 0xd9, 0xbe, 0xdf, 0xd7, 0xb2, 0x7d,
+ 0x37, 0x67, 0x4b, 0xa6, 0xb7, 0x54, 0x2d, 0xa8, 0x86, 0x5d, 0x26, 0xe2, 0x1f, 0x7b, 0x66, 0xc1,
+ 0x38, 0x51, 0xb2, 0x49, 0xd9, 0x2e, 0x56, 0x57, 0xa8, 0xcb, 0xcf, 0x3f, 0x79, 0xfa, 0x84, 0x84,
+ 0xe0, 0xdd, 0x73, 0xa8, 0x5b, 0xd8, 0xce, 0x7e, 0x81, 0x3d, 0xf5, 0x5f, 0x00, 0x00, 0x00, 0xff,
+ 0xff, 0x90, 0x44, 0xdb, 0xbc, 0x65, 0x1c, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
@@ -1854,7 +1853,7 @@ func NewQueryClient(cc grpc1.ClientConn) QueryClient {
func (c *queryClient) Channel(ctx context.Context, in *QueryChannelRequest, opts ...grpc.CallOption) (*QueryChannelResponse, error) {
out := new(QueryChannelResponse)
- err := c.cc.Invoke(ctx, "/ibcgo.core.channel.v1.Query/Channel", in, out, opts...)
+ err := c.cc.Invoke(ctx, "/ibc.core.channel.v1.Query/Channel", in, out, opts...)
if err != nil {
return nil, err
}
@@ -1863,7 +1862,7 @@ func (c *queryClient) Channel(ctx context.Context, in *QueryChannelRequest, opts
func (c *queryClient) Channels(ctx context.Context, in *QueryChannelsRequest, opts ...grpc.CallOption) (*QueryChannelsResponse, error) {
out := new(QueryChannelsResponse)
- err := c.cc.Invoke(ctx, "/ibcgo.core.channel.v1.Query/Channels", in, out, opts...)
+ err := c.cc.Invoke(ctx, "/ibc.core.channel.v1.Query/Channels", in, out, opts...)
if err != nil {
return nil, err
}
@@ -1872,7 +1871,7 @@ func (c *queryClient) Channels(ctx context.Context, in *QueryChannelsRequest, op
func (c *queryClient) ConnectionChannels(ctx context.Context, in *QueryConnectionChannelsRequest, opts ...grpc.CallOption) (*QueryConnectionChannelsResponse, error) {
out := new(QueryConnectionChannelsResponse)
- err := c.cc.Invoke(ctx, "/ibcgo.core.channel.v1.Query/ConnectionChannels", in, out, opts...)
+ err := c.cc.Invoke(ctx, "/ibc.core.channel.v1.Query/ConnectionChannels", in, out, opts...)
if err != nil {
return nil, err
}
@@ -1881,7 +1880,7 @@ func (c *queryClient) ConnectionChannels(ctx context.Context, in *QueryConnectio
func (c *queryClient) ChannelClientState(ctx context.Context, in *QueryChannelClientStateRequest, opts ...grpc.CallOption) (*QueryChannelClientStateResponse, error) {
out := new(QueryChannelClientStateResponse)
- err := c.cc.Invoke(ctx, "/ibcgo.core.channel.v1.Query/ChannelClientState", in, out, opts...)
+ err := c.cc.Invoke(ctx, "/ibc.core.channel.v1.Query/ChannelClientState", in, out, opts...)
if err != nil {
return nil, err
}
@@ -1890,7 +1889,7 @@ func (c *queryClient) ChannelClientState(ctx context.Context, in *QueryChannelCl
func (c *queryClient) ChannelConsensusState(ctx context.Context, in *QueryChannelConsensusStateRequest, opts ...grpc.CallOption) (*QueryChannelConsensusStateResponse, error) {
out := new(QueryChannelConsensusStateResponse)
- err := c.cc.Invoke(ctx, "/ibcgo.core.channel.v1.Query/ChannelConsensusState", in, out, opts...)
+ err := c.cc.Invoke(ctx, "/ibc.core.channel.v1.Query/ChannelConsensusState", in, out, opts...)
if err != nil {
return nil, err
}
@@ -1899,7 +1898,7 @@ func (c *queryClient) ChannelConsensusState(ctx context.Context, in *QueryChanne
func (c *queryClient) PacketCommitment(ctx context.Context, in *QueryPacketCommitmentRequest, opts ...grpc.CallOption) (*QueryPacketCommitmentResponse, error) {
out := new(QueryPacketCommitmentResponse)
- err := c.cc.Invoke(ctx, "/ibcgo.core.channel.v1.Query/PacketCommitment", in, out, opts...)
+ err := c.cc.Invoke(ctx, "/ibc.core.channel.v1.Query/PacketCommitment", in, out, opts...)
if err != nil {
return nil, err
}
@@ -1908,7 +1907,7 @@ func (c *queryClient) PacketCommitment(ctx context.Context, in *QueryPacketCommi
func (c *queryClient) PacketCommitments(ctx context.Context, in *QueryPacketCommitmentsRequest, opts ...grpc.CallOption) (*QueryPacketCommitmentsResponse, error) {
out := new(QueryPacketCommitmentsResponse)
- err := c.cc.Invoke(ctx, "/ibcgo.core.channel.v1.Query/PacketCommitments", in, out, opts...)
+ err := c.cc.Invoke(ctx, "/ibc.core.channel.v1.Query/PacketCommitments", in, out, opts...)
if err != nil {
return nil, err
}
@@ -1917,7 +1916,7 @@ func (c *queryClient) PacketCommitments(ctx context.Context, in *QueryPacketComm
func (c *queryClient) PacketReceipt(ctx context.Context, in *QueryPacketReceiptRequest, opts ...grpc.CallOption) (*QueryPacketReceiptResponse, error) {
out := new(QueryPacketReceiptResponse)
- err := c.cc.Invoke(ctx, "/ibcgo.core.channel.v1.Query/PacketReceipt", in, out, opts...)
+ err := c.cc.Invoke(ctx, "/ibc.core.channel.v1.Query/PacketReceipt", in, out, opts...)
if err != nil {
return nil, err
}
@@ -1926,7 +1925,7 @@ func (c *queryClient) PacketReceipt(ctx context.Context, in *QueryPacketReceiptR
func (c *queryClient) PacketAcknowledgement(ctx context.Context, in *QueryPacketAcknowledgementRequest, opts ...grpc.CallOption) (*QueryPacketAcknowledgementResponse, error) {
out := new(QueryPacketAcknowledgementResponse)
- err := c.cc.Invoke(ctx, "/ibcgo.core.channel.v1.Query/PacketAcknowledgement", in, out, opts...)
+ err := c.cc.Invoke(ctx, "/ibc.core.channel.v1.Query/PacketAcknowledgement", in, out, opts...)
if err != nil {
return nil, err
}
@@ -1935,7 +1934,7 @@ func (c *queryClient) PacketAcknowledgement(ctx context.Context, in *QueryPacket
func (c *queryClient) PacketAcknowledgements(ctx context.Context, in *QueryPacketAcknowledgementsRequest, opts ...grpc.CallOption) (*QueryPacketAcknowledgementsResponse, error) {
out := new(QueryPacketAcknowledgementsResponse)
- err := c.cc.Invoke(ctx, "/ibcgo.core.channel.v1.Query/PacketAcknowledgements", in, out, opts...)
+ err := c.cc.Invoke(ctx, "/ibc.core.channel.v1.Query/PacketAcknowledgements", in, out, opts...)
if err != nil {
return nil, err
}
@@ -1944,7 +1943,7 @@ func (c *queryClient) PacketAcknowledgements(ctx context.Context, in *QueryPacke
func (c *queryClient) UnreceivedPackets(ctx context.Context, in *QueryUnreceivedPacketsRequest, opts ...grpc.CallOption) (*QueryUnreceivedPacketsResponse, error) {
out := new(QueryUnreceivedPacketsResponse)
- err := c.cc.Invoke(ctx, "/ibcgo.core.channel.v1.Query/UnreceivedPackets", in, out, opts...)
+ err := c.cc.Invoke(ctx, "/ibc.core.channel.v1.Query/UnreceivedPackets", in, out, opts...)
if err != nil {
return nil, err
}
@@ -1953,7 +1952,7 @@ func (c *queryClient) UnreceivedPackets(ctx context.Context, in *QueryUnreceived
func (c *queryClient) UnreceivedAcks(ctx context.Context, in *QueryUnreceivedAcksRequest, opts ...grpc.CallOption) (*QueryUnreceivedAcksResponse, error) {
out := new(QueryUnreceivedAcksResponse)
- err := c.cc.Invoke(ctx, "/ibcgo.core.channel.v1.Query/UnreceivedAcks", in, out, opts...)
+ err := c.cc.Invoke(ctx, "/ibc.core.channel.v1.Query/UnreceivedAcks", in, out, opts...)
if err != nil {
return nil, err
}
@@ -1962,7 +1961,7 @@ func (c *queryClient) UnreceivedAcks(ctx context.Context, in *QueryUnreceivedAck
func (c *queryClient) NextSequenceReceive(ctx context.Context, in *QueryNextSequenceReceiveRequest, opts ...grpc.CallOption) (*QueryNextSequenceReceiveResponse, error) {
out := new(QueryNextSequenceReceiveResponse)
- err := c.cc.Invoke(ctx, "/ibcgo.core.channel.v1.Query/NextSequenceReceive", in, out, opts...)
+ err := c.cc.Invoke(ctx, "/ibc.core.channel.v1.Query/NextSequenceReceive", in, out, opts...)
if err != nil {
return nil, err
}
@@ -2065,7 +2064,7 @@ func _Query_Channel_Handler(srv interface{}, ctx context.Context, dec func(inter
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/ibcgo.core.channel.v1.Query/Channel",
+ FullMethod: "/ibc.core.channel.v1.Query/Channel",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(QueryServer).Channel(ctx, req.(*QueryChannelRequest))
@@ -2083,7 +2082,7 @@ func _Query_Channels_Handler(srv interface{}, ctx context.Context, dec func(inte
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/ibcgo.core.channel.v1.Query/Channels",
+ FullMethod: "/ibc.core.channel.v1.Query/Channels",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(QueryServer).Channels(ctx, req.(*QueryChannelsRequest))
@@ -2101,7 +2100,7 @@ func _Query_ConnectionChannels_Handler(srv interface{}, ctx context.Context, dec
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/ibcgo.core.channel.v1.Query/ConnectionChannels",
+ FullMethod: "/ibc.core.channel.v1.Query/ConnectionChannels",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(QueryServer).ConnectionChannels(ctx, req.(*QueryConnectionChannelsRequest))
@@ -2119,7 +2118,7 @@ func _Query_ChannelClientState_Handler(srv interface{}, ctx context.Context, dec
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/ibcgo.core.channel.v1.Query/ChannelClientState",
+ FullMethod: "/ibc.core.channel.v1.Query/ChannelClientState",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(QueryServer).ChannelClientState(ctx, req.(*QueryChannelClientStateRequest))
@@ -2137,7 +2136,7 @@ func _Query_ChannelConsensusState_Handler(srv interface{}, ctx context.Context,
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/ibcgo.core.channel.v1.Query/ChannelConsensusState",
+ FullMethod: "/ibc.core.channel.v1.Query/ChannelConsensusState",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(QueryServer).ChannelConsensusState(ctx, req.(*QueryChannelConsensusStateRequest))
@@ -2155,7 +2154,7 @@ func _Query_PacketCommitment_Handler(srv interface{}, ctx context.Context, dec f
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/ibcgo.core.channel.v1.Query/PacketCommitment",
+ FullMethod: "/ibc.core.channel.v1.Query/PacketCommitment",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(QueryServer).PacketCommitment(ctx, req.(*QueryPacketCommitmentRequest))
@@ -2173,7 +2172,7 @@ func _Query_PacketCommitments_Handler(srv interface{}, ctx context.Context, dec
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/ibcgo.core.channel.v1.Query/PacketCommitments",
+ FullMethod: "/ibc.core.channel.v1.Query/PacketCommitments",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(QueryServer).PacketCommitments(ctx, req.(*QueryPacketCommitmentsRequest))
@@ -2191,7 +2190,7 @@ func _Query_PacketReceipt_Handler(srv interface{}, ctx context.Context, dec func
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/ibcgo.core.channel.v1.Query/PacketReceipt",
+ FullMethod: "/ibc.core.channel.v1.Query/PacketReceipt",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(QueryServer).PacketReceipt(ctx, req.(*QueryPacketReceiptRequest))
@@ -2209,7 +2208,7 @@ func _Query_PacketAcknowledgement_Handler(srv interface{}, ctx context.Context,
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/ibcgo.core.channel.v1.Query/PacketAcknowledgement",
+ FullMethod: "/ibc.core.channel.v1.Query/PacketAcknowledgement",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(QueryServer).PacketAcknowledgement(ctx, req.(*QueryPacketAcknowledgementRequest))
@@ -2227,7 +2226,7 @@ func _Query_PacketAcknowledgements_Handler(srv interface{}, ctx context.Context,
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/ibcgo.core.channel.v1.Query/PacketAcknowledgements",
+ FullMethod: "/ibc.core.channel.v1.Query/PacketAcknowledgements",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(QueryServer).PacketAcknowledgements(ctx, req.(*QueryPacketAcknowledgementsRequest))
@@ -2245,7 +2244,7 @@ func _Query_UnreceivedPackets_Handler(srv interface{}, ctx context.Context, dec
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/ibcgo.core.channel.v1.Query/UnreceivedPackets",
+ FullMethod: "/ibc.core.channel.v1.Query/UnreceivedPackets",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(QueryServer).UnreceivedPackets(ctx, req.(*QueryUnreceivedPacketsRequest))
@@ -2263,7 +2262,7 @@ func _Query_UnreceivedAcks_Handler(srv interface{}, ctx context.Context, dec fun
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/ibcgo.core.channel.v1.Query/UnreceivedAcks",
+ FullMethod: "/ibc.core.channel.v1.Query/UnreceivedAcks",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(QueryServer).UnreceivedAcks(ctx, req.(*QueryUnreceivedAcksRequest))
@@ -2281,7 +2280,7 @@ func _Query_NextSequenceReceive_Handler(srv interface{}, ctx context.Context, de
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/ibcgo.core.channel.v1.Query/NextSequenceReceive",
+ FullMethod: "/ibc.core.channel.v1.Query/NextSequenceReceive",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(QueryServer).NextSequenceReceive(ctx, req.(*QueryNextSequenceReceiveRequest))
@@ -2290,7 +2289,7 @@ func _Query_NextSequenceReceive_Handler(srv interface{}, ctx context.Context, de
}
var _Query_serviceDesc = grpc.ServiceDesc{
- ServiceName: "ibcgo.core.channel.v1.Query",
+ ServiceName: "ibc.core.channel.v1.Query",
HandlerType: (*QueryServer)(nil),
Methods: []grpc.MethodDesc{
{
@@ -2347,7 +2346,7 @@ var _Query_serviceDesc = grpc.ServiceDesc{
},
},
Streams: []grpc.StreamDesc{},
- Metadata: "ibcgo/core/channel/v1/query.proto",
+ Metadata: "ibc/core/channel/v1/query.proto",
}
func (m *QueryChannelRequest) Marshal() (dAtA []byte, err error) {
diff --git a/modules/core/04-channel/types/query.pb.gw.go b/modules/core/04-channel/types/query.pb.gw.go
index 58be2aca..9e59c03b 100644
--- a/modules/core/04-channel/types/query.pb.gw.go
+++ b/modules/core/04-channel/types/query.pb.gw.go
@@ -1,5 +1,5 @@
// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
-// source: ibcgo/core/channel/v1/query.proto
+// source: ibc/core/channel/v1/query.proto
/*
Package types is a reverse proxy.
diff --git a/modules/core/04-channel/types/tx.pb.go b/modules/core/04-channel/types/tx.pb.go
index 70131bfe..00b9bc08 100644
--- a/modules/core/04-channel/types/tx.pb.go
+++ b/modules/core/04-channel/types/tx.pb.go
@@ -1,5 +1,5 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: ibcgo/core/channel/v1/tx.proto
+// source: ibc/core/channel/v1/tx.proto
package types
@@ -41,7 +41,7 @@ func (m *MsgChannelOpenInit) Reset() { *m = MsgChannelOpenInit{} }
func (m *MsgChannelOpenInit) String() string { return proto.CompactTextString(m) }
func (*MsgChannelOpenInit) ProtoMessage() {}
func (*MsgChannelOpenInit) Descriptor() ([]byte, []int) {
- return fileDescriptor_4f707a6c6f551009, []int{0}
+ return fileDescriptor_bc4637e0ac3fc7b7, []int{0}
}
func (m *MsgChannelOpenInit) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -78,7 +78,7 @@ func (m *MsgChannelOpenInitResponse) Reset() { *m = MsgChannelOpenInitRe
func (m *MsgChannelOpenInitResponse) String() string { return proto.CompactTextString(m) }
func (*MsgChannelOpenInitResponse) ProtoMessage() {}
func (*MsgChannelOpenInitResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_4f707a6c6f551009, []int{1}
+ return fileDescriptor_bc4637e0ac3fc7b7, []int{1}
}
func (m *MsgChannelOpenInitResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -125,7 +125,7 @@ func (m *MsgChannelOpenTry) Reset() { *m = MsgChannelOpenTry{} }
func (m *MsgChannelOpenTry) String() string { return proto.CompactTextString(m) }
func (*MsgChannelOpenTry) ProtoMessage() {}
func (*MsgChannelOpenTry) Descriptor() ([]byte, []int) {
- return fileDescriptor_4f707a6c6f551009, []int{2}
+ return fileDescriptor_bc4637e0ac3fc7b7, []int{2}
}
func (m *MsgChannelOpenTry) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -162,7 +162,7 @@ func (m *MsgChannelOpenTryResponse) Reset() { *m = MsgChannelOpenTryResp
func (m *MsgChannelOpenTryResponse) String() string { return proto.CompactTextString(m) }
func (*MsgChannelOpenTryResponse) ProtoMessage() {}
func (*MsgChannelOpenTryResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_4f707a6c6f551009, []int{3}
+ return fileDescriptor_bc4637e0ac3fc7b7, []int{3}
}
func (m *MsgChannelOpenTryResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -207,7 +207,7 @@ func (m *MsgChannelOpenAck) Reset() { *m = MsgChannelOpenAck{} }
func (m *MsgChannelOpenAck) String() string { return proto.CompactTextString(m) }
func (*MsgChannelOpenAck) ProtoMessage() {}
func (*MsgChannelOpenAck) Descriptor() ([]byte, []int) {
- return fileDescriptor_4f707a6c6f551009, []int{4}
+ return fileDescriptor_bc4637e0ac3fc7b7, []int{4}
}
func (m *MsgChannelOpenAck) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -244,7 +244,7 @@ func (m *MsgChannelOpenAckResponse) Reset() { *m = MsgChannelOpenAckResp
func (m *MsgChannelOpenAckResponse) String() string { return proto.CompactTextString(m) }
func (*MsgChannelOpenAckResponse) ProtoMessage() {}
func (*MsgChannelOpenAckResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_4f707a6c6f551009, []int{5}
+ return fileDescriptor_bc4637e0ac3fc7b7, []int{5}
}
func (m *MsgChannelOpenAckResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -287,7 +287,7 @@ func (m *MsgChannelOpenConfirm) Reset() { *m = MsgChannelOpenConfirm{} }
func (m *MsgChannelOpenConfirm) String() string { return proto.CompactTextString(m) }
func (*MsgChannelOpenConfirm) ProtoMessage() {}
func (*MsgChannelOpenConfirm) Descriptor() ([]byte, []int) {
- return fileDescriptor_4f707a6c6f551009, []int{6}
+ return fileDescriptor_bc4637e0ac3fc7b7, []int{6}
}
func (m *MsgChannelOpenConfirm) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -325,7 +325,7 @@ func (m *MsgChannelOpenConfirmResponse) Reset() { *m = MsgChannelOpenCon
func (m *MsgChannelOpenConfirmResponse) String() string { return proto.CompactTextString(m) }
func (*MsgChannelOpenConfirmResponse) ProtoMessage() {}
func (*MsgChannelOpenConfirmResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_4f707a6c6f551009, []int{7}
+ return fileDescriptor_bc4637e0ac3fc7b7, []int{7}
}
func (m *MsgChannelOpenConfirmResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -366,7 +366,7 @@ func (m *MsgChannelCloseInit) Reset() { *m = MsgChannelCloseInit{} }
func (m *MsgChannelCloseInit) String() string { return proto.CompactTextString(m) }
func (*MsgChannelCloseInit) ProtoMessage() {}
func (*MsgChannelCloseInit) Descriptor() ([]byte, []int) {
- return fileDescriptor_4f707a6c6f551009, []int{8}
+ return fileDescriptor_bc4637e0ac3fc7b7, []int{8}
}
func (m *MsgChannelCloseInit) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -403,7 +403,7 @@ func (m *MsgChannelCloseInitResponse) Reset() { *m = MsgChannelCloseInit
func (m *MsgChannelCloseInitResponse) String() string { return proto.CompactTextString(m) }
func (*MsgChannelCloseInitResponse) ProtoMessage() {}
func (*MsgChannelCloseInitResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_4f707a6c6f551009, []int{9}
+ return fileDescriptor_bc4637e0ac3fc7b7, []int{9}
}
func (m *MsgChannelCloseInitResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -446,7 +446,7 @@ func (m *MsgChannelCloseConfirm) Reset() { *m = MsgChannelCloseConfirm{}
func (m *MsgChannelCloseConfirm) String() string { return proto.CompactTextString(m) }
func (*MsgChannelCloseConfirm) ProtoMessage() {}
func (*MsgChannelCloseConfirm) Descriptor() ([]byte, []int) {
- return fileDescriptor_4f707a6c6f551009, []int{10}
+ return fileDescriptor_bc4637e0ac3fc7b7, []int{10}
}
func (m *MsgChannelCloseConfirm) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -484,7 +484,7 @@ func (m *MsgChannelCloseConfirmResponse) Reset() { *m = MsgChannelCloseC
func (m *MsgChannelCloseConfirmResponse) String() string { return proto.CompactTextString(m) }
func (*MsgChannelCloseConfirmResponse) ProtoMessage() {}
func (*MsgChannelCloseConfirmResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_4f707a6c6f551009, []int{11}
+ return fileDescriptor_bc4637e0ac3fc7b7, []int{11}
}
func (m *MsgChannelCloseConfirmResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -525,7 +525,7 @@ func (m *MsgRecvPacket) Reset() { *m = MsgRecvPacket{} }
func (m *MsgRecvPacket) String() string { return proto.CompactTextString(m) }
func (*MsgRecvPacket) ProtoMessage() {}
func (*MsgRecvPacket) Descriptor() ([]byte, []int) {
- return fileDescriptor_4f707a6c6f551009, []int{12}
+ return fileDescriptor_bc4637e0ac3fc7b7, []int{12}
}
func (m *MsgRecvPacket) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -562,7 +562,7 @@ func (m *MsgRecvPacketResponse) Reset() { *m = MsgRecvPacketResponse{} }
func (m *MsgRecvPacketResponse) String() string { return proto.CompactTextString(m) }
func (*MsgRecvPacketResponse) ProtoMessage() {}
func (*MsgRecvPacketResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_4f707a6c6f551009, []int{13}
+ return fileDescriptor_bc4637e0ac3fc7b7, []int{13}
}
func (m *MsgRecvPacketResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -604,7 +604,7 @@ func (m *MsgTimeout) Reset() { *m = MsgTimeout{} }
func (m *MsgTimeout) String() string { return proto.CompactTextString(m) }
func (*MsgTimeout) ProtoMessage() {}
func (*MsgTimeout) Descriptor() ([]byte, []int) {
- return fileDescriptor_4f707a6c6f551009, []int{14}
+ return fileDescriptor_bc4637e0ac3fc7b7, []int{14}
}
func (m *MsgTimeout) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -641,7 +641,7 @@ func (m *MsgTimeoutResponse) Reset() { *m = MsgTimeoutResponse{} }
func (m *MsgTimeoutResponse) String() string { return proto.CompactTextString(m) }
func (*MsgTimeoutResponse) ProtoMessage() {}
func (*MsgTimeoutResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_4f707a6c6f551009, []int{15}
+ return fileDescriptor_bc4637e0ac3fc7b7, []int{15}
}
func (m *MsgTimeoutResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -684,7 +684,7 @@ func (m *MsgTimeoutOnClose) Reset() { *m = MsgTimeoutOnClose{} }
func (m *MsgTimeoutOnClose) String() string { return proto.CompactTextString(m) }
func (*MsgTimeoutOnClose) ProtoMessage() {}
func (*MsgTimeoutOnClose) Descriptor() ([]byte, []int) {
- return fileDescriptor_4f707a6c6f551009, []int{16}
+ return fileDescriptor_bc4637e0ac3fc7b7, []int{16}
}
func (m *MsgTimeoutOnClose) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -721,7 +721,7 @@ func (m *MsgTimeoutOnCloseResponse) Reset() { *m = MsgTimeoutOnCloseResp
func (m *MsgTimeoutOnCloseResponse) String() string { return proto.CompactTextString(m) }
func (*MsgTimeoutOnCloseResponse) ProtoMessage() {}
func (*MsgTimeoutOnCloseResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_4f707a6c6f551009, []int{17}
+ return fileDescriptor_bc4637e0ac3fc7b7, []int{17}
}
func (m *MsgTimeoutOnCloseResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -763,7 +763,7 @@ func (m *MsgAcknowledgement) Reset() { *m = MsgAcknowledgement{} }
func (m *MsgAcknowledgement) String() string { return proto.CompactTextString(m) }
func (*MsgAcknowledgement) ProtoMessage() {}
func (*MsgAcknowledgement) Descriptor() ([]byte, []int) {
- return fileDescriptor_4f707a6c6f551009, []int{18}
+ return fileDescriptor_bc4637e0ac3fc7b7, []int{18}
}
func (m *MsgAcknowledgement) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -800,7 +800,7 @@ func (m *MsgAcknowledgementResponse) Reset() { *m = MsgAcknowledgementRe
func (m *MsgAcknowledgementResponse) String() string { return proto.CompactTextString(m) }
func (*MsgAcknowledgementResponse) ProtoMessage() {}
func (*MsgAcknowledgementResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_4f707a6c6f551009, []int{19}
+ return fileDescriptor_bc4637e0ac3fc7b7, []int{19}
}
func (m *MsgAcknowledgementResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -830,103 +830,103 @@ func (m *MsgAcknowledgementResponse) XXX_DiscardUnknown() {
var xxx_messageInfo_MsgAcknowledgementResponse proto.InternalMessageInfo
func init() {
- proto.RegisterType((*MsgChannelOpenInit)(nil), "ibcgo.core.channel.v1.MsgChannelOpenInit")
- proto.RegisterType((*MsgChannelOpenInitResponse)(nil), "ibcgo.core.channel.v1.MsgChannelOpenInitResponse")
- proto.RegisterType((*MsgChannelOpenTry)(nil), "ibcgo.core.channel.v1.MsgChannelOpenTry")
- proto.RegisterType((*MsgChannelOpenTryResponse)(nil), "ibcgo.core.channel.v1.MsgChannelOpenTryResponse")
- proto.RegisterType((*MsgChannelOpenAck)(nil), "ibcgo.core.channel.v1.MsgChannelOpenAck")
- proto.RegisterType((*MsgChannelOpenAckResponse)(nil), "ibcgo.core.channel.v1.MsgChannelOpenAckResponse")
- proto.RegisterType((*MsgChannelOpenConfirm)(nil), "ibcgo.core.channel.v1.MsgChannelOpenConfirm")
- proto.RegisterType((*MsgChannelOpenConfirmResponse)(nil), "ibcgo.core.channel.v1.MsgChannelOpenConfirmResponse")
- proto.RegisterType((*MsgChannelCloseInit)(nil), "ibcgo.core.channel.v1.MsgChannelCloseInit")
- proto.RegisterType((*MsgChannelCloseInitResponse)(nil), "ibcgo.core.channel.v1.MsgChannelCloseInitResponse")
- proto.RegisterType((*MsgChannelCloseConfirm)(nil), "ibcgo.core.channel.v1.MsgChannelCloseConfirm")
- proto.RegisterType((*MsgChannelCloseConfirmResponse)(nil), "ibcgo.core.channel.v1.MsgChannelCloseConfirmResponse")
- proto.RegisterType((*MsgRecvPacket)(nil), "ibcgo.core.channel.v1.MsgRecvPacket")
- proto.RegisterType((*MsgRecvPacketResponse)(nil), "ibcgo.core.channel.v1.MsgRecvPacketResponse")
- proto.RegisterType((*MsgTimeout)(nil), "ibcgo.core.channel.v1.MsgTimeout")
- proto.RegisterType((*MsgTimeoutResponse)(nil), "ibcgo.core.channel.v1.MsgTimeoutResponse")
- proto.RegisterType((*MsgTimeoutOnClose)(nil), "ibcgo.core.channel.v1.MsgTimeoutOnClose")
- proto.RegisterType((*MsgTimeoutOnCloseResponse)(nil), "ibcgo.core.channel.v1.MsgTimeoutOnCloseResponse")
- proto.RegisterType((*MsgAcknowledgement)(nil), "ibcgo.core.channel.v1.MsgAcknowledgement")
- proto.RegisterType((*MsgAcknowledgementResponse)(nil), "ibcgo.core.channel.v1.MsgAcknowledgementResponse")
-}
-
-func init() { proto.RegisterFile("ibcgo/core/channel/v1/tx.proto", fileDescriptor_4f707a6c6f551009) }
-
-var fileDescriptor_4f707a6c6f551009 = []byte{
- // 1134 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x58, 0xcd, 0x6e, 0xe3, 0x54,
- 0x14, 0xce, 0x5f, 0xd3, 0xf6, 0xb4, 0x4c, 0x5b, 0xa7, 0x3f, 0x19, 0x67, 0x6a, 0x77, 0x0c, 0x8b,
- 0x0c, 0x4c, 0x93, 0x49, 0x29, 0x20, 0x0d, 0x12, 0x52, 0x52, 0x09, 0x31, 0x42, 0x65, 0x46, 0xa6,
- 0x80, 0x34, 0x42, 0x0a, 0xe9, 0xcd, 0x1d, 0xd7, 0x4a, 0xe2, 0x1b, 0x6c, 0x27, 0x34, 0xe2, 0x05,
- 0x58, 0xb2, 0x60, 0xc5, 0x02, 0x8d, 0xc4, 0x9a, 0x05, 0x12, 0x0f, 0x31, 0xcb, 0xd9, 0xf1, 0xb3,
- 0xb0, 0x50, 0xbb, 0x61, 0xed, 0x27, 0x40, 0xbe, 0xbe, 0x76, 0x9c, 0xc4, 0x6e, 0x9d, 0x0e, 0x29,
- 0xdd, 0xd9, 0xe7, 0x7c, 0xf7, 0x9c, 0x73, 0xbf, 0xef, 0xf8, 0xf8, 0xda, 0x20, 0xa8, 0xc7, 0x48,
- 0x21, 0x65, 0x44, 0x74, 0x5c, 0x46, 0x27, 0x0d, 0x4d, 0xc3, 0xed, 0x72, 0xbf, 0x52, 0x36, 0x4f,
- 0x4b, 0x5d, 0x9d, 0x98, 0x84, 0xdb, 0xa0, 0xfe, 0x92, 0xe3, 0x2f, 0x31, 0x7f, 0xa9, 0x5f, 0xe1,
- 0xd7, 0x15, 0xa2, 0x10, 0x8a, 0x28, 0x3b, 0x57, 0x2e, 0x98, 0xbf, 0x1b, 0x0c, 0xd6, 0x56, 0xb1,
- 0x66, 0x3a, 0xb1, 0xdc, 0x2b, 0x06, 0x79, 0x3d, 0x3c, 0x9f, 0x17, 0x9a, 0x82, 0xa4, 0x9f, 0x93,
- 0xc0, 0x1d, 0x1a, 0xca, 0x81, 0x6b, 0x7c, 0xdc, 0xc5, 0xda, 0x23, 0x4d, 0x35, 0xb9, 0xb7, 0x60,
- 0xbe, 0x4b, 0x74, 0xb3, 0xae, 0x36, 0xf3, 0xc9, 0x9d, 0x64, 0x71, 0xb1, 0xc6, 0xd9, 0x96, 0x78,
- 0x6b, 0xd0, 0xe8, 0xb4, 0x1f, 0x4a, 0xcc, 0x21, 0xc9, 0x59, 0xe7, 0xea, 0x51, 0x93, 0xfb, 0x00,
- 0xe6, 0x59, 0xd0, 0x7c, 0x6a, 0x27, 0x59, 0x5c, 0xda, 0x13, 0x4a, 0xa1, 0x5b, 0x29, 0xb1, 0x2c,
- 0xb5, 0xcc, 0x0b, 0x4b, 0x4c, 0xc8, 0xde, 0x22, 0x6e, 0x13, 0xb2, 0x86, 0xaa, 0x68, 0x58, 0xcf,
- 0xa7, 0x9d, 0x5c, 0x32, 0xbb, 0x7b, 0xb8, 0xf0, 0xdd, 0x73, 0x31, 0xf1, 0xcf, 0x73, 0x31, 0x21,
- 0xdd, 0x01, 0x7e, 0xb2, 0x48, 0x19, 0x1b, 0x5d, 0xa2, 0x19, 0x58, 0xfa, 0x2b, 0x0d, 0x6b, 0xa3,
- 0xee, 0x23, 0x7d, 0x30, 0xdd, 0x16, 0x3e, 0x81, 0x5c, 0x57, 0xc7, 0x7d, 0x95, 0xf4, 0x8c, 0x3a,
- 0x2b, 0xcb, 0x59, 0x98, 0xa2, 0x0b, 0x05, 0xdb, 0x12, 0x79, 0xb6, 0x70, 0x12, 0x24, 0xc9, 0x6b,
- 0x9e, 0x95, 0x55, 0x30, 0x4a, 0x49, 0xfa, 0x2a, 0x94, 0xc8, 0xb0, 0x8e, 0x48, 0x4f, 0x33, 0xb1,
- 0xde, 0x6d, 0xe8, 0xe6, 0xa0, 0xde, 0xc7, 0xba, 0xa1, 0x12, 0x2d, 0x9f, 0xa1, 0x05, 0x89, 0xb6,
- 0x25, 0x16, 0xdc, 0x82, 0xc2, 0x50, 0x92, 0x9c, 0x0b, 0x9a, 0x3f, 0x77, 0xad, 0xdc, 0x3e, 0x40,
- 0x57, 0x27, 0xe4, 0x59, 0x5d, 0xd5, 0x54, 0x33, 0x3f, 0xb7, 0x93, 0x2c, 0x2e, 0xd7, 0x36, 0x6c,
- 0x4b, 0x5c, 0xf3, 0xb6, 0xe6, 0xf9, 0x24, 0x79, 0x91, 0xde, 0xd0, 0x4e, 0xf8, 0x12, 0x96, 0x5d,
- 0xcf, 0x09, 0x56, 0x95, 0x13, 0x33, 0x9f, 0xa5, 0xdb, 0xb9, 0x33, 0xb2, 0x1d, 0xb7, 0xeb, 0xfa,
- 0x95, 0xd2, 0x47, 0x14, 0x53, 0x2b, 0x38, 0x9b, 0xb1, 0x2d, 0x31, 0x17, 0x8c, 0xec, 0xae, 0x97,
- 0xe4, 0x25, 0x7a, 0xeb, 0x22, 0x03, 0xd2, 0xcf, 0x47, 0x48, 0x5f, 0x80, 0xdb, 0x13, 0xda, 0xfa,
- 0xca, 0xff, 0x39, 0xa1, 0x7c, 0x15, 0xb5, 0xa6, 0x53, 0x7e, 0x1f, 0x60, 0x42, 0xf0, 0x00, 0x2b,
- 0x41, 0x9d, 0x17, 0x91, 0xaf, 0xef, 0x53, 0xd8, 0x1a, 0x61, 0x3e, 0x10, 0x82, 0xf6, 0x70, 0x4d,
- 0xb2, 0x2d, 0x51, 0x08, 0x91, 0x28, 0x18, 0x6f, 0x23, 0xe8, 0x19, 0xf6, 0xce, 0x2c, 0xb4, 0xaf,
- 0x80, 0x2b, 0x69, 0xdd, 0xd4, 0x07, 0x4c, 0xfa, 0x75, 0xdb, 0x12, 0x57, 0x83, 0x02, 0x99, 0xfa,
- 0x40, 0x92, 0x17, 0xe8, 0xb5, 0xf3, 0xfc, 0xdc, 0x38, 0xe1, 0xab, 0xa8, 0xe5, 0x0b, 0xff, 0x4b,
- 0x0a, 0x36, 0x46, 0xbd, 0x07, 0x44, 0x7b, 0xa6, 0xea, 0x9d, 0xeb, 0x10, 0xdf, 0x27, 0xb3, 0x81,
- 0x5a, 0x54, 0xee, 0x10, 0x32, 0x1b, 0xa8, 0xe5, 0x91, 0xe9, 0xb4, 0xe4, 0x38, 0x99, 0x99, 0x19,
- 0x91, 0x39, 0x17, 0x41, 0xa6, 0x08, 0xdb, 0xa1, 0x74, 0xf9, 0x84, 0xfe, 0x98, 0x84, 0xdc, 0x10,
- 0x71, 0xd0, 0x26, 0x06, 0x9e, 0xfe, 0x45, 0x70, 0x35, 0x3a, 0x2f, 0x1f, 0xff, 0xdb, 0x50, 0x08,
- 0xa9, 0xcd, 0xaf, 0xfd, 0xd7, 0x14, 0x6c, 0x8e, 0xf9, 0xaf, 0xb1, 0x1b, 0x46, 0xc7, 0x6a, 0xfa,
- 0x8a, 0x63, 0xf5, 0xba, 0x1b, 0x62, 0x07, 0x84, 0x70, 0xca, 0x7c, 0x56, 0x7f, 0x48, 0xc1, 0x6b,
- 0x87, 0x86, 0x22, 0x63, 0xd4, 0x7f, 0xd2, 0x40, 0x2d, 0x6c, 0x72, 0xef, 0x43, 0xb6, 0x4b, 0xaf,
- 0x28, 0x97, 0x4b, 0x7b, 0xdb, 0x11, 0xef, 0x34, 0x17, 0xce, 0x5e, 0x69, 0x6c, 0x09, 0xf7, 0x21,
- 0xac, 0xba, 0x05, 0x23, 0xd2, 0xe9, 0xa8, 0x66, 0x07, 0x6b, 0x26, 0xa5, 0x78, 0xb9, 0x56, 0xb0,
- 0x2d, 0x71, 0x2b, 0xb8, 0xa5, 0x21, 0x42, 0x92, 0x57, 0xa8, 0xe9, 0xc0, 0xb7, 0x4c, 0x10, 0x97,
- 0x9e, 0x11, 0x71, 0x99, 0x08, 0xe2, 0xb6, 0xe8, 0xe0, 0x19, 0xb2, 0xe2, 0xf3, 0x65, 0xa5, 0x00,
- 0x0e, 0x0d, 0xe5, 0x48, 0xed, 0x60, 0xd2, 0xfb, 0xaf, 0xc8, 0xea, 0x69, 0x3a, 0x46, 0x58, 0xed,
- 0xe3, 0x66, 0x14, 0x59, 0x43, 0x84, 0x47, 0xd6, 0x67, 0xbe, 0x65, 0xc6, 0x64, 0x7d, 0x0c, 0x9c,
- 0x86, 0x4f, 0xcd, 0xba, 0x81, 0xbf, 0xee, 0x61, 0x0d, 0xe1, 0xba, 0x8e, 0x51, 0x9f, 0x12, 0x97,
- 0xa9, 0x6d, 0xdb, 0x96, 0x78, 0xdb, 0x8d, 0x30, 0x89, 0x91, 0xe4, 0x55, 0xc7, 0xf8, 0x29, 0xb3,
- 0x39, 0x64, 0xc6, 0x68, 0xd9, 0x75, 0x7a, 0x52, 0x65, 0xfc, 0xfa, 0xb4, 0xff, 0xe4, 0x1e, 0x01,
- 0x98, 0xf9, 0xb1, 0x46, 0x7b, 0xf9, 0x66, 0xb0, 0xff, 0x1e, 0x2c, 0xb1, 0x86, 0x76, 0x6a, 0x62,
- 0xa3, 0x61, 0xd3, 0xb6, 0x44, 0x6e, 0xa4, 0xdb, 0x1d, 0xa7, 0x24, 0xbb, 0x43, 0xc4, 0xad, 0x7e,
- 0xb6, 0xc3, 0x21, 0x5c, 0xb6, 0xb9, 0x57, 0x95, 0x2d, 0x7b, 0xe1, 0x7b, 0x7c, 0x54, 0x1f, 0x5f,
- 0xbd, 0xdf, 0x52, 0x54, 0xd4, 0x2a, 0x6a, 0x69, 0xe4, 0x9b, 0x36, 0x6e, 0x2a, 0x98, 0x3e, 0xe4,
- 0xaf, 0x24, 0x5f, 0x11, 0x56, 0x1a, 0xa3, 0xf1, 0x5c, 0xf5, 0xe4, 0x71, 0xf3, 0x50, 0x20, 0x67,
- 0x61, 0x33, 0x4a, 0x20, 0xea, 0xf4, 0x04, 0xaa, 0x3a, 0x37, 0xff, 0xfb, 0xf4, 0x76, 0xbf, 0x87,
- 0xc6, 0x58, 0xf3, 0x48, 0xdd, 0xfb, 0x7d, 0x01, 0xd2, 0x87, 0x86, 0xc2, 0x11, 0x58, 0x19, 0xff,
- 0xae, 0xbb, 0x17, 0x41, 0xe4, 0xe4, 0xd7, 0x15, 0x5f, 0x89, 0x0d, 0xf5, 0x12, 0x73, 0x6d, 0xb8,
- 0x35, 0xf6, 0x11, 0x56, 0x8c, 0x15, 0xe4, 0x48, 0x1f, 0xf0, 0x0f, 0xe2, 0x22, 0x23, 0xb2, 0x39,
- 0xa7, 0xac, 0x78, 0xd9, 0xaa, 0xa8, 0x15, 0x33, 0x5b, 0xe0, 0xc4, 0xc9, 0x9d, 0x02, 0x17, 0x72,
- 0xda, 0xbc, 0x1f, 0x2b, 0x0e, 0x43, 0xf3, 0xfb, 0xd3, 0xa0, 0xfd, 0xcc, 0x3a, 0xac, 0x4e, 0x1c,
- 0xcb, 0xde, 0xbc, 0x34, 0x92, 0x8f, 0xe5, 0xf7, 0xe2, 0x63, 0xfd, 0x9c, 0xdf, 0x42, 0x2e, 0xec,
- 0x38, 0xb5, 0x1b, 0x2f, 0x94, 0xb7, 0xdf, 0x77, 0xa6, 0x82, 0xfb, 0xc9, 0xbf, 0x02, 0x08, 0x9c,
- 0x3a, 0xde, 0x88, 0x0e, 0x32, 0x44, 0xf1, 0xf7, 0xe3, 0xa0, 0xfc, 0x0c, 0x5f, 0xc0, 0xbc, 0xf7,
- 0x9e, 0xbe, 0x1b, 0xbd, 0x90, 0x41, 0xf8, 0x7b, 0x97, 0x42, 0x82, 0x3d, 0x39, 0xf6, 0x26, 0x2a,
- 0x5e, 0xba, 0x98, 0x21, 0x2f, 0xea, 0xc9, 0xf0, 0xe9, 0xe9, 0x3c, 0xe0, 0xe3, 0x93, 0xf3, 0x82,
- 0x5a, 0xc7, 0xa0, 0x17, 0x3d, 0xe0, 0x11, 0x93, 0xa5, 0xf6, 0xe4, 0xc5, 0x99, 0x90, 0x7c, 0x79,
- 0x26, 0x24, 0xff, 0x3e, 0x13, 0x92, 0xdf, 0x9f, 0x0b, 0x89, 0x97, 0xe7, 0x42, 0xe2, 0x8f, 0x73,
- 0x21, 0xf1, 0xf4, 0x5d, 0x45, 0x35, 0x4f, 0x7a, 0xc7, 0x25, 0x44, 0x3a, 0x65, 0x44, 0x8c, 0x0e,
- 0x31, 0xca, 0xea, 0x31, 0xda, 0x55, 0x48, 0xb9, 0x43, 0x9a, 0xbd, 0x36, 0x36, 0xdc, 0xff, 0x50,
- 0x0f, 0xf6, 0x77, 0xbd, 0x5f, 0x51, 0xe6, 0xa0, 0x8b, 0x8d, 0xe3, 0x2c, 0xfd, 0x0d, 0xf5, 0xf6,
- 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x4d, 0xb3, 0x23, 0x46, 0x1d, 0x13, 0x00, 0x00,
+ proto.RegisterType((*MsgChannelOpenInit)(nil), "ibc.core.channel.v1.MsgChannelOpenInit")
+ proto.RegisterType((*MsgChannelOpenInitResponse)(nil), "ibc.core.channel.v1.MsgChannelOpenInitResponse")
+ proto.RegisterType((*MsgChannelOpenTry)(nil), "ibc.core.channel.v1.MsgChannelOpenTry")
+ proto.RegisterType((*MsgChannelOpenTryResponse)(nil), "ibc.core.channel.v1.MsgChannelOpenTryResponse")
+ proto.RegisterType((*MsgChannelOpenAck)(nil), "ibc.core.channel.v1.MsgChannelOpenAck")
+ proto.RegisterType((*MsgChannelOpenAckResponse)(nil), "ibc.core.channel.v1.MsgChannelOpenAckResponse")
+ proto.RegisterType((*MsgChannelOpenConfirm)(nil), "ibc.core.channel.v1.MsgChannelOpenConfirm")
+ proto.RegisterType((*MsgChannelOpenConfirmResponse)(nil), "ibc.core.channel.v1.MsgChannelOpenConfirmResponse")
+ proto.RegisterType((*MsgChannelCloseInit)(nil), "ibc.core.channel.v1.MsgChannelCloseInit")
+ proto.RegisterType((*MsgChannelCloseInitResponse)(nil), "ibc.core.channel.v1.MsgChannelCloseInitResponse")
+ proto.RegisterType((*MsgChannelCloseConfirm)(nil), "ibc.core.channel.v1.MsgChannelCloseConfirm")
+ proto.RegisterType((*MsgChannelCloseConfirmResponse)(nil), "ibc.core.channel.v1.MsgChannelCloseConfirmResponse")
+ proto.RegisterType((*MsgRecvPacket)(nil), "ibc.core.channel.v1.MsgRecvPacket")
+ proto.RegisterType((*MsgRecvPacketResponse)(nil), "ibc.core.channel.v1.MsgRecvPacketResponse")
+ proto.RegisterType((*MsgTimeout)(nil), "ibc.core.channel.v1.MsgTimeout")
+ proto.RegisterType((*MsgTimeoutResponse)(nil), "ibc.core.channel.v1.MsgTimeoutResponse")
+ proto.RegisterType((*MsgTimeoutOnClose)(nil), "ibc.core.channel.v1.MsgTimeoutOnClose")
+ proto.RegisterType((*MsgTimeoutOnCloseResponse)(nil), "ibc.core.channel.v1.MsgTimeoutOnCloseResponse")
+ proto.RegisterType((*MsgAcknowledgement)(nil), "ibc.core.channel.v1.MsgAcknowledgement")
+ proto.RegisterType((*MsgAcknowledgementResponse)(nil), "ibc.core.channel.v1.MsgAcknowledgementResponse")
+}
+
+func init() { proto.RegisterFile("ibc/core/channel/v1/tx.proto", fileDescriptor_bc4637e0ac3fc7b7) }
+
+var fileDescriptor_bc4637e0ac3fc7b7 = []byte{
+ // 1124 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x58, 0xcd, 0x6e, 0xdb, 0x46,
+ 0x10, 0xd6, 0x8f, 0x23, 0xdb, 0x63, 0x37, 0xb6, 0x29, 0xff, 0x28, 0x94, 0x2d, 0xba, 0x3c, 0x24,
+ 0x42, 0x8a, 0x48, 0xb1, 0x63, 0xb4, 0x68, 0xd0, 0x8b, 0x64, 0xa0, 0x68, 0x50, 0xb8, 0x09, 0x18,
+ 0xb7, 0x07, 0xa3, 0x80, 0x20, 0xad, 0x36, 0x14, 0x21, 0x89, 0xab, 0x92, 0x94, 0x12, 0xbd, 0x41,
+ 0x8f, 0x39, 0xf7, 0x94, 0x9e, 0x7b, 0x48, 0x1f, 0x23, 0xc7, 0x9c, 0xda, 0xa2, 0x07, 0xa2, 0xb0,
+ 0x2f, 0x3d, 0xf3, 0x09, 0x0a, 0xee, 0x2e, 0x29, 0x4a, 0x22, 0x2b, 0x2a, 0xa9, 0xdc, 0xdc, 0x96,
+ 0x33, 0xdf, 0xce, 0xce, 0x7e, 0xdf, 0x70, 0x76, 0x49, 0xd8, 0xd7, 0x1a, 0xa8, 0x8c, 0x88, 0x81,
+ 0xcb, 0xa8, 0x55, 0xd7, 0x75, 0xdc, 0x29, 0x0f, 0x8e, 0xca, 0xd6, 0x8b, 0x52, 0xcf, 0x20, 0x16,
+ 0x11, 0xb2, 0x5a, 0x03, 0x95, 0x5c, 0x6f, 0x89, 0x7b, 0x4b, 0x83, 0x23, 0x71, 0x5b, 0x25, 0x2a,
+ 0xa1, 0xfe, 0xb2, 0x3b, 0x62, 0x50, 0x51, 0x1a, 0x05, 0xea, 0x68, 0x58, 0xb7, 0xdc, 0x38, 0x6c,
+ 0xc4, 0x01, 0x1f, 0x87, 0xad, 0xe4, 0x85, 0xa5, 0x10, 0xf9, 0xe7, 0x24, 0x08, 0x67, 0xa6, 0x7a,
+ 0xca, 0x8c, 0x8f, 0x7b, 0x58, 0x7f, 0xa4, 0x6b, 0x96, 0xf0, 0x09, 0x2c, 0xf7, 0x88, 0x61, 0xd5,
+ 0xb4, 0x66, 0x2e, 0x79, 0x98, 0x2c, 0xae, 0x56, 0x05, 0xc7, 0x96, 0x6e, 0x0e, 0xeb, 0xdd, 0xce,
+ 0x43, 0x99, 0x3b, 0x64, 0x25, 0xe3, 0x8e, 0x1e, 0x35, 0x85, 0x2f, 0x60, 0x99, 0x07, 0xcd, 0xa5,
+ 0x0e, 0x93, 0xc5, 0xb5, 0xe3, 0xfd, 0x52, 0xc8, 0x26, 0x4a, 0x7c, 0x8d, 0xea, 0xd2, 0x1b, 0x5b,
+ 0x4a, 0x28, 0xde, 0x14, 0x61, 0x17, 0x32, 0xa6, 0xa6, 0xea, 0xd8, 0xc8, 0xa5, 0xdd, 0x95, 0x14,
+ 0xfe, 0xf4, 0x70, 0xe5, 0xc7, 0x57, 0x52, 0xe2, 0xef, 0x57, 0x52, 0x42, 0xde, 0x07, 0x71, 0x3a,
+ 0x45, 0x05, 0x9b, 0x3d, 0xa2, 0x9b, 0x58, 0xfe, 0x2d, 0x0d, 0x5b, 0xe3, 0xee, 0x73, 0x63, 0x38,
+ 0xdf, 0x06, 0xbe, 0x81, 0x6c, 0xcf, 0xc0, 0x03, 0x8d, 0xf4, 0xcd, 0x1a, 0x4f, 0xcb, 0x9d, 0x98,
+ 0xa2, 0x13, 0x0b, 0x8e, 0x2d, 0x89, 0x7c, 0xe2, 0x34, 0x48, 0x56, 0xb6, 0x3c, 0x2b, 0xcf, 0x60,
+ 0x9c, 0x90, 0xf4, 0xfc, 0x84, 0x28, 0xb0, 0x8d, 0x48, 0x5f, 0xb7, 0xb0, 0xd1, 0xab, 0x1b, 0xd6,
+ 0xb0, 0x36, 0xc0, 0x86, 0xa9, 0x11, 0x3d, 0xb7, 0x44, 0xd3, 0x91, 0x1c, 0x5b, 0xca, 0xb3, 0x74,
+ 0xc2, 0x50, 0xb2, 0x92, 0x0d, 0x9a, 0xbf, 0x63, 0x56, 0xe1, 0x04, 0xa0, 0x67, 0x10, 0xf2, 0xac,
+ 0xa6, 0xe9, 0x9a, 0x95, 0xbb, 0x71, 0x98, 0x2c, 0xae, 0x57, 0x77, 0x1c, 0x5b, 0xda, 0xf2, 0x36,
+ 0xe6, 0xf9, 0x64, 0x65, 0x95, 0x3e, 0xd0, 0x2a, 0xb8, 0x80, 0x75, 0xe6, 0x69, 0x61, 0x4d, 0x6d,
+ 0x59, 0xb9, 0x0c, 0xdd, 0x8c, 0x18, 0xd8, 0x0c, 0xab, 0xb6, 0xc1, 0x51, 0xe9, 0x2b, 0x8a, 0xa8,
+ 0xe6, 0xdd, 0xad, 0x38, 0xb6, 0x94, 0x0d, 0xc6, 0x65, 0xb3, 0x65, 0x65, 0x8d, 0x3e, 0x32, 0x64,
+ 0x40, 0xf6, 0xe5, 0x08, 0xd9, 0xf3, 0x70, 0x6b, 0x4a, 0x57, 0x5f, 0xf5, 0xdf, 0xa7, 0x54, 0xaf,
+ 0xa0, 0xf6, 0x7c, 0xaa, 0x9f, 0x00, 0x4c, 0x89, 0x1d, 0xe0, 0x24, 0xa8, 0xf1, 0x2a, 0xf2, 0xb5,
+ 0xbd, 0x80, 0xbd, 0x31, 0xde, 0x03, 0x21, 0x68, 0xfd, 0x56, 0x65, 0xc7, 0x96, 0x0a, 0x21, 0x02,
+ 0x05, 0xe3, 0xed, 0x04, 0x3d, 0xa3, 0xba, 0x59, 0x84, 0xf2, 0x47, 0xc0, 0x04, 0xad, 0x59, 0xc6,
+ 0x90, 0x0b, 0xbf, 0xed, 0xd8, 0xd2, 0x66, 0x50, 0x20, 0xcb, 0x18, 0xca, 0xca, 0x0a, 0x1d, 0xbb,
+ 0xef, 0xce, 0x07, 0x26, 0x7b, 0x05, 0xb5, 0x7d, 0xd9, 0x7f, 0x49, 0xc1, 0xce, 0xb8, 0xf7, 0x94,
+ 0xe8, 0xcf, 0x34, 0xa3, 0x7b, 0x1d, 0xd2, 0xfb, 0x54, 0xd6, 0x51, 0x9b, 0x8a, 0x1d, 0x42, 0x65,
+ 0x1d, 0xb5, 0x3d, 0x2a, 0xdd, 0x82, 0x9c, 0xa4, 0x72, 0x69, 0x21, 0x54, 0xde, 0x88, 0xa0, 0x52,
+ 0x82, 0x83, 0x50, 0xb2, 0x7c, 0x3a, 0x7f, 0x4a, 0x42, 0x76, 0x84, 0x38, 0xed, 0x10, 0x13, 0xcf,
+ 0xdf, 0xfe, 0xdf, 0x8d, 0xcc, 0xd9, 0x6d, 0xff, 0x00, 0xf2, 0x21, 0xb9, 0xf9, 0xb9, 0xbf, 0x4e,
+ 0xc1, 0xee, 0x84, 0xff, 0x1a, 0x6b, 0x61, 0xbc, 0xa1, 0xa6, 0xdf, 0xb1, 0xa1, 0x5e, 0x6f, 0x39,
+ 0x1c, 0x42, 0x21, 0x9c, 0x30, 0x9f, 0xd3, 0x97, 0x29, 0xf8, 0xe8, 0xcc, 0x54, 0x15, 0x8c, 0x06,
+ 0x4f, 0xea, 0xa8, 0x8d, 0x2d, 0xe1, 0x73, 0xc8, 0xf4, 0xe8, 0x88, 0x32, 0xb9, 0x76, 0x9c, 0x0f,
+ 0x3d, 0xc9, 0x18, 0x98, 0x1f, 0x64, 0x7c, 0x82, 0xf0, 0x25, 0x6c, 0xb2, 0x74, 0x11, 0xe9, 0x76,
+ 0x35, 0xab, 0x8b, 0x75, 0x8b, 0xd2, 0xbb, 0x5e, 0xcd, 0x3b, 0xb6, 0xb4, 0x17, 0xdc, 0xd0, 0x08,
+ 0x21, 0x2b, 0x1b, 0xd4, 0x74, 0xea, 0x5b, 0xa6, 0x48, 0x4b, 0x2f, 0x84, 0xb4, 0xa5, 0x08, 0xd2,
+ 0xf6, 0x68, 0xc3, 0x19, 0x31, 0xe2, 0x73, 0xf5, 0x67, 0x0a, 0xe0, 0xcc, 0x54, 0xcf, 0xb5, 0x2e,
+ 0x26, 0xfd, 0xff, 0x86, 0xa8, 0xbe, 0x6e, 0x60, 0x84, 0xb5, 0x01, 0x6e, 0x46, 0x11, 0x35, 0x42,
+ 0x78, 0x44, 0x7d, 0xeb, 0x5b, 0x16, 0x4a, 0xd4, 0xd7, 0x20, 0xe8, 0xf8, 0x85, 0x55, 0x33, 0xf1,
+ 0x0f, 0x7d, 0xac, 0x23, 0x5c, 0x33, 0x30, 0x1a, 0x50, 0xd2, 0x96, 0xaa, 0x07, 0x8e, 0x2d, 0xdd,
+ 0x62, 0x11, 0xa6, 0x31, 0xb2, 0xb2, 0xe9, 0x1a, 0x9f, 0x72, 0x9b, 0x4b, 0x64, 0x8c, 0x52, 0xdd,
+ 0xa6, 0xb7, 0x52, 0xce, 0xed, 0xa8, 0x5d, 0xb1, 0x43, 0x9f, 0x9b, 0x1f, 0xeb, 0xb4, 0x86, 0x3f,
+ 0x04, 0xe6, 0x3f, 0x83, 0x35, 0x5e, 0xc8, 0x6e, 0x46, 0xbc, 0x1d, 0xec, 0x3a, 0xb6, 0x24, 0x8c,
+ 0x55, 0xb9, 0xeb, 0x94, 0x15, 0xd6, 0x38, 0x58, 0xee, 0x8b, 0x6c, 0x08, 0xe1, 0x92, 0xdd, 0x78,
+ 0x5f, 0xc9, 0x32, 0xff, 0x7a, 0x6e, 0x8f, 0x6b, 0xe3, 0x2b, 0xf7, 0x6b, 0x8a, 0x0a, 0x5a, 0x41,
+ 0x6d, 0x9d, 0x3c, 0xef, 0xe0, 0xa6, 0x8a, 0xe9, 0xab, 0xfd, 0x1e, 0xd2, 0x15, 0x61, 0xa3, 0x3e,
+ 0x1e, 0x8d, 0x29, 0xa7, 0x4c, 0x9a, 0x47, 0xe2, 0xb8, 0x13, 0x9b, 0x51, 0xe2, 0x50, 0xa7, 0x27,
+ 0x4e, 0xc5, 0x7d, 0xf8, 0x9f, 0xbb, 0x35, 0xfb, 0xea, 0x99, 0x60, 0xcc, 0x23, 0xf4, 0xf8, 0xf5,
+ 0x0a, 0xa4, 0xcf, 0x4c, 0x55, 0x68, 0xc3, 0xc6, 0xe4, 0xb7, 0xdb, 0x9d, 0x50, 0x12, 0xa7, 0xbf,
+ 0xa0, 0xc4, 0x72, 0x4c, 0xa0, 0xb7, 0xa8, 0xd0, 0x82, 0x9b, 0x13, 0x9f, 0x59, 0xb7, 0x63, 0x84,
+ 0x38, 0x37, 0x86, 0x62, 0x29, 0x1e, 0x2e, 0x62, 0x25, 0xf7, 0x26, 0x15, 0x67, 0xa5, 0x0a, 0x6a,
+ 0xc7, 0x5a, 0x29, 0x70, 0xa3, 0x14, 0x2c, 0x10, 0x42, 0x6e, 0x93, 0x77, 0x63, 0x44, 0xe1, 0x58,
+ 0xf1, 0x38, 0x3e, 0xd6, 0x5f, 0x55, 0x87, 0xcd, 0xa9, 0x4b, 0x57, 0x71, 0x46, 0x1c, 0x1f, 0x29,
+ 0xde, 0x8f, 0x8b, 0xf4, 0xd7, 0x7b, 0x0e, 0xd9, 0xd0, 0x8b, 0x52, 0x9c, 0x40, 0xde, 0x3e, 0x1f,
+ 0xcc, 0x01, 0xf6, 0x17, 0xfe, 0x1e, 0x20, 0x70, 0x9b, 0x90, 0xa3, 0x42, 0x8c, 0x30, 0xe2, 0xdd,
+ 0xd9, 0x18, 0x3f, 0xfa, 0x53, 0x58, 0xf6, 0xce, 0x5f, 0x29, 0x6a, 0x1a, 0x07, 0x88, 0x77, 0x66,
+ 0x00, 0x82, 0xb5, 0x37, 0x71, 0xc2, 0xdc, 0x9e, 0x31, 0x95, 0xe3, 0xa2, 0x6b, 0x2f, 0xbc, 0x2b,
+ 0xba, 0x2f, 0xef, 0x64, 0x47, 0x8c, 0xcc, 0x72, 0x02, 0x18, 0xfd, 0xf2, 0x46, 0x74, 0x8c, 0xea,
+ 0x93, 0x37, 0x97, 0x85, 0xe4, 0xdb, 0xcb, 0x42, 0xf2, 0xaf, 0xcb, 0x42, 0xf2, 0xe5, 0x55, 0x21,
+ 0xf1, 0xf6, 0xaa, 0x90, 0xf8, 0xe3, 0xaa, 0x90, 0xb8, 0xf8, 0x54, 0xd5, 0xac, 0x56, 0xbf, 0x51,
+ 0x42, 0xa4, 0x5b, 0x46, 0xc4, 0xec, 0x12, 0xb3, 0xac, 0x35, 0xd0, 0x3d, 0x95, 0x94, 0xbb, 0xa4,
+ 0xd9, 0xef, 0x60, 0x93, 0xfd, 0x43, 0xba, 0x7f, 0x72, 0xcf, 0xfb, 0x8d, 0x64, 0x0d, 0x7b, 0xd8,
+ 0x6c, 0x64, 0xe8, 0x2f, 0xa4, 0x07, 0xff, 0x04, 0x00, 0x00, 0xff, 0xff, 0x40, 0xf7, 0xc0, 0x94,
+ 0xd1, 0x12, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
@@ -974,7 +974,7 @@ func NewMsgClient(cc grpc1.ClientConn) MsgClient {
func (c *msgClient) ChannelOpenInit(ctx context.Context, in *MsgChannelOpenInit, opts ...grpc.CallOption) (*MsgChannelOpenInitResponse, error) {
out := new(MsgChannelOpenInitResponse)
- err := c.cc.Invoke(ctx, "/ibcgo.core.channel.v1.Msg/ChannelOpenInit", in, out, opts...)
+ err := c.cc.Invoke(ctx, "/ibc.core.channel.v1.Msg/ChannelOpenInit", in, out, opts...)
if err != nil {
return nil, err
}
@@ -983,7 +983,7 @@ func (c *msgClient) ChannelOpenInit(ctx context.Context, in *MsgChannelOpenInit,
func (c *msgClient) ChannelOpenTry(ctx context.Context, in *MsgChannelOpenTry, opts ...grpc.CallOption) (*MsgChannelOpenTryResponse, error) {
out := new(MsgChannelOpenTryResponse)
- err := c.cc.Invoke(ctx, "/ibcgo.core.channel.v1.Msg/ChannelOpenTry", in, out, opts...)
+ err := c.cc.Invoke(ctx, "/ibc.core.channel.v1.Msg/ChannelOpenTry", in, out, opts...)
if err != nil {
return nil, err
}
@@ -992,7 +992,7 @@ func (c *msgClient) ChannelOpenTry(ctx context.Context, in *MsgChannelOpenTry, o
func (c *msgClient) ChannelOpenAck(ctx context.Context, in *MsgChannelOpenAck, opts ...grpc.CallOption) (*MsgChannelOpenAckResponse, error) {
out := new(MsgChannelOpenAckResponse)
- err := c.cc.Invoke(ctx, "/ibcgo.core.channel.v1.Msg/ChannelOpenAck", in, out, opts...)
+ err := c.cc.Invoke(ctx, "/ibc.core.channel.v1.Msg/ChannelOpenAck", in, out, opts...)
if err != nil {
return nil, err
}
@@ -1001,7 +1001,7 @@ func (c *msgClient) ChannelOpenAck(ctx context.Context, in *MsgChannelOpenAck, o
func (c *msgClient) ChannelOpenConfirm(ctx context.Context, in *MsgChannelOpenConfirm, opts ...grpc.CallOption) (*MsgChannelOpenConfirmResponse, error) {
out := new(MsgChannelOpenConfirmResponse)
- err := c.cc.Invoke(ctx, "/ibcgo.core.channel.v1.Msg/ChannelOpenConfirm", in, out, opts...)
+ err := c.cc.Invoke(ctx, "/ibc.core.channel.v1.Msg/ChannelOpenConfirm", in, out, opts...)
if err != nil {
return nil, err
}
@@ -1010,7 +1010,7 @@ func (c *msgClient) ChannelOpenConfirm(ctx context.Context, in *MsgChannelOpenCo
func (c *msgClient) ChannelCloseInit(ctx context.Context, in *MsgChannelCloseInit, opts ...grpc.CallOption) (*MsgChannelCloseInitResponse, error) {
out := new(MsgChannelCloseInitResponse)
- err := c.cc.Invoke(ctx, "/ibcgo.core.channel.v1.Msg/ChannelCloseInit", in, out, opts...)
+ err := c.cc.Invoke(ctx, "/ibc.core.channel.v1.Msg/ChannelCloseInit", in, out, opts...)
if err != nil {
return nil, err
}
@@ -1019,7 +1019,7 @@ func (c *msgClient) ChannelCloseInit(ctx context.Context, in *MsgChannelCloseIni
func (c *msgClient) ChannelCloseConfirm(ctx context.Context, in *MsgChannelCloseConfirm, opts ...grpc.CallOption) (*MsgChannelCloseConfirmResponse, error) {
out := new(MsgChannelCloseConfirmResponse)
- err := c.cc.Invoke(ctx, "/ibcgo.core.channel.v1.Msg/ChannelCloseConfirm", in, out, opts...)
+ err := c.cc.Invoke(ctx, "/ibc.core.channel.v1.Msg/ChannelCloseConfirm", in, out, opts...)
if err != nil {
return nil, err
}
@@ -1028,7 +1028,7 @@ func (c *msgClient) ChannelCloseConfirm(ctx context.Context, in *MsgChannelClose
func (c *msgClient) RecvPacket(ctx context.Context, in *MsgRecvPacket, opts ...grpc.CallOption) (*MsgRecvPacketResponse, error) {
out := new(MsgRecvPacketResponse)
- err := c.cc.Invoke(ctx, "/ibcgo.core.channel.v1.Msg/RecvPacket", in, out, opts...)
+ err := c.cc.Invoke(ctx, "/ibc.core.channel.v1.Msg/RecvPacket", in, out, opts...)
if err != nil {
return nil, err
}
@@ -1037,7 +1037,7 @@ func (c *msgClient) RecvPacket(ctx context.Context, in *MsgRecvPacket, opts ...g
func (c *msgClient) Timeout(ctx context.Context, in *MsgTimeout, opts ...grpc.CallOption) (*MsgTimeoutResponse, error) {
out := new(MsgTimeoutResponse)
- err := c.cc.Invoke(ctx, "/ibcgo.core.channel.v1.Msg/Timeout", in, out, opts...)
+ err := c.cc.Invoke(ctx, "/ibc.core.channel.v1.Msg/Timeout", in, out, opts...)
if err != nil {
return nil, err
}
@@ -1046,7 +1046,7 @@ func (c *msgClient) Timeout(ctx context.Context, in *MsgTimeout, opts ...grpc.Ca
func (c *msgClient) TimeoutOnClose(ctx context.Context, in *MsgTimeoutOnClose, opts ...grpc.CallOption) (*MsgTimeoutOnCloseResponse, error) {
out := new(MsgTimeoutOnCloseResponse)
- err := c.cc.Invoke(ctx, "/ibcgo.core.channel.v1.Msg/TimeoutOnClose", in, out, opts...)
+ err := c.cc.Invoke(ctx, "/ibc.core.channel.v1.Msg/TimeoutOnClose", in, out, opts...)
if err != nil {
return nil, err
}
@@ -1055,7 +1055,7 @@ func (c *msgClient) TimeoutOnClose(ctx context.Context, in *MsgTimeoutOnClose, o
func (c *msgClient) Acknowledgement(ctx context.Context, in *MsgAcknowledgement, opts ...grpc.CallOption) (*MsgAcknowledgementResponse, error) {
out := new(MsgAcknowledgementResponse)
- err := c.cc.Invoke(ctx, "/ibcgo.core.channel.v1.Msg/Acknowledgement", in, out, opts...)
+ err := c.cc.Invoke(ctx, "/ibc.core.channel.v1.Msg/Acknowledgement", in, out, opts...)
if err != nil {
return nil, err
}
@@ -1136,7 +1136,7 @@ func _Msg_ChannelOpenInit_Handler(srv interface{}, ctx context.Context, dec func
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/ibcgo.core.channel.v1.Msg/ChannelOpenInit",
+ FullMethod: "/ibc.core.channel.v1.Msg/ChannelOpenInit",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(MsgServer).ChannelOpenInit(ctx, req.(*MsgChannelOpenInit))
@@ -1154,7 +1154,7 @@ func _Msg_ChannelOpenTry_Handler(srv interface{}, ctx context.Context, dec func(
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/ibcgo.core.channel.v1.Msg/ChannelOpenTry",
+ FullMethod: "/ibc.core.channel.v1.Msg/ChannelOpenTry",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(MsgServer).ChannelOpenTry(ctx, req.(*MsgChannelOpenTry))
@@ -1172,7 +1172,7 @@ func _Msg_ChannelOpenAck_Handler(srv interface{}, ctx context.Context, dec func(
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/ibcgo.core.channel.v1.Msg/ChannelOpenAck",
+ FullMethod: "/ibc.core.channel.v1.Msg/ChannelOpenAck",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(MsgServer).ChannelOpenAck(ctx, req.(*MsgChannelOpenAck))
@@ -1190,7 +1190,7 @@ func _Msg_ChannelOpenConfirm_Handler(srv interface{}, ctx context.Context, dec f
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/ibcgo.core.channel.v1.Msg/ChannelOpenConfirm",
+ FullMethod: "/ibc.core.channel.v1.Msg/ChannelOpenConfirm",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(MsgServer).ChannelOpenConfirm(ctx, req.(*MsgChannelOpenConfirm))
@@ -1208,7 +1208,7 @@ func _Msg_ChannelCloseInit_Handler(srv interface{}, ctx context.Context, dec fun
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/ibcgo.core.channel.v1.Msg/ChannelCloseInit",
+ FullMethod: "/ibc.core.channel.v1.Msg/ChannelCloseInit",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(MsgServer).ChannelCloseInit(ctx, req.(*MsgChannelCloseInit))
@@ -1226,7 +1226,7 @@ func _Msg_ChannelCloseConfirm_Handler(srv interface{}, ctx context.Context, dec
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/ibcgo.core.channel.v1.Msg/ChannelCloseConfirm",
+ FullMethod: "/ibc.core.channel.v1.Msg/ChannelCloseConfirm",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(MsgServer).ChannelCloseConfirm(ctx, req.(*MsgChannelCloseConfirm))
@@ -1244,7 +1244,7 @@ func _Msg_RecvPacket_Handler(srv interface{}, ctx context.Context, dec func(inte
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/ibcgo.core.channel.v1.Msg/RecvPacket",
+ FullMethod: "/ibc.core.channel.v1.Msg/RecvPacket",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(MsgServer).RecvPacket(ctx, req.(*MsgRecvPacket))
@@ -1262,7 +1262,7 @@ func _Msg_Timeout_Handler(srv interface{}, ctx context.Context, dec func(interfa
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/ibcgo.core.channel.v1.Msg/Timeout",
+ FullMethod: "/ibc.core.channel.v1.Msg/Timeout",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(MsgServer).Timeout(ctx, req.(*MsgTimeout))
@@ -1280,7 +1280,7 @@ func _Msg_TimeoutOnClose_Handler(srv interface{}, ctx context.Context, dec func(
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/ibcgo.core.channel.v1.Msg/TimeoutOnClose",
+ FullMethod: "/ibc.core.channel.v1.Msg/TimeoutOnClose",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(MsgServer).TimeoutOnClose(ctx, req.(*MsgTimeoutOnClose))
@@ -1298,7 +1298,7 @@ func _Msg_Acknowledgement_Handler(srv interface{}, ctx context.Context, dec func
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/ibcgo.core.channel.v1.Msg/Acknowledgement",
+ FullMethod: "/ibc.core.channel.v1.Msg/Acknowledgement",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(MsgServer).Acknowledgement(ctx, req.(*MsgAcknowledgement))
@@ -1307,7 +1307,7 @@ func _Msg_Acknowledgement_Handler(srv interface{}, ctx context.Context, dec func
}
var _Msg_serviceDesc = grpc.ServiceDesc{
- ServiceName: "ibcgo.core.channel.v1.Msg",
+ ServiceName: "ibc.core.channel.v1.Msg",
HandlerType: (*MsgServer)(nil),
Methods: []grpc.MethodDesc{
{
@@ -1352,7 +1352,7 @@ var _Msg_serviceDesc = grpc.ServiceDesc{
},
},
Streams: []grpc.StreamDesc{},
- Metadata: "ibcgo/core/channel/v1/tx.proto",
+ Metadata: "ibc/core/channel/v1/tx.proto",
}
func (m *MsgChannelOpenInit) Marshal() (dAtA []byte, err error) {
diff --git a/modules/core/23-commitment/types/codec.go b/modules/core/23-commitment/types/codec.go
index 931e629f..a8ebda9b 100644
--- a/modules/core/23-commitment/types/codec.go
+++ b/modules/core/23-commitment/types/codec.go
@@ -8,19 +8,19 @@ import (
// RegisterInterfaces registers the commitment interfaces to protobuf Any.
func RegisterInterfaces(registry codectypes.InterfaceRegistry) {
registry.RegisterInterface(
- "ibcgo.core.commitment.v1.Root",
+ "ibc.core.commitment.v1.Root",
(*exported.Root)(nil),
)
registry.RegisterInterface(
- "ibcgo.core.commitment.v1.Prefix",
+ "ibc.core.commitment.v1.Prefix",
(*exported.Prefix)(nil),
)
registry.RegisterInterface(
- "ibcgo.core.commitment.v1.Path",
+ "ibc.core.commitment.v1.Path",
(*exported.Path)(nil),
)
registry.RegisterInterface(
- "ibcgo.core.commitment.v1.Proof",
+ "ibc.core.commitment.v1.Proof",
(*exported.Proof)(nil),
)
diff --git a/modules/core/23-commitment/types/commitment.pb.go b/modules/core/23-commitment/types/commitment.pb.go
index 0c88037f..7d94a5c9 100644
--- a/modules/core/23-commitment/types/commitment.pb.go
+++ b/modules/core/23-commitment/types/commitment.pb.go
@@ -1,5 +1,5 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: ibcgo/core/commitment/v1/commitment.proto
+// source: ibc/core/commitment/v1/commitment.proto
package types
@@ -34,7 +34,7 @@ func (m *MerkleRoot) Reset() { *m = MerkleRoot{} }
func (m *MerkleRoot) String() string { return proto.CompactTextString(m) }
func (*MerkleRoot) ProtoMessage() {}
func (*MerkleRoot) Descriptor() ([]byte, []int) {
- return fileDescriptor_eb23d5444771a147, []int{0}
+ return fileDescriptor_7921d88972a41469, []int{0}
}
func (m *MerkleRoot) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -74,7 +74,7 @@ func (m *MerklePrefix) Reset() { *m = MerklePrefix{} }
func (m *MerklePrefix) String() string { return proto.CompactTextString(m) }
func (*MerklePrefix) ProtoMessage() {}
func (*MerklePrefix) Descriptor() ([]byte, []int) {
- return fileDescriptor_eb23d5444771a147, []int{1}
+ return fileDescriptor_7921d88972a41469, []int{1}
}
func (m *MerklePrefix) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -120,7 +120,7 @@ type MerklePath struct {
func (m *MerklePath) Reset() { *m = MerklePath{} }
func (*MerklePath) ProtoMessage() {}
func (*MerklePath) Descriptor() ([]byte, []int) {
- return fileDescriptor_eb23d5444771a147, []int{2}
+ return fileDescriptor_7921d88972a41469, []int{2}
}
func (m *MerklePath) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -169,7 +169,7 @@ func (m *MerkleProof) Reset() { *m = MerkleProof{} }
func (m *MerkleProof) String() string { return proto.CompactTextString(m) }
func (*MerkleProof) ProtoMessage() {}
func (*MerkleProof) Descriptor() ([]byte, []int) {
- return fileDescriptor_eb23d5444771a147, []int{3}
+ return fileDescriptor_7921d88972a41469, []int{3}
}
func (m *MerkleProof) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -206,40 +206,40 @@ func (m *MerkleProof) GetProofs() []*_go.CommitmentProof {
}
func init() {
- proto.RegisterType((*MerkleRoot)(nil), "ibcgo.core.commitment.v1.MerkleRoot")
- proto.RegisterType((*MerklePrefix)(nil), "ibcgo.core.commitment.v1.MerklePrefix")
- proto.RegisterType((*MerklePath)(nil), "ibcgo.core.commitment.v1.MerklePath")
- proto.RegisterType((*MerkleProof)(nil), "ibcgo.core.commitment.v1.MerkleProof")
+ proto.RegisterType((*MerkleRoot)(nil), "ibc.core.commitment.v1.MerkleRoot")
+ proto.RegisterType((*MerklePrefix)(nil), "ibc.core.commitment.v1.MerklePrefix")
+ proto.RegisterType((*MerklePath)(nil), "ibc.core.commitment.v1.MerklePath")
+ proto.RegisterType((*MerkleProof)(nil), "ibc.core.commitment.v1.MerkleProof")
}
func init() {
- proto.RegisterFile("ibcgo/core/commitment/v1/commitment.proto", fileDescriptor_eb23d5444771a147)
+ proto.RegisterFile("ibc/core/commitment/v1/commitment.proto", fileDescriptor_7921d88972a41469)
}
-var fileDescriptor_eb23d5444771a147 = []byte{
- // 338 bytes of a gzipped FileDescriptorProto
+var fileDescriptor_7921d88972a41469 = []byte{
+ // 337 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0x91, 0xbf, 0x4e, 0xeb, 0x30,
- 0x14, 0xc6, 0x13, 0xdd, 0xaa, 0x97, 0xba, 0x95, 0x10, 0x29, 0xa0, 0xaa, 0x43, 0x8a, 0x32, 0xa0,
- 0x32, 0xd4, 0x56, 0x5b, 0x16, 0x2a, 0xb1, 0x04, 0x56, 0xa4, 0x2a, 0x6c, 0x2c, 0x28, 0x31, 0x4e,
- 0x62, 0xb5, 0xe9, 0x89, 0x62, 0xb7, 0x22, 0x6f, 0xc0, 0xc8, 0xc8, 0xc8, 0xe3, 0x30, 0x76, 0x64,
- 0xaa, 0x50, 0xf3, 0x06, 0x7d, 0x02, 0x14, 0x9b, 0x42, 0xb6, 0x73, 0x7c, 0x7e, 0xe7, 0x8f, 0xbf,
- 0x0f, 0x5d, 0xf0, 0x80, 0x46, 0x40, 0x28, 0x64, 0x8c, 0x50, 0x48, 0x12, 0x2e, 0x13, 0xb6, 0x90,
- 0x64, 0x35, 0xac, 0x64, 0x38, 0xcd, 0x40, 0x82, 0xd5, 0x51, 0x28, 0x2e, 0x51, 0x5c, 0x29, 0xae,
- 0x86, 0xdd, 0xe3, 0x08, 0x22, 0x50, 0x10, 0x29, 0x23, 0xcd, 0x77, 0xdb, 0x14, 0x16, 0x21, 0x07,
- 0x92, 0x66, 0x00, 0xa1, 0xd0, 0x8f, 0xce, 0x39, 0x42, 0x77, 0x2c, 0x9b, 0xcd, 0x99, 0x07, 0x20,
- 0x2d, 0x0b, 0xd5, 0x62, 0x5f, 0xc4, 0x1d, 0xf3, 0xcc, 0xec, 0xb7, 0x3c, 0x15, 0x4f, 0x6a, 0x2f,
- 0xef, 0x3d, 0xc3, 0xb9, 0x45, 0x2d, 0xcd, 0x4d, 0x33, 0x16, 0xf2, 0x67, 0xeb, 0x12, 0xa1, 0x19,
- 0xcb, 0x1f, 0x53, 0x95, 0x69, 0xde, 0x3d, 0xd9, 0x6d, 0x7a, 0x47, 0xb9, 0x9f, 0xcc, 0x27, 0xce,
- 0x5f, 0xcd, 0xf1, 0x1a, 0x33, 0x96, 0xeb, 0x2e, 0xc7, 0xdd, 0x6f, 0x9b, 0xfa, 0x32, 0xb6, 0x30,
- 0x3a, 0x50, 0x9c, 0x2f, 0xcb, 0x8d, 0xff, 0xfa, 0x0d, 0xb7, 0xbd, 0xdb, 0xf4, 0x0e, 0x2b, 0x13,
- 0x7c, 0x19, 0x3b, 0xde, 0xff, 0xb2, 0xdf, 0x97, 0xf1, 0xa4, 0xf6, 0x56, 0x5e, 0x72, 0x8d, 0x9a,
- 0xfb, 0x4b, 0x00, 0x42, 0x0b, 0xa3, 0xba, 0xfe, 0x90, 0x1a, 0xd1, 0x1c, 0x9d, 0x62, 0x4e, 0xc5,
- 0x68, 0x8c, 0x6f, 0x7e, 0x15, 0x51, 0x9c, 0xf7, 0x43, 0xb9, 0xf7, 0x1f, 0x5b, 0xdb, 0x5c, 0x6f,
- 0x6d, 0xf3, 0x6b, 0x6b, 0x9b, 0xaf, 0x85, 0x6d, 0xac, 0x0b, 0xdb, 0xf8, 0x2c, 0x6c, 0xe3, 0xe1,
- 0x2a, 0xe2, 0x32, 0x5e, 0x06, 0xa5, 0x96, 0x84, 0x82, 0x48, 0x40, 0x10, 0x1e, 0xd0, 0x41, 0x04,
- 0x24, 0x81, 0xa7, 0xe5, 0x9c, 0x09, 0xed, 0xca, 0x68, 0x3c, 0xa8, 0x18, 0x23, 0xf3, 0x94, 0x89,
- 0xa0, 0xae, 0xc4, 0x1c, 0x7f, 0x07, 0x00, 0x00, 0xff, 0xff, 0x44, 0xcf, 0xe4, 0xd6, 0xbe, 0x01,
- 0x00, 0x00,
+ 0x14, 0xc6, 0x13, 0xdd, 0xaa, 0x97, 0xba, 0x95, 0x10, 0x29, 0x54, 0xa8, 0x43, 0x8a, 0x32, 0x40,
+ 0x97, 0xda, 0x6a, 0xcb, 0x42, 0x25, 0x96, 0xc0, 0x8a, 0x54, 0x85, 0x8d, 0x05, 0x25, 0xc6, 0x49,
+ 0xac, 0x36, 0x3d, 0x51, 0xec, 0x56, 0xe4, 0x0d, 0x18, 0x19, 0x19, 0x79, 0x1c, 0xc6, 0x8e, 0x4c,
+ 0x15, 0x6a, 0xde, 0xa0, 0x4f, 0x80, 0x62, 0x53, 0xc8, 0x76, 0x8e, 0xcf, 0xef, 0xfc, 0xf1, 0xf7,
+ 0xa1, 0x0b, 0x1e, 0x50, 0x42, 0x21, 0x63, 0x84, 0x42, 0x92, 0x70, 0x99, 0xb0, 0x85, 0x24, 0xab,
+ 0x61, 0x25, 0xc3, 0x69, 0x06, 0x12, 0xac, 0x0e, 0x0f, 0x28, 0x2e, 0x41, 0x5c, 0x29, 0xad, 0x86,
+ 0xdd, 0xe3, 0x08, 0x22, 0x50, 0x08, 0x29, 0x23, 0x4d, 0x77, 0xdb, 0x14, 0x16, 0x21, 0x07, 0x92,
+ 0x66, 0x00, 0xa1, 0xd0, 0x8f, 0xce, 0x39, 0x42, 0x77, 0x2c, 0x9b, 0xcd, 0x99, 0x07, 0x20, 0x2d,
+ 0x0b, 0xd5, 0x62, 0x5f, 0xc4, 0xa7, 0xe6, 0x99, 0xd9, 0x6f, 0x79, 0x2a, 0x9e, 0xd4, 0x5e, 0xde,
+ 0x7b, 0x86, 0x73, 0x8b, 0x5a, 0x9a, 0x9b, 0x66, 0x2c, 0xe4, 0xcf, 0xd6, 0x25, 0x42, 0x33, 0x96,
+ 0x3f, 0xa6, 0x2a, 0xd3, 0xbc, 0x7b, 0xb2, 0xdb, 0xf4, 0x8e, 0x72, 0x3f, 0x99, 0x4f, 0x9c, 0xbf,
+ 0x9a, 0xe3, 0x35, 0x66, 0x2c, 0xd7, 0x5d, 0x8e, 0xbb, 0xdf, 0x36, 0xf5, 0x65, 0x6c, 0x61, 0x74,
+ 0xa0, 0x38, 0x5f, 0x96, 0x1b, 0xff, 0xf5, 0x1b, 0x6e, 0x7b, 0xb7, 0xe9, 0x1d, 0x56, 0x26, 0xf8,
+ 0x32, 0x76, 0xbc, 0xff, 0x65, 0xbf, 0x2f, 0xe3, 0x49, 0xed, 0xad, 0xbc, 0xe4, 0x1a, 0x35, 0xf7,
+ 0x97, 0x00, 0x84, 0x16, 0x46, 0x75, 0xfd, 0x21, 0x35, 0xa2, 0x39, 0xea, 0x60, 0x4e, 0xc5, 0x68,
+ 0x8c, 0x6f, 0x7e, 0x15, 0x51, 0x9c, 0xf7, 0x43, 0xb9, 0xf7, 0x1f, 0x5b, 0xdb, 0x5c, 0x6f, 0x6d,
+ 0xf3, 0x6b, 0x6b, 0x9b, 0xaf, 0x85, 0x6d, 0xac, 0x0b, 0xdb, 0xf8, 0x2c, 0x6c, 0xe3, 0xe1, 0x2a,
+ 0xe2, 0x32, 0x5e, 0x06, 0xa5, 0x96, 0x84, 0x82, 0x48, 0x40, 0x10, 0x1e, 0xd0, 0x41, 0x04, 0x24,
+ 0x81, 0xa7, 0xe5, 0x9c, 0x09, 0xed, 0xc9, 0x68, 0x3c, 0xa8, 0xd8, 0x22, 0xf3, 0x94, 0x89, 0xa0,
+ 0xae, 0xc4, 0x1c, 0x7f, 0x07, 0x00, 0x00, 0xff, 0xff, 0x20, 0xd2, 0x3b, 0x32, 0xba, 0x01, 0x00,
+ 0x00,
}
func (m *MerkleRoot) Marshal() (dAtA []byte, err error) {
diff --git a/modules/core/types/genesis.pb.go b/modules/core/types/genesis.pb.go
index 8318bd26..a2601f85 100644
--- a/modules/core/types/genesis.pb.go
+++ b/modules/core/types/genesis.pb.go
@@ -1,5 +1,5 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: ibcgo/core/types/v1/genesis.proto
+// source: ibc/core/types/v1/genesis.proto
package types
@@ -40,7 +40,7 @@ func (m *GenesisState) Reset() { *m = GenesisState{} }
func (m *GenesisState) String() string { return proto.CompactTextString(m) }
func (*GenesisState) ProtoMessage() {}
func (*GenesisState) Descriptor() ([]byte, []int) {
- return fileDescriptor_f0cf35a95987cc01, []int{0}
+ return fileDescriptor_b9a49c5663e6fc59, []int{0}
}
func (m *GenesisState) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -91,33 +91,33 @@ func (m *GenesisState) GetChannelGenesis() types2.GenesisState {
}
func init() {
- proto.RegisterType((*GenesisState)(nil), "ibcgo.core.types.v1.GenesisState")
+ proto.RegisterType((*GenesisState)(nil), "ibc.core.types.v1.GenesisState")
}
-func init() { proto.RegisterFile("ibcgo/core/types/v1/genesis.proto", fileDescriptor_f0cf35a95987cc01) }
+func init() { proto.RegisterFile("ibc/core/types/v1/genesis.proto", fileDescriptor_b9a49c5663e6fc59) }
-var fileDescriptor_f0cf35a95987cc01 = []byte{
+var fileDescriptor_b9a49c5663e6fc59 = []byte{
// 322 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x92, 0xcb, 0x4a, 0x33, 0x31,
- 0x18, 0x86, 0x67, 0xfa, 0xc3, 0xbf, 0x18, 0xb5, 0xe2, 0x78, 0x40, 0x0b, 0xa6, 0x36, 0x85, 0x22,
- 0x88, 0x09, 0xd5, 0x9d, 0xcb, 0x82, 0xb8, 0xaf, 0x3b, 0x37, 0xd2, 0x89, 0x61, 0x1a, 0x98, 0xc9,
- 0x57, 0x9a, 0xb4, 0xd8, 0xbb, 0xf0, 0xb2, 0xba, 0xec, 0xd2, 0x55, 0x29, 0xed, 0x1d, 0x78, 0x05,
- 0xd2, 0x24, 0xb6, 0x19, 0xb2, 0x1b, 0xde, 0x79, 0xbe, 0xf7, 0xc9, 0x29, 0x69, 0x89, 0x8c, 0xe5,
- 0x40, 0x19, 0x8c, 0x39, 0xd5, 0xb3, 0x11, 0x57, 0x74, 0xda, 0xa5, 0x39, 0x97, 0x5c, 0x09, 0x45,
- 0x46, 0x63, 0xd0, 0x90, 0x9e, 0x1a, 0x84, 0x6c, 0x11, 0x62, 0x10, 0x32, 0xed, 0x36, 0xce, 0x72,
- 0xc8, 0xc1, 0xfc, 0xa7, 0xdb, 0x2f, 0x8b, 0x36, 0xb0, 0xd7, 0xc6, 0x0a, 0xc1, 0xa5, 0x0e, 0xea,
- 0x1a, 0x1d, 0x9f, 0x01, 0x29, 0x39, 0xd3, 0x02, 0x64, 0xc8, 0xb5, 0x7d, 0x6e, 0x38, 0x90, 0x92,
- 0x17, 0x01, 0x84, 0x57, 0xb5, 0xe4, 0xf0, 0xc5, 0x26, 0xaf, 0x7a, 0xa0, 0x79, 0x3a, 0x4c, 0xea,
- 0x56, 0xfc, 0xee, 0xc0, 0xcb, 0xf8, 0x26, 0xbe, 0x3d, 0x78, 0xc0, 0xc4, 0xdb, 0x85, 0x25, 0xc8,
- 0xb4, 0x4b, 0xfc, 0xd9, 0xde, 0xf5, 0x7c, 0xd9, 0x8c, 0x7e, 0x96, 0xcd, 0xf3, 0xd9, 0xa0, 0x2c,
- 0x9e, 0x70, 0xb5, 0x07, 0xf7, 0x8f, 0x6c, 0xe0, 0x46, 0xd2, 0xcf, 0x24, 0xdd, 0x2f, 0x7f, 0x67,
- 0xab, 0x19, 0x5b, 0xa7, 0x62, 0xdb, 0x51, 0x81, 0xb1, 0xe5, 0x8c, 0x57, 0xce, 0x18, 0xf4, 0xe1,
- 0xfe, 0xc9, 0x3e, 0xfc, 0x33, 0x17, 0xc9, 0xb1, 0x3b, 0x90, 0x9d, 0xf6, 0x9f, 0xd1, 0xb6, 0x2b,
- 0x5a, 0x8b, 0x04, 0x4e, 0xe4, 0x9c, 0x17, 0xce, 0x59, 0x6d, 0xc2, 0xfd, 0xba, 0x4b, 0xdc, 0x50,
- 0xef, 0x79, 0xbe, 0x46, 0xf1, 0x62, 0x8d, 0xe2, 0xd5, 0x1a, 0xc5, 0x5f, 0x1b, 0x14, 0x2d, 0x36,
- 0x28, 0xfa, 0xde, 0xa0, 0xe8, 0xed, 0x2e, 0x17, 0x7a, 0x38, 0xc9, 0x08, 0x83, 0x92, 0x32, 0x50,
- 0x25, 0x28, 0x2a, 0x32, 0x76, 0x9f, 0x03, 0x2d, 0xe1, 0x63, 0x52, 0x70, 0xe5, 0x3d, 0xab, 0xec,
- 0xbf, 0xb9, 0xb0, 0xc7, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xe0, 0x5d, 0xb8, 0x0a, 0x71, 0x02,
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x92, 0xc1, 0x4a, 0xeb, 0x40,
+ 0x14, 0x86, 0x93, 0x5e, 0xb8, 0x8b, 0xa8, 0x95, 0x06, 0x15, 0x2d, 0x38, 0x6d, 0x43, 0x17, 0x82,
+ 0x38, 0x43, 0x75, 0xe7, 0xb2, 0x20, 0xee, 0xe3, 0xce, 0x8d, 0x24, 0xe3, 0x98, 0x8e, 0x24, 0x73,
+ 0x4a, 0x67, 0x1a, 0xe8, 0x5b, 0xf8, 0x58, 0x5d, 0x76, 0x29, 0x2e, 0x8a, 0x24, 0x6f, 0xe0, 0x13,
+ 0x48, 0x33, 0x63, 0x92, 0x32, 0xbb, 0xf0, 0x9f, 0xef, 0xfc, 0xdf, 0x21, 0x89, 0x37, 0xe0, 0x31,
+ 0x25, 0x14, 0x16, 0x8c, 0xa8, 0xd5, 0x9c, 0x49, 0x92, 0x4f, 0x48, 0xc2, 0x04, 0x93, 0x5c, 0xe2,
+ 0xf9, 0x02, 0x14, 0xf8, 0x3d, 0x1e, 0x53, 0xbc, 0x03, 0x70, 0x05, 0xe0, 0x7c, 0xd2, 0x3f, 0x49,
+ 0x20, 0x81, 0x6a, 0x4a, 0x76, 0x4f, 0x1a, 0xec, 0x0f, 0xeb, 0x26, 0x9a, 0x72, 0x26, 0x94, 0x55,
+ 0xd5, 0x1f, 0x37, 0x04, 0x08, 0xc1, 0xa8, 0xe2, 0x20, 0x6c, 0x6a, 0xd4, 0x50, 0xb3, 0x48, 0x08,
+ 0x96, 0x5a, 0x48, 0xf0, 0xd5, 0xf1, 0x0e, 0x1f, 0x75, 0xf2, 0xa4, 0x22, 0xc5, 0xfc, 0x37, 0xaf,
+ 0xab, 0xa5, 0x2f, 0x06, 0x3c, 0x77, 0x87, 0xee, 0xd5, 0xc1, 0xed, 0x10, 0xd7, 0xd7, 0xeb, 0x39,
+ 0xce, 0x27, 0xb8, 0xbd, 0x39, 0xbd, 0x5c, 0x6f, 0x07, 0xce, 0xcf, 0x76, 0x70, 0xba, 0x8a, 0xb2,
+ 0xf4, 0x3e, 0xd8, 0x6f, 0x09, 0xc2, 0x23, 0x1d, 0x98, 0x15, 0x3f, 0xf7, 0xfc, 0xe6, 0xf4, 0xda,
+ 0xd5, 0xa9, 0x5c, 0xe3, 0x96, 0xab, 0x66, 0x2c, 0xdf, 0xc8, 0xf8, 0x2e, 0x8c, 0xcf, 0x6a, 0x0b,
+ 0xc2, 0x5e, 0x13, 0xfe, 0x79, 0xdf, 0xbd, 0x63, 0xf3, 0x32, 0x6a, 0xe9, 0xbf, 0x4a, 0x3a, 0x6a,
+ 0x49, 0x35, 0x60, 0x19, 0x91, 0x31, 0x9e, 0x19, 0xe3, 0x7e, 0x4f, 0x10, 0x76, 0x4d, 0x62, 0x96,
+ 0xa6, 0x0f, 0xeb, 0x02, 0xb9, 0x9b, 0x02, 0xb9, 0xdf, 0x05, 0x72, 0x3f, 0x4a, 0xe4, 0x6c, 0x4a,
+ 0xe4, 0x7c, 0x96, 0xc8, 0x79, 0xbe, 0x4e, 0xb8, 0x9a, 0x2d, 0x63, 0x4c, 0x21, 0x23, 0x14, 0x64,
+ 0x06, 0x92, 0xf0, 0x98, 0xde, 0x24, 0x40, 0x32, 0x78, 0x5d, 0xa6, 0x4c, 0xb6, 0x7e, 0xa4, 0xf8,
+ 0x7f, 0xf5, 0xa9, 0xee, 0x7e, 0x03, 0x00, 0x00, 0xff, 0xff, 0x98, 0x8f, 0xc1, 0x09, 0x61, 0x02,
0x00, 0x00,
}
diff --git a/modules/light-clients/06-solomachine/types/solomachine.pb.go b/modules/light-clients/06-solomachine/types/solomachine.pb.go
index 3374af0d..1bdca7b8 100644
--- a/modules/light-clients/06-solomachine/types/solomachine.pb.go
+++ b/modules/light-clients/06-solomachine/types/solomachine.pb.go
@@ -1,5 +1,5 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: ibcgo/lightclients/solomachine/v1/solomachine.proto
+// source: ibc/lightclients/solomachine/v1/solomachine.proto
package types
@@ -84,7 +84,7 @@ func (x DataType) String() string {
}
func (DataType) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_39862ff634781870, []int{0}
+ return fileDescriptor_6cc2ee18f7f86d4e, []int{0}
}
// ClientState defines a solo machine client that tracks the current consensus
@@ -104,7 +104,7 @@ func (m *ClientState) Reset() { *m = ClientState{} }
func (m *ClientState) String() string { return proto.CompactTextString(m) }
func (*ClientState) ProtoMessage() {}
func (*ClientState) Descriptor() ([]byte, []int) {
- return fileDescriptor_39862ff634781870, []int{0}
+ return fileDescriptor_6cc2ee18f7f86d4e, []int{0}
}
func (m *ClientState) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -150,7 +150,7 @@ func (m *ConsensusState) Reset() { *m = ConsensusState{} }
func (m *ConsensusState) String() string { return proto.CompactTextString(m) }
func (*ConsensusState) ProtoMessage() {}
func (*ConsensusState) Descriptor() ([]byte, []int) {
- return fileDescriptor_39862ff634781870, []int{1}
+ return fileDescriptor_6cc2ee18f7f86d4e, []int{1}
}
func (m *ConsensusState) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -193,7 +193,7 @@ func (m *Header) Reset() { *m = Header{} }
func (m *Header) String() string { return proto.CompactTextString(m) }
func (*Header) ProtoMessage() {}
func (*Header) Descriptor() ([]byte, []int) {
- return fileDescriptor_39862ff634781870, []int{2}
+ return fileDescriptor_6cc2ee18f7f86d4e, []int{2}
}
func (m *Header) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -235,7 +235,7 @@ func (m *Misbehaviour) Reset() { *m = Misbehaviour{} }
func (m *Misbehaviour) String() string { return proto.CompactTextString(m) }
func (*Misbehaviour) ProtoMessage() {}
func (*Misbehaviour) Descriptor() ([]byte, []int) {
- return fileDescriptor_39862ff634781870, []int{3}
+ return fileDescriptor_6cc2ee18f7f86d4e, []int{3}
}
func (m *Misbehaviour) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -268,7 +268,7 @@ var xxx_messageInfo_Misbehaviour proto.InternalMessageInfo
// signature.
type SignatureAndData struct {
Signature []byte `protobuf:"bytes,1,opt,name=signature,proto3" json:"signature,omitempty"`
- DataType DataType `protobuf:"varint,2,opt,name=data_type,json=dataType,proto3,enum=ibcgo.lightclients.solomachine.v1.DataType" json:"data_type,omitempty" yaml:"data_type"`
+ DataType DataType `protobuf:"varint,2,opt,name=data_type,json=dataType,proto3,enum=ibc.lightclients.solomachine.v1.DataType" json:"data_type,omitempty" yaml:"data_type"`
Data []byte `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"`
Timestamp uint64 `protobuf:"varint,4,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
}
@@ -277,7 +277,7 @@ func (m *SignatureAndData) Reset() { *m = SignatureAndData{} }
func (m *SignatureAndData) String() string { return proto.CompactTextString(m) }
func (*SignatureAndData) ProtoMessage() {}
func (*SignatureAndData) Descriptor() ([]byte, []int) {
- return fileDescriptor_39862ff634781870, []int{4}
+ return fileDescriptor_6cc2ee18f7f86d4e, []int{4}
}
func (m *SignatureAndData) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -317,7 +317,7 @@ func (m *TimestampedSignatureData) Reset() { *m = TimestampedSignatureDa
func (m *TimestampedSignatureData) String() string { return proto.CompactTextString(m) }
func (*TimestampedSignatureData) ProtoMessage() {}
func (*TimestampedSignatureData) Descriptor() ([]byte, []int) {
- return fileDescriptor_39862ff634781870, []int{5}
+ return fileDescriptor_6cc2ee18f7f86d4e, []int{5}
}
func (m *TimestampedSignatureData) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -352,7 +352,7 @@ type SignBytes struct {
Timestamp uint64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
Diversifier string `protobuf:"bytes,3,opt,name=diversifier,proto3" json:"diversifier,omitempty"`
// type of the data used
- DataType DataType `protobuf:"varint,4,opt,name=data_type,json=dataType,proto3,enum=ibcgo.lightclients.solomachine.v1.DataType" json:"data_type,omitempty" yaml:"data_type"`
+ DataType DataType `protobuf:"varint,4,opt,name=data_type,json=dataType,proto3,enum=ibc.lightclients.solomachine.v1.DataType" json:"data_type,omitempty" yaml:"data_type"`
// marshaled data
Data []byte `protobuf:"bytes,5,opt,name=data,proto3" json:"data,omitempty"`
}
@@ -361,7 +361,7 @@ func (m *SignBytes) Reset() { *m = SignBytes{} }
func (m *SignBytes) String() string { return proto.CompactTextString(m) }
func (*SignBytes) ProtoMessage() {}
func (*SignBytes) Descriptor() ([]byte, []int) {
- return fileDescriptor_39862ff634781870, []int{6}
+ return fileDescriptor_6cc2ee18f7f86d4e, []int{6}
}
func (m *SignBytes) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -402,7 +402,7 @@ func (m *HeaderData) Reset() { *m = HeaderData{} }
func (m *HeaderData) String() string { return proto.CompactTextString(m) }
func (*HeaderData) ProtoMessage() {}
func (*HeaderData) Descriptor() ([]byte, []int) {
- return fileDescriptor_39862ff634781870, []int{7}
+ return fileDescriptor_6cc2ee18f7f86d4e, []int{7}
}
func (m *HeaderData) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -441,7 +441,7 @@ func (m *ClientStateData) Reset() { *m = ClientStateData{} }
func (m *ClientStateData) String() string { return proto.CompactTextString(m) }
func (*ClientStateData) ProtoMessage() {}
func (*ClientStateData) Descriptor() ([]byte, []int) {
- return fileDescriptor_39862ff634781870, []int{8}
+ return fileDescriptor_6cc2ee18f7f86d4e, []int{8}
}
func (m *ClientStateData) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -481,7 +481,7 @@ func (m *ConsensusStateData) Reset() { *m = ConsensusStateData{} }
func (m *ConsensusStateData) String() string { return proto.CompactTextString(m) }
func (*ConsensusStateData) ProtoMessage() {}
func (*ConsensusStateData) Descriptor() ([]byte, []int) {
- return fileDescriptor_39862ff634781870, []int{9}
+ return fileDescriptor_6cc2ee18f7f86d4e, []int{9}
}
func (m *ConsensusStateData) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -521,7 +521,7 @@ func (m *ConnectionStateData) Reset() { *m = ConnectionStateData{} }
func (m *ConnectionStateData) String() string { return proto.CompactTextString(m) }
func (*ConnectionStateData) ProtoMessage() {}
func (*ConnectionStateData) Descriptor() ([]byte, []int) {
- return fileDescriptor_39862ff634781870, []int{10}
+ return fileDescriptor_6cc2ee18f7f86d4e, []int{10}
}
func (m *ConnectionStateData) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -561,7 +561,7 @@ func (m *ChannelStateData) Reset() { *m = ChannelStateData{} }
func (m *ChannelStateData) String() string { return proto.CompactTextString(m) }
func (*ChannelStateData) ProtoMessage() {}
func (*ChannelStateData) Descriptor() ([]byte, []int) {
- return fileDescriptor_39862ff634781870, []int{11}
+ return fileDescriptor_6cc2ee18f7f86d4e, []int{11}
}
func (m *ChannelStateData) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -601,7 +601,7 @@ func (m *PacketCommitmentData) Reset() { *m = PacketCommitmentData{} }
func (m *PacketCommitmentData) String() string { return proto.CompactTextString(m) }
func (*PacketCommitmentData) ProtoMessage() {}
func (*PacketCommitmentData) Descriptor() ([]byte, []int) {
- return fileDescriptor_39862ff634781870, []int{12}
+ return fileDescriptor_6cc2ee18f7f86d4e, []int{12}
}
func (m *PacketCommitmentData) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -655,7 +655,7 @@ func (m *PacketAcknowledgementData) Reset() { *m = PacketAcknowledgement
func (m *PacketAcknowledgementData) String() string { return proto.CompactTextString(m) }
func (*PacketAcknowledgementData) ProtoMessage() {}
func (*PacketAcknowledgementData) Descriptor() ([]byte, []int) {
- return fileDescriptor_39862ff634781870, []int{13}
+ return fileDescriptor_6cc2ee18f7f86d4e, []int{13}
}
func (m *PacketAcknowledgementData) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -708,7 +708,7 @@ func (m *PacketReceiptAbsenceData) Reset() { *m = PacketReceiptAbsenceDa
func (m *PacketReceiptAbsenceData) String() string { return proto.CompactTextString(m) }
func (*PacketReceiptAbsenceData) ProtoMessage() {}
func (*PacketReceiptAbsenceData) Descriptor() ([]byte, []int) {
- return fileDescriptor_39862ff634781870, []int{14}
+ return fileDescriptor_6cc2ee18f7f86d4e, []int{14}
}
func (m *PacketReceiptAbsenceData) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -755,7 +755,7 @@ func (m *NextSequenceRecvData) Reset() { *m = NextSequenceRecvData{} }
func (m *NextSequenceRecvData) String() string { return proto.CompactTextString(m) }
func (*NextSequenceRecvData) ProtoMessage() {}
func (*NextSequenceRecvData) Descriptor() ([]byte, []int) {
- return fileDescriptor_39862ff634781870, []int{15}
+ return fileDescriptor_6cc2ee18f7f86d4e, []int{15}
}
func (m *NextSequenceRecvData) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -799,117 +799,117 @@ func (m *NextSequenceRecvData) GetNextSeqRecv() uint64 {
}
func init() {
- proto.RegisterEnum("ibcgo.lightclients.solomachine.v1.DataType", DataType_name, DataType_value)
- proto.RegisterType((*ClientState)(nil), "ibcgo.lightclients.solomachine.v1.ClientState")
- proto.RegisterType((*ConsensusState)(nil), "ibcgo.lightclients.solomachine.v1.ConsensusState")
- proto.RegisterType((*Header)(nil), "ibcgo.lightclients.solomachine.v1.Header")
- proto.RegisterType((*Misbehaviour)(nil), "ibcgo.lightclients.solomachine.v1.Misbehaviour")
- proto.RegisterType((*SignatureAndData)(nil), "ibcgo.lightclients.solomachine.v1.SignatureAndData")
- proto.RegisterType((*TimestampedSignatureData)(nil), "ibcgo.lightclients.solomachine.v1.TimestampedSignatureData")
- proto.RegisterType((*SignBytes)(nil), "ibcgo.lightclients.solomachine.v1.SignBytes")
- proto.RegisterType((*HeaderData)(nil), "ibcgo.lightclients.solomachine.v1.HeaderData")
- proto.RegisterType((*ClientStateData)(nil), "ibcgo.lightclients.solomachine.v1.ClientStateData")
- proto.RegisterType((*ConsensusStateData)(nil), "ibcgo.lightclients.solomachine.v1.ConsensusStateData")
- proto.RegisterType((*ConnectionStateData)(nil), "ibcgo.lightclients.solomachine.v1.ConnectionStateData")
- proto.RegisterType((*ChannelStateData)(nil), "ibcgo.lightclients.solomachine.v1.ChannelStateData")
- proto.RegisterType((*PacketCommitmentData)(nil), "ibcgo.lightclients.solomachine.v1.PacketCommitmentData")
- proto.RegisterType((*PacketAcknowledgementData)(nil), "ibcgo.lightclients.solomachine.v1.PacketAcknowledgementData")
- proto.RegisterType((*PacketReceiptAbsenceData)(nil), "ibcgo.lightclients.solomachine.v1.PacketReceiptAbsenceData")
- proto.RegisterType((*NextSequenceRecvData)(nil), "ibcgo.lightclients.solomachine.v1.NextSequenceRecvData")
+ proto.RegisterEnum("ibc.lightclients.solomachine.v1.DataType", DataType_name, DataType_value)
+ proto.RegisterType((*ClientState)(nil), "ibc.lightclients.solomachine.v1.ClientState")
+ proto.RegisterType((*ConsensusState)(nil), "ibc.lightclients.solomachine.v1.ConsensusState")
+ proto.RegisterType((*Header)(nil), "ibc.lightclients.solomachine.v1.Header")
+ proto.RegisterType((*Misbehaviour)(nil), "ibc.lightclients.solomachine.v1.Misbehaviour")
+ proto.RegisterType((*SignatureAndData)(nil), "ibc.lightclients.solomachine.v1.SignatureAndData")
+ proto.RegisterType((*TimestampedSignatureData)(nil), "ibc.lightclients.solomachine.v1.TimestampedSignatureData")
+ proto.RegisterType((*SignBytes)(nil), "ibc.lightclients.solomachine.v1.SignBytes")
+ proto.RegisterType((*HeaderData)(nil), "ibc.lightclients.solomachine.v1.HeaderData")
+ proto.RegisterType((*ClientStateData)(nil), "ibc.lightclients.solomachine.v1.ClientStateData")
+ proto.RegisterType((*ConsensusStateData)(nil), "ibc.lightclients.solomachine.v1.ConsensusStateData")
+ proto.RegisterType((*ConnectionStateData)(nil), "ibc.lightclients.solomachine.v1.ConnectionStateData")
+ proto.RegisterType((*ChannelStateData)(nil), "ibc.lightclients.solomachine.v1.ChannelStateData")
+ proto.RegisterType((*PacketCommitmentData)(nil), "ibc.lightclients.solomachine.v1.PacketCommitmentData")
+ proto.RegisterType((*PacketAcknowledgementData)(nil), "ibc.lightclients.solomachine.v1.PacketAcknowledgementData")
+ proto.RegisterType((*PacketReceiptAbsenceData)(nil), "ibc.lightclients.solomachine.v1.PacketReceiptAbsenceData")
+ proto.RegisterType((*NextSequenceRecvData)(nil), "ibc.lightclients.solomachine.v1.NextSequenceRecvData")
}
func init() {
- proto.RegisterFile("ibcgo/lightclients/solomachine/v1/solomachine.proto", fileDescriptor_39862ff634781870)
-}
-
-var fileDescriptor_39862ff634781870 = []byte{
- // 1370 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x57, 0x5f, 0x8f, 0xda, 0xd6,
- 0x12, 0x5f, 0x13, 0xb2, 0x59, 0x86, 0x0d, 0xcb, 0x75, 0x48, 0xc2, 0x3a, 0x11, 0xf8, 0x3a, 0x52,
- 0xee, 0xde, 0x7b, 0x1b, 0xe8, 0x26, 0x6a, 0x14, 0x45, 0x55, 0x5b, 0x63, 0xdc, 0x84, 0x64, 0xd7,
- 0x4b, 0x8d, 0xb7, 0x6d, 0x22, 0xb5, 0x96, 0x31, 0x67, 0xc1, 0x0a, 0xf8, 0x50, 0x6c, 0x20, 0x54,
- 0xaa, 0x54, 0xf5, 0x29, 0x45, 0x7d, 0xe8, 0x17, 0x40, 0xaa, 0x5a, 0xf5, 0xbb, 0x44, 0xea, 0x4b,
- 0xa4, 0xbe, 0xf4, 0x09, 0xb5, 0xc9, 0x37, 0xe0, 0x13, 0x54, 0xf6, 0x39, 0x60, 0x9b, 0xcd, 0x92,
- 0xf4, 0xdf, 0xdb, 0x39, 0x33, 0xbf, 0xf9, 0xcd, 0x9c, 0x99, 0xf1, 0x9c, 0x63, 0xb8, 0x61, 0xd5,
- 0xcd, 0x26, 0x2e, 0xb6, 0xad, 0x66, 0xcb, 0x35, 0xdb, 0x16, 0xb2, 0x5d, 0xa7, 0xe8, 0xe0, 0x36,
- 0xee, 0x18, 0x66, 0xcb, 0xb2, 0x51, 0x71, 0xb0, 0x1b, 0xde, 0x16, 0xba, 0x3d, 0xec, 0x62, 0xf6,
- 0xdf, 0xbe, 0x51, 0x21, 0x6c, 0x54, 0x08, 0xa3, 0x06, 0xbb, 0xdc, 0x7f, 0x09, 0xaf, 0x89, 0x7b,
- 0xa8, 0x68, 0x62, 0xdb, 0x46, 0xa6, 0x6b, 0x61, 0xdb, 0xa3, 0x0b, 0x76, 0x84, 0x8d, 0xbb, 0x12,
- 0x86, 0xb6, 0x0c, 0xdb, 0x46, 0x6d, 0x1f, 0x47, 0x96, 0x14, 0x94, 0x69, 0xe2, 0x26, 0xf6, 0x97,
- 0x45, 0x6f, 0x45, 0xa5, 0xdb, 0x4d, 0x8c, 0x9b, 0x6d, 0x54, 0xf4, 0x77, 0xf5, 0xfe, 0x51, 0xd1,
- 0xb0, 0x47, 0x44, 0x25, 0xfc, 0x1c, 0x83, 0xa4, 0xe4, 0xc7, 0x56, 0x73, 0x0d, 0x17, 0xb1, 0x1c,
- 0x6c, 0x38, 0xe8, 0xb3, 0x3e, 0xb2, 0x4d, 0x94, 0x65, 0x78, 0x66, 0x27, 0xae, 0x2e, 0xf6, 0xac,
- 0x04, 0x5b, 0x47, 0x3d, 0xfc, 0x39, 0xb2, 0xf5, 0x05, 0x24, 0xe6, 0x41, 0x4a, 0xdc, 0x6c, 0x9a,
- 0xbf, 0x30, 0x32, 0x3a, 0xed, 0xdb, 0xc2, 0x12, 0x40, 0x50, 0x53, 0x44, 0x52, 0x9b, 0x93, 0x0c,
- 0x60, 0xcb, 0xc4, 0xb6, 0x83, 0x6c, 0xa7, 0xef, 0xe8, 0x8e, 0xe7, 0x33, 0x7b, 0x8a, 0x67, 0x76,
- 0x92, 0xd7, 0x77, 0x0b, 0xaf, 0x4c, 0x57, 0x41, 0x9a, 0x5b, 0xfa, 0xc1, 0x86, 0xfd, 0x2e, 0x71,
- 0x0a, 0x6a, 0xca, 0x8c, 0x60, 0x59, 0x04, 0x97, 0x8c, 0x76, 0x1b, 0x0f, 0xf5, 0x7e, 0xb7, 0x61,
- 0xb8, 0x48, 0x37, 0x8e, 0x5c, 0xd4, 0xd3, 0xbb, 0x3d, 0xdc, 0xc5, 0x8e, 0xd1, 0xce, 0xc6, 0x79,
- 0x66, 0x67, 0xa3, 0x74, 0x75, 0x36, 0xcd, 0x0b, 0x84, 0x70, 0x05, 0x58, 0x50, 0xb3, 0xbe, 0xf6,
- 0xd0, 0x57, 0x8a, 0x9e, 0xae, 0x4a, 0x55, 0xb7, 0xe3, 0x4f, 0xbe, 0xcb, 0xaf, 0x09, 0xdf, 0x33,
- 0x90, 0x8a, 0xc6, 0xca, 0xde, 0x03, 0xe8, 0xf6, 0xeb, 0x6d, 0xcb, 0xd4, 0x1f, 0xa1, 0x91, 0x9f,
- 0xda, 0xe4, 0xf5, 0x4c, 0x81, 0x14, 0xa6, 0x30, 0x2f, 0x4c, 0x41, 0xb4, 0x47, 0xa5, 0xf3, 0xb3,
- 0x69, 0xfe, 0x5f, 0x24, 0x88, 0xc0, 0x42, 0x50, 0x13, 0x64, 0x73, 0x1f, 0x8d, 0x58, 0x1e, 0x92,
- 0x0d, 0x6b, 0x80, 0x7a, 0x8e, 0x75, 0x64, 0xa1, 0x9e, 0x5f, 0x84, 0x84, 0x1a, 0x16, 0xb1, 0x97,
- 0x21, 0xe1, 0x5a, 0x1d, 0xe4, 0xb8, 0x46, 0xa7, 0xeb, 0xe7, 0x37, 0xae, 0x06, 0x02, 0x1a, 0xe4,
- 0x57, 0x31, 0x58, 0xbf, 0x8b, 0x8c, 0x06, 0xea, 0xad, 0xac, 0x7a, 0x84, 0x2a, 0xb6, 0x44, 0xe5,
- 0x69, 0x1d, 0xab, 0x69, 0x1b, 0x6e, 0xbf, 0x47, 0x0a, 0xb9, 0xa9, 0x06, 0x02, 0xf6, 0x10, 0x52,
- 0x36, 0x1a, 0xea, 0xa1, 0x83, 0xc7, 0x57, 0x1c, 0x7c, 0x7b, 0x36, 0xcd, 0x9f, 0x27, 0x07, 0x8f,
- 0x5a, 0x09, 0xea, 0xa6, 0x8d, 0x86, 0xd5, 0xc5, 0xf9, 0x25, 0xd8, 0xf2, 0x00, 0xe1, 0x1c, 0x9c,
- 0xf6, 0x72, 0x10, 0x6e, 0x88, 0x25, 0x80, 0xa0, 0x7a, 0x91, 0x94, 0x03, 0x01, 0x4d, 0xc2, 0x4f,
- 0x31, 0xd8, 0xdc, 0xb7, 0x9c, 0x3a, 0x6a, 0x19, 0x03, 0x0b, 0xf7, 0x7b, 0xec, 0x2e, 0x24, 0x48,
- 0xf3, 0xe9, 0x56, 0xc3, 0xcf, 0x45, 0xa2, 0x94, 0x99, 0x4d, 0xf3, 0x69, 0xda, 0x66, 0x73, 0x95,
- 0xa0, 0x6e, 0x90, 0x75, 0xa5, 0x11, 0xc9, 0x5e, 0x6c, 0x29, 0x7b, 0x3d, 0x38, 0xbb, 0x48, 0x87,
- 0x8e, 0xed, 0x79, 0xb3, 0xdf, 0x78, 0x8d, 0x66, 0xaf, 0xcd, 0xed, 0x44, 0xbb, 0x51, 0x36, 0x5c,
- 0xa3, 0x94, 0x9d, 0x4d, 0xf3, 0x19, 0x12, 0x47, 0x84, 0x53, 0x50, 0x37, 0x17, 0xfb, 0x03, 0x7b,
- 0xc9, 0xa7, 0x3b, 0xc4, 0x34, 0xe9, 0x7f, 0x9f, 0x4f, 0x77, 0x88, 0xc3, 0x3e, 0xb5, 0x21, 0xa6,
- 0xd9, 0x7c, 0xca, 0x40, 0x7a, 0x99, 0x22, 0xda, 0x22, 0xcc, 0x72, 0x8b, 0x7c, 0x0a, 0x89, 0x86,
- 0xe1, 0x1a, 0xba, 0x3b, 0xea, 0x92, 0xec, 0xa5, 0xae, 0xff, 0xff, 0x35, 0x02, 0xf5, 0x98, 0xb5,
- 0x51, 0x17, 0x85, 0x8b, 0xb3, 0xe0, 0x11, 0xd4, 0x8d, 0x06, 0xd5, 0xb3, 0x2c, 0xc4, 0xbd, 0x35,
- 0xed, 0x4d, 0x7f, 0x1d, 0x6d, 0xe9, 0xf8, 0xcb, 0xbf, 0x8e, 0x2f, 0x19, 0xc8, 0x6a, 0x73, 0x19,
- 0x6a, 0x2c, 0x4e, 0xe5, 0x1f, 0xe9, 0x3d, 0x48, 0x05, 0xd9, 0xf0, 0xe9, 0xfd, 0x73, 0x85, 0x3b,
- 0x38, 0xaa, 0x17, 0xd4, 0xa0, 0x24, 0xe5, 0x63, 0x21, 0xc4, 0x5e, 0x1e, 0xc2, 0x6f, 0x0c, 0x24,
- 0x3c, 0xbf, 0xa5, 0x91, 0x8b, 0x9c, 0xbf, 0xf0, 0x8d, 0x2e, 0x8d, 0x8b, 0x53, 0xc7, 0xc7, 0x45,
- 0xa4, 0x08, 0xf1, 0x7f, 0xae, 0x08, 0xa7, 0x83, 0x22, 0xd0, 0x33, 0xfe, 0xc8, 0x00, 0x90, 0x21,
- 0xe4, 0xa7, 0x65, 0x0f, 0x92, 0xf4, 0xd3, 0x7f, 0xe5, 0x98, 0xbc, 0x30, 0x9b, 0xe6, 0xd9, 0xc8,
- 0xb4, 0xa0, 0x73, 0x92, 0x8c, 0x8a, 0x13, 0xe6, 0x44, 0xec, 0x4f, 0xce, 0x89, 0x2f, 0x60, 0x2b,
- 0x74, 0x4d, 0xfa, 0xb1, 0xb2, 0x10, 0xef, 0x1a, 0x6e, 0x8b, 0xb6, 0xb4, 0xbf, 0x66, 0xab, 0xb0,
- 0x49, 0x47, 0x04, 0xb9, 0xda, 0x62, 0x2b, 0x0e, 0x70, 0x71, 0x36, 0xcd, 0x9f, 0x8b, 0x8c, 0x15,
- 0x7a, 0x75, 0x25, 0xcd, 0xc0, 0x13, 0x75, 0xff, 0x35, 0x03, 0x6c, 0xf4, 0x42, 0x39, 0x31, 0x84,
- 0x07, 0xc7, 0x2f, 0xd8, 0x55, 0x51, 0xfc, 0x81, 0x3b, 0x94, 0xc6, 0xf2, 0x18, 0xce, 0x49, 0x8b,
- 0xc7, 0xc9, 0xea, 0x58, 0xee, 0x00, 0x04, 0xef, 0x18, 0x1a, 0xc6, 0x7f, 0x68, 0x63, 0x79, 0x0f,
- 0x99, 0x42, 0xe8, 0x95, 0x43, 0xae, 0x77, 0xba, 0x93, 0xed, 0x86, 0x1a, 0x32, 0xa5, 0x9e, 0x8f,
- 0x20, 0x2d, 0x91, 0xe7, 0xce, 0x6a, 0xb7, 0xb7, 0xe0, 0x0c, 0x7d, 0x16, 0x51, 0x9f, 0xb9, 0x88,
- 0x4f, 0xfa, 0x62, 0xf2, 0x1c, 0x92, 0xa5, 0x3a, 0x87, 0x53, 0x3f, 0xf7, 0x20, 0x53, 0x35, 0xcc,
- 0x47, 0xc8, 0x95, 0x70, 0xa7, 0x63, 0xb9, 0x1d, 0x64, 0xbb, 0x27, 0xfa, 0xca, 0x79, 0x47, 0x9c,
- 0xa3, 0x7c, 0x77, 0x9b, 0x6a, 0x48, 0x22, 0x3c, 0x80, 0x6d, 0xc2, 0x25, 0x9a, 0x8f, 0x6c, 0x3c,
- 0x6c, 0xa3, 0x46, 0x13, 0xad, 0x24, 0xdc, 0x81, 0x2d, 0x23, 0x0a, 0xa5, 0xac, 0xcb, 0x62, 0xa1,
- 0x00, 0x59, 0x42, 0xad, 0x22, 0x13, 0x59, 0x5d, 0x57, 0xac, 0x3b, 0xde, 0x34, 0x38, 0x89, 0x59,
- 0x68, 0x41, 0x46, 0x41, 0x8f, 0xdd, 0xf9, 0x53, 0x4c, 0x45, 0xe6, 0xe0, 0xc4, 0x28, 0xde, 0x86,
- 0xb3, 0x36, 0x7a, 0xec, 0x7a, 0x0f, 0x39, 0xbd, 0x87, 0xcc, 0x01, 0x7d, 0xe9, 0x85, 0xae, 0x83,
- 0x88, 0x5a, 0x50, 0x93, 0x36, 0xa1, 0xf6, 0x58, 0xff, 0xf7, 0x4d, 0x1c, 0x36, 0xe6, 0xc3, 0x81,
- 0xbd, 0x05, 0x57, 0xca, 0xa2, 0x26, 0xea, 0xda, 0x83, 0xaa, 0xac, 0x1f, 0x2a, 0x15, 0xa5, 0xa2,
- 0x55, 0xc4, 0xbd, 0xca, 0x43, 0xb9, 0xac, 0x1f, 0x2a, 0xb5, 0xaa, 0x2c, 0x55, 0xde, 0xaf, 0xc8,
- 0xe5, 0xf4, 0x1a, 0xb7, 0x35, 0x9e, 0xf0, 0xc9, 0x90, 0x88, 0xbd, 0x0a, 0x17, 0x02, 0x4b, 0x69,
- 0xaf, 0x22, 0x2b, 0x9a, 0x5e, 0xd3, 0x44, 0x4d, 0x4e, 0x33, 0x1c, 0x8c, 0x27, 0xfc, 0x3a, 0x91,
- 0xb1, 0x6f, 0xc0, 0x76, 0x08, 0x77, 0xa0, 0xd4, 0x64, 0xa5, 0x76, 0x58, 0xa3, 0xd0, 0x18, 0x77,
- 0x76, 0x3c, 0xe1, 0x13, 0x0b, 0x31, 0x5b, 0x00, 0x2e, 0x82, 0x56, 0x64, 0x49, 0xab, 0x1c, 0x28,
- 0x14, 0x7e, 0x8a, 0x4b, 0x8d, 0x27, 0x3c, 0x04, 0x72, 0x76, 0x07, 0x2e, 0x86, 0xf0, 0x77, 0x45,
- 0x45, 0x91, 0xf7, 0x28, 0x38, 0xce, 0x25, 0xc7, 0x13, 0xfe, 0x0c, 0x15, 0xb2, 0x6f, 0xc1, 0xa5,
- 0x00, 0x59, 0x15, 0xa5, 0xfb, 0xb2, 0xa6, 0x4b, 0x07, 0xfb, 0xfb, 0x15, 0x6d, 0x5f, 0x56, 0xb4,
- 0xf4, 0x69, 0x2e, 0x33, 0x9e, 0xf0, 0x69, 0xa2, 0x08, 0xe4, 0xec, 0xbb, 0xc0, 0x1f, 0x33, 0x13,
- 0xa5, 0xfb, 0xca, 0xc1, 0x47, 0x7b, 0x72, 0xf9, 0x8e, 0xec, 0xdb, 0xae, 0x73, 0xdb, 0xe3, 0x09,
- 0x7f, 0x9e, 0x68, 0x97, 0x94, 0xec, 0x3b, 0x2f, 0x21, 0x50, 0x65, 0x49, 0xae, 0x54, 0x35, 0x5d,
- 0x2c, 0xd5, 0x64, 0x45, 0x92, 0xd3, 0x67, 0xb8, 0xec, 0x78, 0xc2, 0x67, 0x88, 0x96, 0x2a, 0xa9,
- 0x8e, 0xbd, 0x09, 0x97, 0x03, 0x7b, 0x45, 0xfe, 0x58, 0xd3, 0x6b, 0xf2, 0x07, 0x87, 0x9e, 0xca,
- 0xa3, 0xf9, 0x30, 0xbd, 0x41, 0x02, 0xf7, 0x34, 0x73, 0x85, 0x27, 0x67, 0x79, 0x48, 0x07, 0x76,
- 0x77, 0x65, 0xb1, 0x2c, 0xab, 0xe9, 0x04, 0xa9, 0x0c, 0xd9, 0x71, 0xf1, 0x27, 0x3f, 0xe4, 0xd6,
- 0x4a, 0x9f, 0x3c, 0x7d, 0x9e, 0x63, 0x9e, 0x3d, 0xcf, 0x31, 0xbf, 0x3e, 0xcf, 0x31, 0xdf, 0xbe,
- 0xc8, 0xad, 0x3d, 0x7b, 0x91, 0x5b, 0xfb, 0xe5, 0x45, 0x6e, 0xed, 0xa1, 0xd4, 0xb4, 0xdc, 0x56,
- 0xbf, 0x5e, 0x30, 0x71, 0xa7, 0x68, 0x62, 0xa7, 0x83, 0x9d, 0xa2, 0x55, 0x37, 0xaf, 0x35, 0x71,
- 0xb1, 0x83, 0x1b, 0xfd, 0x36, 0x72, 0xc8, 0x2f, 0xd7, 0xb5, 0xf9, 0x3f, 0xd7, 0x9b, 0x37, 0xaf,
- 0x85, 0x7f, 0xbb, 0xbc, 0x7b, 0xc6, 0xa9, 0xaf, 0xfb, 0x03, 0xed, 0xc6, 0xef, 0x01, 0x00, 0x00,
- 0xff, 0xff, 0x36, 0x5b, 0x28, 0xf2, 0xa5, 0x0d, 0x00, 0x00,
+ proto.RegisterFile("ibc/lightclients/solomachine/v1/solomachine.proto", fileDescriptor_6cc2ee18f7f86d4e)
+}
+
+var fileDescriptor_6cc2ee18f7f86d4e = []byte{
+ // 1364 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x57, 0xdf, 0x8e, 0xdb, 0x54,
+ 0x13, 0x5f, 0xa7, 0xe9, 0x76, 0x33, 0xd9, 0x66, 0xf3, 0xb9, 0x69, 0x9b, 0x75, 0xab, 0xc4, 0x9f,
+ 0x11, 0x65, 0x41, 0x34, 0x61, 0x8b, 0xa8, 0x50, 0x85, 0x00, 0xc7, 0x31, 0x34, 0xed, 0xae, 0x37,
+ 0x38, 0x5e, 0xa0, 0x15, 0xc8, 0x72, 0x9c, 0xb3, 0x89, 0xd5, 0xc4, 0x27, 0xc4, 0x4e, 0xd2, 0x20,
+ 0x21, 0x21, 0xae, 0x4a, 0xc4, 0x05, 0x2f, 0x10, 0x09, 0x81, 0x78, 0x15, 0xe0, 0xb2, 0xdc, 0x71,
+ 0x15, 0x50, 0xfb, 0x06, 0x79, 0x02, 0x64, 0x9f, 0x93, 0xd8, 0xce, 0xee, 0x66, 0xc5, 0xbf, 0xbb,
+ 0x73, 0x66, 0x7e, 0xf3, 0x9b, 0x39, 0x33, 0xe3, 0x39, 0xc7, 0xb0, 0x6b, 0xd5, 0xcd, 0x62, 0xdb,
+ 0x6a, 0xb6, 0x5c, 0xb3, 0x6d, 0x21, 0xdb, 0x75, 0x8a, 0x0e, 0x6e, 0xe3, 0x8e, 0x61, 0xb6, 0x2c,
+ 0x1b, 0x15, 0x07, 0xbb, 0xe1, 0x6d, 0xa1, 0xdb, 0xc3, 0x2e, 0x66, 0xf3, 0x56, 0xdd, 0x2c, 0x84,
+ 0x4d, 0x0a, 0x61, 0xcc, 0x60, 0x97, 0x7b, 0xc9, 0xe3, 0x34, 0x71, 0x0f, 0x15, 0x4d, 0x6c, 0xdb,
+ 0xc8, 0x74, 0x2d, 0x6c, 0x7b, 0x54, 0xc1, 0x8e, 0x30, 0x71, 0xff, 0x0f, 0x80, 0x2d, 0xc3, 0xb6,
+ 0x51, 0xdb, 0x47, 0x91, 0x25, 0x85, 0x64, 0x9a, 0xb8, 0x89, 0xfd, 0x65, 0xd1, 0x5b, 0x51, 0xe9,
+ 0x76, 0x13, 0xe3, 0x66, 0x1b, 0x15, 0xfd, 0x5d, 0xbd, 0x7f, 0x54, 0x34, 0xec, 0x11, 0x51, 0x09,
+ 0xbf, 0xc6, 0x20, 0x29, 0xf9, 0x71, 0xd5, 0x5c, 0xc3, 0x45, 0x2c, 0x07, 0x1b, 0x0e, 0xfa, 0xac,
+ 0x8f, 0x6c, 0x13, 0x65, 0x19, 0x9e, 0xd9, 0x89, 0xab, 0x8b, 0x3d, 0x2b, 0xc1, 0xd6, 0x51, 0x0f,
+ 0x7f, 0x8e, 0x6c, 0x7d, 0x01, 0x89, 0x79, 0x90, 0x12, 0x37, 0x9b, 0xe6, 0xaf, 0x8c, 0x8c, 0x4e,
+ 0xfb, 0x8e, 0xb0, 0x04, 0x10, 0xd4, 0x14, 0x91, 0xd4, 0xe6, 0x24, 0x2e, 0x6c, 0x99, 0xd8, 0x76,
+ 0x90, 0xed, 0xf4, 0x1d, 0xdd, 0xf1, 0x7c, 0x66, 0xcf, 0xf1, 0xcc, 0x4e, 0xf2, 0x56, 0xb1, 0x70,
+ 0x46, 0xa2, 0x0a, 0xd2, 0xdc, 0xce, 0x0f, 0x35, 0xec, 0x75, 0x89, 0x51, 0x50, 0x53, 0x66, 0x04,
+ 0xcb, 0x22, 0xb8, 0x66, 0xb4, 0xdb, 0x78, 0xa8, 0xf7, 0xbb, 0x0d, 0xc3, 0x45, 0xba, 0x71, 0xe4,
+ 0xa2, 0x9e, 0xde, 0xed, 0xe1, 0x2e, 0x76, 0x8c, 0x76, 0x36, 0xce, 0x33, 0x3b, 0x1b, 0xa5, 0x1b,
+ 0xb3, 0x69, 0x5e, 0x20, 0x84, 0x2b, 0xc0, 0x82, 0x9a, 0xf5, 0xb5, 0x87, 0xbe, 0x52, 0xf4, 0x74,
+ 0x55, 0xaa, 0xba, 0x13, 0x7f, 0xf2, 0x5d, 0x7e, 0x4d, 0xf8, 0x9e, 0x81, 0x54, 0x34, 0x56, 0xf6,
+ 0x1e, 0x40, 0xb7, 0x5f, 0x6f, 0x5b, 0xa6, 0xfe, 0x08, 0x8d, 0xfc, 0xc4, 0x26, 0x6f, 0x65, 0x0a,
+ 0xa4, 0x2c, 0x85, 0x79, 0x59, 0x0a, 0xa2, 0x3d, 0x2a, 0x5d, 0x9e, 0x4d, 0xf3, 0xff, 0x23, 0x41,
+ 0x04, 0x16, 0x82, 0x9a, 0x20, 0x9b, 0xfb, 0x68, 0xc4, 0xf2, 0x90, 0x6c, 0x58, 0x03, 0xd4, 0x73,
+ 0xac, 0x23, 0x0b, 0xf5, 0xfc, 0x12, 0x24, 0xd4, 0xb0, 0x88, 0xbd, 0x0e, 0x09, 0xd7, 0xea, 0x20,
+ 0xc7, 0x35, 0x3a, 0x5d, 0x3f, 0xbb, 0x71, 0x35, 0x10, 0xd0, 0x20, 0xbf, 0x8a, 0xc1, 0xfa, 0x5d,
+ 0x64, 0x34, 0x50, 0x6f, 0x65, 0xcd, 0x23, 0x54, 0xb1, 0x25, 0x2a, 0x4f, 0xeb, 0x58, 0x4d, 0xdb,
+ 0x70, 0xfb, 0x3d, 0x52, 0xc6, 0x4d, 0x35, 0x10, 0xb0, 0x87, 0x90, 0xb2, 0xd1, 0x50, 0x0f, 0x1d,
+ 0x3c, 0xbe, 0xe2, 0xe0, 0xdb, 0xb3, 0x69, 0xfe, 0x32, 0x39, 0x78, 0xd4, 0x4a, 0x50, 0x37, 0x6d,
+ 0x34, 0xac, 0x2e, 0xce, 0x2f, 0xc1, 0x96, 0x07, 0x08, 0xe7, 0xe0, 0xbc, 0x97, 0x83, 0x70, 0x43,
+ 0x2c, 0x01, 0x04, 0xd5, 0x8b, 0xa4, 0x1c, 0x08, 0x68, 0x12, 0x7e, 0x8e, 0xc1, 0xe6, 0xbe, 0xe5,
+ 0xd4, 0x51, 0xcb, 0x18, 0x58, 0xb8, 0xdf, 0x63, 0x77, 0x21, 0x41, 0x9a, 0x4f, 0xb7, 0x1a, 0x7e,
+ 0x2e, 0x12, 0xa5, 0xcc, 0x6c, 0x9a, 0x4f, 0xd3, 0x36, 0x9b, 0xab, 0x04, 0x75, 0x83, 0xac, 0x2b,
+ 0x8d, 0x48, 0xf6, 0x62, 0x4b, 0xd9, 0xeb, 0xc2, 0xc5, 0x45, 0x3a, 0x74, 0x6c, 0xcf, 0x5b, 0x7d,
+ 0xf7, 0xcc, 0x56, 0xaf, 0xcd, 0xad, 0x44, 0xbb, 0x51, 0x36, 0x5c, 0xa3, 0x94, 0x9d, 0x4d, 0xf3,
+ 0x19, 0x12, 0x45, 0x84, 0x51, 0x50, 0x37, 0x17, 0xfb, 0x03, 0x7b, 0xc9, 0xa3, 0x3b, 0xc4, 0x34,
+ 0xe5, 0xff, 0x96, 0x47, 0x77, 0x88, 0xc3, 0x1e, 0xb5, 0x21, 0xa6, 0x99, 0xfc, 0x89, 0x81, 0xf4,
+ 0x32, 0x45, 0xb4, 0x3d, 0x98, 0xe5, 0xf6, 0xf8, 0x04, 0x12, 0x0d, 0xc3, 0x35, 0x74, 0x77, 0xd4,
+ 0x25, 0x99, 0x4b, 0xdd, 0x7a, 0xf9, 0xcc, 0x30, 0x3d, 0x5e, 0x6d, 0xd4, 0x45, 0xe1, 0xb2, 0x2c,
+ 0x58, 0x04, 0x75, 0xa3, 0x41, 0xf5, 0x2c, 0x0b, 0x71, 0x6f, 0x4d, 0xbb, 0xd2, 0x5f, 0x47, 0x9b,
+ 0x39, 0x7e, 0xf2, 0x77, 0xf1, 0x25, 0x03, 0x59, 0x6d, 0x2e, 0x43, 0x8d, 0xc5, 0x99, 0xfc, 0x03,
+ 0xbd, 0x0b, 0xa9, 0x20, 0x17, 0x3e, 0xbd, 0x7f, 0xaa, 0x70, 0xef, 0x46, 0xf5, 0x82, 0x1a, 0x94,
+ 0xa3, 0x7c, 0x2c, 0x84, 0xd8, 0xc9, 0x21, 0xfc, 0xce, 0x40, 0xc2, 0xf3, 0x5b, 0x1a, 0xb9, 0xc8,
+ 0xf9, 0x07, 0x5f, 0xe7, 0xd2, 0xa0, 0x38, 0x77, 0x7c, 0x50, 0x44, 0x4a, 0x10, 0xff, 0xaf, 0x4a,
+ 0x70, 0x3e, 0x28, 0x01, 0x3d, 0xe1, 0x8f, 0x0c, 0x00, 0x19, 0x3e, 0x7e, 0x52, 0xf6, 0x20, 0x49,
+ 0x3f, 0xf9, 0x33, 0xc7, 0xe3, 0x95, 0xd9, 0x34, 0xcf, 0x46, 0xa6, 0x04, 0x9d, 0x8f, 0x64, 0x44,
+ 0x9c, 0x32, 0x1f, 0x62, 0x7f, 0x73, 0x3e, 0x7c, 0x01, 0x5b, 0xa1, 0xcb, 0xd1, 0x8f, 0x95, 0x85,
+ 0x78, 0xd7, 0x70, 0x5b, 0xb4, 0x9d, 0xfd, 0x35, 0x5b, 0x85, 0x4d, 0x3a, 0x1a, 0xc8, 0x85, 0x16,
+ 0x5b, 0x71, 0x80, 0xab, 0xb3, 0x69, 0xfe, 0x52, 0x64, 0x9c, 0xd0, 0x2b, 0x2b, 0x69, 0x06, 0x9e,
+ 0xa8, 0xfb, 0xaf, 0x19, 0x60, 0xa3, 0x17, 0xc9, 0xa9, 0x21, 0x3c, 0x38, 0x7e, 0xad, 0xae, 0x8a,
+ 0xe2, 0x2f, 0xdc, 0x9d, 0x34, 0x96, 0x01, 0x5c, 0x92, 0x16, 0x0f, 0x92, 0xd5, 0xb1, 0xc8, 0x00,
+ 0xc1, 0xdb, 0x85, 0x86, 0xf1, 0xa2, 0xdf, 0x56, 0xde, 0xe3, 0xa5, 0x10, 0x7a, 0xd7, 0x90, 0x4b,
+ 0x9d, 0xee, 0x64, 0xbb, 0xa1, 0x86, 0x0c, 0xa9, 0xdf, 0x06, 0xa4, 0x25, 0xf2, 0xc4, 0x59, 0xed,
+ 0xf4, 0x36, 0x5c, 0xa0, 0x4f, 0x21, 0xea, 0xf1, 0x7a, 0xc8, 0x23, 0x7d, 0x23, 0x79, 0xee, 0xc8,
+ 0x52, 0x9d, 0x83, 0xa9, 0x97, 0x7b, 0x90, 0xa9, 0x1a, 0xe6, 0x23, 0xe4, 0x4a, 0xb8, 0xd3, 0xb1,
+ 0xdc, 0x0e, 0xb2, 0xdd, 0x53, 0x3d, 0xe5, 0xbc, 0xe3, 0xcd, 0x51, 0xbe, 0xb3, 0x4d, 0x35, 0x24,
+ 0x11, 0x1e, 0xc0, 0x36, 0xe1, 0x12, 0xcd, 0x47, 0x36, 0x1e, 0xb6, 0x51, 0xa3, 0x89, 0x56, 0x12,
+ 0xee, 0xc0, 0x96, 0x11, 0x85, 0x52, 0xd6, 0x65, 0xb1, 0x50, 0x80, 0x2c, 0xa1, 0x56, 0x91, 0x89,
+ 0xac, 0xae, 0x2b, 0xd6, 0x1d, 0x6f, 0x0e, 0x9c, 0xc6, 0x2c, 0xb4, 0x20, 0xa3, 0xa0, 0xc7, 0xee,
+ 0xfc, 0xf1, 0xa5, 0x22, 0x73, 0x70, 0x6a, 0x14, 0x6f, 0xc1, 0x45, 0x1b, 0x3d, 0x76, 0xbd, 0xa7,
+ 0x9b, 0xde, 0x43, 0xe6, 0x80, 0xbe, 0xed, 0x42, 0xd7, 0x40, 0x44, 0x2d, 0xa8, 0x49, 0x9b, 0x50,
+ 0x7b, 0xac, 0xaf, 0x7c, 0x13, 0x87, 0x8d, 0xf9, 0x60, 0x60, 0xdf, 0x84, 0x17, 0xca, 0xa2, 0x26,
+ 0xea, 0xda, 0x83, 0xaa, 0xac, 0x1f, 0x2a, 0x15, 0xa5, 0xa2, 0x55, 0xc4, 0xbd, 0xca, 0x43, 0xb9,
+ 0xac, 0x1f, 0x2a, 0xb5, 0xaa, 0x2c, 0x55, 0xde, 0xab, 0xc8, 0xe5, 0xf4, 0x1a, 0xb7, 0x35, 0x9e,
+ 0xf0, 0xc9, 0x90, 0x88, 0xbd, 0x01, 0x57, 0x02, 0x4b, 0x69, 0xaf, 0x22, 0x2b, 0x9a, 0x5e, 0xd3,
+ 0x44, 0x4d, 0x4e, 0x33, 0x1c, 0x8c, 0x27, 0xfc, 0x3a, 0x91, 0xb1, 0xaf, 0xc2, 0x76, 0x08, 0x77,
+ 0xa0, 0xd4, 0x64, 0xa5, 0x76, 0x58, 0xa3, 0xd0, 0x18, 0x77, 0x71, 0x3c, 0xe1, 0x13, 0x0b, 0x31,
+ 0x5b, 0x00, 0x2e, 0x82, 0x56, 0x64, 0x49, 0xab, 0x1c, 0x28, 0x14, 0x7e, 0x8e, 0x4b, 0x8d, 0x27,
+ 0x3c, 0x04, 0x72, 0x76, 0x07, 0xae, 0x86, 0xf0, 0x77, 0x45, 0x45, 0x91, 0xf7, 0x28, 0x38, 0xce,
+ 0x25, 0xc7, 0x13, 0xfe, 0x02, 0x15, 0xb2, 0x6f, 0xc0, 0xb5, 0x00, 0x59, 0x15, 0xa5, 0xfb, 0xb2,
+ 0xa6, 0x4b, 0x07, 0xfb, 0xfb, 0x15, 0x6d, 0x5f, 0x56, 0xb4, 0xf4, 0x79, 0x2e, 0x33, 0x9e, 0xf0,
+ 0x69, 0xa2, 0x08, 0xe4, 0xec, 0x3b, 0xc0, 0x1f, 0x33, 0x13, 0xa5, 0xfb, 0xca, 0xc1, 0x47, 0x7b,
+ 0x72, 0xf9, 0x7d, 0xd9, 0xb7, 0x5d, 0xe7, 0xb6, 0xc7, 0x13, 0xfe, 0x32, 0xd1, 0x2e, 0x29, 0xd9,
+ 0xb7, 0x4f, 0x20, 0x50, 0x65, 0x49, 0xae, 0x54, 0x35, 0x5d, 0x2c, 0xd5, 0x64, 0x45, 0x92, 0xd3,
+ 0x17, 0xb8, 0xec, 0x78, 0xc2, 0x67, 0x88, 0x96, 0x2a, 0xa9, 0x8e, 0xbd, 0x0d, 0xd7, 0x03, 0x7b,
+ 0x45, 0xfe, 0x58, 0xd3, 0x6b, 0xf2, 0x07, 0x87, 0x9e, 0xca, 0xa3, 0xf9, 0x30, 0xbd, 0x41, 0x02,
+ 0xf7, 0x34, 0x73, 0x85, 0x27, 0x67, 0x79, 0x48, 0x07, 0x76, 0x77, 0x65, 0xb1, 0x2c, 0xab, 0xe9,
+ 0x04, 0xa9, 0x0c, 0xd9, 0x71, 0xf1, 0x27, 0x3f, 0xe4, 0xd6, 0x4a, 0x9f, 0xfe, 0xf2, 0x2c, 0xc7,
+ 0x3c, 0x7d, 0x96, 0x63, 0xfe, 0x78, 0x96, 0x63, 0xbe, 0x7d, 0x9e, 0x5b, 0x7b, 0xfa, 0x3c, 0xb7,
+ 0xf6, 0xdb, 0xf3, 0xdc, 0xda, 0x43, 0xa9, 0x69, 0xb9, 0xad, 0x7e, 0xbd, 0x60, 0xe2, 0x4e, 0xd1,
+ 0xc4, 0x4e, 0x07, 0x3b, 0x45, 0xab, 0x6e, 0xde, 0x6c, 0xe2, 0x62, 0x07, 0x37, 0xfa, 0x6d, 0xe4,
+ 0x90, 0xdf, 0xab, 0x9b, 0xf3, 0xff, 0xab, 0xd7, 0x6e, 0xdf, 0x0c, 0xff, 0x62, 0x79, 0x77, 0x8c,
+ 0x53, 0x5f, 0xf7, 0x87, 0xd9, 0xeb, 0x7f, 0x06, 0x00, 0x00, 0xff, 0xff, 0xb3, 0xbe, 0x52, 0x62,
+ 0x8f, 0x0d, 0x00, 0x00,
}
func (m *ClientState) Marshal() (dAtA []byte, err error) {
diff --git a/modules/light-clients/07-tendermint/types/tendermint.pb.go b/modules/light-clients/07-tendermint/types/tendermint.pb.go
index fac455d6..84a79b66 100644
--- a/modules/light-clients/07-tendermint/types/tendermint.pb.go
+++ b/modules/light-clients/07-tendermint/types/tendermint.pb.go
@@ -1,5 +1,5 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: ibcgo/lightclients/tendermint/v1/tendermint.proto
+// source: ibc/lightclients/tendermint/v1/tendermint.proto
package types
@@ -71,7 +71,7 @@ func (m *ClientState) Reset() { *m = ClientState{} }
func (m *ClientState) String() string { return proto.CompactTextString(m) }
func (*ClientState) ProtoMessage() {}
func (*ClientState) Descriptor() ([]byte, []int) {
- return fileDescriptor_868940ee8c1cf959, []int{0}
+ return fileDescriptor_c6d6cf2b288949be, []int{0}
}
func (m *ClientState) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -114,7 +114,7 @@ func (m *ConsensusState) Reset() { *m = ConsensusState{} }
func (m *ConsensusState) String() string { return proto.CompactTextString(m) }
func (*ConsensusState) ProtoMessage() {}
func (*ConsensusState) Descriptor() ([]byte, []int) {
- return fileDescriptor_868940ee8c1cf959, []int{1}
+ return fileDescriptor_c6d6cf2b288949be, []int{1}
}
func (m *ConsensusState) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -155,7 +155,7 @@ func (m *Misbehaviour) Reset() { *m = Misbehaviour{} }
func (m *Misbehaviour) String() string { return proto.CompactTextString(m) }
func (*Misbehaviour) ProtoMessage() {}
func (*Misbehaviour) Descriptor() ([]byte, []int) {
- return fileDescriptor_868940ee8c1cf959, []int{2}
+ return fileDescriptor_c6d6cf2b288949be, []int{2}
}
func (m *Misbehaviour) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -207,7 +207,7 @@ func (m *Header) Reset() { *m = Header{} }
func (m *Header) String() string { return proto.CompactTextString(m) }
func (*Header) ProtoMessage() {}
func (*Header) Descriptor() ([]byte, []int) {
- return fileDescriptor_868940ee8c1cf959, []int{3}
+ return fileDescriptor_c6d6cf2b288949be, []int{3}
}
func (m *Header) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -268,7 +268,7 @@ func (m *Fraction) Reset() { *m = Fraction{} }
func (m *Fraction) String() string { return proto.CompactTextString(m) }
func (*Fraction) ProtoMessage() {}
func (*Fraction) Descriptor() ([]byte, []int) {
- return fileDescriptor_868940ee8c1cf959, []int{4}
+ return fileDescriptor_c6d6cf2b288949be, []int{4}
}
func (m *Fraction) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -312,87 +312,87 @@ func (m *Fraction) GetDenominator() uint64 {
}
func init() {
- proto.RegisterType((*ClientState)(nil), "ibcgo.lightclients.tendermint.v1.ClientState")
- proto.RegisterType((*ConsensusState)(nil), "ibcgo.lightclients.tendermint.v1.ConsensusState")
- proto.RegisterType((*Misbehaviour)(nil), "ibcgo.lightclients.tendermint.v1.Misbehaviour")
- proto.RegisterType((*Header)(nil), "ibcgo.lightclients.tendermint.v1.Header")
- proto.RegisterType((*Fraction)(nil), "ibcgo.lightclients.tendermint.v1.Fraction")
+ proto.RegisterType((*ClientState)(nil), "ibc.lightclients.tendermint.v1.ClientState")
+ proto.RegisterType((*ConsensusState)(nil), "ibc.lightclients.tendermint.v1.ConsensusState")
+ proto.RegisterType((*Misbehaviour)(nil), "ibc.lightclients.tendermint.v1.Misbehaviour")
+ proto.RegisterType((*Header)(nil), "ibc.lightclients.tendermint.v1.Header")
+ proto.RegisterType((*Fraction)(nil), "ibc.lightclients.tendermint.v1.Fraction")
}
func init() {
- proto.RegisterFile("ibcgo/lightclients/tendermint/v1/tendermint.proto", fileDescriptor_868940ee8c1cf959)
+ proto.RegisterFile("ibc/lightclients/tendermint/v1/tendermint.proto", fileDescriptor_c6d6cf2b288949be)
}
-var fileDescriptor_868940ee8c1cf959 = []byte{
- // 1088 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x56, 0xcf, 0x6f, 0xe3, 0x44,
- 0x14, 0x6e, 0xda, 0xb2, 0x4d, 0x26, 0xe9, 0xb6, 0x78, 0x4b, 0x37, 0x2d, 0xd9, 0x38, 0x98, 0x15,
- 0x0a, 0x2b, 0xd5, 0x26, 0x59, 0x24, 0xa4, 0x1e, 0x90, 0x70, 0x17, 0xd4, 0x22, 0x56, 0xaa, 0x5c,
- 0x7e, 0x48, 0x08, 0x64, 0x4d, 0xec, 0x89, 0x33, 0x5a, 0xdb, 0x63, 0x3c, 0xe3, 0xd0, 0xf2, 0x17,
- 0xc0, 0x6d, 0xb9, 0x71, 0xe0, 0xc0, 0x89, 0xbf, 0x65, 0x8f, 0x3d, 0x72, 0x32, 0xa8, 0xbd, 0x73,
- 0xc8, 0x91, 0x13, 0xf2, 0xcc, 0xd8, 0x99, 0xb4, 0x5d, 0x95, 0xe5, 0x12, 0xcd, 0x7b, 0xef, 0x7b,
- 0xdf, 0x97, 0x79, 0xf3, 0xe6, 0x8d, 0xc1, 0x00, 0x8f, 0xbc, 0x80, 0x58, 0x21, 0x0e, 0x26, 0xcc,
- 0x0b, 0x31, 0x8a, 0x19, 0xb5, 0x18, 0x8a, 0x7d, 0x94, 0x46, 0x38, 0x66, 0xd6, 0x74, 0xa0, 0x58,
- 0x66, 0x92, 0x12, 0x46, 0xb4, 0x1e, 0x4f, 0x31, 0xd5, 0x14, 0x53, 0x01, 0x4d, 0x07, 0xbb, 0x3d,
- 0x85, 0x81, 0x9d, 0x25, 0x88, 0x5a, 0x53, 0x18, 0x62, 0x1f, 0x32, 0x92, 0x0a, 0x8e, 0xdd, 0xce,
- 0x35, 0x04, 0xff, 0x95, 0xd1, 0x7b, 0x1e, 0x89, 0xc7, 0x98, 0x58, 0x49, 0x4a, 0xc8, 0xb8, 0x74,
- 0x76, 0x03, 0x42, 0x82, 0x10, 0x59, 0xdc, 0x1a, 0x65, 0x63, 0xcb, 0xcf, 0x52, 0xc8, 0x30, 0x89,
- 0x65, 0x5c, 0xbf, 0x1a, 0x67, 0x38, 0x42, 0x94, 0xc1, 0x28, 0x91, 0x80, 0xb7, 0xc4, 0x56, 0x3d,
- 0x92, 0x22, 0x4b, 0xfc, 0xef, 0x62, 0x7b, 0x62, 0x25, 0x21, 0xef, 0xaa, 0x10, 0x12, 0x45, 0x98,
- 0x45, 0x25, 0xac, 0xb2, 0x24, 0x74, 0x2b, 0x20, 0x01, 0xe1, 0x4b, 0xab, 0x58, 0x09, 0xaf, 0xf1,
- 0xf7, 0x1a, 0x68, 0x1e, 0x70, 0xc6, 0x13, 0x06, 0x19, 0xd2, 0x76, 0x40, 0xdd, 0x9b, 0x40, 0x1c,
- 0xbb, 0xd8, 0x6f, 0xd7, 0x7a, 0xb5, 0x7e, 0xc3, 0x59, 0xe3, 0xf6, 0x91, 0xaf, 0x05, 0xa0, 0xc9,
- 0xd2, 0x8c, 0x32, 0x37, 0x44, 0x53, 0x14, 0xb6, 0x97, 0x7b, 0xb5, 0x7e, 0x73, 0xf8, 0xc8, 0xbc,
- 0xad, 0xb8, 0xe6, 0x27, 0x29, 0xf4, 0x8a, 0x6d, 0xdb, 0xbb, 0x2f, 0x72, 0x7d, 0x69, 0x96, 0xeb,
- 0xda, 0x19, 0x8c, 0xc2, 0x7d, 0x43, 0x21, 0x33, 0x1c, 0xc0, 0xad, 0xcf, 0x0a, 0x43, 0x1b, 0x83,
- 0x0d, 0x6e, 0xe1, 0x38, 0x70, 0x13, 0x94, 0x62, 0xe2, 0xb7, 0x57, 0xb8, 0xd8, 0x8e, 0x29, 0x4a,
- 0x66, 0x96, 0x25, 0x33, 0x9f, 0xc8, 0x92, 0xda, 0x86, 0xe4, 0xde, 0x56, 0xb8, 0xe7, 0xf9, 0xc6,
- 0x2f, 0x7f, 0xea, 0x35, 0xe7, 0x6e, 0xe9, 0x3d, 0xe6, 0x4e, 0x0d, 0x83, 0xcd, 0x2c, 0x1e, 0x91,
- 0xd8, 0x57, 0x84, 0x56, 0x6f, 0x13, 0x7a, 0x5b, 0x0a, 0xdd, 0x17, 0x42, 0x57, 0x09, 0x84, 0xd2,
- 0x46, 0xe5, 0x96, 0x52, 0x08, 0x6c, 0x44, 0xf0, 0xd4, 0xf5, 0x42, 0xe2, 0x3d, 0x73, 0xfd, 0x14,
- 0x8f, 0x59, 0xfb, 0xb5, 0x57, 0xdc, 0xd2, 0x95, 0x7c, 0x21, 0xb4, 0x1e, 0xc1, 0xd3, 0x83, 0xc2,
- 0xf9, 0xa4, 0xf0, 0x69, 0x2e, 0x58, 0x1f, 0xa7, 0xe4, 0x07, 0x14, 0xbb, 0x13, 0x54, 0x1c, 0x48,
- 0xfb, 0x0e, 0x17, 0xe9, 0xc8, 0x43, 0x2a, 0xda, 0xc4, 0x94, 0xfd, 0x33, 0x1d, 0x98, 0x87, 0x1c,
- 0x63, 0x77, 0xa4, 0xce, 0x96, 0xd0, 0x59, 0x20, 0x30, 0x9c, 0x96, 0xb0, 0x05, 0xb6, 0x10, 0x08,
- 0x21, 0x43, 0x94, 0x95, 0x02, 0x6b, 0xaf, 0x2e, 0xb0, 0x40, 0x60, 0x38, 0x2d, 0x61, 0x4b, 0x81,
- 0x23, 0xd0, 0xe4, 0x97, 0xc8, 0xa5, 0x09, 0xf2, 0x68, 0xbb, 0xde, 0x5b, 0xe9, 0x37, 0x87, 0x9b,
- 0x26, 0xf6, 0xe8, 0xf0, 0xb1, 0x79, 0x5c, 0x44, 0x4e, 0x12, 0xe4, 0xd9, 0xdb, 0xf3, 0x36, 0x52,
- 0xe0, 0x86, 0x03, 0x92, 0x12, 0x42, 0xb5, 0x7d, 0xd0, 0xca, 0x92, 0x20, 0x85, 0x3e, 0x72, 0x13,
- 0xc8, 0x26, 0xed, 0x46, 0x6f, 0xa5, 0xdf, 0xb0, 0xef, 0xcf, 0x72, 0xfd, 0x9e, 0x3c, 0x3b, 0x25,
- 0x6a, 0x38, 0x4d, 0x69, 0x1e, 0x43, 0x36, 0xd1, 0x5c, 0xb0, 0x03, 0xc3, 0x90, 0x7c, 0xef, 0x66,
- 0x89, 0x0f, 0x19, 0x72, 0xe1, 0x98, 0xa1, 0xd4, 0x45, 0xa7, 0x09, 0x4e, 0xcf, 0xda, 0xa0, 0x57,
- 0xeb, 0xd7, 0xed, 0x87, 0xb3, 0x5c, 0xef, 0x09, 0xa2, 0x97, 0x42, 0x0d, 0x67, 0x9b, 0xc7, 0xbe,
- 0xe0, 0xa1, 0x8f, 0x8a, 0xc8, 0xc7, 0x3c, 0xa0, 0x7d, 0x07, 0xf4, 0x1b, 0xb2, 0x22, 0x4c, 0x47,
- 0x68, 0x02, 0xa7, 0x98, 0x64, 0x69, 0xbb, 0xc9, 0x65, 0x1e, 0xcd, 0x72, 0xfd, 0x9d, 0x97, 0xca,
- 0xa8, 0x09, 0x86, 0xd3, 0xb9, 0x2a, 0xf6, 0x54, 0x09, 0xef, 0xaf, 0xfe, 0xf8, 0x9b, 0xbe, 0x64,
- 0xfc, 0xbe, 0x0c, 0xee, 0x1e, 0x90, 0x98, 0xa2, 0x98, 0x66, 0x54, 0xdc, 0x79, 0x1b, 0x34, 0xaa,
- 0xd1, 0xc3, 0x2f, 0x7d, 0x73, 0xb8, 0x7b, 0xad, 0x2d, 0x3f, 0x2f, 0x11, 0x76, 0xbd, 0x38, 0xce,
- 0xe7, 0x45, 0xf7, 0xcd, 0xd3, 0xb4, 0x0f, 0xc1, 0x6a, 0x4a, 0x08, 0x93, 0x53, 0xe1, 0xe1, 0x42,
- 0x3f, 0xcc, 0x27, 0xd1, 0x74, 0x60, 0x3e, 0x45, 0xe9, 0xb3, 0x10, 0x39, 0x84, 0x30, 0x7b, 0xb5,
- 0x20, 0x72, 0x78, 0x9e, 0xf6, 0x53, 0x0d, 0x6c, 0xc5, 0xe8, 0x94, 0xb9, 0xd5, 0xe0, 0xa5, 0xee,
- 0x04, 0xd2, 0x09, 0xbf, 0xf9, 0x2d, 0xfb, 0xab, 0x59, 0xae, 0xbf, 0x29, 0xaa, 0x70, 0x13, 0xca,
- 0xf8, 0x27, 0xd7, 0xdf, 0x0f, 0x30, 0x9b, 0x64, 0xa3, 0x42, 0x4e, 0x7d, 0x10, 0x94, 0x65, 0x88,
- 0x47, 0xd4, 0x1a, 0x9d, 0x31, 0x44, 0xcd, 0x43, 0x74, 0x6a, 0x17, 0x0b, 0x47, 0x2b, 0xe8, 0xbe,
- 0xac, 0xd8, 0x0e, 0x21, 0x9d, 0xc8, 0x42, 0xfd, 0xbc, 0x0c, 0x5a, 0x6a, 0xfd, 0xb4, 0x01, 0x68,
- 0x88, 0xd6, 0xae, 0x66, 0xa3, 0xbd, 0x35, 0xcb, 0xf5, 0x4d, 0xf1, 0xb7, 0xaa, 0x90, 0xe1, 0xd4,
- 0xc5, 0xfa, 0xc8, 0xd7, 0x3c, 0x50, 0x9f, 0x20, 0xe8, 0xa3, 0xd4, 0x1d, 0xc8, 0xca, 0xf4, 0x6f,
- 0x9f, 0x97, 0x87, 0x3c, 0xc3, 0xee, 0x5e, 0xe4, 0xfa, 0x9a, 0x58, 0x0f, 0x66, 0xb9, 0xbe, 0x21,
- 0x64, 0x4a, 0x3a, 0xc3, 0x59, 0x13, 0xcb, 0x81, 0x22, 0x32, 0x94, 0x73, 0xf2, 0x7f, 0x89, 0x0c,
- 0xaf, 0x89, 0x0c, 0x2b, 0x91, 0xa1, 0xac, 0xc9, 0xaf, 0x2b, 0xe0, 0x8e, 0x40, 0x6b, 0x10, 0xac,
- 0x53, 0x1c, 0xc4, 0xc8, 0x77, 0x05, 0x44, 0x36, 0x4e, 0x57, 0xd5, 0x11, 0x4f, 0xe4, 0x09, 0x87,
- 0x49, 0xc1, 0xce, 0x79, 0xae, 0xd7, 0xe6, 0xb3, 0x60, 0x81, 0xc2, 0x70, 0x5a, 0x54, 0xc1, 0x6a,
- 0xdf, 0x82, 0xf5, 0xea, 0x9c, 0x5d, 0x8a, 0xca, 0xe6, 0xba, 0x41, 0xa2, 0x3a, 0xc0, 0x13, 0xc4,
- 0xec, 0xf6, 0x9c, 0x7e, 0x21, 0xdd, 0x70, 0x5a, 0x53, 0x05, 0xa7, 0x8d, 0x80, 0x78, 0x10, 0xb8,
- 0x3e, 0x1f, 0x66, 0x2b, 0xff, 0x61, 0x98, 0x3d, 0x90, 0xc3, 0xec, 0x0d, 0xe5, 0xa1, 0xa9, 0x18,
- 0x0c, 0x67, 0x5d, 0x3a, 0xe4, 0x38, 0x0b, 0x81, 0x56, 0x22, 0xe6, 0x2d, 0x2b, 0x1f, 0x99, 0xdb,
- 0xf6, 0xf1, 0x60, 0x96, 0xeb, 0x3b, 0x8b, 0x2a, 0x73, 0x0e, 0xc3, 0x79, 0x5d, 0x3a, 0xe7, 0xcd,
- 0x6b, 0x7c, 0x0a, 0xea, 0xe5, 0x63, 0xab, 0x75, 0x40, 0x23, 0xce, 0x22, 0x94, 0x16, 0x11, 0x7e,
- 0x36, 0xab, 0xce, 0xdc, 0xa1, 0xf5, 0x40, 0xd3, 0x47, 0x31, 0x89, 0x70, 0xcc, 0xe3, 0xcb, 0x3c,
- 0xae, 0xba, 0xec, 0x6f, 0x5e, 0x5c, 0x74, 0x6b, 0xe7, 0x17, 0xdd, 0xda, 0x5f, 0x17, 0xdd, 0xda,
- 0xf3, 0xcb, 0xee, 0xd2, 0xf9, 0x65, 0x77, 0xe9, 0x8f, 0xcb, 0xee, 0xd2, 0xd7, 0xb6, 0x72, 0xd1,
- 0x3c, 0x42, 0x23, 0x42, 0x2d, 0x3c, 0xf2, 0xf6, 0x02, 0x62, 0x45, 0xc4, 0xcf, 0x42, 0x44, 0xc5,
- 0xc7, 0xd9, 0x5e, 0xf9, 0x75, 0xf6, 0xde, 0x07, 0x7b, 0x57, 0x3f, 0x9e, 0x46, 0x77, 0xf8, 0x5c,
- 0x79, 0xfc, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xf7, 0xd6, 0x80, 0x90, 0xce, 0x09, 0x00, 0x00,
+var fileDescriptor_c6d6cf2b288949be = []byte{
+ // 1084 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x56, 0xcf, 0x6f, 0xe3, 0xc4,
+ 0x17, 0x6f, 0xda, 0x7e, 0xb7, 0xc9, 0x24, 0xdd, 0xf6, 0xeb, 0x2d, 0xdd, 0xb4, 0x74, 0xe3, 0xc8,
+ 0xa0, 0x25, 0x42, 0xaa, 0x4d, 0xb2, 0x48, 0x48, 0x15, 0x17, 0xdc, 0x05, 0xb5, 0x88, 0x95, 0x2a,
+ 0x97, 0x1f, 0x12, 0x02, 0x99, 0x89, 0x3d, 0x49, 0x46, 0x6b, 0x7b, 0x8c, 0x67, 0x12, 0x5a, 0xfe,
+ 0x02, 0x38, 0x20, 0xed, 0x11, 0x71, 0xe2, 0xc0, 0x1f, 0xb3, 0xc7, 0x1e, 0x39, 0x19, 0xd4, 0x5e,
+ 0x38, 0xe7, 0xc8, 0x09, 0xcd, 0x0f, 0xdb, 0xd3, 0x6c, 0x97, 0x6a, 0xb9, 0x44, 0xf3, 0xde, 0xfb,
+ 0xbc, 0xcf, 0x27, 0xf3, 0xe6, 0xcd, 0x1b, 0x03, 0x07, 0x0f, 0x03, 0x27, 0xc2, 0xe3, 0x09, 0x0b,
+ 0x22, 0x8c, 0x12, 0x46, 0x1d, 0x86, 0x92, 0x10, 0x65, 0x31, 0x4e, 0x98, 0x33, 0xeb, 0x6b, 0x96,
+ 0x9d, 0x66, 0x84, 0x11, 0xa3, 0x83, 0x87, 0x81, 0xad, 0x27, 0xd8, 0x1a, 0x64, 0xd6, 0xdf, 0xed,
+ 0x6a, 0xf9, 0xec, 0x3c, 0x45, 0xd4, 0x99, 0xc1, 0x08, 0x87, 0x90, 0x91, 0x4c, 0x32, 0xec, 0xee,
+ 0xbd, 0x80, 0x10, 0xbf, 0x2a, 0x7a, 0x2f, 0x20, 0xc9, 0x08, 0x13, 0x27, 0xcd, 0x08, 0x19, 0x15,
+ 0xce, 0xce, 0x98, 0x90, 0x71, 0x84, 0x1c, 0x61, 0x0d, 0xa7, 0x23, 0x27, 0x9c, 0x66, 0x90, 0x61,
+ 0x92, 0xa8, 0xb8, 0xb9, 0x18, 0x67, 0x38, 0x46, 0x94, 0xc1, 0x38, 0x2d, 0x00, 0x7c, 0x9b, 0x01,
+ 0xc9, 0x90, 0x23, 0xff, 0x35, 0xdf, 0x9a, 0x5c, 0x29, 0xc0, 0x5b, 0x15, 0x80, 0xc4, 0x31, 0x66,
+ 0x71, 0x01, 0x2a, 0x2d, 0x05, 0xdc, 0x1a, 0x93, 0x31, 0x11, 0x4b, 0x87, 0xaf, 0xa4, 0xd7, 0xfa,
+ 0x6b, 0x0d, 0x34, 0x0f, 0x05, 0xdf, 0x29, 0x83, 0x0c, 0x19, 0x3b, 0xa0, 0x1e, 0x4c, 0x20, 0x4e,
+ 0x7c, 0x1c, 0xb6, 0x6b, 0xdd, 0x5a, 0xaf, 0xe1, 0xad, 0x09, 0xfb, 0x38, 0x34, 0x10, 0x68, 0xb2,
+ 0x6c, 0x4a, 0x99, 0x1f, 0xa1, 0x19, 0x8a, 0xda, 0xcb, 0xdd, 0x5a, 0xaf, 0x39, 0xe8, 0xd9, 0xff,
+ 0x5e, 0x56, 0xfb, 0xa3, 0x0c, 0x06, 0x7c, 0xc3, 0xee, 0xee, 0xf3, 0xdc, 0x5c, 0x9a, 0xe7, 0xa6,
+ 0x71, 0x0e, 0xe3, 0xe8, 0xc0, 0xd2, 0xa8, 0x2c, 0x0f, 0x08, 0xeb, 0x13, 0x6e, 0x18, 0x23, 0xb0,
+ 0x21, 0x2c, 0x9c, 0x8c, 0xfd, 0x14, 0x65, 0x98, 0x84, 0xed, 0x15, 0x21, 0xb5, 0x63, 0xcb, 0x62,
+ 0xd9, 0x45, 0xb1, 0xec, 0xc7, 0xaa, 0x98, 0xae, 0xa5, 0xb8, 0xb7, 0x35, 0xee, 0x2a, 0xdf, 0xfa,
+ 0xf9, 0x0f, 0xb3, 0xe6, 0xdd, 0x2d, 0xbc, 0x27, 0xc2, 0x69, 0x60, 0xb0, 0x39, 0x4d, 0x86, 0x24,
+ 0x09, 0x35, 0xa1, 0xd5, 0xdb, 0x84, 0xde, 0x50, 0x42, 0xf7, 0xa5, 0xd0, 0x22, 0x81, 0x54, 0xda,
+ 0x28, 0xdd, 0x4a, 0x0a, 0x81, 0x8d, 0x18, 0x9e, 0xf9, 0x41, 0x44, 0x82, 0xa7, 0x7e, 0x98, 0xe1,
+ 0x11, 0x6b, 0xff, 0xef, 0x15, 0xb7, 0xb4, 0x90, 0x2f, 0x85, 0xd6, 0x63, 0x78, 0x76, 0xc8, 0x9d,
+ 0x8f, 0xb9, 0xcf, 0xf8, 0x1a, 0xac, 0x8f, 0x32, 0xf2, 0x3d, 0x4a, 0xfc, 0x09, 0xe2, 0x07, 0xd2,
+ 0xbe, 0x23, 0x44, 0x76, 0xc5, 0x11, 0xf1, 0x16, 0xb1, 0x55, 0xe7, 0xcc, 0xfa, 0xf6, 0x91, 0x40,
+ 0xb8, 0x7b, 0x4a, 0x65, 0x4b, 0xaa, 0x5c, 0x4b, 0xb7, 0xbc, 0x96, 0xb4, 0x25, 0x96, 0xd3, 0x47,
+ 0x90, 0x21, 0xca, 0x0a, 0xfa, 0xb5, 0x57, 0xa5, 0xbf, 0x96, 0x6e, 0x79, 0x2d, 0x69, 0x2b, 0xfa,
+ 0x63, 0xd0, 0x14, 0x57, 0xc7, 0xa7, 0x29, 0x0a, 0x68, 0xbb, 0xde, 0x5d, 0xe9, 0x35, 0x07, 0x9b,
+ 0x36, 0x0e, 0xe8, 0xe0, 0x91, 0x7d, 0xc2, 0x23, 0xa7, 0x29, 0x0a, 0xdc, 0xed, 0xaa, 0x85, 0x34,
+ 0xb8, 0xe5, 0x81, 0xb4, 0x80, 0x50, 0xe3, 0x00, 0xb4, 0xa6, 0xe9, 0x38, 0x83, 0x21, 0xf2, 0x53,
+ 0xc8, 0x26, 0xed, 0x46, 0x77, 0xa5, 0xd7, 0x70, 0xef, 0xcf, 0x73, 0xf3, 0x9e, 0x3a, 0x37, 0x2d,
+ 0x6a, 0x79, 0x4d, 0x65, 0x9e, 0x40, 0x36, 0x31, 0x7c, 0xb0, 0x03, 0xa3, 0x88, 0x7c, 0xe7, 0x4f,
+ 0xd3, 0x10, 0x32, 0xe4, 0xc3, 0x11, 0x43, 0x99, 0x8f, 0xce, 0x52, 0x9c, 0x9d, 0xb7, 0x41, 0xb7,
+ 0xd6, 0xab, 0xbb, 0x6f, 0xce, 0x73, 0xb3, 0x2b, 0x89, 0x5e, 0x0a, 0xb5, 0xbc, 0x6d, 0x11, 0xfb,
+ 0x4c, 0x84, 0x3e, 0xe0, 0x91, 0x0f, 0x45, 0xc0, 0xf8, 0x16, 0x98, 0x37, 0x64, 0xc5, 0x98, 0x0e,
+ 0xd1, 0x04, 0xce, 0x30, 0x99, 0x66, 0xed, 0xa6, 0x90, 0x79, 0x7b, 0x9e, 0x9b, 0x0f, 0x5f, 0x2a,
+ 0xa3, 0x27, 0x58, 0xde, 0xde, 0xa2, 0xd8, 0x13, 0x2d, 0x7c, 0xb0, 0xfa, 0xc3, 0xaf, 0xe6, 0x92,
+ 0xf5, 0xdb, 0x32, 0xb8, 0x7b, 0x48, 0x12, 0x8a, 0x12, 0x3a, 0xa5, 0xf2, 0xb6, 0xbb, 0xa0, 0x51,
+ 0x0e, 0x1c, 0x71, 0xdd, 0xf9, 0x71, 0x2e, 0xb6, 0xe4, 0xa7, 0x05, 0xc2, 0xad, 0xf3, 0xe3, 0x7c,
+ 0xc6, 0x3b, 0xaf, 0x4a, 0x33, 0xde, 0x07, 0xab, 0x19, 0x21, 0x4c, 0xcd, 0x03, 0x4b, 0xeb, 0x86,
+ 0x6a, 0x02, 0xcd, 0xfa, 0xf6, 0x13, 0x94, 0x3d, 0x8d, 0x90, 0x47, 0x08, 0x73, 0x57, 0x39, 0x8d,
+ 0x27, 0xb2, 0x8c, 0x1f, 0x6b, 0x60, 0x2b, 0x41, 0x67, 0xcc, 0x2f, 0x87, 0x2d, 0xf5, 0x27, 0x90,
+ 0x4e, 0xc4, 0x9d, 0x6f, 0xb9, 0x5f, 0xcc, 0x73, 0xf3, 0x75, 0x59, 0x83, 0x9b, 0x50, 0xd6, 0xdf,
+ 0xb9, 0xf9, 0xee, 0x18, 0xb3, 0xc9, 0x74, 0xc8, 0xe5, 0xf4, 0x27, 0x40, 0x5b, 0x46, 0x78, 0x48,
+ 0x9d, 0xe1, 0x39, 0x43, 0xd4, 0x3e, 0x42, 0x67, 0x2e, 0x5f, 0x78, 0x06, 0xa7, 0xfb, 0xbc, 0x64,
+ 0x3b, 0x82, 0x74, 0xa2, 0xca, 0xf4, 0xd3, 0x32, 0x68, 0xe9, 0xd5, 0x33, 0xfa, 0xa0, 0x21, 0x1b,
+ 0xbb, 0x9c, 0x89, 0xee, 0xd6, 0x3c, 0x37, 0x37, 0xe5, 0xdf, 0x2a, 0x43, 0x96, 0x57, 0x97, 0xeb,
+ 0xe3, 0xd0, 0x80, 0xa0, 0x3e, 0x41, 0x30, 0x44, 0x99, 0xdf, 0x57, 0x75, 0x79, 0x78, 0xdb, 0x9c,
+ 0x3c, 0x12, 0x78, 0xb7, 0x73, 0x99, 0x9b, 0x6b, 0x72, 0xdd, 0x9f, 0xe7, 0xe6, 0x86, 0x14, 0x29,
+ 0xc8, 0x2c, 0x6f, 0x4d, 0x2e, 0xfb, 0x9a, 0xc4, 0x40, 0xcd, 0xc7, 0xff, 0x20, 0x31, 0x78, 0x41,
+ 0x62, 0x50, 0x4a, 0x0c, 0x54, 0x3d, 0x7e, 0x59, 0x01, 0x77, 0x24, 0xda, 0x80, 0x60, 0x9d, 0xe2,
+ 0x71, 0x82, 0x42, 0x5f, 0x42, 0x54, 0xcb, 0x74, 0x74, 0x1d, 0xf9, 0x24, 0x9e, 0x0a, 0x98, 0x12,
+ 0xdc, 0xbb, 0xc8, 0xcd, 0x5a, 0x35, 0x05, 0xae, 0x51, 0x58, 0x5e, 0x8b, 0x6a, 0x58, 0x3e, 0x64,
+ 0xca, 0x33, 0xf6, 0x29, 0x2a, 0xda, 0xea, 0x06, 0x89, 0xf2, 0xf0, 0x4e, 0x11, 0x73, 0xdb, 0x15,
+ 0xfd, 0xb5, 0x74, 0xcb, 0x6b, 0xcd, 0x34, 0x9c, 0xf1, 0x0d, 0x90, 0xcf, 0x80, 0xd0, 0x17, 0x43,
+ 0x6c, 0xe5, 0xd6, 0x21, 0xf6, 0x40, 0x0d, 0xb1, 0xd7, 0xb4, 0xc7, 0xa5, 0xcc, 0xb7, 0xbc, 0x75,
+ 0xe5, 0x50, 0x63, 0x2c, 0x02, 0x46, 0x81, 0xa8, 0x9a, 0x55, 0x3d, 0x2c, 0xb7, 0xed, 0xe2, 0xc1,
+ 0x3c, 0x37, 0x77, 0xae, 0xab, 0x54, 0x1c, 0x96, 0xf7, 0x7f, 0xe5, 0xac, 0xda, 0xd6, 0xfa, 0x18,
+ 0xd4, 0x8b, 0x07, 0xd6, 0xd8, 0x03, 0x8d, 0x64, 0x1a, 0xa3, 0x8c, 0x47, 0xc4, 0xc9, 0xac, 0x7a,
+ 0x95, 0xc3, 0xe8, 0x82, 0x66, 0x88, 0x12, 0x12, 0xe3, 0x44, 0xc4, 0x97, 0x45, 0x5c, 0x77, 0xb9,
+ 0x5f, 0x3d, 0xbf, 0xec, 0xd4, 0x2e, 0x2e, 0x3b, 0xb5, 0x3f, 0x2f, 0x3b, 0xb5, 0x67, 0x57, 0x9d,
+ 0xa5, 0x8b, 0xab, 0xce, 0xd2, 0xef, 0x57, 0x9d, 0xa5, 0x2f, 0x5d, 0xed, 0x8a, 0x05, 0x84, 0xc6,
+ 0x84, 0xf2, 0xaf, 0xaf, 0xfd, 0x31, 0x71, 0x62, 0x12, 0x4e, 0x23, 0x44, 0xe5, 0x87, 0xd8, 0x7e,
+ 0xf1, 0x25, 0xf6, 0xce, 0x7b, 0xfb, 0x8b, 0x9f, 0x4a, 0xc3, 0x3b, 0x62, 0x9e, 0x3c, 0xfa, 0x27,
+ 0x00, 0x00, 0xff, 0xff, 0x37, 0x53, 0x91, 0x3d, 0xb8, 0x09, 0x00, 0x00,
}
func (m *ClientState) Marshal() (dAtA []byte, err error) {
diff --git a/modules/light-clients/09-localhost/types/localhost.pb.go b/modules/light-clients/09-localhost/types/localhost.pb.go
index c8793d62..bad8a41f 100644
--- a/modules/light-clients/09-localhost/types/localhost.pb.go
+++ b/modules/light-clients/09-localhost/types/localhost.pb.go
@@ -1,5 +1,5 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: ibcgo/lightclients/localhost/v1/localhost.proto
+// source: ibc/lightclients/localhost/v1/localhost.proto
package types
@@ -37,7 +37,7 @@ func (m *ClientState) Reset() { *m = ClientState{} }
func (m *ClientState) String() string { return proto.CompactTextString(m) }
func (*ClientState) ProtoMessage() {}
func (*ClientState) Descriptor() ([]byte, []int) {
- return fileDescriptor_1a6dbd867337bf2e, []int{0}
+ return fileDescriptor_acd9f5b22d41bf6d, []int{0}
}
func (m *ClientState) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -67,33 +67,33 @@ func (m *ClientState) XXX_DiscardUnknown() {
var xxx_messageInfo_ClientState proto.InternalMessageInfo
func init() {
- proto.RegisterType((*ClientState)(nil), "ibcgo.lightclients.localhost.v1.ClientState")
+ proto.RegisterType((*ClientState)(nil), "ibc.lightclients.localhost.v1.ClientState")
}
func init() {
- proto.RegisterFile("ibcgo/lightclients/localhost/v1/localhost.proto", fileDescriptor_1a6dbd867337bf2e)
+ proto.RegisterFile("ibc/lightclients/localhost/v1/localhost.proto", fileDescriptor_acd9f5b22d41bf6d)
}
-var fileDescriptor_1a6dbd867337bf2e = []byte{
+var fileDescriptor_acd9f5b22d41bf6d = []byte{
// 285 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0xcf, 0x4c, 0x4a, 0x4e,
- 0xcf, 0xd7, 0xcf, 0xc9, 0x4c, 0xcf, 0x28, 0x49, 0xce, 0xc9, 0x4c, 0xcd, 0x2b, 0x29, 0xd6, 0xcf,
- 0xc9, 0x4f, 0x4e, 0xcc, 0xc9, 0xc8, 0x2f, 0x2e, 0xd1, 0x2f, 0x33, 0x44, 0x70, 0xf4, 0x0a, 0x8a,
- 0xf2, 0x4b, 0xf2, 0x85, 0xe4, 0xc1, 0x1a, 0xf4, 0x90, 0x35, 0xe8, 0x21, 0xd4, 0x94, 0x19, 0x4a,
- 0x89, 0xa4, 0xe7, 0xa7, 0xe7, 0x83, 0xd5, 0xea, 0x83, 0x58, 0x10, 0x6d, 0x52, 0x8a, 0x10, 0x7b,
- 0x92, 0xf3, 0x8b, 0x52, 0xf5, 0x21, 0xda, 0x40, 0x86, 0x43, 0x58, 0x10, 0x25, 0x4a, 0xf5, 0x5c,
- 0xdc, 0xce, 0x60, 0x7e, 0x70, 0x49, 0x62, 0x49, 0xaa, 0x90, 0x1e, 0x17, 0x47, 0x72, 0x46, 0x62,
- 0x66, 0x5e, 0x7c, 0x66, 0x8a, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0xa7, 0x93, 0xf0, 0xa7, 0x7b, 0xf2,
- 0xfc, 0x95, 0x89, 0xb9, 0x39, 0x56, 0x4a, 0x30, 0x19, 0xa5, 0x20, 0x76, 0x30, 0xd3, 0x33, 0x45,
- 0xc8, 0x8a, 0x8b, 0x2d, 0x23, 0x15, 0xe4, 0x2a, 0x09, 0x26, 0x05, 0x46, 0x0d, 0x6e, 0x23, 0x19,
- 0x3d, 0x88, 0x4b, 0x41, 0x56, 0xea, 0x41, 0x2d, 0x2a, 0x33, 0xd4, 0xf3, 0x00, 0xab, 0x71, 0x62,
- 0x39, 0x71, 0x4f, 0x9e, 0x21, 0x08, 0xaa, 0xc3, 0x8a, 0xa5, 0x63, 0x81, 0x3c, 0x83, 0x53, 0xf4,
- 0x89, 0x47, 0x72, 0x8c, 0x17, 0x1e, 0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, 0x38, 0xe1, 0xb1, 0x1c,
- 0xc3, 0x85, 0xc7, 0x72, 0x0c, 0x37, 0x1e, 0xcb, 0x31, 0x44, 0x39, 0xa6, 0x67, 0x96, 0x64, 0x94,
- 0x26, 0xe9, 0x25, 0xe7, 0xe7, 0xea, 0x27, 0xe7, 0x17, 0xe7, 0xe6, 0x17, 0x83, 0xc2, 0x4d, 0x37,
- 0x3d, 0x5f, 0x3f, 0x37, 0x3f, 0xa5, 0x34, 0x27, 0xb5, 0x18, 0x12, 0x80, 0xba, 0xb0, 0x10, 0x34,
- 0xb0, 0xd4, 0x45, 0x04, 0x62, 0x49, 0x65, 0x41, 0x6a, 0x71, 0x12, 0x1b, 0xd8, 0x93, 0xc6, 0x80,
- 0x00, 0x00, 0x00, 0xff, 0xff, 0x35, 0xf3, 0x24, 0x25, 0x71, 0x01, 0x00, 0x00,
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0xcd, 0x4c, 0x4a, 0xd6,
+ 0xcf, 0xc9, 0x4c, 0xcf, 0x28, 0x49, 0xce, 0xc9, 0x4c, 0xcd, 0x2b, 0x29, 0xd6, 0xcf, 0xc9, 0x4f,
+ 0x4e, 0xcc, 0xc9, 0xc8, 0x2f, 0x2e, 0xd1, 0x2f, 0x33, 0x44, 0x70, 0xf4, 0x0a, 0x8a, 0xf2, 0x4b,
+ 0xf2, 0x85, 0x64, 0x33, 0x93, 0x92, 0xf5, 0x90, 0x95, 0xeb, 0x21, 0x54, 0x94, 0x19, 0x4a, 0x89,
+ 0xa4, 0xe7, 0xa7, 0xe7, 0x83, 0x55, 0xea, 0x83, 0x58, 0x10, 0x4d, 0x52, 0xf2, 0x20, 0x3b, 0x92,
+ 0xf3, 0x8b, 0x52, 0xf5, 0x21, 0x9a, 0x40, 0x06, 0x43, 0x58, 0x10, 0x05, 0x4a, 0xb5, 0x5c, 0xdc,
+ 0xce, 0x60, 0x7e, 0x70, 0x49, 0x62, 0x49, 0xaa, 0x90, 0x1e, 0x17, 0x47, 0x72, 0x46, 0x62, 0x66,
+ 0x5e, 0x7c, 0x66, 0x8a, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0xa7, 0x93, 0xf0, 0xa7, 0x7b, 0xf2, 0xfc,
+ 0x95, 0x89, 0xb9, 0x39, 0x56, 0x4a, 0x30, 0x19, 0xa5, 0x20, 0x76, 0x30, 0xd3, 0x33, 0x45, 0xc8,
+ 0x82, 0x8b, 0x2d, 0x23, 0x15, 0xe4, 0x26, 0x09, 0x26, 0x05, 0x46, 0x0d, 0x6e, 0x23, 0x29, 0x3d,
+ 0x90, 0x2b, 0x41, 0x16, 0xea, 0x41, 0xad, 0x29, 0x33, 0xd4, 0xf3, 0x00, 0xab, 0x70, 0x62, 0x39,
+ 0x71, 0x4f, 0x9e, 0x21, 0x08, 0xaa, 0xde, 0x8a, 0xa5, 0x63, 0x81, 0x3c, 0x83, 0x53, 0xf4, 0x89,
+ 0x47, 0x72, 0x8c, 0x17, 0x1e, 0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, 0x38, 0xe1, 0xb1, 0x1c, 0xc3,
+ 0x85, 0xc7, 0x72, 0x0c, 0x37, 0x1e, 0xcb, 0x31, 0x44, 0x39, 0xa6, 0x67, 0x96, 0x64, 0x94, 0x26,
+ 0xe9, 0x25, 0xe7, 0xe7, 0xea, 0x27, 0xe7, 0x17, 0xe7, 0xe6, 0x17, 0xeb, 0x67, 0x26, 0x25, 0xeb,
+ 0xa6, 0xe7, 0xeb, 0xe7, 0xe6, 0xa7, 0x94, 0xe6, 0xa4, 0x16, 0x43, 0x82, 0x4e, 0x17, 0x16, 0x76,
+ 0x06, 0x96, 0xba, 0x88, 0xe0, 0x2b, 0xa9, 0x2c, 0x48, 0x2d, 0x4e, 0x62, 0x03, 0x7b, 0xd1, 0x18,
+ 0x10, 0x00, 0x00, 0xff, 0xff, 0xee, 0xdf, 0xcc, 0xab, 0x69, 0x01, 0x00, 0x00,
}
func (m *ClientState) Marshal() (dAtA []byte, err error) {
diff --git a/proto/ibcgo/apps/transfer/v1/genesis.proto b/proto/ibc/apps/transfer/v1/genesis.proto
similarity index 86%
rename from proto/ibcgo/apps/transfer/v1/genesis.proto
rename to proto/ibc/apps/transfer/v1/genesis.proto
index 70b6438c..39cf7a65 100644
--- a/proto/ibcgo/apps/transfer/v1/genesis.proto
+++ b/proto/ibc/apps/transfer/v1/genesis.proto
@@ -1,10 +1,10 @@
syntax = "proto3";
-package ibcgo.apps.transfer.v1;
+package ibc.apps.transfer.v1;
option go_package = "github.com/cosmos/ibc-go/modules/apps/transfer/types";
-import "ibcgo/apps/transfer/v1/transfer.proto";
+import "ibc/apps/transfer/v1/transfer.proto";
import "gogoproto/gogo.proto";
// GenesisState defines the ibc-transfer genesis state
diff --git a/proto/ibcgo/apps/transfer/v1/query.proto b/proto/ibc/apps/transfer/v1/query.proto
similarity index 96%
rename from proto/ibcgo/apps/transfer/v1/query.proto
rename to proto/ibc/apps/transfer/v1/query.proto
index d8be3728..f4014d62 100644
--- a/proto/ibcgo/apps/transfer/v1/query.proto
+++ b/proto/ibc/apps/transfer/v1/query.proto
@@ -1,10 +1,10 @@
syntax = "proto3";
-package ibcgo.apps.transfer.v1;
+package ibc.apps.transfer.v1;
import "gogoproto/gogo.proto";
import "cosmos/base/query/v1beta1/pagination.proto";
-import "ibcgo/apps/transfer/v1/transfer.proto";
+import "ibc/apps/transfer/v1/transfer.proto";
import "google/api/annotations.proto";
option go_package = "github.com/cosmos/ibc-go/modules/apps/transfer/types";
diff --git a/proto/ibcgo/apps/transfer/v1/transfer.proto b/proto/ibc/apps/transfer/v1/transfer.proto
similarity index 98%
rename from proto/ibcgo/apps/transfer/v1/transfer.proto
rename to proto/ibc/apps/transfer/v1/transfer.proto
index b3dda2f9..e43971ee 100644
--- a/proto/ibcgo/apps/transfer/v1/transfer.proto
+++ b/proto/ibc/apps/transfer/v1/transfer.proto
@@ -1,6 +1,6 @@
syntax = "proto3";
-package ibcgo.apps.transfer.v1;
+package ibc.apps.transfer.v1;
option go_package = "github.com/cosmos/ibc-go/modules/apps/transfer/types";
diff --git a/proto/ibcgo/apps/transfer/v1/tx.proto b/proto/ibc/apps/transfer/v1/tx.proto
similarity index 92%
rename from proto/ibcgo/apps/transfer/v1/tx.proto
rename to proto/ibc/apps/transfer/v1/tx.proto
index 383f6e67..73887e79 100644
--- a/proto/ibcgo/apps/transfer/v1/tx.proto
+++ b/proto/ibc/apps/transfer/v1/tx.proto
@@ -1,12 +1,12 @@
syntax = "proto3";
-package ibcgo.apps.transfer.v1;
+package ibc.apps.transfer.v1;
option go_package = "github.com/cosmos/ibc-go/modules/apps/transfer/types";
import "gogoproto/gogo.proto";
import "cosmos/base/v1beta1/coin.proto";
-import "ibcgo/core/client/v1/client.proto";
+import "ibc/core/client/v1/client.proto";
// Msg defines the ibc/transfer Msg service.
service Msg {
@@ -34,7 +34,7 @@ message MsgTransfer {
string receiver = 5;
// Timeout height relative to the current block height.
// The timeout is disabled when set to 0.
- ibcgo.core.client.v1.Height timeout_height = 6 [
+ ibc.core.client.v1.Height timeout_height = 6 [
(gogoproto.moretags) = "yaml:\"timeout_height\"",
(gogoproto.nullable) = false
];
diff --git a/proto/ibcgo/core/channel/v1/channel.proto b/proto/ibc/core/channel/v1/channel.proto
similarity index 98%
rename from proto/ibcgo/core/channel/v1/channel.proto
rename to proto/ibc/core/channel/v1/channel.proto
index eb1b627c..81a92a73 100644
--- a/proto/ibcgo/core/channel/v1/channel.proto
+++ b/proto/ibc/core/channel/v1/channel.proto
@@ -1,11 +1,11 @@
syntax = "proto3";
-package ibcgo.core.channel.v1;
+package ibc.core.channel.v1;
option go_package = "github.com/cosmos/ibc-go/modules/core/04-channel/types";
import "gogoproto/gogo.proto";
-import "ibcgo/core/client/v1/client.proto";
+import "ibc/core/client/v1/client.proto";
// Channel defines pipeline for exactly-once packet delivery between specific
// modules on separate blockchains, which has at least one end capable of
@@ -115,7 +115,7 @@ message Packet {
// actual opaque bytes transferred directly to the application module
bytes data = 6;
// block height after which the packet times out
- ibcgo.core.client.v1.Height timeout_height = 7 [
+ ibc.core.client.v1.Height timeout_height = 7 [
(gogoproto.moretags) = "yaml:\"timeout_height\"",
(gogoproto.nullable) = false
];
diff --git a/proto/ibcgo/core/channel/v1/genesis.proto b/proto/ibc/core/channel/v1/genesis.proto
similarity index 94%
rename from proto/ibcgo/core/channel/v1/genesis.proto
rename to proto/ibc/core/channel/v1/genesis.proto
index c4d29781..00a7bcbb 100644
--- a/proto/ibcgo/core/channel/v1/genesis.proto
+++ b/proto/ibc/core/channel/v1/genesis.proto
@@ -1,11 +1,11 @@
syntax = "proto3";
-package ibcgo.core.channel.v1;
+package ibc.core.channel.v1;
option go_package = "github.com/cosmos/ibc-go/modules/core/04-channel/types";
import "gogoproto/gogo.proto";
-import "ibcgo/core/channel/v1/channel.proto";
+import "ibc/core/channel/v1/channel.proto";
// GenesisState defines the ibc channel submodule's genesis state.
message GenesisState {
diff --git a/proto/ibcgo/core/channel/v1/query.proto b/proto/ibc/core/channel/v1/query.proto
similarity index 89%
rename from proto/ibcgo/core/channel/v1/query.proto
rename to proto/ibc/core/channel/v1/query.proto
index e8628ff5..1c98fe2a 100644
--- a/proto/ibcgo/core/channel/v1/query.proto
+++ b/proto/ibc/core/channel/v1/query.proto
@@ -1,12 +1,12 @@
syntax = "proto3";
-package ibcgo.core.channel.v1;
+package ibc.core.channel.v1;
option go_package = "github.com/cosmos/ibc-go/modules/core/04-channel/types";
-import "ibcgo/core/client/v1/client.proto";
+import "ibc/core/client/v1/client.proto";
import "cosmos/base/query/v1beta1/pagination.proto";
-import "ibcgo/core/channel/v1/channel.proto";
+import "ibc/core/channel/v1/channel.proto";
import "google/api/annotations.proto";
import "google/protobuf/any.proto";
import "gogoproto/gogo.proto";
@@ -128,11 +128,11 @@ message QueryChannelRequest {
// proof was retrieved.
message QueryChannelResponse {
// channel associated with the request identifiers
- ibcgo.core.channel.v1.Channel channel = 1;
+ ibc.core.channel.v1.Channel channel = 1;
// merkle proof of existence
bytes proof = 2;
// height at which the proof was retrieved
- ibcgo.core.client.v1.Height proof_height = 3 [ (gogoproto.nullable) = false ];
+ ibc.core.client.v1.Height proof_height = 3 [ (gogoproto.nullable) = false ];
}
// QueryChannelsRequest is the request type for the Query/Channels RPC method
@@ -144,11 +144,11 @@ message QueryChannelsRequest {
// QueryChannelsResponse is the response type for the Query/Channels RPC method.
message QueryChannelsResponse {
// list of stored channels of the chain.
- repeated ibcgo.core.channel.v1.IdentifiedChannel channels = 1;
+ repeated ibc.core.channel.v1.IdentifiedChannel channels = 1;
// pagination response
cosmos.base.query.v1beta1.PageResponse pagination = 2;
// query block height
- ibcgo.core.client.v1.Height height = 3 [ (gogoproto.nullable) = false ];
+ ibc.core.client.v1.Height height = 3 [ (gogoproto.nullable) = false ];
}
// QueryConnectionChannelsRequest is the request type for the
@@ -164,11 +164,11 @@ message QueryConnectionChannelsRequest {
// Query/QueryConnectionChannels RPC method
message QueryConnectionChannelsResponse {
// list of channels associated with a connection.
- repeated ibcgo.core.channel.v1.IdentifiedChannel channels = 1;
+ repeated ibc.core.channel.v1.IdentifiedChannel channels = 1;
// pagination response
cosmos.base.query.v1beta1.PageResponse pagination = 2;
// query block height
- ibcgo.core.client.v1.Height height = 3 [ (gogoproto.nullable) = false ];
+ ibc.core.client.v1.Height height = 3 [ (gogoproto.nullable) = false ];
}
// QueryChannelClientStateRequest is the request type for the Query/ClientState
@@ -184,11 +184,11 @@ message QueryChannelClientStateRequest {
// Query/QueryChannelClientState RPC method
message QueryChannelClientStateResponse {
// client state associated with the channel
- ibcgo.core.client.v1.IdentifiedClientState identified_client_state = 1;
+ ibc.core.client.v1.IdentifiedClientState identified_client_state = 1;
// merkle proof of existence
bytes proof = 2;
// height at which the proof was retrieved
- ibcgo.core.client.v1.Height proof_height = 3 [ (gogoproto.nullable) = false ];
+ ibc.core.client.v1.Height proof_height = 3 [ (gogoproto.nullable) = false ];
}
// QueryChannelConsensusStateRequest is the request type for the
@@ -214,7 +214,7 @@ message QueryChannelConsensusStateResponse {
// merkle proof of existence
bytes proof = 3;
// height at which the proof was retrieved
- ibcgo.core.client.v1.Height proof_height = 4 [ (gogoproto.nullable) = false ];
+ ibc.core.client.v1.Height proof_height = 4 [ (gogoproto.nullable) = false ];
}
// QueryPacketCommitmentRequest is the request type for the
@@ -237,7 +237,7 @@ message QueryPacketCommitmentResponse {
// merkle proof of existence
bytes proof = 2;
// height at which the proof was retrieved
- ibcgo.core.client.v1.Height proof_height = 3 [ (gogoproto.nullable) = false ];
+ ibc.core.client.v1.Height proof_height = 3 [ (gogoproto.nullable) = false ];
}
// QueryPacketCommitmentsRequest is the request type for the
@@ -254,11 +254,11 @@ message QueryPacketCommitmentsRequest {
// QueryPacketCommitmentsResponse is the request type for the
// Query/QueryPacketCommitments RPC method
message QueryPacketCommitmentsResponse {
- repeated ibcgo.core.channel.v1.PacketState commitments = 1;
+ repeated ibc.core.channel.v1.PacketState commitments = 1;
// pagination response
cosmos.base.query.v1beta1.PageResponse pagination = 2;
// query block height
- ibcgo.core.client.v1.Height height = 3 [ (gogoproto.nullable) = false ];
+ ibc.core.client.v1.Height height = 3 [ (gogoproto.nullable) = false ];
}
// QueryPacketReceiptRequest is the request type for the
@@ -281,7 +281,7 @@ message QueryPacketReceiptResponse {
// merkle proof of existence
bytes proof = 3;
// height at which the proof was retrieved
- ibcgo.core.client.v1.Height proof_height = 4 [ (gogoproto.nullable) = false ];
+ ibc.core.client.v1.Height proof_height = 4 [ (gogoproto.nullable) = false ];
}
// QueryPacketAcknowledgementRequest is the request type for the
@@ -304,7 +304,7 @@ message QueryPacketAcknowledgementResponse {
// merkle proof of existence
bytes proof = 2;
// height at which the proof was retrieved
- ibcgo.core.client.v1.Height proof_height = 3 [ (gogoproto.nullable) = false ];
+ ibc.core.client.v1.Height proof_height = 3 [ (gogoproto.nullable) = false ];
}
// QueryPacketAcknowledgementsRequest is the request type for the
@@ -321,11 +321,11 @@ message QueryPacketAcknowledgementsRequest {
// QueryPacketAcknowledgemetsResponse is the request type for the
// Query/QueryPacketAcknowledgements RPC method
message QueryPacketAcknowledgementsResponse {
- repeated ibcgo.core.channel.v1.PacketState acknowledgements = 1;
+ repeated ibc.core.channel.v1.PacketState acknowledgements = 1;
// pagination response
cosmos.base.query.v1beta1.PageResponse pagination = 2;
// query block height
- ibcgo.core.client.v1.Height height = 3 [ (gogoproto.nullable) = false ];
+ ibc.core.client.v1.Height height = 3 [ (gogoproto.nullable) = false ];
}
// QueryUnreceivedPacketsRequest is the request type for the
@@ -345,7 +345,7 @@ message QueryUnreceivedPacketsResponse {
// list of unreceived packet sequences
repeated uint64 sequences = 1;
// query block height
- ibcgo.core.client.v1.Height height = 2 [ (gogoproto.nullable) = false ];
+ ibc.core.client.v1.Height height = 2 [ (gogoproto.nullable) = false ];
}
// QueryUnreceivedAcks is the request type for the
@@ -365,7 +365,7 @@ message QueryUnreceivedAcksResponse {
// list of unreceived acknowledgement sequences
repeated uint64 sequences = 1;
// query block height
- ibcgo.core.client.v1.Height height = 2 [ (gogoproto.nullable) = false ];
+ ibc.core.client.v1.Height height = 2 [ (gogoproto.nullable) = false ];
}
// QueryNextSequenceReceiveRequest is the request type for the
@@ -385,5 +385,5 @@ message QueryNextSequenceReceiveResponse {
// merkle proof of existence
bytes proof = 2;
// height at which the proof was retrieved
- ibcgo.core.client.v1.Height proof_height = 3 [ (gogoproto.nullable) = false ];
+ ibc.core.client.v1.Height proof_height = 3 [ (gogoproto.nullable) = false ];
}
diff --git a/proto/ibcgo/core/channel/v1/tx.proto b/proto/ibc/core/channel/v1/tx.proto
similarity index 94%
rename from proto/ibcgo/core/channel/v1/tx.proto
rename to proto/ibc/core/channel/v1/tx.proto
index 9ee96e63..8fe5feb5 100644
--- a/proto/ibcgo/core/channel/v1/tx.proto
+++ b/proto/ibc/core/channel/v1/tx.proto
@@ -1,12 +1,12 @@
syntax = "proto3";
-package ibcgo.core.channel.v1;
+package ibc.core.channel.v1;
option go_package = "github.com/cosmos/ibc-go/modules/core/04-channel/types";
import "gogoproto/gogo.proto";
-import "ibcgo/core/client/v1/client.proto";
-import "ibcgo/core/channel/v1/channel.proto";
+import "ibc/core/client/v1/client.proto";
+import "ibc/core/channel/v1/channel.proto";
// Msg defines the ibc/channel Msg service.
service Msg {
@@ -74,7 +74,7 @@ message MsgChannelOpenTry {
string counterparty_version = 4
[ (gogoproto.moretags) = "yaml:\"counterparty_version\"" ];
bytes proof_init = 5 [ (gogoproto.moretags) = "yaml:\"proof_init\"" ];
- ibcgo.core.client.v1.Height proof_height = 6 [
+ ibc.core.client.v1.Height proof_height = 6 [
(gogoproto.moretags) = "yaml:\"proof_height\"",
(gogoproto.nullable) = false
];
@@ -97,7 +97,7 @@ message MsgChannelOpenAck {
string counterparty_version = 4
[ (gogoproto.moretags) = "yaml:\"counterparty_version\"" ];
bytes proof_try = 5 [ (gogoproto.moretags) = "yaml:\"proof_try\"" ];
- ibcgo.core.client.v1.Height proof_height = 6 [
+ ibc.core.client.v1.Height proof_height = 6 [
(gogoproto.moretags) = "yaml:\"proof_height\"",
(gogoproto.nullable) = false
];
@@ -116,7 +116,7 @@ message MsgChannelOpenConfirm {
string port_id = 1 [ (gogoproto.moretags) = "yaml:\"port_id\"" ];
string channel_id = 2 [ (gogoproto.moretags) = "yaml:\"channel_id\"" ];
bytes proof_ack = 3 [ (gogoproto.moretags) = "yaml:\"proof_ack\"" ];
- ibcgo.core.client.v1.Height proof_height = 4 [
+ ibc.core.client.v1.Height proof_height = 4 [
(gogoproto.moretags) = "yaml:\"proof_height\"",
(gogoproto.nullable) = false
];
@@ -150,7 +150,7 @@ message MsgChannelCloseConfirm {
string port_id = 1 [ (gogoproto.moretags) = "yaml:\"port_id\"" ];
string channel_id = 2 [ (gogoproto.moretags) = "yaml:\"channel_id\"" ];
bytes proof_init = 3 [ (gogoproto.moretags) = "yaml:\"proof_init\"" ];
- ibcgo.core.client.v1.Height proof_height = 4 [
+ ibc.core.client.v1.Height proof_height = 4 [
(gogoproto.moretags) = "yaml:\"proof_height\"",
(gogoproto.nullable) = false
];
@@ -169,7 +169,7 @@ message MsgRecvPacket {
Packet packet = 1 [ (gogoproto.nullable) = false ];
bytes proof_commitment = 2
[ (gogoproto.moretags) = "yaml:\"proof_commitment\"" ];
- ibcgo.core.client.v1.Height proof_height = 3 [
+ ibc.core.client.v1.Height proof_height = 3 [
(gogoproto.moretags) = "yaml:\"proof_height\"",
(gogoproto.nullable) = false
];
@@ -187,7 +187,7 @@ message MsgTimeout {
Packet packet = 1 [ (gogoproto.nullable) = false ];
bytes proof_unreceived = 2
[ (gogoproto.moretags) = "yaml:\"proof_unreceived\"" ];
- ibcgo.core.client.v1.Height proof_height = 3 [
+ ibc.core.client.v1.Height proof_height = 3 [
(gogoproto.moretags) = "yaml:\"proof_height\"",
(gogoproto.nullable) = false
];
@@ -208,7 +208,7 @@ message MsgTimeoutOnClose {
bytes proof_unreceived = 2
[ (gogoproto.moretags) = "yaml:\"proof_unreceived\"" ];
bytes proof_close = 3 [ (gogoproto.moretags) = "yaml:\"proof_close\"" ];
- ibcgo.core.client.v1.Height proof_height = 4 [
+ ibc.core.client.v1.Height proof_height = 4 [
(gogoproto.moretags) = "yaml:\"proof_height\"",
(gogoproto.nullable) = false
];
@@ -228,7 +228,7 @@ message MsgAcknowledgement {
Packet packet = 1 [ (gogoproto.nullable) = false ];
bytes acknowledgement = 2;
bytes proof_acked = 3 [ (gogoproto.moretags) = "yaml:\"proof_acked\"" ];
- ibcgo.core.client.v1.Height proof_height = 4 [
+ ibc.core.client.v1.Height proof_height = 4 [
(gogoproto.moretags) = "yaml:\"proof_height\"",
(gogoproto.nullable) = false
];
diff --git a/proto/ibcgo/core/client/v1/client.proto b/proto/ibc/core/client/v1/client.proto
similarity index 99%
rename from proto/ibcgo/core/client/v1/client.proto
rename to proto/ibc/core/client/v1/client.proto
index 84b0fd18..92728700 100644
--- a/proto/ibcgo/core/client/v1/client.proto
+++ b/proto/ibc/core/client/v1/client.proto
@@ -1,6 +1,6 @@
syntax = "proto3";
-package ibcgo.core.client.v1;
+package ibc.core.client.v1;
option go_package = "github.com/cosmos/ibc-go/modules/core/02-client/types";
diff --git a/proto/ibcgo/core/client/v1/genesis.proto b/proto/ibc/core/client/v1/genesis.proto
similarity index 96%
rename from proto/ibcgo/core/client/v1/genesis.proto
rename to proto/ibc/core/client/v1/genesis.proto
index f34d1208..d6a74256 100644
--- a/proto/ibcgo/core/client/v1/genesis.proto
+++ b/proto/ibc/core/client/v1/genesis.proto
@@ -1,10 +1,10 @@
syntax = "proto3";
-package ibcgo.core.client.v1;
+package ibc.core.client.v1;
option go_package = "github.com/cosmos/ibc-go/modules/core/02-client/types";
-import "ibcgo/core/client/v1/client.proto";
+import "ibc/core/client/v1/client.proto";
import "gogoproto/gogo.proto";
// GenesisState defines the ibc client submodule's genesis state.
diff --git a/proto/ibcgo/core/client/v1/query.proto b/proto/ibc/core/client/v1/query.proto
similarity index 96%
rename from proto/ibcgo/core/client/v1/query.proto
rename to proto/ibc/core/client/v1/query.proto
index 5672653d..915b6008 100644
--- a/proto/ibcgo/core/client/v1/query.proto
+++ b/proto/ibc/core/client/v1/query.proto
@@ -1,11 +1,11 @@
syntax = "proto3";
-package ibcgo.core.client.v1;
+package ibc.core.client.v1;
option go_package = "github.com/cosmos/ibc-go/modules/core/02-client/types";
import "cosmos/base/query/v1beta1/pagination.proto";
-import "ibcgo/core/client/v1/client.proto";
+import "ibc/core/client/v1/client.proto";
import "google/protobuf/any.proto";
import "google/api/annotations.proto";
import "gogoproto/gogo.proto";
@@ -71,7 +71,7 @@ message QueryClientStateResponse {
// merkle proof of existence
bytes proof = 2;
// height at which the proof was retrieved
- ibcgo.core.client.v1.Height proof_height = 3 [ (gogoproto.nullable) = false ];
+ ibc.core.client.v1.Height proof_height = 3 [ (gogoproto.nullable) = false ];
}
// QueryClientStatesRequest is the request type for the Query/ClientStates RPC
@@ -116,7 +116,7 @@ message QueryConsensusStateResponse {
// merkle proof of existence
bytes proof = 2;
// height at which the proof was retrieved
- ibcgo.core.client.v1.Height proof_height = 3 [ (gogoproto.nullable) = false ];
+ ibc.core.client.v1.Height proof_height = 3 [ (gogoproto.nullable) = false ];
}
// QueryConsensusStatesRequest is the request type for the Query/ConsensusStates
diff --git a/proto/ibcgo/core/client/v1/tx.proto b/proto/ibc/core/client/v1/tx.proto
similarity index 98%
rename from proto/ibcgo/core/client/v1/tx.proto
rename to proto/ibc/core/client/v1/tx.proto
index cde8e9f2..56f12911 100644
--- a/proto/ibcgo/core/client/v1/tx.proto
+++ b/proto/ibc/core/client/v1/tx.proto
@@ -1,12 +1,12 @@
syntax = "proto3";
-package ibcgo.core.client.v1;
+package ibc.core.client.v1;
option go_package = "github.com/cosmos/ibc-go/modules/core/02-client/types";
import "gogoproto/gogo.proto";
import "google/protobuf/any.proto";
-import "ibcgo/core/client/v1/client.proto";
+import "ibc/core/client/v1/client.proto";
// Msg defines the ibc/client Msg service.
service Msg {
diff --git a/proto/ibcgo/core/commitment/v1/commitment.proto b/proto/ibc/core/commitment/v1/commitment.proto
similarity index 97%
rename from proto/ibcgo/core/commitment/v1/commitment.proto
rename to proto/ibc/core/commitment/v1/commitment.proto
index 229ac212..4fc56fe8 100644
--- a/proto/ibcgo/core/commitment/v1/commitment.proto
+++ b/proto/ibc/core/commitment/v1/commitment.proto
@@ -1,6 +1,6 @@
syntax = "proto3";
-package ibcgo.core.commitment.v1;
+package ibc.core.commitment.v1;
option go_package = "github.com/cosmos/ibc-go/modules/core/23-commitment/types";
diff --git a/proto/ibcgo/core/connection/v1/connection.proto b/proto/ibc/core/connection/v1/connection.proto
similarity index 96%
rename from proto/ibcgo/core/connection/v1/connection.proto
rename to proto/ibc/core/connection/v1/connection.proto
index 191d5515..c9112710 100644
--- a/proto/ibcgo/core/connection/v1/connection.proto
+++ b/proto/ibc/core/connection/v1/connection.proto
@@ -1,11 +1,11 @@
syntax = "proto3";
-package ibcgo.core.connection.v1;
+package ibc.core.connection.v1;
option go_package = "github.com/cosmos/ibc-go/modules/core/03-connection/types";
import "gogoproto/gogo.proto";
-import "ibcgo/core/commitment/v1/commitment.proto";
+import "ibc/core/commitment/v1/commitment.proto";
// ICS03 - Connection Data Structures as defined in
// https://github.com/cosmos/ics/tree/master/spec/ics-003-connection-semantics#data-structures
@@ -78,7 +78,7 @@ message Counterparty {
// given connection.
string connection_id = 2 [ (gogoproto.moretags) = "yaml:\"connection_id\"" ];
// commitment merkle prefix of the counterparty chain.
- ibcgo.core.commitment.v1.MerklePrefix prefix = 3
+ ibc.core.commitment.v1.MerklePrefix prefix = 3
[ (gogoproto.nullable) = false ];
}
diff --git a/proto/ibcgo/core/connection/v1/genesis.proto b/proto/ibc/core/connection/v1/genesis.proto
similarity index 88%
rename from proto/ibcgo/core/connection/v1/genesis.proto
rename to proto/ibc/core/connection/v1/genesis.proto
index cbb5b0ae..8a9d9fd0 100644
--- a/proto/ibcgo/core/connection/v1/genesis.proto
+++ b/proto/ibc/core/connection/v1/genesis.proto
@@ -1,11 +1,11 @@
syntax = "proto3";
-package ibcgo.core.connection.v1;
+package ibc.core.connection.v1;
option go_package = "github.com/cosmos/ibc-go/modules/core/03-connection/types";
import "gogoproto/gogo.proto";
-import "ibcgo/core/connection/v1/connection.proto";
+import "ibc/core/connection/v1/connection.proto";
// GenesisState defines the ibc connection submodule's genesis state.
message GenesisState {
diff --git a/proto/ibcgo/core/connection/v1/query.proto b/proto/ibc/core/connection/v1/query.proto
similarity index 86%
rename from proto/ibcgo/core/connection/v1/query.proto
rename to proto/ibc/core/connection/v1/query.proto
index ec84f831..efd29c2a 100644
--- a/proto/ibcgo/core/connection/v1/query.proto
+++ b/proto/ibc/core/connection/v1/query.proto
@@ -1,13 +1,13 @@
syntax = "proto3";
-package ibcgo.core.connection.v1;
+package ibc.core.connection.v1;
option go_package = "github.com/cosmos/ibc-go/modules/core/03-connection/types";
import "gogoproto/gogo.proto";
import "cosmos/base/query/v1beta1/pagination.proto";
-import "ibcgo/core/client/v1/client.proto";
-import "ibcgo/core/connection/v1/connection.proto";
+import "ibc/core/client/v1/client.proto";
+import "ibc/core/connection/v1/connection.proto";
import "google/api/annotations.proto";
import "google/protobuf/any.proto";
@@ -62,11 +62,11 @@ message QueryConnectionRequest {
// which the proof was retrieved.
message QueryConnectionResponse {
// connection associated with the request identifier
- ibcgo.core.connection.v1.ConnectionEnd connection = 1;
+ ibc.core.connection.v1.ConnectionEnd connection = 1;
// merkle proof of existence
bytes proof = 2;
// height at which the proof was retrieved
- ibcgo.core.client.v1.Height proof_height = 3 [ (gogoproto.nullable) = false ];
+ ibc.core.client.v1.Height proof_height = 3 [ (gogoproto.nullable) = false ];
}
// QueryConnectionsRequest is the request type for the Query/Connections RPC
@@ -79,11 +79,11 @@ message QueryConnectionsRequest {
// method.
message QueryConnectionsResponse {
// list of stored connections of the chain.
- repeated ibcgo.core.connection.v1.IdentifiedConnection connections = 1;
+ repeated ibc.core.connection.v1.IdentifiedConnection connections = 1;
// pagination response
cosmos.base.query.v1beta1.PageResponse pagination = 2;
// query block height
- ibcgo.core.client.v1.Height height = 3 [ (gogoproto.nullable) = false ];
+ ibc.core.client.v1.Height height = 3 [ (gogoproto.nullable) = false ];
}
// QueryClientConnectionsRequest is the request type for the
@@ -101,7 +101,7 @@ message QueryClientConnectionsResponse {
// merkle proof of existence
bytes proof = 2;
// height at which the proof was generated
- ibcgo.core.client.v1.Height proof_height = 3 [ (gogoproto.nullable) = false ];
+ ibc.core.client.v1.Height proof_height = 3 [ (gogoproto.nullable) = false ];
}
// QueryConnectionClientStateRequest is the request type for the
@@ -115,11 +115,11 @@ message QueryConnectionClientStateRequest {
// Query/ConnectionClientState RPC method
message QueryConnectionClientStateResponse {
// client state associated with the channel
- ibcgo.core.client.v1.IdentifiedClientState identified_client_state = 1;
+ ibc.core.client.v1.IdentifiedClientState identified_client_state = 1;
// merkle proof of existence
bytes proof = 2;
// height at which the proof was retrieved
- ibcgo.core.client.v1.Height proof_height = 3 [ (gogoproto.nullable) = false ];
+ ibc.core.client.v1.Height proof_height = 3 [ (gogoproto.nullable) = false ];
}
// QueryConnectionConsensusStateRequest is the request type for the
@@ -141,5 +141,5 @@ message QueryConnectionConsensusStateResponse {
// merkle proof of existence
bytes proof = 3;
// height at which the proof was retrieved
- ibcgo.core.client.v1.Height proof_height = 4 [ (gogoproto.nullable) = false ];
+ ibc.core.client.v1.Height proof_height = 4 [ (gogoproto.nullable) = false ];
}
diff --git a/proto/ibcgo/core/connection/v1/tx.proto b/proto/ibc/core/connection/v1/tx.proto
similarity index 93%
rename from proto/ibcgo/core/connection/v1/tx.proto
rename to proto/ibc/core/connection/v1/tx.proto
index 46375618..2a71469c 100644
--- a/proto/ibcgo/core/connection/v1/tx.proto
+++ b/proto/ibc/core/connection/v1/tx.proto
@@ -1,13 +1,13 @@
syntax = "proto3";
-package ibcgo.core.connection.v1;
+package ibc.core.connection.v1;
option go_package = "github.com/cosmos/ibc-go/modules/core/03-connection/types";
import "gogoproto/gogo.proto";
import "google/protobuf/any.proto";
-import "ibcgo/core/client/v1/client.proto";
-import "ibcgo/core/connection/v1/connection.proto";
+import "ibc/core/client/v1/client.proto";
+import "ibc/core/connection/v1/connection.proto";
// Msg defines the ibc/connection Msg service.
service Msg {
@@ -63,7 +63,7 @@ message MsgConnectionOpenTry {
uint64 delay_period = 5 [ (gogoproto.moretags) = "yaml:\"delay_period\"" ];
repeated Version counterparty_versions = 6
[ (gogoproto.moretags) = "yaml:\"counterparty_versions\"" ];
- ibcgo.core.client.v1.Height proof_height = 7 [
+ ibc.core.client.v1.Height proof_height = 7 [
(gogoproto.moretags) = "yaml:\"proof_height\"",
(gogoproto.nullable) = false
];
@@ -75,7 +75,7 @@ message MsgConnectionOpenTry {
// proof of client consensus state
bytes proof_consensus = 10
[ (gogoproto.moretags) = "yaml:\"proof_consensus\"" ];
- ibcgo.core.client.v1.Height consensus_height = 11 [
+ ibc.core.client.v1.Height consensus_height = 11 [
(gogoproto.moretags) = "yaml:\"consensus_height\"",
(gogoproto.nullable) = false
];
@@ -97,7 +97,7 @@ message MsgConnectionOpenAck {
Version version = 3;
google.protobuf.Any client_state = 4
[ (gogoproto.moretags) = "yaml:\"client_state\"" ];
- ibcgo.core.client.v1.Height proof_height = 5 [
+ ibc.core.client.v1.Height proof_height = 5 [
(gogoproto.moretags) = "yaml:\"proof_height\"",
(gogoproto.nullable) = false
];
@@ -109,7 +109,7 @@ message MsgConnectionOpenAck {
// proof of client consensus state
bytes proof_consensus = 8
[ (gogoproto.moretags) = "yaml:\"proof_consensus\"" ];
- ibcgo.core.client.v1.Height consensus_height = 9 [
+ ibc.core.client.v1.Height consensus_height = 9 [
(gogoproto.moretags) = "yaml:\"consensus_height\"",
(gogoproto.nullable) = false
];
@@ -128,7 +128,7 @@ message MsgConnectionOpenConfirm {
string connection_id = 1 [ (gogoproto.moretags) = "yaml:\"connection_id\"" ];
// proof for the change of the connection state on Chain A: `INIT -> OPEN`
bytes proof_ack = 2 [ (gogoproto.moretags) = "yaml:\"proof_ack\"" ];
- ibcgo.core.client.v1.Height proof_height = 3 [
+ ibc.core.client.v1.Height proof_height = 3 [
(gogoproto.moretags) = "yaml:\"proof_height\"",
(gogoproto.nullable) = false
];
diff --git a/proto/ibcgo/core/types/v1/genesis.proto b/proto/ibc/core/types/v1/genesis.proto
similarity index 62%
rename from proto/ibcgo/core/types/v1/genesis.proto
rename to proto/ibc/core/types/v1/genesis.proto
index a2c7845c..2b5f9cd2 100644
--- a/proto/ibcgo/core/types/v1/genesis.proto
+++ b/proto/ibc/core/types/v1/genesis.proto
@@ -1,28 +1,28 @@
syntax = "proto3";
-package ibcgo.core.types.v1;
+package ibc.core.types.v1;
option go_package = "github.com/cosmos/ibc-go/modules/core/types";
import "gogoproto/gogo.proto";
-import "ibcgo/core/client/v1/genesis.proto";
-import "ibcgo/core/connection/v1/genesis.proto";
-import "ibcgo/core/channel/v1/genesis.proto";
+import "ibc/core/client/v1/genesis.proto";
+import "ibc/core/connection/v1/genesis.proto";
+import "ibc/core/channel/v1/genesis.proto";
// GenesisState defines the ibc module's genesis state.
message GenesisState {
// ICS002 - Clients genesis state
- ibcgo.core.client.v1.GenesisState client_genesis = 1 [
+ ibc.core.client.v1.GenesisState client_genesis = 1 [
(gogoproto.nullable) = false,
(gogoproto.moretags) = "yaml:\"client_genesis\""
];
// ICS003 - Connections genesis state
- ibcgo.core.connection.v1.GenesisState connection_genesis = 2 [
+ ibc.core.connection.v1.GenesisState connection_genesis = 2 [
(gogoproto.nullable) = false,
(gogoproto.moretags) = "yaml:\"connection_genesis\""
];
// ICS004 - Channel genesis state
- ibcgo.core.channel.v1.GenesisState channel_genesis = 3 [
+ ibc.core.channel.v1.GenesisState channel_genesis = 3 [
(gogoproto.nullable) = false,
(gogoproto.moretags) = "yaml:\"channel_genesis\""
];
diff --git a/proto/ibcgo/lightclients/localhost/v1/localhost.proto b/proto/ibc/lightclients/localhost/v1/localhost.proto
similarity index 73%
rename from proto/ibcgo/lightclients/localhost/v1/localhost.proto
rename to proto/ibc/lightclients/localhost/v1/localhost.proto
index ff37c7a1..e090e0de 100644
--- a/proto/ibcgo/lightclients/localhost/v1/localhost.proto
+++ b/proto/ibc/lightclients/localhost/v1/localhost.proto
@@ -1,11 +1,11 @@
syntax = "proto3";
-package ibcgo.lightclients.localhost.v1;
+package ibc.lightclients.localhost.v1;
option go_package = "github.com/cosmos/ibc-go/modules/light-clients/09-localhost/types";
import "gogoproto/gogo.proto";
-import "ibcgo/core/client/v1/client.proto";
+import "ibc/core/client/v1/client.proto";
// ClientState defines a loopback (localhost) client. It requires (read-only)
// access to keys outside the client prefix.
@@ -14,5 +14,5 @@ message ClientState {
// self chain ID
string chain_id = 1 [ (gogoproto.moretags) = "yaml:\"chain_id\"" ];
// self latest block height
- ibcgo.core.client.v1.Height height = 2 [ (gogoproto.nullable) = false ];
+ ibc.core.client.v1.Height height = 2 [ (gogoproto.nullable) = false ];
}
diff --git a/proto/ibcgo/lightclients/solomachine/v1/solomachine.proto b/proto/ibc/lightclients/solomachine/v1/solomachine.proto
similarity index 96%
rename from proto/ibcgo/lightclients/solomachine/v1/solomachine.proto
rename to proto/ibc/lightclients/solomachine/v1/solomachine.proto
index 965eb382..e7f2b022 100644
--- a/proto/ibcgo/lightclients/solomachine/v1/solomachine.proto
+++ b/proto/ibc/lightclients/solomachine/v1/solomachine.proto
@@ -1,11 +1,11 @@
syntax = "proto3";
-package ibcgo.lightclients.solomachine.v1;
+package ibc.lightclients.solomachine.v1;
option go_package = "github.com/cosmos/ibc-go/modules/light-clients/06-solomachine/types";
-import "ibcgo/core/connection/v1/connection.proto";
-import "ibcgo/core/channel/v1/channel.proto";
+import "ibc/core/connection/v1/connection.proto";
+import "ibc/core/channel/v1/channel.proto";
import "gogoproto/gogo.proto";
import "google/protobuf/any.proto";
@@ -168,7 +168,7 @@ message ConnectionStateData {
option (gogoproto.goproto_getters) = false;
bytes path = 1;
- ibcgo.core.connection.v1.ConnectionEnd connection = 2;
+ ibc.core.connection.v1.ConnectionEnd connection = 2;
}
// ChannelStateData returns the SignBytes data for channel state
@@ -177,7 +177,7 @@ message ChannelStateData {
option (gogoproto.goproto_getters) = false;
bytes path = 1;
- ibcgo.core.channel.v1.Channel channel = 2;
+ ibc.core.channel.v1.Channel channel = 2;
}
// PacketCommitmentData returns the SignBytes data for packet commitment
diff --git a/proto/ibcgo/lightclients/tendermint/v1/tendermint.proto b/proto/ibc/lightclients/tendermint/v1/tendermint.proto
similarity index 93%
rename from proto/ibcgo/lightclients/tendermint/v1/tendermint.proto
rename to proto/ibc/lightclients/tendermint/v1/tendermint.proto
index b58561ac..de59589b 100644
--- a/proto/ibcgo/lightclients/tendermint/v1/tendermint.proto
+++ b/proto/ibc/lightclients/tendermint/v1/tendermint.proto
@@ -1,6 +1,6 @@
syntax = "proto3";
-package ibcgo.lightclients.tendermint.v1;
+package ibc.lightclients.tendermint.v1;
option go_package = "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types";
@@ -9,8 +9,8 @@ import "tendermint/types/types.proto";
import "confio/proofs.proto";
import "google/protobuf/duration.proto";
import "google/protobuf/timestamp.proto";
-import "ibcgo/core/client/v1/client.proto";
-import "ibcgo/core/commitment/v1/commitment.proto";
+import "ibc/core/client/v1/client.proto";
+import "ibc/core/commitment/v1/commitment.proto";
import "gogoproto/gogo.proto";
// ClientState from Tendermint tracks the current validator set, latest height,
@@ -43,12 +43,12 @@ message ClientState {
(gogoproto.moretags) = "yaml:\"max_clock_drift\""
];
// Block height when the client was frozen due to a misbehaviour
- ibcgo.core.client.v1.Height frozen_height = 6 [
+ ibc.core.client.v1.Height frozen_height = 6 [
(gogoproto.nullable) = false,
(gogoproto.moretags) = "yaml:\"frozen_height\""
];
// Latest height the client was updated to
- ibcgo.core.client.v1.Height latest_height = 7 [
+ ibc.core.client.v1.Height latest_height = 7 [
(gogoproto.nullable) = false,
(gogoproto.moretags) = "yaml:\"latest_height\""
];
@@ -86,7 +86,7 @@ message ConsensusState {
google.protobuf.Timestamp timestamp = 1
[ (gogoproto.nullable) = false, (gogoproto.stdtime) = true ];
// commitment root (i.e app hash)
- ibcgo.core.commitment.v1.MerkleRoot root = 2 [ (gogoproto.nullable) = false ];
+ ibc.core.commitment.v1.MerkleRoot root = 2 [ (gogoproto.nullable) = false ];
bytes next_validators_hash = 3 [
(gogoproto.casttype) =
"github.com/tendermint/tendermint/libs/bytes.HexBytes",
@@ -130,7 +130,7 @@ message Header {
.tendermint.types.ValidatorSet validator_set = 2
[ (gogoproto.moretags) = "yaml:\"validator_set\"" ];
- ibcgo.core.client.v1.Height trusted_height = 3 [
+ ibc.core.client.v1.Height trusted_height = 3 [
(gogoproto.nullable) = false,
(gogoproto.moretags) = "yaml:\"trusted_height\""
];
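The hunks above rename only the protobuf `package` statements; every `option go_package` line is untouched, so the generated Go import paths stay the same while the registered message names (and the gRPC routes derived from them) drop the `ibcgo.` prefix. A minimal sketch of that distinction, assuming the gogoproto runtime and the `modules/core/04-channel/types` import path named in the `go_package` options above:

```go
package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"

	channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
)

func main() {
	// The registered name follows the proto `package` statement, not go_package,
	// so after this change Packet resolves as "ibc.core.channel.v1.Packet"
	// instead of "ibcgo.core.channel.v1.Packet", while the Go import path is unchanged.
	fmt.Println(proto.MessageName(&channeltypes.Packet{}))
}
```

Anything that serializes these names over the wire or into JSON (Any type URLs, gRPC method paths) is therefore affected, which is what the test fixture update in the next patch reflects.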
From a69a5bb9445f0c9225e85c9631d72687e0cbebbf Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Colin=20Axn=C3=A9r?=
<25233464+colin-axner@users.noreply.github.com>
Date: Thu, 11 Mar 2021 17:46:09 +0100
Subject: [PATCH 015/393] fix sdk test
---
testing/sdk_test.go | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/testing/sdk_test.go b/testing/sdk_test.go
index 41bd7066..2136dd07 100644
--- a/testing/sdk_test.go
+++ b/testing/sdk_test.go
@@ -199,7 +199,7 @@ func (s *IntegrationTestSuite) TestLegacyRestErrMessages() {
// a solo machine client state
clientStateJSON := testutil.WriteToNewTempFile(
s.T(),
- `{"@type":"/ibcgo.lightclients.solomachine.v1.ClientState","sequence":"1","frozen_sequence":"0","consensus_state":{"public_key":{"@type":"/cosmos.crypto.secp256k1.PubKey","key":"AtK50+5pJOoaa04qqAqrnyAqsYrwrR/INnA6UPIaYZlp"},"diversifier":"testing","timestamp":"10"},"allow_update_after_proposal":false}`,
+ `{"@type":"/ibc.lightclients.solomachine.v1.ClientState","sequence":"1","frozen_sequence":"0","consensus_state":{"public_key":{"@type":"/cosmos.crypto.secp256k1.PubKey","key":"AtK50+5pJOoaa04qqAqrnyAqsYrwrR/INnA6UPIaYZlp"},"diversifier":"testing","timestamp":"10"},"allow_update_after_proposal":false}`,
)
// Write consensus json to temp file, used for an IBC message.
@@ -207,7 +207,7 @@ func (s *IntegrationTestSuite) TestLegacyRestErrMessages() {
// a solo machine consensus state
consensusJSON := testutil.WriteToNewTempFile(
s.T(),
- `{"@type":"/ibcgo.lightclients.solomachine.v1.ConsensusState","public_key":{"@type":"/cosmos.crypto.secp256k1.PubKey","key":"AtK50+5pJOoaa04qqAqrnyAqsYrwrR/INnA6UPIaYZlp"},"diversifier":"testing","timestamp":"10"}`,
+ `{"@type":"/ibc.lightclients.solomachine.v1.ConsensusState","public_key":{"@type":"/cosmos.crypto.secp256k1.PubKey","key":"AtK50+5pJOoaa04qqAqrnyAqsYrwrR/INnA6UPIaYZlp"},"diversifier":"testing","timestamp":"10"}`,
)
testCases := []struct {
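The fixture strings change because the `@type` field in proto3 JSON is the `Any` type URL, i.e. `/` followed by the fully qualified message name, which the package rename above just altered. A minimal sketch, assuming the cosmos-sdk `codec/types` helper and the solo machine types package at `modules/light-clients/06-solomachine/types`:

```go
package main

import (
	"fmt"

	codectypes "github.com/cosmos/cosmos-sdk/codec/types"

	solomachinetypes "github.com/cosmos/ibc-go/modules/light-clients/06-solomachine/types"
)

func main() {
	// Packing a client state into an Any derives the type URL from the proto
	// package, so the JSON produced for it now starts with
	// "/ibc.lightclients.solomachine.v1.ClientState".
	anyMsg, err := codectypes.NewAnyWithValue(&solomachinetypes.ClientState{Sequence: 1})
	if err != nil {
		panic(err)
	}
	fmt.Println(anyMsg.TypeUrl)
}
```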
From b8219eb0c611f99a90f844ded52a571a8d1a2b15 Mon Sep 17 00:00:00 2001
From: Federico Kunze <31522760+fedekunze@users.noreply.github.com>
Date: Fri, 12 Mar 2021 09:03:50 -0300
Subject: [PATCH 016/393] changelog (#81)
* changelog
* template
---
CHANGELOG.md | 37 +++++++++++++++++++++++++++++++++++++
1 file changed, 37 insertions(+)
create mode 100644 CHANGELOG.md
diff --git a/CHANGELOG.md b/CHANGELOG.md
new file mode 100644
index 00000000..72e7ed25
--- /dev/null
+++ b/CHANGELOG.md
@@ -0,0 +1,37 @@
+
+
+# Changelog
+
+## [Unreleased]
From 7f97424b59c5029b5ce7bf1340d61059994f2857 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?colin=20axn=C3=A9r?=
<25233464+colin-axner@users.noreply.github.com>
Date: Fri, 12 Mar 2021 18:19:10 +0100
Subject: [PATCH 017/393] update changelog (#83)
* update changelog
* Update CHANGELOG.md
Co-authored-by: Federico Kunze <31522760+fedekunze@users.noreply.github.com>
* Update CHANGELOG.md
Co-authored-by: Federico Kunze <31522760+fedekunze@users.noreply.github.com>
Co-authored-by: Federico Kunze <31522760+fedekunze@users.noreply.github.com>
---
CHANGELOG.md | 17 +++++++++++++++++
1 file changed, 17 insertions(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 72e7ed25..cf8e227a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -35,3 +35,20 @@ Ref: https://keepachangelog.com/en/1.0.0/
# Changelog
## [Unreleased]
+
+### State Machine Breaking
+
+* (modules/core/02-client) [\#8405](https://github.com/cosmos/cosmos-sdk/pull/8405) Refactor IBC client update governance proposals to use a substitute client to update a frozen or expired client.
+* (modules/core/02-client) [\#8673](https://github.com/cosmos/cosmos-sdk/pull/8673) IBC upgrade logic moved to 02-client and an IBC UpgradeProposal is added.
+
+### Improvements
+
+* (modules/core/04-channel) [\#7949](https://github.com/cosmos/cosmos-sdk/issues/7949) Standardized channel `Acknowledgement` moved to its own file. Codec registration redundancy removed.
+
+## IBC in the Cosmos SDK Repository
+
+The IBC module was originally released in [v0.40.0](https://github.com/cosmos/cosmos-sdk/releases/tag/v0.40.0) of the SDK.
+Please see the [Release Notes](https://github.com/cosmos/cosmos-sdk/blob/v0.40.0/RELEASE_NOTES.md).
+
+The IBC module is also contained in the releases for [v0.41.x](https://github.com/cosmos/cosmos-sdk/releases/tag/v0.41.0) and [v0.42.x](https://github.com/cosmos/cosmos-sdk/releases/tag/v0.42.0).
+Please see the Release Notes for [v0.41.x](https://github.com/cosmos/cosmos-sdk/blob/release/v0.41.x/RELEASE_NOTES.md) and [v0.42.x](https://github.com/cosmos/cosmos-sdk/blob/release/v0.42.x/RELEASE_NOTES.md).
From 333c1f338b2a14a1928a6f8ab64c37123c0e97b6 Mon Sep 17 00:00:00 2001
From: Federico Kunze <31522760+fedekunze@users.noreply.github.com>
Date: Fri, 12 Mar 2021 14:24:44 -0300
Subject: [PATCH 018/393] repo utils (#84)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
* repo utils
* Update .mergify.yml
Co-authored-by: colin axnér <25233464+colin-axner@users.noreply.github.com>
---
.clang-format | 116 ++++++++++++++++++++++++++++++++++++++++++++++++++
.gitignore | 54 +++++++++++++++++++++++
.golangci.yml | 72 +++++++++++++++++++++++++++++++
.mergify.yml | 10 +++++
SECURITY.md | 32 ++++++++++++++
5 files changed, 284 insertions(+)
create mode 100644 .clang-format
create mode 100644 .gitignore
create mode 100644 .golangci.yml
create mode 100644 .mergify.yml
create mode 100644 SECURITY.md
diff --git a/.clang-format b/.clang-format
new file mode 100644
index 00000000..7f662a4f
--- /dev/null
+++ b/.clang-format
@@ -0,0 +1,116 @@
+---
+Language: Proto
+# BasedOnStyle: LLVM
+AccessModifierOffset: -2
+AlignAfterOpenBracket: Align
+AlignConsecutiveAssignments: true
+AlignConsecutiveDeclarations: true
+AlignEscapedNewlines: Right
+AlignOperands: true
+AlignTrailingComments: true
+AllowAllParametersOfDeclarationOnNextLine: true
+AllowShortBlocksOnASingleLine: true
+AllowShortCaseLabelsOnASingleLine: false
+AllowShortFunctionsOnASingleLine: Empty
+AllowShortIfStatementsOnASingleLine: false
+AllowShortLoopsOnASingleLine: false
+AlwaysBreakAfterDefinitionReturnType: None
+AlwaysBreakAfterReturnType: None
+AlwaysBreakBeforeMultilineStrings: false
+AlwaysBreakTemplateDeclarations: false
+BinPackArguments: true
+BinPackParameters: true
+BraceWrapping:
+ AfterClass: false
+ AfterControlStatement: false
+ AfterEnum: false
+ AfterFunction: false
+ AfterNamespace: false
+ AfterObjCDeclaration: false
+ AfterStruct: false
+ AfterUnion: false
+ AfterExternBlock: false
+ BeforeCatch: false
+ BeforeElse: false
+ IndentBraces: false
+ SplitEmptyFunction: true
+ SplitEmptyRecord: true
+ SplitEmptyNamespace: true
+BreakBeforeBinaryOperators: None
+BreakBeforeBraces: Attach
+BreakBeforeInheritanceComma: false
+BreakBeforeTernaryOperators: true
+BreakConstructorInitializersBeforeComma: false
+BreakConstructorInitializers: BeforeColon
+BreakAfterJavaFieldAnnotations: false
+BreakStringLiterals: true
+ColumnLimit: 120
+CommentPragmas: '^ IWYU pragma:'
+CompactNamespaces: false
+ConstructorInitializerAllOnOneLineOrOnePerLine: false
+ConstructorInitializerIndentWidth: 4
+ContinuationIndentWidth: 4
+Cpp11BracedListStyle: true
+DerivePointerAlignment: false
+DisableFormat: false
+ExperimentalAutoDetectBinPacking: false
+FixNamespaceComments: true
+ForEachMacros:
+ - foreach
+ - Q_FOREACH
+ - BOOST_FOREACH
+IncludeBlocks: Preserve
+IncludeCategories:
+ - Regex: '^"(llvm|llvm-c|clang|clang-c)/'
+ Priority: 2
+ - Regex: '^(<|"(gtest|gmock|isl|json)/)'
+ Priority: 3
+ - Regex: '.*'
+ Priority: 1
+IncludeIsMainRegex: '(Test)?$'
+IndentCaseLabels: false
+IndentPPDirectives: None
+IndentWidth: 2
+IndentWrappedFunctionNames: false
+JavaScriptQuotes: Leave
+JavaScriptWrapImports: true
+KeepEmptyLinesAtTheStartOfBlocks: true
+MacroBlockBegin: ''
+MacroBlockEnd: ''
+MaxEmptyLinesToKeep: 1
+NamespaceIndentation: None
+ObjCBlockIndentWidth: 2
+ObjCSpaceAfterProperty: false
+ObjCSpaceBeforeProtocolList: true
+PenaltyBreakAssignment: 2
+PenaltyBreakBeforeFirstCallParameter: 19
+PenaltyBreakComment: 300
+PenaltyBreakFirstLessLess: 120
+PenaltyBreakString: 1000
+PenaltyExcessCharacter: 1000000
+PenaltyReturnTypeOnItsOwnLine: 60
+PointerAlignment: Right
+RawStringFormats:
+ - Delimiters:
+ - pb
+ Language: TextProto
+ BasedOnStyle: google
+ReflowComments: true
+SortIncludes: true
+SortUsingDeclarations: true
+SpaceAfterCStyleCast: false
+SpaceAfterTemplateKeyword: true
+SpaceBeforeAssignmentOperators: true
+SpaceBeforeParens: ControlStatements
+SpaceInEmptyParentheses: false
+SpacesBeforeTrailingComments: 1
+SpacesInAngles: false
+SpacesInContainerLiterals: false
+SpacesInCStyleCastParentheses: false
+SpacesInParentheses: false
+SpacesInSquareBrackets: false
+Standard: Cpp11
+TabWidth: 8
+UseTab: Never
+...
+
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 00000000..2bf18165
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,54 @@
+# OS
+.DS_Store
+*.swp
+*.swo
+*.swl
+*.swm
+*.swn
+*.pyc
+
+# private files
+private[.-]*
+private
+
+# Build
+vendor
+build
+docs/_build
+docs/tutorial
+docs/node_modules
+docs/modules
+dist
+tools-stamp
+buf-stamp
+artifacts
+
+# Data - ideally these don't exist
+baseapp/data/*
+client/lcd/keys/*
+mytestnet
+
+# Testing
+coverage.txt
+profile.out
+sim_log_file
+
+# Vagrant
+.vagrant/
+*.box
+*.log
+vagrant
+
+# IDE
+.idea
+*.iml
+.dir-locals.el
+.vscode
+
+# Graphviz
+dependency-graph.png
+
+# Latex
+*.aux
+*.out
+*.synctex.gz
diff --git a/.golangci.yml b/.golangci.yml
new file mode 100644
index 00000000..34738ccf
--- /dev/null
+++ b/.golangci.yml
@@ -0,0 +1,72 @@
+run:
+ tests: false
+# # timeout for analysis, e.g. 30s, 5m, default is 1m
+# timeout: 5m
+
+linters:
+ disable-all: true
+ enable:
+ - bodyclose
+ - deadcode
+ - depguard
+ - dogsled
+ # - errcheck
+ - goconst
+ - gocritic
+ - gofmt
+ - goimports
+ - golint
+ - gosec
+ - gosimple
+ - govet
+ - ineffassign
+ - interfacer
+ - maligned
+ - misspell
+ - nakedret
+ - prealloc
+ - scopelint
+ - staticcheck
+ - structcheck
+ - stylecheck
+ - typecheck
+ - unconvert
+ - unused
+ - unparam
+ - misspell
+ # - wsl
+ - nolintlint
+
+issues:
+ exclude-rules:
+ - text: "Use of weak random number generator"
+ linters:
+ - gosec
+ - text: "comment on exported var"
+ linters:
+ - golint
+ - text: "don't use an underscore in package name"
+ linters:
+ - golint
+ - text: "ST1003:"
+ linters:
+ - stylecheck
+ # FIXME: Disabled until golangci-lint updates stylecheck with this fix:
+ # https://github.com/dominikh/go-tools/issues/389
+ - text: "ST1016:"
+ linters:
+ - stylecheck
+ max-issues-per-linter: 10000
+ max-same-issues: 10000
+
+linters-settings:
+ dogsled:
+ max-blank-identifiers: 3
+ maligned:
+ # print struct with more effective memory layout or not, false by default
+ suggest-new: true
+ nolintlint:
+ allow-unused: false
+ allow-leading-space: true
+ require-explanation: false
+ require-specific: false
diff --git a/.mergify.yml b/.mergify.yml
new file mode 100644
index 00000000..ecc27fff
--- /dev/null
+++ b/.mergify.yml
@@ -0,0 +1,10 @@
+pull_request_rules:
+ - name: automerge to main with label automerge and branch protection passing
+ conditions:
+ - "#approved-reviews-by>1"
+ - base=main
+ - label=automerge
+ actions:
+ merge:
+ method: squash
+ strict: true
diff --git a/SECURITY.md b/SECURITY.md
new file mode 100644
index 00000000..d9094b65
--- /dev/null
+++ b/SECURITY.md
@@ -0,0 +1,32 @@
+# Security
+
+> **IMPORTANT**: If you find a security issue, you can contact our team directly at
+security@interchain.berlin, or report it to our [bug bounty program](https://hackerone.com/tendermint) on HackerOne. *DO NOT* open a public issue on the repository.
+
+## Bug Bounty
+
+As part of our [Coordinated Vulnerability Disclosure Policy](https://tendermint.com/security), we operate a
+[bug bounty program](https://hackerone.com/tendermint) with Hacker One.
+
+See the policy linked above for more details on submissions and rewards and read
+this [blog post](https://blog.cosmos.network/bug-bounty-program-for-tendermint-cosmos-833c67693586) for the program scope.
+
+The following is a list of examples of the kinds of bugs we're most interested
+in for the IBC Golang repository. Please refer to the corresponding repositories for vulnerabilities on the [Cosmos SDK](https://github.com/cosmos/cosmos-sdk/blob/master/SECURITY.md) and [Tendermint](https://github.com/tendermint/tendermint/blob/master/SECURITY.md) repositories.
+
+### IBC Core
+
+- [`02-client`](https://github.com/cosmos/ibc-go/tree/modules/core/02-client)
+- [`03-connection`](https://github.com/cosmos/ibc-go/tree/modules/core/03-connection)
+- [`04-channel`](https://github.com/cosmos/ibc-go/tree/modules/core/04-channel)
+- [`05-port`](https://github.com/cosmos/ibc-go/tree/modules/core/05-port)
+- [`23-commitment`](https://github.com/cosmos/ibc-go/tree/modules/core/23-commitment)
+- [`24-host`](https://github.com/cosmos/ibc-go/tree/modules/core/24-host)
+
+### IBC Applications
+
+- [`transfer`](https://github.com/cosmos/ibc-go/tree/modules/apps/transfer)
+
+### Light Clients
+
+- [`07-tendermint`](https://github.com/cosmos/ibc-go/tree/modules/light-clients/07-tendermint)
From abb18240da7e9a2121ffcb0e938a0f9b29c351a8 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Colin=20Axn=C3=A9r?=
<25233464+colin-axner@users.noreply.github.com>
Date: Tue, 16 Mar 2021 18:51:01 +0100
Subject: [PATCH 019/393] add upgrade example and fix bug
Add an upgraded client state JSON example to the CLI help command. Fix a plan height parsing bug that attempted to read a non-existent flag
---
modules/core/02-client/client/cli/tx.go | 17 ++++++++++++++---
1 file changed, 14 insertions(+), 3 deletions(-)
diff --git a/modules/core/02-client/client/cli/tx.go b/modules/core/02-client/client/cli/tx.go
index b7908fed..78a66012 100644
--- a/modules/core/02-client/client/cli/tx.go
+++ b/modules/core/02-client/client/cli/tx.go
@@ -3,6 +3,7 @@ package cli
import (
"fmt"
"io/ioutil"
+ "strconv"
"github.com/pkg/errors"
"github.com/spf13/cobra"
@@ -16,9 +17,9 @@ import (
"github.com/cosmos/cosmos-sdk/version"
govcli "github.com/cosmos/cosmos-sdk/x/gov/client/cli"
govtypes "github.com/cosmos/cosmos-sdk/x/gov/types"
+ upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
"github.com/cosmos/ibc-go/modules/core/02-client/types"
"github.com/cosmos/ibc-go/modules/core/exported"
- upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
)
// NewCreateClientCmd defines the command to create a new IBC light client.
@@ -335,7 +336,17 @@ func NewCmdSubmitUpgradeProposal() *cobra.Command {
Args: cobra.ExactArgs(3),
Short: "Submit an IBC upgrade proposal",
Long: "Submit an IBC client breaking upgrade proposal along with an initial deposit.\n" +
- "The client state specified is the upgraded client state representing the upgraded chain",
+ "The client state specified is the upgraded client state representing the upgraded chain\n" +
+ `Example Upgraded Client State JSON:
+{
+ "@type":"/ibc.lightclients.tendermint.v1.ClientState",
+ "chain_id":"testchain1",
+ "unbonding_period":"1814400s",
+ "latest_height":{"revision_number":"0","revision_height":"2"},
+ "proof_specs":[{"leaf_spec":{"hash":"SHA256","prehash_key":"NO_HASH","prehash_value":"SHA256","length":"VAR_PROTO","prefix":"AA=="},"inner_spec":{"child_order":[0,1],"child_size":33,"min_prefix_length":4,"max_prefix_length":12,"empty_child":null,"hash":"SHA256"},"max_depth":0,"min_depth":0},{"leaf_spec":{"hash":"SHA256","prehash_key":"NO_HASH","prehash_value":"SHA256","length":"VAR_PROTO","prefix":"AA=="},"inner_spec":{"child_order":[0,1],"child_size":32,"min_prefix_length":1,"max_prefix_length":1,"empty_child":null,"hash":"SHA256"},"max_depth":0,"min_depth":0}],
+ "upgrade_path":["upgrade","upgradedIBCState"],
+}
+ `,
RunE: func(cmd *cobra.Command, args []string) error {
clientCtx, err := client.GetClientTxContext(cmd)
if err != nil {
@@ -355,7 +366,7 @@ func NewCmdSubmitUpgradeProposal() *cobra.Command {
name := args[0]
- height, err := cmd.Flags().GetInt64(args[1])
+ height, err := strconv.ParseInt(args[1], 10, 64)
if err != nil {
return err
}
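The bug here is that `cmd.Flags().GetInt64` looks up a registered flag by name, so handing it the positional height value (`args[1]`) asks pflag for a flag literally called, say, "100", which was never defined; parsing the argument with `strconv.ParseInt` is the straightforward fix. A minimal, self-contained sketch of the two behaviours (not the actual ibc-go command wiring; the command name and arguments are illustrative):

```go
package main

import (
	"fmt"
	"strconv"

	"github.com/spf13/cobra"
)

func main() {
	cmd := &cobra.Command{
		Use:  "upgrade [name] [height]",
		Args: cobra.ExactArgs(2),
		RunE: func(cmd *cobra.Command, args []string) error {
			// Buggy pattern: treats the positional value as a flag name,
			// so pflag reports "flag accessed but not defined".
			if _, err := cmd.Flags().GetInt64(args[1]); err != nil {
				fmt.Println("flag lookup fails:", err)
			}
			// Fixed pattern: parse the positional argument directly.
			height, err := strconv.ParseInt(args[1], 10, 64)
			if err != nil {
				return err
			}
			fmt.Println("parsed height:", height)
			return nil
		},
	}
	cmd.SetArgs([]string{"plan-name", "100"})
	_ = cmd.Execute()
}
```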
From e226f369c7adff58617aac58ac0909e6e632471c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?colin=20axn=C3=A9r?=
<25233464+colin-axner@users.noreply.github.com>
Date: Mon, 22 Mar 2021 18:33:22 +0100
Subject: [PATCH 020/393] Fix links in documentation (#90)
* fix links
* update README
* fix links
---
README.md | 2 +-
docs/custom.md | 18 +++++++++---------
docs/integration.md | 8 ++++----
docs/overview.md | 22 +++++++++++-----------
docs/relayer.md | 5 +++--
5 files changed, 28 insertions(+), 27 deletions(-)
diff --git a/README.md b/README.md
index 0e6c4e01..6eaf9931 100644
--- a/README.md
+++ b/README.md
@@ -53,4 +53,4 @@ The localhost client is currently non-functional.
Please see our [documentation](docs/README.md) for more information.
-
+Check out the [IBC website](https://ibcprotocol.org/).
diff --git a/docs/custom.md b/docs/custom.md
index 4d4c30c0..a5f75aad 100644
--- a/docs/custom.md
+++ b/docs/custom.md
@@ -31,7 +31,7 @@ module correctly.
### Implement `IBCModule` Interface and callbacks
The Cosmos SDK expects all IBC modules to implement the [`IBCModule`
-interface](https://github.com/cosmos/cosmos-sdk/tree/master/x/ibc/core/05-port/types/module.go). This
+interface](https://github.com/cosmos/ibc-go/tree/main/modules/core/05-port/types/module.go). This
interface contains all of the callbacks IBC expects modules to implement. This section will describe
the callbacks that are called during channel handshake execution.
@@ -209,7 +209,7 @@ channel, as well as how they will encode/decode it. This process is not specifie
to each application module to determine how to implement this agreement. However, for most
applications this will happen as a version negotiation during the channel handshake. While more
complex version negotiation is possible to implement inside the channel opening handshake, a very
-simple version negotation is implemented in the [ibc-transfer module](https://github.com/cosmos/cosmos-sdk/tree/master/x/ibc-transfer/module.go).
+simple version negotation is implemented in the [ibc-transfer module](https://github.com/cosmos/ibc-go/tree/main/modules/apps/transfer/module.go).
Thus, a module must define its a custom packet data structure, along with a well-defined way to
encode and decode it to and from `[]byte`.
@@ -336,7 +336,7 @@ not want the packet processing to revert. Instead, we may want to encode this fa
acknowledgement and finish processing the packet. This will ensure the packet cannot be replayed,
and will also allow the sender module to potentially remediate the situation upon receiving the
acknowledgement. An example of this technique is in the `ibc-transfer` module's
-[`OnRecvPacket`](https://github.com/cosmos/cosmos-sdk/tree/master/x/ibc-transfer/module.go).
+[`OnRecvPacket`](https://github.com/cosmos/ibc-go/tree/main/modules/apps/transfer/module.go).
:::
### Acknowledgements
@@ -358,9 +358,9 @@ Thus, modules must agree on how to encode/decode acknowledgements. The process o
acknowledgement struct along with encoding and decoding it, is very similar to the packet data
example above. [ICS 04](https://github.com/cosmos/ics/tree/master/spec/ics-004-channel-and-packet-semantics#acknowledgement-envelope)
specifies a recommended format for acknowledgements. This acknowledgement type can be imported from
-[channel types](https://github.com/cosmos/cosmos-sdk/tree/master/x/ibc/core/04-channel/types).
+[channel types](https://github.com/cosmos/ibc-go/tree/main/modules/core/04-channel/types).
-While modules may choose arbitrary acknowledgement structs, a default acknowledgement types is provided by IBC [here](https://github.com/cosmos/cosmos-sdk/blob/master/proto/ibc/core/channel/v1/channel.proto):
+While modules may choose arbitrary acknowledgement structs, a default acknowledgement types is provided by IBC [here](https://github.com/cosmos/ibc-go/blob/main/proto/ibc/core/channel/v1/channel.proto):
```proto
// Acknowledgement is the recommended acknowledgement format to be used by
@@ -455,14 +455,14 @@ which implements everything discussed above.
Here are the useful parts of the module to look at:
[Binding to transfer
-port](https://github.com/cosmos/cosmos-sdk/blob/master/x/ibc-transfer/genesis.go)
+port](https://github.com/cosmos/ibc-go/blob/main/modules/apps/transfer/types/genesis.go)
[Sending transfer
-packets](https://github.com/cosmos/cosmos-sdk/blob/master/x/ibc-transfer/keeper/relay.go)
+packets](https://github.com/cosmos/ibc-go/blob/main/modules/apps/transfer/keeper/relay.go)
[Implementing IBC
-callbacks](https://github.com/cosmos/cosmos-sdk/blob/master/x/ibc-transfer/module.go)
+callbacks](https://github.com/cosmos/ibc-go/blob/main/modules/apps/transfer/module.go)
## Next {hide}
-Learn about [building modules](../building-modules/intro.md) {hide}
+Learn about [building modules](https://github.com/cosmos/cosmos-sdk/blob/master/docs/building-modules/intro.md) {hide}
diff --git a/docs/integration.md b/docs/integration.md
index 50bc983f..ec48126f 100644
--- a/docs/integration.md
+++ b/docs/integration.md
@@ -7,7 +7,7 @@ order: 2
Learn how to integrate IBC to your application and send data packets to other chains. {synopsis}
This document outlines the required steps to integrate and configure the [IBC
-module](https://github.com/cosmos/cosmos-sdk/tree/master/x/ibc) to your Cosmos SDK application and
+module](https://github.com/cosmos/ibc-go/tree/main/modules/core) to your Cosmos SDK application and
send fungible token transfers to other chains.
## Integrating the IBC module
@@ -120,7 +120,7 @@ IBC needs to know which module is bound to which port so that it can route packe
appropriate module and call the appropriate callbacks. The port to module name mapping is handled by
IBC's port `Keeper`. However, the mapping from module name to the relevant callbacks is accomplished
by the port
-[`Router`](https://github.com/cosmos/cosmos-sdk/tree/master/x/ibc//core/05-port/types/router.go) on the
+[`Router`](https://github.com/cosmos/ibc-go/blob/main/modules/core/05-port/types/router.go) on the
IBC module.
Adding the module routes allows the IBC handler to call the appropriate callback when processing a
@@ -204,7 +204,7 @@ past historical info at any given height in order to verify the light client `Co
connection handhake.
The IBC module also has
-[`BeginBlock`](https://github.com/cosmos/cosmos-sdk/blob/master/x/ibc/core/02-client/abci.go) logic as
+[`BeginBlock`](https://github.com/cosmos/ibc-go/blob/main/modules/core/02-client/abci.go) logic as
well. This is optional as it is only required if your application uses the [localhost
client](https://github.com/cosmos/ics/blob/master/spec/ics-009-loopback-client) to connect two
different modules from the same chain.
@@ -245,7 +245,7 @@ func NewApp(...args) *App {
That's it! You have now wired up the IBC module and are now able to send fungible tokens across
different chains. If you want to have a broader view of the changes take a look into the SDK's
-[`SimApp`](https://github.com/cosmos/cosmos-sdk/blob/master/simapp/app.go).
+[`SimApp`](https://github.com/cosmos/ibc-go/blob/main/testing/simapp/app.go).
## Next {hide}
diff --git a/docs/overview.md b/docs/overview.md
index ff915eee..dc5cc9d7 100644
--- a/docs/overview.md
+++ b/docs/overview.md
@@ -26,19 +26,19 @@ module correctly.
## Components Overview
-### [Clients](https://github.com/cosmos/cosmos-sdk/tree/master/x/ibc/core/02-client)
+### [Clients](https://github.com/cosmos/ibc-go/blob/main/modules/core/02-client)
IBC Clients are light clients (identified by a unique client-id) that track the consensus states of
other blockchains, along with the proof spec necessary to properly verify proofs against the
client's consensus state. A client may be associated with any number of connections to multiple
chains. The supported IBC clients are:
-* [Solo Machine light client](https://github.com/cosmos/cosmos-sdk/tree/master/x/ibc/light-clients/06-solomachine): devices such as phones, browsers, or laptops.
-* [Tendermint light client](https://github.com/cosmos/cosmos-sdk/tree/master/x/ibc/light-clients/07-tendermint): The default for SDK-based chains,
-* [Localhost (loopback) client](https://github.com/cosmos/cosmos-sdk/tree/master/x/ibc/light-clients/09-localhost): Useful for
+* [Solo Machine light client](https://github.com/cosmos/ibc-go/blob/main/modules/light-clients/06-solomachine): devices such as phones, browsers, or laptops.
+* [Tendermint light client](https://github.com/cosmos/ibc-go/blob/main/modules/light-clients/07-tendermint): The default for SDK-based chains,
+* [Localhost (loopback) client](https://github.com/cosmos/ibc-go/blob/main/modules/light-clients/09-localhost): Useful for
testing, simulation and relaying packets to modules on the same application.
-### [Connections](https://github.com/cosmos/cosmos-sdk/tree/master/x/ibc/core/03-connection)
+### [Connections](https://github.com/cosmos/ibc-go/blob/main/modules/core/03-connection)
Connections encapsulate two `ConnectionEnd` objects on two seperate blockchains. Each
`ConnectionEnd` is associated with a client of the other blockchain (ie counterparty blockchain).
@@ -47,7 +47,7 @@ correct for their respective counterparties. Connections, once established, are
facilitation all cross-chain verification of IBC state. A connection may be associated with any
number of channels.
-### [Proofs](https://github.com/cosmos/cosmos-sdk/tree/master/x/ibc/core/23-commitment) and [Paths](https://github.com/cosmos/cosmos-sdk/tree/master/x/ibc/core/24-host)
+### [Proofs](https://github.com/cosmos/ibc-go/blob/main/modules/core/23-commitment) and [Paths](https://github.com/cosmos/ibc-go/blob/main/modules/core/24-host)
In IBC, blockchains do not directly pass messages to each other over the network. Instead, to
communicate, a blockchain will commit some state to a specifically defined path reserved for a
@@ -82,7 +82,7 @@ IBC will correctly route all packets to the relevant module using the (channelID
IBC module may also communicate with another IBC module over multiple ports, with each
`(portID<->portID)` packet stream being sent on a different unique channel.
-### [Ports](https://github.com/cosmos/cosmos-sdk/tree/master/x/ibc/core/05-port)
+### [Ports](https://github.com/cosmos/ibc-go/blob/main/modules/core/05-port)
An IBC module may bind to any number of ports. Each port must be identified by a unique `portID`.
Since IBC is designed to be secure with mutually-distrusted modules operating on the same ledger,
@@ -91,7 +91,7 @@ binding a port will return a dynamic object capability. In order to take action
handler. This prevents a malicious module from opening channels with ports it does not own. Thus,
IBC modules are responsible for claiming the capability that is returned on `BindPort`.
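A minimal Go sketch of this bind-and-claim pattern follows; the `Keeper` fields and the local `PortKeeper` interface are assumptions made to keep the example self-contained, loosely modelled on how application modules such as transfer wire this up.

```go
package keeper

import (
	sdk "github.com/cosmos/cosmos-sdk/types"
	capabilitykeeper "github.com/cosmos/cosmos-sdk/x/capability/keeper"
	capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"

	host "github.com/cosmos/ibc-go/modules/core/24-host"
)

// PortKeeper is the subset of the ICS-05 port keeper this sketch relies on.
type PortKeeper interface {
	BindPort(ctx sdk.Context, portID string) *capabilitytypes.Capability
}

// Keeper is a minimal sketch; a real module keeper holds stores and other keepers as well.
type Keeper struct {
	portKeeper   PortKeeper
	scopedKeeper capabilitykeeper.ScopedKeeper
}

// BindPort binds the module to the given port and claims the dynamic capability
// returned by IBC so the module can authenticate future actions on that port.
func (k Keeper) BindPort(ctx sdk.Context, portID string) error {
	cap := k.portKeeper.BindPort(ctx, portID)
	return k.ClaimCapability(ctx, cap, host.PortPath(portID))
}

// ClaimCapability claims a capability that the IBC module returned to this module.
func (k Keeper) ClaimCapability(ctx sdk.Context, cap *capabilitytypes.Capability, name string) error {
	return k.scopedKeeper.ClaimCapability(ctx, cap, name)
}
```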
-### [Channels](https://github.com/cosmos/cosmos-sdk/tree/master/x/ibc/core/04-channel)
+### [Channels](https://github.com/cosmos/ibc-go/blob/main/modules/core/04-channel)
An IBC channel can be established between 2 IBC ports. Currently, a port is exclusively owned by a
single module. IBC packets are sent over channels. Just as IP packets contain the destination IP
@@ -126,7 +126,7 @@ that the module **must** claim so that they can pass in a capability to authenti
like sending packets. The channel capability is passed into the callback on the first parts of the
handshake; either `OnChanOpenInit` on the initializing chain or `OnChanOpenTry` on the other chain.
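For illustration, a hedged sketch of claiming the channel capability inside `OnChanOpenInit`; the receiver type and its embedded keeper interface are assumptions for the example, and the signature is simplified relative to a real application module.

```go
package module

import (
	sdk "github.com/cosmos/cosmos-sdk/types"
	capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"

	channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
	host "github.com/cosmos/ibc-go/modules/core/24-host"
)

// AppModule is a placeholder for an IBC application module; its keeper is assumed
// to expose a ClaimCapability helper as in the port sketch above.
type AppModule struct {
	keeper interface {
		ClaimCapability(ctx sdk.Context, cap *capabilitytypes.Capability, name string) error
	}
}

// OnChanOpenInit claims the channel capability handed to the module on the first
// step of the handshake so it can authenticate packet sends on this channel later.
// Version negotiation and validation are omitted from this sketch.
func (am AppModule) OnChanOpenInit(
	ctx sdk.Context,
	order channeltypes.Order,
	connectionHops []string,
	portID, channelID string,
	chanCap *capabilitytypes.Capability,
	counterparty channeltypes.Counterparty,
	version string,
) error {
	return am.keeper.ClaimCapability(ctx, chanCap, host.ChannelCapabilityPath(portID, channelID))
}
```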
-### [Packets](https://github.com/cosmos/cosmos-sdk/tree/master/x/ibc/core/04-channel)
+### [Packets](https://github.com/cosmos/ibc-go/blob/main/modules/core/04-channel)
Modules communicate with each other by sending packets over IBC channels. As mentioned above, all
IBC packets contain the destination `portID` and `channelID` along with the source `portID` and
@@ -141,7 +141,7 @@ Thus, packet data is completely opaque to IBC handlers. It is incumbent on a sen
their application-specific packet information into the `Data` field of packets, and the receiver
module to decode that `Data` back to the original application data.
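A small Go sketch of this encode/decode round trip, using a hypothetical application payload rather than any real module's packet data type; port, channel, and timeout values are illustrative only.

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"

	clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
	channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
)

// MyPacketData is a hypothetical application-defined payload.
type MyPacketData struct {
	Note string `json:"note"`
}

func main() {
	// Sender side: encode application data into the opaque Data field of the packet.
	bz, err := json.Marshal(MyPacketData{Note: "hello"})
	if err != nil {
		log.Fatal(err)
	}
	packet := channeltypes.NewPacket(
		bz, 1, // data, sequence
		"transfer", "channel-0", // source port/channel (illustrative)
		"transfer", "channel-1", // destination port/channel (illustrative)
		clienttypes.NewHeight(1, 1000), 0, // timeout height, timeout timestamp
	)

	// Receiver side: decode the Data field back into the application type.
	var data MyPacketData
	if err := json.Unmarshal(packet.GetData(), &data); err != nil {
		log.Fatal(err)
	}
	fmt.Println(data.Note)
}
```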
-### [Receipts and Timeouts](https://github.com/cosmos/cosmos-sdk/tree/master/x/ibc/core/04-channel)
+### [Receipts and Timeouts](https://github.com/cosmos/ibc-go/blob/main/modules/core/04-channel)
Since IBC works over a distributed network and relies on potentially faulty relayers to relay messages between ledgers,
IBC must handle the case where a packet does not get sent to its destination in a timely manner or at all. Thus, packets must
@@ -157,7 +157,7 @@ In the UNORDERED case, packets may be received in any order. Thus, IBC will writ
For this reason, most modules should use UNORDERED channels as they require fewer liveness guarantees to function effectively for users of that channel.
-### [Acknowledgements](https://github.com/cosmos/cosmos-sdk/tree/master/x/ibc/core/04-channel)
+### [Acknowledgements](https://github.com/cosmos/ibc-go/blob/main/modules/core/04-channel)
Modules may also choose to write application-specific acknowledgements upon processing a packet. This may be done synchronously on `OnRecvPacket`, if the module processes packets as soon as they are received from the IBC module, or asynchronously if the module processes packets at some later point after receiving them.
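As a hedged Go sketch of the synchronous case (the `processPacket` helper is hypothetical and stands in for whatever state transition the module performs):

```go
package module

import (
	sdk "github.com/cosmos/cosmos-sdk/types"

	channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
)

// processPacket is a hypothetical application handler for an incoming packet.
func processPacket(ctx sdk.Context, packet channeltypes.Packet) error { return nil }

// ackForPacket sketches the synchronous case: acknowledge success or failure
// immediately while handling OnRecvPacket.
func ackForPacket(ctx sdk.Context, packet channeltypes.Packet) []byte {
	ack := channeltypes.NewResultAcknowledgement([]byte{byte(1)})
	if err := processPacket(ctx, packet); err != nil {
		// An error acknowledgement tells the counterparty the packet was received but failed.
		ack = channeltypes.NewErrorAcknowledgement(err.Error())
	}
	return ack.GetBytes()
}
```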
diff --git a/docs/relayer.md b/docs/relayer.md
index 15512125..29699d7c 100644
--- a/docs/relayer.md
+++ b/docs/relayer.md
@@ -7,14 +7,14 @@ order: 4
## Pre-requisite Readings
- [IBC Overview](./overview.md) {prereq}
-- [Events](../core/events.md) {prereq}
+- [Events](https://github.com/cosmos/cosmos-sdk/blob/master/docs/core/events.md) {prereq}
## Events
Events are emitted for every transaction processed by the base application to indicate the execution
of some logic clients may want to be aware of. This is extremely useful when relaying IBC packets.
Any message that uses IBC will emit events for the corresponding TAO logic executed as defined in
-the [IBC events spec](https://github.com/cosmos/cosmos-sdk/tree/master/x/ibc/core/spec/06_events.md).
+the [IBC events spec](https://github.com/cosmos/ibc-go/blob/main/modules/core/spec/06_events.md).
In the SDK, it can be assumed that for every message there is an event emitted with the type `message`,
attribute key `action`, and an attribute value representing the type of message sent
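For instance, a minimal sketch of the kind of event a handler emits and a relayer filters on; the `update_client` action value is purely illustrative.

```go
package module

import (
	sdk "github.com/cosmos/cosmos-sdk/types"
)

// emitMessageEvent sketches the standard event the SDK attaches to a handled message.
func emitMessageEvent(ctx sdk.Context) {
	ctx.EventManager().EmitEvent(
		sdk.NewEvent(
			sdk.EventTypeMessage, // type "message"
			sdk.NewAttribute(sdk.AttributeKeyAction, "update_client"), // attribute key "action"
		),
	)
}
```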
@@ -43,3 +43,4 @@ piece of information needed to relay a packet.
## Example Implementations
- [Golang Relayer](https://github.com/iqlusioninc/relayer)
+- [Hermes](https://github.com/informalsystems/ibc-rs/tree/master/relayer)
From 91162f01d28161407d95bb2c402eaff1e2710bf7 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Colin=20Axn=C3=A9r?=
<25233464+colin-axner@users.noreply.github.com>
Date: Mon, 22 Mar 2021 18:34:49 +0100
Subject: [PATCH 021/393] fix automerge
---
.mergify.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.mergify.yml b/.mergify.yml
index ecc27fff..72cd7245 100644
--- a/.mergify.yml
+++ b/.mergify.yml
@@ -1,7 +1,7 @@
pull_request_rules:
- name: automerge to main with label automerge and branch protection passing
conditions:
- - "#approved-reviews-by>1"
+ - "#approved-reviews-by>=1"
- base=main
- label=automerge
actions:
From 9c6edc4bb828e994645d32108bc4610fe065eb71 Mon Sep 17 00:00:00 2001
From: Federico Kunze <31522760+fedekunze@users.noreply.github.com>
Date: Mon, 22 Mar 2021 19:16:40 -0300
Subject: [PATCH 022/393] client: upgraded consensus state (#82)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
* client: upgraded consensus state
* remove fields from req
* tests
Co-authored-by: colin axnér <25233464+colin-axner@users.noreply.github.com>
---
docs/ibc/proto-docs.md | 63 ++-
go.mod | 1 +
modules/core/02-client/keeper/grpc_query.go | 40 +-
.../core/02-client/keeper/grpc_query_test.go | 60 ++-
modules/core/02-client/keeper/keeper.go | 9 +-
modules/core/02-client/keeper/keeper_test.go | 4 +-
.../core/02-client/types/expected_keepers.go | 3 +-
modules/core/02-client/types/query.pb.go | 508 +++++++++++++-----
modules/core/02-client/types/query.pb.gw.go | 110 ++--
.../07-tendermint/types/tendermint.pb.go | 4 +-
proto/ibc/core/client/v1/query.proto | 29 +-
11 files changed, 613 insertions(+), 218 deletions(-)
diff --git a/docs/ibc/proto-docs.md b/docs/ibc/proto-docs.md
index 28c9be8e..5cf16d3f 100644
--- a/docs/ibc/proto-docs.md
+++ b/docs/ibc/proto-docs.md
@@ -664,6 +664,14 @@
M QueryUpgradedClientStateResponse
+
+ M QueryUpgradedConsensusStateRequest
+
+
+
+ M QueryUpgradedConsensusStateResponse
+
+
@@ -4689,6 +4697,13 @@ ConsensusState
QueryUpgradedClientStateRequest is the request type for the
Query/UpgradedClientState RPC method
+
+
+
+ QueryUpgradedClientStateResponse
+ QueryUpgradedClientStateResponse is the response type for the
Query/UpgradedClientState RPC method.
+
+
Field Type Label Description
@@ -4696,18 +4711,10 @@ ConsensusState
- client_id
- string
-
- client state unique identifier
-
-
-
- plan_height
- int64
+ upgraded_client_state
+ google.protobuf.Any
- plan height of the current chain must be sent in request
-as this is the height under which upgraded client state is stored
+ client state associated with the request identifier
@@ -4717,8 +4724,15 @@ as this is the height under which upgraded client state is stored
- QueryUpgradedClientStateResponse
- QueryUpgradedClientStateResponse is the response type for the
Query/UpgradedClientState RPC method.
+ QueryUpgradedConsensusStateRequest
+ QueryUpgradedConsensusStateRequest is the request type for the
Query/UpgradedConsensusState RPC method
+
+
+
+
+
+ QueryUpgradedConsensusStateResponse
+ QueryUpgradedConsensusStateResponse is the response type for the
Query/UpgradedConsensusState RPC method.
@@ -4728,10 +4742,10 @@ as this is the height under which upgraded client state is stored
- upgraded_client_state
+ upgraded_consensus_state
google.protobuf.Any
- client state associated with the request identifier
+ Consensus state associated with the request identifier
@@ -4799,6 +4813,13 @@ client.
UpgradedClientState queries an Upgraded IBC light client.
+
+ UpgradedConsensusState
+ QueryUpgradedConsensusStateRequest
+ QueryUpgradedConsensusStateResponse
+ UpgradedConsensusState queries an Upgraded IBC consensus state.
+
+
@@ -4872,7 +4893,17 @@ client.
UpgradedClientState
GET
- /ibc/core/client/v1/upgraded_client_states/{client_id}
+ /ibc/core/client/v1/upgraded_client_states
+
+
+
+
+
+
+
+ UpgradedConsensusState
+ GET
+ /ibc/core/client/v1/upgraded_consensus_states
diff --git a/go.mod b/go.mod
index 3b9b5de9..7cf8a7b5 100644
--- a/go.mod
+++ b/go.mod
@@ -23,4 +23,5 @@ require (
github.com/tendermint/tm-db v0.6.4
google.golang.org/genproto v0.0.0-20210114201628-6edceaf6022f
google.golang.org/grpc v1.36.0
+ google.golang.org/protobuf v1.25.0
)
diff --git a/modules/core/02-client/keeper/grpc_query.go b/modules/core/02-client/keeper/grpc_query.go
index cb353a57..556ccad2 100644
--- a/modules/core/02-client/keeper/grpc_query.go
+++ b/modules/core/02-client/keeper/grpc_query.go
@@ -204,11 +204,8 @@ func (q Keeper) UpgradedClientState(c context.Context, req *types.QueryUpgradedC
return nil, status.Error(codes.InvalidArgument, "empty request")
}
- if err := host.ClientIdentifierValidator(req.ClientId); err != nil {
- return nil, status.Error(codes.InvalidArgument, err.Error())
- }
-
ctx := sdk.UnwrapSDKContext(c)
+
plan, found := q.GetUpgradePlan(ctx)
if !found {
return nil, status.Error(
@@ -218,10 +215,7 @@ func (q Keeper) UpgradedClientState(c context.Context, req *types.QueryUpgradedC
bz, found := q.GetUpgradedClient(ctx, plan.Height)
if !found {
- return nil, status.Error(
- codes.NotFound,
- sdkerrors.Wrap(types.ErrClientNotFound, req.ClientId).Error(),
- )
+ return nil, status.Error(codes.NotFound, types.ErrClientNotFound.Error())
}
clientState, err := types.UnmarshalClientState(q.cdc, bz)
@@ -240,3 +234,33 @@ func (q Keeper) UpgradedClientState(c context.Context, req *types.QueryUpgradedC
UpgradedClientState: any,
}, nil
}
+
+// UpgradedConsensusState implements the Query/UpgradedConsensusState gRPC method
+func (q Keeper) UpgradedConsensusState(c context.Context, req *types.QueryUpgradedConsensusStateRequest) (*types.QueryUpgradedConsensusStateResponse, error) {
+ if req == nil {
+ return nil, status.Error(codes.InvalidArgument, "empty request")
+ }
+
+ ctx := sdk.UnwrapSDKContext(c)
+
+ bz, found := q.GetUpgradedConsensusState(ctx, ctx.BlockHeight())
+ if !found {
+ return nil, status.Errorf(codes.NotFound, "%s, height %d", types.ErrConsensusStateNotFound.Error(), ctx.BlockHeight())
+ }
+
+ consensusState, err := types.UnmarshalConsensusState(q.cdc, bz)
+ if err != nil {
+ return nil, status.Error(
+ codes.Internal, err.Error(),
+ )
+ }
+
+ any, err := types.PackConsensusState(consensusState)
+ if err != nil {
+ return nil, status.Error(codes.Internal, err.Error())
+ }
+
+ return &types.QueryUpgradedConsensusStateResponse{
+ UpgradedConsensusState: any,
+ }, nil
+}
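A hedged sketch of calling the new endpoint from a gRPC query client, mirroring what the test below does; `queryClient` is assumed to be a generated `types.QueryClient` connected to a node.

```go
package main

import (
	"context"
	"fmt"

	"github.com/cosmos/ibc-go/modules/core/02-client/types"
)

// queryUpgradedConsensusState sketches a call to the new Query/UpgradedConsensusState endpoint.
func queryUpgradedConsensusState(queryClient types.QueryClient) error {
	res, err := queryClient.UpgradedConsensusState(
		context.Background(),
		&types.QueryUpgradedConsensusStateRequest{},
	)
	if err != nil {
		// No upgrade plan or no stored consensus state surfaces as a NotFound error.
		return err
	}
	fmt.Println(res.UpgradedConsensusState) // protobuf Any wrapping the upgraded consensus state
	return nil
}
```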
diff --git a/modules/core/02-client/keeper/grpc_query_test.go b/modules/core/02-client/keeper/grpc_query_test.go
index 5d3671df..41c128d9 100644
--- a/modules/core/02-client/keeper/grpc_query_test.go
+++ b/modules/core/02-client/keeper/grpc_query_test.go
@@ -6,8 +6,10 @@ import (
codectypes "github.com/cosmos/cosmos-sdk/codec/types"
sdk "github.com/cosmos/cosmos-sdk/types"
-
+ grpctypes "github.com/cosmos/cosmos-sdk/types/grpc"
"github.com/cosmos/cosmos-sdk/types/query"
+ "google.golang.org/grpc/metadata"
+
"github.com/cosmos/ibc-go/modules/core/02-client/types"
commitmenttypes "github.com/cosmos/ibc-go/modules/core/23-commitment/types"
"github.com/cosmos/ibc-go/modules/core/exported"
@@ -373,6 +375,62 @@ func (suite *KeeperTestSuite) TestQueryConsensusStates() {
}
}
+func (suite *KeeperTestSuite) TestQueryUpgradedConsensusStates() {
+ var (
+ req *types.QueryUpgradedConsensusStateRequest
+ expConsensusState *codectypes.Any
+ height int64
+ )
+
+ testCases := []struct {
+ msg string
+ malleate func()
+ expPass bool
+ }{
+ {
+ "no plan",
+ func() {
+ req = &types.QueryUpgradedConsensusStateRequest{}
+ },
+ false,
+ },
+ {
+ "valid consensus state",
+ func() {
+ req = &types.QueryUpgradedConsensusStateRequest{}
+ lastHeight := types.NewHeight(0, uint64(suite.ctx.BlockHeight()))
+ height = int64(lastHeight.GetRevisionHeight())
+ suite.ctx = suite.ctx.WithBlockHeight(height)
+
+ expConsensusState = types.MustPackConsensusState(suite.consensusState)
+ bz := types.MustMarshalConsensusState(suite.cdc, suite.consensusState)
+ err := suite.keeper.SetUpgradedConsensusState(suite.ctx, height, bz)
+ suite.Require().NoError(err)
+ },
+ true,
+ },
+ }
+
+ for _, tc := range testCases {
+ suite.Run(fmt.Sprintf("Case %s", tc.msg), func() {
+ suite.SetupTest() // reset
+
+ tc.malleate()
+
+ ctx := sdk.WrapSDKContext(suite.ctx)
+ ctx = metadata.AppendToOutgoingContext(ctx, grpctypes.GRPCBlockHeightHeader, fmt.Sprintf("%d", height))
+
+ res, err := suite.queryClient.UpgradedConsensusState(ctx, req)
+ if tc.expPass {
+ suite.Require().NoError(err)
+ suite.Require().True(expConsensusState.Equal(res.UpgradedConsensusState))
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+}
+
func (suite *KeeperTestSuite) TestQueryParams() {
ctx := sdk.WrapSDKContext(suite.chainA.GetContext())
expParams := types.DefaultParams()
diff --git a/modules/core/02-client/keeper/keeper.go b/modules/core/02-client/keeper/keeper.go
index 7bac00e5..aa3369bc 100644
--- a/modules/core/02-client/keeper/keeper.go
+++ b/modules/core/02-client/keeper/keeper.go
@@ -12,13 +12,13 @@ import (
"github.com/cosmos/cosmos-sdk/store/prefix"
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ paramtypes "github.com/cosmos/cosmos-sdk/x/params/types"
+ upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
"github.com/cosmos/ibc-go/modules/core/02-client/types"
commitmenttypes "github.com/cosmos/ibc-go/modules/core/23-commitment/types"
host "github.com/cosmos/ibc-go/modules/core/24-host"
"github.com/cosmos/ibc-go/modules/core/exported"
ibctmtypes "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types"
- paramtypes "github.com/cosmos/cosmos-sdk/x/params/types"
- upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
)
// Keeper represents a type that grants read and write permissions to any client
@@ -339,6 +339,11 @@ func (k Keeper) GetUpgradedClient(ctx sdk.Context, planHeight int64) ([]byte, bo
return k.upgradeKeeper.GetUpgradedClient(ctx, planHeight)
}
+// GetUpgradedConsensusState returns the upgraded consensus state
+func (k Keeper) GetUpgradedConsensusState(ctx sdk.Context, planHeight int64) ([]byte, bool) {
+ return k.upgradeKeeper.GetUpgradedConsensusState(ctx, planHeight)
+}
+
// SetUpgradedConsensusState executes the upgrade keeper SetUpgradedConsensusState function.
func (k Keeper) SetUpgradedConsensusState(ctx sdk.Context, planHeight int64, bz []byte) error {
return k.upgradeKeeper.SetUpgradedConsensusState(ctx, planHeight, bz)
diff --git a/modules/core/02-client/keeper/keeper_test.go b/modules/core/02-client/keeper/keeper_test.go
index c6b3329c..b31972b3 100644
--- a/modules/core/02-client/keeper/keeper_test.go
+++ b/modules/core/02-client/keeper/keeper_test.go
@@ -13,8 +13,8 @@ import (
"github.com/cosmos/cosmos-sdk/baseapp"
"github.com/cosmos/cosmos-sdk/codec"
cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec"
- "github.com/cosmos/ibc-go/testing/simapp"
sdk "github.com/cosmos/cosmos-sdk/types"
+ stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types"
"github.com/cosmos/ibc-go/modules/core/02-client/keeper"
"github.com/cosmos/ibc-go/modules/core/02-client/types"
commitmenttypes "github.com/cosmos/ibc-go/modules/core/23-commitment/types"
@@ -23,7 +23,7 @@ import (
localhosttypes "github.com/cosmos/ibc-go/modules/light-clients/09-localhost/types"
ibctesting "github.com/cosmos/ibc-go/testing"
ibctestingmock "github.com/cosmos/ibc-go/testing/mock"
- stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types"
+ "github.com/cosmos/ibc-go/testing/simapp"
)
const (
diff --git a/modules/core/02-client/types/expected_keepers.go b/modules/core/02-client/types/expected_keepers.go
index ad007fb8..f10c39b9 100644
--- a/modules/core/02-client/types/expected_keepers.go
+++ b/modules/core/02-client/types/expected_keepers.go
@@ -19,7 +19,8 @@ type UpgradeKeeper interface {
ClearIBCState(ctx sdk.Context, lastHeight int64)
GetUpgradePlan(ctx sdk.Context) (plan upgradetypes.Plan, havePlan bool)
GetUpgradedClient(ctx sdk.Context, height int64) ([]byte, bool)
- SetUpgradedConsensusState(ctx sdk.Context, planHeight int64, bz []byte) error
SetUpgradedClient(ctx sdk.Context, planHeight int64, bz []byte) error
+ GetUpgradedConsensusState(ctx sdk.Context, lastHeight int64) ([]byte, bool)
+ SetUpgradedConsensusState(ctx sdk.Context, planHeight int64, bz []byte) error
ScheduleUpgrade(ctx sdk.Context, plan upgradetypes.Plan) error
}
diff --git a/modules/core/02-client/types/query.pb.go b/modules/core/02-client/types/query.pb.go
index b89b1c2e..57908e13 100644
--- a/modules/core/02-client/types/query.pb.go
+++ b/modules/core/02-client/types/query.pb.go
@@ -588,11 +588,6 @@ func (m *QueryClientParamsResponse) GetParams() *Params {
// QueryUpgradedClientStateRequest is the request type for the
// Query/UpgradedClientState RPC method
type QueryUpgradedClientStateRequest struct {
- // client state unique identifier
- ClientId string `protobuf:"bytes,1,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty"`
- // plan height of the current chain must be sent in request
- // as this is the height under which upgraded client state is stored
- PlanHeight int64 `protobuf:"varint,2,opt,name=plan_height,json=planHeight,proto3" json:"plan_height,omitempty"`
}
func (m *QueryUpgradedClientStateRequest) Reset() { *m = QueryUpgradedClientStateRequest{} }
@@ -628,20 +623,6 @@ func (m *QueryUpgradedClientStateRequest) XXX_DiscardUnknown() {
var xxx_messageInfo_QueryUpgradedClientStateRequest proto.InternalMessageInfo
-func (m *QueryUpgradedClientStateRequest) GetClientId() string {
- if m != nil {
- return m.ClientId
- }
- return ""
-}
-
-func (m *QueryUpgradedClientStateRequest) GetPlanHeight() int64 {
- if m != nil {
- return m.PlanHeight
- }
- return 0
-}
-
// QueryUpgradedClientStateResponse is the response type for the
// Query/UpgradedClientState RPC method.
type QueryUpgradedClientStateResponse struct {
@@ -689,6 +670,91 @@ func (m *QueryUpgradedClientStateResponse) GetUpgradedClientState() *types.Any {
return nil
}
+// QueryUpgradedConsensusStateRequest is the request type for the
+// Query/UpgradedConsensusState RPC method
+type QueryUpgradedConsensusStateRequest struct {
+}
+
+func (m *QueryUpgradedConsensusStateRequest) Reset() { *m = QueryUpgradedConsensusStateRequest{} }
+func (m *QueryUpgradedConsensusStateRequest) String() string { return proto.CompactTextString(m) }
+func (*QueryUpgradedConsensusStateRequest) ProtoMessage() {}
+func (*QueryUpgradedConsensusStateRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_dc42cdfd1d52d76e, []int{12}
+}
+func (m *QueryUpgradedConsensusStateRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *QueryUpgradedConsensusStateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_QueryUpgradedConsensusStateRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *QueryUpgradedConsensusStateRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QueryUpgradedConsensusStateRequest.Merge(m, src)
+}
+func (m *QueryUpgradedConsensusStateRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *QueryUpgradedConsensusStateRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_QueryUpgradedConsensusStateRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QueryUpgradedConsensusStateRequest proto.InternalMessageInfo
+
+// QueryUpgradedConsensusStateResponse is the response type for the
+// Query/UpgradedConsensusState RPC method.
+type QueryUpgradedConsensusStateResponse struct {
+ // Consensus state associated with the request identifier
+ UpgradedConsensusState *types.Any `protobuf:"bytes,1,opt,name=upgraded_consensus_state,json=upgradedConsensusState,proto3" json:"upgraded_consensus_state,omitempty"`
+}
+
+func (m *QueryUpgradedConsensusStateResponse) Reset() { *m = QueryUpgradedConsensusStateResponse{} }
+func (m *QueryUpgradedConsensusStateResponse) String() string { return proto.CompactTextString(m) }
+func (*QueryUpgradedConsensusStateResponse) ProtoMessage() {}
+func (*QueryUpgradedConsensusStateResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_dc42cdfd1d52d76e, []int{13}
+}
+func (m *QueryUpgradedConsensusStateResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *QueryUpgradedConsensusStateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_QueryUpgradedConsensusStateResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *QueryUpgradedConsensusStateResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QueryUpgradedConsensusStateResponse.Merge(m, src)
+}
+func (m *QueryUpgradedConsensusStateResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *QueryUpgradedConsensusStateResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_QueryUpgradedConsensusStateResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QueryUpgradedConsensusStateResponse proto.InternalMessageInfo
+
+func (m *QueryUpgradedConsensusStateResponse) GetUpgradedConsensusState() *types.Any {
+ if m != nil {
+ return m.UpgradedConsensusState
+ }
+ return nil
+}
+
func init() {
proto.RegisterType((*QueryClientStateRequest)(nil), "ibc.core.client.v1.QueryClientStateRequest")
proto.RegisterType((*QueryClientStateResponse)(nil), "ibc.core.client.v1.QueryClientStateResponse")
@@ -702,69 +768,73 @@ func init() {
proto.RegisterType((*QueryClientParamsResponse)(nil), "ibc.core.client.v1.QueryClientParamsResponse")
proto.RegisterType((*QueryUpgradedClientStateRequest)(nil), "ibc.core.client.v1.QueryUpgradedClientStateRequest")
proto.RegisterType((*QueryUpgradedClientStateResponse)(nil), "ibc.core.client.v1.QueryUpgradedClientStateResponse")
+ proto.RegisterType((*QueryUpgradedConsensusStateRequest)(nil), "ibc.core.client.v1.QueryUpgradedConsensusStateRequest")
+ proto.RegisterType((*QueryUpgradedConsensusStateResponse)(nil), "ibc.core.client.v1.QueryUpgradedConsensusStateResponse")
}
func init() { proto.RegisterFile("ibc/core/client/v1/query.proto", fileDescriptor_dc42cdfd1d52d76e) }
var fileDescriptor_dc42cdfd1d52d76e = []byte{
- // 909 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0xcf, 0x8f, 0xdb, 0x44,
- 0x14, 0xce, 0xec, 0x6e, 0xab, 0xed, 0x24, 0xdd, 0xa0, 0xe9, 0x96, 0xa6, 0x6e, 0x71, 0x82, 0x2b,
- 0xd1, 0x14, 0x1a, 0xcf, 0x26, 0xa5, 0xa5, 0x17, 0x90, 0xd8, 0x4a, 0xa5, 0xbd, 0xd0, 0x62, 0x84,
- 0x90, 0x90, 0x50, 0x64, 0x3b, 0x13, 0xc7, 0x52, 0xe2, 0x71, 0x3d, 0x76, 0xa4, 0x55, 0xb5, 0x97,
- 0x1e, 0x39, 0x21, 0x21, 0x71, 0xe5, 0xc4, 0x85, 0xc3, 0x8a, 0x1b, 0x57, 0x4e, 0x68, 0x8f, 0x2b,
- 0xc1, 0x81, 0x13, 0x8b, 0x76, 0xf9, 0x43, 0x90, 0x67, 0xc6, 0x59, 0x3b, 0x99, 0x28, 0x5e, 0x44,
- 0x6f, 0xde, 0xf7, 0xf3, 0x7b, 0xdf, 0xfb, 0xe6, 0x65, 0xa1, 0xee, 0x3b, 0x2e, 0x76, 0x69, 0x44,
- 0xb0, 0x3b, 0xf6, 0x49, 0x10, 0xe3, 0x69, 0x17, 0xbf, 0x48, 0x48, 0xb4, 0x67, 0x86, 0x11, 0x8d,
- 0x29, 0x42, 0xbe, 0xe3, 0x9a, 0xa9, 0xdf, 0x14, 0x7e, 0x73, 0xda, 0xd5, 0xde, 0x75, 0x29, 0x9b,
- 0x50, 0x86, 0x1d, 0x9b, 0x11, 0x11, 0x8c, 0xa7, 0x5d, 0x87, 0xc4, 0x76, 0x17, 0x87, 0xb6, 0xe7,
- 0x07, 0x76, 0xec, 0xd3, 0x40, 0xe4, 0x6b, 0x4d, 0x45, 0x7d, 0x59, 0x49, 0x04, 0x5c, 0xf7, 0x28,
- 0xf5, 0xc6, 0x04, 0xf3, 0xbf, 0x9c, 0x64, 0x88, 0xed, 0x40, 0xf6, 0xd6, 0x6e, 0x4a, 0x97, 0x1d,
- 0xfa, 0xd8, 0x0e, 0x02, 0x1a, 0xf3, 0xc2, 0x4c, 0x7a, 0xb7, 0x3d, 0xea, 0x51, 0xfe, 0x89, 0xd3,
- 0x2f, 0x61, 0x35, 0x1e, 0xc0, 0x6b, 0x9f, 0xa5, 0x88, 0x1e, 0xf1, 0x1e, 0x9f, 0xc7, 0x76, 0x4c,
- 0x2c, 0xf2, 0x22, 0x21, 0x2c, 0x46, 0x37, 0xe0, 0x25, 0xd1, 0xb9, 0xef, 0x0f, 0x1a, 0xa0, 0x05,
- 0xda, 0x97, 0xac, 0x4d, 0x61, 0x78, 0x3a, 0x30, 0x0e, 0x00, 0x6c, 0x2c, 0x26, 0xb2, 0x90, 0x06,
- 0x8c, 0xa0, 0x0f, 0x60, 0x4d, 0x66, 0xb2, 0xd4, 0xce, 0x93, 0xab, 0xbd, 0x6d, 0x53, 0xe0, 0x33,
- 0x33, 0xe8, 0xe6, 0xc7, 0xc1, 0x9e, 0x55, 0x75, 0xcf, 0x0a, 0xa0, 0x6d, 0x78, 0x21, 0x8c, 0x28,
- 0x1d, 0x36, 0xd6, 0x5a, 0xa0, 0x5d, 0xb3, 0xc4, 0x1f, 0xe8, 0x11, 0xac, 0xf1, 0x8f, 0xfe, 0x88,
- 0xf8, 0xde, 0x28, 0x6e, 0xac, 0xf3, 0x72, 0x9a, 0xb9, 0x48, 0xb5, 0xf9, 0x84, 0x47, 0xec, 0x6e,
- 0x1c, 0xfe, 0xd5, 0xac, 0x58, 0x55, 0x9e, 0x25, 0x4c, 0x86, 0xb3, 0x88, 0x97, 0x65, 0x93, 0x3e,
- 0x86, 0xf0, 0x6c, 0x11, 0x12, 0xed, 0x3b, 0xa6, 0xd8, 0x9a, 0x99, 0x6e, 0xcd, 0x14, 0x2b, 0x96,
- 0x5b, 0x33, 0x9f, 0xdb, 0x5e, 0xc6, 0x92, 0x95, 0xcb, 0x34, 0xfe, 0x00, 0xf0, 0xba, 0xa2, 0x89,
- 0x64, 0x25, 0x80, 0x97, 0xf3, 0xac, 0xb0, 0x06, 0x68, 0xad, 0xb7, 0xab, 0xbd, 0x3b, 0xaa, 0x39,
- 0x9e, 0x0e, 0x48, 0x10, 0xfb, 0x43, 0x9f, 0x0c, 0x72, 0xa5, 0x76, 0xf5, 0x74, 0xac, 0x9f, 0x8e,
- 0x9b, 0x6f, 0x2a, 0xdd, 0xcc, 0xaa, 0xe5, 0xb8, 0x64, 0xe8, 0x93, 0xc2, 0x54, 0x6b, 0x7c, 0xaa,
- 0xdb, 0x2b, 0xa7, 0x12, 0x60, 0x0b, 0x63, 0xfd, 0x0c, 0xa0, 0x26, 0xc6, 0x4a, 0x5d, 0x01, 0x4b,
- 0x58, 0x69, 0x9d, 0xa0, 0xdb, 0xb0, 0x1e, 0x91, 0xa9, 0xcf, 0x7c, 0x1a, 0xf4, 0x83, 0x64, 0xe2,
- 0x90, 0x88, 0x23, 0xd9, 0xb0, 0xb6, 0x32, 0xf3, 0xa7, 0xdc, 0x5a, 0x08, 0xcc, 0xed, 0x39, 0x17,
- 0x28, 0x16, 0x89, 0x6e, 0xc1, 0xcb, 0xe3, 0x74, 0xbe, 0x38, 0x0b, 0xdb, 0x68, 0x81, 0xf6, 0xa6,
- 0x55, 0x13, 0x46, 0xb9, 0xed, 0x5f, 0x00, 0xbc, 0xa1, 0x84, 0x2c, 0x77, 0xf1, 0x21, 0xac, 0xbb,
- 0x99, 0xa7, 0x84, 0x48, 0xb7, 0xdc, 0x42, 0x99, 0xd7, 0xa9, 0xd3, 0x57, 0x6a, 0xe4, 0xac, 0x14,
- 0xdb, 0x8f, 0x15, 0x2b, 0xff, 0x2f, 0x42, 0xfe, 0x0d, 0xc0, 0x9b, 0x6a, 0x10, 0x92, 0xbf, 0xaf,
- 0xe1, 0x1b, 0x73, 0xfc, 0x65, 0x72, 0xbe, 0xab, 0x1a, 0xb7, 0x58, 0xe6, 0x4b, 0x3f, 0x1e, 0x15,
- 0x08, 0xa8, 0x17, 0xe9, 0xfd, 0x1f, 0xa5, 0xab, 0x15, 0x5e, 0xfd, 0x73, 0x3b, 0xb2, 0x27, 0x19,
- 0x93, 0xc6, 0xb3, 0xc2, 0x63, 0xcd, 0x7c, 0x72, 0xc0, 0x1e, 0xbc, 0x18, 0x72, 0x8b, 0xd4, 0x85,
- 0x72, 0x8b, 0x32, 0x47, 0x46, 0x1a, 0x7d, 0xd8, 0xe4, 0x05, 0xbf, 0x08, 0xbd, 0xc8, 0x1e, 0x14,
- 0xde, 0x66, 0xa9, 0xed, 0x35, 0x61, 0x35, 0x1c, 0xdb, 0x33, 0xf9, 0xa7, 0x63, 0xaf, 0x5b, 0x30,
- 0x35, 0x49, 0x6d, 0x8c, 0x61, 0x6b, 0x79, 0x03, 0x09, 0xfc, 0x09, 0xbc, 0x9a, 0x48, 0x77, 0xbf,
- 0xf4, 0x11, 0xbe, 0x92, 0x2c, 0x56, 0xec, 0xfd, 0xb8, 0x09, 0x2f, 0xf0, 0x76, 0xe8, 0x07, 0x00,
- 0xab, 0x39, 0x0f, 0x7a, 0x4f, 0x45, 0xc6, 0x92, 0x9f, 0x11, 0xed, 0x6e, 0xb9, 0x60, 0x01, 0xdf,
- 0xb8, 0xff, 0xea, 0xf7, 0x7f, 0xbe, 0x5b, 0xc3, 0xa8, 0x83, 0x97, 0xfe, 0x10, 0x4a, 0xbd, 0xe1,
- 0x97, 0x33, 0x26, 0xf7, 0xd1, 0xf7, 0x00, 0xd6, 0xf2, 0xa7, 0x10, 0x95, 0xea, 0x9a, 0x49, 0x41,
- 0xeb, 0x94, 0x8c, 0x96, 0x20, 0xef, 0x70, 0x90, 0xb7, 0xd0, 0xdb, 0x2b, 0x41, 0xa2, 0x63, 0x00,
- 0xb7, 0x8a, 0xea, 0x47, 0xe6, 0xf2, 0x66, 0xaa, 0xfb, 0xaa, 0xe1, 0xd2, 0xf1, 0x12, 0xde, 0x98,
- 0xc3, 0x1b, 0xa2, 0x81, 0x12, 0xde, 0xdc, 0xb3, 0xcd, 0xd3, 0x88, 0xb3, 0x53, 0x8b, 0x5f, 0xce,
- 0x1d, 0xed, 0x7d, 0x2c, 0x44, 0x99, 0x73, 0x08, 0xc3, 0x3e, 0x3a, 0x00, 0xb0, 0x3e, 0x77, 0x26,
- 0x50, 0x59, 0xc8, 0xb3, 0x05, 0xec, 0x94, 0x4f, 0x90, 0x43, 0x3e, 0xe4, 0x43, 0xf6, 0xd0, 0xce,
- 0x79, 0x87, 0x44, 0xdf, 0xcc, 0xb4, 0x22, 0xde, 0xef, 0x4a, 0xad, 0x14, 0xce, 0xc6, 0x4a, 0xad,
- 0x14, 0x0f, 0x89, 0xf1, 0x16, 0xc7, 0x79, 0x0d, 0x5d, 0x15, 0x38, 0x67, 0x10, 0xc5, 0xcd, 0x40,
- 0xbf, 0x02, 0x78, 0x45, 0xf1, 0x9c, 0xd1, 0xbd, 0xa5, 0x5d, 0x96, 0x5f, 0x17, 0xed, 0xfd, 0xf3,
- 0x25, 0x49, 0x84, 0x1f, 0x71, 0x84, 0x0f, 0xd1, 0x03, 0x15, 0x93, 0xca, 0x5b, 0x52, 0xe0, 0x73,
- 0xf7, 0xd9, 0xe1, 0x89, 0x0e, 0x8e, 0x4e, 0x74, 0xf0, 0xf7, 0x89, 0x0e, 0xbe, 0x3d, 0xd5, 0x2b,
- 0x47, 0xa7, 0x7a, 0xe5, 0xcf, 0x53, 0xbd, 0xf2, 0xd5, 0x7d, 0xcf, 0x8f, 0x47, 0x89, 0x63, 0xba,
- 0x74, 0x82, 0xe5, 0xff, 0xc0, 0xbe, 0xe3, 0x76, 0x3c, 0x8a, 0x27, 0x74, 0x90, 0x8c, 0x09, 0x13,
- 0xdd, 0x76, 0x7a, 0x1d, 0xd9, 0x30, 0xde, 0x0b, 0x09, 0x73, 0x2e, 0xf2, 0xdb, 0x74, 0xef, 0xdf,
- 0x00, 0x00, 0x00, 0xff, 0xff, 0xf6, 0x94, 0x03, 0xe8, 0x6c, 0x0b, 0x00, 0x00,
+ // 942 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x57, 0xcf, 0x6f, 0xdc, 0x44,
+ 0x14, 0xce, 0xa4, 0x69, 0xd5, 0xbc, 0xdd, 0x26, 0x68, 0x9a, 0xa6, 0x5b, 0xb7, 0x38, 0x5b, 0x07,
+ 0xd1, 0xb4, 0x24, 0x9e, 0x64, 0x4b, 0x5b, 0x2e, 0x1c, 0x48, 0xa5, 0xd2, 0x5e, 0xda, 0x62, 0x84,
+ 0x90, 0x90, 0x50, 0x64, 0x7b, 0x27, 0x8e, 0xa5, 0x5d, 0x8f, 0xeb, 0xb1, 0x23, 0x45, 0x55, 0x2e,
+ 0x3d, 0x72, 0x42, 0x42, 0xe2, 0xca, 0x9d, 0x43, 0xc5, 0x01, 0x89, 0x2b, 0x27, 0xd4, 0x63, 0x25,
+ 0x38, 0x70, 0x40, 0x04, 0x25, 0xfc, 0x21, 0xc8, 0x33, 0xe3, 0x8d, 0xbd, 0x3b, 0xcb, 0x7a, 0x11,
+ 0xbd, 0x6d, 0xde, 0xcf, 0xef, 0x7d, 0xef, 0xcd, 0x67, 0x05, 0xcc, 0xd0, 0xf3, 0x89, 0xcf, 0x12,
+ 0x4a, 0xfc, 0x5e, 0x48, 0xa3, 0x94, 0xec, 0x6f, 0x91, 0x67, 0x19, 0x4d, 0x0e, 0xec, 0x38, 0x61,
+ 0x29, 0xc3, 0x38, 0xf4, 0x7c, 0x3b, 0xf7, 0xdb, 0xd2, 0x6f, 0xef, 0x6f, 0x19, 0xb7, 0x7c, 0xc6,
+ 0xfb, 0x8c, 0x13, 0xcf, 0xe5, 0x54, 0x06, 0x93, 0xfd, 0x2d, 0x8f, 0xa6, 0xee, 0x16, 0x89, 0xdd,
+ 0x20, 0x8c, 0xdc, 0x34, 0x64, 0x91, 0xcc, 0x37, 0x56, 0x34, 0xf5, 0x55, 0x25, 0x19, 0x70, 0x25,
+ 0x60, 0x2c, 0xe8, 0x51, 0x22, 0xfe, 0xf2, 0xb2, 0x5d, 0xe2, 0x46, 0xaa, 0xb7, 0x71, 0x4d, 0xb9,
+ 0xdc, 0x38, 0x24, 0x6e, 0x14, 0xb1, 0x54, 0x14, 0xe6, 0xca, 0xbb, 0x14, 0xb0, 0x80, 0x89, 0x9f,
+ 0x24, 0xff, 0x25, 0xad, 0xd6, 0x5d, 0xb8, 0xfc, 0x49, 0x8e, 0xe8, 0xbe, 0xe8, 0xf1, 0x69, 0xea,
+ 0xa6, 0xd4, 0xa1, 0xcf, 0x32, 0xca, 0x53, 0x7c, 0x15, 0xe6, 0x65, 0xe7, 0x9d, 0xb0, 0xdb, 0x42,
+ 0x6d, 0xb4, 0x36, 0xef, 0x9c, 0x97, 0x86, 0x47, 0x5d, 0xeb, 0x25, 0x82, 0xd6, 0x68, 0x22, 0x8f,
+ 0x59, 0xc4, 0x29, 0xbe, 0x07, 0x4d, 0x95, 0xc9, 0x73, 0xbb, 0x48, 0x6e, 0x74, 0x96, 0x6c, 0x89,
+ 0xcf, 0x2e, 0xa0, 0xdb, 0x1f, 0x45, 0x07, 0x4e, 0xc3, 0x3f, 0x2d, 0x80, 0x97, 0xe0, 0x6c, 0x9c,
+ 0x30, 0xb6, 0xdb, 0x9a, 0x6d, 0xa3, 0xb5, 0xa6, 0x23, 0xff, 0xc0, 0xf7, 0xa1, 0x29, 0x7e, 0xec,
+ 0xec, 0xd1, 0x30, 0xd8, 0x4b, 0x5b, 0x67, 0x44, 0x39, 0xc3, 0x1e, 0xa5, 0xda, 0x7e, 0x28, 0x22,
+ 0xb6, 0xe7, 0x5e, 0xfd, 0xb9, 0x32, 0xe3, 0x34, 0x44, 0x96, 0x34, 0x59, 0xde, 0x28, 0x5e, 0x5e,
+ 0x4c, 0xfa, 0x00, 0xe0, 0x74, 0x11, 0x0a, 0xed, 0xbb, 0xb6, 0xdc, 0x9a, 0x9d, 0x6f, 0xcd, 0x96,
+ 0x2b, 0x56, 0x5b, 0xb3, 0x9f, 0xba, 0x41, 0xc1, 0x92, 0x53, 0xca, 0xb4, 0x7e, 0x43, 0x70, 0x45,
+ 0xd3, 0x44, 0xb1, 0x12, 0xc1, 0x85, 0x32, 0x2b, 0xbc, 0x85, 0xda, 0x67, 0xd6, 0x1a, 0x9d, 0x9b,
+ 0xba, 0x39, 0x1e, 0x75, 0x69, 0x94, 0x86, 0xbb, 0x21, 0xed, 0x96, 0x4a, 0x6d, 0x9b, 0xf9, 0x58,
+ 0xdf, 0x1f, 0xad, 0x2c, 0x6b, 0xdd, 0xdc, 0x69, 0x96, 0xb8, 0xe4, 0xf8, 0xe3, 0xca, 0x54, 0xb3,
+ 0x62, 0xaa, 0x1b, 0x13, 0xa7, 0x92, 0x60, 0x2b, 0x63, 0xfd, 0x80, 0xc0, 0x90, 0x63, 0xe5, 0xae,
+ 0x88, 0x67, 0xbc, 0xf6, 0x9d, 0xe0, 0x1b, 0xb0, 0x98, 0xd0, 0xfd, 0x90, 0x87, 0x2c, 0xda, 0x89,
+ 0xb2, 0xbe, 0x47, 0x13, 0x81, 0x64, 0xce, 0x59, 0x28, 0xcc, 0x8f, 0x85, 0xb5, 0x12, 0x58, 0xda,
+ 0x73, 0x29, 0x50, 0x2e, 0x12, 0xaf, 0xc2, 0x85, 0x5e, 0x3e, 0x5f, 0x5a, 0x84, 0xcd, 0xb5, 0xd1,
+ 0xda, 0x79, 0xa7, 0x29, 0x8d, 0x6a, 0xdb, 0x3f, 0x21, 0xb8, 0xaa, 0x85, 0xac, 0x76, 0xf1, 0x21,
+ 0x2c, 0xfa, 0x85, 0xa7, 0xc6, 0x91, 0x2e, 0xf8, 0x95, 0x32, 0x6f, 0xf2, 0x4e, 0x5f, 0xe8, 0x91,
+ 0xf3, 0x5a, 0x6c, 0x3f, 0xd0, 0xac, 0xfc, 0xbf, 0x1c, 0xf2, 0x2f, 0x08, 0xae, 0xe9, 0x41, 0x28,
+ 0xfe, 0xbe, 0x84, 0xb7, 0x86, 0xf8, 0x2b, 0xce, 0x79, 0x5d, 0x37, 0x6e, 0xb5, 0xcc, 0xe7, 0x61,
+ 0xba, 0x57, 0x21, 0x60, 0xb1, 0x4a, 0xef, 0xff, 0x78, 0xba, 0x46, 0xe5, 0xd5, 0x3f, 0x75, 0x13,
+ 0xb7, 0x5f, 0x30, 0x69, 0x3d, 0xa9, 0x3c, 0xd6, 0xc2, 0xa7, 0x06, 0xec, 0xc0, 0xb9, 0x58, 0x58,
+ 0xd4, 0x5d, 0x68, 0xb7, 0xa8, 0x72, 0x54, 0xa4, 0x75, 0x1d, 0x56, 0x44, 0xc1, 0xcf, 0xe2, 0x20,
+ 0x71, 0xbb, 0x95, 0xb7, 0x59, 0xf4, 0xec, 0x41, 0x7b, 0x7c, 0x88, 0x6a, 0xfd, 0x10, 0x2e, 0x65,
+ 0xca, 0xbd, 0x53, 0x5b, 0x46, 0x2f, 0x66, 0xa3, 0x15, 0xad, 0x77, 0xc0, 0xaa, 0x76, 0xd3, 0xbd,
+ 0x5f, 0x2b, 0x83, 0xd5, 0x7f, 0x8d, 0x52, 0xb0, 0x1e, 0x43, 0xeb, 0x14, 0xd6, 0x14, 0x6f, 0x67,
+ 0x39, 0xd3, 0xd6, 0xed, 0xfc, 0x31, 0x0f, 0x67, 0x45, 0x5f, 0xfc, 0x1d, 0x82, 0x46, 0x09, 0x36,
+ 0x7e, 0x4f, 0xc7, 0xf5, 0x98, 0xaf, 0x94, 0xb1, 0x5e, 0x2f, 0x58, 0x0e, 0x61, 0xdd, 0x79, 0xf1,
+ 0xeb, 0xdf, 0xdf, 0xcc, 0x12, 0xbc, 0x41, 0xc6, 0x7e, 0x67, 0xd5, 0x39, 0x93, 0xe7, 0x83, 0x67,
+ 0x76, 0x88, 0xbf, 0x45, 0xd0, 0x2c, 0x2b, 0x2d, 0xae, 0xd5, 0xb5, 0xb8, 0x34, 0x63, 0xa3, 0x66,
+ 0xb4, 0x02, 0x79, 0x53, 0x80, 0x5c, 0xc5, 0xd7, 0x27, 0x82, 0xc4, 0x47, 0x08, 0x16, 0xaa, 0xbc,
+ 0x62, 0x7b, 0x7c, 0x33, 0xdd, 0xfa, 0x0d, 0x52, 0x3b, 0x5e, 0xc1, 0xeb, 0x09, 0x78, 0xbb, 0xb8,
+ 0xab, 0x85, 0x37, 0xa4, 0x0a, 0x65, 0x1a, 0x49, 0xa1, 0xe4, 0xe4, 0xf9, 0xd0, 0x37, 0xe1, 0x90,
+ 0x48, 0xc9, 0x2c, 0x39, 0xa4, 0xe1, 0x10, 0xbf, 0x44, 0xb0, 0x38, 0xa4, 0x42, 0xb8, 0x2e, 0xe4,
+ 0xc1, 0x02, 0x36, 0xeb, 0x27, 0xa8, 0x21, 0x3f, 0x10, 0x43, 0x76, 0xf0, 0xe6, 0xb4, 0x43, 0xe2,
+ 0xaf, 0x06, 0xb7, 0x22, 0xe5, 0x61, 0xe2, 0xad, 0x54, 0x54, 0x69, 0xe2, 0xad, 0x54, 0x75, 0xca,
+ 0x7a, 0x5b, 0xe0, 0xbc, 0x8c, 0x2f, 0x49, 0x9c, 0x03, 0x88, 0x52, 0x92, 0xf0, 0x8f, 0x08, 0x2e,
+ 0x6a, 0xb4, 0x06, 0xdf, 0x1e, 0xdb, 0x65, 0xbc, 0x78, 0x19, 0xef, 0x4f, 0x97, 0xa4, 0x10, 0x76,
+ 0x04, 0xc2, 0x75, 0x7c, 0x4b, 0xc7, 0xa4, 0x56, 0xe8, 0x38, 0xfe, 0x19, 0xc1, 0xb2, 0x5e, 0x8e,
+ 0xf0, 0xdd, 0xc9, 0x20, 0xb4, 0x67, 0x7e, 0x6f, 0xea, 0xbc, 0x3a, 0x92, 0x31, 0x4e, 0x11, 0xf9,
+ 0xf6, 0x93, 0x57, 0xc7, 0x26, 0x7a, 0x7d, 0x6c, 0xa2, 0xbf, 0x8e, 0x4d, 0xf4, 0xf5, 0x89, 0x39,
+ 0xf3, 0xfa, 0xc4, 0x9c, 0xf9, 0xfd, 0xc4, 0x9c, 0xf9, 0xe2, 0x4e, 0x10, 0xa6, 0x7b, 0x99, 0x67,
+ 0xfb, 0xac, 0x4f, 0xd4, 0x7f, 0x06, 0xa1, 0xe7, 0x6f, 0x04, 0x8c, 0xf4, 0x59, 0x37, 0xeb, 0x51,
+ 0x2e, 0x9b, 0x6c, 0x76, 0x36, 0x54, 0x9f, 0xf4, 0x20, 0xa6, 0xdc, 0x3b, 0x27, 0x54, 0xf5, 0xf6,
+ 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xf6, 0x5d, 0xec, 0x67, 0x82, 0x0c, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
@@ -793,6 +863,8 @@ type QueryClient interface {
ClientParams(ctx context.Context, in *QueryClientParamsRequest, opts ...grpc.CallOption) (*QueryClientParamsResponse, error)
// UpgradedClientState queries an Upgraded IBC light client.
UpgradedClientState(ctx context.Context, in *QueryUpgradedClientStateRequest, opts ...grpc.CallOption) (*QueryUpgradedClientStateResponse, error)
+ // UpgradedConsensusState queries an Upgraded IBC consensus state.
+ UpgradedConsensusState(ctx context.Context, in *QueryUpgradedConsensusStateRequest, opts ...grpc.CallOption) (*QueryUpgradedConsensusStateResponse, error)
}
type queryClient struct {
@@ -857,6 +929,15 @@ func (c *queryClient) UpgradedClientState(ctx context.Context, in *QueryUpgraded
return out, nil
}
+func (c *queryClient) UpgradedConsensusState(ctx context.Context, in *QueryUpgradedConsensusStateRequest, opts ...grpc.CallOption) (*QueryUpgradedConsensusStateResponse, error) {
+ out := new(QueryUpgradedConsensusStateResponse)
+ err := c.cc.Invoke(ctx, "/ibc.core.client.v1.Query/UpgradedConsensusState", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
// QueryServer is the server API for Query service.
type QueryServer interface {
// ClientState queries an IBC light client.
@@ -873,6 +954,8 @@ type QueryServer interface {
ClientParams(context.Context, *QueryClientParamsRequest) (*QueryClientParamsResponse, error)
// UpgradedClientState queries an Upgraded IBC light client.
UpgradedClientState(context.Context, *QueryUpgradedClientStateRequest) (*QueryUpgradedClientStateResponse, error)
+ // UpgradedConsensusState queries an Upgraded IBC consensus state.
+ UpgradedConsensusState(context.Context, *QueryUpgradedConsensusStateRequest) (*QueryUpgradedConsensusStateResponse, error)
}
// UnimplementedQueryServer can be embedded to have forward compatible implementations.
@@ -897,6 +980,9 @@ func (*UnimplementedQueryServer) ClientParams(ctx context.Context, req *QueryCli
func (*UnimplementedQueryServer) UpgradedClientState(ctx context.Context, req *QueryUpgradedClientStateRequest) (*QueryUpgradedClientStateResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method UpgradedClientState not implemented")
}
+func (*UnimplementedQueryServer) UpgradedConsensusState(ctx context.Context, req *QueryUpgradedConsensusStateRequest) (*QueryUpgradedConsensusStateResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method UpgradedConsensusState not implemented")
+}
func RegisterQueryServer(s grpc1.Server, srv QueryServer) {
s.RegisterService(&_Query_serviceDesc, srv)
@@ -1010,6 +1096,24 @@ func _Query_UpgradedClientState_Handler(srv interface{}, ctx context.Context, de
return interceptor(ctx, in, info, handler)
}
+func _Query_UpgradedConsensusState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(QueryUpgradedConsensusStateRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(QueryServer).UpgradedConsensusState(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/ibc.core.client.v1.Query/UpgradedConsensusState",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(QueryServer).UpgradedConsensusState(ctx, req.(*QueryUpgradedConsensusStateRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
var _Query_serviceDesc = grpc.ServiceDesc{
ServiceName: "ibc.core.client.v1.Query",
HandlerType: (*QueryServer)(nil),
@@ -1038,6 +1142,10 @@ var _Query_serviceDesc = grpc.ServiceDesc{
MethodName: "UpgradedClientState",
Handler: _Query_UpgradedClientState_Handler,
},
+ {
+ MethodName: "UpgradedConsensusState",
+ Handler: _Query_UpgradedConsensusState_Handler,
+ },
},
Streams: []grpc.StreamDesc{},
Metadata: "ibc/core/client/v1/query.proto",
@@ -1480,18 +1588,6 @@ func (m *QueryUpgradedClientStateRequest) MarshalToSizedBuffer(dAtA []byte) (int
_ = i
var l int
_ = l
- if m.PlanHeight != 0 {
- i = encodeVarintQuery(dAtA, i, uint64(m.PlanHeight))
- i--
- dAtA[i] = 0x10
- }
- if len(m.ClientId) > 0 {
- i -= len(m.ClientId)
- copy(dAtA[i:], m.ClientId)
- i = encodeVarintQuery(dAtA, i, uint64(len(m.ClientId)))
- i--
- dAtA[i] = 0xa
- }
return len(dAtA) - i, nil
}
@@ -1530,6 +1626,64 @@ func (m *QueryUpgradedClientStateResponse) MarshalToSizedBuffer(dAtA []byte) (in
return len(dAtA) - i, nil
}
+func (m *QueryUpgradedConsensusStateRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *QueryUpgradedConsensusStateRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *QueryUpgradedConsensusStateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ return len(dAtA) - i, nil
+}
+
+func (m *QueryUpgradedConsensusStateResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *QueryUpgradedConsensusStateResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *QueryUpgradedConsensusStateResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.UpgradedConsensusState != nil {
+ {
+ size, err := m.UpgradedConsensusState.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintQuery(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
func encodeVarintQuery(dAtA []byte, offset int, v uint64) int {
offset -= sovQuery(v)
base := offset
@@ -1710,13 +1864,6 @@ func (m *QueryUpgradedClientStateRequest) Size() (n int) {
}
var l int
_ = l
- l = len(m.ClientId)
- if l > 0 {
- n += 1 + l + sovQuery(uint64(l))
- }
- if m.PlanHeight != 0 {
- n += 1 + sovQuery(uint64(m.PlanHeight))
- }
return n
}
@@ -1733,6 +1880,28 @@ func (m *QueryUpgradedClientStateResponse) Size() (n int) {
return n
}
+func (m *QueryUpgradedConsensusStateRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ return n
+}
+
+func (m *QueryUpgradedConsensusStateResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.UpgradedConsensusState != nil {
+ l = m.UpgradedConsensusState.Size()
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ return n
+}
+
func sovQuery(x uint64) (n int) {
return (math_bits.Len64(x|1) + 6) / 7
}
@@ -2876,11 +3045,61 @@ func (m *QueryUpgradedClientStateRequest) Unmarshal(dAtA []byte) error {
return fmt.Errorf("proto: QueryUpgradedClientStateRequest: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipQuery(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *QueryUpgradedClientStateResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: QueryUpgradedClientStateResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: QueryUpgradedClientStateResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ClientId", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field UpgradedClientState", wireType)
}
- var stringLen uint64
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowQuery
@@ -2890,43 +3109,78 @@ func (m *QueryUpgradedClientStateRequest) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= uint64(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
+ if msglen < 0 {
return ErrInvalidLengthQuery
}
- postIndex := iNdEx + intStringLen
+ postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthQuery
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.ClientId = string(dAtA[iNdEx:postIndex])
+ if m.UpgradedClientState == nil {
+ m.UpgradedClientState = &types.Any{}
+ }
+ if err := m.UpgradedClientState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
iNdEx = postIndex
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field PlanHeight", wireType)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipQuery(dAtA[iNdEx:])
+ if err != nil {
+ return err
}
- m.PlanHeight = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowQuery
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.PlanHeight |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *QueryUpgradedConsensusStateRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
}
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: QueryUpgradedConsensusStateRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: QueryUpgradedConsensusStateRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
default:
iNdEx = preIndex
skippy, err := skipQuery(dAtA[iNdEx:])
@@ -2948,7 +3202,7 @@ func (m *QueryUpgradedClientStateRequest) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *QueryUpgradedClientStateResponse) Unmarshal(dAtA []byte) error {
+func (m *QueryUpgradedConsensusStateResponse) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -2971,15 +3225,15 @@ func (m *QueryUpgradedClientStateResponse) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: QueryUpgradedClientStateResponse: wiretype end group for non-group")
+ return fmt.Errorf("proto: QueryUpgradedConsensusStateResponse: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: QueryUpgradedClientStateResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: QueryUpgradedConsensusStateResponse: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field UpgradedClientState", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field UpgradedConsensusState", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -3006,10 +3260,10 @@ func (m *QueryUpgradedClientStateResponse) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if m.UpgradedClientState == nil {
- m.UpgradedClientState = &types.Any{}
+ if m.UpgradedConsensusState == nil {
+ m.UpgradedConsensusState = &types.Any{}
}
- if err := m.UpgradedClientState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ if err := m.UpgradedConsensusState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
diff --git a/modules/core/02-client/types/query.pb.gw.go b/modules/core/02-client/types/query.pb.gw.go
index f29a4fea..ed3df357 100644
--- a/modules/core/02-client/types/query.pb.gw.go
+++ b/modules/core/02-client/types/query.pb.gw.go
@@ -327,39 +327,10 @@ func local_request_Query_ClientParams_0(ctx context.Context, marshaler runtime.M
}
-var (
- filter_Query_UpgradedClientState_0 = &utilities.DoubleArray{Encoding: map[string]int{"client_id": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}}
-)
-
func request_Query_UpgradedClientState_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq QueryUpgradedClientStateRequest
var metadata runtime.ServerMetadata
- var (
- val string
- ok bool
- err error
- _ = err
- )
-
- val, ok = pathParams["client_id"]
- if !ok {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "client_id")
- }
-
- protoReq.ClientId, err = runtime.String(val)
-
- if err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "client_id", err)
- }
-
- if err := req.ParseForm(); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
- if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_UpgradedClientState_0); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
msg, err := client.UpgradedClientState(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
@@ -369,32 +340,25 @@ func local_request_Query_UpgradedClientState_0(ctx context.Context, marshaler ru
var protoReq QueryUpgradedClientStateRequest
var metadata runtime.ServerMetadata
- var (
- val string
- ok bool
- err error
- _ = err
- )
+ msg, err := server.UpgradedClientState(ctx, &protoReq)
+ return msg, metadata, err
- val, ok = pathParams["client_id"]
- if !ok {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "client_id")
- }
+}
- protoReq.ClientId, err = runtime.String(val)
+func request_Query_UpgradedConsensusState_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq QueryUpgradedConsensusStateRequest
+ var metadata runtime.ServerMetadata
- if err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "client_id", err)
- }
+ msg, err := client.UpgradedConsensusState(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
- if err := req.ParseForm(); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
- if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_UpgradedClientState_0); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
+}
- msg, err := server.UpgradedClientState(ctx, &protoReq)
+func local_request_Query_UpgradedConsensusState_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq QueryUpgradedConsensusStateRequest
+ var metadata runtime.ServerMetadata
+
+ msg, err := server.UpgradedConsensusState(ctx, &protoReq)
return msg, metadata, err
}
@@ -525,6 +489,26 @@ func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, serv
})
+ mux.Handle("GET", pattern_Query_UpgradedConsensusState_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Query_UpgradedConsensusState_0(rctx, inboundMarshaler, server, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Query_UpgradedConsensusState_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
return nil
}
@@ -686,6 +670,26 @@ func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, clie
})
+ mux.Handle("GET", pattern_Query_UpgradedConsensusState_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Query_UpgradedConsensusState_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Query_UpgradedConsensusState_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
return nil
}
@@ -700,7 +704,9 @@ var (
pattern_Query_ClientParams_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"ibc", "client", "v1", "params"}, "", runtime.AssumeColonVerbOpt(true)))
- pattern_Query_UpgradedClientState_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 1, 0, 4, 1, 5, 5}, []string{"ibc", "core", "client", "v1", "upgraded_client_states", "client_id"}, "", runtime.AssumeColonVerbOpt(true)))
+ pattern_Query_UpgradedClientState_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"ibc", "core", "client", "v1", "upgraded_client_states"}, "", runtime.AssumeColonVerbOpt(true)))
+
+ pattern_Query_UpgradedConsensusState_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"ibc", "core", "client", "v1", "upgraded_consensus_states"}, "", runtime.AssumeColonVerbOpt(true)))
)
var (
@@ -715,4 +721,6 @@ var (
forward_Query_ClientParams_0 = runtime.ForwardResponseMessage
forward_Query_UpgradedClientState_0 = runtime.ForwardResponseMessage
+
+ forward_Query_UpgradedConsensusState_0 = runtime.ForwardResponseMessage
)
diff --git a/modules/light-clients/07-tendermint/types/tendermint.pb.go b/modules/light-clients/07-tendermint/types/tendermint.pb.go
index 84a79b66..9a0645a4 100644
--- a/modules/light-clients/07-tendermint/types/tendermint.pb.go
+++ b/modules/light-clients/07-tendermint/types/tendermint.pb.go
@@ -11,10 +11,10 @@ import (
_ "github.com/gogo/protobuf/gogoproto"
proto "github.com/gogo/protobuf/proto"
github_com_gogo_protobuf_types "github.com/gogo/protobuf/types"
- _ "github.com/golang/protobuf/ptypes/duration"
- _ "github.com/golang/protobuf/ptypes/timestamp"
github_com_tendermint_tendermint_libs_bytes "github.com/tendermint/tendermint/libs/bytes"
types2 "github.com/tendermint/tendermint/proto/tendermint/types"
+ _ "google.golang.org/protobuf/types/known/durationpb"
+ _ "google.golang.org/protobuf/types/known/timestamppb"
io "io"
math "math"
math_bits "math/bits"
diff --git a/proto/ibc/core/client/v1/query.proto b/proto/ibc/core/client/v1/query.proto
index 915b6008..0aa988ba 100644
--- a/proto/ibc/core/client/v1/query.proto
+++ b/proto/ibc/core/client/v1/query.proto
@@ -51,7 +51,14 @@ service Query {
rpc UpgradedClientState(QueryUpgradedClientStateRequest)
returns (QueryUpgradedClientStateResponse) {
option (google.api.http).get =
- "/ibc/core/client/v1/upgraded_client_states/{client_id}";
+ "/ibc/core/client/v1/upgraded_client_states";
+ }
+
+ // UpgradedConsensusState queries an Upgraded IBC consensus state.
+ rpc UpgradedConsensusState(QueryUpgradedConsensusStateRequest)
+ returns (QueryUpgradedConsensusStateResponse) {
+ option (google.api.http).get =
+ "/ibc/core/client/v1/upgraded_consensus_states";
}
}
@@ -148,15 +155,10 @@ message QueryClientParamsResponse {
// params defines the parameters of the module.
Params params = 1;
}
+
// QueryUpgradedClientStateRequest is the request type for the
// Query/UpgradedClientState RPC method
-message QueryUpgradedClientStateRequest {
- // client state unique identifier
- string client_id = 1;
- // plan height of the current chain must be sent in request
- // as this is the height under which upgraded client state is stored
- int64 plan_height = 2;
-}
+message QueryUpgradedClientStateRequest { }
// QueryUpgradedClientStateResponse is the response type for the
// Query/UpgradedClientState RPC method.
@@ -164,3 +166,14 @@ message QueryUpgradedClientStateResponse {
// client state associated with the request identifier
google.protobuf.Any upgraded_client_state = 1;
}
+
+// QueryUpgradedConsensusStateRequest is the request type for the
+// Query/UpgradedConsensusState RPC method
+message QueryUpgradedConsensusStateRequest { }
+
+// QueryUpgradedConsensusStateResponse is the response type for the
+// Query/UpgradedConsensusState RPC method.
+message QueryUpgradedConsensusStateResponse {
+ // Consensus state associated with the request identifier
+ google.protobuf.Any upgraded_consensus_state = 1;
+}
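[editor's note] For context on the new query added above, here is a minimal sketch of calling it through the generated gRPC client once the service is registered. The node address and the use of `grpc.WithInsecure()` are illustrative assumptions; the request/response types come from the proto definitions in this patch.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"google.golang.org/grpc"

	clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
)

func main() {
	// Dial a node's gRPC endpoint (address/port are assumptions for illustration).
	conn, err := grpc.Dial("localhost:9090", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	queryClient := clienttypes.NewQueryClient(conn)

	// The new request type carries no fields; the node returns the consensus
	// state stored for a scheduled upgrade, if one exists.
	res, err := queryClient.UpgradedConsensusState(context.Background(), &clienttypes.QueryUpgradedConsensusStateRequest{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(res.UpgradedConsensusState)
}
```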
From 48af41240218bbe9cd5ee4f83e9e968bae5cb10a Mon Sep 17 00:00:00 2001
From: Marko
Date: Tue, 23 Mar 2021 11:36:01 +0000
Subject: [PATCH 023/393] add dependabot (#93)
---
.github/dependabot.yml | 18 ++++++++++++++++++
1 file changed, 18 insertions(+)
create mode 100644 .github/dependabot.yml
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
new file mode 100644
index 00000000..1fed7f0c
--- /dev/null
+++ b/.github/dependabot.yml
@@ -0,0 +1,18 @@
+version: 2
+updates:
+ - package-ecosystem: github-actions
+ directory: "/"
+ schedule:
+ interval: daily
+ open-pull-requests-limit: 10
+ - package-ecosystem: gomod
+ directory: "/"
+ schedule:
+ interval: daily
+ open-pull-requests-limit: 10
+ reviewers:
+ - colin-axner
+ - fedekunze
+ - AdityaSripal
+ labels:
+ - dependencies
From f5bc421aa360eb77274403e40ee108a098ad6357 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 23 Mar 2021 11:36:19 +0000
Subject: [PATCH 024/393] Bump codecov/codecov-action from v1.2.1 to v1.3.1
Bumps [codecov/codecov-action](https://github.com/codecov/codecov-action) from v1.2.1 to v1.3.1.
- [Release notes](https://github.com/codecov/codecov-action/releases)
- [Changelog](https://github.com/codecov/codecov-action/blob/master/CHANGELOG.md)
- [Commits](https://github.com/codecov/codecov-action/compare/v1.2.1...fcebab03f26c7530a22baa63f06b3e0515f0c7cd)
Signed-off-by: dependabot[bot]
---
.github/workflows/test.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 98308a96..b791a357 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -150,7 +150,7 @@ jobs:
sed -i.bak "/$(echo $filename | sed 's/\//\\\//g')/d" coverage.txt
done
if: env.GIT_DIFF
- - uses: codecov/codecov-action@v1.2.1
+ - uses: codecov/codecov-action@v1.3.1
with:
file: ./coverage.txt
if: env.GIT_DIFF
From 372eaa42a9082bcbeaca344fc41151ab95f8b5cd Mon Sep 17 00:00:00 2001
From: Marko
Date: Thu, 25 Mar 2021 10:58:30 +0000
Subject: [PATCH 025/393] ci: remove extra race ci tests (#94)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
* remove extra race ci tests
* go change to test ci
* fix ci
* test without -r
* test with -r
* change -r to -race
Co-authored-by: colin axnér <25233464+colin-axner@users.noreply.github.com>
---
.github/workflows/test.yml | 71 +-----------------------
Makefile | 12 ++--
modules/apps/transfer/keeper/encoding.go | 1 +
3 files changed, 8 insertions(+), 76 deletions(-)
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index b791a357..6abaeec6 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -101,7 +101,7 @@ jobs:
if: env.GIT_DIFF
- name: test & coverage report creation
run: |
- cat pkgs.txt.part.${{ matrix.part }} | xargs go test -mod=readonly -timeout 30m -coverprofile=${{ matrix.part }}profile.out -covermode=atomic -tags='norace ledger test_ledger_mock'
+ cat pkgs.txt.part.${{ matrix.part }} | xargs go test -race -mod=readonly -timeout 30m -coverprofile=${{ matrix.part }}profile.out -covermode=atomic -tags='ledger test_ledger_mock'
if: env.GIT_DIFF
- uses: actions/upload-artifact@v2
with:
@@ -154,72 +154,3 @@ jobs:
with:
file: ./coverage.txt
if: env.GIT_DIFF
-
- test-race:
- runs-on: ubuntu-latest
- needs: split-test-files
- strategy:
- fail-fast: false
- matrix:
- part: ["00", "01", "02", "03"]
- steps:
- - uses: actions/checkout@v2
- - uses: actions/setup-go@v2.1.3
- with:
- go-version: 1.15
- - uses: technote-space/get-diff-action@v4
- with:
- PATTERNS: |
- **/**.go
- go.mod
- go.sum
- - uses: actions/download-artifact@v2
- with:
- name: "${{ github.sha }}-${{ matrix.part }}"
- if: env.GIT_DIFF
- - name: test & coverage report creation
- run: |
- xargs --arg-file=pkgs.txt.part.${{ matrix.part }} go test -mod=readonly -json -timeout 30m -race -tags='cgo ledger test_ledger_mock' | tee ${{ matrix.part }}-race-output.txt
- if: env.GIT_DIFF
- - uses: actions/upload-artifact@v2
- with:
- name: "${{ github.sha }}-${{ matrix.part }}-race-output"
- path: ./${{ matrix.part }}-race-output.txt
-
- race-detector-report:
- runs-on: ubuntu-latest
- needs: [test-race, install-tparse]
- timeout-minutes: 5
- steps:
- - uses: actions/checkout@v2
- - uses: technote-space/get-diff-action@v4
- id: git_diff
- with:
- PATTERNS: |
- **/**.go
- go.mod
- go.sum
- - uses: actions/download-artifact@v2
- with:
- name: "${{ github.sha }}-00-race-output"
- if: env.GIT_DIFF
- - uses: actions/download-artifact@v2
- with:
- name: "${{ github.sha }}-01-race-output"
- if: env.GIT_DIFF
- - uses: actions/download-artifact@v2
- with:
- name: "${{ github.sha }}-02-race-output"
- if: env.GIT_DIFF
- - uses: actions/download-artifact@v2
- with:
- name: "${{ github.sha }}-03-race-output"
- if: env.GIT_DIFF
- - uses: actions/cache@v2.1.4
- with:
- path: ~/go/bin
- key: ${{ runner.os }}-go-tparse-binary
- if: env.GIT_DIFF
- - name: Generate test report (go test -race)
- run: cat ./*-race-output.txt | ~/go/bin/tparse
- if: env.GIT_DIFF
diff --git a/Makefile b/Makefile
index 8bf73ae6..69318533 100644
--- a/Makefile
+++ b/Makefile
@@ -216,10 +216,10 @@ TEST_TARGETS := test-unit test-unit-amino test-unit-proto test-ledger-mock test-
# Test runs-specific rules. To add a new test target, just add
# a new rule, customise ARGS or TEST_PACKAGES ad libitum, and
# append the new rule to the TEST_TARGETS list.
-test-unit: ARGS=-tags='cgo ledger test_ledger_mock norace'
-test-unit-amino: ARGS=-tags='ledger test_ledger_mock test_amino norace'
-test-ledger: ARGS=-tags='cgo ledger norace'
-test-ledger-mock: ARGS=-tags='ledger test_ledger_mock norace'
+test-unit: ARGS=-tags='cgo ledger test_ledger_mock'
+test-unit-amino: ARGS=-tags='ledger test_ledger_mock test_amino'
+test-ledger: ARGS=-tags='cgo ledger'
+test-ledger-mock: ARGS=-tags='ledger test_ledger_mock'
test-race: ARGS=-race -tags='cgo ledger test_ledger_mock'
test-race: TEST_PACKAGES=$(PACKAGES_NOSIMULATION)
$(TEST_TARGETS): run-tests
@@ -227,8 +227,8 @@ $(TEST_TARGETS): run-tests
# check-* compiles and collects tests without running them
# note: go test -c doesn't support multiple packages yet (https://github.com/golang/go/issues/15513)
CHECK_TEST_TARGETS := check-test-unit check-test-unit-amino
-check-test-unit: ARGS=-tags='cgo ledger test_ledger_mock norace'
-check-test-unit-amino: ARGS=-tags='ledger test_ledger_mock test_amino norace'
+check-test-unit: ARGS=-tags='cgo ledger test_ledger_mock'
+check-test-unit-amino: ARGS=-tags='ledger test_ledger_mock test_amino'
$(CHECK_TEST_TARGETS): EXTRA_ARGS=-run=none
$(CHECK_TEST_TARGETS): run-tests
diff --git a/modules/apps/transfer/keeper/encoding.go b/modules/apps/transfer/keeper/encoding.go
index ae0741a9..30e2ff4d 100644
--- a/modules/apps/transfer/keeper/encoding.go
+++ b/modules/apps/transfer/keeper/encoding.go
@@ -11,6 +11,7 @@ func (k Keeper) UnmarshalDenomTrace(bz []byte) (types.DenomTrace, error) {
if err := k.cdc.UnmarshalBinaryBare(bz, &denomTrace); err != nil {
return types.DenomTrace{}, err
}
+
return denomTrace, nil
}
From 07b6a97b67d17fd214a83764cbdb2c2c3daef445 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?colin=20axn=C3=A9r?=
<25233464+colin-axner@users.noreply.github.com>
Date: Tue, 30 Mar 2021 23:17:06 +0200
Subject: [PATCH 026/393] Add max length chain-id check on tendermint client
(#99)
* add max length chain-id check
ref: https://github.com/tendermint/tendermint/blob/3ed8f14bf973bb8cc3c262240a291f07d45b3f3f/types/block.go#L390
* update CHANGELOG
* Update modules/light-clients/07-tendermint/types/client_state_test.go
---
CHANGELOG.md | 1 +
modules/light-clients/07-tendermint/types/client_state.go | 5 +++++
.../light-clients/07-tendermint/types/client_state_test.go | 6 ++++++
3 files changed, 12 insertions(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index cf8e227a..234a38be 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -38,6 +38,7 @@ Ref: https://keepachangelog.com/en/1.0.0/
### State Machine Breaking
+* (modules/light-clients/07-tendermint) [\#99](https://github.com/cosmos/ibc-go/pull/99) Enforce maximum chain-id length for tendermint client.
* (modules/core/02-client) [\#8405](https://github.com/cosmos/cosmos-sdk/pull/8405) Refactor IBC client update governance proposals to use a substitute client to update a frozen or expired client.
* (modules/core/02-client) [\#8673](https://github.com/cosmos/cosmos-sdk/pull/8673) IBC upgrade logic moved to 02-client and an IBC UpgradeProposal is added.
diff --git a/modules/light-clients/07-tendermint/types/client_state.go b/modules/light-clients/07-tendermint/types/client_state.go
index 8a21ef9a..060150d9 100644
--- a/modules/light-clients/07-tendermint/types/client_state.go
+++ b/modules/light-clients/07-tendermint/types/client_state.go
@@ -6,6 +6,7 @@ import (
ics23 "github.com/confio/ics23/go"
"github.com/tendermint/tendermint/light"
+ tmtypes "github.com/tendermint/tendermint/types"
"github.com/cosmos/cosmos-sdk/codec"
sdk "github.com/cosmos/cosmos-sdk/types"
@@ -80,6 +81,10 @@ func (cs ClientState) Validate() error {
if strings.TrimSpace(cs.ChainId) == "" {
return sdkerrors.Wrap(ErrInvalidChainID, "chain id cannot be empty string")
}
+ if len(cs.ChainId) > tmtypes.MaxChainIDLen {
+ return sdkerrors.Wrapf(ErrInvalidChainID, "chainID is too long; got: %d, max: %d", len(cs.ChainId), tmtypes.MaxChainIDLen)
+ }
+
if err := light.ValidateTrustLevel(cs.TrustLevel.ToTendermint()); err != nil {
return err
}
diff --git a/modules/light-clients/07-tendermint/types/client_state_test.go b/modules/light-clients/07-tendermint/types/client_state_test.go
index feb1e7db..914851fa 100644
--- a/modules/light-clients/07-tendermint/types/client_state_test.go
+++ b/modules/light-clients/07-tendermint/types/client_state_test.go
@@ -21,6 +21,7 @@ const (
testPortID = "testportid"
testChannelID = "testchannelid"
testSequence = 1
+ longChainID = "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum."
)
var (
@@ -48,6 +49,11 @@ func (suite *TendermintTestSuite) TestValidate() {
clientState: types.NewClientState(" ", types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false),
expPass: false,
},
+ {
+ name: "invalid chainID - chainID is above maximum character length",
+ clientState: types.NewClientState(longChainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false),
+ expPass: false,
+ },
{
name: "invalid trust level",
clientState: types.NewClientState(chainID, types.Fraction{Numerator: 0, Denominator: 1}, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false),
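[editor's note] A minimal sketch of the new validation path added in this patch: building a tendermint client state whose chain-id exceeds tmtypes.MaxChainIDLen and expecting Validate to reject it. The durations, height, and upgrade path are placeholder values for illustration only.

```go
package main

import (
	"fmt"
	"strings"
	"time"

	tmtypes "github.com/tendermint/tendermint/types"

	clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
	commitmenttypes "github.com/cosmos/ibc-go/modules/core/23-commitment/types"
	ibctmtypes "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types"
)

func main() {
	// One character longer than tendermint's maximum chain-id length.
	longChainID := strings.Repeat("a", tmtypes.MaxChainIDLen+1)

	cs := ibctmtypes.NewClientState(
		longChainID,
		ibctmtypes.DefaultTrustLevel,
		2*time.Hour,     // trusting period (placeholder)
		3*time.Hour,     // unbonding period (placeholder)
		10*time.Second,  // max clock drift (placeholder)
		clienttypes.NewHeight(0, 1),
		commitmenttypes.GetSDKSpecs(),
		[]string{"upgrade", "upgradedIBCState"},
		false, false,
	)

	// Validate now fails with ErrInvalidChainID because the chain-id is too long.
	fmt.Println(cs.Validate())
}
```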
From e0d3c046b2b2a04fe1260b1e904ee402b8ad65b5 Mon Sep 17 00:00:00 2001
From: Calvin Lau <38898718+calvinaco@users.noreply.github.com>
Date: Fri, 2 Apr 2021 19:54:53 +0800
Subject: [PATCH 027/393] Update overview.md
---
docs/overview.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docs/overview.md b/docs/overview.md
index dc5cc9d7..a513c792 100644
--- a/docs/overview.md
+++ b/docs/overview.md
@@ -56,7 +56,7 @@ of a handshake, or a packet intended to be relayed to a module on the counterpar
process monitors for updates to these paths, and will relay messages, by submitting the data stored
under the path along with a proof to the counterparty chain. The paths that all IBC implementations
must use for committing IBC messages is defined in
-[ICS-24](https://github.com/cosmos/ics/tree/master/spec/ics-024-host-requirements) and the proof
+[ICS-24](https://github.com/cosmos/ics/tree/master/spec/core/ics-024-host-requirements) and the proof
format that all implementations must be able to produce and verify is defined in this [ICS-23 implementation](https://github.com/confio/ics23).
### [Capabilities](./ocap.md)
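[editor's note] For illustration of the ICS-24 paths referenced in the paragraph above, a relayer can compute the store key under which a chain commits an outgoing packet using the host helpers; the port, channel, and sequence below are placeholders.

```go
package main

import (
	"fmt"

	host "github.com/cosmos/ibc-go/modules/core/24-host"
)

func main() {
	// Key under which the sending chain commits an outgoing packet, following
	// the ICS-24 host requirements path layout.
	key := host.PacketCommitmentKey("transfer", "channel-0", 1)
	fmt.Println(string(key))
}
```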
From 58df13ca2847ed42a18666b337a4bd8ec0aac1eb Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 6 Apr 2021 13:20:11 +0200
Subject: [PATCH 028/393] Bump codecov/codecov-action from v1.3.1 to v1.3.2
(#104)
Bumps [codecov/codecov-action](https://github.com/codecov/codecov-action) from v1.3.1 to v1.3.2.
- [Release notes](https://github.com/codecov/codecov-action/releases)
- [Changelog](https://github.com/codecov/codecov-action/blob/master/CHANGELOG.md)
- [Commits](https://github.com/codecov/codecov-action/compare/v1.3.1...9b0b9bbe2c64e9ed41413180dd7398450dfeee14)
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
.github/workflows/test.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 6abaeec6..b2354e79 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -150,7 +150,7 @@ jobs:
sed -i.bak "/$(echo $filename | sed 's/\//\\\//g')/d" coverage.txt
done
if: env.GIT_DIFF
- - uses: codecov/codecov-action@v1.3.1
+ - uses: codecov/codecov-action@v1.3.2
with:
file: ./coverage.txt
if: env.GIT_DIFF
From d5cc991492e6db75e939f0b219dbd14a5fccdde7 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?colin=20axn=C3=A9r?=
<25233464+colin-axner@users.noreply.github.com>
Date: Mon, 12 Apr 2021 12:01:16 +0200
Subject: [PATCH 029/393] Modify all message constructors to take in a string
for address (#108)
* Modify all message constructors to take in a string for address
All NewMsg() constructors take the signer as a string. This prevents bugs at the caller level, since String() on an SDK account address depends on external context
* add changelog
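[editor's note] As a usage sketch under the new signatures (the port, channel, denom, and bech32 addresses below are illustrative), callers now pass strings directly and GetSigners parses them back into account addresses:

```go
package main

import (
	"fmt"

	sdk "github.com/cosmos/cosmos-sdk/types"

	transfertypes "github.com/cosmos/ibc-go/modules/apps/transfer/types"
	clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
)

func main() {
	sender := "cosmos1ckgw5d7jfj7wwxjzs9fdrdev9vc8dzcw3n2lht"   // bech32 string, no sdk.AccAddress needed
	receiver := "cosmos1ckgw5d7jfj7wwxjzs9fdrdev9vc8dzcw3n2lht" // illustrative

	msg := transfertypes.NewMsgTransfer(
		"transfer", "channel-0",
		sdk.NewCoin("uatom", sdk.NewInt(100)),
		sender, receiver,
		clienttypes.NewHeight(0, 110), 0,
	)

	// GetSigners still returns sdk.AccAddress values; it parses the bech32
	// string and panics on malformed input.
	fmt.Println(msg.GetSigners()[0].String())
}
```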
---
CHANGELOG.md | 4 ++
modules/apps/transfer/client/cli/tx.go | 2 +-
modules/apps/transfer/handler_test.go | 6 +--
modules/apps/transfer/keeper/relay_test.go | 8 +--
modules/apps/transfer/types/msgs.go | 8 +--
modules/apps/transfer/types/msgs_test.go | 10 ++--
modules/apps/transfer/types/packet_test.go | 10 ++--
modules/core/02-client/client/cli/tx.go | 8 +--
modules/core/02-client/keeper/client_test.go | 4 +-
modules/core/02-client/types/msgs.go | 16 +++---
modules/core/02-client/types/msgs_test.go | 52 +++++++++----------
modules/core/03-connection/client/cli/tx.go | 8 +--
modules/core/03-connection/types/msgs.go | 16 +++---
modules/core/03-connection/types/msgs_test.go | 24 ++++-----
modules/core/04-channel/client/cli/tx.go | 12 ++---
modules/core/04-channel/types/msgs.go | 40 +++++++-------
modules/core/04-channel/types/msgs_test.go | 10 ++--
modules/core/keeper/msg_server_test.go | 14 ++---
testing/chain.go | 22 ++++----
testing/coordinator.go | 4 +-
20 files changed, 140 insertions(+), 138 deletions(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 234a38be..f05c56b8 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -36,6 +36,10 @@ Ref: https://keepachangelog.com/en/1.0.0/
## [Unreleased]
+## API Breaking
+
+* (modules) [\#108](https://github.com/cosmos/ibc-go/pull/108) All message constructors take the signer as a string to prevent upstream bugs. The `String()` function for an SDK Acc Address relies on external context.
+
### State Machine Breaking
* (modules/light-clients/07-tendermint) [\#99](https://github.com/cosmos/ibc-go/pull/99) Enforce maximum chain-id length for tendermint client.
diff --git a/modules/apps/transfer/client/cli/tx.go b/modules/apps/transfer/client/cli/tx.go
index 0efba3b4..c1cbc9f9 100644
--- a/modules/apps/transfer/client/cli/tx.go
+++ b/modules/apps/transfer/client/cli/tx.go
@@ -40,7 +40,7 @@ to the counterparty channel. Any timeout set to 0 is disabled.`),
if err != nil {
return err
}
- sender := clientCtx.GetFromAddress()
+ sender := clientCtx.GetFromAddress().String()
srcPort := args[0]
srcChannel := args[1]
receiver := args[2]
diff --git a/modules/apps/transfer/handler_test.go b/modules/apps/transfer/handler_test.go
index 5d4d95d7..03129ba9 100644
--- a/modules/apps/transfer/handler_test.go
+++ b/modules/apps/transfer/handler_test.go
@@ -43,7 +43,7 @@ func (suite *TransferTestSuite) TestHandleMsgTransfer() {
coinToSendToB := sdk.NewCoin(sdk.DefaultBondDenom, sdk.NewInt(100))
// send from chainA to chainB
- msg := types.NewMsgTransfer(channelA.PortID, channelA.ID, coinToSendToB, suite.chainA.SenderAccount.GetAddress(), suite.chainB.SenderAccount.GetAddress().String(), timeoutHeight, 0)
+ msg := types.NewMsgTransfer(channelA.PortID, channelA.ID, coinToSendToB, suite.chainA.SenderAccount.GetAddress().String(), suite.chainB.SenderAccount.GetAddress().String(), timeoutHeight, 0)
err := suite.coordinator.SendMsg(suite.chainA, suite.chainB, clientB, msg)
suite.Require().NoError(err) // message committed
@@ -67,7 +67,7 @@ func (suite *TransferTestSuite) TestHandleMsgTransfer() {
channelOnBForC, channelOnCForB := suite.coordinator.CreateTransferChannels(suite.chainB, suite.chainC, connOnBForC, connOnCForB, channeltypes.UNORDERED)
// send from chainB to chainC
- msg = types.NewMsgTransfer(channelOnBForC.PortID, channelOnBForC.ID, coinSentFromAToB, suite.chainB.SenderAccount.GetAddress(), suite.chainC.SenderAccount.GetAddress().String(), timeoutHeight, 0)
+ msg = types.NewMsgTransfer(channelOnBForC.PortID, channelOnBForC.ID, coinSentFromAToB, suite.chainB.SenderAccount.GetAddress().String(), suite.chainC.SenderAccount.GetAddress().String(), timeoutHeight, 0)
err = suite.coordinator.SendMsg(suite.chainB, suite.chainC, clientOnCForB, msg)
suite.Require().NoError(err) // message committed
@@ -91,7 +91,7 @@ func (suite *TransferTestSuite) TestHandleMsgTransfer() {
suite.Require().Zero(balance.Amount.Int64())
// send from chainC back to chainB
- msg = types.NewMsgTransfer(channelOnCForB.PortID, channelOnCForB.ID, coinSentFromBToC, suite.chainC.SenderAccount.GetAddress(), suite.chainB.SenderAccount.GetAddress().String(), timeoutHeight, 0)
+ msg = types.NewMsgTransfer(channelOnCForB.PortID, channelOnCForB.ID, coinSentFromBToC, suite.chainC.SenderAccount.GetAddress().String(), suite.chainB.SenderAccount.GetAddress().String(), timeoutHeight, 0)
err = suite.coordinator.SendMsg(suite.chainC, suite.chainB, clientOnBForC, msg)
suite.Require().NoError(err) // message committed
diff --git a/modules/apps/transfer/keeper/relay_test.go b/modules/apps/transfer/keeper/relay_test.go
index 2f754e60..e67d2f8a 100644
--- a/modules/apps/transfer/keeper/relay_test.go
+++ b/modules/apps/transfer/keeper/relay_test.go
@@ -103,7 +103,7 @@ func (suite *KeeperTestSuite) TestSendTransfer() {
if !tc.sendFromSource {
// send coin from chainB to chainA
coinFromBToA := sdk.NewCoin(sdk.DefaultBondDenom, sdk.NewInt(100))
- transferMsg := types.NewMsgTransfer(channelB.PortID, channelB.ID, coinFromBToA, suite.chainB.SenderAccount.GetAddress(), suite.chainA.SenderAccount.GetAddress().String(), clienttypes.NewHeight(0, 110), 0)
+ transferMsg := types.NewMsgTransfer(channelB.PortID, channelB.ID, coinFromBToA, suite.chainB.SenderAccount.GetAddress().String(), suite.chainA.SenderAccount.GetAddress().String(), clienttypes.NewHeight(0, 110), 0)
err = suite.coordinator.SendMsg(suite.chainB, suite.chainA, channelA.ClientID, transferMsg)
suite.Require().NoError(err) // message committed
@@ -115,7 +115,7 @@ func (suite *KeeperTestSuite) TestSendTransfer() {
packetKey := host.PacketCommitmentKey(packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence())
proof, proofHeight := suite.chainB.QueryProof(packetKey)
- recvMsg := channeltypes.NewMsgRecvPacket(packet, proof, proofHeight, suite.chainA.SenderAccount.GetAddress())
+ recvMsg := channeltypes.NewMsgRecvPacket(packet, proof, proofHeight, suite.chainA.SenderAccount.GetAddress().String())
err = suite.coordinator.SendMsg(suite.chainA, suite.chainB, channelB.ClientID, recvMsg)
suite.Require().NoError(err) // message committed
}
@@ -190,7 +190,7 @@ func (suite *KeeperTestSuite) TestOnRecvPacket() {
if tc.recvIsSource {
// send coin from chainB to chainA, receive them, acknowledge them, and send back to chainB
coinFromBToA := sdk.NewCoin(sdk.DefaultBondDenom, sdk.NewInt(100))
- transferMsg := types.NewMsgTransfer(channelB.PortID, channelB.ID, coinFromBToA, suite.chainB.SenderAccount.GetAddress(), suite.chainA.SenderAccount.GetAddress().String(), clienttypes.NewHeight(0, 110), 0)
+ transferMsg := types.NewMsgTransfer(channelB.PortID, channelB.ID, coinFromBToA, suite.chainB.SenderAccount.GetAddress().String(), suite.chainA.SenderAccount.GetAddress().String(), clienttypes.NewHeight(0, 110), 0)
err := suite.coordinator.SendMsg(suite.chainB, suite.chainA, channelA.ClientID, transferMsg)
suite.Require().NoError(err) // message committed
@@ -210,7 +210,7 @@ func (suite *KeeperTestSuite) TestOnRecvPacket() {
}
// send coin from chainA to chainB
- transferMsg := types.NewMsgTransfer(channelA.PortID, channelA.ID, sdk.NewCoin(trace.IBCDenom(), amount), suite.chainA.SenderAccount.GetAddress(), receiver, clienttypes.NewHeight(0, 110), 0)
+ transferMsg := types.NewMsgTransfer(channelA.PortID, channelA.ID, sdk.NewCoin(trace.IBCDenom(), amount), suite.chainA.SenderAccount.GetAddress().String(), receiver, clienttypes.NewHeight(0, 110), 0)
err := suite.coordinator.SendMsg(suite.chainA, suite.chainB, channelB.ClientID, transferMsg)
suite.Require().NoError(err) // message committed
diff --git a/modules/apps/transfer/types/msgs.go b/modules/apps/transfer/types/msgs.go
index 6985e3b9..b45dca98 100644
--- a/modules/apps/transfer/types/msgs.go
+++ b/modules/apps/transfer/types/msgs.go
@@ -18,14 +18,14 @@ const (
//nolint:interfacer
func NewMsgTransfer(
sourcePort, sourceChannel string,
- token sdk.Coin, sender sdk.AccAddress, receiver string,
+ token sdk.Coin, sender, receiver string,
timeoutHeight clienttypes.Height, timeoutTimestamp uint64,
) *MsgTransfer {
return &MsgTransfer{
SourcePort: sourcePort,
SourceChannel: sourceChannel,
Token: token,
- Sender: sender.String(),
+ Sender: sender,
Receiver: receiver,
TimeoutHeight: timeoutHeight,
TimeoutTimestamp: timeoutTimestamp,
@@ -77,9 +77,9 @@ func (msg MsgTransfer) GetSignBytes() []byte {
// GetSigners implements sdk.Msg
func (msg MsgTransfer) GetSigners() []sdk.AccAddress {
- valAddr, err := sdk.AccAddressFromBech32(msg.Sender)
+ signer, err := sdk.AccAddressFromBech32(msg.Sender)
if err != nil {
panic(err)
}
- return []sdk.AccAddress{valAddr}
+ return []sdk.AccAddress{signer}
}
diff --git a/modules/apps/transfer/types/msgs_test.go b/modules/apps/transfer/types/msgs_test.go
index 2d24438f..6e63b5ed 100644
--- a/modules/apps/transfer/types/msgs_test.go
+++ b/modules/apps/transfer/types/msgs_test.go
@@ -25,9 +25,9 @@ const (
)
var (
- addr1 = sdk.AccAddress(secp256k1.GenPrivKey().PubKey().Address())
+ addr1 = sdk.AccAddress(secp256k1.GenPrivKey().PubKey().Address()).String()
addr2 = sdk.AccAddress("testaddr2").String()
- emptyAddr sdk.AccAddress
+ emptyAddr string
coin = sdk.NewCoin("atom", sdk.NewInt(100))
ibcCoin = sdk.NewCoin("ibc/7F1D3FCF4AE79E1554D670D1AD949A9BA4E4A3C76C63093E17E446A46061A7A2", sdk.NewInt(100))
@@ -96,8 +96,10 @@ func TestMsgTransferValidation(t *testing.T) {
// TestMsgTransferGetSigners tests GetSigners for MsgTransfer
func TestMsgTransferGetSigners(t *testing.T) {
- msg := NewMsgTransfer(validPort, validChannel, coin, addr1, addr2, timeoutHeight, 0)
+ addr := sdk.AccAddress(secp256k1.GenPrivKey().PubKey().Address())
+
+ msg := NewMsgTransfer(validPort, validChannel, coin, addr.String(), addr2, timeoutHeight, 0)
res := msg.GetSigners()
- require.Equal(t, []sdk.AccAddress{addr1}, res)
+ require.Equal(t, []sdk.AccAddress{addr}, res)
}
diff --git a/modules/apps/transfer/types/packet_test.go b/modules/apps/transfer/types/packet_test.go
index 1edcb093..6b16095b 100644
--- a/modules/apps/transfer/types/packet_test.go
+++ b/modules/apps/transfer/types/packet_test.go
@@ -18,11 +18,11 @@ func TestFungibleTokenPacketDataValidateBasic(t *testing.T) {
packetData FungibleTokenPacketData
expPass bool
}{
- {"valid packet", NewFungibleTokenPacketData(denom, amount, addr1.String(), addr2), true},
- {"invalid denom", NewFungibleTokenPacketData("", amount, addr1.String(), addr2), false},
- {"invalid amount", NewFungibleTokenPacketData(denom, 0, addr1.String(), addr2), false},
- {"missing sender address", NewFungibleTokenPacketData(denom, amount, emptyAddr.String(), addr2), false},
- {"missing recipient address", NewFungibleTokenPacketData(denom, amount, addr1.String(), emptyAddr.String()), false},
+ {"valid packet", NewFungibleTokenPacketData(denom, amount, addr1, addr2), true},
+ {"invalid denom", NewFungibleTokenPacketData("", amount, addr1, addr2), false},
+ {"invalid amount", NewFungibleTokenPacketData(denom, 0, addr1, addr2), false},
+ {"missing sender address", NewFungibleTokenPacketData(denom, amount, emptyAddr, addr2), false},
+ {"missing recipient address", NewFungibleTokenPacketData(denom, amount, addr1, emptyAddr), false},
}
for i, tc := range testCases {
diff --git a/modules/core/02-client/client/cli/tx.go b/modules/core/02-client/client/cli/tx.go
index 78a66012..64d7b97f 100644
--- a/modules/core/02-client/client/cli/tx.go
+++ b/modules/core/02-client/client/cli/tx.go
@@ -71,7 +71,7 @@ func NewCreateClientCmd() *cobra.Command {
}
}
- msg, err := types.NewMsgCreateClient(clientState, consensusState, clientCtx.GetFromAddress())
+ msg, err := types.NewMsgCreateClient(clientState, consensusState, clientCtx.GetFromAddress().String())
if err != nil {
return err
}
@@ -124,7 +124,7 @@ func NewUpdateClientCmd() *cobra.Command {
}
}
- msg, err := types.NewMsgUpdateClient(clientID, header, clientCtx.GetFromAddress())
+ msg, err := types.NewMsgUpdateClient(clientID, header, clientCtx.GetFromAddress().String())
if err != nil {
return err
}
@@ -172,7 +172,7 @@ func NewSubmitMisbehaviourCmd() *cobra.Command {
}
}
- msg, err := types.NewMsgSubmitMisbehaviour(misbehaviour.GetClientID(), misbehaviour, clientCtx.GetFromAddress())
+ msg, err := types.NewMsgSubmitMisbehaviour(misbehaviour.GetClientID(), misbehaviour, clientCtx.GetFromAddress().String())
if err != nil {
return err
}
@@ -242,7 +242,7 @@ func NewUpgradeClientCmd() *cobra.Command {
proofUpgradeClient := []byte(args[3])
proofUpgradeConsensus := []byte(args[4])
- msg, err := types.NewMsgUpgradeClient(clientID, clientState, consensusState, proofUpgradeClient, proofUpgradeConsensus, clientCtx.GetFromAddress())
+ msg, err := types.NewMsgUpgradeClient(clientID, clientState, consensusState, proofUpgradeClient, proofUpgradeConsensus, clientCtx.GetFromAddress().String())
if err != nil {
return err
}
diff --git a/modules/core/02-client/keeper/client_test.go b/modules/core/02-client/keeper/client_test.go
index 21002d17..69e0953b 100644
--- a/modules/core/02-client/keeper/client_test.go
+++ b/modules/core/02-client/keeper/client_test.go
@@ -7,6 +7,7 @@ import (
tmtypes "github.com/tendermint/tendermint/types"
+ upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
"github.com/cosmos/ibc-go/modules/core/02-client/types"
clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
commitmenttypes "github.com/cosmos/ibc-go/modules/core/23-commitment/types"
@@ -15,7 +16,6 @@ import (
localhosttypes "github.com/cosmos/ibc-go/modules/light-clients/09-localhost/types"
ibctesting "github.com/cosmos/ibc-go/testing"
ibctestingmock "github.com/cosmos/ibc-go/testing/mock"
- upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
)
func (suite *KeeperTestSuite) TestCreateClient() {
@@ -598,7 +598,7 @@ func (suite *KeeperTestSuite) TestUpdateClientEventEmission() {
msg, err := clienttypes.NewMsgUpdateClient(
clientID, header,
- suite.chainA.SenderAccount.GetAddress(),
+ suite.chainA.SenderAccount.GetAddress().String(),
)
result, err := suite.chainA.SendMsgs(msg)
diff --git a/modules/core/02-client/types/msgs.go b/modules/core/02-client/types/msgs.go
index bc17c675..46538c95 100644
--- a/modules/core/02-client/types/msgs.go
+++ b/modules/core/02-client/types/msgs.go
@@ -31,7 +31,7 @@ var (
// NewMsgCreateClient creates a new MsgCreateClient instance
//nolint:interfacer
func NewMsgCreateClient(
- clientState exported.ClientState, consensusState exported.ConsensusState, signer sdk.AccAddress,
+ clientState exported.ClientState, consensusState exported.ConsensusState, signer string,
) (*MsgCreateClient, error) {
anyClientState, err := PackClientState(clientState)
@@ -47,7 +47,7 @@ func NewMsgCreateClient(
return &MsgCreateClient{
ClientState: anyClientState,
ConsensusState: anyConsensusState,
- Signer: signer.String(),
+ Signer: signer,
}, nil
}
@@ -119,7 +119,7 @@ func (msg MsgCreateClient) UnpackInterfaces(unpacker codectypes.AnyUnpacker) err
// NewMsgUpdateClient creates a new MsgUpdateClient instance
//nolint:interfacer
-func NewMsgUpdateClient(id string, header exported.Header, signer sdk.AccAddress) (*MsgUpdateClient, error) {
+func NewMsgUpdateClient(id string, header exported.Header, signer string) (*MsgUpdateClient, error) {
anyHeader, err := PackHeader(header)
if err != nil {
return nil, err
@@ -128,7 +128,7 @@ func NewMsgUpdateClient(id string, header exported.Header, signer sdk.AccAddress
return &MsgUpdateClient{
ClientId: id,
Header: anyHeader,
- Signer: signer.String(),
+ Signer: signer,
}, nil
}
@@ -185,7 +185,7 @@ func (msg MsgUpdateClient) UnpackInterfaces(unpacker codectypes.AnyUnpacker) err
// NewMsgUpgradeClient creates a new MsgUpgradeClient instance
// nolint: interfacer
func NewMsgUpgradeClient(clientID string, clientState exported.ClientState, consState exported.ConsensusState,
- proofUpgradeClient, proofUpgradeConsState []byte, signer sdk.AccAddress) (*MsgUpgradeClient, error) {
+ proofUpgradeClient, proofUpgradeConsState []byte, signer string) (*MsgUpgradeClient, error) {
anyClient, err := PackClientState(clientState)
if err != nil {
return nil, err
@@ -201,7 +201,7 @@ func NewMsgUpgradeClient(clientID string, clientState exported.ClientState, cons
ConsensusState: anyConsState,
ProofUpgradeClient: proofUpgradeClient,
ProofUpgradeConsensusState: proofUpgradeConsState,
- Signer: signer.String(),
+ Signer: signer,
}, nil
}
@@ -276,7 +276,7 @@ func (msg MsgUpgradeClient) UnpackInterfaces(unpacker codectypes.AnyUnpacker) er
// NewMsgSubmitMisbehaviour creates a new MsgSubmitMisbehaviour instance.
//nolint:interfacer
-func NewMsgSubmitMisbehaviour(clientID string, misbehaviour exported.Misbehaviour, signer sdk.AccAddress) (*MsgSubmitMisbehaviour, error) {
+func NewMsgSubmitMisbehaviour(clientID string, misbehaviour exported.Misbehaviour, signer string) (*MsgSubmitMisbehaviour, error) {
anyMisbehaviour, err := PackMisbehaviour(misbehaviour)
if err != nil {
return nil, err
@@ -285,7 +285,7 @@ func NewMsgSubmitMisbehaviour(clientID string, misbehaviour exported.Misbehaviou
return &MsgSubmitMisbehaviour{
ClientId: clientID,
Misbehaviour: anyMisbehaviour,
- Signer: signer.String(),
+ Signer: signer,
}, nil
}
diff --git a/modules/core/02-client/types/msgs_test.go b/modules/core/02-client/types/msgs_test.go
index 9019f133..7efc07b4 100644
--- a/modules/core/02-client/types/msgs_test.go
+++ b/modules/core/02-client/types/msgs_test.go
@@ -49,14 +49,14 @@ func (suite *TypesTestSuite) TestMarshalMsgCreateClient() {
{
"solo machine client", func() {
soloMachine := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "solomachine", "", 2)
- msg, err = types.NewMsgCreateClient(soloMachine.ClientState(), soloMachine.ConsensusState(), suite.chainA.SenderAccount.GetAddress())
+ msg, err = types.NewMsgCreateClient(soloMachine.ClientState(), soloMachine.ConsensusState(), suite.chainA.SenderAccount.GetAddress().String())
suite.Require().NoError(err)
},
},
{
"tendermint client", func() {
tendermintClient := ibctmtypes.NewClientState(suite.chainA.ChainID, ibctesting.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false)
- msg, err = types.NewMsgCreateClient(tendermintClient, suite.chainA.CurrentTMClientHeader().ConsensusState(), suite.chainA.SenderAccount.GetAddress())
+ msg, err = types.NewMsgCreateClient(tendermintClient, suite.chainA.CurrentTMClientHeader().ConsensusState(), suite.chainA.SenderAccount.GetAddress().String())
suite.Require().NoError(err)
},
},
@@ -101,7 +101,7 @@ func (suite *TypesTestSuite) TestMsgCreateClient_ValidateBasic() {
"valid - tendermint client",
func() {
tendermintClient := ibctmtypes.NewClientState(suite.chainA.ChainID, ibctesting.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false)
- msg, err = types.NewMsgCreateClient(tendermintClient, suite.chainA.CurrentTMClientHeader().ConsensusState(), suite.chainA.SenderAccount.GetAddress())
+ msg, err = types.NewMsgCreateClient(tendermintClient, suite.chainA.CurrentTMClientHeader().ConsensusState(), suite.chainA.SenderAccount.GetAddress().String())
suite.Require().NoError(err)
},
true,
@@ -109,7 +109,7 @@ func (suite *TypesTestSuite) TestMsgCreateClient_ValidateBasic() {
{
"invalid tendermint client",
func() {
- msg, err = types.NewMsgCreateClient(&ibctmtypes.ClientState{}, suite.chainA.CurrentTMClientHeader().ConsensusState(), suite.chainA.SenderAccount.GetAddress())
+ msg, err = types.NewMsgCreateClient(&ibctmtypes.ClientState{}, suite.chainA.CurrentTMClientHeader().ConsensusState(), suite.chainA.SenderAccount.GetAddress().String())
suite.Require().NoError(err)
},
false,
@@ -125,7 +125,7 @@ func (suite *TypesTestSuite) TestMsgCreateClient_ValidateBasic() {
"failed to unpack consensus state",
func() {
tendermintClient := ibctmtypes.NewClientState(suite.chainA.ChainID, ibctesting.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false)
- msg, err = types.NewMsgCreateClient(tendermintClient, suite.chainA.CurrentTMClientHeader().ConsensusState(), suite.chainA.SenderAccount.GetAddress())
+ msg, err = types.NewMsgCreateClient(tendermintClient, suite.chainA.CurrentTMClientHeader().ConsensusState(), suite.chainA.SenderAccount.GetAddress().String())
suite.Require().NoError(err)
msg.ConsensusState = nil
},
@@ -142,7 +142,7 @@ func (suite *TypesTestSuite) TestMsgCreateClient_ValidateBasic() {
"valid - solomachine client",
func() {
soloMachine := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "solomachine", "", 2)
- msg, err = types.NewMsgCreateClient(soloMachine.ClientState(), soloMachine.ConsensusState(), suite.chainA.SenderAccount.GetAddress())
+ msg, err = types.NewMsgCreateClient(soloMachine.ClientState(), soloMachine.ConsensusState(), suite.chainA.SenderAccount.GetAddress().String())
suite.Require().NoError(err)
},
true,
@@ -151,7 +151,7 @@ func (suite *TypesTestSuite) TestMsgCreateClient_ValidateBasic() {
"invalid solomachine client",
func() {
soloMachine := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "solomachine", "", 2)
- msg, err = types.NewMsgCreateClient(&solomachinetypes.ClientState{}, soloMachine.ConsensusState(), suite.chainA.SenderAccount.GetAddress())
+ msg, err = types.NewMsgCreateClient(&solomachinetypes.ClientState{}, soloMachine.ConsensusState(), suite.chainA.SenderAccount.GetAddress().String())
suite.Require().NoError(err)
},
false,
@@ -160,7 +160,7 @@ func (suite *TypesTestSuite) TestMsgCreateClient_ValidateBasic() {
"invalid solomachine consensus state",
func() {
soloMachine := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "solomachine", "", 2)
- msg, err = types.NewMsgCreateClient(soloMachine.ClientState(), &solomachinetypes.ConsensusState{}, suite.chainA.SenderAccount.GetAddress())
+ msg, err = types.NewMsgCreateClient(soloMachine.ClientState(), &solomachinetypes.ConsensusState{}, suite.chainA.SenderAccount.GetAddress().String())
suite.Require().NoError(err)
},
false,
@@ -170,7 +170,7 @@ func (suite *TypesTestSuite) TestMsgCreateClient_ValidateBasic() {
func() {
tendermintClient := ibctmtypes.NewClientState(suite.chainA.ChainID, ibctesting.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false)
soloMachine := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "solomachine", "", 2)
- msg, err = types.NewMsgCreateClient(tendermintClient, soloMachine.ConsensusState(), suite.chainA.SenderAccount.GetAddress())
+ msg, err = types.NewMsgCreateClient(tendermintClient, soloMachine.ConsensusState(), suite.chainA.SenderAccount.GetAddress().String())
suite.Require().NoError(err)
},
false,
@@ -203,13 +203,13 @@ func (suite *TypesTestSuite) TestMarshalMsgUpdateClient() {
{
"solo machine client", func() {
soloMachine := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "solomachine", "", 2)
- msg, err = types.NewMsgUpdateClient(soloMachine.ClientID, soloMachine.CreateHeader(), suite.chainA.SenderAccount.GetAddress())
+ msg, err = types.NewMsgUpdateClient(soloMachine.ClientID, soloMachine.CreateHeader(), suite.chainA.SenderAccount.GetAddress().String())
suite.Require().NoError(err)
},
},
{
"tendermint client", func() {
- msg, err = types.NewMsgUpdateClient("tendermint", suite.chainA.CurrentTMClientHeader(), suite.chainA.SenderAccount.GetAddress())
+ msg, err = types.NewMsgUpdateClient("tendermint", suite.chainA.CurrentTMClientHeader(), suite.chainA.SenderAccount.GetAddress().String())
suite.Require().NoError(err)
},
@@ -261,7 +261,7 @@ func (suite *TypesTestSuite) TestMsgUpdateClient_ValidateBasic() {
{
"valid - tendermint header",
func() {
- msg, err = types.NewMsgUpdateClient("tendermint", suite.chainA.CurrentTMClientHeader(), suite.chainA.SenderAccount.GetAddress())
+ msg, err = types.NewMsgUpdateClient("tendermint", suite.chainA.CurrentTMClientHeader(), suite.chainA.SenderAccount.GetAddress().String())
suite.Require().NoError(err)
},
true,
@@ -269,7 +269,7 @@ func (suite *TypesTestSuite) TestMsgUpdateClient_ValidateBasic() {
{
"invalid tendermint header",
func() {
- msg, err = types.NewMsgUpdateClient("tendermint", &ibctmtypes.Header{}, suite.chainA.SenderAccount.GetAddress())
+ msg, err = types.NewMsgUpdateClient("tendermint", &ibctmtypes.Header{}, suite.chainA.SenderAccount.GetAddress().String())
suite.Require().NoError(err)
},
false,
@@ -292,7 +292,7 @@ func (suite *TypesTestSuite) TestMsgUpdateClient_ValidateBasic() {
"valid - solomachine header",
func() {
soloMachine := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "solomachine", "", 2)
- msg, err = types.NewMsgUpdateClient(soloMachine.ClientID, soloMachine.CreateHeader(), suite.chainA.SenderAccount.GetAddress())
+ msg, err = types.NewMsgUpdateClient(soloMachine.ClientID, soloMachine.CreateHeader(), suite.chainA.SenderAccount.GetAddress().String())
suite.Require().NoError(err)
},
true,
@@ -300,7 +300,7 @@ func (suite *TypesTestSuite) TestMsgUpdateClient_ValidateBasic() {
{
"invalid solomachine header",
func() {
- msg, err = types.NewMsgUpdateClient("solomachine", &solomachinetypes.Header{}, suite.chainA.SenderAccount.GetAddress())
+ msg, err = types.NewMsgUpdateClient("solomachine", &solomachinetypes.Header{}, suite.chainA.SenderAccount.GetAddress().String())
suite.Require().NoError(err)
},
false,
@@ -308,7 +308,7 @@ func (suite *TypesTestSuite) TestMsgUpdateClient_ValidateBasic() {
{
"unsupported - localhost",
func() {
- msg, err = types.NewMsgUpdateClient(exported.Localhost, suite.chainA.CurrentTMClientHeader(), suite.chainA.SenderAccount.GetAddress())
+ msg, err = types.NewMsgUpdateClient(exported.Localhost, suite.chainA.CurrentTMClientHeader(), suite.chainA.SenderAccount.GetAddress().String())
suite.Require().NoError(err)
},
false,
@@ -341,7 +341,7 @@ func (suite *TypesTestSuite) TestMarshalMsgUpgradeClient() {
func() {
tendermintClient := ibctmtypes.NewClientState(suite.chainA.ChainID, ibctesting.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false)
tendermintConsState := &ibctmtypes.ConsensusState{NextValidatorsHash: []byte("nextValsHash")}
- msg, err = types.NewMsgUpgradeClient("clientid", tendermintClient, tendermintConsState, []byte("proofUpgradeClient"), []byte("proofUpgradeConsState"), suite.chainA.SenderAccount.GetAddress())
+ msg, err = types.NewMsgUpgradeClient("clientid", tendermintClient, tendermintConsState, []byte("proofUpgradeClient"), []byte("proofUpgradeConsState"), suite.chainA.SenderAccount.GetAddress().String())
suite.Require().NoError(err)
},
},
@@ -349,7 +349,7 @@ func (suite *TypesTestSuite) TestMarshalMsgUpgradeClient() {
"client upgrades to new solomachine client",
func() {
soloMachine := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "solomachine", "", 1)
- msg, err = types.NewMsgUpgradeClient("clientid", soloMachine.ClientState(), soloMachine.ConsensusState(), []byte("proofUpgradeClient"), []byte("proofUpgradeConsState"), suite.chainA.SenderAccount.GetAddress())
+ msg, err = types.NewMsgUpgradeClient("clientid", soloMachine.ClientState(), soloMachine.ConsensusState(), []byte("proofUpgradeClient"), []byte("proofUpgradeConsState"), suite.chainA.SenderAccount.GetAddress().String())
suite.Require().NoError(err)
},
},
@@ -454,7 +454,7 @@ func (suite *TypesTestSuite) TestMsgUpgradeClient_ValidateBasic() {
clientState := ibctmtypes.NewClientState(suite.chainA.ChainID, ibctesting.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false)
consState := &ibctmtypes.ConsensusState{NextValidatorsHash: []byte("nextValsHash")}
- msg, err := types.NewMsgUpgradeClient("testclientid", clientState, consState, []byte("proofUpgradeClient"), []byte("proofUpgradeConsState"), suite.chainA.SenderAccount.GetAddress())
+ msg, err := types.NewMsgUpgradeClient("testclientid", clientState, consState, []byte("proofUpgradeClient"), []byte("proofUpgradeConsState"), suite.chainA.SenderAccount.GetAddress().String())
suite.Require().NoError(err)
tc.malleate(msg)
@@ -482,7 +482,7 @@ func (suite *TypesTestSuite) TestMarshalMsgSubmitMisbehaviour() {
{
"solo machine client", func() {
soloMachine := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "solomachine", "", 2)
- msg, err = types.NewMsgSubmitMisbehaviour(soloMachine.ClientID, soloMachine.CreateMisbehaviour(), suite.chainA.SenderAccount.GetAddress())
+ msg, err = types.NewMsgSubmitMisbehaviour(soloMachine.ClientID, soloMachine.CreateMisbehaviour(), suite.chainA.SenderAccount.GetAddress().String())
suite.Require().NoError(err)
},
},
@@ -494,7 +494,7 @@ func (suite *TypesTestSuite) TestMarshalMsgSubmitMisbehaviour() {
header2 := suite.chainA.CreateTMClientHeader(suite.chainA.ChainID, int64(height.RevisionHeight), heightMinus1, suite.chainA.CurrentHeader.Time.Add(time.Minute), suite.chainA.Vals, suite.chainA.Vals, suite.chainA.Signers)
misbehaviour := ibctmtypes.NewMisbehaviour("tendermint", header1, header2)
- msg, err = types.NewMsgSubmitMisbehaviour("tendermint", misbehaviour, suite.chainA.SenderAccount.GetAddress())
+ msg, err = types.NewMsgSubmitMisbehaviour("tendermint", misbehaviour, suite.chainA.SenderAccount.GetAddress().String())
suite.Require().NoError(err)
},
@@ -552,7 +552,7 @@ func (suite *TypesTestSuite) TestMsgSubmitMisbehaviour_ValidateBasic() {
header2 := suite.chainA.CreateTMClientHeader(suite.chainA.ChainID, int64(height.RevisionHeight), heightMinus1, suite.chainA.CurrentHeader.Time.Add(time.Minute), suite.chainA.Vals, suite.chainA.Vals, suite.chainA.Signers)
misbehaviour := ibctmtypes.NewMisbehaviour("tendermint", header1, header2)
- msg, err = types.NewMsgSubmitMisbehaviour("tendermint", misbehaviour, suite.chainA.SenderAccount.GetAddress())
+ msg, err = types.NewMsgSubmitMisbehaviour("tendermint", misbehaviour, suite.chainA.SenderAccount.GetAddress().String())
suite.Require().NoError(err)
},
true,
@@ -560,7 +560,7 @@ func (suite *TypesTestSuite) TestMsgSubmitMisbehaviour_ValidateBasic() {
{
"invalid tendermint misbehaviour",
func() {
- msg, err = types.NewMsgSubmitMisbehaviour("tendermint", &ibctmtypes.Misbehaviour{}, suite.chainA.SenderAccount.GetAddress())
+ msg, err = types.NewMsgSubmitMisbehaviour("tendermint", &ibctmtypes.Misbehaviour{}, suite.chainA.SenderAccount.GetAddress().String())
suite.Require().NoError(err)
},
false,
@@ -583,7 +583,7 @@ func (suite *TypesTestSuite) TestMsgSubmitMisbehaviour_ValidateBasic() {
"valid - solomachine misbehaviour",
func() {
soloMachine := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "solomachine", "", 2)
- msg, err = types.NewMsgSubmitMisbehaviour(soloMachine.ClientID, soloMachine.CreateMisbehaviour(), suite.chainA.SenderAccount.GetAddress())
+ msg, err = types.NewMsgSubmitMisbehaviour(soloMachine.ClientID, soloMachine.CreateMisbehaviour(), suite.chainA.SenderAccount.GetAddress().String())
suite.Require().NoError(err)
},
true,
@@ -591,7 +591,7 @@ func (suite *TypesTestSuite) TestMsgSubmitMisbehaviour_ValidateBasic() {
{
"invalid solomachine misbehaviour",
func() {
- msg, err = types.NewMsgSubmitMisbehaviour("solomachine", &solomachinetypes.Misbehaviour{}, suite.chainA.SenderAccount.GetAddress())
+ msg, err = types.NewMsgSubmitMisbehaviour("solomachine", &solomachinetypes.Misbehaviour{}, suite.chainA.SenderAccount.GetAddress().String())
suite.Require().NoError(err)
},
false,
@@ -600,7 +600,7 @@ func (suite *TypesTestSuite) TestMsgSubmitMisbehaviour_ValidateBasic() {
"client-id mismatch",
func() {
soloMachineMisbehaviour := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "solomachine", "", 2).CreateMisbehaviour()
- msg, err = types.NewMsgSubmitMisbehaviour("external", soloMachineMisbehaviour, suite.chainA.SenderAccount.GetAddress())
+ msg, err = types.NewMsgSubmitMisbehaviour("external", soloMachineMisbehaviour, suite.chainA.SenderAccount.GetAddress().String())
suite.Require().NoError(err)
},
false,
diff --git a/modules/core/03-connection/client/cli/tx.go b/modules/core/03-connection/client/cli/tx.go
index 87c8de75..df396c8b 100644
--- a/modules/core/03-connection/client/cli/tx.go
+++ b/modules/core/03-connection/client/cli/tx.go
@@ -74,7 +74,7 @@ func NewConnectionOpenInitCmd() *cobra.Command {
msg := types.NewMsgConnectionOpenInit(
clientID, counterpartyClientID,
- counterpartyPrefix, version, delayPeriod, clientCtx.GetFromAddress(),
+ counterpartyPrefix, version, delayPeriod, clientCtx.GetFromAddress().String(),
)
svcMsgClientConn := &msgservice.ServiceMsgClientConn{}
@@ -190,7 +190,7 @@ func NewConnectionOpenTryCmd() *cobra.Command {
connectionID, clientID, counterpartyConnectionID, counterpartyClientID,
counterpartyClient, counterpartyPrefix, counterpartyVersions, delayPeriod,
proofInit, proofClient, proofConsensus, proofHeight,
- consensusHeight, clientCtx.GetFromAddress(),
+ consensusHeight, clientCtx.GetFromAddress().String(),
)
svcMsgClientConn := &msgservice.ServiceMsgClientConn{}
@@ -280,7 +280,7 @@ func NewConnectionOpenAckCmd() *cobra.Command {
msg := types.NewMsgConnectionOpenAck(
connectionID, counterpartyConnectionID, counterpartyClient, proofTry, proofClient, proofConsensus, proofHeight,
- consensusHeight, version, clientCtx.GetFromAddress(),
+ consensusHeight, version, clientCtx.GetFromAddress().String(),
)
svcMsgClientConn := &msgservice.ServiceMsgClientConn{}
@@ -328,7 +328,7 @@ func NewConnectionOpenConfirmCmd() *cobra.Command {
}
msg := types.NewMsgConnectionOpenConfirm(
- connectionID, proofAck, proofHeight, clientCtx.GetFromAddress(),
+ connectionID, proofAck, proofHeight, clientCtx.GetFromAddress().String(),
)
svcMsgClientConn := &msgservice.ServiceMsgClientConn{}
diff --git a/modules/core/03-connection/types/msgs.go b/modules/core/03-connection/types/msgs.go
index fcae27b6..0ef6f06b 100644
--- a/modules/core/03-connection/types/msgs.go
+++ b/modules/core/03-connection/types/msgs.go
@@ -26,7 +26,7 @@ var (
func NewMsgConnectionOpenInit(
clientID, counterpartyClientID string,
counterpartyPrefix commitmenttypes.MerklePrefix,
- version *Version, delayPeriod uint64, signer sdk.AccAddress,
+ version *Version, delayPeriod uint64, signer string,
) *MsgConnectionOpenInit {
// counterparty must have the same delay period
counterparty := NewCounterparty(counterpartyClientID, "", counterpartyPrefix)
@@ -35,7 +35,7 @@ func NewMsgConnectionOpenInit(
Counterparty: counterparty,
Version: version,
DelayPeriod: delayPeriod,
- Signer: signer.String(),
+ Signer: signer,
}
}
@@ -94,7 +94,7 @@ func NewMsgConnectionOpenTry(
counterpartyPrefix commitmenttypes.MerklePrefix,
counterpartyVersions []*Version, delayPeriod uint64,
proofInit, proofClient, proofConsensus []byte,
- proofHeight, consensusHeight clienttypes.Height, signer sdk.AccAddress,
+ proofHeight, consensusHeight clienttypes.Height, signer string,
) *MsgConnectionOpenTry {
counterparty := NewCounterparty(counterpartyClientID, counterpartyConnectionID, counterpartyPrefix)
csAny, _ := clienttypes.PackClientState(counterpartyClient)
@@ -110,7 +110,7 @@ func NewMsgConnectionOpenTry(
ProofConsensus: proofConsensus,
ProofHeight: proofHeight,
ConsensusHeight: consensusHeight,
- Signer: signer.String(),
+ Signer: signer,
}
}
@@ -206,7 +206,7 @@ func NewMsgConnectionOpenAck(
proofTry, proofClient, proofConsensus []byte,
proofHeight, consensusHeight clienttypes.Height,
version *Version,
- signer sdk.AccAddress,
+ signer string,
) *MsgConnectionOpenAck {
csAny, _ := clienttypes.PackClientState(counterpartyClient)
return &MsgConnectionOpenAck{
@@ -219,7 +219,7 @@ func NewMsgConnectionOpenAck(
ProofHeight: proofHeight,
ConsensusHeight: consensusHeight,
Version: version,
- Signer: signer.String(),
+ Signer: signer,
}
}
@@ -300,13 +300,13 @@ func (msg MsgConnectionOpenAck) GetSigners() []sdk.AccAddress {
//nolint:interfacer
func NewMsgConnectionOpenConfirm(
connectionID string, proofAck []byte, proofHeight clienttypes.Height,
- signer sdk.AccAddress,
+ signer string,
) *MsgConnectionOpenConfirm {
return &MsgConnectionOpenConfirm{
ConnectionId: connectionID,
ProofAck: proofAck,
ProofHeight: proofHeight,
- Signer: signer.String(),
+ Signer: signer,
}
}
diff --git a/modules/core/03-connection/types/msgs_test.go b/modules/core/03-connection/types/msgs_test.go
index a929ff72..1875804d 100644
--- a/modules/core/03-connection/types/msgs_test.go
+++ b/modules/core/03-connection/types/msgs_test.go
@@ -10,19 +10,20 @@ import (
abci "github.com/tendermint/tendermint/abci/types"
dbm "github.com/tendermint/tm-db"
- "github.com/cosmos/ibc-go/testing/simapp"
"github.com/cosmos/cosmos-sdk/store/iavl"
"github.com/cosmos/cosmos-sdk/store/rootmulti"
storetypes "github.com/cosmos/cosmos-sdk/store/types"
- sdk "github.com/cosmos/cosmos-sdk/types"
clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
"github.com/cosmos/ibc-go/modules/core/03-connection/types"
commitmenttypes "github.com/cosmos/ibc-go/modules/core/23-commitment/types"
ibctmtypes "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types"
ibctesting "github.com/cosmos/ibc-go/testing"
+ "github.com/cosmos/ibc-go/testing/simapp"
)
var (
+ signer = "cosmos1ckgw5d7jfj7wwxjzs9fdrdev9vc8dzcw3n2lht"
+
emptyPrefix = commitmenttypes.MerklePrefix{}
emptyProof = []byte{}
)
@@ -77,7 +78,6 @@ func TestMsgTestSuite(t *testing.T) {
func (suite *MsgTestSuite) TestNewMsgConnectionOpenInit() {
prefix := commitmenttypes.NewMerklePrefix([]byte("storePrefixKey"))
- signer, _ := sdk.AccAddressFromBech32("cosmos1ckgw5d7jfj7wwxjzs9fdrdev9vc8dzcw3n2lht")
// empty versions are considered valid, the default compatible versions
// will be used in protocol.
var version *types.Version
@@ -89,10 +89,10 @@ func (suite *MsgTestSuite) TestNewMsgConnectionOpenInit() {
}{
{"invalid client ID", types.NewMsgConnectionOpenInit("test/iris", "clienttotest", prefix, version, 500, signer), false},
{"invalid counterparty client ID", types.NewMsgConnectionOpenInit("clienttotest", "(clienttotest)", prefix, version, 500, signer), false},
- {"invalid counterparty connection ID", &types.MsgConnectionOpenInit{connectionID, types.NewCounterparty("clienttotest", "connectiontotest", prefix), version, 500, signer.String()}, false},
+ {"invalid counterparty connection ID", &types.MsgConnectionOpenInit{connectionID, types.NewCounterparty("clienttotest", "connectiontotest", prefix), version, 500, signer}, false},
{"empty counterparty prefix", types.NewMsgConnectionOpenInit("clienttotest", "clienttotest", emptyPrefix, version, 500, signer), false},
{"supplied version fails basic validation", types.NewMsgConnectionOpenInit("clienttotest", "clienttotest", prefix, &types.Version{}, 500, signer), false},
- {"empty singer", types.NewMsgConnectionOpenInit("clienttotest", "clienttotest", prefix, version, 500, nil), false},
+ {"empty singer", types.NewMsgConnectionOpenInit("clienttotest", "clienttotest", prefix, version, 500, ""), false},
{"success", types.NewMsgConnectionOpenInit("clienttotest", "clienttotest", prefix, version, 500, signer), true},
}
@@ -108,7 +108,6 @@ func (suite *MsgTestSuite) TestNewMsgConnectionOpenInit() {
func (suite *MsgTestSuite) TestNewMsgConnectionOpenTry() {
prefix := commitmenttypes.NewMerklePrefix([]byte("storePrefixKey"))
- signer, _ := sdk.AccAddressFromBech32("cosmos1ckgw5d7jfj7wwxjzs9fdrdev9vc8dzcw3n2lht")
clientState := ibctmtypes.NewClientState(
chainID, ibctmtypes.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false,
@@ -137,7 +136,7 @@ func (suite *MsgTestSuite) TestNewMsgConnectionOpenTry() {
{"invalid counterparty connection ID", types.NewMsgConnectionOpenTry(connectionID, "clienttotesta", "ibc/test", "clienttotest", clientState, prefix, []*types.Version{ibctesting.ConnectionVersion}, 500, suite.proof, suite.proof, suite.proof, clientHeight, clientHeight, signer), false},
{"invalid counterparty client ID", types.NewMsgConnectionOpenTry(connectionID, "clienttotesta", "connectiontotest", "test/conn1", clientState, prefix, []*types.Version{ibctesting.ConnectionVersion}, 500, suite.proof, suite.proof, suite.proof, clientHeight, clientHeight, signer), false},
{"invalid nil counterparty client", types.NewMsgConnectionOpenTry(connectionID, "clienttotesta", "connectiontotest", "clienttotest", nil, prefix, []*types.Version{ibctesting.ConnectionVersion}, 500, suite.proof, suite.proof, suite.proof, clientHeight, clientHeight, signer), false},
- {"invalid client unpacking", &types.MsgConnectionOpenTry{connectionID, "clienttotesta", invalidAny, counterparty, 500, []*types.Version{ibctesting.ConnectionVersion}, clientHeight, suite.proof, suite.proof, suite.proof, clientHeight, signer.String()}, false},
+ {"invalid client unpacking", &types.MsgConnectionOpenTry{connectionID, "clienttotesta", invalidAny, counterparty, 500, []*types.Version{ibctesting.ConnectionVersion}, clientHeight, suite.proof, suite.proof, suite.proof, clientHeight, signer}, false},
{"counterparty failed validate", types.NewMsgConnectionOpenTry(connectionID, "clienttotesta", "connectiontotest", "clienttotest", invalidClient, prefix, []*types.Version{ibctesting.ConnectionVersion}, 500, suite.proof, suite.proof, suite.proof, clientHeight, clientHeight, signer), false},
{"empty counterparty prefix", types.NewMsgConnectionOpenTry(connectionID, "clienttotesta", "connectiontotest", "clienttotest", clientState, emptyPrefix, []*types.Version{ibctesting.ConnectionVersion}, 500, suite.proof, suite.proof, suite.proof, clientHeight, clientHeight, signer), false},
{"empty counterpartyVersions", types.NewMsgConnectionOpenTry(connectionID, "clienttotesta", "connectiontotest", "clienttotest", clientState, prefix, []*types.Version{}, 500, suite.proof, suite.proof, suite.proof, clientHeight, clientHeight, signer), false},
@@ -146,7 +145,7 @@ func (suite *MsgTestSuite) TestNewMsgConnectionOpenTry() {
{"empty proofConsensus", types.NewMsgConnectionOpenTry(connectionID, "clienttotesta", "connectiontotest", "clienttotest", clientState, prefix, []*types.Version{ibctesting.ConnectionVersion}, 500, suite.proof, suite.proof, emptyProof, clientHeight, clientHeight, signer), false},
{"invalid proofHeight", types.NewMsgConnectionOpenTry(connectionID, "clienttotesta", "connectiontotest", "clienttotest", clientState, prefix, []*types.Version{ibctesting.ConnectionVersion}, 500, suite.proof, suite.proof, suite.proof, clienttypes.ZeroHeight(), clientHeight, signer), false},
{"invalid consensusHeight", types.NewMsgConnectionOpenTry(connectionID, "clienttotesta", "connectiontotest", "clienttotest", clientState, prefix, []*types.Version{ibctesting.ConnectionVersion}, 500, suite.proof, suite.proof, suite.proof, clientHeight, clienttypes.ZeroHeight(), signer), false},
- {"empty singer", types.NewMsgConnectionOpenTry(connectionID, "clienttotesta", "connectiontotest", "clienttotest", clientState, prefix, []*types.Version{ibctesting.ConnectionVersion}, 500, suite.proof, suite.proof, suite.proof, clientHeight, clientHeight, nil), false},
+ {"empty singer", types.NewMsgConnectionOpenTry(connectionID, "clienttotesta", "connectiontotest", "clienttotest", clientState, prefix, []*types.Version{ibctesting.ConnectionVersion}, 500, suite.proof, suite.proof, suite.proof, clientHeight, clientHeight, ""), false},
{"success", types.NewMsgConnectionOpenTry(connectionID, "clienttotesta", "connectiontotest", "clienttotest", clientState, prefix, []*types.Version{ibctesting.ConnectionVersion}, 500, suite.proof, suite.proof, suite.proof, clientHeight, clientHeight, signer), true},
{"invalid version", types.NewMsgConnectionOpenTry(connectionID, "clienttotesta", "connectiontotest", "clienttotest", clientState, prefix, []*types.Version{{}}, 500, suite.proof, suite.proof, suite.proof, clientHeight, clientHeight, signer), false},
}
@@ -162,7 +161,6 @@ func (suite *MsgTestSuite) TestNewMsgConnectionOpenTry() {
}
func (suite *MsgTestSuite) TestNewMsgConnectionOpenAck() {
- signer, _ := sdk.AccAddressFromBech32("cosmos1ckgw5d7jfj7wwxjzs9fdrdev9vc8dzcw3n2lht")
clientState := ibctmtypes.NewClientState(
chainID, ibctmtypes.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false,
)
@@ -187,7 +185,7 @@ func (suite *MsgTestSuite) TestNewMsgConnectionOpenAck() {
{"invalid connection ID", types.NewMsgConnectionOpenAck("test/conn1", connectionID, clientState, suite.proof, suite.proof, suite.proof, clientHeight, clientHeight, ibctesting.ConnectionVersion, signer), false},
{"invalid counterparty connection ID", types.NewMsgConnectionOpenAck(connectionID, "test/conn1", clientState, suite.proof, suite.proof, suite.proof, clientHeight, clientHeight, ibctesting.ConnectionVersion, signer), false},
{"invalid nil counterparty client", types.NewMsgConnectionOpenAck(connectionID, connectionID, nil, suite.proof, suite.proof, suite.proof, clientHeight, clientHeight, ibctesting.ConnectionVersion, signer), false},
- {"invalid unpacking counterparty client", &types.MsgConnectionOpenAck{connectionID, connectionID, ibctesting.ConnectionVersion, invalidAny, clientHeight, suite.proof, suite.proof, suite.proof, clientHeight, signer.String()}, false},
+ {"invalid unpacking counterparty client", &types.MsgConnectionOpenAck{connectionID, connectionID, ibctesting.ConnectionVersion, invalidAny, clientHeight, suite.proof, suite.proof, suite.proof, clientHeight, signer}, false},
{"counterparty client failed validate", types.NewMsgConnectionOpenAck(connectionID, connectionID, invalidClient, suite.proof, suite.proof, suite.proof, clientHeight, clientHeight, ibctesting.ConnectionVersion, signer), false},
{"empty proofTry", types.NewMsgConnectionOpenAck(connectionID, connectionID, clientState, emptyProof, suite.proof, suite.proof, clientHeight, clientHeight, ibctesting.ConnectionVersion, signer), false},
{"empty proofClient", types.NewMsgConnectionOpenAck(connectionID, connectionID, clientState, suite.proof, emptyProof, suite.proof, clientHeight, clientHeight, ibctesting.ConnectionVersion, signer), false},
@@ -195,7 +193,7 @@ func (suite *MsgTestSuite) TestNewMsgConnectionOpenAck() {
{"invalid proofHeight", types.NewMsgConnectionOpenAck(connectionID, connectionID, clientState, suite.proof, suite.proof, suite.proof, clienttypes.ZeroHeight(), clientHeight, ibctesting.ConnectionVersion, signer), false},
{"invalid consensusHeight", types.NewMsgConnectionOpenAck(connectionID, connectionID, clientState, suite.proof, suite.proof, suite.proof, clientHeight, clienttypes.ZeroHeight(), ibctesting.ConnectionVersion, signer), false},
{"invalid version", types.NewMsgConnectionOpenAck(connectionID, connectionID, clientState, suite.proof, suite.proof, suite.proof, clientHeight, clientHeight, &types.Version{}, signer), false},
- {"empty signer", types.NewMsgConnectionOpenAck(connectionID, connectionID, clientState, suite.proof, suite.proof, suite.proof, clientHeight, clientHeight, ibctesting.ConnectionVersion, nil), false},
+ {"empty signer", types.NewMsgConnectionOpenAck(connectionID, connectionID, clientState, suite.proof, suite.proof, suite.proof, clientHeight, clientHeight, ibctesting.ConnectionVersion, ""), false},
{"success", types.NewMsgConnectionOpenAck(connectionID, connectionID, clientState, suite.proof, suite.proof, suite.proof, clientHeight, clientHeight, ibctesting.ConnectionVersion, signer), true},
}
@@ -210,13 +208,11 @@ func (suite *MsgTestSuite) TestNewMsgConnectionOpenAck() {
}
func (suite *MsgTestSuite) TestNewMsgConnectionOpenConfirm() {
- signer, _ := sdk.AccAddressFromBech32("cosmos1ckgw5d7jfj7wwxjzs9fdrdev9vc8dzcw3n2lht")
-
testMsgs := []*types.MsgConnectionOpenConfirm{
types.NewMsgConnectionOpenConfirm("test/conn1", suite.proof, clientHeight, signer),
types.NewMsgConnectionOpenConfirm(connectionID, emptyProof, clientHeight, signer),
types.NewMsgConnectionOpenConfirm(connectionID, suite.proof, clienttypes.ZeroHeight(), signer),
- types.NewMsgConnectionOpenConfirm(connectionID, suite.proof, clientHeight, nil),
+ types.NewMsgConnectionOpenConfirm(connectionID, suite.proof, clientHeight, ""),
types.NewMsgConnectionOpenConfirm(connectionID, suite.proof, clientHeight, signer),
}
diff --git a/modules/core/04-channel/client/cli/tx.go b/modules/core/04-channel/client/cli/tx.go
index fb6adede..c76c02f3 100644
--- a/modules/core/04-channel/client/cli/tx.go
+++ b/modules/core/04-channel/client/cli/tx.go
@@ -41,7 +41,7 @@ func NewChannelOpenInitCmd() *cobra.Command {
msg := types.NewMsgChannelOpenInit(
portID, version, order, hops,
- counterpartyPortID, clientCtx.GetFromAddress(),
+ counterpartyPortID, clientCtx.GetFromAddress().String(),
)
svcMsgClientConn := &msgservice.ServiceMsgClientConn{}
msgClient := types.NewMsgClient(svcMsgClientConn)
@@ -95,7 +95,7 @@ func NewChannelOpenTryCmd() *cobra.Command {
msg := types.NewMsgChannelOpenTry(
portID, channelID, version, order, hops,
counterpartyPortID, counterpartyChannelID, version,
- proofInit, proofHeight, clientCtx.GetFromAddress(),
+ proofInit, proofHeight, clientCtx.GetFromAddress().String(),
)
svcMsgClientConn := &msgservice.ServiceMsgClientConn{}
msgClient := types.NewMsgClient(svcMsgClientConn)
@@ -144,7 +144,7 @@ func NewChannelOpenAckCmd() *cobra.Command {
}
msg := types.NewMsgChannelOpenAck(
- portID, channelID, counterpartyChannelID, version, proofTry, proofHeight, clientCtx.GetFromAddress(),
+ portID, channelID, counterpartyChannelID, version, proofTry, proofHeight, clientCtx.GetFromAddress().String(),
)
svcMsgClientConn := &msgservice.ServiceMsgClientConn{}
msgClient := types.NewMsgClient(svcMsgClientConn)
@@ -187,7 +187,7 @@ func NewChannelOpenConfirmCmd() *cobra.Command {
}
msg := types.NewMsgChannelOpenConfirm(
- portID, channelID, proofAck, proofHeight, clientCtx.GetFromAddress(),
+ portID, channelID, proofAck, proofHeight, clientCtx.GetFromAddress().String(),
)
svcMsgClientConn := &msgservice.ServiceMsgClientConn{}
msgClient := types.NewMsgClient(svcMsgClientConn)
@@ -219,7 +219,7 @@ func NewChannelCloseInitCmd() *cobra.Command {
portID := args[0]
channelID := args[1]
- msg := types.NewMsgChannelCloseInit(portID, channelID, clientCtx.GetFromAddress())
+ msg := types.NewMsgChannelCloseInit(portID, channelID, clientCtx.GetFromAddress().String())
svcMsgClientConn := &msgservice.ServiceMsgClientConn{}
msgClient := types.NewMsgClient(svcMsgClientConn)
_, err = msgClient.ChannelCloseInit(cmd.Context(), msg)
@@ -261,7 +261,7 @@ func NewChannelCloseConfirmCmd() *cobra.Command {
}
msg := types.NewMsgChannelCloseConfirm(
- portID, channelID, proofInit, proofHeight, clientCtx.GetFromAddress(),
+ portID, channelID, proofInit, proofHeight, clientCtx.GetFromAddress().String(),
)
svcMsgClientConn := &msgservice.ServiceMsgClientConn{}
msgClient := types.NewMsgClient(svcMsgClientConn)
diff --git a/modules/core/04-channel/types/msgs.go b/modules/core/04-channel/types/msgs.go
index d35c983f..151b5582 100644
--- a/modules/core/04-channel/types/msgs.go
+++ b/modules/core/04-channel/types/msgs.go
@@ -17,14 +17,14 @@ var _ sdk.Msg = &MsgChannelOpenInit{}
// nolint:interfacer
func NewMsgChannelOpenInit(
portID, version string, channelOrder Order, connectionHops []string,
- counterpartyPortID string, signer sdk.AccAddress,
+ counterpartyPortID string, signer string,
) *MsgChannelOpenInit {
counterparty := NewCounterparty(counterpartyPortID, "")
channel := NewChannel(INIT, channelOrder, counterparty, connectionHops, version)
return &MsgChannelOpenInit{
PortId: portID,
Channel: channel,
- Signer: signer.String(),
+ Signer: signer,
}
}
@@ -81,7 +81,7 @@ var _ sdk.Msg = &MsgChannelOpenTry{}
func NewMsgChannelOpenTry(
portID, previousChannelID, version string, channelOrder Order, connectionHops []string,
counterpartyPortID, counterpartyChannelID, counterpartyVersion string,
- proofInit []byte, proofHeight clienttypes.Height, signer sdk.AccAddress,
+ proofInit []byte, proofHeight clienttypes.Height, signer string,
) *MsgChannelOpenTry {
counterparty := NewCounterparty(counterpartyPortID, counterpartyChannelID)
channel := NewChannel(TRYOPEN, channelOrder, counterparty, connectionHops, version)
@@ -92,7 +92,7 @@ func NewMsgChannelOpenTry(
CounterpartyVersion: counterpartyVersion,
ProofInit: proofInit,
ProofHeight: proofHeight,
- Signer: signer.String(),
+ Signer: signer,
}
}
@@ -161,7 +161,7 @@ var _ sdk.Msg = &MsgChannelOpenAck{}
// nolint:interfacer
func NewMsgChannelOpenAck(
portID, channelID, counterpartyChannelID string, cpv string, proofTry []byte, proofHeight clienttypes.Height,
- signer sdk.AccAddress,
+ signer string,
) *MsgChannelOpenAck {
return &MsgChannelOpenAck{
PortId: portID,
@@ -170,7 +170,7 @@ func NewMsgChannelOpenAck(
CounterpartyVersion: cpv,
ProofTry: proofTry,
ProofHeight: proofHeight,
- Signer: signer.String(),
+ Signer: signer,
}
}
@@ -229,14 +229,14 @@ var _ sdk.Msg = &MsgChannelOpenConfirm{}
// nolint:interfacer
func NewMsgChannelOpenConfirm(
portID, channelID string, proofAck []byte, proofHeight clienttypes.Height,
- signer sdk.AccAddress,
+ signer string,
) *MsgChannelOpenConfirm {
return &MsgChannelOpenConfirm{
PortId: portID,
ChannelId: channelID,
ProofAck: proofAck,
ProofHeight: proofHeight,
- Signer: signer.String(),
+ Signer: signer,
}
}
@@ -291,12 +291,12 @@ var _ sdk.Msg = &MsgChannelCloseInit{}
// NewMsgChannelCloseInit creates a new MsgChannelCloseInit instance
// nolint:interfacer
func NewMsgChannelCloseInit(
- portID string, channelID string, signer sdk.AccAddress,
+ portID string, channelID string, signer string,
) *MsgChannelCloseInit {
return &MsgChannelCloseInit{
PortId: portID,
ChannelId: channelID,
- Signer: signer.String(),
+ Signer: signer,
}
}
@@ -346,14 +346,14 @@ var _ sdk.Msg = &MsgChannelCloseConfirm{}
// nolint:interfacer
func NewMsgChannelCloseConfirm(
portID, channelID string, proofInit []byte, proofHeight clienttypes.Height,
- signer sdk.AccAddress,
+ signer string,
) *MsgChannelCloseConfirm {
return &MsgChannelCloseConfirm{
PortId: portID,
ChannelId: channelID,
ProofInit: proofInit,
ProofHeight: proofHeight,
- Signer: signer.String(),
+ Signer: signer,
}
}
@@ -409,13 +409,13 @@ var _ sdk.Msg = &MsgRecvPacket{}
// nolint:interfacer
func NewMsgRecvPacket(
packet Packet, proofCommitment []byte, proofHeight clienttypes.Height,
- signer sdk.AccAddress,
+ signer string,
) *MsgRecvPacket {
return &MsgRecvPacket{
Packet: packet,
ProofCommitment: proofCommitment,
ProofHeight: proofHeight,
- Signer: signer.String(),
+ Signer: signer,
}
}
@@ -472,14 +472,14 @@ var _ sdk.Msg = &MsgTimeout{}
// nolint:interfacer
func NewMsgTimeout(
packet Packet, nextSequenceRecv uint64, proofUnreceived []byte,
- proofHeight clienttypes.Height, signer sdk.AccAddress,
+ proofHeight clienttypes.Height, signer string,
) *MsgTimeout {
return &MsgTimeout{
Packet: packet,
NextSequenceRecv: nextSequenceRecv,
ProofUnreceived: proofUnreceived,
ProofHeight: proofHeight,
- Signer: signer.String(),
+ Signer: signer,
}
}
@@ -531,7 +531,7 @@ func (msg MsgTimeout) Type() string {
func NewMsgTimeoutOnClose(
packet Packet, nextSequenceRecv uint64,
proofUnreceived, proofClose []byte,
- proofHeight clienttypes.Height, signer sdk.AccAddress,
+ proofHeight clienttypes.Height, signer string,
) *MsgTimeoutOnClose {
return &MsgTimeoutOnClose{
Packet: packet,
@@ -539,7 +539,7 @@ func NewMsgTimeoutOnClose(
ProofUnreceived: proofUnreceived,
ProofClose: proofClose,
ProofHeight: proofHeight,
- Signer: signer.String(),
+ Signer: signer,
}
}
@@ -597,14 +597,14 @@ func NewMsgAcknowledgement(
packet Packet,
ack, proofAcked []byte,
proofHeight clienttypes.Height,
- signer sdk.AccAddress,
+ signer string,
) *MsgAcknowledgement {
return &MsgAcknowledgement{
Packet: packet,
Acknowledgement: ack,
ProofAcked: proofAcked,
ProofHeight: proofHeight,
- Signer: signer.String(),
+ Signer: signer,
}
}
diff --git a/modules/core/04-channel/types/msgs_test.go b/modules/core/04-channel/types/msgs_test.go
index fd8d2328..daa2195b 100644
--- a/modules/core/04-channel/types/msgs_test.go
+++ b/modules/core/04-channel/types/msgs_test.go
@@ -9,7 +9,6 @@ import (
abci "github.com/tendermint/tendermint/abci/types"
dbm "github.com/tendermint/tm-db"
- "github.com/cosmos/ibc-go/testing/simapp"
"github.com/cosmos/cosmos-sdk/store/iavl"
"github.com/cosmos/cosmos-sdk/store/rootmulti"
storetypes "github.com/cosmos/cosmos-sdk/store/types"
@@ -18,6 +17,7 @@ import (
"github.com/cosmos/ibc-go/modules/core/04-channel/types"
commitmenttypes "github.com/cosmos/ibc-go/modules/core/23-commitment/types"
"github.com/cosmos/ibc-go/modules/core/exported"
+ "github.com/cosmos/ibc-go/testing/simapp"
)
const (
@@ -59,8 +59,8 @@ var (
invalidProofs1 = exported.Proof(nil)
invalidProofs2 = emptyProof
- addr = sdk.AccAddress("testaddr111111111111")
- emptyAddr sdk.AccAddress
+ addr = sdk.AccAddress("testaddr111111111111").String()
+ emptyAddr string
connHops = []string{"testconnection"}
invalidConnHops = []string{"testconnection", "testconnection"}
@@ -125,7 +125,7 @@ func (suite *TypesTestSuite) TestMsgChannelOpenInitValidateBasic() {
{"connection id contains non-alpha", types.NewMsgChannelOpenInit(portid, version, types.UNORDERED, []string{invalidConnection}, cpportid, addr), false},
{"", types.NewMsgChannelOpenInit(portid, "", types.UNORDERED, connHops, cpportid, addr), true},
{"invalid counterparty port id", types.NewMsgChannelOpenInit(portid, version, types.UNORDERED, connHops, invalidPort, addr), false},
- {"channel not in INIT state", &types.MsgChannelOpenInit{portid, tryOpenChannel, addr.String()}, false},
+ {"channel not in INIT state", &types.MsgChannelOpenInit{portid, tryOpenChannel, addr}, false},
}
for _, tc := range testCases {
@@ -169,7 +169,7 @@ func (suite *TypesTestSuite) TestMsgChannelOpenTryValidateBasic() {
{"invalid counterparty port id", types.NewMsgChannelOpenTry(portid, chanid, version, types.UNORDERED, connHops, invalidPort, cpchanid, version, suite.proof, height, addr), false},
{"invalid counterparty channel id", types.NewMsgChannelOpenTry(portid, chanid, version, types.UNORDERED, connHops, cpportid, invalidChannel, version, suite.proof, height, addr), false},
{"empty proof", types.NewMsgChannelOpenTry(portid, chanid, version, types.UNORDERED, connHops, cpportid, cpchanid, version, emptyProof, height, addr), false},
- {"channel not in TRYOPEN state", &types.MsgChannelOpenTry{portid, chanid, initChannel, version, suite.proof, height, addr.String()}, false},
+ {"channel not in TRYOPEN state", &types.MsgChannelOpenTry{portid, chanid, initChannel, version, suite.proof, height, addr}, false},
}
for _, tc := range testCases {
diff --git a/modules/core/keeper/msg_server_test.go b/modules/core/keeper/msg_server_test.go
index 18830d79..4ec1a0d2 100644
--- a/modules/core/keeper/msg_server_test.go
+++ b/modules/core/keeper/msg_server_test.go
@@ -6,6 +6,7 @@ import (
"github.com/stretchr/testify/suite"
sdk "github.com/cosmos/cosmos-sdk/types"
+ upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
commitmenttypes "github.com/cosmos/ibc-go/modules/core/23-commitment/types"
@@ -15,7 +16,6 @@ import (
ibctmtypes "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types"
ibctesting "github.com/cosmos/ibc-go/testing"
ibcmock "github.com/cosmos/ibc-go/testing/mock"
- upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
)
const height = 10
@@ -144,7 +144,7 @@ func (suite *KeeperTestSuite) TestHandleRecvPacket() {
packetKey := host.PacketCommitmentKey(packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence())
proof, proofHeight := suite.chainA.QueryProof(packetKey)
- msg := channeltypes.NewMsgRecvPacket(packet, proof, proofHeight, suite.chainB.SenderAccount.GetAddress())
+ msg := channeltypes.NewMsgRecvPacket(packet, proof, proofHeight, suite.chainB.SenderAccount.GetAddress().String())
// ante-handle RecvPacket
_, err := keeper.Keeper.RecvPacket(*suite.chainB.App.IBCKeeper, sdk.WrapSDKContext(suite.chainB.GetContext()), msg)
@@ -283,7 +283,7 @@ func (suite *KeeperTestSuite) TestHandleAcknowledgePacket() {
packetKey := host.PacketAcknowledgementKey(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence())
proof, proofHeight := suite.chainB.QueryProof(packetKey)
- msg := channeltypes.NewMsgAcknowledgement(packet, ibcmock.MockAcknowledgement, proof, proofHeight, suite.chainA.SenderAccount.GetAddress())
+ msg := channeltypes.NewMsgAcknowledgement(packet, ibcmock.MockAcknowledgement, proof, proofHeight, suite.chainA.SenderAccount.GetAddress().String())
_, err := keeper.Keeper.Acknowledgement(*suite.chainA.App.IBCKeeper, sdk.WrapSDKContext(suite.chainA.GetContext()), msg)
@@ -404,7 +404,7 @@ func (suite *KeeperTestSuite) TestHandleTimeoutPacket() {
proof, proofHeight := suite.chainB.QueryProof(packetKey)
- msg := channeltypes.NewMsgTimeout(packet, 1, proof, proofHeight, suite.chainA.SenderAccount.GetAddress())
+ msg := channeltypes.NewMsgTimeout(packet, 1, proof, proofHeight, suite.chainA.SenderAccount.GetAddress().String())
_, err := keeper.Keeper.Timeout(*suite.chainA.App.IBCKeeper, sdk.WrapSDKContext(suite.chainA.GetContext()), msg)
@@ -586,7 +586,7 @@ func (suite *KeeperTestSuite) TestHandleTimeoutOnClosePacket() {
channelKey := host.ChannelKey(counterpartyChannel.PortID, counterpartyChannel.ID)
proofClosed, _ := suite.chainB.QueryProof(channelKey)
- msg := channeltypes.NewMsgTimeoutOnClose(packet, 1, proof, proofClosed, proofHeight, suite.chainA.SenderAccount.GetAddress())
+ msg := channeltypes.NewMsgTimeoutOnClose(packet, 1, proof, proofClosed, proofHeight, suite.chainA.SenderAccount.GetAddress().String())
_, err := keeper.Keeper.TimeoutOnClose(*suite.chainA.App.IBCKeeper, sdk.WrapSDKContext(suite.chainA.GetContext()), msg)
@@ -660,7 +660,7 @@ func (suite *KeeperTestSuite) TestUpgradeClient() {
proofUpgradedConsState, _ := suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight())
msg, err = clienttypes.NewMsgUpgradeClient(clientA, upgradedClient, upgradedConsState,
- proofUpgradeClient, proofUpgradedConsState, suite.chainA.SenderAccount.GetAddress())
+ proofUpgradeClient, proofUpgradedConsState, suite.chainA.SenderAccount.GetAddress().String())
suite.Require().NoError(err)
},
expPass: true,
@@ -694,7 +694,7 @@ func (suite *KeeperTestSuite) TestUpgradeClient() {
err = suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
suite.Require().NoError(err)
- msg, err = clienttypes.NewMsgUpgradeClient(clientA, upgradedClient, upgradedConsState, nil, nil, suite.chainA.SenderAccount.GetAddress())
+ msg, err = clienttypes.NewMsgUpgradeClient(clientA, upgradedClient, upgradedConsState, nil, nil, suite.chainA.SenderAccount.GetAddress().String())
suite.Require().NoError(err)
},
expPass: false,
diff --git a/testing/chain.go b/testing/chain.go
index 6f1f4e78..3578bd0e 100644
--- a/testing/chain.go
+++ b/testing/chain.go
@@ -461,7 +461,7 @@ func (chain *TestChain) ConstructMsgCreateClient(counterparty *TestChain, client
}
msg, err := clienttypes.NewMsgCreateClient(
- clientState, consensusState, chain.SenderAccount.GetAddress(),
+ clientState, consensusState, chain.SenderAccount.GetAddress().String(),
)
require.NoError(chain.t, err)
return msg
@@ -484,7 +484,7 @@ func (chain *TestChain) UpdateTMClient(counterparty *TestChain, clientID string)
msg, err := clienttypes.NewMsgUpdateClient(
clientID, header,
- chain.SenderAccount.GetAddress(),
+ chain.SenderAccount.GetAddress().String(),
)
require.NoError(chain.t, err)
@@ -645,7 +645,7 @@ func (chain *TestChain) ConnectionOpenInit(
connection.ClientID,
connection.CounterpartyClientID,
counterparty.GetPrefix(), DefaultOpenInitVersion, DefaultDelayPeriod,
- chain.SenderAccount.GetAddress(),
+ chain.SenderAccount.GetAddress().String(),
)
return chain.sendMsgs(msg)
}
@@ -668,7 +668,7 @@ func (chain *TestChain) ConnectionOpenTry(
counterpartyClient, counterparty.GetPrefix(), []*connectiontypes.Version{ConnectionVersion}, DefaultDelayPeriod,
proofInit, proofClient, proofConsensus,
proofHeight, consensusHeight,
- chain.SenderAccount.GetAddress(),
+ chain.SenderAccount.GetAddress().String(),
)
return chain.sendMsgs(msg)
}
@@ -690,7 +690,7 @@ func (chain *TestChain) ConnectionOpenAck(
proofTry, proofClient, proofConsensus,
proofHeight, consensusHeight,
ConnectionVersion,
- chain.SenderAccount.GetAddress(),
+ chain.SenderAccount.GetAddress().String(),
)
return chain.sendMsgs(msg)
}
@@ -706,7 +706,7 @@ func (chain *TestChain) ConnectionOpenConfirm(
msg := connectiontypes.NewMsgConnectionOpenConfirm(
connection.ID,
proof, height,
- chain.SenderAccount.GetAddress(),
+ chain.SenderAccount.GetAddress().String(),
)
return chain.sendMsgs(msg)
}
@@ -788,7 +788,7 @@ func (chain *TestChain) ChanOpenInit(
ch.PortID,
ch.Version, order, []string{connectionID},
counterparty.PortID,
- chain.SenderAccount.GetAddress(),
+ chain.SenderAccount.GetAddress().String(),
)
return chain.sendMsgs(msg)
}
@@ -807,7 +807,7 @@ func (chain *TestChain) ChanOpenTry(
ch.Version, order, []string{connectionID},
counterpartyCh.PortID, counterpartyCh.ID, counterpartyCh.Version,
proof, height,
- chain.SenderAccount.GetAddress(),
+ chain.SenderAccount.GetAddress().String(),
)
return chain.sendMsgs(msg)
}
@@ -823,7 +823,7 @@ func (chain *TestChain) ChanOpenAck(
ch.PortID, ch.ID,
counterpartyCh.ID, counterpartyCh.Version, // testing doesn't use flexible selection
proof, height,
- chain.SenderAccount.GetAddress(),
+ chain.SenderAccount.GetAddress().String(),
)
return chain.sendMsgs(msg)
}
@@ -838,7 +838,7 @@ func (chain *TestChain) ChanOpenConfirm(
msg := channeltypes.NewMsgChannelOpenConfirm(
ch.PortID, ch.ID,
proof, height,
- chain.SenderAccount.GetAddress(),
+ chain.SenderAccount.GetAddress().String(),
)
return chain.sendMsgs(msg)
}
@@ -852,7 +852,7 @@ func (chain *TestChain) ChanCloseInit(
) error {
msg := channeltypes.NewMsgChannelCloseInit(
channel.PortID, channel.ID,
- chain.SenderAccount.GetAddress(),
+ chain.SenderAccount.GetAddress().String(),
)
return chain.sendMsgs(msg)
}
diff --git a/testing/coordinator.go b/testing/coordinator.go
index 80f45157..9bf6f040 100644
--- a/testing/coordinator.go
+++ b/testing/coordinator.go
@@ -245,7 +245,7 @@ func (coord *Coordinator) RecvPacket(
coord.IncrementTime()
coord.CommitBlock(source, counterparty)
- recvMsg := channeltypes.NewMsgRecvPacket(packet, proof, proofHeight, counterparty.SenderAccount.GetAddress())
+ recvMsg := channeltypes.NewMsgRecvPacket(packet, proof, proofHeight, counterparty.SenderAccount.GetAddress().String())
// receive on counterparty and update source client
return coord.SendMsgs(counterparty, source, sourceClient, []sdk.Msg{recvMsg})
@@ -288,7 +288,7 @@ func (coord *Coordinator) AcknowledgePacket(
coord.IncrementTime()
coord.CommitBlock(source, counterparty)
- ackMsg := channeltypes.NewMsgAcknowledgement(packet, ack, proof, proofHeight, source.SenderAccount.GetAddress())
+ ackMsg := channeltypes.NewMsgAcknowledgement(packet, ack, proof, proofHeight, source.SenderAccount.GetAddress().String())
return coord.SendMsgs(source, counterparty, counterpartyClient, []sdk.Msg{ackMsg})
}
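The commit above changes every message constructor to accept the signer as a bech32 string rather than an `sdk.AccAddress`, so callers now pass `GetFromAddress().String()`. A minimal sketch of a caller after this change, assuming a configured `client.Context`; the helper name and identifiers are illustrative, not part of ibc-go:

```go
package example

import (
	"github.com/cosmos/cosmos-sdk/client"
	channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
)

// buildChannelOpenInit is an illustrative helper showing how callers pass the
// signer as a bech32 string after this commit.
func buildChannelOpenInit(
	clientCtx client.Context,
	portID, version, counterpartyPortID string,
	hops []string,
) (*channeltypes.MsgChannelOpenInit, error) {
	// the signer is now a plain string; conversion happens at the call site
	signer := clientCtx.GetFromAddress().String()

	msg := channeltypes.NewMsgChannelOpenInit(
		portID, version, channeltypes.UNORDERED, hops,
		counterpartyPortID, signer,
	)
	// ValidateBasic rejects an empty signer string, as exercised by the
	// "empty signer" test cases in the diff above.
	if err := msg.ValidateBasic(); err != nil {
		return nil, err
	}
	return msg, nil
}
```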
From e98838612a4fa5d240e392aad3409db5ec428f50 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?colin=20axn=C3=A9r?=
<25233464+colin-axner@users.noreply.github.com>
Date: Mon, 12 Apr 2021 12:23:23 +0200
Subject: [PATCH 030/393] Revert application state changes on failed
acknowledgements (#107)
* make changes and start fixing tests
* add msg server test
* change to revert on SendCoins fail
* update interfaces and fix tests
* self review fixes
* Update modules/core/04-channel/types/acknowledgement.go
* Add Changelog
* update docs
* Update CHANGELOG.md
Co-authored-by: Aditya
* add note for async acks
Co-authored-by: Aditya Sripal
---
CHANGELOG.md | 3 +-
docs/custom.md | 55 ++++----
docs/migrations/ibc-migration-043.md | 8 ++
modules/apps/transfer/handler_test.go | 6 +-
modules/apps/transfer/keeper/relay.go | 2 +-
modules/apps/transfer/keeper/relay_test.go | 2 +-
modules/apps/transfer/module.go | 41 +++---
.../core/03-connection/keeper/verify_test.go | 12 +-
modules/core/04-channel/keeper/packet_test.go | 125 +++++++++--------
.../core/04-channel/keeper/timeout_test.go | 54 ++++----
.../core/04-channel/types/acknowledgement.go | 20 ++-
.../04-channel/types/acknowledgement_test.go | 17 ++-
modules/core/05-port/types/module.go | 8 +-
modules/core/exported/channel.go | 7 +
modules/core/keeper/msg_server.go | 11 +-
modules/core/keeper/msg_server_test.go | 127 ++++++++++++------
.../07-tendermint/types/client_state_test.go | 10 +-
testing/chain.go | 9 +-
testing/mock/mock.go | 37 +++--
19 files changed, 322 insertions(+), 232 deletions(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index f05c56b8..edda2d7a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -36,8 +36,9 @@ Ref: https://keepachangelog.com/en/1.0.0/
## [Unreleased]
-## API Breaking
+### API Breaking
+* (modules) [\#107](https://github.com/cosmos/ibc-go/pull/107) Modify OnRecvPacket callback to return an acknowledgement which indicates if it is successful or not. Callback state changes are discarded for unsuccessful acknowledgements only.
* (modules) [\#108](https://github.com/cosmos/ibc-go/pull/108) All message constructors take the signer as a string to prevent upstream bugs. The `String()` function for an SDK Acc Address relies on external context.
### State Machine Breaking
diff --git a/docs/custom.md b/docs/custom.md
index a5f75aad..64d857f2 100644
--- a/docs/custom.md
+++ b/docs/custom.md
@@ -295,49 +295,46 @@ invoked by the IBC module after the packet has been proved valid and correctly p
keepers. Thus, the `OnRecvPacket` callback only needs to worry about making the appropriate state
changes given the packet data without worrying about whether the packet is valid or not.
-Modules may return an acknowledgement as a byte string and return it to the IBC handler.
+Modules may return to the IBC handler an acknowledgement which implements the Acknowledgement interface.
The IBC handler will then commit this acknowledgement of the packet so that a relayer may relay the
acknowledgement back to the sender module.
+The state changes that occurred during this callback will only be written if:
+- the acknowledgement was successful as indicated by the `Success()` function of the acknowledgement
+- the acknowledgement returned is nil, indicating that an asynchronous process is occurring
+
+NOTE: Applications which process asynchronous acknowledgements must handle reverting state changes
+when appropriate. Any state changes that occurred during the `OnRecvPacket` callback will be written
+for asynchronous acknowledgements.
+
```go
OnRecvPacket(
ctx sdk.Context,
packet channeltypes.Packet,
-) (res *sdk.Result, ack []byte, abort error) {
+) ibcexported.Acknowledgement {
// Decode the packet data
packetData := DecodePacketData(packet.Data)
- // do application state changes based on packet data
- // and return result, acknowledgement and abortErr
- // Note: abortErr is only not nil if we need to abort the entire receive packet, and allow a replay of the receive.
- // If the application state change failed but we do not want to replay the packet,
- // simply encode this failure with relevant information in ack and return nil error
- res, ack, abortErr := processPacket(ctx, packet, packetData)
-
- // if we need to abort the entire receive packet, return error
- if abortErr != nil {
- return nil, nil, abortErr
- }
-
- // Encode the ack since IBC expects acknowledgement bytes
- ackBytes := EncodeAcknowledgement(ack)
+ // do application state changes based on packet data and return the acknowledgement
+ // NOTE: The acknowledgement will indicate to the IBC handler if the application
+ // state changes should be written via the `Success()` function. Application state
+ // changes are only written if the acknowledgement is successful or the acknowledgement
+ // returned is nil indicating that an asynchronous acknowledgement will occur.
+ ack := processPacket(ctx, packet, packetData)
- return res, ackBytes, nil
+ return ack
}
```
-::: warning
-`OnRecvPacket` should **only** return an error if we want the entire receive packet execution
-(including the IBC handling) to be reverted. This will allow the packet to be replayed in the case
-that some mistake in the relaying caused the packet processing to fail.
-
-If some application-level error happened while processing the packet data, in most cases, we will
-not want the packet processing to revert. Instead, we may want to encode this failure into the
-acknowledgement and finish processing the packet. This will ensure the packet cannot be replayed,
-and will also allow the sender module to potentially remediate the situation upon receiving the
-acknowledgement. An example of this technique is in the `ibc-transfer` module's
-[`OnRecvPacket`](https://github.com/cosmos/ibc-go/tree/main/modules/apps/transfer/module.go).
-:::
+The Acknowledgement interface:
+```go
+// Acknowledgement defines the interface used to return
+// acknowledgements in the OnRecvPacket callback.
+type Acknowledgement interface {
+ Success() bool
+ Acknowledgement() []byte
+}
+```
### Acknowledgements
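To illustrate the interface added in the documentation above, a minimal custom acknowledgement could look like the following sketch; the `ExampleAck` type and its JSON encoding are hypothetical and not part of ibc-go:

```go
package example

import "encoding/json"

// ExampleAck is a hypothetical acknowledgement type satisfying the
// Acknowledgement interface (Success() bool, Acknowledgement() []byte).
type ExampleAck struct {
	Err string `json:"error,omitempty"`
}

// Success reports whether the packet was processed without error; a false
// value tells the IBC handler to discard the callback's state changes.
func (a ExampleAck) Success() bool {
	return a.Err == ""
}

// Acknowledgement returns the bytes committed on chain and relayed back
// to the sender module.
func (a ExampleAck) Acknowledgement() []byte {
	bz, err := json.Marshal(a)
	if err != nil {
		panic(err)
	}
	return bz
}
```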
diff --git a/docs/migrations/ibc-migration-043.md b/docs/migrations/ibc-migration-043.md
index 93afc79e..6d6b9cff 100644
--- a/docs/migrations/ibc-migration-043.md
+++ b/docs/migrations/ibc-migration-043.md
@@ -91,3 +91,11 @@ REST routes are not supported for these proposals.
## Proto file changes
The gRPC querier service endpoints have changed slightly. The previous files used `v1beta1`, this has been updated to `v1`.
+
+## IBC callback changes
+
+### OnRecvPacket
+
+Application developers need to update their `OnRecvPacket` callback logic.
+
+The `OnRecvPacket` callback has been modified to only return the acknowledgement. The acknowledgement returned must implement the `Acknowledgement` interface. The acknowledgement should indicate if it represents a successful processing of a packet by returning true on `Success()` and false in all other cases. A return value of false on `Success()` will result in all state changes which occurred in the callback being discarded. More information can be found in the [documentation](https://github.com/cosmos/ibc-go/blob/main/docs/custom.md#receiving-packets).
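For application developers, the migration described above amounts to changing the callback signature and returning an acknowledgement instead of `(*sdk.Result, []byte, error)`. A hedged sketch of the new shape, modeled on the transfer module change later in this patch; `MyPacketData`, `ModuleCdc`, and `am.keeper.OnRecvPacket` are placeholders for the application's own types and logic:

```go
// OnRecvPacket after this change: the callback returns an acknowledgement
// whose Success() value decides whether the callback's state changes are kept.
func (am AppModule) OnRecvPacket(
	ctx sdk.Context,
	packet channeltypes.Packet,
) ibcexported.Acknowledgement {
	var data MyPacketData
	if err := ModuleCdc.UnmarshalJSON(packet.GetData(), &data); err != nil {
		// error acknowledgement: Success() is false, so state changes made
		// in this callback are discarded by the IBC handler
		return channeltypes.NewErrorAcknowledgement("cannot unmarshal packet data: " + err.Error())
	}

	if err := am.keeper.OnRecvPacket(ctx, packet, data); err != nil {
		return channeltypes.NewErrorAcknowledgement(err.Error())
	}

	// successful acknowledgement: state changes are written
	return channeltypes.NewResultAcknowledgement([]byte{byte(1)})
}
```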
diff --git a/modules/apps/transfer/handler_test.go b/modules/apps/transfer/handler_test.go
index 03129ba9..976ecb2e 100644
--- a/modules/apps/transfer/handler_test.go
+++ b/modules/apps/transfer/handler_test.go
@@ -52,7 +52,7 @@ func (suite *TransferTestSuite) TestHandleMsgTransfer() {
fungibleTokenPacket := types.NewFungibleTokenPacketData(coinToSendToB.Denom, coinToSendToB.Amount.Uint64(), suite.chainA.SenderAccount.GetAddress().String(), suite.chainB.SenderAccount.GetAddress().String())
packet := channeltypes.NewPacket(fungibleTokenPacket.GetBytes(), 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
ack := channeltypes.NewResultAcknowledgement([]byte{byte(1)})
- err = suite.coordinator.RelayPacket(suite.chainA, suite.chainB, clientA, clientB, packet, ack.GetBytes())
+ err = suite.coordinator.RelayPacket(suite.chainA, suite.chainB, clientA, clientB, packet, ack.Acknowledgement())
suite.Require().NoError(err) // relay committed
// check that voucher exists on chain B
@@ -77,7 +77,7 @@ func (suite *TransferTestSuite) TestHandleMsgTransfer() {
fullDenomPath := types.GetPrefixedDenom(channelOnCForB.PortID, channelOnCForB.ID, voucherDenomTrace.GetFullDenomPath())
fungibleTokenPacket = types.NewFungibleTokenPacketData(voucherDenomTrace.GetFullDenomPath(), coinSentFromAToB.Amount.Uint64(), suite.chainB.SenderAccount.GetAddress().String(), suite.chainC.SenderAccount.GetAddress().String())
packet = channeltypes.NewPacket(fungibleTokenPacket.GetBytes(), 1, channelOnBForC.PortID, channelOnBForC.ID, channelOnCForB.PortID, channelOnCForB.ID, timeoutHeight, 0)
- err = suite.coordinator.RelayPacket(suite.chainB, suite.chainC, clientOnBForC, clientOnCForB, packet, ack.GetBytes())
+ err = suite.coordinator.RelayPacket(suite.chainB, suite.chainC, clientOnBForC, clientOnCForB, packet, ack.Acknowledgement())
suite.Require().NoError(err) // relay committed
coinSentFromBToC := sdk.NewInt64Coin(types.ParseDenomTrace(fullDenomPath).IBCDenom(), 100)
@@ -100,7 +100,7 @@ func (suite *TransferTestSuite) TestHandleMsgTransfer() {
// NOTE: fungible token is prefixed with the full trace in order to verify the packet commitment
fungibleTokenPacket = types.NewFungibleTokenPacketData(fullDenomPath, coinSentFromBToC.Amount.Uint64(), suite.chainC.SenderAccount.GetAddress().String(), suite.chainB.SenderAccount.GetAddress().String())
packet = channeltypes.NewPacket(fungibleTokenPacket.GetBytes(), 1, channelOnCForB.PortID, channelOnCForB.ID, channelOnBForC.PortID, channelOnBForC.ID, timeoutHeight, 0)
- err = suite.coordinator.RelayPacket(suite.chainC, suite.chainB, clientOnCForB, clientOnBForC, packet, ack.GetBytes())
+ err = suite.coordinator.RelayPacket(suite.chainC, suite.chainB, clientOnCForB, clientOnBForC, packet, ack.Acknowledgement())
suite.Require().NoError(err) // relay committed
balance = suite.chainB.App.BankKeeper.GetBalance(suite.chainB.GetContext(), suite.chainB.SenderAccount.GetAddress(), coinSentFromAToB.Denom)
diff --git a/modules/apps/transfer/keeper/relay.go b/modules/apps/transfer/keeper/relay.go
index a4ce016d..7e9bdf37 100644
--- a/modules/apps/transfer/keeper/relay.go
+++ b/modules/apps/transfer/keeper/relay.go
@@ -296,7 +296,7 @@ func (k Keeper) OnRecvPacket(ctx sdk.Context, packet channeltypes.Packet, data t
if err := k.bankKeeper.SendCoinsFromModuleToAccount(
ctx, types.ModuleName, receiver, sdk.NewCoins(voucher),
); err != nil {
- panic(fmt.Sprintf("unable to send coins from module to account despite previously minting coins to module account: %v", err))
+ return err
}
defer func() {
diff --git a/modules/apps/transfer/keeper/relay_test.go b/modules/apps/transfer/keeper/relay_test.go
index e67d2f8a..020b132c 100644
--- a/modules/apps/transfer/keeper/relay_test.go
+++ b/modules/apps/transfer/keeper/relay_test.go
@@ -198,7 +198,7 @@ func (suite *KeeperTestSuite) TestOnRecvPacket() {
fungibleTokenPacket := types.NewFungibleTokenPacketData(coinFromBToA.Denom, coinFromBToA.Amount.Uint64(), suite.chainB.SenderAccount.GetAddress().String(), suite.chainA.SenderAccount.GetAddress().String())
packet := channeltypes.NewPacket(fungibleTokenPacket.GetBytes(), 1, channelB.PortID, channelB.ID, channelA.PortID, channelA.ID, clienttypes.NewHeight(0, 110), 0)
ack := channeltypes.NewResultAcknowledgement([]byte{byte(1)})
- err = suite.coordinator.RelayPacket(suite.chainB, suite.chainA, clientB, clientA, packet, ack.GetBytes())
+ err = suite.coordinator.RelayPacket(suite.chainB, suite.chainA, clientB, clientA, packet, ack.Acknowledgement())
suite.Require().NoError(err) // relay committed
seq++
diff --git a/modules/apps/transfer/module.go b/modules/apps/transfer/module.go
index f4620ee9..a8080aad 100644
--- a/modules/apps/transfer/module.go
+++ b/modules/apps/transfer/module.go
@@ -7,13 +7,6 @@ import (
"math"
"math/rand"
- "github.com/grpc-ecosystem/grpc-gateway/runtime"
-
- "github.com/gorilla/mux"
- "github.com/spf13/cobra"
-
- abci "github.com/tendermint/tendermint/abci/types"
-
"github.com/cosmos/cosmos-sdk/client"
"github.com/cosmos/cosmos-sdk/codec"
codectypes "github.com/cosmos/cosmos-sdk/codec/types"
@@ -22,6 +15,11 @@ import (
"github.com/cosmos/cosmos-sdk/types/module"
simtypes "github.com/cosmos/cosmos-sdk/types/simulation"
capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"
+ "github.com/gorilla/mux"
+ "github.com/grpc-ecosystem/grpc-gateway/runtime"
+ "github.com/spf13/cobra"
+ abci "github.com/tendermint/tendermint/abci/types"
+
"github.com/cosmos/ibc-go/modules/apps/transfer/client/cli"
"github.com/cosmos/ibc-go/modules/apps/transfer/keeper"
"github.com/cosmos/ibc-go/modules/apps/transfer/simulation"
@@ -29,6 +27,7 @@ import (
channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
porttypes "github.com/cosmos/ibc-go/modules/core/05-port/types"
host "github.com/cosmos/ibc-go/modules/core/24-host"
+ ibcexported "github.com/cosmos/ibc-go/modules/core/exported"
)
var (
@@ -318,21 +317,27 @@ func (am AppModule) OnChanCloseConfirm(
return nil
}
-// OnRecvPacket implements the IBCModule interface
+// OnRecvPacket implements the IBCModule interface. A successful acknowledgement
+// is returned if the packet data is successfully decoded and the receive application
+// logic returns without error.
func (am AppModule) OnRecvPacket(
ctx sdk.Context,
packet channeltypes.Packet,
-) (*sdk.Result, []byte, error) {
+) ibcexported.Acknowledgement {
+ ack := channeltypes.NewResultAcknowledgement([]byte{byte(1)})
+
var data types.FungibleTokenPacketData
if err := types.ModuleCdc.UnmarshalJSON(packet.GetData(), &data); err != nil {
- return nil, nil, sdkerrors.Wrapf(sdkerrors.ErrUnknownRequest, "cannot unmarshal ICS-20 transfer packet data: %s", err.Error())
+ ack = channeltypes.NewErrorAcknowledgement(fmt.Sprintf("cannot unmarshal ICS-20 transfer packet data: %s", err.Error()))
}
- acknowledgement := channeltypes.NewResultAcknowledgement([]byte{byte(1)})
-
- err := am.keeper.OnRecvPacket(ctx, packet, data)
- if err != nil {
- acknowledgement = channeltypes.NewErrorAcknowledgement(err.Error())
+ // only attempt the application logic if the packet data
+ // was successfully decoded
+ if ack.Success() {
+ err := am.keeper.OnRecvPacket(ctx, packet, data)
+ if err != nil {
+ ack = channeltypes.NewErrorAcknowledgement(err.Error())
+ }
}
ctx.EventManager().EmitEvent(
@@ -342,14 +347,12 @@ func (am AppModule) OnRecvPacket(
sdk.NewAttribute(types.AttributeKeyReceiver, data.Receiver),
sdk.NewAttribute(types.AttributeKeyDenom, data.Denom),
sdk.NewAttribute(types.AttributeKeyAmount, fmt.Sprintf("%d", data.Amount)),
- sdk.NewAttribute(types.AttributeKeyAckSuccess, fmt.Sprintf("%t", err != nil)),
+ sdk.NewAttribute(types.AttributeKeyAckSuccess, fmt.Sprintf("%t", ack.Success())),
),
)
// NOTE: acknowledgement will be written synchronously during IBC handler execution.
- return &sdk.Result{
- Events: ctx.EventManager().Events().ToABCIEvents(),
- }, acknowledgement.GetBytes(), nil
+ return ack
}
// OnAcknowledgementPacket implements the IBCModule interface
diff --git a/modules/core/03-connection/keeper/verify_test.go b/modules/core/03-connection/keeper/verify_test.go
index d11db9d7..9afd4816 100644
--- a/modules/core/03-connection/keeper/verify_test.go
+++ b/modules/core/03-connection/keeper/verify_test.go
@@ -283,7 +283,7 @@ func (suite *KeeperTestSuite) TestVerifyPacketCommitment() {
connection.ClientId = ibctesting.InvalidID
}
- packet := channeltypes.NewPacket(ibctesting.TestHash, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, defaultTimeoutHeight, 0)
+ packet := channeltypes.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, defaultTimeoutHeight, 0)
err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
suite.Require().NoError(err)
@@ -344,7 +344,7 @@ func (suite *KeeperTestSuite) TestVerifyPacketAcknowledgement() {
}
// send and receive packet
- packet := channeltypes.NewPacket(ibctesting.TestHash, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, defaultTimeoutHeight, 0)
+ packet := channeltypes.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, defaultTimeoutHeight, 0)
err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
suite.Require().NoError(err)
@@ -360,12 +360,12 @@ func (suite *KeeperTestSuite) TestVerifyPacketAcknowledgement() {
ack := ibcmock.MockAcknowledgement
if tc.changeAcknowledgement {
- ack = []byte(ibctesting.InvalidID)
+ ack = ibcmock.MockFailAcknowledgement
}
err = suite.chainA.App.IBCKeeper.ConnectionKeeper.VerifyPacketAcknowledgement(
suite.chainA.GetContext(), connection, malleateHeight(proofHeight, tc.heightDiff), proof,
- packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence(), ack,
+ packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence(), ack.Acknowledgement(),
)
if tc.expPass {
@@ -412,7 +412,7 @@ func (suite *KeeperTestSuite) TestVerifyPacketReceiptAbsence() {
}
// send, only receive if specified
- packet := channeltypes.NewPacket(ibctesting.TestHash, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, defaultTimeoutHeight, 0)
+ packet := channeltypes.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, defaultTimeoutHeight, 0)
err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
suite.Require().NoError(err)
@@ -481,7 +481,7 @@ func (suite *KeeperTestSuite) TestVerifyNextSequenceRecv() {
}
// send and receive packet
- packet := channeltypes.NewPacket(ibctesting.TestHash, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, defaultTimeoutHeight, 0)
+ packet := channeltypes.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, defaultTimeoutHeight, 0)
err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
suite.Require().NoError(err)
diff --git a/modules/core/04-channel/keeper/packet_test.go b/modules/core/04-channel/keeper/packet_test.go
index aa9bc316..9a503e34 100644
--- a/modules/core/04-channel/keeper/packet_test.go
+++ b/modules/core/04-channel/keeper/packet_test.go
@@ -14,7 +14,6 @@ import (
)
var (
- validPacketData = []byte("VALID PACKET DATA")
disabledTimeoutTimestamp = uint64(0)
disabledTimeoutHeight = clienttypes.ZeroHeight()
timeoutHeight = clienttypes.NewHeight(0, 100)
@@ -39,23 +38,23 @@ func (suite *KeeperTestSuite) TestSendPacket() {
testCases := []testCase{
{"success: UNORDERED channel", func() {
_, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
}, true},
{"success: ORDERED channel", func() {
_, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED)
- packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
}, true},
{"sending packet out of order on UNORDERED channel", func() {
// setup creates an unordered channel
_, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- packet = types.NewPacket(validPacketData, 5, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ packet = types.NewPacket(ibctesting.MockPacketData, 5, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
}, false},
{"sending packet out of order on ORDERED channel", func() {
_, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED)
- packet = types.NewPacket(validPacketData, 5, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ packet = types.NewPacket(ibctesting.MockPacketData, 5, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
}, false},
{"packet basic validation failed, empty packet data", func() {
@@ -66,12 +65,12 @@ func (suite *KeeperTestSuite) TestSendPacket() {
{"channel not found", func() {
// use wrong channel naming
_, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- packet = types.NewPacket(validPacketData, 1, ibctesting.InvalidID, ibctesting.InvalidID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, ibctesting.InvalidID, ibctesting.InvalidID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
}, false},
{"channel closed", func() {
_, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
err := suite.coordinator.SetChannelClosed(suite.chainA, suite.chainB, channelA)
suite.Require().NoError(err)
@@ -79,13 +78,13 @@ func (suite *KeeperTestSuite) TestSendPacket() {
{"packet dest port ≠ channel counterparty port", func() {
_, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
// use wrong port for dest
- packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, ibctesting.InvalidID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, ibctesting.InvalidID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
}, false},
{"packet dest channel ID ≠ channel counterparty channel ID", func() {
_, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
// use wrong channel for dest
- packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, ibctesting.InvalidID, timeoutHeight, disabledTimeoutTimestamp)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, ibctesting.InvalidID, timeoutHeight, disabledTimeoutTimestamp)
channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
}, false},
{"connection not found", func() {
@@ -97,7 +96,7 @@ func (suite *KeeperTestSuite) TestSendPacket() {
channelA.PortID, channelA.ID,
types.NewChannel(types.OPEN, types.ORDERED, types.NewCounterparty(channelB.PortID, channelB.ID), []string{connIDA}, channelA.Version),
)
- packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
suite.chainA.CreateChannelCapability(channelA.PortID, channelA.ID)
channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
}, false},
@@ -109,7 +108,7 @@ func (suite *KeeperTestSuite) TestSendPacket() {
connection.ClientId = ibctesting.InvalidID
suite.chainA.App.IBCKeeper.ConnectionKeeper.SetConnection(suite.chainA.GetContext(), connA.ID, connection)
- packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
}, false},
{"client state is frozen", func() {
@@ -124,7 +123,7 @@ func (suite *KeeperTestSuite) TestSendPacket() {
cs.FrozenHeight = clienttypes.NewHeight(0, 1)
suite.chainA.App.IBCKeeper.ClientKeeper.SetClientState(suite.chainA.GetContext(), connection.ClientId, cs)
- packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
}, false},
@@ -132,7 +131,7 @@ func (suite *KeeperTestSuite) TestSendPacket() {
clientA, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
// use client state latest height for timeout
clientState := suite.chainA.GetClientState(clientA)
- packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clientState.GetLatestHeight().(clienttypes.Height), disabledTimeoutTimestamp)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clientState.GetLatestHeight().(clienttypes.Height), disabledTimeoutTimestamp)
channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
}, false},
{"timeout timestamp passed", func() {
@@ -143,14 +142,14 @@ func (suite *KeeperTestSuite) TestSendPacket() {
timestamp, err := suite.chainA.App.IBCKeeper.ConnectionKeeper.GetTimestampAtHeight(suite.chainA.GetContext(), connection, clientState.GetLatestHeight())
suite.Require().NoError(err)
- packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, disabledTimeoutHeight, timestamp)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, disabledTimeoutHeight, timestamp)
channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
}, false},
{"next sequence send not found", func() {
_, _, connA, connB := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
channelA := suite.chainA.NextTestChannel(connA, ibctesting.TransferPort)
channelB := suite.chainB.NextTestChannel(connB, ibctesting.TransferPort)
- packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
// manually creating channel prevents next sequence from being set
suite.chainA.App.IBCKeeper.ChannelKeeper.SetChannel(
suite.chainA.GetContext(),
@@ -162,13 +161,13 @@ func (suite *KeeperTestSuite) TestSendPacket() {
}, false},
{"next sequence wrong", func() {
_, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
suite.chainA.App.IBCKeeper.ChannelKeeper.SetNextSequenceSend(suite.chainA.GetContext(), channelA.PortID, channelA.ID, 5)
channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
}, false},
{"channel capability not found", func() {
_, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
channelCap = capabilitytypes.NewCapability(5)
}, false},
}
@@ -204,7 +203,7 @@ func (suite *KeeperTestSuite) TestRecvPacket() {
testCases := []testCase{
{"success: ORDERED channel", func() {
_, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED)
- packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
suite.Require().NoError(err)
channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
@@ -212,7 +211,7 @@ func (suite *KeeperTestSuite) TestRecvPacket() {
{"success UNORDERED channel", func() {
// setup uses an UNORDERED channel
_, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
suite.Require().NoError(err)
channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
@@ -220,13 +219,13 @@ func (suite *KeeperTestSuite) TestRecvPacket() {
{"success with out of order packet: UNORDERED channel", func() {
// setup uses an UNORDERED channel
_, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
// send 2 packets
err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
suite.Require().NoError(err)
// set sequence to 2
- packet = types.NewPacket(validPacketData, 2, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ packet = types.NewPacket(ibctesting.MockPacketData, 2, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
err = suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
suite.Require().NoError(err)
// attempts to receive packet 2 without receiving packet 1
@@ -234,13 +233,13 @@ func (suite *KeeperTestSuite) TestRecvPacket() {
}, true},
{"out of order packet failure with ORDERED channel", func() {
_, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED)
- packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
// send 2 packets
err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
suite.Require().NoError(err)
// set sequence to 2
- packet = types.NewPacket(validPacketData, 2, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ packet = types.NewPacket(ibctesting.MockPacketData, 2, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
err = suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
suite.Require().NoError(err)
// attempts to receive packet 2 without receiving packet 1
@@ -249,12 +248,12 @@ func (suite *KeeperTestSuite) TestRecvPacket() {
{"channel not found", func() {
// use wrong channel naming
_, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, ibctesting.InvalidID, ibctesting.InvalidID, timeoutHeight, disabledTimeoutTimestamp)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, ibctesting.InvalidID, ibctesting.InvalidID, timeoutHeight, disabledTimeoutTimestamp)
channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
}, false},
{"channel not open", func() {
_, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
err := suite.coordinator.SetChannelClosed(suite.chainB, suite.chainA, channelB)
suite.Require().NoError(err)
@@ -262,7 +261,7 @@ func (suite *KeeperTestSuite) TestRecvPacket() {
}, false},
{"capability cannot authenticate", func() {
_, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED)
- packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
suite.Require().NoError(err)
channelCap = capabilitytypes.NewCapability(3)
@@ -270,13 +269,13 @@ func (suite *KeeperTestSuite) TestRecvPacket() {
{"packet source port ≠ channel counterparty port", func() {
_, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
// use wrong port for dest
- packet = types.NewPacket(validPacketData, 1, ibctesting.InvalidID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, ibctesting.InvalidID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
}, false},
{"packet source channel ID ≠ channel counterparty channel ID", func() {
_, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
// use wrong port for dest
- packet = types.NewPacket(validPacketData, 1, channelA.PortID, ibctesting.InvalidID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, ibctesting.InvalidID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
}, false},
{"connection not found", func() {
@@ -288,7 +287,7 @@ func (suite *KeeperTestSuite) TestRecvPacket() {
channelB.PortID, channelB.ID,
types.NewChannel(types.OPEN, types.ORDERED, types.NewCounterparty(channelA.PortID, channelA.ID), []string{connIDB}, channelB.Version),
)
- packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
suite.chainB.CreateChannelCapability(channelB.PortID, channelB.ID)
channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
}, false},
@@ -306,18 +305,18 @@ func (suite *KeeperTestSuite) TestRecvPacket() {
channelB.PortID, channelB.ID,
types.NewChannel(types.OPEN, types.ORDERED, types.NewCounterparty(channelA.PortID, channelA.ID), []string{connB.ID}, channelB.Version),
)
- packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
suite.chainB.CreateChannelCapability(channelB.PortID, channelB.ID)
channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
}, false},
{"timeout height passed", func() {
_, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), disabledTimeoutTimestamp)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), disabledTimeoutTimestamp)
channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
}, false},
{"timeout timestamp passed", func() {
_, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, disabledTimeoutHeight, uint64(suite.chainB.GetContext().BlockTime().UnixNano()))
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, disabledTimeoutHeight, uint64(suite.chainB.GetContext().BlockTime().UnixNano()))
channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
}, false},
{"next receive sequence is not found", func() {
@@ -332,17 +331,17 @@ func (suite *KeeperTestSuite) TestRecvPacket() {
types.NewChannel(types.OPEN, types.ORDERED, types.NewCounterparty(channelA.PortID, channelA.ID), []string{connB.ID}, channelB.Version),
)
- packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
// manually set packet commitment
- suite.chainA.App.IBCKeeper.ChannelKeeper.SetPacketCommitment(suite.chainA.GetContext(), channelA.PortID, channelA.ID, packet.GetSequence(), ibctesting.TestHash)
+ suite.chainA.App.IBCKeeper.ChannelKeeper.SetPacketCommitment(suite.chainA.GetContext(), channelA.PortID, channelA.ID, packet.GetSequence(), ibctesting.MockPacketData)
suite.chainB.CreateChannelCapability(channelB.PortID, channelB.ID)
channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
}, false},
{"receipt already stored", func() {
_, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
suite.chainB.App.IBCKeeper.ChannelKeeper.SetPacketReceipt(suite.chainB.GetContext(), channelB.PortID, channelB.ID, 1)
channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
@@ -350,7 +349,7 @@ func (suite *KeeperTestSuite) TestRecvPacket() {
{"validation failed", func() {
// packet commitment not set resulting in invalid proof
_, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
}, false},
}
@@ -403,8 +402,8 @@ func (suite *KeeperTestSuite) TestWriteAcknowledgement() {
"success",
func() {
_, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
- ack = ibctesting.TestHash
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ ack = ibctesting.MockAcknowledgement
channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
},
true,
@@ -412,14 +411,14 @@ func (suite *KeeperTestSuite) TestWriteAcknowledgement() {
{"channel not found", func() {
// use wrong channel naming
_, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, ibctesting.InvalidID, ibctesting.InvalidID, timeoutHeight, disabledTimeoutTimestamp)
- ack = ibctesting.TestHash
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, ibctesting.InvalidID, ibctesting.InvalidID, timeoutHeight, disabledTimeoutTimestamp)
+ ack = ibctesting.MockAcknowledgement
channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
}, false},
{"channel not open", func() {
_, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
- ack = ibctesting.TestHash
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ ack = ibctesting.MockAcknowledgement
err := suite.coordinator.SetChannelClosed(suite.chainB, suite.chainA, channelB)
suite.Require().NoError(err)
@@ -429,8 +428,8 @@ func (suite *KeeperTestSuite) TestWriteAcknowledgement() {
"capability authentication failed",
func() {
_, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
- ack = ibctesting.TestHash
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ ack = ibctesting.MockAcknowledgement
channelCap = capabilitytypes.NewCapability(3)
},
false,
@@ -439,8 +438,8 @@ func (suite *KeeperTestSuite) TestWriteAcknowledgement() {
"no-op, already acked",
func() {
_, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
- ack = ibctesting.TestHash
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ ack = ibctesting.MockAcknowledgement
suite.chainB.App.IBCKeeper.ChannelKeeper.SetPacketAcknowledgement(suite.chainB.GetContext(), packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence(), ack)
channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
},
@@ -450,7 +449,7 @@ func (suite *KeeperTestSuite) TestWriteAcknowledgement() {
"empty acknowledgement",
func() {
_, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
ack = nil
channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
},
@@ -487,7 +486,7 @@ func (suite *KeeperTestSuite) TestAcknowledgePacket() {
testCases := []testCase{
{"success on ordered channel", func() {
clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED)
- packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
// create packet commitment
err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
suite.Require().NoError(err)
@@ -501,7 +500,7 @@ func (suite *KeeperTestSuite) TestAcknowledgePacket() {
{"success on unordered channel", func() {
// setup uses an UNORDERED channel
clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
// create packet commitment
err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
@@ -516,11 +515,11 @@ func (suite *KeeperTestSuite) TestAcknowledgePacket() {
{"channel not found", func() {
// use wrong channel naming
_, _, _, _, _, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- packet = types.NewPacket(validPacketData, 1, ibctesting.InvalidID, ibctesting.InvalidID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, ibctesting.InvalidID, ibctesting.InvalidID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
}, false},
{"channel not open", func() {
_, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
err := suite.coordinator.SetChannelClosed(suite.chainA, suite.chainB, channelA)
suite.Require().NoError(err)
@@ -528,7 +527,7 @@ func (suite *KeeperTestSuite) TestAcknowledgePacket() {
}, false},
{"capability authentication failed", func() {
clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED)
- packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
// create packet commitment
err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
suite.Require().NoError(err)
@@ -542,13 +541,13 @@ func (suite *KeeperTestSuite) TestAcknowledgePacket() {
{"packet destination port ≠ channel counterparty port", func() {
_, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
// use wrong port for dest
- packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, ibctesting.InvalidID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, ibctesting.InvalidID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
}, false},
{"packet destination channel ID ≠ channel counterparty channel ID", func() {
_, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
// use wrong channel for dest
- packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, ibctesting.InvalidID, timeoutHeight, disabledTimeoutTimestamp)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, ibctesting.InvalidID, timeoutHeight, disabledTimeoutTimestamp)
channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
}, false},
{"connection not found", func() {
@@ -560,7 +559,7 @@ func (suite *KeeperTestSuite) TestAcknowledgePacket() {
channelB.PortID, channelB.ID,
types.NewChannel(types.OPEN, types.ORDERED, types.NewCounterparty(channelA.PortID, channelA.ID), []string{connIDB}, channelB.Version),
)
- packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
suite.chainA.CreateChannelCapability(channelA.PortID, channelA.ID)
channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
}, false},
@@ -578,20 +577,20 @@ func (suite *KeeperTestSuite) TestAcknowledgePacket() {
channelA.PortID, channelA.ID,
types.NewChannel(types.OPEN, types.ORDERED, types.NewCounterparty(channelB.PortID, channelB.ID), []string{connA.ID}, channelA.Version),
)
- packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
suite.chainA.CreateChannelCapability(channelA.PortID, channelA.ID)
channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
}, false},
{"packet hasn't been sent", func() {
// packet commitment never written
_, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
}, false},
{"packet ack verification failed", func() {
// ack never written
_, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
// create packet commitment
suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
@@ -601,7 +600,7 @@ func (suite *KeeperTestSuite) TestAcknowledgePacket() {
_, _, connA, connB := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
channelA := suite.chainA.NextTestChannel(connA, ibctesting.TransferPort)
channelB := suite.chainB.NextTestChannel(connB, ibctesting.TransferPort)
- packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
// manually creating channel prevents next sequence acknowledgement from being set
suite.chainA.App.IBCKeeper.ChannelKeeper.SetChannel(
suite.chainA.GetContext(),
@@ -609,16 +608,16 @@ func (suite *KeeperTestSuite) TestAcknowledgePacket() {
types.NewChannel(types.OPEN, types.ORDERED, types.NewCounterparty(channelB.PortID, channelB.ID), []string{connA.ID}, channelA.Version),
)
// manually set packet commitment
- suite.chainA.App.IBCKeeper.ChannelKeeper.SetPacketCommitment(suite.chainA.GetContext(), channelA.PortID, channelA.ID, packet.GetSequence(), ibctesting.TestHash)
+ suite.chainA.App.IBCKeeper.ChannelKeeper.SetPacketCommitment(suite.chainA.GetContext(), channelA.PortID, channelA.ID, packet.GetSequence(), ibctesting.MockPacketData)
// manually set packet acknowledgement and capability
- suite.chainB.App.IBCKeeper.ChannelKeeper.SetPacketAcknowledgement(suite.chainB.GetContext(), channelB.PortID, channelB.ID, packet.GetSequence(), ibctesting.TestHash)
+ suite.chainB.App.IBCKeeper.ChannelKeeper.SetPacketAcknowledgement(suite.chainB.GetContext(), channelB.PortID, channelB.ID, packet.GetSequence(), ibctesting.MockAcknowledgement)
suite.chainA.CreateChannelCapability(channelA.PortID, channelA.ID)
channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
}, false},
{"next ack sequence mismatch", func() {
clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED)
- packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
// create packet commitment
err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
suite.Require().NoError(err)
@@ -642,7 +641,7 @@ func (suite *KeeperTestSuite) TestAcknowledgePacket() {
packetKey := host.PacketAcknowledgementKey(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence())
proof, proofHeight := suite.chainB.QueryProof(packetKey)
- err := suite.chainA.App.IBCKeeper.ChannelKeeper.AcknowledgePacket(suite.chainA.GetContext(), channelCap, packet, ack, proof, proofHeight)
+ err := suite.chainA.App.IBCKeeper.ChannelKeeper.AcknowledgePacket(suite.chainA.GetContext(), channelCap, packet, ack.Acknowledgement(), proof, proofHeight)
pc := suite.chainA.App.IBCKeeper.ChannelKeeper.GetPacketCommitment(suite.chainA.GetContext(), packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence())
channelA, _ := suite.chainA.App.IBCKeeper.ChannelKeeper.GetChannel(suite.chainA.GetContext(), packet.GetSourcePort(), packet.GetSourceChannel())
diff --git a/modules/core/04-channel/keeper/timeout_test.go b/modules/core/04-channel/keeper/timeout_test.go
index 4c286690..94c4b6a0 100644
--- a/modules/core/04-channel/keeper/timeout_test.go
+++ b/modules/core/04-channel/keeper/timeout_test.go
@@ -26,7 +26,7 @@ func (suite *KeeperTestSuite) TestTimeoutPacket() {
ordered = true
clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED)
- packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), uint64(suite.chainB.GetContext().BlockTime().UnixNano()))
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), uint64(suite.chainB.GetContext().BlockTime().UnixNano()))
suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
// need to update chainA's client representing chainB to prove missing ack
suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
@@ -35,7 +35,7 @@ func (suite *KeeperTestSuite) TestTimeoutPacket() {
ordered = false
clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), disabledTimeoutTimestamp)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), disabledTimeoutTimestamp)
suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
// need to update chainA's client representing chainB to prove missing ack
suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
@@ -43,11 +43,11 @@ func (suite *KeeperTestSuite) TestTimeoutPacket() {
{"channel not found", func() {
// use wrong channel naming
_, _, _, _, _, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- packet = types.NewPacket(validPacketData, 1, ibctesting.InvalidID, ibctesting.InvalidID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, ibctesting.InvalidID, ibctesting.InvalidID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
}, false},
{"channel not open", func() {
_, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
err := suite.coordinator.SetChannelClosed(suite.chainA, suite.chainB, channelA)
suite.Require().NoError(err)
@@ -55,12 +55,12 @@ func (suite *KeeperTestSuite) TestTimeoutPacket() {
{"packet destination port ≠ channel counterparty port", func() {
_, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
// use wrong port for dest
- packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, ibctesting.InvalidID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, ibctesting.InvalidID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
}, false},
{"packet destination channel ID ≠ channel counterparty channel ID", func() {
_, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
// use wrong channel for dest
- packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, ibctesting.InvalidID, timeoutHeight, disabledTimeoutTimestamp)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, ibctesting.InvalidID, timeoutHeight, disabledTimeoutTimestamp)
}, false},
{"connection not found", func() {
channelA := ibctesting.TestChannel{PortID: portID, ID: channelIDA}
@@ -71,11 +71,11 @@ func (suite *KeeperTestSuite) TestTimeoutPacket() {
channelA.PortID, channelA.ID,
types.NewChannel(types.OPEN, types.ORDERED, types.NewCounterparty(channelB.PortID, channelB.ID), []string{connIDA}, channelA.Version),
)
- packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
}, false},
{"timeout", func() {
clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED)
- packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
}, false},
@@ -84,13 +84,13 @@ func (suite *KeeperTestSuite) TestTimeoutPacket() {
nextSeqRecv = 2
clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED)
- packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, uint64(suite.chainB.GetContext().BlockTime().UnixNano()))
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, uint64(suite.chainB.GetContext().BlockTime().UnixNano()))
suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
}, false},
{"packet hasn't been sent", func() {
clientA, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED)
- packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, uint64(suite.chainB.GetContext().BlockTime().UnixNano()))
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, uint64(suite.chainB.GetContext().BlockTime().UnixNano()))
suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
}, false},
{"next seq receive verification failed", func() {
@@ -98,7 +98,7 @@ func (suite *KeeperTestSuite) TestTimeoutPacket() {
ordered = false
clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED)
- packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), disabledTimeoutTimestamp)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), disabledTimeoutTimestamp)
suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
}, false},
@@ -107,7 +107,7 @@ func (suite *KeeperTestSuite) TestTimeoutPacket() {
ordered = true
clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), disabledTimeoutTimestamp)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), disabledTimeoutTimestamp)
suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
}, false},
@@ -156,7 +156,7 @@ func (suite *KeeperTestSuite) TestTimeoutExecuted() {
testCases := []testCase{
{"success ORDERED", func() {
_, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED)
- packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), uint64(suite.chainB.GetContext().BlockTime().UnixNano()))
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), uint64(suite.chainB.GetContext().BlockTime().UnixNano()))
suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
chanCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
@@ -164,11 +164,11 @@ func (suite *KeeperTestSuite) TestTimeoutExecuted() {
{"channel not found", func() {
// use wrong channel naming
_, _, _, _, _, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- packet = types.NewPacket(validPacketData, 1, ibctesting.InvalidID, ibctesting.InvalidID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, ibctesting.InvalidID, ibctesting.InvalidID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
}, false},
{"incorrect capability", func() {
_, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED)
- packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), uint64(suite.chainB.GetContext().BlockTime().UnixNano()))
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), uint64(suite.chainB.GetContext().BlockTime().UnixNano()))
suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
chanCap = capabilitytypes.NewCapability(100)
@@ -209,7 +209,7 @@ func (suite *KeeperTestSuite) TestTimeoutOnClose() {
{"success: ORDERED", func() {
ordered = true
clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED)
- packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), uint64(suite.chainB.GetContext().BlockTime().UnixNano()))
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), uint64(suite.chainB.GetContext().BlockTime().UnixNano()))
suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
suite.coordinator.SetChannelClosed(suite.chainB, suite.chainA, channelB)
// need to update chainA's client representing chainB to prove missing ack
@@ -220,7 +220,7 @@ func (suite *KeeperTestSuite) TestTimeoutOnClose() {
{"success: UNORDERED", func() {
ordered = false
clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), disabledTimeoutTimestamp)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), disabledTimeoutTimestamp)
suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
suite.coordinator.SetChannelClosed(suite.chainB, suite.chainA, channelB)
// need to update chainA's client representing chainB to prove missing ack
@@ -231,18 +231,18 @@ func (suite *KeeperTestSuite) TestTimeoutOnClose() {
{"channel not found", func() {
// use wrong channel naming
_, _, _, _, _, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- packet = types.NewPacket(validPacketData, 1, ibctesting.InvalidID, ibctesting.InvalidID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, ibctesting.InvalidID, ibctesting.InvalidID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
}, false},
{"packet dest port ≠ channel counterparty port", func() {
_, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
// use wrong port for dest
- packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, ibctesting.InvalidID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, ibctesting.InvalidID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
chanCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
}, false},
{"packet dest channel ID ≠ channel counterparty channel ID", func() {
_, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
// use wrong channel for dest
- packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, ibctesting.InvalidID, timeoutHeight, disabledTimeoutTimestamp)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, ibctesting.InvalidID, timeoutHeight, disabledTimeoutTimestamp)
chanCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
}, false},
{"connection not found", func() {
@@ -254,7 +254,7 @@ func (suite *KeeperTestSuite) TestTimeoutOnClose() {
channelA.PortID, channelA.ID,
types.NewChannel(types.OPEN, types.ORDERED, types.NewCounterparty(channelB.PortID, channelB.ID), []string{connIDA}, channelA.Version),
)
- packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
// create chancap
suite.chainA.CreateChannelCapability(channelA.PortID, channelA.ID)
@@ -262,14 +262,14 @@ func (suite *KeeperTestSuite) TestTimeoutOnClose() {
}, false},
{"packet hasn't been sent", func() {
_, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED)
- packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), uint64(suite.chainB.GetContext().BlockTime().UnixNano()))
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), uint64(suite.chainB.GetContext().BlockTime().UnixNano()))
chanCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
}, false},
{"packet already received", func() {
nextSeqRecv = 2
ordered = true
clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED)
- packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), uint64(suite.chainB.GetContext().BlockTime().UnixNano()))
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), uint64(suite.chainB.GetContext().BlockTime().UnixNano()))
suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
suite.coordinator.SetChannelClosed(suite.chainB, suite.chainA, channelB)
// need to update chainA's client representing chainB to prove missing ack
@@ -280,7 +280,7 @@ func (suite *KeeperTestSuite) TestTimeoutOnClose() {
{"channel verification failed", func() {
ordered = true
_, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED)
- packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), uint64(suite.chainB.GetContext().BlockTime().UnixNano()))
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), uint64(suite.chainB.GetContext().BlockTime().UnixNano()))
suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
chanCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
}, false},
@@ -288,7 +288,7 @@ func (suite *KeeperTestSuite) TestTimeoutOnClose() {
// set ordered to false providing the wrong proof for ORDERED case
ordered = false
clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED)
- packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), uint64(suite.chainB.GetContext().BlockTime().UnixNano()))
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), uint64(suite.chainB.GetContext().BlockTime().UnixNano()))
suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
suite.coordinator.SetChannelClosed(suite.chainB, suite.chainA, channelB)
suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
@@ -298,7 +298,7 @@ func (suite *KeeperTestSuite) TestTimeoutOnClose() {
// set ordered to true providing the wrong proof for UNORDERED case
ordered = true
clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), disabledTimeoutTimestamp)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), disabledTimeoutTimestamp)
suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
suite.coordinator.SetChannelClosed(suite.chainB, suite.chainA, channelB)
suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
@@ -307,7 +307,7 @@ func (suite *KeeperTestSuite) TestTimeoutOnClose() {
{"channel capability not found", func() {
ordered = true
clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED)
- packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), uint64(suite.chainB.GetContext().BlockTime().UnixNano()))
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), uint64(suite.chainB.GetContext().BlockTime().UnixNano()))
suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
suite.coordinator.SetChannelClosed(suite.chainB, suite.chainA, channelB)
// need to update chainA's client representing chainB to prove missing ack
diff --git a/modules/core/04-channel/types/acknowledgement.go b/modules/core/04-channel/types/acknowledgement.go
index a3f677ab..cfc088ab 100644
--- a/modules/core/04-channel/types/acknowledgement.go
+++ b/modules/core/04-channel/types/acknowledgement.go
@@ -1,6 +1,7 @@
package types
import (
+ "reflect"
"strings"
sdk "github.com/cosmos/cosmos-sdk/types"
@@ -27,11 +28,6 @@ func NewErrorAcknowledgement(err string) Acknowledgement {
}
}
-// GetBytes is a helper for serialising acknowledgements
-func (ack Acknowledgement) GetBytes() []byte {
- return sdk.MustSortJSON(SubModuleCdc.MustMarshalJSON(&ack))
-}
-
// ValidateBasic performs a basic validation of the acknowledgement
func (ack Acknowledgement) ValidateBasic() error {
switch resp := ack.Response.(type) {
@@ -43,8 +39,22 @@ func (ack Acknowledgement) ValidateBasic() error {
if strings.TrimSpace(resp.Error) == "" {
return sdkerrors.Wrap(ErrInvalidAcknowledgement, "acknowledgement error cannot be empty")
}
+
default:
return sdkerrors.Wrapf(ErrInvalidAcknowledgement, "unsupported acknowledgement response field type %T", resp)
}
return nil
}
+
+// Success implements the Acknowledgement interface. The acknowledgement is
+// considered successful if it is a ResultAcknowledgement. Otherwise it is
+// considered a failed acknowledgement.
+func (ack Acknowledgement) Success() bool {
+ return reflect.TypeOf(ack.Response) == reflect.TypeOf(((*Acknowledgement_Result)(nil)))
+}
+
+// Acknowledgement implements the Acknowledgement interface. It returns the
+// acknowledgement serialised using JSON.
+func (ack Acknowledgement) Acknowledgement() []byte {
+ return sdk.MustSortJSON(SubModuleCdc.MustMarshalJSON(&ack))
+}
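Editorial note (not part of the patch): taken together, Success and Acknowledgement give core IBC the two things it needs from a channel acknowledgement, namely whether the OnRecvPacket state changes should be kept and which bytes to commit on-chain. A minimal sketch of the two methods in use, assuming the repository's module path github.com/cosmos/ibc-go and the constructors shown above:

package main

import (
	"fmt"

	channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
)

func main() {
	resultAck := channeltypes.NewResultAcknowledgement([]byte("ok"))
	errorAck := channeltypes.NewErrorAcknowledgement("insufficient funds")

	fmt.Println(resultAck.Success()) // true: Response is an Acknowledgement_Result
	fmt.Println(errorAck.Success())  // false: Response is an Acknowledgement_Error

	// Acknowledgement() returns the sorted-JSON encoding that is written under
	// the packet acknowledgement key; both variants marshal without error.
	fmt.Printf("%s\n", errorAck.Acknowledgement())
}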
diff --git a/modules/core/04-channel/types/acknowledgement_test.go b/modules/core/04-channel/types/acknowledgement_test.go
index fa286d06..92d546a8 100644
--- a/modules/core/04-channel/types/acknowledgement_test.go
+++ b/modules/core/04-channel/types/acknowledgement_test.go
@@ -5,29 +5,34 @@ import "github.com/cosmos/ibc-go/modules/core/04-channel/types"
// tests acknowledgement.ValidateBasic and acknowledgement.GetBytes
func (suite TypesTestSuite) TestAcknowledgement() {
testCases := []struct {
- name string
- ack types.Acknowledgement
- expPass bool
+ name string
+ ack types.Acknowledgement
+ expSuccess bool // indicates whether this is a success or a failed ack
+ expPass bool
}{
{
"valid successful ack",
types.NewResultAcknowledgement([]byte("success")),
true,
+ true,
},
{
"valid failed ack",
types.NewErrorAcknowledgement("error"),
+ false,
true,
},
{
"empty successful ack",
types.NewResultAcknowledgement([]byte{}),
+ true,
false,
},
{
"empty faied ack",
types.NewErrorAcknowledgement(" "),
false,
+ false,
},
{
"nil response",
@@ -35,6 +40,7 @@ func (suite TypesTestSuite) TestAcknowledgement() {
Response: nil,
},
false,
+ false,
},
}
@@ -54,10 +60,11 @@ func (suite TypesTestSuite) TestAcknowledgement() {
// expect all acks to be able to be marshaled
suite.NotPanics(func() {
- bz := tc.ack.GetBytes()
+ bz := tc.ack.Acknowledgement()
suite.Require().NotNil(bz)
})
+
+ suite.Require().Equal(tc.expSuccess, tc.ack.Success())
})
}
-
}
diff --git a/modules/core/05-port/types/module.go b/modules/core/05-port/types/module.go
index 91ee642f..10a756bb 100644
--- a/modules/core/05-port/types/module.go
+++ b/modules/core/05-port/types/module.go
@@ -5,6 +5,7 @@ import (
capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"
channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
+ "github.com/cosmos/ibc-go/modules/core/exported"
)
// IBCModule defines an interface that implements all the callbacks
@@ -58,12 +59,15 @@ type IBCModule interface {
channelID string,
) error
- // OnRecvPacket must return the acknowledgement bytes
+ // OnRecvPacket must return an acknowledgement that implements the Acknowledgement interface.
// In the case of an asynchronous acknowledgement, nil should be returned.
+ // If the returned acknowledgement is successful, the state changes made during the callback are written;
+ // otherwise the application state changes are discarded. In either case the packet is received
+ // and, for synchronous acknowledgements, the acknowledgement is written.
OnRecvPacket(
ctx sdk.Context,
packet channeltypes.Packet,
- ) (*sdk.Result, []byte, error)
+ ) exported.Acknowledgement
OnAcknowledgementPacket(
ctx sdk.Context,
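Editorial note (not part of the patch): a hypothetical application module satisfying the new callback signature could look like the sketch below. ExampleModule and its validation rule are invented for illustration and are not part of ibc-go; the acknowledgement constructors come from 04-channel/types.

package exampleapp

import (
	sdk "github.com/cosmos/cosmos-sdk/types"

	channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
	"github.com/cosmos/ibc-go/modules/core/exported"
)

// ExampleModule is an invented IBC application used only to illustrate the
// new OnRecvPacket contract; it is not part of ibc-go.
type ExampleModule struct{}

// OnRecvPacket returns an acknowledgement implementing exported.Acknowledgement,
// or nil when the acknowledgement will be written asynchronously.
func (ExampleModule) OnRecvPacket(ctx sdk.Context, packet channeltypes.Packet) exported.Acknowledgement {
	// Empty packet data stands in for any application-level validation failure.
	if len(packet.GetData()) == 0 {
		// A failed ack makes core IBC discard the state changes made in this
		// callback; the error acknowledgement is still written for the sender.
		return channeltypes.NewErrorAcknowledgement("empty packet data")
	}

	// ... perform application state transitions on ctx here ...

	// A successful ack makes core IBC persist the callback's state changes.
	return channeltypes.NewResultAcknowledgement([]byte{1})
}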
diff --git a/modules/core/exported/channel.go b/modules/core/exported/channel.go
index 6a0d542c..f6393393 100644
--- a/modules/core/exported/channel.go
+++ b/modules/core/exported/channel.go
@@ -30,3 +30,10 @@ type PacketI interface {
GetData() []byte
ValidateBasic() error
}
+
+// Acknowledgement defines the interface used to return
+// acknowledgements in the OnRecvPacket callback.
+type Acknowledgement interface {
+ Success() bool
+ Acknowledgement() []byte
+}
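Editorial note (not part of the patch): because the interface has only two methods, an application that wants its own acknowledgement wire format can satisfy it in a few lines. The sketch below uses invented names (transferAck and its JSON layout are hypothetical); applications can equally reuse channeltypes.Acknowledgement, which implements the interface after this change.

package exampleapp

import (
	"encoding/json"

	"github.com/cosmos/ibc-go/modules/core/exported"
)

// transferAck is a hypothetical acknowledgement type; its name and JSON layout
// are invented for this example and are not part of ibc-go.
type transferAck struct {
	Ok    bool   `json:"ok"`
	Denom string `json:"denom,omitempty"`
}

// compile-time check that the invented type satisfies the interface
var _ exported.Acknowledgement = transferAck{}

// Success tells core IBC whether to keep the OnRecvPacket state changes.
func (a transferAck) Success() bool { return a.Ok }

// Acknowledgement returns the bytes committed under the packet ack key.
func (a transferAck) Acknowledgement() []byte {
	bz, err := json.Marshal(a)
	if err != nil {
		panic(err) // a plain struct always marshals
	}
	return bz
}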
diff --git a/modules/core/keeper/msg_server.go b/modules/core/keeper/msg_server.go
index d931abed..a64cb2ec 100644
--- a/modules/core/keeper/msg_server.go
+++ b/modules/core/keeper/msg_server.go
@@ -443,16 +443,19 @@ func (k Keeper) RecvPacket(goCtx context.Context, msg *channeltypes.MsgRecvPacke
}
// Perform application logic callback
- _, ack, err := cbs.OnRecvPacket(ctx, msg.Packet)
- if err != nil {
- return nil, sdkerrors.Wrap(err, "receive packet callback failed")
+ // Cache context so that we may discard state changes from callback if the acknowledgement is unsuccessful.
+ cacheCtx, writeFn := ctx.CacheContext()
+ ack := cbs.OnRecvPacket(cacheCtx, msg.Packet)
+ if ack == nil || ack.Success() {
+ // write application state changes for asynchronous and successful acknowledgements
+ writeFn()
}
// Set packet acknowledgement only if the acknowledgement is not nil.
// NOTE: IBC applications modules may call the WriteAcknowledgement asynchronously if the
// acknowledgement is nil.
if ack != nil {
- if err := k.ChannelKeeper.WriteAcknowledgement(ctx, cap, msg.Packet, ack); err != nil {
+ if err := k.ChannelKeeper.WriteAcknowledgement(ctx, cap, msg.Packet, ack.Acknowledgement()); err != nil {
return nil, err
}
}
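Editorial note (not part of the patch): the branch-and-commit idiom above relies on the SDK Context's CacheContext, which returns a context whose store writes are buffered plus a write function that flushes them to the parent. A small sketch of the pattern in isolation, with a hypothetical mutateState helper standing in for the application callback:

package examplecore

import (
	sdk "github.com/cosmos/cosmos-sdk/types"
)

// runIsolated applies mutateState against a cached branch of ctx and commits
// the buffered writes only when keep is true.
func runIsolated(ctx sdk.Context, keep bool, mutateState func(sdk.Context)) {
	cacheCtx, writeFn := ctx.CacheContext()

	// All store writes made through cacheCtx land in the cached branch only.
	mutateState(cacheCtx)

	if keep {
		// Flush the buffered writes into ctx's underlying stores.
		writeFn()
	}
	// If writeFn is never called, the branch is dropped, which is how RecvPacket
	// discards callback state for unsuccessful acknowledgements.
}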
diff --git a/modules/core/keeper/msg_server_test.go b/modules/core/keeper/msg_server_test.go
index 4ec1a0d2..97d2c4ab 100644
--- a/modules/core/keeper/msg_server_test.go
+++ b/modules/core/keeper/msg_server_test.go
@@ -57,79 +57,104 @@ func TestIBCTestSuite(t *testing.T) {
func (suite *KeeperTestSuite) TestHandleRecvPacket() {
var (
packet channeltypes.Packet
+ async bool // indicates that no ack is written
)
testCases := []struct {
- name string
- malleate func()
- expPass bool
+ name string
+ malleate func()
+ expPass bool
+ expRevert bool
}{
{"success: ORDERED", func() {
_, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.ORDERED)
- packet = channeltypes.NewPacket(ibctesting.MockCommitment, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
+ packet = channeltypes.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
suite.Require().NoError(err)
- }, true},
+ }, true, false},
{"success: UNORDERED", func() {
_, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
- packet = channeltypes.NewPacket(ibctesting.MockCommitment, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
+ packet = channeltypes.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
suite.Require().NoError(err)
- }, true},
+ }, true, false},
{"success: UNORDERED out of order packet", func() {
// setup uses an UNORDERED channel
_, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
// attempts to receive packet with sequence 10 without receiving packet with sequence 1
for i := uint64(1); i < 10; i++ {
- packet = channeltypes.NewPacket(ibctesting.MockCommitment, i, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
+ packet = channeltypes.NewPacket(ibctesting.MockPacketData, i, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
suite.Require().NoError(err)
}
- }, true},
+ }, true, false},
+ {"success: OnRecvPacket callback returns revert=true", func() {
+ _, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
+ packet = channeltypes.NewPacket(ibctesting.MockFailPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
+
+ err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ suite.Require().NoError(err)
+ }, true, true},
+ {"success: ORDERED - async acknowledgement", func() {
+ async = true
+ _, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.ORDERED)
+ packet = channeltypes.NewPacket(ibcmock.MockAsyncPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
+
+ err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ suite.Require().NoError(err)
+ }, true, false},
+ {"success: UNORDERED - async acknowledgement", func() {
+ async = true
+ _, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
+ packet = channeltypes.NewPacket(ibcmock.MockAsyncPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
+
+ err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ suite.Require().NoError(err)
+ }, true, false},
{"failure: ORDERED out of order packet", func() {
_, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.ORDERED)
// attempts to receive packet with sequence 10 without receiving packet with sequence 1
for i := uint64(1); i < 10; i++ {
- packet = channeltypes.NewPacket(ibctesting.MockCommitment, i, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
+ packet = channeltypes.NewPacket(ibctesting.MockPacketData, i, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
suite.Require().NoError(err)
}
- }, false},
+ }, false, false},
{"channel does not exist", func() {
// any non-nil value of packet is valid
suite.Require().NotNil(packet)
- }, false},
+ }, false, false},
{"packet not sent", func() {
_, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
- packet = channeltypes.NewPacket(ibctesting.MockCommitment, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
- }, false},
+ packet = channeltypes.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
+ }, false, false},
{"ORDERED: packet already received (replay)", func() {
clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.ORDERED)
- packet = channeltypes.NewPacket(ibctesting.MockCommitment, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
+ packet = channeltypes.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
suite.Require().NoError(err)
err = suite.coordinator.RecvPacket(suite.chainA, suite.chainB, clientA, packet)
suite.Require().NoError(err)
- }, false},
+ }, false, false},
{"UNORDERED: packet already received (replay)", func() {
clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
- packet = channeltypes.NewPacket(ibctesting.MockCommitment, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
+ packet = channeltypes.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
suite.Require().NoError(err)
err = suite.coordinator.RecvPacket(suite.chainA, suite.chainB, clientA, packet)
suite.Require().NoError(err)
- }, false},
+ }, false, false},
}
for _, tc := range testCases {
@@ -137,6 +162,7 @@ func (suite *KeeperTestSuite) TestHandleRecvPacket() {
suite.Run(tc.name, func() {
suite.SetupTest() // reset
+ async = false // reset
tc.malleate()
@@ -146,7 +172,6 @@ func (suite *KeeperTestSuite) TestHandleRecvPacket() {
msg := channeltypes.NewMsgRecvPacket(packet, proof, proofHeight, suite.chainB.SenderAccount.GetAddress().String())
- // ante-handle RecvPacket
_, err := keeper.Keeper.RecvPacket(*suite.chainB.App.IBCKeeper, sdk.WrapSDKContext(suite.chainB.GetContext()), msg)
if tc.expPass {
@@ -156,10 +181,25 @@ func (suite *KeeperTestSuite) TestHandleRecvPacket() {
_, err := keeper.Keeper.RecvPacket(*suite.chainB.App.IBCKeeper, sdk.WrapSDKContext(suite.chainB.GetContext()), msg)
suite.Require().Error(err)
- // verify ack was written
+ // check that callback state was handled correctly
+ _, exists := suite.chainB.App.ScopedIBCMockKeeper.GetCapability(suite.chainB.GetContext(), ibctesting.MockCanaryCapabilityName)
+ if tc.expRevert {
+ suite.Require().False(exists, "capability exists in store even after callback reverted")
+ } else {
+ suite.Require().True(exists, "callback state not persisted when revert is false")
+ }
+
+ // verify whether the ack was written
ack, found := suite.chainB.App.IBCKeeper.ChannelKeeper.GetPacketAcknowledgement(suite.chainB.GetContext(), packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence())
- suite.Require().NotNil(ack)
- suite.Require().True(found)
+
+ if async {
+ suite.Require().Nil(ack)
+ suite.Require().False(found)
+
+ } else {
+ suite.Require().NotNil(ack)
+ suite.Require().True(found)
+ }
} else {
suite.Require().Error(err)
}
@@ -184,7 +224,7 @@ func (suite *KeeperTestSuite) TestHandleAcknowledgePacket() {
}{
{"success: ORDERED", func() {
clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.ORDERED)
- packet = channeltypes.NewPacket(ibctesting.MockCommitment, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
+ packet = channeltypes.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
suite.Require().NoError(err)
@@ -194,7 +234,7 @@ func (suite *KeeperTestSuite) TestHandleAcknowledgePacket() {
}, true},
{"success: UNORDERED", func() {
clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
- packet = channeltypes.NewPacket(ibctesting.MockCommitment, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
+ packet = channeltypes.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
suite.Require().NoError(err)
@@ -208,7 +248,7 @@ func (suite *KeeperTestSuite) TestHandleAcknowledgePacket() {
// attempts to acknowledge ack with sequence 10 without acknowledging ack with sequence 1 (removing packet commitment)
for i := uint64(1); i < 10; i++ {
- packet = channeltypes.NewPacket(ibctesting.MockCommitment, i, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
+ packet = channeltypes.NewPacket(ibctesting.MockPacketData, i, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
suite.Require().NoError(err)
@@ -222,7 +262,7 @@ func (suite *KeeperTestSuite) TestHandleAcknowledgePacket() {
// attempts to acknowledge ack with sequence 10 without acknowledging ack with sequence 1 (removing packet commitment)
for i := uint64(1); i < 10; i++ {
- packet = channeltypes.NewPacket(ibctesting.MockCommitment, i, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
+ packet = channeltypes.NewPacket(ibctesting.MockPacketData, i, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
suite.Require().NoError(err)
@@ -237,14 +277,14 @@ func (suite *KeeperTestSuite) TestHandleAcknowledgePacket() {
}, false},
{"packet not received", func() {
_, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
- packet = channeltypes.NewPacket(ibctesting.MockCommitment, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
+ packet = channeltypes.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
suite.Require().NoError(err)
}, false},
{"ORDERED: packet already acknowledged (replay)", func() {
clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.ORDERED)
- packet = channeltypes.NewPacket(ibctesting.MockCommitment, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
+ packet = channeltypes.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
suite.Require().NoError(err)
@@ -252,13 +292,13 @@ func (suite *KeeperTestSuite) TestHandleAcknowledgePacket() {
err = suite.coordinator.RecvPacket(suite.chainA, suite.chainB, clientA, packet)
suite.Require().NoError(err)
- err = suite.coordinator.AcknowledgePacket(suite.chainA, suite.chainB, clientB, packet, ibctesting.TestHash)
+ err = suite.coordinator.AcknowledgePacket(suite.chainA, suite.chainB, clientB, packet, ibctesting.MockAcknowledgement)
suite.Require().NoError(err)
}, false},
{"UNORDERED: packet already acknowledged (replay)", func() {
clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
- packet = channeltypes.NewPacket(ibctesting.MockCommitment, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
+ packet = channeltypes.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
suite.Require().NoError(err)
@@ -266,7 +306,7 @@ func (suite *KeeperTestSuite) TestHandleAcknowledgePacket() {
err = suite.coordinator.RecvPacket(suite.chainA, suite.chainB, clientA, packet)
suite.Require().NoError(err)
- err = suite.coordinator.AcknowledgePacket(suite.chainA, suite.chainB, clientB, packet, ibctesting.TestHash)
+ err = suite.coordinator.AcknowledgePacket(suite.chainA, suite.chainB, clientB, packet, ibctesting.MockAcknowledgement)
suite.Require().NoError(err)
}, false},
}
@@ -276,14 +316,13 @@ func (suite *KeeperTestSuite) TestHandleAcknowledgePacket() {
suite.Run(tc.name, func() {
suite.SetupTest() // reset
- ibctesting.TestHash = ibctesting.MockAcknowledgement
tc.malleate()
packetKey := host.PacketAcknowledgementKey(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence())
proof, proofHeight := suite.chainB.QueryProof(packetKey)
- msg := channeltypes.NewMsgAcknowledgement(packet, ibcmock.MockAcknowledgement, proof, proofHeight, suite.chainA.SenderAccount.GetAddress().String())
+ msg := channeltypes.NewMsgAcknowledgement(packet, ibcmock.MockAcknowledgement.Acknowledgement(), proof, proofHeight, suite.chainA.SenderAccount.GetAddress().String())
_, err := keeper.Keeper.Acknowledgement(*suite.chainA.App.IBCKeeper, sdk.WrapSDKContext(suite.chainA.GetContext()), msg)
@@ -323,7 +362,7 @@ func (suite *KeeperTestSuite) TestHandleTimeoutPacket() {
}{
{"success: ORDERED", func() {
clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.ORDERED)
- packet = channeltypes.NewPacket(ibctesting.MockCommitment, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), uint64(suite.chainB.GetContext().BlockTime().UnixNano()))
+ packet = channeltypes.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), uint64(suite.chainB.GetContext().BlockTime().UnixNano()))
// create packet commitment
err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
@@ -336,7 +375,7 @@ func (suite *KeeperTestSuite) TestHandleTimeoutPacket() {
}, true},
{"success: UNORDERED", func() {
clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
- packet = channeltypes.NewPacket(ibctesting.MockCommitment, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), uint64(suite.chainB.GetContext().BlockTime().UnixNano()))
+ packet = channeltypes.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), uint64(suite.chainB.GetContext().BlockTime().UnixNano()))
// create packet commitment
err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
@@ -354,7 +393,7 @@ func (suite *KeeperTestSuite) TestHandleTimeoutPacket() {
// attempts to timeout the last packet sent without timing out the first packet
// packet sequences begin at 1
for i := uint64(1); i < maxSequence; i++ {
- packet = channeltypes.NewPacket(ibctesting.MockCommitment, i, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), 0)
+ packet = channeltypes.NewPacket(ibctesting.MockPacketData, i, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), 0)
// create packet commitment
err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
@@ -370,7 +409,7 @@ func (suite *KeeperTestSuite) TestHandleTimeoutPacket() {
// attempts to timeout the last packet sent without timing out the first packet
// packet sequences begin at 1
for i := uint64(1); i < maxSequence; i++ {
- packet = channeltypes.NewPacket(ibctesting.MockCommitment, i, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), 0)
+ packet = channeltypes.NewPacket(ibctesting.MockPacketData, i, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), 0)
// create packet commitment
err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
@@ -389,7 +428,7 @@ func (suite *KeeperTestSuite) TestHandleTimeoutPacket() {
}, false},
{"UNORDERED: packet not sent", func() {
_, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
- packet = channeltypes.NewPacket(ibctesting.MockCommitment, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
+ packet = channeltypes.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
packetKey = host.PacketReceiptKey(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence())
}, false},
}
@@ -445,7 +484,7 @@ func (suite *KeeperTestSuite) TestHandleTimeoutOnClosePacket() {
}{
{"success: ORDERED", func() {
clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.ORDERED)
- packet = channeltypes.NewPacket(ibctesting.MockCommitment, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
+ packet = channeltypes.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
counterpartyChannel = ibctesting.TestChannel{
PortID: channelB.PortID,
ID: channelB.ID,
@@ -466,7 +505,7 @@ func (suite *KeeperTestSuite) TestHandleTimeoutOnClosePacket() {
}, true},
{"success: UNORDERED", func() {
clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
- packet = channeltypes.NewPacket(ibctesting.MockCommitment, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
+ packet = channeltypes.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
counterpartyChannel = ibctesting.TestChannel{
PortID: channelB.PortID,
ID: channelB.ID,
@@ -497,7 +536,7 @@ func (suite *KeeperTestSuite) TestHandleTimeoutOnClosePacket() {
// attempts to timeout the last packet sent without timing out the first packet
// packet sequences begin at 1
for i := uint64(1); i < maxSequence; i++ {
- packet = channeltypes.NewPacket(ibctesting.MockCommitment, i, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
+ packet = channeltypes.NewPacket(ibctesting.MockPacketData, i, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
// create packet commitment
err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
@@ -521,7 +560,7 @@ func (suite *KeeperTestSuite) TestHandleTimeoutOnClosePacket() {
// attempts to timeout the last packet sent without timing out the first packet
// packet sequences begin at 1
for i := uint64(1); i < maxSequence; i++ {
- packet = channeltypes.NewPacket(ibctesting.MockCommitment, i, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
+ packet = channeltypes.NewPacket(ibctesting.MockPacketData, i, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
// create packet commitment
err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
@@ -542,7 +581,7 @@ func (suite *KeeperTestSuite) TestHandleTimeoutOnClosePacket() {
}, false},
{"UNORDERED: packet not sent", func() {
clientA, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
- packet = channeltypes.NewPacket(ibctesting.MockCommitment, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
+ packet = channeltypes.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
packetKey = host.PacketAcknowledgementKey(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence())
counterpartyChannel = ibctesting.TestChannel{
PortID: channelB.PortID,
@@ -555,7 +594,7 @@ func (suite *KeeperTestSuite) TestHandleTimeoutOnClosePacket() {
}, false},
{"ORDERED: channel not closed", func() {
clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.ORDERED)
- packet = channeltypes.NewPacket(ibctesting.MockCommitment, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
+ packet = channeltypes.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
counterpartyChannel = ibctesting.TestChannel{
PortID: channelB.PortID,
ID: channelB.ID,
diff --git a/modules/light-clients/07-tendermint/types/client_state_test.go b/modules/light-clients/07-tendermint/types/client_state_test.go
index 914851fa..2838ebb4 100644
--- a/modules/light-clients/07-tendermint/types/client_state_test.go
+++ b/modules/light-clients/07-tendermint/types/client_state_test.go
@@ -435,7 +435,7 @@ func (suite *TendermintTestSuite) TestVerifyPacketCommitment() {
// setup testing conditions
clientA, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
- packet := channeltypes.NewPacket(ibctesting.TestHash, 1, channelB.PortID, channelB.ID, channelA.PortID, channelA.ID, clienttypes.NewHeight(0, 100), 0)
+ packet := channeltypes.NewPacket(ibctesting.MockPacketData, 1, channelB.PortID, channelB.ID, channelA.PortID, channelA.ID, clienttypes.NewHeight(0, 100), 0)
err := suite.coordinator.SendPacket(suite.chainB, suite.chainA, packet, clientA)
suite.Require().NoError(err)
@@ -534,7 +534,7 @@ func (suite *TendermintTestSuite) TestVerifyPacketAcknowledgement() {
// setup testing conditions
clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
- packet := channeltypes.NewPacket(ibctesting.TestHash, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.NewHeight(0, 100), 0)
+ packet := channeltypes.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.NewHeight(0, 100), 0)
// send packet
err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
@@ -562,7 +562,7 @@ func (suite *TendermintTestSuite) TestVerifyPacketAcknowledgement() {
currentTime := uint64(suite.chainA.GetContext().BlockTime().UnixNano())
err = clientState.VerifyPacketAcknowledgement(
store, suite.chainA.Codec, proofHeight, currentTime, delayPeriod, &prefix, proof,
- packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence(), ibcmock.MockAcknowledgement,
+ packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence(), ibcmock.MockAcknowledgement.Acknowledgement(),
)
if tc.expPass {
@@ -638,7 +638,7 @@ func (suite *TendermintTestSuite) TestVerifyPacketReceiptAbsence() {
// setup testing conditions
clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
- packet := channeltypes.NewPacket(ibctesting.TestHash, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.NewHeight(0, 100), 0)
+ packet := channeltypes.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.NewHeight(0, 100), 0)
// send packet, but no recv
err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
@@ -741,7 +741,7 @@ func (suite *TendermintTestSuite) TestVerifyNextSeqRecv() {
// setup testing conditions
clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.ORDERED)
- packet := channeltypes.NewPacket(ibctesting.TestHash, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.NewHeight(0, 100), 0)
+ packet := channeltypes.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.NewHeight(0, 100), 0)
// send packet
err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
diff --git a/testing/chain.go b/testing/chain.go
index 3578bd0e..19aa1f2c 100644
--- a/testing/chain.go
+++ b/testing/chain.go
@@ -65,15 +65,16 @@ var (
// Default params variables used to create a TM client
DefaultTrustLevel ibctmtypes.Fraction = ibctmtypes.DefaultTrustLevel
- TestHash = tmhash.Sum([]byte("TESTING HASH"))
TestCoin = sdk.NewCoin(sdk.DefaultBondDenom, sdk.NewInt(100))
UpgradePath = []string{"upgrade", "upgradedIBCState"}
ConnectionVersion = connectiontypes.ExportedVersionsToProto(connectiontypes.GetCompatibleVersions())[0]
- MockAcknowledgement = mock.MockAcknowledgement
- MockCommitment = mock.MockCommitment
+ MockAcknowledgement = mock.MockAcknowledgement.Acknowledgement()
+ MockPacketData = mock.MockPacketData
+ MockFailPacketData = mock.MockFailPacketData
+ MockCanaryCapabilityName = mock.MockCanaryCapabilityName
)
// TestChain is a testing struct that wraps a simapp with the last TM Header, the current ABCI
@@ -897,7 +898,7 @@ func (chain *TestChain) WriteAcknowledgement(
channelCap := chain.GetChannelCapability(packet.GetDestPort(), packet.GetDestChannel())
// no need to send message, acting as a handler
- err := chain.App.IBCKeeper.ChannelKeeper.WriteAcknowledgement(chain.GetContext(), channelCap, packet, TestHash)
+ err := chain.App.IBCKeeper.ChannelKeeper.WriteAcknowledgement(chain.GetContext(), channelCap, packet, MockAcknowledgement)
if err != nil {
return err
}
diff --git a/testing/mock/mock.go b/testing/mock/mock.go
index 8a709fba..1ac33f8f 100644
--- a/testing/mock/mock.go
+++ b/testing/mock/mock.go
@@ -1,25 +1,24 @@
package mock
import (
+ "bytes"
"encoding/json"
- "github.com/cosmos/cosmos-sdk/types/module"
-
- "github.com/grpc-ecosystem/grpc-gateway/runtime"
-
- "github.com/gorilla/mux"
- "github.com/spf13/cobra"
-
- abci "github.com/tendermint/tendermint/abci/types"
-
"github.com/cosmos/cosmos-sdk/client"
"github.com/cosmos/cosmos-sdk/codec"
codectypes "github.com/cosmos/cosmos-sdk/codec/types"
sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/cosmos-sdk/types/module"
capabilitykeeper "github.com/cosmos/cosmos-sdk/x/capability/keeper"
capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"
+ "github.com/gorilla/mux"
+ "github.com/grpc-ecosystem/grpc-gateway/runtime"
+ "github.com/spf13/cobra"
+ abci "github.com/tendermint/tendermint/abci/types"
+
channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
host "github.com/cosmos/ibc-go/modules/core/24-host"
+ "github.com/cosmos/ibc-go/modules/core/exported"
)
const (
@@ -27,8 +26,12 @@ const (
)
var (
- MockAcknowledgement = []byte("mock acknowledgement")
- MockCommitment = []byte("mock packet commitment")
+ MockAcknowledgement = channeltypes.NewResultAcknowledgement([]byte("mock acknowledgement"))
+ MockFailAcknowledgement = channeltypes.NewErrorAcknowledgement("mock failed acknowledgement")
+ MockPacketData = []byte("mock packet data")
+ MockFailPacketData = []byte("mock failed packet data")
+ MockAsyncPacketData = []byte("mock async packet data")
+ MockCanaryCapabilityName = "mock canary capability name"
)
// AppModuleBasic is the mock AppModuleBasic.
@@ -171,8 +174,16 @@ func (am AppModule) OnChanCloseConfirm(sdk.Context, string, string) error {
}
// OnRecvPacket implements the IBCModule interface.
-func (am AppModule) OnRecvPacket(sdk.Context, channeltypes.Packet) (*sdk.Result, []byte, error) {
- return nil, MockAcknowledgement, nil
+func (am AppModule) OnRecvPacket(ctx sdk.Context, packet channeltypes.Packet) exported.Acknowledgement {
+ // set state by claiming capability to check whether the revert happens
+ am.scopedKeeper.NewCapability(ctx, MockCanaryCapabilityName)
+ if bytes.Equal(MockPacketData, packet.GetData()) {
+ return MockAcknowledgement
+ } else if bytes.Equal(MockAsyncPacketData, packet.GetData()) {
+ return nil
+ }
+
+ return MockFailAcknowledgement
}
// OnAcknowledgementPacket implements the IBCModule interface.
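
The mock module above illustrates the new OnRecvPacket contract from \#107: return a result acknowledgement on success, an error acknowledgement on failure, or nil to acknowledge asynchronously. Below is a minimal sketch of an application-side implementation under that contract; the `myapp` package and its trivial success check are hypothetical, while the signature and the acknowledgement constructors are the ones shown in this patch.

```go
package myapp // hypothetical example module, not part of this patch

import (
	sdk "github.com/cosmos/cosmos-sdk/types"

	channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
	"github.com/cosmos/ibc-go/modules/core/exported"
)

// AppModule is a stand-in application module implementing the IBC callbacks.
type AppModule struct{}

// OnRecvPacket returns a result acknowledgement on success, an error
// acknowledgement on failure, or nil to write the acknowledgement
// asynchronously at a later point.
func (am AppModule) OnRecvPacket(ctx sdk.Context, packet channeltypes.Packet) exported.Acknowledgement {
	if len(packet.GetData()) == 0 {
		// state changes made by the callback are discarded by core IBC when
		// the returned acknowledgement is unsuccessful
		return channeltypes.NewErrorAcknowledgement("empty packet data")
	}
	return channeltypes.NewResultAcknowledgement([]byte{byte(1)})
}
```

When nil is returned, no acknowledgement is written during RecvPacket (as asserted in the async branch of the test above); the application later writes one through the channel keeper's WriteAcknowledgement, as the testing chain does with MockAcknowledgement.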
From 4e145d807b553828dc97213fe336152ec624bd42 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?colin=20axn=C3=A9r?=
<25233464+colin-axner@users.noreply.github.com>
Date: Mon, 12 Apr 2021 20:20:57 +0200
Subject: [PATCH 031/393] remove connection and channel handshake cli commands
(#109)
* remove connection handshake cli commands
* add changelog entry
* remove channel handshake commands
* update gomod
---
CHANGELOG.md | 3 +-
go.mod | 1 -
modules/core/03-connection/client/cli/cli.go | 21 --
modules/core/03-connection/client/cli/tx.go | 348 -------------------
modules/core/03-connection/module.go | 5 -
modules/core/04-channel/client/cli/cli.go | 9 +-
modules/core/04-channel/client/cli/tx.go | 288 ---------------
modules/core/client/cli/cli.go | 1 -
testing/sdk_test.go | 14 +-
9 files changed, 12 insertions(+), 678 deletions(-)
delete mode 100644 modules/core/03-connection/client/cli/tx.go
delete mode 100644 modules/core/04-channel/client/cli/tx.go
diff --git a/CHANGELOG.md b/CHANGELOG.md
index edda2d7a..696cc07c 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -36,8 +36,9 @@ Ref: https://keepachangelog.com/en/1.0.0/
## [Unreleased]
-### API Breaking
+### API Breaking
+* (modules/core) [\#109](https://github.com/cosmos/ibc-go/pull/109) Remove connection and channel handshake CLI commands.
* (modules) [\#107](https://github.com/cosmos/ibc-go/pull/107) Modify OnRecvPacket callback to return an acknowledgement which indicates if it is successful or not. Callback state changes are discarded for unsuccessful acknowledgements only.
* (modules) [\#108](https://github.com/cosmos/ibc-go/pull/108) All message constructors take the signer as a string to prevent upstream bugs. The `String()` function for an SDK Acc Address relies on external context.
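
As a quick illustration of the #108 entry above, here is a hedged usage sketch of constructing a packet message with the signer passed as a string; all values are placeholders, and only constructors already used elsewhere in this patch series are assumed.

```go
package main // usage sketch only, not part of this patch

import (
	"fmt"

	sdk "github.com/cosmos/cosmos-sdk/types"

	clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
	channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
)

func main() {
	// placeholder values for illustration only
	packet := channeltypes.NewPacket([]byte("data"), 1, "transfer", "channel-0", "transfer", "channel-1", clienttypes.NewHeight(0, 100), 0)
	proof := []byte("proof")
	proofHeight := clienttypes.NewHeight(0, 99)
	signer := sdk.AccAddress([]byte("example-signer-addr"))

	// the constructor takes the signer as a string, so address formatting is done by the caller
	msg := channeltypes.NewMsgRecvPacket(packet, proof, proofHeight, signer.String())
	fmt.Println(msg.Signer)
}
```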
diff --git a/go.mod b/go.mod
index 7cf8a7b5..f9c9b147 100644
--- a/go.mod
+++ b/go.mod
@@ -16,7 +16,6 @@ require (
github.com/rakyll/statik v0.1.7
github.com/spf13/cast v1.3.1
github.com/spf13/cobra v1.1.3
- github.com/spf13/pflag v1.0.5
github.com/spf13/viper v1.7.1
github.com/stretchr/testify v1.7.0
github.com/tendermint/tendermint v0.34.8
diff --git a/modules/core/03-connection/client/cli/cli.go b/modules/core/03-connection/client/cli/cli.go
index a7024055..743ca49e 100644
--- a/modules/core/03-connection/client/cli/cli.go
+++ b/modules/core/03-connection/client/cli/cli.go
@@ -3,7 +3,6 @@ package cli
import (
"github.com/spf13/cobra"
- "github.com/cosmos/cosmos-sdk/client"
"github.com/cosmos/ibc-go/modules/core/03-connection/types"
)
@@ -24,23 +23,3 @@ func GetQueryCmd() *cobra.Command {
return queryCmd
}
-
-// NewTxCmd returns a CLI command handler for all x/ibc connection transaction commands.
-func NewTxCmd() *cobra.Command {
- txCmd := &cobra.Command{
- Use: types.SubModuleName,
- Short: "IBC connection transaction subcommands",
- DisableFlagParsing: true,
- SuggestionsMinimumDistance: 2,
- RunE: client.ValidateCmd,
- }
-
- txCmd.AddCommand(
- NewConnectionOpenInitCmd(),
- NewConnectionOpenTryCmd(),
- NewConnectionOpenAckCmd(),
- NewConnectionOpenConfirmCmd(),
- )
-
- return txCmd
-}
diff --git a/modules/core/03-connection/client/cli/tx.go b/modules/core/03-connection/client/cli/tx.go
deleted file mode 100644
index df396c8b..00000000
--- a/modules/core/03-connection/client/cli/tx.go
+++ /dev/null
@@ -1,348 +0,0 @@
-package cli
-
-import (
- "fmt"
- "io/ioutil"
- "strings"
-
- "github.com/pkg/errors"
- "github.com/spf13/cobra"
-
- "github.com/cosmos/cosmos-sdk/client"
- "github.com/cosmos/cosmos-sdk/client/flags"
- "github.com/cosmos/cosmos-sdk/client/tx"
- "github.com/cosmos/cosmos-sdk/codec"
- "github.com/cosmos/cosmos-sdk/types/msgservice"
- "github.com/cosmos/cosmos-sdk/version"
- clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
- "github.com/cosmos/ibc-go/modules/core/03-connection/client/utils"
- "github.com/cosmos/ibc-go/modules/core/03-connection/types"
- host "github.com/cosmos/ibc-go/modules/core/24-host"
-)
-
-const (
- flagVersionIdentifier = "version-identifier"
- flagVersionFeatures = "version-features"
- flagDelayPeriod = "delay-period"
-)
-
-// NewConnectionOpenInitCmd defines the command to initialize a connection on
-// chain A with a given counterparty chain B
-func NewConnectionOpenInitCmd() *cobra.Command {
- cmd := &cobra.Command{
- Use: "open-init [client-id] [counterparty-client-id] [path/to/counterparty_prefix.json]",
- Short: "Initialize connection on chain A",
- Long: `Initialize a connection on chain A with a given counterparty chain B.
- - 'version-identifier' flag can be a single pre-selected version identifier to be used in the handshake.
- - 'version-features' flag can be a list of features separated by commas to accompany the version identifier.`,
- Example: fmt.Sprintf(
- "%s tx %s %s open-init [client-id] [counterparty-client-id] [path/to/counterparty_prefix.json] --version-identifier=\"1.0\" --version-features=\"ORDER_UNORDERED\" --delay-period=500",
- version.AppName, host.ModuleName, types.SubModuleName,
- ),
- Args: cobra.ExactArgs(3),
- RunE: func(cmd *cobra.Command, args []string) error {
- clientCtx, err := client.GetClientTxContext(cmd)
- if err != nil {
- return err
- }
- clientID := args[0]
- counterpartyClientID := args[1]
-
- counterpartyPrefix, err := utils.ParsePrefix(clientCtx.LegacyAmino, args[2])
- if err != nil {
- return err
- }
-
- var version *types.Version
- versionIdentifier, _ := cmd.Flags().GetString(flagVersionIdentifier)
-
- if versionIdentifier != "" {
- var features []string
-
- versionFeatures, _ := cmd.Flags().GetString(flagVersionFeatures)
- if versionFeatures != "" {
- features = strings.Split(versionFeatures, ",")
- }
-
- version = types.NewVersion(versionIdentifier, features)
- }
-
- delayPeriod, err := cmd.Flags().GetUint64(flagDelayPeriod)
- if err != nil {
- return err
- }
-
- msg := types.NewMsgConnectionOpenInit(
- clientID, counterpartyClientID,
- counterpartyPrefix, version, delayPeriod, clientCtx.GetFromAddress().String(),
- )
-
- svcMsgClientConn := &msgservice.ServiceMsgClientConn{}
- msgClient := types.NewMsgClient(svcMsgClientConn)
- _, err = msgClient.ConnectionOpenInit(cmd.Context(), msg)
- if err != nil {
- return err
- }
-
- return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), svcMsgClientConn.GetMsgs()...)
- },
- }
-
- // NOTE: we should use empty default values since the user may not want to select a version
- // at this step in the handshake.
- cmd.Flags().String(flagVersionIdentifier, "", "version identifier to be used in the connection handshake version negotiation")
- cmd.Flags().String(flagVersionFeatures, "", "version features list separated by commas without spaces. The features must function with the version identifier.")
- cmd.Flags().Uint64(flagDelayPeriod, 0, "delay period that must pass before packet verification can pass against a consensus state")
- flags.AddTxFlagsToCmd(cmd)
-
- return cmd
-}
-
-// NewConnectionOpenTryCmd defines the command to relay a try open a connection on
-// chain B
-func NewConnectionOpenTryCmd() *cobra.Command {
- cmd := &cobra.Command{
- Use: strings.TrimSpace(`open-try [connection-id] [client-id]
-[counterparty-connection-id] [counterparty-client-id] [path/to/counterparty_prefix.json] [path/to/client_state.json]
-[path/to/counterparty_version1.json,path/to/counterparty_version2.json...] [consensus-height] [proof-height] [path/to/proof_init.json] [path/to/proof_client.json] [path/to/proof_consensus.json]`),
- Short: "initiate connection handshake between two chains",
- Long: "Initialize a connection on chain A with a given counterparty chain B. Provide counterparty versions separated by commas",
- Example: fmt.Sprintf(
- `%s tx %s %s open-try connection-id] [client-id] \
-[counterparty-connection-id] [counterparty-client-id] [path/to/counterparty_prefix.json] [path/to/client_state.json]\
-[counterparty-versions] [consensus-height] [proof-height] [path/to/proof_init.json] [path/to/proof_client.json] [path/to/proof_consensus.json]`,
- version.AppName, host.ModuleName, types.SubModuleName,
- ),
- Args: cobra.ExactArgs(12),
- RunE: func(cmd *cobra.Command, args []string) error {
- clientCtx, err := client.GetClientTxContext(cmd)
- if err != nil {
- return err
- }
- connectionID := args[0]
- clientID := args[1]
- counterpartyConnectionID := args[2]
- counterpartyClientID := args[3]
-
- counterpartyPrefix, err := utils.ParsePrefix(clientCtx.LegacyAmino, args[4])
- if err != nil {
- return err
- }
-
- counterpartyClient, err := utils.ParseClientState(clientCtx.LegacyAmino, args[5])
- if err != nil {
- return err
- }
-
- cdc := codec.NewProtoCodec(clientCtx.InterfaceRegistry)
-
- versionsStr := strings.Split(args[6], ",")
- counterpartyVersions := make([]*types.Version, len(versionsStr))
-
- for _, ver := range versionsStr {
-
- // attempt to unmarshal version
- version := &types.Version{}
- if err := cdc.UnmarshalJSON([]byte(ver), version); err != nil {
-
- // check for file path if JSON input is not provided
- contents, err := ioutil.ReadFile(ver)
- if err != nil {
- return errors.Wrap(err, "neither JSON input nor path to .json file for version were provided")
- }
-
- if err := cdc.UnmarshalJSON(contents, version); err != nil {
- return errors.Wrap(err, "error unmarshalling version file")
- }
- }
- }
-
- consensusHeight, err := clienttypes.ParseHeight(args[7])
- if err != nil {
- return err
- }
- proofHeight, err := clienttypes.ParseHeight(args[8])
- if err != nil {
- return err
- }
-
- proofInit, err := utils.ParseProof(clientCtx.LegacyAmino, args[9])
- if err != nil {
- return err
- }
-
- proofClient, err := utils.ParseProof(clientCtx.LegacyAmino, args[10])
- if err != nil {
- return err
- }
-
- proofConsensus, err := utils.ParseProof(clientCtx.LegacyAmino, args[11])
- if err != nil {
- return err
- }
-
- delayPeriod, err := cmd.Flags().GetUint64(flagDelayPeriod)
- if err != nil {
- return err
- }
-
- msg := types.NewMsgConnectionOpenTry(
- connectionID, clientID, counterpartyConnectionID, counterpartyClientID,
- counterpartyClient, counterpartyPrefix, counterpartyVersions, delayPeriod,
- proofInit, proofClient, proofConsensus, proofHeight,
- consensusHeight, clientCtx.GetFromAddress().String(),
- )
-
- svcMsgClientConn := &msgservice.ServiceMsgClientConn{}
- msgClient := types.NewMsgClient(svcMsgClientConn)
- _, err = msgClient.ConnectionOpenTry(cmd.Context(), msg)
- if err != nil {
- return err
- }
-
- return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), svcMsgClientConn.GetMsgs()...)
- },
- }
-
- cmd.Flags().Uint64(flagDelayPeriod, 0, "delay period that must pass before packet verification can pass against a consensus state")
- flags.AddTxFlagsToCmd(cmd)
-
- return cmd
-}
-
-// NewConnectionOpenAckCmd defines the command to relay the acceptance of a
-// connection open attempt from chain B to chain A
-func NewConnectionOpenAckCmd() *cobra.Command {
- cmd := &cobra.Command{
- Use: `open-ack [connection-id] [counterparty-connection-id] [path/to/client_state.json] [consensus-height] [proof-height]
- [path/to/proof_try.json] [path/to/proof_client.json] [path/to/proof_consensus.json] [version]`,
- Short: "relay the acceptance of a connection open attempt",
- Long: "Relay the acceptance of a connection open attempt from chain B to chain A",
- Example: fmt.Sprintf(
- `%s tx %s %s open-ack [connection-id] [counterparty-connection-id] [path/to/client_state.json] [consensus-height] [proof-height]
- [path/to/proof_try.json] [path/to/proof_client.json] [path/to/proof_consensus.json] [version]`,
- version.AppName, host.ModuleName, types.SubModuleName,
- ),
- Args: cobra.ExactArgs(9),
- RunE: func(cmd *cobra.Command, args []string) error {
- clientCtx, err := client.GetClientTxContext(cmd)
- if err != nil {
- return err
- }
- connectionID := args[0]
- counterpartyConnectionID := args[1]
-
- counterpartyClient, err := utils.ParseClientState(clientCtx.LegacyAmino, args[2])
- if err != nil {
- return err
- }
-
- consensusHeight, err := clienttypes.ParseHeight(args[3])
- if err != nil {
- return err
- }
- proofHeight, err := clienttypes.ParseHeight(args[4])
- if err != nil {
- return err
- }
-
- proofTry, err := utils.ParseProof(clientCtx.LegacyAmino, args[5])
- if err != nil {
- return err
- }
-
- proofClient, err := utils.ParseProof(clientCtx.LegacyAmino, args[6])
- if err != nil {
- return err
- }
-
- proofConsensus, err := utils.ParseProof(clientCtx.LegacyAmino, args[7])
- if err != nil {
- return err
- }
-
- cdc := codec.NewProtoCodec(clientCtx.InterfaceRegistry)
-
- // attempt to unmarshal version
- version := &types.Version{}
- if err := cdc.UnmarshalJSON([]byte(args[8]), version); err != nil {
-
- // check for file path if JSON input is not provided
- contents, err := ioutil.ReadFile(args[8])
- if err != nil {
- return errors.Wrap(err, "neither JSON input nor path to .json file for version were provided")
- }
-
- if err := cdc.UnmarshalJSON(contents, version); err != nil {
- return errors.Wrap(err, "error unmarshalling version file")
- }
- }
-
- msg := types.NewMsgConnectionOpenAck(
- connectionID, counterpartyConnectionID, counterpartyClient, proofTry, proofClient, proofConsensus, proofHeight,
- consensusHeight, version, clientCtx.GetFromAddress().String(),
- )
-
- svcMsgClientConn := &msgservice.ServiceMsgClientConn{}
- msgClient := types.NewMsgClient(svcMsgClientConn)
- _, err = msgClient.ConnectionOpenAck(cmd.Context(), msg)
- if err != nil {
- return err
- }
-
- return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), svcMsgClientConn.GetMsgs()...)
- },
- }
-
- flags.AddTxFlagsToCmd(cmd)
-
- return cmd
-}
-
-// NewConnectionOpenConfirmCmd defines the command to initialize a connection on
-// chain A with a given counterparty chain B
-func NewConnectionOpenConfirmCmd() *cobra.Command {
- cmd := &cobra.Command{
- Use: "open-confirm [connection-id] [proof-height] [path/to/proof_ack.json]",
- Short: "confirm to chain B that connection is open on chain A",
- Long: "Confirm to chain B that connection is open on chain A",
- Example: fmt.Sprintf(
- "%s tx %s %s open-confirm [connection-id] [proof-height] [path/to/proof_ack.json]",
- version.AppName, host.ModuleName, types.SubModuleName,
- ),
- Args: cobra.ExactArgs(3),
- RunE: func(cmd *cobra.Command, args []string) error {
- clientCtx, err := client.GetClientTxContext(cmd)
- if err != nil {
- return err
- }
- connectionID := args[0]
- proofHeight, err := clienttypes.ParseHeight(args[1])
- if err != nil {
- return err
- }
-
- proofAck, err := utils.ParseProof(clientCtx.LegacyAmino, args[2])
- if err != nil {
- return err
- }
-
- msg := types.NewMsgConnectionOpenConfirm(
- connectionID, proofAck, proofHeight, clientCtx.GetFromAddress().String(),
- )
-
- svcMsgClientConn := &msgservice.ServiceMsgClientConn{}
- msgClient := types.NewMsgClient(svcMsgClientConn)
- _, err = msgClient.ConnectionOpenConfirm(cmd.Context(), msg)
- if err != nil {
- return err
- }
-
- return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), svcMsgClientConn.GetMsgs()...)
- },
- }
-
- flags.AddTxFlagsToCmd(cmd)
-
- return cmd
-}
diff --git a/modules/core/03-connection/module.go b/modules/core/03-connection/module.go
index c0bbc68f..d4c344fa 100644
--- a/modules/core/03-connection/module.go
+++ b/modules/core/03-connection/module.go
@@ -13,11 +13,6 @@ func Name() string {
return types.SubModuleName
}
-// GetTxCmd returns the root tx command for the IBC connections.
-func GetTxCmd() *cobra.Command {
- return cli.NewTxCmd()
-}
-
// GetQueryCmd returns the root query command for the IBC connections.
func GetQueryCmd() *cobra.Command {
return cli.GetQueryCmd()
diff --git a/modules/core/04-channel/client/cli/cli.go b/modules/core/04-channel/client/cli/cli.go
index 2786f233..3335f379 100644
--- a/modules/core/04-channel/client/cli/cli.go
+++ b/modules/core/04-channel/client/cli/cli.go
@@ -45,14 +45,7 @@ func NewTxCmd() *cobra.Command {
RunE: client.ValidateCmd,
}
- txCmd.AddCommand(
- NewChannelOpenInitCmd(),
- NewChannelOpenTryCmd(),
- NewChannelOpenAckCmd(),
- NewChannelOpenConfirmCmd(),
- NewChannelCloseInitCmd(),
- NewChannelCloseConfirmCmd(),
- )
+ txCmd.AddCommand()
return txCmd
}
diff --git a/modules/core/04-channel/client/cli/tx.go b/modules/core/04-channel/client/cli/tx.go
deleted file mode 100644
index c76c02f3..00000000
--- a/modules/core/04-channel/client/cli/tx.go
+++ /dev/null
@@ -1,288 +0,0 @@
-package cli
-
-import (
- "strings"
-
- "github.com/spf13/cobra"
- "github.com/spf13/pflag"
-
- "github.com/cosmos/cosmos-sdk/client"
- "github.com/cosmos/cosmos-sdk/client/flags"
- "github.com/cosmos/cosmos-sdk/client/tx"
- "github.com/cosmos/cosmos-sdk/types/msgservice"
- ibctransfertypes "github.com/cosmos/ibc-go/modules/apps/transfer/types"
- clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
- connectionutils "github.com/cosmos/ibc-go/modules/core/03-connection/client/utils"
- "github.com/cosmos/ibc-go/modules/core/04-channel/types"
-)
-
-// IBC Channel flags
-const (
- FlagOrdered = "ordered"
- FlagIBCVersion = "ibc-version"
-)
-
-// NewChannelOpenInitCmd returns the command to create a MsgChannelOpenInit transaction
-func NewChannelOpenInitCmd() *cobra.Command {
- cmd := &cobra.Command{
- Use: "open-init [port-id] [counterparty-port-id] [connection-hops]",
- Short: "Creates and sends a ChannelOpenInit message",
- Args: cobra.ExactArgs(3),
- RunE: func(cmd *cobra.Command, args []string) error {
- clientCtx, err := client.GetClientTxContext(cmd)
- if err != nil {
- return err
- }
- portID := args[0]
- counterpartyPortID := args[1]
- hops := strings.Split(args[2], "/")
- order := channelOrder(cmd.Flags())
- version, _ := cmd.Flags().GetString(FlagIBCVersion)
-
- msg := types.NewMsgChannelOpenInit(
- portID, version, order, hops,
- counterpartyPortID, clientCtx.GetFromAddress().String(),
- )
- svcMsgClientConn := &msgservice.ServiceMsgClientConn{}
- msgClient := types.NewMsgClient(svcMsgClientConn)
- _, err = msgClient.ChannelOpenInit(cmd.Context(), msg)
- if err != nil {
- return err
- }
-
- return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), svcMsgClientConn.GetMsgs()...)
- },
- }
-
- cmd.Flags().Bool(FlagOrdered, true, "Pass flag for opening ordered channels")
- cmd.Flags().String(FlagIBCVersion, ibctransfertypes.Version, "IBC application version")
- flags.AddTxFlagsToCmd(cmd)
-
- return cmd
-}
-
-// NewChannelOpenTryCmd returns the command to create a MsgChannelOpenTry transaction
-func NewChannelOpenTryCmd() *cobra.Command {
- cmd := &cobra.Command{
- Use: "open-try [port-id] [channel-id] [counterparty-port-id] [counterparty-channel-id] [connection-hops] [/path/to/proof_init.json] [proof-height]",
- Short: "Creates and sends a ChannelOpenTry message",
- Args: cobra.ExactArgs(7),
- RunE: func(cmd *cobra.Command, args []string) error {
- clientCtx, err := client.GetClientTxContext(cmd)
- if err != nil {
- return err
- }
- portID := args[0]
- channelID := args[1]
- counterpartyPortID := args[2]
- counterpartyChannelID := args[3]
- hops := strings.Split(args[4], "/")
- order := channelOrder(cmd.Flags())
-
- // TODO: Differentiate between channel and counterparty versions.
- version, _ := cmd.Flags().GetString(FlagIBCVersion)
-
- proofInit, err := connectionutils.ParseProof(clientCtx.LegacyAmino, args[5])
- if err != nil {
- return err
- }
-
- proofHeight, err := clienttypes.ParseHeight(args[6])
- if err != nil {
- return err
- }
-
- msg := types.NewMsgChannelOpenTry(
- portID, channelID, version, order, hops,
- counterpartyPortID, counterpartyChannelID, version,
- proofInit, proofHeight, clientCtx.GetFromAddress().String(),
- )
- svcMsgClientConn := &msgservice.ServiceMsgClientConn{}
- msgClient := types.NewMsgClient(svcMsgClientConn)
- _, err = msgClient.ChannelOpenTry(cmd.Context(), msg)
- if err != nil {
- return err
- }
-
- return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), svcMsgClientConn.GetMsgs()...)
- },
- }
-
- cmd.Flags().Bool(FlagOrdered, true, "Pass flag for opening ordered channels")
- cmd.Flags().String(FlagIBCVersion, ibctransfertypes.Version, "IBC application version")
- flags.AddTxFlagsToCmd(cmd)
-
- return cmd
-}
-
-// NewChannelOpenAckCmd returns the command to create a MsgChannelOpenAck transaction
-func NewChannelOpenAckCmd() *cobra.Command {
- cmd := &cobra.Command{
- Use: "open-ack [port-id] [channel-id] [counterparty-channel-id] [/path/to/proof_try.json] [proof-height]",
- Short: "Creates and sends a ChannelOpenAck message",
- Args: cobra.ExactArgs(5),
- RunE: func(cmd *cobra.Command, args []string) error {
- clientCtx, err := client.GetClientTxContext(cmd)
- if err != nil {
- return err
- }
- portID := args[0]
- channelID := args[1]
- counterpartyChannelID := args[2]
-
- // TODO: Differentiate between channel and counterparty versions.
- version, _ := cmd.Flags().GetString(FlagIBCVersion)
-
- proofTry, err := connectionutils.ParseProof(clientCtx.LegacyAmino, args[3])
- if err != nil {
- return err
- }
-
- proofHeight, err := clienttypes.ParseHeight(args[4])
- if err != nil {
- return err
- }
-
- msg := types.NewMsgChannelOpenAck(
- portID, channelID, counterpartyChannelID, version, proofTry, proofHeight, clientCtx.GetFromAddress().String(),
- )
- svcMsgClientConn := &msgservice.ServiceMsgClientConn{}
- msgClient := types.NewMsgClient(svcMsgClientConn)
- _, err = msgClient.ChannelOpenAck(cmd.Context(), msg)
- if err != nil {
- return err
- }
-
- return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), svcMsgClientConn.GetMsgs()...)
- },
- }
- cmd.Flags().String(FlagIBCVersion, ibctransfertypes.Version, "IBC application version")
- flags.AddTxFlagsToCmd(cmd)
-
- return cmd
-}
-
-// NewChannelOpenConfirmCmd returns the command to create a MsgChannelOpenConfirm transaction
-func NewChannelOpenConfirmCmd() *cobra.Command {
- cmd := &cobra.Command{
- Use: "open-confirm [port-id] [channel-id] [/path/to/proof_ack.json] [proof-height]",
- Short: "Creates and sends a ChannelOpenConfirm message",
- Args: cobra.ExactArgs(4),
- RunE: func(cmd *cobra.Command, args []string) error {
- clientCtx, err := client.GetClientTxContext(cmd)
- if err != nil {
- return err
- }
- portID := args[0]
- channelID := args[1]
-
- proofAck, err := connectionutils.ParseProof(clientCtx.LegacyAmino, args[2])
- if err != nil {
- return err
- }
-
- proofHeight, err := clienttypes.ParseHeight(args[3])
- if err != nil {
- return err
- }
-
- msg := types.NewMsgChannelOpenConfirm(
- portID, channelID, proofAck, proofHeight, clientCtx.GetFromAddress().String(),
- )
- svcMsgClientConn := &msgservice.ServiceMsgClientConn{}
- msgClient := types.NewMsgClient(svcMsgClientConn)
- _, err = msgClient.ChannelOpenConfirm(cmd.Context(), msg)
- if err != nil {
- return err
- }
-
- return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), svcMsgClientConn.GetMsgs()...)
- },
- }
-
- flags.AddTxFlagsToCmd(cmd)
-
- return cmd
-}
-
-// NewChannelCloseInitCmd returns the command to create a MsgChannelCloseInit transaction
-func NewChannelCloseInitCmd() *cobra.Command {
- cmd := &cobra.Command{
- Use: "close-init [port-id] [channel-id]",
- Short: "Creates and sends a ChannelCloseInit message",
- Args: cobra.ExactArgs(2),
- RunE: func(cmd *cobra.Command, args []string) error {
- clientCtx, err := client.GetClientTxContext(cmd)
- if err != nil {
- return err
- }
- portID := args[0]
- channelID := args[1]
-
- msg := types.NewMsgChannelCloseInit(portID, channelID, clientCtx.GetFromAddress().String())
- svcMsgClientConn := &msgservice.ServiceMsgClientConn{}
- msgClient := types.NewMsgClient(svcMsgClientConn)
- _, err = msgClient.ChannelCloseInit(cmd.Context(), msg)
- if err != nil {
- return err
- }
-
- return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), svcMsgClientConn.GetMsgs()...)
- },
- }
-
- flags.AddTxFlagsToCmd(cmd)
-
- return cmd
-}
-
-// NewChannelCloseConfirmCmd returns the command to create a MsgChannelCloseConfirm transaction
-func NewChannelCloseConfirmCmd() *cobra.Command {
- cmd := &cobra.Command{
- Use: "close-confirm [port-id] [channel-id] [/path/to/proof_init.json] [proof-height]",
- Short: "Creates and sends a ChannelCloseConfirm message",
- Args: cobra.ExactArgs(4),
- RunE: func(cmd *cobra.Command, args []string) error {
- clientCtx, err := client.GetClientTxContext(cmd)
- if err != nil {
- return err
- }
- portID := args[0]
- channelID := args[1]
-
- proofInit, err := connectionutils.ParseProof(clientCtx.LegacyAmino, args[2])
- if err != nil {
- return err
- }
-
- proofHeight, err := clienttypes.ParseHeight(args[3])
- if err != nil {
- return err
- }
-
- msg := types.NewMsgChannelCloseConfirm(
- portID, channelID, proofInit, proofHeight, clientCtx.GetFromAddress().String(),
- )
- svcMsgClientConn := &msgservice.ServiceMsgClientConn{}
- msgClient := types.NewMsgClient(svcMsgClientConn)
- _, err = msgClient.ChannelCloseConfirm(cmd.Context(), msg)
- if err != nil {
- return err
- }
-
- return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), svcMsgClientConn.GetMsgs()...)
- },
- }
-
- flags.AddTxFlagsToCmd(cmd)
-
- return cmd
-}
-
-func channelOrder(fs *pflag.FlagSet) types.Order {
- if ordered, _ := fs.GetBool(FlagOrdered); ordered {
- return types.ORDERED
- }
-
- return types.UNORDERED
-}
diff --git a/modules/core/client/cli/cli.go b/modules/core/client/cli/cli.go
index 4a7054fb..687806b1 100644
--- a/modules/core/client/cli/cli.go
+++ b/modules/core/client/cli/cli.go
@@ -22,7 +22,6 @@ func GetTxCmd() *cobra.Command {
ibcTxCmd.AddCommand(
ibcclient.GetTxCmd(),
- connection.GetTxCmd(),
channel.GetTxCmd(),
)
diff --git a/testing/sdk_test.go b/testing/sdk_test.go
index 2136dd07..2375a2bc 100644
--- a/testing/sdk_test.go
+++ b/testing/sdk_test.go
@@ -29,7 +29,6 @@ import (
dbm "github.com/tendermint/tm-db"
ibcclientcli "github.com/cosmos/ibc-go/modules/core/02-client/client/cli"
- ibccli "github.com/cosmos/ibc-go/modules/core/04-channel/client/cli"
"github.com/cosmos/ibc-go/testing/simapp"
)
@@ -202,6 +201,11 @@ func (s *IntegrationTestSuite) TestLegacyRestErrMessages() {
`{"@type":"/ibc.lightclients.solomachine.v1.ClientState","sequence":"1","frozen_sequence":"0","consensus_state":{"public_key":{"@type":"/cosmos.crypto.secp256k1.PubKey","key":"AtK50+5pJOoaa04qqAqrnyAqsYrwrR/INnA6UPIaYZlp"},"diversifier":"testing","timestamp":"10"},"allow_update_after_proposal":false}`,
)
+ badClientStateJSON := testutil.WriteToNewTempFile(
+ s.T(),
+ `{"@type":"/ibc.lightclients.solomachine.v1.ClientState","sequence":"1","frozen_sequence":"0","consensus_state":{"public_key":{"@type":"/cosmos.crypto.secp256k1.PubKey","key":"AtK50+5pJOoaa04qqAqrnyAqsYrwrR/INnA6UPIaYZlp"},"diversifier":"DIFFERENT","timestamp":"10"},"allow_update_after_proposal":false}`,
+ )
+
// Write consensus json to temp file, used for an IBC message.
// Generated by printing the result of cdc.MarshalInterfaceJSON on
// a solo machine consensus state
@@ -218,10 +222,10 @@ func (s *IntegrationTestSuite) TestLegacyRestErrMessages() {
}{
{
"Failing IBC message",
- ibccli.NewChannelCloseInitCmd(),
+ ibcclientcli.NewCreateClientCmd(),
[]string{
- "121", // dummy port-id
- "channel-0", // dummy channel-id
+ badClientStateJSON.Name(), // path to client state json
consensusJSON.Name(), // path to consensus json
fmt.Sprintf("--%s=true", flags.FlagSkipConfirmation),
fmt.Sprintf("--%s=%s", flags.FlagBroadcastMode, flags.BroadcastBlock),
fmt.Sprintf("--%s=%s", flags.FlagFees, sdk.NewCoins(sdk.NewCoin(s.cfg.BondDenom, sdk.NewInt(10))).String()),
@@ -229,7 +233,7 @@ func (s *IntegrationTestSuite) TestLegacyRestErrMessages() {
fmt.Sprintf("--%s=%s", flags.FlagFrom, val.Address.String()),
fmt.Sprintf("--%s=foobar", flags.FlagMemo),
},
- uint32(7),
+ uint32(8),
},
{
"Successful IBC message",
From e12261ce54efb238229316003fe62dc0d0ab2e2e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?colin=20axn=C3=A9r?=
<25233464+colin-axner@users.noreply.github.com>
Date: Tue, 13 Apr 2021 11:58:57 +0200
Subject: [PATCH 032/393] update README (#110)
* update README
* fix formatting
---
README.md | 40 +++++++++++++++++++++++++---------------
1 file changed, 25 insertions(+), 15 deletions(-)
diff --git a/README.md b/README.md
index 6eaf9931..6388b452 100644
--- a/README.md
+++ b/README.md
@@ -29,28 +29,38 @@
-Interblockchain communication protocol (IBC) implementation in Golang built as a SDK module.
+Interblockchain communication protocol (IBC) implementation in Golang built as an SDK module.
-## Components
+## Contents
-### Core
+1. **[Core IBC Implementation](https://github.com/cosmos/ibc-go/tree/main/modules/core)**
-The `core/` directory contains the SDK IBC module that SDK based chains must integrate in order to utilize this implementation of IBC.
-It handles the core components of IBC including clients, connection, channels, packets, acknowledgements, and timeouts.
+ 1.1 [ICS 02 Client](https://github.com/cosmos/ibc-go/tree/main/modules/core/02-client)
-### Applications
+ 1.2 [ICS 03 Connection](https://github.com/cosmos/ibc-go/tree/main/modules/core/03-connection)
-Applications can be built as modules to utilize core IBC by fulfilling a set of callbacks.
-Fungible Token Transfers is currently the only supported application module.
+ 1.3 [ICS 04 Channel](https://github.com/cosmos/ibc-go/tree/main/modules/core/04-channel)
-### IBC Light Clients
+ 1.4 [ICS 05 Port](https://github.com/cosmos/ibc-go/tree/main/modules/core/05-port)
-IBC light clients are on-chain implementations of an off-chain light clients.
-This repository currently supports tendermint and solo-machine light clients.
-The localhost client is currently non-functional.
+ 1.5 [ICS 23 Commitment](https://github.com/cosmos/ibc-go/tree/main/modules/core/23-commitment/types)
-## Docs
+ 1.6 [ICS 24 Host](https://github.com/cosmos/ibc-go/tree/main/modules/core/24-host)
-Please see our [documentation](docs/README.md) for more information.
+2. **Applications**
-Checkout the [IBC website](https://ibcprotocol.org/).
+ 2.1 [ICS 20 Fungible Token Transfers](https://github.com/cosmos/ibc-go/tree/main/modules/apps/transfer)
+
+3. **Light Clients**
+
+ 3.1 [ICS 07 Tendermint](https://github.com/cosmos/ibc-go/tree/main/modules/light-clients/07-tendermint)
+
+ 3.2 [ICS 06 Solo Machine](https://github.com/cosmos/ibc-go/tree/main/modules/light-clients/06-solomachine)
+
+Note: The localhost client is currently non-functional.
+
+## Resources
+
+- [IBC Website](https://ibcprotocol.org/)
+- [IBC Specification](https://github.com/cosmos/ibc)
+- [Documentation](docs/README.md)
From 0174312bc7ed1a7e84087aa45263748cf47d8b6b Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 13 Apr 2021 10:05:25 +0000
Subject: [PATCH 033/393] Bump actions/cache from v2.1.4 to v2.1.5 (#117)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Bumps [actions/cache](https://github.com/actions/cache) from v2.1.4 to v2.1.5.
- [Release notes](https://github.com/actions/cache/releases)
- [Commits](https://github.com/actions/cache/compare/v2.1.4...1a9e2138d905efd099035b49d8b7a3888c653ca8)
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: colin axnér <25233464+colin-axner@users.noreply.github.com>
---
.github/workflows/test.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index b2354e79..452c41f3 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -26,7 +26,7 @@ jobs:
- name: install tparse
run: |
export GO111MODULE="on" && go get github.com/mfridman/tparse@v0.8.3
- - uses: actions/cache@v2.1.4
+ - uses: actions/cache@v2.1.5
with:
path: ~/go/bin
key: ${{ runner.os }}-go-tparse-binary
From 7e673b9dad207ee663e50684877211fcc9ea72f6 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?colin=20axn=C3=A9r?=
<25233464+colin-axner@users.noreply.github.com>
Date: Wed, 14 Apr 2021 18:01:11 +0200
Subject: [PATCH 034/393] Fix solo machine handshake verification bug (#120)
* modify solo machine verify functions to use a pointer to ensure the sequence is incremented
* update changelog
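
The pointer receivers matter because each Verify function increments cs.Sequence and then persists the client state with setClientState; with a value receiver the increment mutates a copy and is silently lost. A stand-alone Go sketch of the difference (illustrative type and method names, not code from this repository):

```go
package main

import "fmt"

// ClientState mirrors only the field relevant to the bug fix.
type ClientState struct{ Sequence uint64 }

// value receiver: the increment mutates a copy and is lost
func (cs ClientState) bumpByValue() { cs.Sequence++ }

// pointer receiver: the increment is visible to the caller
func (cs *ClientState) bumpByPointer() { cs.Sequence++ }

func main() {
	cs := ClientState{Sequence: 1}
	cs.bumpByValue()
	fmt.Println(cs.Sequence) // still 1
	cs.bumpByPointer()
	fmt.Println(cs.Sequence) // 2
}
```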
---
CHANGELOG.md | 4 +++
.../06-solomachine/types/client_state.go | 34 +++++++++----------
.../06-solomachine/types/client_state_test.go | 5 +++
3 files changed, 26 insertions(+), 17 deletions(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 696cc07c..3e92fea9 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -36,6 +36,10 @@ Ref: https://keepachangelog.com/en/1.0.0/
## [Unreleased]
+### Bug Fixes
+
+* (modules/light-clients/06-solomachine) [\#120](https://github.com/cosmos/ibc-go/pull/120) Fix solo machine handshake verification bug.
+
### API Breaking
* (modules/core) [\#109](https://github.com/cosmos/ibc-go/pull/109) Remove connection and channel handshake CLI commands.
diff --git a/modules/light-clients/06-solomachine/types/client_state.go b/modules/light-clients/06-solomachine/types/client_state.go
index d008ac81..c208e23b 100644
--- a/modules/light-clients/06-solomachine/types/client_state.go
+++ b/modules/light-clients/06-solomachine/types/client_state.go
@@ -100,7 +100,7 @@ func (cs ClientState) VerifyUpgradeAndUpdateState(
// VerifyClientState verifies a proof of the client state of the running chain
// stored on the solo machine.
-func (cs ClientState) VerifyClientState(
+func (cs *ClientState) VerifyClientState(
store sdk.KVStore,
cdc codec.BinaryMarshaler,
height exported.Height,
@@ -131,13 +131,13 @@ func (cs ClientState) VerifyClientState(
cs.Sequence++
cs.ConsensusState.Timestamp = timestamp
- setClientState(store, cdc, &cs)
+ setClientState(store, cdc, cs)
return nil
}
// VerifyClientConsensusState verifies a proof of the consensus state of the
// running chain stored on the solo machine.
-func (cs ClientState) VerifyClientConsensusState(
+func (cs *ClientState) VerifyClientConsensusState(
store sdk.KVStore,
cdc codec.BinaryMarshaler,
height exported.Height,
@@ -169,13 +169,13 @@ func (cs ClientState) VerifyClientConsensusState(
cs.Sequence++
cs.ConsensusState.Timestamp = timestamp
- setClientState(store, cdc, &cs)
+ setClientState(store, cdc, cs)
return nil
}
// VerifyConnectionState verifies a proof of the connection state of the
// specified connection end stored on the target machine.
-func (cs ClientState) VerifyConnectionState(
+func (cs *ClientState) VerifyConnectionState(
store sdk.KVStore,
cdc codec.BinaryMarshaler,
height exported.Height,
@@ -206,13 +206,13 @@ func (cs ClientState) VerifyConnectionState(
cs.Sequence++
cs.ConsensusState.Timestamp = timestamp
- setClientState(store, cdc, &cs)
+ setClientState(store, cdc, cs)
return nil
}
// VerifyChannelState verifies a proof of the channel state of the specified
// channel end, under the specified port, stored on the target machine.
-func (cs ClientState) VerifyChannelState(
+func (cs *ClientState) VerifyChannelState(
store sdk.KVStore,
cdc codec.BinaryMarshaler,
height exported.Height,
@@ -244,13 +244,13 @@ func (cs ClientState) VerifyChannelState(
cs.Sequence++
cs.ConsensusState.Timestamp = timestamp
- setClientState(store, cdc, &cs)
+ setClientState(store, cdc, cs)
return nil
}
// VerifyPacketCommitment verifies a proof of an outgoing packet commitment at
// the specified port, specified channel, and specified sequence.
-func (cs ClientState) VerifyPacketCommitment(
+func (cs *ClientState) VerifyPacketCommitment(
store sdk.KVStore,
cdc codec.BinaryMarshaler,
height exported.Height,
@@ -285,13 +285,13 @@ func (cs ClientState) VerifyPacketCommitment(
cs.Sequence++
cs.ConsensusState.Timestamp = timestamp
- setClientState(store, cdc, &cs)
+ setClientState(store, cdc, cs)
return nil
}
// VerifyPacketAcknowledgement verifies a proof of an incoming packet
// acknowledgement at the specified port, specified channel, and specified sequence.
-func (cs ClientState) VerifyPacketAcknowledgement(
+func (cs *ClientState) VerifyPacketAcknowledgement(
store sdk.KVStore,
cdc codec.BinaryMarshaler,
height exported.Height,
@@ -326,14 +326,14 @@ func (cs ClientState) VerifyPacketAcknowledgement(
cs.Sequence++
cs.ConsensusState.Timestamp = timestamp
- setClientState(store, cdc, &cs)
+ setClientState(store, cdc, cs)
return nil
}
// VerifyPacketReceiptAbsence verifies a proof of the absence of an
// incoming packet receipt at the specified port, specified channel, and
// specified sequence.
-func (cs ClientState) VerifyPacketReceiptAbsence(
+func (cs *ClientState) VerifyPacketReceiptAbsence(
store sdk.KVStore,
cdc codec.BinaryMarshaler,
height exported.Height,
@@ -367,13 +367,13 @@ func (cs ClientState) VerifyPacketReceiptAbsence(
cs.Sequence++
cs.ConsensusState.Timestamp = timestamp
- setClientState(store, cdc, &cs)
+ setClientState(store, cdc, cs)
return nil
}
// VerifyNextSequenceRecv verifies a proof of the next sequence number to be
// received of the specified channel at the specified port.
-func (cs ClientState) VerifyNextSequenceRecv(
+func (cs *ClientState) VerifyNextSequenceRecv(
store sdk.KVStore,
cdc codec.BinaryMarshaler,
height exported.Height,
@@ -407,7 +407,7 @@ func (cs ClientState) VerifyNextSequenceRecv(
cs.Sequence++
cs.ConsensusState.Timestamp = timestamp
- setClientState(store, cdc, &cs)
+ setClientState(store, cdc, cs)
return nil
}
@@ -417,7 +417,7 @@ func (cs ClientState) VerifyNextSequenceRecv(
// along with the solo-machine sequence encoded in the proofHeight.
func produceVerificationArgs(
cdc codec.BinaryMarshaler,
- cs ClientState,
+ cs *ClientState,
height exported.Height,
prefix exported.Prefix,
proof []byte,
diff --git a/modules/light-clients/06-solomachine/types/client_state_test.go b/modules/light-clients/06-solomachine/types/client_state_test.go
index 6666f2d4..6c7661ef 100644
--- a/modules/light-clients/06-solomachine/types/client_state_test.go
+++ b/modules/light-clients/06-solomachine/types/client_state_test.go
@@ -247,6 +247,7 @@ func (suite *SoloMachineTestSuite) TestVerifyClientState() {
if tc.expPass {
suite.Require().NoError(err)
+ suite.Require().Equal(expSeq, tc.clientState.Sequence)
suite.Require().Equal(expSeq, suite.GetSequenceFromStore(), "sequence not updated in the store (%d) on valid test case %s", suite.GetSequenceFromStore(), tc.name)
} else {
suite.Require().Error(err)
@@ -375,6 +376,7 @@ func (suite *SoloMachineTestSuite) TestVerifyClientConsensusState() {
if tc.expPass {
suite.Require().NoError(err)
+ suite.Require().Equal(expSeq, tc.clientState.Sequence)
suite.Require().Equal(expSeq, suite.GetSequenceFromStore(), "sequence not updated in the store (%d) on valid test case %s", suite.GetSequenceFromStore(), tc.name)
} else {
suite.Require().Error(err)
@@ -465,6 +467,7 @@ func (suite *SoloMachineTestSuite) TestVerifyConnectionState() {
if tc.expPass {
suite.Require().NoError(err, "valid test case %d failed: %s", i, tc.name)
+ suite.Require().Equal(expSeq, tc.clientState.Sequence)
suite.Require().Equal(expSeq, suite.GetSequenceFromStore(), "sequence not updated in the store (%d) on valid test case %d: %s", suite.GetSequenceFromStore(), i, tc.name)
} else {
suite.Require().Error(err, "invalid test case %d passed: %s", i, tc.name)
@@ -554,6 +557,7 @@ func (suite *SoloMachineTestSuite) TestVerifyChannelState() {
if tc.expPass {
suite.Require().NoError(err, "valid test case %d failed: %s", i, tc.name)
+ suite.Require().Equal(expSeq, tc.clientState.Sequence)
suite.Require().Equal(expSeq, suite.GetSequenceFromStore(), "sequence not updated in the store (%d) on valid test case %d: %s", suite.GetSequenceFromStore(), i, tc.name)
} else {
suite.Require().Error(err, "invalid test case %d passed: %s", i, tc.name)
@@ -903,6 +907,7 @@ func (suite *SoloMachineTestSuite) TestVerifyNextSeqRecv() {
if tc.expPass {
suite.Require().NoError(err, "valid test case %d failed: %s", i, tc.name)
+ suite.Require().Equal(expSeq, tc.clientState.Sequence)
suite.Require().Equal(expSeq, suite.GetSequenceFromStore(), "sequence not updated in the store (%d) on valid test case %d: %s", suite.GetSequenceFromStore(), i, tc.name)
} else {
suite.Require().Error(err, "invalid test case %d passed: %s", i, tc.name)
From 5d3a80060ed4ed78752596103d8200c3c07337cb Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?colin=20axn=C3=A9r?=
<25233464+colin-axner@users.noreply.github.com>
Date: Wed, 14 Apr 2021 18:53:39 +0200
Subject: [PATCH 035/393] fix solo machine merkle prefix casting bug (#122)
* fix merkle prefix casting bug: the prefix reaching the solo machine client through the Prefix interface is a *MerklePrefix, so the type assertion must target the pointer type (see the sketch below); credit: @devashishdxt
* update changelog
* update docs
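
The one-line fix below changes the type assertion in produceVerificationArgs from the value type to the pointer type, and updates the test fixtures to pass a pointer prefix. A minimal sketch with simplified stand-in types, not the actual exported.Prefix and commitmenttypes.MerklePrefix definitions:

```go
package main

import "fmt"

// Simplified stand-ins for exported.Prefix and commitmenttypes.MerklePrefix.
type Prefix interface {
	Empty() bool
}

type MerklePrefix struct {
	KeyPrefix []byte
}

func (mp MerklePrefix) Empty() bool { return len(mp.KeyPrefix) == 0 }

func main() {
	// Callers hand the client a pointer wrapped in the interface.
	var prefix Prefix = &MerklePrefix{KeyPrefix: []byte("ibc")}

	// Asserting the value type fails: the dynamic type is *MerklePrefix.
	_, okValue := prefix.(MerklePrefix)

	// Asserting the pointer type matches what was actually stored.
	_, okPointer := prefix.(*MerklePrefix)

	fmt.Println(okValue, okPointer) // false true
}
```

With the old value-type assertion a valid pointer prefix was rejected with "invalid prefix type", which is exactly the error path shown in the diff.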
---
CHANGELOG.md | 2 ++
modules/light-clients/06-solomachine/spec/01_concepts.md | 3 +++
modules/light-clients/06-solomachine/types/client_state.go | 2 +-
.../light-clients/06-solomachine/types/client_state_test.go | 6 ++++--
modules/light-clients/06-solomachine/types/codec_test.go | 4 ++--
5 files changed, 12 insertions(+), 5 deletions(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 3e92fea9..1df5346f 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -38,8 +38,10 @@ Ref: https://keepachangelog.com/en/1.0.0/
### Bug Fixes
+* (modules/light-clients/06-solomachine) [\#122](https://github.com/cosmos/ibc-go/pull/122) Fix solo machine merkle prefix casting bug.
* (modules/light-clients/06-solomachine) [\#120](https://github.com/cosmos/ibc-go/pull/120) Fix solo machine handshake verification bug.
+
### API Breaking
* (modules/core) [\#109](https://github.com/cosmos/ibc-go/pull/109) Remove connection and channel handshake CLI commands.
diff --git a/modules/light-clients/06-solomachine/spec/01_concepts.md b/modules/light-clients/06-solomachine/spec/01_concepts.md
index de486b71..a121803f 100644
--- a/modules/light-clients/06-solomachine/spec/01_concepts.md
+++ b/modules/light-clients/06-solomachine/spec/01_concepts.md
@@ -108,6 +108,9 @@ timestampedSignatureData := &types.TimestampedSignatureData{
proof, err := cdc.MarshalBinaryBare(timestampedSignatureData)
```
+NOTE: At the end of this process, the sequence associated with the key needs to be updated.
+The sequence must be incremented each time proof is generated.
+
## Updates By Header
An update by a header will only succeed if:
diff --git a/modules/light-clients/06-solomachine/types/client_state.go b/modules/light-clients/06-solomachine/types/client_state.go
index c208e23b..efa740ca 100644
--- a/modules/light-clients/06-solomachine/types/client_state.go
+++ b/modules/light-clients/06-solomachine/types/client_state.go
@@ -435,7 +435,7 @@ func produceVerificationArgs(
return nil, nil, 0, 0, sdkerrors.Wrap(commitmenttypes.ErrInvalidPrefix, "prefix cannot be empty")
}
- _, ok := prefix.(commitmenttypes.MerklePrefix)
+ _, ok := prefix.(*commitmenttypes.MerklePrefix)
if !ok {
return nil, nil, 0, 0, sdkerrors.Wrapf(commitmenttypes.ErrInvalidPrefix, "invalid prefix type %T, expected MerklePrefix", prefix)
}
diff --git a/modules/light-clients/06-solomachine/types/client_state_test.go b/modules/light-clients/06-solomachine/types/client_state_test.go
index 6c7661ef..654ab1ba 100644
--- a/modules/light-clients/06-solomachine/types/client_state_test.go
+++ b/modules/light-clients/06-solomachine/types/client_state_test.go
@@ -19,7 +19,9 @@ const (
)
var (
- prefix = commitmenttypes.NewMerklePrefix([]byte("ibc"))
+ prefix = &commitmenttypes.MerklePrefix{
+ KeyPrefix: []byte("ibc"),
+ }
consensusHeight = clienttypes.ZeroHeight()
)
@@ -387,7 +389,7 @@ func (suite *SoloMachineTestSuite) TestVerifyClientConsensusState() {
}
func (suite *SoloMachineTestSuite) TestVerifyConnectionState() {
- counterparty := connectiontypes.NewCounterparty("clientB", testConnectionID, prefix)
+ counterparty := connectiontypes.NewCounterparty("clientB", testConnectionID, *prefix)
conn := connectiontypes.NewConnectionEnd(connectiontypes.OPEN, "clientA", counterparty, connectiontypes.ExportedVersionsToProto(connectiontypes.GetCompatibleVersions()), 0)
path := suite.solomachine.GetConnectionStatePath(testConnectionID)
diff --git a/modules/light-clients/06-solomachine/types/codec_test.go b/modules/light-clients/06-solomachine/types/codec_test.go
index d4589be6..e635cb8c 100644
--- a/modules/light-clients/06-solomachine/types/codec_test.go
+++ b/modules/light-clients/06-solomachine/types/codec_test.go
@@ -67,7 +67,7 @@ func (suite SoloMachineTestSuite) TestUnmarshalDataByType() {
},
{
"connection", types.CONNECTION, func() {
- counterparty := connectiontypes.NewCounterparty("clientB", testConnectionID, prefix)
+ counterparty := connectiontypes.NewCounterparty("clientB", testConnectionID, *prefix)
conn := connectiontypes.NewConnectionEnd(connectiontypes.OPEN, "clientA", counterparty, connectiontypes.ExportedVersionsToProto(connectiontypes.GetCompatibleVersions()), 0)
path := solomachine.GetConnectionStatePath("connectionID")
@@ -98,7 +98,7 @@ func (suite SoloMachineTestSuite) TestUnmarshalDataByType() {
},
{
"bad channel (uses connection data)", types.CHANNEL, func() {
- counterparty := connectiontypes.NewCounterparty("clientB", testConnectionID, prefix)
+ counterparty := connectiontypes.NewCounterparty("clientB", testConnectionID, *prefix)
conn := connectiontypes.NewConnectionEnd(connectiontypes.OPEN, "clientA", counterparty, connectiontypes.ExportedVersionsToProto(connectiontypes.GetCompatibleVersions()), 0)
path := solomachine.GetConnectionStatePath("connectionID")
From 39ba1af723190a5acae18cd58c3cbf94170250fb Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 19 Apr 2021 12:15:09 +0200
Subject: [PATCH 036/393] Bump codecov/codecov-action from v1.3.2 to v1.4.0
(#126)
Bumps [codecov/codecov-action](https://github.com/codecov/codecov-action) from v1.3.2 to v1.4.0.
- [Release notes](https://github.com/codecov/codecov-action/releases)
- [Changelog](https://github.com/codecov/codecov-action/blob/master/CHANGELOG.md)
- [Commits](https://github.com/codecov/codecov-action/compare/v1.3.2...0e28ff86a50029a44d10df6ed4c308711925a6a8)
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
.github/workflows/test.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 452c41f3..41163003 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -150,7 +150,7 @@ jobs:
sed -i.bak "/$(echo $filename | sed 's/\//\\\//g')/d" coverage.txt
done
if: env.GIT_DIFF
- - uses: codecov/codecov-action@v1.3.2
+ - uses: codecov/codecov-action@v1.4.0
with:
file: ./coverage.txt
if: env.GIT_DIFF
From 525ffc4d0143d9836b7981f3036d559503c38244 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?colin=20axn=C3=A9r?=
<25233464+colin-axner@users.noreply.github.com>
Date: Tue, 20 Apr 2021 16:44:20 +0200
Subject: [PATCH 037/393] add code of conduct (#130)
---
CODE_OF_CONDUCT.md | 46 ++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 46 insertions(+)
create mode 100644 CODE_OF_CONDUCT.md
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
new file mode 100644
index 00000000..c9f9cc24
--- /dev/null
+++ b/CODE_OF_CONDUCT.md
@@ -0,0 +1,46 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.
+
+## Our Standards
+
+Examples of behavior that contributes to creating a positive environment include:
+
+* Using welcoming and inclusive language
+* Being respectful of differing viewpoints and experiences
+* Gracefully accepting constructive criticism
+* Focusing on what is best for the community
+* Showing empathy towards other community members
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery and unwelcome sexual attention or advances
+* Trolling, insulting/derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or electronic address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a professional setting
+
+## Our Responsibilities
+
+Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
+
+## Scope
+
+This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at community@interchain.io. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
+
+Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]
+
+[homepage]: http://contributor-covenant.org
+[version]: http://contributor-covenant.org/version/1/4/
From 3a61a65d245c180839a1d5ba0609fb65201168ff Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Wed, 21 Apr 2021 09:30:45 +0000
Subject: [PATCH 038/393] Bump codecov/codecov-action from v1.4.0 to v1.4.1
(#132)
Bumps [codecov/codecov-action](https://github.com/codecov/codecov-action) from v1.4.0 to v1.4.1.
- [Release notes](https://github.com/codecov/codecov-action/releases)
- [Changelog](https://github.com/codecov/codecov-action/blob/master/CHANGELOG.md)
- [Commits](https://github.com/codecov/codecov-action/compare/v1.4.0...967e2b38a85a62bd61be5529ada27ebc109948c2)
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
.github/workflows/test.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 41163003..7e70a0c4 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -150,7 +150,7 @@ jobs:
sed -i.bak "/$(echo $filename | sed 's/\//\\\//g')/d" coverage.txt
done
if: env.GIT_DIFF
- - uses: codecov/codecov-action@v1.4.0
+ - uses: codecov/codecov-action@v1.4.1
with:
file: ./coverage.txt
if: env.GIT_DIFF
From db6f316cb044aec21636c2e83f7926ba6c2edfe4 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?colin=20axn=C3=A9r?=
<25233464+colin-axner@users.noreply.github.com>
Date: Thu, 22 Apr 2021 11:38:44 +0200
Subject: [PATCH 039/393] IBC Testing Package v2 (#114)
* create global time under coordinator
* fix test
* various fixes to testing package
* initial design work for testing package v2
* add channel support
* fix handshake endpoint functions, SetupNew now works
* fix build
* update simapp func
* add configs
* fix tests, remove unnecessary code
* fix tests
* add send, recv, ack funcs for endpoint, add lots of todos
* apply review comments
* update all light clients to use testing package v2
* refactor handle recv in msg server
* finish msg server tests
* ibc testing part 2 (#127)
* remove old testing code, update light client tests, update part of transfer tests
* fix transfer tests
* remove old code from testing package
* refactor 02-client tests
* refactor 03-connection
* all tests passing
* self review fixes
* ignore testing package for codecov
* Update modules/core/03-connection/keeper/verify_test.go
* IBC Testing Refactor Part 3 (#129)
* IBCKeeper reference into an interface function
* move to testing app interface wip
* add rest of interface functions, fix tests
* finish making testing app modular
* ibc testing refactor part 4 (#133)
* update README
* test modularity with transfer, update README, minor fixes (a usage sketch of the new Path-based testing API follows below)
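
Since the testing package API changes substantially in this patch, here is a short sketch of the new Path/Endpoint flow, modelled directly on the NewTransferPath helper and the TestHandleMsgTransfer changes in the diff below. It is a hypothetical test method (TestTransferWithPath) and assumes the imports and TransferTestSuite setup from modules/apps/transfer/handler_test.go as shown in the diff:

```go
// Hypothetical example modelled on the diff below; not part of the patch.
func (suite *TransferTestSuite) TestTransferWithPath() {
	// A Path bundles both endpoints (client, connection and channel config)
	// that previously had to be threaded around as separate identifiers.
	path := ibctesting.NewPath(suite.chainA, suite.chainB)
	path.EndpointA.ChannelConfig.PortID = ibctesting.TransferPort
	path.EndpointB.ChannelConfig.PortID = ibctesting.TransferPort

	// Setup creates the clients, connection and channel for the path.
	suite.coordinator.Setup(path)

	timeoutHeight := clienttypes.NewHeight(0, 110)
	coin := sdk.NewCoin(sdk.DefaultBondDenom, sdk.NewInt(100))
	msg := types.NewMsgTransfer(
		path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, coin,
		suite.chainA.SenderAccount.GetAddress().String(),
		suite.chainB.SenderAccount.GetAddress().String(), timeoutHeight, 0,
	)

	// Messages are sent directly on the chain instead of via coordinator.SendMsg.
	_, err := suite.chainA.SendMsgs(msg)
	suite.Require().NoError(err)

	// Packets are relayed on the path instead of via coordinator.RelayPacket.
	data := types.NewFungibleTokenPacketData(
		coin.Denom, coin.Amount.Uint64(),
		suite.chainA.SenderAccount.GetAddress().String(),
		suite.chainB.SenderAccount.GetAddress().String(),
	)
	packet := channeltypes.NewPacket(
		data.GetBytes(), 1,
		path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID,
		path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID,
		timeoutHeight, 0,
	)
	ack := channeltypes.NewResultAcknowledgement([]byte{byte(1)})
	suite.Require().NoError(path.RelayPacket(packet, ack.Acknowledgement()))
}
```

Per the diff, SimApp-specific keepers are now reached through chain.GetSimApp() (e.g. TransferKeeper, BankKeeper), while core IBC access goes through chain.App.GetIBCKeeper(), reflecting the modular testing app interface introduced here.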
---
.codecov.yml | 1 +
modules/apps/transfer/handler_test.go | 63 +-
modules/apps/transfer/keeper/genesis_test.go | 6 +-
.../apps/transfer/keeper/grpc_query_test.go | 4 +-
modules/apps/transfer/keeper/keeper_test.go | 14 +-
.../apps/transfer/keeper/mbt_relay_test.go | 27 +-
modules/apps/transfer/keeper/params_test.go | 6 +-
modules/apps/transfer/keeper/relay_test.go | 157 +++--
modules/apps/transfer/module_test.go | 75 +--
modules/core/02-client/abci_test.go | 12 +-
modules/core/02-client/keeper/client_test.go | 56 +-
.../core/02-client/keeper/grpc_query_test.go | 15 +-
modules/core/02-client/keeper/keeper_test.go | 44 +-
modules/core/02-client/keeper/params_test.go | 6 +-
.../core/02-client/keeper/proposal_test.go | 52 +-
.../core/02-client/proposal_handler_test.go | 26 +-
modules/core/02-client/types/client_test.go | 8 +-
modules/core/02-client/types/genesis_test.go | 9 +-
modules/core/02-client/types/proposal_test.go | 19 +-
.../03-connection/keeper/grpc_query_test.go | 95 +--
.../03-connection/keeper/handshake_test.go | 346 +++++-----
.../core/03-connection/keeper/keeper_test.go | 78 ++-
.../core/03-connection/keeper/verify_test.go | 119 ++--
.../core/04-channel/keeper/grpc_query_test.go | 265 ++++----
.../core/04-channel/keeper/handshake_test.go | 510 +++++++-------
modules/core/04-channel/keeper/keeper_test.go | 195 +++---
modules/core/04-channel/keeper/packet_test.go | 507 +++++++-------
.../core/04-channel/keeper/timeout_test.go | 265 ++++----
modules/core/genesis_test.go | 16 +-
modules/core/keeper/msg_server_test.go | 323 +++++----
.../06-solomachine/types/client_state_test.go | 14 +-
.../types/proposal_handle_test.go | 4 +-
.../06-solomachine/types/solomachine_test.go | 2 +-
.../07-tendermint/types/client_state_test.go | 94 +--
.../07-tendermint/types/genesis_test.go | 8 +-
.../types/misbehaviour_handle_test.go | 6 +-
.../types/proposal_handle_test.go | 79 +--
.../07-tendermint/types/store_test.go | 43 +-
.../07-tendermint/types/update_test.go | 4 +-
.../07-tendermint/types/upgrade_test.go | 125 ++--
testing/README.md | 289 ++++++++
testing/app.go | 136 ++++
testing/chain.go | 526 ++-------------
testing/config.go | 65 ++
testing/coordinator.go | 628 +++---------------
testing/endpoint.go | 464 +++++++++++++
testing/events.go | 56 ++
testing/mock/mock.go | 17 +-
testing/path.go | 75 +++
testing/simapp/app.go | 33 +-
testing/simapp/sim_test.go | 2 +-
testing/simapp/test_helpers.go | 105 +--
testing/solomachine.go | 2 -
testing/types.go | 44 --
testing/values.go | 58 ++
55 files changed, 3290 insertions(+), 2908 deletions(-)
create mode 100644 testing/README.md
create mode 100644 testing/app.go
create mode 100644 testing/config.go
create mode 100644 testing/endpoint.go
create mode 100644 testing/events.go
create mode 100644 testing/path.go
delete mode 100644 testing/types.go
create mode 100644 testing/values.go
diff --git a/.codecov.yml b/.codecov.yml
index 7935dfdb..88f51441 100644
--- a/.codecov.yml
+++ b/.codecov.yml
@@ -53,4 +53,5 @@ ignore:
- "modules/**/**/**/*.pb.go"
- "modules/**/**/**/*.pb.gw.go"
- "modules/**/**/**/test_common.go"
+ - "testing/"
- "scripts/"
diff --git a/modules/apps/transfer/handler_test.go b/modules/apps/transfer/handler_test.go
index 976ecb2e..030fb1bd 100644
--- a/modules/apps/transfer/handler_test.go
+++ b/modules/apps/transfer/handler_test.go
@@ -9,7 +9,6 @@ import (
"github.com/cosmos/ibc-go/modules/apps/transfer/types"
clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
- "github.com/cosmos/ibc-go/modules/core/exported"
ibctesting "github.com/cosmos/ibc-go/testing"
)
@@ -31,90 +30,102 @@ func (suite *TransferTestSuite) SetupTest() {
suite.chainC = suite.coordinator.GetChain(ibctesting.GetChainID(2))
}
+func NewTransferPath(chainA, chainB *ibctesting.TestChain) *ibctesting.Path {
+ path := ibctesting.NewPath(chainA, chainB)
+ path.EndpointA.ChannelConfig.PortID = ibctesting.TransferPort
+ path.EndpointB.ChannelConfig.PortID = ibctesting.TransferPort
+
+ return path
+}
+
// constructs a send from chainA to chainB on the established channel/connection
// and sends the same coin back from chainB to chainA.
func (suite *TransferTestSuite) TestHandleMsgTransfer() {
// setup between chainA and chainB
- clientA, clientB, connA, connB := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
- channelA, channelB := suite.coordinator.CreateTransferChannels(suite.chainA, suite.chainB, connA, connB, channeltypes.UNORDERED)
- // originalBalance := suite.chainA.App.BankKeeper.GetBalance(suite.chainA.GetContext(), suite.chainA.SenderAccount.GetAddress(), sdk.DefaultBondDenom)
+ path := NewTransferPath(suite.chainA, suite.chainB)
+ suite.coordinator.Setup(path)
+
+ // originalBalance := suite.chainA.GetSimApp().BankKeeper.GetBalance(suite.chainA.GetContext(), suite.chainA.SenderAccount.GetAddress(), sdk.DefaultBondDenom)
timeoutHeight := clienttypes.NewHeight(0, 110)
coinToSendToB := sdk.NewCoin(sdk.DefaultBondDenom, sdk.NewInt(100))
// send from chainA to chainB
- msg := types.NewMsgTransfer(channelA.PortID, channelA.ID, coinToSendToB, suite.chainA.SenderAccount.GetAddress().String(), suite.chainB.SenderAccount.GetAddress().String(), timeoutHeight, 0)
+ msg := types.NewMsgTransfer(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, coinToSendToB, suite.chainA.SenderAccount.GetAddress().String(), suite.chainB.SenderAccount.GetAddress().String(), timeoutHeight, 0)
- err := suite.coordinator.SendMsg(suite.chainA, suite.chainB, clientB, msg)
+ _, err := suite.chainA.SendMsgs(msg)
suite.Require().NoError(err) // message committed
// relay send
fungibleTokenPacket := types.NewFungibleTokenPacketData(coinToSendToB.Denom, coinToSendToB.Amount.Uint64(), suite.chainA.SenderAccount.GetAddress().String(), suite.chainB.SenderAccount.GetAddress().String())
- packet := channeltypes.NewPacket(fungibleTokenPacket.GetBytes(), 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
+ packet := channeltypes.NewPacket(fungibleTokenPacket.GetBytes(), 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, 0)
ack := channeltypes.NewResultAcknowledgement([]byte{byte(1)})
- err = suite.coordinator.RelayPacket(suite.chainA, suite.chainB, clientA, clientB, packet, ack.Acknowledgement())
+ err = path.RelayPacket(packet, ack.Acknowledgement())
suite.Require().NoError(err) // relay committed
// check that voucher exists on chain B
voucherDenomTrace := types.ParseDenomTrace(types.GetPrefixedDenom(packet.GetDestPort(), packet.GetDestChannel(), sdk.DefaultBondDenom))
- balance := suite.chainB.App.BankKeeper.GetBalance(suite.chainB.GetContext(), suite.chainB.SenderAccount.GetAddress(), voucherDenomTrace.IBCDenom())
+ balance := suite.chainB.GetSimApp().BankKeeper.GetBalance(suite.chainB.GetContext(), suite.chainB.SenderAccount.GetAddress(), voucherDenomTrace.IBCDenom())
- coinSentFromAToB := types.GetTransferCoin(channelB.PortID, channelB.ID, sdk.DefaultBondDenom, 100)
+ coinSentFromAToB := types.GetTransferCoin(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, sdk.DefaultBondDenom, 100)
suite.Require().Equal(coinSentFromAToB, balance)
// setup between chainB to chainC
- clientOnBForC, clientOnCForB, connOnBForC, connOnCForB := suite.coordinator.SetupClientConnections(suite.chainB, suite.chainC, exported.Tendermint)
- channelOnBForC, channelOnCForB := suite.coordinator.CreateTransferChannels(suite.chainB, suite.chainC, connOnBForC, connOnCForB, channeltypes.UNORDERED)
+ // NOTE:
+ // pathBtoC.EndpointA = endpoint on chainB
+ // pathBtoC.EndpointB = endpoint on chainC
+ pathBtoC := NewTransferPath(suite.chainB, suite.chainC)
+ suite.coordinator.Setup(pathBtoC)
// send from chainB to chainC
- msg = types.NewMsgTransfer(channelOnBForC.PortID, channelOnBForC.ID, coinSentFromAToB, suite.chainB.SenderAccount.GetAddress().String(), suite.chainC.SenderAccount.GetAddress().String(), timeoutHeight, 0)
+ msg = types.NewMsgTransfer(pathBtoC.EndpointA.ChannelConfig.PortID, pathBtoC.EndpointA.ChannelID, coinSentFromAToB, suite.chainB.SenderAccount.GetAddress().String(), suite.chainC.SenderAccount.GetAddress().String(), timeoutHeight, 0)
- err = suite.coordinator.SendMsg(suite.chainB, suite.chainC, clientOnCForB, msg)
+ _, err = suite.chainB.SendMsgs(msg)
suite.Require().NoError(err) // message committed
// relay send
// NOTE: fungible token is prefixed with the full trace in order to verify the packet commitment
- fullDenomPath := types.GetPrefixedDenom(channelOnCForB.PortID, channelOnCForB.ID, voucherDenomTrace.GetFullDenomPath())
+ fullDenomPath := types.GetPrefixedDenom(pathBtoC.EndpointB.ChannelConfig.PortID, pathBtoC.EndpointB.ChannelID, voucherDenomTrace.GetFullDenomPath())
fungibleTokenPacket = types.NewFungibleTokenPacketData(voucherDenomTrace.GetFullDenomPath(), coinSentFromAToB.Amount.Uint64(), suite.chainB.SenderAccount.GetAddress().String(), suite.chainC.SenderAccount.GetAddress().String())
- packet = channeltypes.NewPacket(fungibleTokenPacket.GetBytes(), 1, channelOnBForC.PortID, channelOnBForC.ID, channelOnCForB.PortID, channelOnCForB.ID, timeoutHeight, 0)
- err = suite.coordinator.RelayPacket(suite.chainB, suite.chainC, clientOnBForC, clientOnCForB, packet, ack.Acknowledgement())
+ packet = channeltypes.NewPacket(fungibleTokenPacket.GetBytes(), 1, pathBtoC.EndpointA.ChannelConfig.PortID, pathBtoC.EndpointA.ChannelID, pathBtoC.EndpointB.ChannelConfig.PortID, pathBtoC.EndpointB.ChannelID, timeoutHeight, 0)
+ err = pathBtoC.RelayPacket(packet, ack.Acknowledgement())
suite.Require().NoError(err) // relay committed
coinSentFromBToC := sdk.NewInt64Coin(types.ParseDenomTrace(fullDenomPath).IBCDenom(), 100)
- balance = suite.chainC.App.BankKeeper.GetBalance(suite.chainC.GetContext(), suite.chainC.SenderAccount.GetAddress(), coinSentFromBToC.Denom)
+ balance = suite.chainC.GetSimApp().BankKeeper.GetBalance(suite.chainC.GetContext(), suite.chainC.SenderAccount.GetAddress(), coinSentFromBToC.Denom)
// check that the balance is updated on chainC
suite.Require().Equal(coinSentFromBToC, balance)
// check that balance on chain B is empty
- balance = suite.chainB.App.BankKeeper.GetBalance(suite.chainB.GetContext(), suite.chainB.SenderAccount.GetAddress(), coinSentFromBToC.Denom)
+ balance = suite.chainB.GetSimApp().BankKeeper.GetBalance(suite.chainB.GetContext(), suite.chainB.SenderAccount.GetAddress(), coinSentFromBToC.Denom)
suite.Require().Zero(balance.Amount.Int64())
// send from chainC back to chainB
- msg = types.NewMsgTransfer(channelOnCForB.PortID, channelOnCForB.ID, coinSentFromBToC, suite.chainC.SenderAccount.GetAddress().String(), suite.chainB.SenderAccount.GetAddress().String(), timeoutHeight, 0)
+ msg = types.NewMsgTransfer(pathBtoC.EndpointB.ChannelConfig.PortID, pathBtoC.EndpointB.ChannelID, coinSentFromBToC, suite.chainC.SenderAccount.GetAddress().String(), suite.chainB.SenderAccount.GetAddress().String(), timeoutHeight, 0)
- err = suite.coordinator.SendMsg(suite.chainC, suite.chainB, clientOnBForC, msg)
+ _, err = suite.chainC.SendMsgs(msg)
suite.Require().NoError(err) // message committed
// relay send
// NOTE: fungible token is prefixed with the full trace in order to verify the packet commitment
fungibleTokenPacket = types.NewFungibleTokenPacketData(fullDenomPath, coinSentFromBToC.Amount.Uint64(), suite.chainC.SenderAccount.GetAddress().String(), suite.chainB.SenderAccount.GetAddress().String())
- packet = channeltypes.NewPacket(fungibleTokenPacket.GetBytes(), 1, channelOnCForB.PortID, channelOnCForB.ID, channelOnBForC.PortID, channelOnBForC.ID, timeoutHeight, 0)
- err = suite.coordinator.RelayPacket(suite.chainC, suite.chainB, clientOnCForB, clientOnBForC, packet, ack.Acknowledgement())
+ packet = channeltypes.NewPacket(fungibleTokenPacket.GetBytes(), 1, pathBtoC.EndpointB.ChannelConfig.PortID, pathBtoC.EndpointB.ChannelID, pathBtoC.EndpointA.ChannelConfig.PortID, pathBtoC.EndpointA.ChannelID, timeoutHeight, 0)
+ err = pathBtoC.RelayPacket(packet, ack.Acknowledgement())
suite.Require().NoError(err) // relay committed
- balance = suite.chainB.App.BankKeeper.GetBalance(suite.chainB.GetContext(), suite.chainB.SenderAccount.GetAddress(), coinSentFromAToB.Denom)
+ balance = suite.chainB.GetSimApp().BankKeeper.GetBalance(suite.chainB.GetContext(), suite.chainB.SenderAccount.GetAddress(), coinSentFromAToB.Denom)
// check that the balance on chainA returned back to the original state
suite.Require().Equal(coinSentFromAToB, balance)
// check that module account escrow address is empty
escrowAddress := types.GetEscrowAddress(packet.GetDestPort(), packet.GetDestChannel())
- balance = suite.chainB.App.BankKeeper.GetBalance(suite.chainB.GetContext(), escrowAddress, sdk.DefaultBondDenom)
+ balance = suite.chainB.GetSimApp().BankKeeper.GetBalance(suite.chainB.GetContext(), escrowAddress, sdk.DefaultBondDenom)
suite.Require().Equal(sdk.NewCoin(sdk.DefaultBondDenom, sdk.ZeroInt()), balance)
// check that balance on chain B is empty
- balance = suite.chainC.App.BankKeeper.GetBalance(suite.chainC.GetContext(), suite.chainC.SenderAccount.GetAddress(), voucherDenomTrace.IBCDenom())
+ balance = suite.chainC.GetSimApp().BankKeeper.GetBalance(suite.chainC.GetContext(), suite.chainC.SenderAccount.GetAddress(), voucherDenomTrace.IBCDenom())
suite.Require().Zero(balance.Amount.Int64())
}
diff --git a/modules/apps/transfer/keeper/genesis_test.go b/modules/apps/transfer/keeper/genesis_test.go
index 7dfffbdb..19e5dfe4 100644
--- a/modules/apps/transfer/keeper/genesis_test.go
+++ b/modules/apps/transfer/keeper/genesis_test.go
@@ -25,15 +25,15 @@ func (suite *KeeperTestSuite) TestGenesis() {
Path: path,
}
traces = append(types.Traces{denomTrace}, traces...)
- suite.chainA.App.TransferKeeper.SetDenomTrace(suite.chainA.GetContext(), denomTrace)
+ suite.chainA.GetSimApp().TransferKeeper.SetDenomTrace(suite.chainA.GetContext(), denomTrace)
}
- genesis := suite.chainA.App.TransferKeeper.ExportGenesis(suite.chainA.GetContext())
+ genesis := suite.chainA.GetSimApp().TransferKeeper.ExportGenesis(suite.chainA.GetContext())
suite.Require().Equal(types.PortID, genesis.PortId)
suite.Require().Equal(traces.Sort(), genesis.DenomTraces)
suite.Require().NotPanics(func() {
- suite.chainA.App.TransferKeeper.InitGenesis(suite.chainA.GetContext(), *genesis)
+ suite.chainA.GetSimApp().TransferKeeper.InitGenesis(suite.chainA.GetContext(), *genesis)
})
}
diff --git a/modules/apps/transfer/keeper/grpc_query_test.go b/modules/apps/transfer/keeper/grpc_query_test.go
index ca98bd77..d5ffe692 100644
--- a/modules/apps/transfer/keeper/grpc_query_test.go
+++ b/modules/apps/transfer/keeper/grpc_query_test.go
@@ -44,7 +44,7 @@ func (suite *KeeperTestSuite) TestQueryDenomTrace() {
func() {
expTrace.Path = "transfer/channelToA/transfer/channelToB"
expTrace.BaseDenom = "uatom"
- suite.chainA.App.TransferKeeper.SetDenomTrace(suite.chainA.GetContext(), expTrace)
+ suite.chainA.GetSimApp().TransferKeeper.SetDenomTrace(suite.chainA.GetContext(), expTrace)
req = &types.QueryDenomTraceRequest{
Hash: expTrace.Hash().String(),
@@ -100,7 +100,7 @@ func (suite *KeeperTestSuite) TestQueryDenomTraces() {
expTraces = append(expTraces, types.DenomTrace{Path: "transfer/channelToA/transfer/channelToB", BaseDenom: "uatom"})
for _, trace := range expTraces {
- suite.chainA.App.TransferKeeper.SetDenomTrace(suite.chainA.GetContext(), trace)
+ suite.chainA.GetSimApp().TransferKeeper.SetDenomTrace(suite.chainA.GetContext(), trace)
}
req = &types.QueryDenomTracesRequest{
diff --git a/modules/apps/transfer/keeper/keeper_test.go b/modules/apps/transfer/keeper/keeper_test.go
index 8c90f186..de3902df 100644
--- a/modules/apps/transfer/keeper/keeper_test.go
+++ b/modules/apps/transfer/keeper/keeper_test.go
@@ -31,15 +31,23 @@ func (suite *KeeperTestSuite) SetupTest() {
suite.chainB = suite.coordinator.GetChain(ibctesting.GetChainID(1))
suite.chainC = suite.coordinator.GetChain(ibctesting.GetChainID(2))
- queryHelper := baseapp.NewQueryServerTestHelper(suite.chainA.GetContext(), suite.chainA.App.InterfaceRegistry())
- types.RegisterQueryServer(queryHelper, suite.chainA.App.TransferKeeper)
+ queryHelper := baseapp.NewQueryServerTestHelper(suite.chainA.GetContext(), suite.chainA.GetSimApp().InterfaceRegistry())
+ types.RegisterQueryServer(queryHelper, suite.chainA.GetSimApp().TransferKeeper)
suite.queryClient = types.NewQueryClient(queryHelper)
}
+func NewTransferPath(chainA, chainB *ibctesting.TestChain) *ibctesting.Path {
+ path := ibctesting.NewPath(chainA, chainB)
+ path.EndpointA.ChannelConfig.PortID = ibctesting.TransferPort
+ path.EndpointB.ChannelConfig.PortID = ibctesting.TransferPort
+
+ return path
+}
+
func (suite *KeeperTestSuite) TestGetTransferAccount() {
expectedMaccAddr := sdk.AccAddress(crypto.AddressHash([]byte(types.ModuleName)))
- macc := suite.chainA.App.TransferKeeper.GetTransferAccount(suite.chainA.GetContext())
+ macc := suite.chainA.GetSimApp().TransferKeeper.GetTransferAccount(suite.chainA.GetContext())
suite.Require().NotNil(macc)
suite.Require().Equal(types.ModuleName, macc.GetName())
diff --git a/modules/apps/transfer/keeper/mbt_relay_test.go b/modules/apps/transfer/keeper/mbt_relay_test.go
index 4130845c..db425f24 100644
--- a/modules/apps/transfer/keeper/mbt_relay_test.go
+++ b/modules/apps/transfer/keeper/mbt_relay_test.go
@@ -18,7 +18,6 @@ import (
"github.com/cosmos/ibc-go/modules/apps/transfer/types"
clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
- "github.com/cosmos/ibc-go/modules/core/exported"
ibctesting "github.com/cosmos/ibc-go/testing"
)
@@ -251,10 +250,10 @@ func (bank *Bank) NonZeroString() string {
// Construct a bank out of the chain bank
func BankOfChain(chain *ibctesting.TestChain) Bank {
bank := MakeBank()
- chain.App.BankKeeper.IterateAllBalances(chain.GetContext(), func(address sdk.AccAddress, coin sdk.Coin) (stop bool) {
+ chain.GetSimApp().BankKeeper.IterateAllBalances(chain.GetContext(), func(address sdk.AccAddress, coin sdk.Coin) (stop bool) {
fullDenom := coin.Denom
if strings.HasPrefix(coin.Denom, "ibc/") {
- fullDenom, _ = chain.App.TransferKeeper.DenomPathFromHash(chain.GetContext(), coin.Denom)
+ fullDenom, _ = chain.GetSimApp().TransferKeeper.DenomPathFromHash(chain.GetContext(), coin.Denom)
}
bank.SetBalance(address.String(), fullDenom, coin.Amount)
return false
@@ -295,18 +294,18 @@ func (suite *KeeperTestSuite) TestModelBasedRelay() {
}
suite.SetupTest()
- _, _, connAB, connBA := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
- _, _, connBC, connCB := suite.coordinator.SetupClientConnections(suite.chainB, suite.chainC, exported.Tendermint)
- suite.coordinator.CreateTransferChannels(suite.chainA, suite.chainB, connAB, connBA, channeltypes.UNORDERED)
- suite.coordinator.CreateTransferChannels(suite.chainB, suite.chainC, connBC, connCB, channeltypes.UNORDERED)
+ pathAtoB := NewTransferPath(suite.chainA, suite.chainB)
+ pathBtoC := NewTransferPath(suite.chainB, suite.chainC)
+ suite.coordinator.Setup(pathAtoB)
+ suite.coordinator.Setup(pathBtoC)
for i, tlaTc := range tlaTestCases {
tc := OnRecvPacketTestCaseFromTla(tlaTc)
registerDenom := func() {
denomTrace := types.ParseDenomTrace(tc.packet.Data.Denom)
traceHash := denomTrace.Hash()
- if !suite.chainB.App.TransferKeeper.HasDenomTrace(suite.chainB.GetContext(), traceHash) {
- suite.chainB.App.TransferKeeper.SetDenomTrace(suite.chainB.GetContext(), denomTrace)
+ if !suite.chainB.GetSimApp().TransferKeeper.HasDenomTrace(suite.chainB.GetContext(), traceHash) {
+ suite.chainB.GetSimApp().TransferKeeper.SetDenomTrace(suite.chainB.GetContext(), denomTrace)
}
}
@@ -334,7 +333,7 @@ func (suite *KeeperTestSuite) TestModelBasedRelay() {
denom := denomTrace.IBCDenom()
err = sdk.ValidateDenom(denom)
if err == nil {
- err = suite.chainB.App.TransferKeeper.SendTransfer(
+ err = suite.chainB.GetSimApp().TransferKeeper.SendTransfer(
suite.chainB.GetContext(),
tc.packet.SourcePort,
tc.packet.SourceChannel,
@@ -345,17 +344,17 @@ func (suite *KeeperTestSuite) TestModelBasedRelay() {
0)
}
case "OnRecvPacket":
- err = suite.chainB.App.TransferKeeper.OnRecvPacket(suite.chainB.GetContext(), packet, tc.packet.Data)
+ err = suite.chainB.GetSimApp().TransferKeeper.OnRecvPacket(suite.chainB.GetContext(), packet, tc.packet.Data)
case "OnTimeoutPacket":
registerDenom()
- err = suite.chainB.App.TransferKeeper.OnTimeoutPacket(suite.chainB.GetContext(), packet, tc.packet.Data)
+ err = suite.chainB.GetSimApp().TransferKeeper.OnTimeoutPacket(suite.chainB.GetContext(), packet, tc.packet.Data)
case "OnRecvAcknowledgementResult":
- err = suite.chainB.App.TransferKeeper.OnAcknowledgementPacket(
+ err = suite.chainB.GetSimApp().TransferKeeper.OnAcknowledgementPacket(
suite.chainB.GetContext(), packet, tc.packet.Data,
channeltypes.NewResultAcknowledgement(nil))
case "OnRecvAcknowledgementError":
registerDenom()
- err = suite.chainB.App.TransferKeeper.OnAcknowledgementPacket(
+ err = suite.chainB.GetSimApp().TransferKeeper.OnAcknowledgementPacket(
suite.chainB.GetContext(), packet, tc.packet.Data,
channeltypes.NewErrorAcknowledgement("MBT Error Acknowledgement"))
default:
diff --git a/modules/apps/transfer/keeper/params_test.go b/modules/apps/transfer/keeper/params_test.go
index ac680a41..a5d78005 100644
--- a/modules/apps/transfer/keeper/params_test.go
+++ b/modules/apps/transfer/keeper/params_test.go
@@ -5,11 +5,11 @@ import "github.com/cosmos/ibc-go/modules/apps/transfer/types"
func (suite *KeeperTestSuite) TestParams() {
expParams := types.DefaultParams()
- params := suite.chainA.App.TransferKeeper.GetParams(suite.chainA.GetContext())
+ params := suite.chainA.GetSimApp().TransferKeeper.GetParams(suite.chainA.GetContext())
suite.Require().Equal(expParams, params)
expParams.SendEnabled = false
- suite.chainA.App.TransferKeeper.SetParams(suite.chainA.GetContext(), expParams)
- params = suite.chainA.App.TransferKeeper.GetParams(suite.chainA.GetContext())
+ suite.chainA.GetSimApp().TransferKeeper.SetParams(suite.chainA.GetContext(), expParams)
+ params = suite.chainA.GetSimApp().TransferKeeper.GetParams(suite.chainA.GetContext())
suite.Require().Equal(expParams, params)
}
diff --git a/modules/apps/transfer/keeper/relay_test.go b/modules/apps/transfer/keeper/relay_test.go
index 020b132c..4c383f09 100644
--- a/modules/apps/transfer/keeper/relay_test.go
+++ b/modules/apps/transfer/keeper/relay_test.go
@@ -10,7 +10,6 @@ import (
clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
host "github.com/cosmos/ibc-go/modules/core/24-host"
- "github.com/cosmos/ibc-go/modules/core/exported"
ibctesting "github.com/cosmos/ibc-go/testing"
)
@@ -18,9 +17,9 @@ import (
// chainA and coin that orignate on chainB
func (suite *KeeperTestSuite) TestSendTransfer() {
var (
- amount sdk.Coin
- channelA, channelB ibctesting.TestChannel
- err error
+ amount sdk.Coin
+ path *ibctesting.Path
+ err error
)
testCases := []struct {
@@ -31,37 +30,33 @@ func (suite *KeeperTestSuite) TestSendTransfer() {
}{
{"successful transfer from source chain",
func() {
- _, _, connA, connB := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
- channelA, channelB = suite.coordinator.CreateTransferChannels(suite.chainA, suite.chainB, connA, connB, channeltypes.UNORDERED)
+ suite.coordinator.CreateTransferChannels(path)
amount = sdk.NewCoin(sdk.DefaultBondDenom, sdk.NewInt(100))
}, true, true},
{"successful transfer with coin from counterparty chain",
func() {
// send coin from chainA back to chainB
- _, _, connA, connB := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
- channelA, channelB = suite.coordinator.CreateTransferChannels(suite.chainA, suite.chainB, connA, connB, channeltypes.UNORDERED)
- amount = types.GetTransferCoin(channelA.PortID, channelA.ID, sdk.DefaultBondDenom, 100)
+ suite.coordinator.CreateTransferChannels(path)
+ amount = types.GetTransferCoin(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, sdk.DefaultBondDenom, 100)
}, false, true},
{"source channel not found",
func() {
// channel references wrong ID
- _, _, connA, connB := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
- channelA, channelB = suite.coordinator.CreateTransferChannels(suite.chainA, suite.chainB, connA, connB, channeltypes.UNORDERED)
- channelA.ID = ibctesting.InvalidID
+ suite.coordinator.CreateTransferChannels(path)
+ path.EndpointA.ChannelID = ibctesting.InvalidID
amount = sdk.NewCoin(sdk.DefaultBondDenom, sdk.NewInt(100))
}, true, false},
{"next seq send not found",
func() {
- _, _, connA, connB := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
- channelA = suite.chainA.NextTestChannel(connA, ibctesting.TransferPort)
- channelB = suite.chainB.NextTestChannel(connB, ibctesting.TransferPort)
+ path.EndpointA.ChannelID = "channel-0"
+ path.EndpointB.ChannelID = "channel-0"
// manually create channel so next seq send is never set
- suite.chainA.App.IBCKeeper.ChannelKeeper.SetChannel(
+ suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetChannel(
suite.chainA.GetContext(),
- channelA.PortID, channelA.ID,
- channeltypes.NewChannel(channeltypes.OPEN, channeltypes.ORDERED, channeltypes.NewCounterparty(channelB.PortID, channelB.ID), []string{connA.ID}, ibctesting.DefaultChannelVersion),
+ path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID,
+ channeltypes.NewChannel(channeltypes.OPEN, channeltypes.ORDERED, channeltypes.NewCounterparty(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID), []string{path.EndpointA.ConnectionID}, ibctesting.DefaultChannelVersion),
)
- suite.chainA.CreateChannelCapability(channelA.PortID, channelA.ID)
+ suite.chainA.CreateChannelCapability(suite.chainA.GetSimApp().ScopedIBCMockKeeper, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
amount = sdk.NewCoin(sdk.DefaultBondDenom, sdk.NewInt(100))
}, true, false},
@@ -69,25 +64,22 @@ func (suite *KeeperTestSuite) TestSendTransfer() {
// - source chain
{"send coin failed",
func() {
- _, _, connA, connB := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
- channelA, channelB = suite.coordinator.CreateTransferChannels(suite.chainA, suite.chainB, connA, connB, channeltypes.UNORDERED)
+ suite.coordinator.CreateTransferChannels(path)
amount = sdk.NewCoin("randomdenom", sdk.NewInt(100))
}, true, false},
// - receiving chain
{"send from module account failed",
func() {
- _, _, connA, connB := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
- channelA, channelB = suite.coordinator.CreateTransferChannels(suite.chainA, suite.chainB, connA, connB, channeltypes.UNORDERED)
- amount = types.GetTransferCoin(channelA.PortID, channelA.ID, " randomdenom", 100)
+ suite.coordinator.CreateTransferChannels(path)
+ amount = types.GetTransferCoin(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, " randomdenom", 100)
}, false, false},
{"channel capability not found",
func() {
- _, _, connA, connB := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
- channelA, channelB = suite.coordinator.CreateTransferChannels(suite.chainA, suite.chainB, connA, connB, channeltypes.UNORDERED)
- cap := suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ suite.coordinator.CreateTransferChannels(path)
+ cap := suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
// Release channel capability
- suite.chainA.App.ScopedTransferKeeper.ReleaseCapability(suite.chainA.GetContext(), cap)
+ suite.chainA.GetSimApp().ScopedTransferKeeper.ReleaseCapability(suite.chainA.GetContext(), cap)
amount = sdk.NewCoin(sdk.DefaultBondDenom, sdk.NewInt(100))
}, true, false},
}
@@ -97,31 +89,35 @@ func (suite *KeeperTestSuite) TestSendTransfer() {
suite.Run(fmt.Sprintf("Case %s", tc.msg), func() {
suite.SetupTest() // reset
+ path = NewTransferPath(suite.chainA, suite.chainB)
+ suite.coordinator.SetupConnections(path)
tc.malleate()
if !tc.sendFromSource {
// send coin from chainB to chainA
coinFromBToA := sdk.NewCoin(sdk.DefaultBondDenom, sdk.NewInt(100))
- transferMsg := types.NewMsgTransfer(channelB.PortID, channelB.ID, coinFromBToA, suite.chainB.SenderAccount.GetAddress().String(), suite.chainA.SenderAccount.GetAddress().String(), clienttypes.NewHeight(0, 110), 0)
- err = suite.coordinator.SendMsg(suite.chainB, suite.chainA, channelA.ClientID, transferMsg)
+ transferMsg := types.NewMsgTransfer(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, coinFromBToA, suite.chainB.SenderAccount.GetAddress().String(), suite.chainA.SenderAccount.GetAddress().String(), clienttypes.NewHeight(0, 110), 0)
+ _, err = suite.chainB.SendMsgs(transferMsg)
suite.Require().NoError(err) // message committed
// receive coin on chainA from chainB
fungibleTokenPacket := types.NewFungibleTokenPacketData(coinFromBToA.Denom, coinFromBToA.Amount.Uint64(), suite.chainB.SenderAccount.GetAddress().String(), suite.chainA.SenderAccount.GetAddress().String())
- packet := channeltypes.NewPacket(fungibleTokenPacket.GetBytes(), 1, channelB.PortID, channelB.ID, channelA.PortID, channelA.ID, clienttypes.NewHeight(0, 110), 0)
+ packet := channeltypes.NewPacket(fungibleTokenPacket.GetBytes(), 1, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, clienttypes.NewHeight(0, 110), 0)
// get proof of packet commitment from chainB
+ err = path.EndpointA.UpdateClient()
+ suite.Require().NoError(err)
packetKey := host.PacketCommitmentKey(packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence())
- proof, proofHeight := suite.chainB.QueryProof(packetKey)
+ proof, proofHeight := path.EndpointB.QueryProof(packetKey)
recvMsg := channeltypes.NewMsgRecvPacket(packet, proof, proofHeight, suite.chainA.SenderAccount.GetAddress().String())
- err = suite.coordinator.SendMsg(suite.chainA, suite.chainB, channelB.ClientID, recvMsg)
+ _, err = suite.chainA.SendMsgs(recvMsg)
suite.Require().NoError(err) // message committed
}
- err = suite.chainA.App.TransferKeeper.SendTransfer(
- suite.chainA.GetContext(), channelA.PortID, channelA.ID, amount,
+ err = suite.chainA.GetSimApp().TransferKeeper.SendTransfer(
+ suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, amount,
suite.chainA.SenderAccount.GetAddress(), suite.chainB.SenderAccount.GetAddress().String(), clienttypes.NewHeight(0, 110), 0,
)
@@ -140,10 +136,9 @@ func (suite *KeeperTestSuite) TestSendTransfer() {
// malleate function allows for testing invalid cases.
func (suite *KeeperTestSuite) TestOnRecvPacket() {
var (
- channelA, channelB ibctesting.TestChannel
- trace types.DenomTrace
- amount sdk.Int
- receiver string
+ trace types.DenomTrace
+ amount sdk.Int
+ receiver string
)
testCases := []struct {
@@ -180,8 +175,8 @@ func (suite *KeeperTestSuite) TestOnRecvPacket() {
suite.Run(fmt.Sprintf("Case %s", tc.msg), func() {
suite.SetupTest() // reset
- clientA, clientB, connA, connB := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
- channelA, channelB = suite.coordinator.CreateTransferChannels(suite.chainA, suite.chainB, connA, connB, channeltypes.UNORDERED)
+ path := NewTransferPath(suite.chainA, suite.chainB)
+ suite.coordinator.Setup(path)
receiver = suite.chainB.SenderAccount.GetAddress().String() // must be explicitly changed in malleate
amount = sdk.NewInt(100) // must be explicitly changed in malleate
@@ -190,36 +185,36 @@ func (suite *KeeperTestSuite) TestOnRecvPacket() {
if tc.recvIsSource {
// send coin from chainB to chainA, receive them, acknowledge them, and send back to chainB
coinFromBToA := sdk.NewCoin(sdk.DefaultBondDenom, sdk.NewInt(100))
- transferMsg := types.NewMsgTransfer(channelB.PortID, channelB.ID, coinFromBToA, suite.chainB.SenderAccount.GetAddress().String(), suite.chainA.SenderAccount.GetAddress().String(), clienttypes.NewHeight(0, 110), 0)
- err := suite.coordinator.SendMsg(suite.chainB, suite.chainA, channelA.ClientID, transferMsg)
+ transferMsg := types.NewMsgTransfer(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, coinFromBToA, suite.chainB.SenderAccount.GetAddress().String(), suite.chainA.SenderAccount.GetAddress().String(), clienttypes.NewHeight(0, 110), 0)
+ _, err := suite.chainB.SendMsgs(transferMsg)
suite.Require().NoError(err) // message committed
// relay send packet
fungibleTokenPacket := types.NewFungibleTokenPacketData(coinFromBToA.Denom, coinFromBToA.Amount.Uint64(), suite.chainB.SenderAccount.GetAddress().String(), suite.chainA.SenderAccount.GetAddress().String())
- packet := channeltypes.NewPacket(fungibleTokenPacket.GetBytes(), 1, channelB.PortID, channelB.ID, channelA.PortID, channelA.ID, clienttypes.NewHeight(0, 110), 0)
+ packet := channeltypes.NewPacket(fungibleTokenPacket.GetBytes(), 1, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, clienttypes.NewHeight(0, 110), 0)
ack := channeltypes.NewResultAcknowledgement([]byte{byte(1)})
- err = suite.coordinator.RelayPacket(suite.chainB, suite.chainA, clientB, clientA, packet, ack.Acknowledgement())
+ err = path.RelayPacket(packet, ack.Acknowledgement())
suite.Require().NoError(err) // relay committed
seq++
// NOTE: trace must be explicitly changed in malleate to test invalid cases
- trace = types.ParseDenomTrace(types.GetPrefixedDenom(channelA.PortID, channelA.ID, sdk.DefaultBondDenom))
+ trace = types.ParseDenomTrace(types.GetPrefixedDenom(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, sdk.DefaultBondDenom))
} else {
trace = types.ParseDenomTrace(sdk.DefaultBondDenom)
}
// send coin from chainA to chainB
- transferMsg := types.NewMsgTransfer(channelA.PortID, channelA.ID, sdk.NewCoin(trace.IBCDenom(), amount), suite.chainA.SenderAccount.GetAddress().String(), receiver, clienttypes.NewHeight(0, 110), 0)
- err := suite.coordinator.SendMsg(suite.chainA, suite.chainB, channelB.ClientID, transferMsg)
+ transferMsg := types.NewMsgTransfer(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, sdk.NewCoin(trace.IBCDenom(), amount), suite.chainA.SenderAccount.GetAddress().String(), receiver, clienttypes.NewHeight(0, 110), 0)
+ _, err := suite.chainA.SendMsgs(transferMsg)
suite.Require().NoError(err) // message committed
tc.malleate()
data := types.NewFungibleTokenPacketData(trace.GetFullDenomPath(), amount.Uint64(), suite.chainA.SenderAccount.GetAddress().String(), receiver)
- packet := channeltypes.NewPacket(data.GetBytes(), seq, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.NewHeight(0, 100), 0)
+ packet := channeltypes.NewPacket(data.GetBytes(), seq, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, clienttypes.NewHeight(0, 100), 0)
- err = suite.chainB.App.TransferKeeper.OnRecvPacket(suite.chainB.GetContext(), packet, data)
+ err = suite.chainB.GetSimApp().TransferKeeper.OnRecvPacket(suite.chainB.GetContext(), packet, data)
if tc.expPass {
suite.Require().NoError(err)
@@ -238,10 +233,9 @@ func (suite *KeeperTestSuite) TestOnAcknowledgementPacket() {
var (
successAck = channeltypes.NewResultAcknowledgement([]byte{byte(1)})
failedAck = channeltypes.NewErrorAcknowledgement("failed packet transfer")
-
- channelA, channelB ibctesting.TestChannel
- trace types.DenomTrace
- amount sdk.Int
+ trace types.DenomTrace
+ amount sdk.Int
+ path *ibctesting.Path
)
testCases := []struct {
@@ -252,14 +246,14 @@ func (suite *KeeperTestSuite) TestOnAcknowledgementPacket() {
expPass bool
}{
{"success ack causes no-op", successAck, func() {
- trace = types.ParseDenomTrace(types.GetPrefixedDenom(channelB.PortID, channelB.ID, sdk.DefaultBondDenom))
+ trace = types.ParseDenomTrace(types.GetPrefixedDenom(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, sdk.DefaultBondDenom))
}, true, true},
{"successful refund from source chain", failedAck, func() {
- escrow := types.GetEscrowAddress(channelA.PortID, channelA.ID)
+ escrow := types.GetEscrowAddress(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
trace = types.ParseDenomTrace(sdk.DefaultBondDenom)
coin := sdk.NewCoin(sdk.DefaultBondDenom, amount)
- suite.Require().NoError(simapp.FundAccount(suite.chainA.App, suite.chainA.GetContext(), escrow, sdk.NewCoins(coin)))
+ suite.Require().NoError(simapp.FundAccount(suite.chainA.GetSimApp(), suite.chainA.GetContext(), escrow, sdk.NewCoins(coin)))
}, false, true},
{"unsuccessful refund from source", failedAck,
func() {
@@ -267,11 +261,11 @@ func (suite *KeeperTestSuite) TestOnAcknowledgementPacket() {
}, false, false},
{"successful refund from with coin from external chain", failedAck,
func() {
- escrow := types.GetEscrowAddress(channelA.PortID, channelA.ID)
- trace = types.ParseDenomTrace(types.GetPrefixedDenom(channelA.PortID, channelA.ID, sdk.DefaultBondDenom))
+ escrow := types.GetEscrowAddress(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
+ trace = types.ParseDenomTrace(types.GetPrefixedDenom(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, sdk.DefaultBondDenom))
coin := sdk.NewCoin(trace.IBCDenom(), amount)
- suite.Require().NoError(simapp.FundAccount(suite.chainA.App, suite.chainA.GetContext(), escrow, sdk.NewCoins(coin)))
+ suite.Require().NoError(simapp.FundAccount(suite.chainA.GetSimApp(), suite.chainA.GetContext(), escrow, sdk.NewCoins(coin)))
}, false, true},
}
@@ -280,20 +274,21 @@ func (suite *KeeperTestSuite) TestOnAcknowledgementPacket() {
suite.Run(fmt.Sprintf("Case %s", tc.msg), func() {
suite.SetupTest() // reset
- _, _, _, _, channelA, channelB = suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
+ path = NewTransferPath(suite.chainA, suite.chainB)
+ suite.coordinator.Setup(path)
amount = sdk.NewInt(100) // must be explicitly changed
tc.malleate()
data := types.NewFungibleTokenPacketData(trace.GetFullDenomPath(), amount.Uint64(), suite.chainA.SenderAccount.GetAddress().String(), suite.chainB.SenderAccount.GetAddress().String())
- packet := channeltypes.NewPacket(data.GetBytes(), 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.NewHeight(0, 100), 0)
+ packet := channeltypes.NewPacket(data.GetBytes(), 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, clienttypes.NewHeight(0, 100), 0)
- preCoin := suite.chainA.App.BankKeeper.GetBalance(suite.chainA.GetContext(), suite.chainA.SenderAccount.GetAddress(), trace.IBCDenom())
+ preCoin := suite.chainA.GetSimApp().BankKeeper.GetBalance(suite.chainA.GetContext(), suite.chainA.SenderAccount.GetAddress(), trace.IBCDenom())
- err := suite.chainA.App.TransferKeeper.OnAcknowledgementPacket(suite.chainA.GetContext(), packet, data, tc.ack)
+ err := suite.chainA.GetSimApp().TransferKeeper.OnAcknowledgementPacket(suite.chainA.GetContext(), packet, data, tc.ack)
if tc.expPass {
suite.Require().NoError(err)
- postCoin := suite.chainA.App.BankKeeper.GetBalance(suite.chainA.GetContext(), suite.chainA.SenderAccount.GetAddress(), trace.IBCDenom())
+ postCoin := suite.chainA.GetSimApp().BankKeeper.GetBalance(suite.chainA.GetContext(), suite.chainA.SenderAccount.GetAddress(), trace.IBCDenom())
deltaAmount := postCoin.Amount.Sub(preCoin.Amount)
if tc.success {
@@ -315,10 +310,10 @@ func (suite *KeeperTestSuite) TestOnAcknowledgementPacket() {
// so the refunds are occurring on chainA.
func (suite *KeeperTestSuite) TestOnTimeoutPacket() {
var (
- channelA, channelB ibctesting.TestChannel
- trace types.DenomTrace
- amount sdk.Int
- sender string
+ trace types.DenomTrace
+ path *ibctesting.Path
+ amount sdk.Int
+ sender string
)
testCases := []struct {
@@ -328,19 +323,19 @@ func (suite *KeeperTestSuite) TestOnTimeoutPacket() {
}{
{"successful timeout from sender as source chain",
func() {
- escrow := types.GetEscrowAddress(channelA.PortID, channelA.ID)
+ escrow := types.GetEscrowAddress(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
trace = types.ParseDenomTrace(sdk.DefaultBondDenom)
coin := sdk.NewCoin(trace.IBCDenom(), amount)
- suite.Require().NoError(simapp.FundAccount(suite.chainA.App, suite.chainA.GetContext(), escrow, sdk.NewCoins(coin)))
+ suite.Require().NoError(simapp.FundAccount(suite.chainA.GetSimApp(), suite.chainA.GetContext(), escrow, sdk.NewCoins(coin)))
}, true},
{"successful timeout from external chain",
func() {
- escrow := types.GetEscrowAddress(channelA.PortID, channelA.ID)
- trace = types.ParseDenomTrace(types.GetPrefixedDenom(channelA.PortID, channelA.ID, sdk.DefaultBondDenom))
+ escrow := types.GetEscrowAddress(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
+ trace = types.ParseDenomTrace(types.GetPrefixedDenom(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, sdk.DefaultBondDenom))
coin := sdk.NewCoin(trace.IBCDenom(), amount)
- suite.Require().NoError(simapp.FundAccount(suite.chainA.App, suite.chainA.GetContext(), escrow, sdk.NewCoins(coin)))
+ suite.Require().NoError(simapp.FundAccount(suite.chainA.GetSimApp(), suite.chainA.GetContext(), escrow, sdk.NewCoins(coin)))
}, true},
{"no balance for coin denom",
func() {
@@ -352,7 +347,7 @@ func (suite *KeeperTestSuite) TestOnTimeoutPacket() {
}, false},
{"mint failed",
func() {
- trace = types.ParseDenomTrace(types.GetPrefixedDenom(channelA.PortID, channelA.ID, sdk.DefaultBondDenom))
+ trace = types.ParseDenomTrace(types.GetPrefixedDenom(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, sdk.DefaultBondDenom))
amount = sdk.OneInt()
sender = "invalid address"
}, false},
@@ -364,21 +359,21 @@ func (suite *KeeperTestSuite) TestOnTimeoutPacket() {
suite.Run(fmt.Sprintf("Case %s", tc.msg), func() {
suite.SetupTest() // reset
- _, _, connA, connB := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
- channelA, channelB = suite.coordinator.CreateTransferChannels(suite.chainA, suite.chainB, connA, connB, channeltypes.UNORDERED)
+ path = NewTransferPath(suite.chainA, suite.chainB)
+ suite.coordinator.Setup(path)
amount = sdk.NewInt(100) // must be explicitly changed
sender = suite.chainA.SenderAccount.GetAddress().String()
tc.malleate()
data := types.NewFungibleTokenPacketData(trace.GetFullDenomPath(), amount.Uint64(), sender, suite.chainB.SenderAccount.GetAddress().String())
- packet := channeltypes.NewPacket(data.GetBytes(), 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.NewHeight(0, 100), 0)
+ packet := channeltypes.NewPacket(data.GetBytes(), 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, clienttypes.NewHeight(0, 100), 0)
- preCoin := suite.chainA.App.BankKeeper.GetBalance(suite.chainA.GetContext(), suite.chainA.SenderAccount.GetAddress(), trace.IBCDenom())
+ preCoin := suite.chainA.GetSimApp().BankKeeper.GetBalance(suite.chainA.GetContext(), suite.chainA.SenderAccount.GetAddress(), trace.IBCDenom())
- err := suite.chainA.App.TransferKeeper.OnTimeoutPacket(suite.chainA.GetContext(), packet, data)
+ err := suite.chainA.GetSimApp().TransferKeeper.OnTimeoutPacket(suite.chainA.GetContext(), packet, data)
- postCoin := suite.chainA.App.BankKeeper.GetBalance(suite.chainA.GetContext(), suite.chainA.SenderAccount.GetAddress(), trace.IBCDenom())
+ postCoin := suite.chainA.GetSimApp().BankKeeper.GetBalance(suite.chainA.GetContext(), suite.chainA.SenderAccount.GetAddress(), trace.IBCDenom())
deltaAmount := postCoin.Amount.Sub(preCoin.Amount)
if tc.expPass {
diff --git a/modules/apps/transfer/module_test.go b/modules/apps/transfer/module_test.go
index 53876213..63d610de 100644
--- a/modules/apps/transfer/module_test.go
+++ b/modules/apps/transfer/module_test.go
@@ -7,16 +7,14 @@ import (
"github.com/cosmos/ibc-go/modules/apps/transfer/types"
channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
host "github.com/cosmos/ibc-go/modules/core/24-host"
- "github.com/cosmos/ibc-go/modules/core/exported"
ibctesting "github.com/cosmos/ibc-go/testing"
)
func (suite *TransferTestSuite) TestOnChanOpenInit() {
var (
- channel *channeltypes.Channel
- testChannel ibctesting.TestChannel
- connA *ibctesting.TestConnection
- chanCap *capabilitytypes.Capability
+ channel *channeltypes.Channel
+ path *ibctesting.Path
+ chanCap *capabilitytypes.Capability
)
testCases := []struct {
@@ -30,7 +28,7 @@ func (suite *TransferTestSuite) TestOnChanOpenInit() {
},
{
"max channels reached", func() {
- testChannel.ID = channeltypes.FormatChannelIdentifier(math.MaxUint32 + 1)
+ path.EndpointA.ChannelID = channeltypes.FormatChannelIdentifier(math.MaxUint32 + 1)
}, false,
},
{
@@ -40,7 +38,7 @@ func (suite *TransferTestSuite) TestOnChanOpenInit() {
},
{
"invalid port ID", func() {
- testChannel = suite.chainA.NextTestChannel(connA, ibctesting.MockPort)
+ path.EndpointA.ChannelConfig.PortID = ibctesting.MockPort
}, false,
},
{
@@ -50,7 +48,7 @@ func (suite *TransferTestSuite) TestOnChanOpenInit() {
},
{
"capability already claimed", func() {
- err := suite.chainA.App.ScopedTransferKeeper.ClaimCapability(suite.chainA.GetContext(), chanCap, host.ChannelCapabilityPath(testChannel.PortID, testChannel.ID))
+ err := suite.chainA.GetSimApp().ScopedTransferKeeper.ClaimCapability(suite.chainA.GetContext(), chanCap, host.ChannelCapabilityPath(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID))
suite.Require().NoError(err)
}, false,
},
@@ -61,31 +59,32 @@ func (suite *TransferTestSuite) TestOnChanOpenInit() {
suite.Run(tc.name, func() {
suite.SetupTest() // reset
+ path = NewTransferPath(suite.chainA, suite.chainB)
+ suite.coordinator.SetupConnections(path)
+ path.EndpointA.ChannelID = ibctesting.FirstChannelID
- _, _, connA, _ = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
- testChannel = suite.chainA.NextTestChannel(connA, ibctesting.TransferPort)
- counterparty := channeltypes.NewCounterparty(testChannel.PortID, testChannel.ID)
+ counterparty := channeltypes.NewCounterparty(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
channel = &channeltypes.Channel{
State: channeltypes.INIT,
Ordering: channeltypes.UNORDERED,
Counterparty: counterparty,
- ConnectionHops: []string{connA.ID},
+ ConnectionHops: []string{path.EndpointA.ConnectionID},
Version: types.Version,
}
- module, _, err := suite.chainA.App.IBCKeeper.PortKeeper.LookupModuleByPort(suite.chainA.GetContext(), ibctesting.TransferPort)
+ module, _, err := suite.chainA.App.GetIBCKeeper().PortKeeper.LookupModuleByPort(suite.chainA.GetContext(), ibctesting.TransferPort)
suite.Require().NoError(err)
- chanCap, err = suite.chainA.App.ScopedIBCKeeper.NewCapability(suite.chainA.GetContext(), host.ChannelCapabilityPath(ibctesting.TransferPort, testChannel.ID))
+ chanCap, err = suite.chainA.App.GetScopedIBCKeeper().NewCapability(suite.chainA.GetContext(), host.ChannelCapabilityPath(ibctesting.TransferPort, path.EndpointA.ChannelID))
suite.Require().NoError(err)
- cbs, ok := suite.chainA.App.IBCKeeper.Router.GetRoute(module)
+ cbs, ok := suite.chainA.App.GetIBCKeeper().Router.GetRoute(module)
suite.Require().True(ok)
tc.malleate() // explicitly change fields in channel and testChannel
err = cbs.OnChanOpenInit(suite.chainA.GetContext(), channel.Ordering, channel.GetConnectionHops(),
- testChannel.PortID, testChannel.ID, chanCap, channel.Counterparty, channel.GetVersion(),
+ path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, chanCap, channel.Counterparty, channel.GetVersion(),
)
if tc.expPass {
@@ -101,9 +100,8 @@ func (suite *TransferTestSuite) TestOnChanOpenInit() {
func (suite *TransferTestSuite) TestOnChanOpenTry() {
var (
channel *channeltypes.Channel
- testChannel ibctesting.TestChannel
- connA *ibctesting.TestConnection
chanCap *capabilitytypes.Capability
+ path *ibctesting.Path
counterpartyVersion string
)
@@ -118,12 +116,12 @@ func (suite *TransferTestSuite) TestOnChanOpenTry() {
},
{
"max channels reached", func() {
- testChannel.ID = channeltypes.FormatChannelIdentifier(math.MaxUint32 + 1)
+ path.EndpointA.ChannelID = channeltypes.FormatChannelIdentifier(math.MaxUint32 + 1)
}, false,
},
{
"capability already claimed in INIT should pass", func() {
- err := suite.chainA.App.ScopedTransferKeeper.ClaimCapability(suite.chainA.GetContext(), chanCap, host.ChannelCapabilityPath(testChannel.PortID, testChannel.ID))
+ err := suite.chainA.GetSimApp().ScopedTransferKeeper.ClaimCapability(suite.chainA.GetContext(), chanCap, host.ChannelCapabilityPath(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID))
suite.Require().NoError(err)
}, true,
},
@@ -134,7 +132,7 @@ func (suite *TransferTestSuite) TestOnChanOpenTry() {
},
{
"invalid port ID", func() {
- testChannel = suite.chainA.NextTestChannel(connA, ibctesting.MockPort)
+ path.EndpointA.ChannelConfig.PortID = ibctesting.MockPort
}, false,
},
{
@@ -155,31 +153,33 @@ func (suite *TransferTestSuite) TestOnChanOpenTry() {
suite.Run(tc.name, func() {
suite.SetupTest() // reset
- _, _, connA, _ = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
- testChannel = suite.chainA.NextTestChannel(connA, ibctesting.TransferPort)
- counterparty := channeltypes.NewCounterparty(testChannel.PortID, testChannel.ID)
+ path = NewTransferPath(suite.chainA, suite.chainB)
+ suite.coordinator.SetupConnections(path)
+ path.EndpointA.ChannelID = ibctesting.FirstChannelID
+
+ counterparty := channeltypes.NewCounterparty(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
channel = &channeltypes.Channel{
State: channeltypes.TRYOPEN,
Ordering: channeltypes.UNORDERED,
Counterparty: counterparty,
- ConnectionHops: []string{connA.ID},
+ ConnectionHops: []string{path.EndpointA.ConnectionID},
Version: types.Version,
}
counterpartyVersion = types.Version
- module, _, err := suite.chainA.App.IBCKeeper.PortKeeper.LookupModuleByPort(suite.chainA.GetContext(), ibctesting.TransferPort)
+ module, _, err := suite.chainA.App.GetIBCKeeper().PortKeeper.LookupModuleByPort(suite.chainA.GetContext(), ibctesting.TransferPort)
suite.Require().NoError(err)
- chanCap, err = suite.chainA.App.ScopedIBCKeeper.NewCapability(suite.chainA.GetContext(), host.ChannelCapabilityPath(ibctesting.TransferPort, testChannel.ID))
+ chanCap, err = suite.chainA.App.GetScopedIBCKeeper().NewCapability(suite.chainA.GetContext(), host.ChannelCapabilityPath(ibctesting.TransferPort, path.EndpointA.ChannelID))
suite.Require().NoError(err)
- cbs, ok := suite.chainA.App.IBCKeeper.Router.GetRoute(module)
+ cbs, ok := suite.chainA.App.GetIBCKeeper().Router.GetRoute(module)
suite.Require().True(ok)
tc.malleate() // explicitly change fields in channel and testChannel
err = cbs.OnChanOpenTry(suite.chainA.GetContext(), channel.Ordering, channel.GetConnectionHops(),
- testChannel.PortID, testChannel.ID, chanCap, channel.Counterparty, channel.GetVersion(), counterpartyVersion,
+ path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, chanCap, channel.Counterparty, channel.GetVersion(), counterpartyVersion,
)
if tc.expPass {
@@ -193,11 +193,7 @@ func (suite *TransferTestSuite) TestOnChanOpenTry() {
}
func (suite *TransferTestSuite) TestOnChanOpenAck() {
- var (
- testChannel ibctesting.TestChannel
- connA *ibctesting.TestConnection
- counterpartyVersion string
- )
+ var counterpartyVersion string
testCases := []struct {
name string
@@ -221,19 +217,20 @@ func (suite *TransferTestSuite) TestOnChanOpenAck() {
suite.Run(tc.name, func() {
suite.SetupTest() // reset
- _, _, connA, _ = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
- testChannel = suite.chainA.NextTestChannel(connA, ibctesting.TransferPort)
+ path := NewTransferPath(suite.chainA, suite.chainB)
+ suite.coordinator.SetupConnections(path)
+ path.EndpointA.ChannelID = ibctesting.FirstChannelID
counterpartyVersion = types.Version
- module, _, err := suite.chainA.App.IBCKeeper.PortKeeper.LookupModuleByPort(suite.chainA.GetContext(), ibctesting.TransferPort)
+ module, _, err := suite.chainA.App.GetIBCKeeper().PortKeeper.LookupModuleByPort(suite.chainA.GetContext(), ibctesting.TransferPort)
suite.Require().NoError(err)
- cbs, ok := suite.chainA.App.IBCKeeper.Router.GetRoute(module)
+ cbs, ok := suite.chainA.App.GetIBCKeeper().Router.GetRoute(module)
suite.Require().True(ok)
tc.malleate() // explicitly change fields in channel and testChannel
- err = cbs.OnChanOpenAck(suite.chainA.GetContext(), testChannel.PortID, testChannel.ID, counterpartyVersion)
+ err = cbs.OnChanOpenAck(suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, counterpartyVersion)
if tc.expPass {
suite.Require().NoError(err)
diff --git a/modules/core/02-client/abci_test.go b/modules/core/02-client/abci_test.go
index cbf63d85..0bbdf489 100644
--- a/modules/core/02-client/abci_test.go
+++ b/modules/core/02-client/abci_test.go
@@ -7,13 +7,13 @@ import (
abci "github.com/tendermint/tendermint/abci/types"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
+ upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
client "github.com/cosmos/ibc-go/modules/core/02-client"
"github.com/cosmos/ibc-go/modules/core/02-client/types"
"github.com/cosmos/ibc-go/modules/core/exported"
ibctmtypes "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types"
localhosttypes "github.com/cosmos/ibc-go/modules/light-clients/09-localhost/types"
ibctesting "github.com/cosmos/ibc-go/testing"
- upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
)
type ClientTestSuite struct {
@@ -36,7 +36,7 @@ func (suite *ClientTestSuite) SetupTest() {
localHostClient := localhosttypes.NewClientState(
suite.chainA.GetContext().ChainID(), types.NewHeight(revision, uint64(suite.chainA.GetContext().BlockHeight())),
)
- suite.chainA.App.IBCKeeper.ClientKeeper.SetClientState(suite.chainA.GetContext(), exported.Localhost, localHostClient)
+ suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(suite.chainA.GetContext(), exported.Localhost, localHostClient)
}
func TestClientTestSuite(t *testing.T) {
@@ -54,7 +54,7 @@ func (suite *ClientTestSuite) TestBeginBlocker() {
suite.coordinator.CommitBlock(suite.chainA, suite.chainB)
suite.Require().NotPanics(func() {
- client.BeginBlocker(suite.chainA.GetContext(), suite.chainA.App.IBCKeeper.ClientKeeper)
+ client.BeginBlocker(suite.chainA.GetContext(), suite.chainA.App.GetIBCKeeper().ClientKeeper)
}, "BeginBlocker shouldn't panic")
localHostClient = suite.chainA.GetClientState(exported.Localhost)
@@ -69,7 +69,7 @@ func (suite *ClientTestSuite) TestBeginBlockerConsensusState() {
Height: suite.chainA.GetContext().BlockHeight() + 1,
}
// set upgrade plan in the upgrade store
- store := suite.chainA.GetContext().KVStore(suite.chainA.App.GetKey(upgradetypes.StoreKey))
+ store := suite.chainA.GetContext().KVStore(suite.chainA.GetSimApp().GetKey(upgradetypes.StoreKey))
bz := suite.chainA.App.AppCodec().MustMarshalBinaryBare(plan)
store.Set(upgradetypes.PlanKey(), bz)
@@ -79,14 +79,14 @@ func (suite *ClientTestSuite) TestBeginBlockerConsensusState() {
NextValidatorsHash: nextValsHash,
})
- err := suite.chainA.App.UpgradeKeeper.SetUpgradedClient(newCtx, plan.Height, []byte("client state"))
+ err := suite.chainA.GetSimApp().UpgradeKeeper.SetUpgradedClient(newCtx, plan.Height, []byte("client state"))
suite.Require().NoError(err)
req := abci.RequestBeginBlock{Header: newCtx.BlockHeader()}
suite.chainA.App.BeginBlock(req)
// plan Height is at ctx.BlockHeight+1
- consState, found := suite.chainA.App.UpgradeKeeper.GetUpgradedConsensusState(newCtx, plan.Height)
+ consState, found := suite.chainA.GetSimApp().UpgradeKeeper.GetUpgradedConsensusState(newCtx, plan.Height)
suite.Require().True(found)
bz, err = types.MarshalConsensusState(suite.chainA.App.AppCodec(), &ibctmtypes.ConsensusState{Timestamp: newCtx.BlockTime(), NextValidatorsHash: nextValsHash})
suite.Require().NoError(err)
diff --git a/modules/core/02-client/keeper/client_test.go b/modules/core/02-client/keeper/client_test.go
index 69e0953b..b42322d9 100644
--- a/modules/core/02-client/keeper/client_test.go
+++ b/modules/core/02-client/keeper/client_test.go
@@ -5,9 +5,9 @@ import (
"fmt"
"time"
+ upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
tmtypes "github.com/tendermint/tendermint/types"
- upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
"github.com/cosmos/ibc-go/modules/core/02-client/types"
clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
commitmenttypes "github.com/cosmos/ibc-go/modules/core/23-commitment/types"
@@ -216,20 +216,20 @@ func (suite *KeeperTestSuite) TestUpdateClientLocalhost() {
var localhostClient exported.ClientState = localhosttypes.NewClientState(suite.chainA.ChainID, types.NewHeight(revision, uint64(suite.chainA.GetContext().BlockHeight())))
ctx := suite.chainA.GetContext().WithBlockHeight(suite.chainA.GetContext().BlockHeight() + 1)
- err := suite.chainA.App.IBCKeeper.ClientKeeper.UpdateClient(ctx, exported.Localhost, nil)
+ err := suite.chainA.App.GetIBCKeeper().ClientKeeper.UpdateClient(ctx, exported.Localhost, nil)
suite.Require().NoError(err)
- clientState, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(ctx, exported.Localhost)
+ clientState, found := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(ctx, exported.Localhost)
suite.Require().True(found)
suite.Require().Equal(localhostClient.GetLatestHeight().(types.Height).Increment(), clientState.GetLatestHeight())
}
func (suite *KeeperTestSuite) TestUpgradeClient() {
var (
+ path *ibctesting.Path
upgradedClient exported.ClientState
upgradedConsState exported.ConsensusState
lastHeight exported.Height
- clientA string
proofUpgradedClient, proofUpgradedConsState []byte
upgradedClientBz, upgradedConsStateBz []byte
err error
@@ -247,16 +247,16 @@ func (suite *KeeperTestSuite) TestUpgradeClient() {
lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1))
// zero custom fields and store in upgrade store
- suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz)
- suite.chainB.App.UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz)
+ suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz)
+ suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz)
// commit upgrade store changes and update clients
suite.coordinator.CommitBlock(suite.chainB)
- err := suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ err := path.EndpointA.UpdateClient()
suite.Require().NoError(err)
- cs, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), clientA)
+ cs, found := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(suite.chainA.GetContext(), path.EndpointA.ClientID)
suite.Require().True(found)
proofUpgradedClient, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight())
@@ -271,22 +271,22 @@ func (suite *KeeperTestSuite) TestUpgradeClient() {
lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1))
// zero custom fields and store in upgrade store
- suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz)
- suite.chainB.App.UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz)
+ suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz)
+ suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz)
// commit upgrade store changes and update clients
suite.coordinator.CommitBlock(suite.chainB)
- err := suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ err := path.EndpointA.UpdateClient()
suite.Require().NoError(err)
- cs, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), clientA)
+ cs, found := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(suite.chainA.GetContext(), path.EndpointA.ClientID)
suite.Require().True(found)
proofUpgradedClient, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight())
proofUpgradedConsState, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight())
- clientA = "wrongclientid"
+ path.EndpointA.ClientID = "wrongclientid"
},
expPass: false,
},
@@ -297,16 +297,16 @@ func (suite *KeeperTestSuite) TestUpgradeClient() {
lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1))
// zero custom fields and store in upgrade store
- suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz)
- suite.chainB.App.UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz)
+ suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz)
+ suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz)
// commit upgrade store changes and update clients
suite.coordinator.CommitBlock(suite.chainB)
- err := suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ err := path.EndpointA.UpdateClient()
suite.Require().NoError(err)
- cs, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), clientA)
+ cs, found := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(suite.chainA.GetContext(), path.EndpointA.ClientID)
suite.Require().True(found)
proofUpgradedClient, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight())
@@ -316,7 +316,7 @@ func (suite *KeeperTestSuite) TestUpgradeClient() {
tmClient, ok := cs.(*ibctmtypes.ClientState)
suite.Require().True(ok)
tmClient.FrozenHeight = types.NewHeight(0, 1)
- suite.chainA.App.IBCKeeper.ClientKeeper.SetClientState(suite.chainA.GetContext(), clientA, tmClient)
+ suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(suite.chainA.GetContext(), path.EndpointA.ClientID, tmClient)
},
expPass: false,
},
@@ -327,17 +327,17 @@ func (suite *KeeperTestSuite) TestUpgradeClient() {
lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1))
// zero custom fields and store in upgrade store
- suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz)
- suite.chainB.App.UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz)
+ suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz)
+ suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz)
// change upgradedClient client-specified parameters
upgradedClient = ibctmtypes.NewClientState("wrongchainID", ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, true, true)
suite.coordinator.CommitBlock(suite.chainB)
- err := suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ err := path.EndpointA.UpdateClient()
suite.Require().NoError(err)
- cs, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), clientA)
+ cs, found := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(suite.chainA.GetContext(), path.EndpointA.ClientID)
suite.Require().True(found)
proofUpgradedClient, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight())
@@ -349,7 +349,8 @@ func (suite *KeeperTestSuite) TestUpgradeClient() {
for _, tc := range testCases {
tc := tc
- clientA, _ = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+ path = ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.SetupClients(path)
upgradedClient = ibctmtypes.NewClientState("newChainId", ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod+trustingPeriod, maxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false)
upgradedClient = upgradedClient.ZeroCustomFields()
upgradedClientBz, err = types.MarshalClientState(suite.chainA.App.AppCodec(), upgradedClient)
@@ -366,7 +367,7 @@ func (suite *KeeperTestSuite) TestUpgradeClient() {
// Call ZeroCustomFields on upgraded clients to clear any client-chosen parameters in test-case upgradedClient
upgradedClient = upgradedClient.ZeroCustomFields()
- err = suite.chainA.App.IBCKeeper.ClientKeeper.UpgradeClient(suite.chainA.GetContext(), clientA, upgradedClient, upgradedConsState, proofUpgradedClient, proofUpgradedConsState)
+ err = suite.chainA.App.GetIBCKeeper().ClientKeeper.UpgradeClient(suite.chainA.GetContext(), path.EndpointA.ClientID, upgradedClient, upgradedConsState, proofUpgradedClient, proofUpgradedConsState)
if tc.expPass {
suite.Require().NoError(err, "verify upgrade failed on valid case: %s", tc.name)
@@ -592,12 +593,13 @@ func (suite *KeeperTestSuite) TestCheckMisbehaviourAndUpdateState() {
}
func (suite *KeeperTestSuite) TestUpdateClientEventEmission() {
- clientID, _ := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
- header, err := suite.chainA.ConstructUpdateTMClientHeader(suite.chainB, clientID)
+ path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.SetupClients(path)
+ header, err := suite.chainA.ConstructUpdateTMClientHeader(suite.chainB, path.EndpointA.ClientID)
suite.Require().NoError(err)
msg, err := clienttypes.NewMsgUpdateClient(
- clientID, header,
+ path.EndpointA.ClientID, header,
suite.chainA.SenderAccount.GetAddress().String(),
)
diff --git a/modules/core/02-client/keeper/grpc_query_test.go b/modules/core/02-client/keeper/grpc_query_test.go
index 41c128d9..b80fd57f 100644
--- a/modules/core/02-client/keeper/grpc_query_test.go
+++ b/modules/core/02-client/keeper/grpc_query_test.go
@@ -116,14 +116,17 @@ func (suite *KeeperTestSuite) TestQueryClientStates() {
{
"success",
func() {
- clientA1, _ := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
- clientA2, _ := suite.coordinator.CreateClient(suite.chainA, suite.chainB, exported.Tendermint)
+ path1 := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.SetupClients(path1)
- clientStateA1 := suite.chainA.GetClientState(clientA1)
- clientStateA2 := suite.chainA.GetClientState(clientA2)
+ path2 := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.SetupClients(path2)
- idcs := types.NewIdentifiedClientState(clientA1, clientStateA1)
- idcs2 := types.NewIdentifiedClientState(clientA2, clientStateA2)
+ clientStateA1 := path1.EndpointA.GetClientState()
+ clientStateA2 := path2.EndpointA.GetClientState()
+
+ idcs := types.NewIdentifiedClientState(path1.EndpointA.ClientID, clientStateA1)
+ idcs2 := types.NewIdentifiedClientState(path2.EndpointA.ClientID, clientStateA2)
// order is sorted by client id, localhost is last
expClientStates = types.IdentifiedClientStates{idcs, idcs2}.Sort()
diff --git a/modules/core/02-client/keeper/keeper_test.go b/modules/core/02-client/keeper/keeper_test.go
index b31972b3..d3e1cef5 100644
--- a/modules/core/02-client/keeper/keeper_test.go
+++ b/modules/core/02-client/keeper/keeper_test.go
@@ -120,7 +120,7 @@ func (suite *KeeperTestSuite) SetupTest() {
localHostClient := localhosttypes.NewClientState(
suite.chainA.ChainID, types.NewHeight(revision, uint64(suite.chainA.GetContext().BlockHeight())),
)
- suite.chainA.App.IBCKeeper.ClientKeeper.SetClientState(suite.chainA.GetContext(), exported.Localhost, localHostClient)
+ suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(suite.chainA.GetContext(), exported.Localhost, localHostClient)
queryHelper := baseapp.NewQueryServerTestHelper(suite.ctx, app.InterfaceRegistry())
types.RegisterQueryServer(queryHelper, app.IBCKeeper.ClientKeeper)
@@ -222,7 +222,7 @@ func (suite *KeeperTestSuite) TestValidateSelfClient() {
}
for _, tc := range testCases {
- err := suite.chainA.App.IBCKeeper.ClientKeeper.ValidateSelfClient(suite.chainA.GetContext(), tc.clientState)
+ err := suite.chainA.App.GetIBCKeeper().ClientKeeper.ValidateSelfClient(suite.chainA.GetContext(), tc.clientState)
if tc.expPass {
suite.Require().NoError(err, "expected valid client for case: %s", tc.name)
} else {
@@ -244,16 +244,16 @@ func (suite KeeperTestSuite) TestGetAllGenesisClients() {
expGenClients := make(types.IdentifiedClientStates, len(expClients))
for i := range expClients {
- suite.chainA.App.IBCKeeper.ClientKeeper.SetClientState(suite.chainA.GetContext(), clientIDs[i], expClients[i])
+ suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(suite.chainA.GetContext(), clientIDs[i], expClients[i])
expGenClients[i] = types.NewIdentifiedClientState(clientIDs[i], expClients[i])
}
// add localhost client
- localHostClient, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), exported.Localhost)
+ localHostClient, found := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(suite.chainA.GetContext(), exported.Localhost)
suite.Require().True(found)
expGenClients = append(expGenClients, types.NewIdentifiedClientState(exported.Localhost, localHostClient))
- genClients := suite.chainA.App.IBCKeeper.ClientKeeper.GetAllGenesisClients(suite.chainA.GetContext())
+ genClients := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetAllGenesisClients(suite.chainA.GetContext())
suite.Require().Equal(expGenClients.Sort(), genClients)
}
@@ -261,7 +261,7 @@ func (suite KeeperTestSuite) TestGetAllGenesisClients() {
func (suite KeeperTestSuite) TestGetAllGenesisMetadata() {
expectedGenMetadata := []types.IdentifiedGenesisMetadata{
types.NewIdentifiedGenesisMetadata(
- "clientA",
+ "07-tendermint-1",
[]types.GenesisMetadata{
types.NewGenesisMetadata(ibctmtypes.ProcessedTimeKey(types.NewHeight(0, 1)), []byte("foo")),
types.NewGenesisMetadata(ibctmtypes.ProcessedTimeKey(types.NewHeight(0, 2)), []byte("bar")),
@@ -278,13 +278,13 @@ func (suite KeeperTestSuite) TestGetAllGenesisMetadata() {
}
genClients := []types.IdentifiedClientState{
- types.NewIdentifiedClientState("clientA", &ibctmtypes.ClientState{}), types.NewIdentifiedClientState("clientB", &ibctmtypes.ClientState{}),
+ types.NewIdentifiedClientState("07-tendermint-1", &ibctmtypes.ClientState{}), types.NewIdentifiedClientState("clientB", &ibctmtypes.ClientState{}),
types.NewIdentifiedClientState("clientC", &ibctmtypes.ClientState{}), types.NewIdentifiedClientState("clientD", &localhosttypes.ClientState{}),
}
- suite.chainA.App.IBCKeeper.ClientKeeper.SetAllClientMetadata(suite.chainA.GetContext(), expectedGenMetadata)
+ suite.chainA.App.GetIBCKeeper().ClientKeeper.SetAllClientMetadata(suite.chainA.GetContext(), expectedGenMetadata)
- actualGenMetadata, err := suite.chainA.App.IBCKeeper.ClientKeeper.GetAllClientMetadata(suite.chainA.GetContext(), genClients)
+ actualGenMetadata, err := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetAllClientMetadata(suite.chainA.GetContext(), genClients)
suite.Require().NoError(err, "get client metadata returned error unexpectedly")
suite.Require().Equal(expectedGenMetadata, actualGenMetadata, "retrieved metadata is unexpected")
}
@@ -342,21 +342,22 @@ func (suite KeeperTestSuite) TestConsensusStateHelpers() {
// 2 clients in total are created on chainA. The first client is updated so it contains an initial consensus state
// and a consensus state at the update height.
func (suite KeeperTestSuite) TestGetAllConsensusStates() {
- clientA, _ := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+ path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.SetupClients(path)
- clientState := suite.chainA.GetClientState(clientA)
+ clientState := path.EndpointA.GetClientState()
expConsensusHeight0 := clientState.GetLatestHeight()
- consensusState0, ok := suite.chainA.GetConsensusState(clientA, expConsensusHeight0)
+ consensusState0, ok := suite.chainA.GetConsensusState(path.EndpointA.ClientID, expConsensusHeight0)
suite.Require().True(ok)
// update client to create a second consensus state
- err := suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ err := path.EndpointA.UpdateClient()
suite.Require().NoError(err)
- clientState = suite.chainA.GetClientState(clientA)
+ clientState = path.EndpointA.GetClientState()
expConsensusHeight1 := clientState.GetLatestHeight()
suite.Require().True(expConsensusHeight1.GT(expConsensusHeight0))
- consensusState1, ok := suite.chainA.GetConsensusState(clientA, expConsensusHeight1)
+ consensusState1, ok := suite.chainA.GetConsensusState(path.EndpointA.ClientID, expConsensusHeight1)
suite.Require().True(ok)
expConsensus := []exported.ConsensusState{
@@ -365,25 +366,26 @@ func (suite KeeperTestSuite) TestGetAllConsensusStates() {
}
// create second client on chainA
- clientA2, _ := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
- clientState = suite.chainA.GetClientState(clientA2)
+ path2 := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.SetupClients(path2)
+ clientState = path2.EndpointA.GetClientState()
expConsensusHeight2 := clientState.GetLatestHeight()
- consensusState2, ok := suite.chainA.GetConsensusState(clientA2, expConsensusHeight2)
+ consensusState2, ok := suite.chainA.GetConsensusState(path2.EndpointA.ClientID, expConsensusHeight2)
suite.Require().True(ok)
expConsensus2 := []exported.ConsensusState{consensusState2}
expConsensusStates := types.ClientsConsensusStates{
- types.NewClientConsensusStates(clientA, []types.ConsensusStateWithHeight{
+ types.NewClientConsensusStates(path.EndpointA.ClientID, []types.ConsensusStateWithHeight{
types.NewConsensusStateWithHeight(expConsensusHeight0.(types.Height), expConsensus[0]),
types.NewConsensusStateWithHeight(expConsensusHeight1.(types.Height), expConsensus[1]),
}),
- types.NewClientConsensusStates(clientA2, []types.ConsensusStateWithHeight{
+ types.NewClientConsensusStates(path2.EndpointA.ClientID, []types.ConsensusStateWithHeight{
types.NewConsensusStateWithHeight(expConsensusHeight2.(types.Height), expConsensus2[0]),
}),
}.Sort()
- consStates := suite.chainA.App.IBCKeeper.ClientKeeper.GetAllConsensusStates(suite.chainA.GetContext())
+ consStates := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetAllConsensusStates(suite.chainA.GetContext())
suite.Require().Equal(expConsensusStates, consStates, "%s \n\n%s", expConsensusStates, consStates)
}
diff --git a/modules/core/02-client/keeper/params_test.go b/modules/core/02-client/keeper/params_test.go
index fdcaad5f..cbcc2fea 100644
--- a/modules/core/02-client/keeper/params_test.go
+++ b/modules/core/02-client/keeper/params_test.go
@@ -7,11 +7,11 @@ import (
func (suite *KeeperTestSuite) TestParams() {
expParams := types.DefaultParams()
- params := suite.chainA.App.IBCKeeper.ClientKeeper.GetParams(suite.chainA.GetContext())
+ params := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetParams(suite.chainA.GetContext())
suite.Require().Equal(expParams, params)
expParams.AllowedClients = []string{}
- suite.chainA.App.IBCKeeper.ClientKeeper.SetParams(suite.chainA.GetContext(), expParams)
- params = suite.chainA.App.IBCKeeper.ClientKeeper.GetParams(suite.chainA.GetContext())
+ suite.chainA.App.GetIBCKeeper().ClientKeeper.SetParams(suite.chainA.GetContext(), expParams)
+ params = suite.chainA.App.GetIBCKeeper().ClientKeeper.GetParams(suite.chainA.GetContext())
suite.Require().Empty(expParams.AllowedClients)
}
diff --git a/modules/core/02-client/keeper/proposal_test.go b/modules/core/02-client/keeper/proposal_test.go
index bd381052..5e4eca40 100644
--- a/modules/core/02-client/keeper/proposal_test.go
+++ b/modules/core/02-client/keeper/proposal_test.go
@@ -2,11 +2,11 @@ package keeper_test
import (
govtypes "github.com/cosmos/cosmos-sdk/x/gov/types"
+ upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
"github.com/cosmos/ibc-go/modules/core/02-client/types"
"github.com/cosmos/ibc-go/modules/core/exported"
ibctmtypes "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types"
ibctesting "github.com/cosmos/ibc-go/testing"
- upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
)
func (suite *KeeperTestSuite) TestClientUpdateProposal() {
@@ -32,14 +32,14 @@ func (suite *KeeperTestSuite) TestClientUpdateProposal() {
"subject and substitute use different revision numbers", func() {
tmClientState, ok := substituteClientState.(*ibctmtypes.ClientState)
suite.Require().True(ok)
- consState, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientConsensusState(suite.chainA.GetContext(), substitute, tmClientState.LatestHeight)
+ consState, found := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetClientConsensusState(suite.chainA.GetContext(), substitute, tmClientState.LatestHeight)
suite.Require().True(found)
newRevisionNumber := tmClientState.GetLatestHeight().GetRevisionNumber() + 1
tmClientState.LatestHeight = types.NewHeight(newRevisionNumber, tmClientState.GetLatestHeight().GetRevisionHeight())
initialHeight = types.NewHeight(newRevisionNumber, initialHeight.GetRevisionHeight())
- suite.chainA.App.IBCKeeper.ClientKeeper.SetClientConsensusState(suite.chainA.GetContext(), substitute, tmClientState.LatestHeight, consState)
- suite.chainA.App.IBCKeeper.ClientKeeper.SetClientState(suite.chainA.GetContext(), substitute, tmClientState)
+ suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(suite.chainA.GetContext(), substitute, tmClientState.LatestHeight, consState)
+ suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(suite.chainA.GetContext(), substitute, tmClientState)
content = types.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, subject, substitute, initialHeight)
}, true,
@@ -69,7 +69,7 @@ func (suite *KeeperTestSuite) TestClientUpdateProposal() {
tmClientState, ok := subjectClientState.(*ibctmtypes.ClientState)
suite.Require().True(ok)
tmClientState.LatestHeight = substituteClientState.GetLatestHeight().(types.Height)
- suite.chainA.App.IBCKeeper.ClientKeeper.SetClientState(suite.chainA.GetContext(), subject, tmClientState)
+ suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(suite.chainA.GetContext(), subject, tmClientState)
content = types.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, subject, substitute, initialHeight)
}, false,
@@ -79,7 +79,7 @@ func (suite *KeeperTestSuite) TestClientUpdateProposal() {
tmClientState, ok := subjectClientState.(*ibctmtypes.ClientState)
suite.Require().True(ok)
tmClientState.FrozenHeight = types.ZeroHeight()
- suite.chainA.App.IBCKeeper.ClientKeeper.SetClientState(suite.chainA.GetContext(), subject, tmClientState)
+ suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(suite.chainA.GetContext(), subject, tmClientState)
content = types.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, subject, substitute, initialHeight)
}, false,
@@ -92,14 +92,19 @@ func (suite *KeeperTestSuite) TestClientUpdateProposal() {
suite.Run(tc.name, func() {
suite.SetupTest() // reset
- subject, _ = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+ subjectPath := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.SetupClients(subjectPath)
+ subject = subjectPath.EndpointA.ClientID
subjectClientState = suite.chainA.GetClientState(subject)
- substitute, _ = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+
+ substitutePath := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.SetupClients(substitutePath)
+ substitute = substitutePath.EndpointA.ClientID
initialHeight = types.NewHeight(subjectClientState.GetLatestHeight().GetRevisionNumber(), subjectClientState.GetLatestHeight().GetRevisionHeight()+1)
// update substitute twice
- suite.coordinator.UpdateClient(suite.chainA, suite.chainB, substitute, exported.Tendermint)
- suite.coordinator.UpdateClient(suite.chainA, suite.chainB, substitute, exported.Tendermint)
+ substitutePath.EndpointA.UpdateClient()
+ substitutePath.EndpointA.UpdateClient()
substituteClientState = suite.chainA.GetClientState(substitute)
tmClientState, ok := subjectClientState.(*ibctmtypes.ClientState)
@@ -107,20 +112,20 @@ func (suite *KeeperTestSuite) TestClientUpdateProposal() {
tmClientState.AllowUpdateAfterMisbehaviour = true
tmClientState.AllowUpdateAfterExpiry = true
tmClientState.FrozenHeight = tmClientState.LatestHeight
- suite.chainA.App.IBCKeeper.ClientKeeper.SetClientState(suite.chainA.GetContext(), subject, tmClientState)
+ suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(suite.chainA.GetContext(), subject, tmClientState)
tmClientState, ok = substituteClientState.(*ibctmtypes.ClientState)
suite.Require().True(ok)
tmClientState.AllowUpdateAfterMisbehaviour = true
tmClientState.AllowUpdateAfterExpiry = true
tmClientState.FrozenHeight = tmClientState.LatestHeight
- suite.chainA.App.IBCKeeper.ClientKeeper.SetClientState(suite.chainA.GetContext(), substitute, tmClientState)
+ suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(suite.chainA.GetContext(), substitute, tmClientState)
tc.malleate()
updateProp, ok := content.(*types.ClientUpdateProposal)
suite.Require().True(ok)
- err = suite.chainA.App.IBCKeeper.ClientKeeper.ClientUpdateProposal(suite.chainA.GetContext(), updateProp)
+ err = suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientUpdateProposal(suite.chainA.GetContext(), updateProp)
if tc.expPass {
suite.Require().NoError(err)
@@ -194,8 +199,9 @@ func (suite *KeeperTestSuite) TestHandleUpgradeProposal() {
suite.SetupTest() // reset
oldPlan.Height = 0 //reset
- clientID, _ := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
- upgradedClientState = suite.chainA.GetClientState(clientID).ZeroCustomFields().(*ibctmtypes.ClientState)
+ path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.SetupClients(path)
+ upgradedClientState = suite.chainA.GetClientState(path.EndpointA.ClientID).ZeroCustomFields().(*ibctmtypes.ClientState)
// use height 1000 to distinguish from old plan
plan = upgradetypes.Plan{
@@ -208,33 +214,33 @@ func (suite *KeeperTestSuite) TestHandleUpgradeProposal() {
// set the old plan if it is not empty
if oldPlan.Height != 0 {
// set upgrade plan in the upgrade store
- store := suite.chainA.GetContext().KVStore(suite.chainA.App.GetKey(upgradetypes.StoreKey))
+ store := suite.chainA.GetContext().KVStore(suite.chainA.GetSimApp().GetKey(upgradetypes.StoreKey))
bz := suite.chainA.App.AppCodec().MustMarshalBinaryBare(&oldPlan)
store.Set(upgradetypes.PlanKey(), bz)
bz, err := types.MarshalClientState(suite.chainA.App.AppCodec(), upgradedClientState)
suite.Require().NoError(err)
- suite.chainA.App.UpgradeKeeper.SetUpgradedClient(suite.chainA.GetContext(), oldPlan.Height, bz)
+ suite.chainA.GetSimApp().UpgradeKeeper.SetUpgradedClient(suite.chainA.GetContext(), oldPlan.Height, bz)
}
upgradeProp, ok := content.(*types.UpgradeProposal)
suite.Require().True(ok)
- err = suite.chainA.App.IBCKeeper.ClientKeeper.HandleUpgradeProposal(suite.chainA.GetContext(), upgradeProp)
+ err = suite.chainA.App.GetIBCKeeper().ClientKeeper.HandleUpgradeProposal(suite.chainA.GetContext(), upgradeProp)
if tc.expPass {
suite.Require().NoError(err)
// check that the correct plan is returned
- storedPlan, found := suite.chainA.App.UpgradeKeeper.GetUpgradePlan(suite.chainA.GetContext())
+ storedPlan, found := suite.chainA.GetSimApp().UpgradeKeeper.GetUpgradePlan(suite.chainA.GetContext())
suite.Require().True(found)
suite.Require().Equal(plan, storedPlan)
// check that old upgraded client state is cleared
- _, found = suite.chainA.App.UpgradeKeeper.GetUpgradedClient(suite.chainA.GetContext(), oldPlan.Height)
+ _, found = suite.chainA.GetSimApp().UpgradeKeeper.GetUpgradedClient(suite.chainA.GetContext(), oldPlan.Height)
suite.Require().False(found)
// check that client state was set
- storedClientState, found := suite.chainA.App.UpgradeKeeper.GetUpgradedClient(suite.chainA.GetContext(), plan.Height)
+ storedClientState, found := suite.chainA.GetSimApp().UpgradeKeeper.GetUpgradedClient(suite.chainA.GetContext(), plan.Height)
suite.Require().True(found)
clientState, err := types.UnmarshalClientState(suite.chainA.App.AppCodec(), storedClientState)
suite.Require().NoError(err)
@@ -243,7 +249,7 @@ func (suite *KeeperTestSuite) TestHandleUpgradeProposal() {
suite.Require().Error(err)
// check that the new plan wasn't stored
- storedPlan, found := suite.chainA.App.UpgradeKeeper.GetUpgradePlan(suite.chainA.GetContext())
+ storedPlan, found := suite.chainA.GetSimApp().UpgradeKeeper.GetUpgradePlan(suite.chainA.GetContext())
if oldPlan.Height != 0 {
// NOTE: this is only true if the ScheduleUpgrade function
// returns an error before clearing the old plan
@@ -255,7 +261,7 @@ func (suite *KeeperTestSuite) TestHandleUpgradeProposal() {
}
// check that client state was not set
- _, found = suite.chainA.App.UpgradeKeeper.GetUpgradedClient(suite.chainA.GetContext(), plan.Height)
+ _, found = suite.chainA.GetSimApp().UpgradeKeeper.GetUpgradedClient(suite.chainA.GetContext(), plan.Height)
suite.Require().False(found)
}
diff --git a/modules/core/02-client/proposal_handler_test.go b/modules/core/02-client/proposal_handler_test.go
index ad7873f5..2c83b95b 100644
--- a/modules/core/02-client/proposal_handler_test.go
+++ b/modules/core/02-client/proposal_handler_test.go
@@ -6,7 +6,6 @@ import (
govtypes "github.com/cosmos/cosmos-sdk/x/gov/types"
client "github.com/cosmos/ibc-go/modules/core/02-client"
clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
- "github.com/cosmos/ibc-go/modules/core/exported"
ibctmtypes "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types"
ibctesting "github.com/cosmos/ibc-go/testing"
)
@@ -24,29 +23,34 @@ func (suite *ClientTestSuite) TestNewClientUpdateProposalHandler() {
}{
{
"valid update client proposal", func() {
- subject, _ := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
- subjectClientState := suite.chainA.GetClientState(subject)
- substitute, _ := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+ subjectPath := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.SetupClients(subjectPath)
+ subjectClientState := suite.chainA.GetClientState(subjectPath.EndpointA.ClientID)
+
+ substitutePath := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.SetupClients(substitutePath)
initialHeight := clienttypes.NewHeight(subjectClientState.GetLatestHeight().GetRevisionNumber(), subjectClientState.GetLatestHeight().GetRevisionHeight()+1)
// update substitute twice
- suite.coordinator.UpdateClient(suite.chainA, suite.chainB, substitute, exported.Tendermint)
- suite.coordinator.UpdateClient(suite.chainA, suite.chainB, substitute, exported.Tendermint)
- substituteClientState := suite.chainA.GetClientState(substitute)
+ err = substitutePath.EndpointA.UpdateClient()
+ suite.Require().NoError(err)
+ err = substitutePath.EndpointA.UpdateClient()
+ suite.Require().NoError(err)
+ substituteClientState := suite.chainA.GetClientState(substitutePath.EndpointA.ClientID)
tmClientState, ok := subjectClientState.(*ibctmtypes.ClientState)
suite.Require().True(ok)
tmClientState.AllowUpdateAfterMisbehaviour = true
tmClientState.FrozenHeight = tmClientState.LatestHeight
- suite.chainA.App.IBCKeeper.ClientKeeper.SetClientState(suite.chainA.GetContext(), subject, tmClientState)
+ suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(suite.chainA.GetContext(), subjectPath.EndpointA.ClientID, tmClientState)
// replicate changes to substitute (they must match)
tmClientState, ok = substituteClientState.(*ibctmtypes.ClientState)
suite.Require().True(ok)
tmClientState.AllowUpdateAfterMisbehaviour = true
- suite.chainA.App.IBCKeeper.ClientKeeper.SetClientState(suite.chainA.GetContext(), substitute, tmClientState)
+ suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(suite.chainA.GetContext(), substitutePath.EndpointA.ClientID, tmClientState)
- content = clienttypes.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, subject, substitute, initialHeight)
+ content = clienttypes.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, subjectPath.EndpointA.ClientID, substitutePath.EndpointA.ClientID, initialHeight)
}, true,
},
{
@@ -69,7 +73,7 @@ func (suite *ClientTestSuite) TestNewClientUpdateProposalHandler() {
tc.malleate()
- proposalHandler := client.NewClientProposalHandler(suite.chainA.App.IBCKeeper.ClientKeeper)
+ proposalHandler := client.NewClientProposalHandler(suite.chainA.App.GetIBCKeeper().ClientKeeper)
err = proposalHandler(suite.chainA.GetContext(), content)
diff --git a/modules/core/02-client/types/client_test.go b/modules/core/02-client/types/client_test.go
index e51d7ead..b5b9b73c 100644
--- a/modules/core/02-client/types/client_test.go
+++ b/modules/core/02-client/types/client_test.go
@@ -6,7 +6,6 @@ import (
"github.com/stretchr/testify/require"
"github.com/cosmos/ibc-go/modules/core/02-client/types"
- "github.com/cosmos/ibc-go/modules/core/exported"
ibctesting "github.com/cosmos/ibc-go/testing"
)
@@ -27,9 +26,10 @@ func (suite *TypesTestSuite) TestMarshalConsensusStateWithHeight() {
},
{
"tendermint client", func() {
- clientA, _ := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
- clientState := suite.chainA.GetClientState(clientA)
- consensusState, ok := suite.chainA.GetConsensusState(clientA, clientState.GetLatestHeight())
+ path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.SetupClients(path)
+ clientState := suite.chainA.GetClientState(path.EndpointA.ClientID)
+ consensusState, ok := suite.chainA.GetConsensusState(path.EndpointA.ClientID, clientState.GetLatestHeight())
suite.Require().True(ok)
cswh = types.NewConsensusStateWithHeight(clientState.GetLatestHeight().(types.Height), consensusState)
diff --git a/modules/core/02-client/types/genesis_test.go b/modules/core/02-client/types/genesis_test.go
index c3d207c1..ab2c479c 100644
--- a/modules/core/02-client/types/genesis_test.go
+++ b/modules/core/02-client/types/genesis_test.go
@@ -7,7 +7,6 @@ import (
client "github.com/cosmos/ibc-go/modules/core/02-client"
"github.com/cosmos/ibc-go/modules/core/02-client/types"
- channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
commitmenttypes "github.com/cosmos/ibc-go/modules/core/23-commitment/types"
"github.com/cosmos/ibc-go/modules/core/exported"
ibctmtypes "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types"
@@ -30,10 +29,12 @@ var clientHeight = types.NewHeight(0, 10)
func (suite *TypesTestSuite) TestMarshalGenesisState() {
cdc := suite.chainA.App.AppCodec()
- clientA, _, _, _, _, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.ORDERED)
- suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.Setup(path)
+ err := path.EndpointA.UpdateClient()
+ suite.Require().NoError(err)
- genesis := client.ExportGenesis(suite.chainA.GetContext(), suite.chainA.App.IBCKeeper.ClientKeeper)
+ genesis := client.ExportGenesis(suite.chainA.GetContext(), suite.chainA.App.GetIBCKeeper().ClientKeeper)
bz, err := cdc.MarshalJSON(&genesis)
suite.Require().NoError(err)
diff --git a/modules/core/02-client/types/proposal_test.go b/modules/core/02-client/types/proposal_test.go
index ab3fd362..76aee117 100644
--- a/modules/core/02-client/types/proposal_test.go
+++ b/modules/core/02-client/types/proposal_test.go
@@ -7,17 +7,23 @@ import (
"github.com/cosmos/cosmos-sdk/codec"
codectypes "github.com/cosmos/cosmos-sdk/codec/types"
govtypes "github.com/cosmos/cosmos-sdk/x/gov/types"
+ upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
+
"github.com/cosmos/ibc-go/modules/core/02-client/types"
- "github.com/cosmos/ibc-go/modules/core/exported"
ibctmtypes "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types"
ibctesting "github.com/cosmos/ibc-go/testing"
- upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
)
func (suite *TypesTestSuite) TestValidateBasic() {
- subject, _ := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+ subjectPath := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.SetupClients(subjectPath)
+ subject := subjectPath.EndpointA.ClientID
subjectClientState := suite.chainA.GetClientState(subject)
- substitute, _ := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+
+ substitutePath := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.SetupClients(substitutePath)
+ substitute := substitutePath.EndpointA.ClientID
+
initialHeight := types.NewHeight(subjectClientState.GetLatestHeight().GetRevisionNumber(), subjectClientState.GetLatestHeight().GetRevisionHeight()+1)
testCases := []struct {
@@ -97,8 +103,9 @@ func (suite *TypesTestSuite) TestUpgradeProposalValidateBasic() {
err error
)
- client, _ := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
- cs := suite.chainA.GetClientState(client)
+ path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.SetupClients(path)
+ cs := suite.chainA.GetClientState(path.EndpointA.ClientID)
plan := upgradetypes.Plan{
Name: "ibc upgrade",
Height: 1000,
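For tests that need two independent clients, the rewritten proposal tests simply create two paths and run SetupClients on each, as in the hunk above. A sketch of that pattern under the same suite fixtures (exampleTwoClients is a hypothetical name):

func (suite *TypesTestSuite) exampleTwoClients() (subject, substitute string) {
	// each SetupClients call creates a fresh client pair, so the identifiers differ
	subjectPath := ibctesting.NewPath(suite.chainA, suite.chainB)
	suite.coordinator.SetupClients(subjectPath)

	substitutePath := ibctesting.NewPath(suite.chainA, suite.chainB)
	suite.coordinator.SetupClients(substitutePath)

	return subjectPath.EndpointA.ClientID, substitutePath.EndpointA.ClientID
}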
diff --git a/modules/core/03-connection/keeper/grpc_query_test.go b/modules/core/03-connection/keeper/grpc_query_test.go
index 42892dfd..a8480dad 100644
--- a/modules/core/03-connection/keeper/grpc_query_test.go
+++ b/modules/core/03-connection/keeper/grpc_query_test.go
@@ -7,7 +7,6 @@ import (
"github.com/cosmos/cosmos-sdk/types/query"
clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
"github.com/cosmos/ibc-go/modules/core/03-connection/types"
- channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
"github.com/cosmos/ibc-go/modules/core/exported"
ibctesting "github.com/cosmos/ibc-go/testing"
)
@@ -47,16 +46,17 @@ func (suite *KeeperTestSuite) TestQueryConnection() {
{
"success",
func() {
- clientA, clientB := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
- connA := suite.chainA.GetFirstTestConnection(clientA, clientB)
- connB := suite.chainB.GetFirstTestConnection(clientB, clientA)
+ path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.SetupClients(path)
+ err := path.EndpointA.ConnOpenInit()
+ suite.Require().NoError(err)
- counterparty := types.NewCounterparty(clientB, connB.ID, suite.chainB.GetPrefix())
- expConnection = types.NewConnectionEnd(types.INIT, clientA, counterparty, types.ExportedVersionsToProto(types.GetCompatibleVersions()), 500)
- suite.chainA.App.IBCKeeper.ConnectionKeeper.SetConnection(suite.chainA.GetContext(), connA.ID, expConnection)
+ counterparty := types.NewCounterparty(path.EndpointB.ClientID, "", suite.chainB.GetPrefix())
+ expConnection = types.NewConnectionEnd(types.INIT, path.EndpointA.ClientID, counterparty, types.ExportedVersionsToProto(types.GetCompatibleVersions()), 500)
+ suite.chainA.App.GetIBCKeeper().ConnectionKeeper.SetConnection(suite.chainA.GetContext(), path.EndpointA.ConnectionID, expConnection)
req = &types.QueryConnectionRequest{
- ConnectionId: connA.ID,
+ ConnectionId: path.EndpointA.ConnectionID,
}
},
true,
@@ -111,23 +111,28 @@ func (suite *KeeperTestSuite) TestQueryConnections() {
{
"success",
func() {
- clientA, clientB, connA0, connB0 := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
- clientA1, clientB1, connA1, connB1 := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
- connA2, _, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB)
+ path1 := ibctesting.NewPath(suite.chainA, suite.chainB)
+ path2 := ibctesting.NewPath(suite.chainA, suite.chainB)
+ path3 := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.SetupConnections(path1)
+ suite.coordinator.SetupConnections(path2)
+ suite.coordinator.SetupClients(path3)
+
+ err := path3.EndpointA.ConnOpenInit()
suite.Require().NoError(err)
- counterparty1 := types.NewCounterparty(clientB, connB0.ID, suite.chainB.GetPrefix())
- counterparty2 := types.NewCounterparty(clientB1, connB1.ID, suite.chainB.GetPrefix())
+ counterparty1 := types.NewCounterparty(path1.EndpointB.ClientID, path1.EndpointB.ConnectionID, suite.chainB.GetPrefix())
+ counterparty2 := types.NewCounterparty(path2.EndpointB.ClientID, path2.EndpointB.ConnectionID, suite.chainB.GetPrefix())
// counterparty connection id is blank after open init
- counterparty3 := types.NewCounterparty(clientB, "", suite.chainB.GetPrefix())
+ counterparty3 := types.NewCounterparty(path3.EndpointB.ClientID, "", suite.chainB.GetPrefix())
- conn1 := types.NewConnectionEnd(types.OPEN, clientA, counterparty1, types.ExportedVersionsToProto(types.GetCompatibleVersions()), 0)
- conn2 := types.NewConnectionEnd(types.OPEN, clientA1, counterparty2, types.ExportedVersionsToProto(types.GetCompatibleVersions()), 0)
- conn3 := types.NewConnectionEnd(types.INIT, clientA, counterparty3, types.ExportedVersionsToProto(types.GetCompatibleVersions()), 0)
+ conn1 := types.NewConnectionEnd(types.OPEN, path1.EndpointA.ClientID, counterparty1, types.ExportedVersionsToProto(types.GetCompatibleVersions()), 0)
+ conn2 := types.NewConnectionEnd(types.OPEN, path2.EndpointA.ClientID, counterparty2, types.ExportedVersionsToProto(types.GetCompatibleVersions()), 0)
+ conn3 := types.NewConnectionEnd(types.INIT, path3.EndpointA.ClientID, counterparty3, types.ExportedVersionsToProto(types.GetCompatibleVersions()), 0)
- iconn1 := types.NewIdentifiedConnection(connA0.ID, conn1)
- iconn2 := types.NewIdentifiedConnection(connA1.ID, conn2)
- iconn3 := types.NewIdentifiedConnection(connA2.ID, conn3)
+ iconn1 := types.NewIdentifiedConnection(path1.EndpointA.ConnectionID, conn1)
+ iconn2 := types.NewIdentifiedConnection(path2.EndpointA.ConnectionID, conn2)
+ iconn3 := types.NewIdentifiedConnection(path3.EndpointA.ConnectionID, conn3)
expConnections = []*types.IdentifiedConnection{&iconn1, &iconn2, &iconn3}
@@ -197,13 +202,21 @@ func (suite *KeeperTestSuite) TestQueryClientConnections() {
{
"success",
func() {
- clientA, clientB, connA0, _ := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
- connA1, _ := suite.coordinator.CreateConnection(suite.chainA, suite.chainB, clientA, clientB)
- expPaths = []string{connA0.ID, connA1.ID}
- suite.chainA.App.IBCKeeper.ConnectionKeeper.SetClientConnectionPaths(suite.chainA.GetContext(), clientA, expPaths)
+ path1 := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.SetupConnections(path1)
+
+ // create another connection using same underlying clients
+ path2 := ibctesting.NewPath(suite.chainA, suite.chainB)
+ path2.EndpointA.ClientID = path1.EndpointA.ClientID
+ path2.EndpointB.ClientID = path1.EndpointB.ClientID
+
+ suite.coordinator.CreateConnections(path2)
+
+ expPaths = []string{path1.EndpointA.ConnectionID, path2.EndpointA.ConnectionID}
+ suite.chainA.App.GetIBCKeeper().ConnectionKeeper.SetClientConnectionPaths(suite.chainA.GetContext(), path1.EndpointA.ClientID, expPaths)
req = &types.QueryClientConnectionsRequest{
- ClientId: clientA,
+ ClientId: path1.EndpointA.ClientID,
}
},
true,
@@ -269,26 +282,28 @@ func (suite *KeeperTestSuite) TestQueryConnectionClientState() {
{
"client state not found",
func() {
- _, _, connA, _, _, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
+ path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.Setup(path)
// set connection to empty so clientID is empty
- suite.chainA.App.IBCKeeper.ConnectionKeeper.SetConnection(suite.chainA.GetContext(), connA.ID, types.ConnectionEnd{})
+ suite.chainA.App.GetIBCKeeper().ConnectionKeeper.SetConnection(suite.chainA.GetContext(), path.EndpointA.ConnectionID, types.ConnectionEnd{})
req = &types.QueryConnectionClientStateRequest{
- ConnectionId: connA.ID,
+ ConnectionId: path.EndpointA.ConnectionID,
}
}, false,
},
{
"success",
func() {
- clientA, _, connA, _ := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
+ path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.SetupConnections(path)
- expClientState := suite.chainA.GetClientState(clientA)
- expIdentifiedClientState = clienttypes.NewIdentifiedClientState(clientA, expClientState)
+ expClientState := suite.chainA.GetClientState(path.EndpointA.ClientID)
+ expIdentifiedClientState = clienttypes.NewIdentifiedClientState(path.EndpointA.ClientID, expClientState)
req = &types.QueryConnectionClientStateRequest{
- ConnectionId: connA.ID,
+ ConnectionId: path.EndpointA.ConnectionID,
}
},
true,
@@ -363,10 +378,11 @@ func (suite *KeeperTestSuite) TestQueryConnectionConsensusState() {
{
"consensus state not found",
func() {
- _, _, connA, _, _, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
+ path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.Setup(path)
req = &types.QueryConnectionConsensusStateRequest{
- ConnectionId: connA.ID,
+ ConnectionId: path.EndpointA.ConnectionID,
RevisionNumber: 0,
RevisionHeight: uint64(suite.chainA.GetContext().BlockHeight()), // use current height
}
@@ -375,15 +391,16 @@ func (suite *KeeperTestSuite) TestQueryConnectionConsensusState() {
{
"success",
func() {
- clientA, _, connA, _ := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
+ path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.SetupConnections(path)
- clientState := suite.chainA.GetClientState(clientA)
- expConsensusState, _ = suite.chainA.GetConsensusState(clientA, clientState.GetLatestHeight())
+ clientState := suite.chainA.GetClientState(path.EndpointA.ClientID)
+ expConsensusState, _ = suite.chainA.GetConsensusState(path.EndpointA.ClientID, clientState.GetLatestHeight())
suite.Require().NotNil(expConsensusState)
- expClientID = clientA
+ expClientID = path.EndpointA.ClientID
req = &types.QueryConnectionConsensusStateRequest{
- ConnectionId: connA.ID,
+ ConnectionId: path.EndpointA.ConnectionID,
RevisionNumber: clientState.GetLatestHeight().GetRevisionNumber(),
RevisionHeight: clientState.GetLatestHeight().GetRevisionHeight(),
}
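The query tests above now build their expected ConnectionEnd values from the path rather than from returned identifier tuples. A sketch of that construction, assuming the KeeperTestSuite fixtures in this file (exampleExpectedConnection is a hypothetical helper, not part of the patch):

func (suite *KeeperTestSuite) exampleExpectedConnection() types.ConnectionEnd {
	path := ibctesting.NewPath(suite.chainA, suite.chainB)
	suite.coordinator.SetupClients(path)

	// run ConnOpenInit on chainA only; the counterparty connection ID is still blank
	err := path.EndpointA.ConnOpenInit()
	suite.Require().NoError(err)

	counterparty := types.NewCounterparty(path.EndpointB.ClientID, "", suite.chainB.GetPrefix())
	expConnection := types.NewConnectionEnd(types.INIT, path.EndpointA.ClientID, counterparty, types.ExportedVersionsToProto(types.GetCompatibleVersions()), 500)

	// keepers are reached through the App interface rather than a concrete field
	suite.chainA.App.GetIBCKeeper().ConnectionKeeper.SetConnection(suite.chainA.GetContext(), path.EndpointA.ConnectionID, expConnection)
	return expConnection
}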
diff --git a/modules/core/03-connection/keeper/handshake_test.go b/modules/core/03-connection/keeper/handshake_test.go
index 1a94fb39..0fc12f3e 100644
--- a/modules/core/03-connection/keeper/handshake_test.go
+++ b/modules/core/03-connection/keeper/handshake_test.go
@@ -8,14 +8,14 @@ import (
host "github.com/cosmos/ibc-go/modules/core/24-host"
"github.com/cosmos/ibc-go/modules/core/exported"
ibctmtypes "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types"
+ ibctesting "github.com/cosmos/ibc-go/testing"
)
// TestConnOpenInit - chainA initializes (INIT state) a connection with
// chainB which is yet UNINITIALIZED
func (suite *KeeperTestSuite) TestConnOpenInit() {
var (
- clientA string
- clientB string
+ path *ibctesting.Path
version *types.Version
delayPeriod uint64
emptyConnBID bool
@@ -27,29 +27,23 @@ func (suite *KeeperTestSuite) TestConnOpenInit() {
expPass bool
}{
{"success", func() {
- clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
}, true},
{"success with empty counterparty identifier", func() {
- clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
emptyConnBID = true
}, true},
{"success with non empty version", func() {
- clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
version = types.ExportedVersionsToProto(types.GetCompatibleVersions())[0]
}, true},
{"success with non zero delayPeriod", func() {
- clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
delayPeriod = uint64(time.Hour.Nanoseconds())
}, true},
{"invalid version", func() {
- clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
version = &types.Version{}
}, false},
{"couldn't add connection to client", func() {
- clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
- // set clientA to invalid client identifier
- clientA = "clientidentifier"
+ // set path.EndpointA.ClientID to invalid client identifier
+ path.EndpointA.ClientID = "clientidentifier"
}, false},
}
@@ -59,16 +53,17 @@ func (suite *KeeperTestSuite) TestConnOpenInit() {
suite.SetupTest() // reset
emptyConnBID = false // must be explicitly changed
version = nil // must be explicitly changed
+ path = ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.SetupClients(path)
tc.malleate()
- connB := suite.chainB.GetFirstTestConnection(clientB, clientA)
if emptyConnBID {
- connB.ID = ""
+ path.EndpointB.ConnectionID = ""
}
- counterparty := types.NewCounterparty(clientB, connB.ID, suite.chainB.GetPrefix())
+ counterparty := types.NewCounterparty(path.EndpointB.ClientID, path.EndpointB.ConnectionID, suite.chainB.GetPrefix())
- connectionID, err := suite.chainA.App.IBCKeeper.ConnectionKeeper.ConnOpenInit(suite.chainA.GetContext(), clientA, counterparty, version, delayPeriod)
+ connectionID, err := suite.chainA.App.GetIBCKeeper().ConnectionKeeper.ConnOpenInit(suite.chainA.GetContext(), path.EndpointA.ClientID, counterparty, version, delayPeriod)
if tc.expPass {
suite.Require().NoError(err)
@@ -85,8 +80,7 @@ func (suite *KeeperTestSuite) TestConnOpenInit() {
// connection on chainA is INIT
func (suite *KeeperTestSuite) TestConnOpenTry() {
var (
- clientA string
- clientB string
+ path *ibctesting.Path
delayPeriod uint64
previousConnectionID string
versions []exported.Version
@@ -100,112 +94,102 @@ func (suite *KeeperTestSuite) TestConnOpenTry() {
expPass bool
}{
{"success", func() {
- clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
- _, _, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB)
+ err := path.EndpointA.ConnOpenInit()
suite.Require().NoError(err)
// retrieve client state of chainA to pass as counterpartyClient
- counterpartyClient = suite.chainA.GetClientState(clientA)
+ counterpartyClient = suite.chainA.GetClientState(path.EndpointA.ClientID)
}, true},
{"success with crossing hellos", func() {
- clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
- _, connB, err := suite.coordinator.ConnOpenInitOnBothChains(suite.chainA, suite.chainB, clientA, clientB)
+ err := suite.coordinator.ConnOpenInitOnBothChains(path)
suite.Require().NoError(err)
// retrieve client state of chainA to pass as counterpartyClient
- counterpartyClient = suite.chainA.GetClientState(clientA)
+ counterpartyClient = suite.chainA.GetClientState(path.EndpointA.ClientID)
- previousConnectionID = connB.ID
+ previousConnectionID = path.EndpointB.ConnectionID
}, true},
{"success with delay period", func() {
- clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
- connA, _, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB)
+ err := path.EndpointA.ConnOpenInit()
suite.Require().NoError(err)
delayPeriod = uint64(time.Hour.Nanoseconds())
// set delay period on counterparty to non-zero value
- conn := suite.chainA.GetConnection(connA)
+ conn := path.EndpointA.GetConnection()
conn.DelayPeriod = delayPeriod
- suite.chainA.App.IBCKeeper.ConnectionKeeper.SetConnection(suite.chainA.GetContext(), connA.ID, conn)
+ suite.chainA.App.GetIBCKeeper().ConnectionKeeper.SetConnection(suite.chainA.GetContext(), path.EndpointA.ConnectionID, conn)
// commit in order for proof to return correct value
suite.coordinator.CommitBlock(suite.chainA)
- suite.coordinator.UpdateClient(suite.chainB, suite.chainA, clientB, exported.Tendermint)
+ path.EndpointB.UpdateClient()
// retrieve client state of chainA to pass as counterpartyClient
- counterpartyClient = suite.chainA.GetClientState(clientA)
+ counterpartyClient = suite.chainA.GetClientState(path.EndpointA.ClientID)
}, true},
{"invalid counterparty client", func() {
- clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
- _, _, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB)
+ err := path.EndpointA.ConnOpenInit()
suite.Require().NoError(err)
// retrieve client state of chainB to pass as counterpartyClient
- counterpartyClient = suite.chainA.GetClientState(clientA)
+ counterpartyClient = suite.chainA.GetClientState(path.EndpointA.ClientID)
// Set an invalid client of chainA on chainB
tmClient, ok := counterpartyClient.(*ibctmtypes.ClientState)
suite.Require().True(ok)
tmClient.ChainId = "wrongchainid"
- suite.chainA.App.IBCKeeper.ClientKeeper.SetClientState(suite.chainA.GetContext(), clientA, tmClient)
+ suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(suite.chainA.GetContext(), path.EndpointA.ClientID, tmClient)
}, false},
{"consensus height >= latest height", func() {
- clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
- _, _, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB)
+ err := path.EndpointA.ConnOpenInit()
suite.Require().NoError(err)
// retrieve client state of chainA to pass as counterpartyClient
- counterpartyClient = suite.chainA.GetClientState(clientA)
+ counterpartyClient = suite.chainA.GetClientState(path.EndpointA.ClientID)
consensusHeight = clienttypes.GetSelfHeight(suite.chainB.GetContext())
}, false},
{"self consensus state not found", func() {
- clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
- _, _, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB)
+ err := path.EndpointA.ConnOpenInit()
suite.Require().NoError(err)
// retrieve client state of chainA to pass as counterpartyClient
- counterpartyClient = suite.chainA.GetClientState(clientA)
+ counterpartyClient = suite.chainA.GetClientState(path.EndpointA.ClientID)
consensusHeight = clienttypes.NewHeight(0, 1)
}, false},
{"counterparty versions is empty", func() {
- clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
- _, _, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB)
+ err := path.EndpointA.ConnOpenInit()
suite.Require().NoError(err)
// retrieve client state of chainA to pass as counterpartyClient
- counterpartyClient = suite.chainA.GetClientState(clientA)
+ counterpartyClient = suite.chainA.GetClientState(path.EndpointA.ClientID)
versions = nil
}, false},
{"counterparty versions don't have a match", func() {
- clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
- _, _, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB)
+ err := path.EndpointA.ConnOpenInit()
suite.Require().NoError(err)
// retrieve client state of chainA to pass as counterpartyClient
- counterpartyClient = suite.chainA.GetClientState(clientA)
+ counterpartyClient = suite.chainA.GetClientState(path.EndpointA.ClientID)
version := types.NewVersion("0.0", nil)
versions = []exported.Version{version}
}, false},
{"connection state verification failed", func() {
- clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
// chainA connection not created
// retrieve client state of chainA to pass as counterpartyClient
- counterpartyClient = suite.chainA.GetClientState(clientA)
+ counterpartyClient = suite.chainA.GetClientState(path.EndpointA.ClientID)
}, false},
{"client state verification failed", func() {
- clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
- _, _, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB)
+ err := path.EndpointA.ConnOpenInit()
suite.Require().NoError(err)
// retrieve client state of chainA to pass as counterpartyClient
- counterpartyClient = suite.chainA.GetClientState(clientA)
+ counterpartyClient = suite.chainA.GetClientState(path.EndpointA.ClientID)
// modify counterparty client without setting in store so it still passes validate but fails proof verification
tmClient, ok := counterpartyClient.(*ibctmtypes.ClientState)
@@ -213,70 +197,64 @@ func (suite *KeeperTestSuite) TestConnOpenTry() {
tmClient.LatestHeight = tmClient.LatestHeight.Increment().(clienttypes.Height)
}, false},
{"consensus state verification failed", func() {
- clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
-
// retrieve client state of chainA to pass as counterpartyClient
- counterpartyClient = suite.chainA.GetClientState(clientA)
+ counterpartyClient = suite.chainA.GetClientState(path.EndpointA.ClientID)
// give chainA wrong consensus state for chainB
- consState, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetLatestClientConsensusState(suite.chainA.GetContext(), clientA)
+ consState, found := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetLatestClientConsensusState(suite.chainA.GetContext(), path.EndpointA.ClientID)
suite.Require().True(found)
tmConsState, ok := consState.(*ibctmtypes.ConsensusState)
suite.Require().True(ok)
tmConsState.Timestamp = time.Now()
- suite.chainA.App.IBCKeeper.ClientKeeper.SetClientConsensusState(suite.chainA.GetContext(), clientA, counterpartyClient.GetLatestHeight(), tmConsState)
+ suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(suite.chainA.GetContext(), path.EndpointA.ClientID, counterpartyClient.GetLatestHeight(), tmConsState)
- _, _, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB)
+ err := path.EndpointA.ConnOpenInit()
suite.Require().NoError(err)
}, false},
{"invalid previous connection is in TRYOPEN", func() {
- clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
-
// open init chainA
- connA, connB, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB)
+ err := path.EndpointA.ConnOpenInit()
suite.Require().NoError(err)
// open try chainB
- err = suite.coordinator.ConnOpenTry(suite.chainB, suite.chainA, connB, connA)
+ err = path.EndpointB.ConnOpenTry()
suite.Require().NoError(err)
- err = suite.coordinator.UpdateClient(suite.chainB, suite.chainA, clientB, exported.Tendermint)
+ err = path.EndpointB.UpdateClient()
suite.Require().NoError(err)
// retrieve client state of chainA to pass as counterpartyClient
- counterpartyClient = suite.chainA.GetClientState(clientA)
+ counterpartyClient = suite.chainA.GetClientState(path.EndpointA.ClientID)
- previousConnectionID = connB.ID
+ previousConnectionID = path.EndpointB.ConnectionID
}, false},
{"invalid previous connection has invalid versions", func() {
- clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
-
// open init chainA
- connA, connB, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB)
+ err := path.EndpointA.ConnOpenInit()
suite.Require().NoError(err)
// open try chainB
- err = suite.coordinator.ConnOpenTry(suite.chainB, suite.chainA, connB, connA)
+ err = path.EndpointB.ConnOpenTry()
suite.Require().NoError(err)
// modify connB to be in INIT with incorrect versions
- connection, found := suite.chainB.App.IBCKeeper.ConnectionKeeper.GetConnection(suite.chainB.GetContext(), connB.ID)
+ connection, found := suite.chainB.App.GetIBCKeeper().ConnectionKeeper.GetConnection(suite.chainB.GetContext(), path.EndpointB.ConnectionID)
suite.Require().True(found)
connection.State = types.INIT
connection.Versions = []*types.Version{{}}
- suite.chainB.App.IBCKeeper.ConnectionKeeper.SetConnection(suite.chainB.GetContext(), connB.ID, connection)
+ suite.chainB.App.GetIBCKeeper().ConnectionKeeper.SetConnection(suite.chainB.GetContext(), path.EndpointB.ConnectionID, connection)
- err = suite.coordinator.UpdateClient(suite.chainB, suite.chainA, clientB, exported.Tendermint)
+ err = path.EndpointB.UpdateClient()
suite.Require().NoError(err)
// retrieve client state of chainA to pass as counterpartyClient
- counterpartyClient = suite.chainA.GetClientState(clientA)
+ counterpartyClient = suite.chainA.GetClientState(path.EndpointA.ClientID)
- previousConnectionID = connB.ID
+ previousConnectionID = path.EndpointB.ConnectionID
}, false},
}
@@ -288,28 +266,33 @@ func (suite *KeeperTestSuite) TestConnOpenTry() {
consensusHeight = clienttypes.ZeroHeight() // must be explicitly changed in malleate
versions = types.GetCompatibleVersions() // must be explicitly changed in malleate
previousConnectionID = ""
+ path = ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.SetupClients(path)
tc.malleate()
- connA := suite.chainA.GetFirstTestConnection(clientA, clientB)
- counterparty := types.NewCounterparty(clientA, connA.ID, suite.chainA.GetPrefix())
+ counterparty := types.NewCounterparty(path.EndpointA.ClientID, path.EndpointA.ConnectionID, suite.chainA.GetPrefix())
- connectionKey := host.ConnectionKey(connA.ID)
+ // ensure client is up to date to receive proof
+ err := path.EndpointB.UpdateClient()
+ suite.Require().NoError(err)
+
+ connectionKey := host.ConnectionKey(path.EndpointA.ConnectionID)
proofInit, proofHeight := suite.chainA.QueryProof(connectionKey)
if consensusHeight.IsZero() {
// retrieve consensus state height to provide proof for
consensusHeight = counterpartyClient.GetLatestHeight()
}
- consensusKey := host.FullConsensusStateKey(clientA, consensusHeight)
+ consensusKey := host.FullConsensusStateKey(path.EndpointA.ClientID, consensusHeight)
proofConsensus, _ := suite.chainA.QueryProof(consensusKey)
// retrieve proof of counterparty clientstate on chainA
- clientKey := host.FullClientStateKey(clientA)
+ clientKey := host.FullClientStateKey(path.EndpointA.ClientID)
proofClient, _ := suite.chainA.QueryProof(clientKey)
- connectionID, err := suite.chainB.App.IBCKeeper.ConnectionKeeper.ConnOpenTry(
- suite.chainB.GetContext(), previousConnectionID, counterparty, delayPeriod, clientB, counterpartyClient,
+ connectionID, err := suite.chainB.App.GetIBCKeeper().ConnectionKeeper.ConnOpenTry(
+ suite.chainB.GetContext(), previousConnectionID, counterparty, delayPeriod, path.EndpointB.ClientID, counterpartyClient,
versions, proofInit, proofClient, proofConsensus,
proofHeight, consensusHeight,
)
@@ -329,8 +312,7 @@ func (suite *KeeperTestSuite) TestConnOpenTry() {
// the initialization (TRYINIT) of the connection on Chain B (ID #2).
func (suite *KeeperTestSuite) TestConnOpenAck() {
var (
- clientA string
- clientB string
+ path *ibctesting.Path
consensusHeight exported.Height
version *types.Version
counterpartyClient exported.ClientState
@@ -342,251 +324,237 @@ func (suite *KeeperTestSuite) TestConnOpenAck() {
expPass bool
}{
{"success", func() {
- clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
- connA, connB, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB)
+ err := path.EndpointA.ConnOpenInit()
suite.Require().NoError(err)
- err = suite.coordinator.ConnOpenTry(suite.chainB, suite.chainA, connB, connA)
+ err = path.EndpointB.ConnOpenTry()
suite.Require().NoError(err)
// retrieve client state of chainB to pass as counterpartyClient
- counterpartyClient = suite.chainB.GetClientState(clientB)
+ counterpartyClient = suite.chainB.GetClientState(path.EndpointB.ClientID)
}, true},
{"success from tryopen", func() {
// chainA is in TRYOPEN, chainB is in TRYOPEN
- clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
- connB, connA, err := suite.coordinator.ConnOpenInit(suite.chainB, suite.chainA, clientB, clientA)
+ err := path.EndpointB.ConnOpenInit()
suite.Require().NoError(err)
- err = suite.coordinator.ConnOpenTry(suite.chainA, suite.chainB, connA, connB)
+ err = path.EndpointA.ConnOpenTry()
suite.Require().NoError(err)
// set chainB to TRYOPEN
- connection := suite.chainB.GetConnection(connB)
+ connection := path.EndpointB.GetConnection()
connection.State = types.TRYOPEN
- connection.Counterparty.ConnectionId = connA.ID
- suite.chainB.App.IBCKeeper.ConnectionKeeper.SetConnection(suite.chainB.GetContext(), connB.ID, connection)
- // update clientB so state change is committed
- suite.coordinator.UpdateClient(suite.chainB, suite.chainA, clientB, exported.Tendermint)
+ connection.Counterparty.ConnectionId = path.EndpointA.ConnectionID
+ suite.chainB.App.GetIBCKeeper().ConnectionKeeper.SetConnection(suite.chainB.GetContext(), path.EndpointB.ConnectionID, connection)
+			// update the client on endpoint B so the state change is committed
+ path.EndpointB.UpdateClient()
- suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ path.EndpointA.UpdateClient()
// retrieve client state of chainB to pass as counterpartyClient
- counterpartyClient = suite.chainB.GetClientState(clientB)
+ counterpartyClient = suite.chainB.GetClientState(path.EndpointB.ClientID)
}, true},
{"invalid counterparty client", func() {
- clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
- connA, connB, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB)
+ err := path.EndpointA.ConnOpenInit()
+ suite.Require().NoError(err)
+
+ err = path.EndpointB.ConnOpenTry()
suite.Require().NoError(err)
// retrieve client state of chainB to pass as counterpartyClient
- counterpartyClient = suite.chainB.GetClientState(clientB)
+ counterpartyClient = suite.chainB.GetClientState(path.EndpointB.ClientID)
// Set an invalid client of chainA on chainB
tmClient, ok := counterpartyClient.(*ibctmtypes.ClientState)
suite.Require().True(ok)
tmClient.ChainId = "wrongchainid"
- suite.chainB.App.IBCKeeper.ClientKeeper.SetClientState(suite.chainB.GetContext(), clientB, tmClient)
+ suite.chainB.App.GetIBCKeeper().ClientKeeper.SetClientState(suite.chainB.GetContext(), path.EndpointB.ClientID, tmClient)
- err = suite.coordinator.ConnOpenTry(suite.chainB, suite.chainA, connB, connA)
- suite.Require().NoError(err)
}, false},
{"consensus height >= latest height", func() {
- clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
- connA, connB, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB)
+ err := path.EndpointA.ConnOpenInit()
suite.Require().NoError(err)
// retrieve client state of chainB to pass as counterpartyClient
- counterpartyClient = suite.chainB.GetClientState(clientB)
+ counterpartyClient = suite.chainB.GetClientState(path.EndpointB.ClientID)
- err = suite.coordinator.ConnOpenTry(suite.chainB, suite.chainA, connB, connA)
+ err = path.EndpointB.ConnOpenTry()
suite.Require().NoError(err)
consensusHeight = clienttypes.GetSelfHeight(suite.chainA.GetContext())
}, false},
{"connection not found", func() {
// connections are never created
- clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
// retrieve client state of chainB to pass as counterpartyClient
- counterpartyClient = suite.chainB.GetClientState(clientB)
+ counterpartyClient = suite.chainB.GetClientState(path.EndpointB.ClientID)
}, false},
{"invalid counterparty connection ID", func() {
- clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
- connA, connB, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB)
+ err := path.EndpointA.ConnOpenInit()
suite.Require().NoError(err)
// retrieve client state of chainB to pass as counterpartyClient
- counterpartyClient = suite.chainB.GetClientState(clientB)
+ counterpartyClient = suite.chainB.GetClientState(path.EndpointB.ClientID)
- err = suite.coordinator.ConnOpenTry(suite.chainB, suite.chainA, connB, connA)
+ err = path.EndpointB.ConnOpenTry()
suite.Require().NoError(err)
// modify connB to set counterparty connection identifier to wrong identifier
- connection, found := suite.chainA.App.IBCKeeper.ConnectionKeeper.GetConnection(suite.chainA.GetContext(), connA.ID)
+ connection, found := suite.chainA.App.GetIBCKeeper().ConnectionKeeper.GetConnection(suite.chainA.GetContext(), path.EndpointA.ConnectionID)
suite.Require().True(found)
connection.Counterparty.ConnectionId = "badconnectionid"
- suite.chainA.App.IBCKeeper.ConnectionKeeper.SetConnection(suite.chainA.GetContext(), connA.ID, connection)
+ suite.chainA.App.GetIBCKeeper().ConnectionKeeper.SetConnection(suite.chainA.GetContext(), path.EndpointA.ConnectionID, connection)
- err = suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ err = path.EndpointA.UpdateClient()
suite.Require().NoError(err)
- err = suite.coordinator.UpdateClient(suite.chainB, suite.chainA, clientB, exported.Tendermint)
+ err = path.EndpointB.UpdateClient()
suite.Require().NoError(err)
}, false},
{"connection state is not INIT", func() {
// connection state is already OPEN on chainA
- clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
- connA, connB, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB)
+ err := path.EndpointA.ConnOpenInit()
suite.Require().NoError(err)
// retrieve client state of chainB to pass as counterpartyClient
- counterpartyClient = suite.chainB.GetClientState(clientB)
+ counterpartyClient = suite.chainB.GetClientState(path.EndpointB.ClientID)
- err = suite.coordinator.ConnOpenTry(suite.chainB, suite.chainA, connB, connA)
+ err = path.EndpointB.ConnOpenTry()
suite.Require().NoError(err)
- err = suite.coordinator.ConnOpenAck(suite.chainA, suite.chainB, connA, connB)
+ err = path.EndpointA.ConnOpenAck()
suite.Require().NoError(err)
}, false},
{"connection is in INIT but the proposed version is invalid", func() {
// chainA is in INIT, chainB is in TRYOPEN
- clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
- connA, connB, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB)
+ err := path.EndpointA.ConnOpenInit()
suite.Require().NoError(err)
// retrieve client state of chainB to pass as counterpartyClient
- counterpartyClient = suite.chainB.GetClientState(clientB)
+ counterpartyClient = suite.chainB.GetClientState(path.EndpointB.ClientID)
- err = suite.coordinator.ConnOpenTry(suite.chainB, suite.chainA, connB, connA)
+ err = path.EndpointB.ConnOpenTry()
suite.Require().NoError(err)
version = types.NewVersion("2.0", nil)
}, false},
{"connection is in TRYOPEN but the set version in the connection is invalid", func() {
// chainA is in TRYOPEN, chainB is in TRYOPEN
- clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
- connB, connA, err := suite.coordinator.ConnOpenInit(suite.chainB, suite.chainA, clientB, clientA)
+ err := path.EndpointB.ConnOpenInit()
suite.Require().NoError(err)
- err = suite.coordinator.ConnOpenTry(suite.chainA, suite.chainB, connA, connB)
+ err = path.EndpointA.ConnOpenTry()
suite.Require().NoError(err)
// set chainB to TRYOPEN
- connection := suite.chainB.GetConnection(connB)
+ connection := path.EndpointB.GetConnection()
connection.State = types.TRYOPEN
- suite.chainB.App.IBCKeeper.ConnectionKeeper.SetConnection(suite.chainB.GetContext(), connB.ID, connection)
+ suite.chainB.App.GetIBCKeeper().ConnectionKeeper.SetConnection(suite.chainB.GetContext(), path.EndpointB.ConnectionID, connection)
- // update clientB so state change is committed
- suite.coordinator.UpdateClient(suite.chainB, suite.chainA, clientB, exported.Tendermint)
- suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+			// update the client on endpoint B so the state change is committed
+ path.EndpointB.UpdateClient()
+ path.EndpointA.UpdateClient()
// retrieve client state of chainB to pass as counterpartyClient
- counterpartyClient = suite.chainB.GetClientState(clientB)
+ counterpartyClient = suite.chainB.GetClientState(path.EndpointB.ClientID)
version = types.NewVersion("2.0", nil)
}, false},
{"incompatible IBC versions", func() {
- clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
- connA, connB, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB)
+ err := path.EndpointA.ConnOpenInit()
suite.Require().NoError(err)
// retrieve client state of chainB to pass as counterpartyClient
- counterpartyClient = suite.chainB.GetClientState(clientB)
+ counterpartyClient = suite.chainB.GetClientState(path.EndpointB.ClientID)
- err = suite.coordinator.ConnOpenTry(suite.chainB, suite.chainA, connB, connA)
+ err = path.EndpointB.ConnOpenTry()
suite.Require().NoError(err)
// set version to a non-compatible version
version = types.NewVersion("2.0", nil)
}, false},
{"empty version", func() {
- clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
- connA, connB, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB)
+ err := path.EndpointA.ConnOpenInit()
suite.Require().NoError(err)
// retrieve client state of chainB to pass as counterpartyClient
- counterpartyClient = suite.chainB.GetClientState(clientB)
+ counterpartyClient = suite.chainB.GetClientState(path.EndpointB.ClientID)
- err = suite.coordinator.ConnOpenTry(suite.chainB, suite.chainA, connB, connA)
+ err = path.EndpointB.ConnOpenTry()
suite.Require().NoError(err)
version = &types.Version{}
}, false},
{"feature set verification failed - unsupported feature", func() {
- clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
- connA, connB, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB)
+ err := path.EndpointA.ConnOpenInit()
suite.Require().NoError(err)
// retrieve client state of chainB to pass as counterpartyClient
- counterpartyClient = suite.chainB.GetClientState(clientB)
+ counterpartyClient = suite.chainB.GetClientState(path.EndpointB.ClientID)
- err = suite.coordinator.ConnOpenTry(suite.chainB, suite.chainA, connB, connA)
+ err = path.EndpointB.ConnOpenTry()
suite.Require().NoError(err)
version = types.NewVersion(types.DefaultIBCVersionIdentifier, []string{"ORDER_ORDERED", "ORDER_UNORDERED", "ORDER_DAG"})
}, false},
{"self consensus state not found", func() {
- clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
- connA, connB, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB)
+ err := path.EndpointA.ConnOpenInit()
suite.Require().NoError(err)
// retrieve client state of chainB to pass as counterpartyClient
- counterpartyClient = suite.chainB.GetClientState(clientB)
+ counterpartyClient = suite.chainB.GetClientState(path.EndpointB.ClientID)
- err = suite.coordinator.ConnOpenTry(suite.chainB, suite.chainA, connB, connA)
+ err = path.EndpointB.ConnOpenTry()
suite.Require().NoError(err)
consensusHeight = clienttypes.NewHeight(0, 1)
}, false},
{"connection state verification failed", func() {
// chainB connection is not in INIT
- clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
- _, _, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB)
+ err := path.EndpointA.ConnOpenInit()
suite.Require().NoError(err)
// retrieve client state of chainB to pass as counterpartyClient
- counterpartyClient = suite.chainB.GetClientState(clientB)
+ counterpartyClient = suite.chainB.GetClientState(path.EndpointB.ClientID)
}, false},
{"client state verification failed", func() {
- clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
- connA, connB, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB)
+ err := path.EndpointA.ConnOpenInit()
suite.Require().NoError(err)
// retrieve client state of chainB to pass as counterpartyClient
- counterpartyClient = suite.chainB.GetClientState(clientB)
+ counterpartyClient = suite.chainB.GetClientState(path.EndpointB.ClientID)
// modify counterparty client without setting in store so it still passes validate but fails proof verification
tmClient, ok := counterpartyClient.(*ibctmtypes.ClientState)
suite.Require().True(ok)
tmClient.LatestHeight = tmClient.LatestHeight.Increment().(clienttypes.Height)
- err = suite.coordinator.ConnOpenTry(suite.chainB, suite.chainA, connB, connA)
+ err = path.EndpointB.ConnOpenTry()
suite.Require().NoError(err)
}, false},
{"consensus state verification failed", func() {
- clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
- connA, connB, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB)
+ err := path.EndpointA.ConnOpenInit()
suite.Require().NoError(err)
// retrieve client state of chainB to pass as counterpartyClient
- counterpartyClient = suite.chainB.GetClientState(clientB)
+ counterpartyClient = suite.chainB.GetClientState(path.EndpointB.ClientID)
// give chainB wrong consensus state for chainA
- consState, found := suite.chainB.App.IBCKeeper.ClientKeeper.GetLatestClientConsensusState(suite.chainB.GetContext(), clientB)
+ consState, found := suite.chainB.App.GetIBCKeeper().ClientKeeper.GetLatestClientConsensusState(suite.chainB.GetContext(), path.EndpointB.ClientID)
suite.Require().True(found)
tmConsState, ok := consState.(*ibctmtypes.ConsensusState)
suite.Require().True(ok)
- tmConsState.Timestamp = time.Now()
- suite.chainB.App.IBCKeeper.ClientKeeper.SetClientConsensusState(suite.chainB.GetContext(), clientB, counterpartyClient.GetLatestHeight(), tmConsState)
+ tmConsState.Timestamp = tmConsState.Timestamp.Add(time.Second)
+ suite.chainB.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(suite.chainB.GetContext(), path.EndpointB.ClientID, counterpartyClient.GetLatestHeight(), tmConsState)
- err = suite.coordinator.ConnOpenTry(suite.chainB, suite.chainA, connB, connA)
+ err = path.EndpointB.ConnOpenTry()
suite.Require().NoError(err)
+
}, false},
}
@@ -596,29 +564,32 @@ func (suite *KeeperTestSuite) TestConnOpenAck() {
suite.SetupTest() // reset
version = types.ExportedVersionsToProto(types.GetCompatibleVersions())[0] // must be explicitly changed in malleate
consensusHeight = clienttypes.ZeroHeight() // must be explicitly changed in malleate
+ path = ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.SetupClients(path)
tc.malleate()
- connA := suite.chainA.GetFirstTestConnection(clientA, clientB)
- connB := suite.chainB.GetFirstTestConnection(clientB, clientA)
+ // ensure client is up to date to receive proof
+ err := path.EndpointA.UpdateClient()
+ suite.Require().NoError(err)
- connectionKey := host.ConnectionKey(connB.ID)
+ connectionKey := host.ConnectionKey(path.EndpointB.ConnectionID)
proofTry, proofHeight := suite.chainB.QueryProof(connectionKey)
if consensusHeight.IsZero() {
// retrieve consensus state height to provide proof for
- clientState := suite.chainB.GetClientState(clientB)
+ clientState := suite.chainB.GetClientState(path.EndpointB.ClientID)
consensusHeight = clientState.GetLatestHeight()
}
- consensusKey := host.FullConsensusStateKey(clientB, consensusHeight)
+ consensusKey := host.FullConsensusStateKey(path.EndpointB.ClientID, consensusHeight)
proofConsensus, _ := suite.chainB.QueryProof(consensusKey)
// retrieve proof of counterparty clientstate on chainA
- clientKey := host.FullClientStateKey(clientB)
+ clientKey := host.FullClientStateKey(path.EndpointB.ClientID)
proofClient, _ := suite.chainB.QueryProof(clientKey)
- err := suite.chainA.App.IBCKeeper.ConnectionKeeper.ConnOpenAck(
- suite.chainA.GetContext(), connA.ID, counterpartyClient, version, connB.ID,
+ err = suite.chainA.App.GetIBCKeeper().ConnectionKeeper.ConnOpenAck(
+ suite.chainA.GetContext(), path.EndpointA.ConnectionID, counterpartyClient, version, path.EndpointB.ConnectionID,
proofTry, proofClient, proofConsensus, proofHeight, consensusHeight,
)
@@ -634,41 +605,35 @@ func (suite *KeeperTestSuite) TestConnOpenAck() {
// TestConnOpenConfirm - chainB calls ConnOpenConfirm to confirm that
// chainA state is now OPEN.
func (suite *KeeperTestSuite) TestConnOpenConfirm() {
- var (
- clientA string
- clientB string
- )
+ var path *ibctesting.Path
testCases := []struct {
msg string
malleate func()
expPass bool
}{
{"success", func() {
- clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
- connA, connB, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB)
+ err := path.EndpointA.ConnOpenInit()
suite.Require().NoError(err)
- err = suite.coordinator.ConnOpenTry(suite.chainB, suite.chainA, connB, connA)
+ err = path.EndpointB.ConnOpenTry()
suite.Require().NoError(err)
- err = suite.coordinator.ConnOpenAck(suite.chainA, suite.chainB, connA, connB)
+ err = path.EndpointA.ConnOpenAck()
suite.Require().NoError(err)
}, true},
{"connection not found", func() {
// connections are never created
- clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
}, false},
{"chain B's connection state is not TRYOPEN", func() {
// connections are OPEN
- clientA, clientB, _, _ = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
+ suite.coordinator.CreateConnections(path)
}, false},
{"connection state verification failed", func() {
// chainA is in INIT
- clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
- connA, connB, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB)
+ err := path.EndpointA.ConnOpenInit()
suite.Require().NoError(err)
- err = suite.coordinator.ConnOpenTry(suite.chainB, suite.chainA, connB, connA)
+ err = path.EndpointB.ConnOpenTry()
suite.Require().NoError(err)
}, false},
}
@@ -678,17 +643,20 @@ func (suite *KeeperTestSuite) TestConnOpenConfirm() {
suite.Run(tc.msg, func() {
suite.SetupTest() // reset
+ path = ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.SetupClients(path)
tc.malleate()
- connA := suite.chainA.GetFirstTestConnection(clientA, clientB)
- connB := suite.chainB.GetFirstTestConnection(clientB, clientA)
+ // ensure client is up to date to receive proof
+ err := path.EndpointB.UpdateClient()
+ suite.Require().NoError(err)
- connectionKey := host.ConnectionKey(connA.ID)
+ connectionKey := host.ConnectionKey(path.EndpointA.ConnectionID)
proofAck, proofHeight := suite.chainA.QueryProof(connectionKey)
- err := suite.chainB.App.IBCKeeper.ConnectionKeeper.ConnOpenConfirm(
- suite.chainB.GetContext(), connB.ID, proofAck, proofHeight,
+ err = suite.chainB.App.GetIBCKeeper().ConnectionKeeper.ConnOpenConfirm(
+ suite.chainB.GetContext(), path.EndpointB.ConnectionID, proofAck, proofHeight,
)
if tc.expPass {
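The handshake tests above now drive each handshake step from the endpoints instead of the coordinator's chain-pair helpers. A sketch of the full flow these cases compose, under the same suite fixtures (exampleManualHandshake is a hypothetical name, not part of the patch):

func (suite *KeeperTestSuite) exampleManualHandshake() {
	path := ibctesting.NewPath(suite.chainA, suite.chainB)
	suite.coordinator.SetupClients(path)

	// INIT on chainA
	err := path.EndpointA.ConnOpenInit()
	suite.Require().NoError(err)

	// TRYOPEN on chainB
	err = path.EndpointB.ConnOpenTry()
	suite.Require().NoError(err)

	// OPEN on chainA
	err = path.EndpointA.ConnOpenAck()
	suite.Require().NoError(err)

	// endpoints expose the stored connection end directly
	connection := path.EndpointA.GetConnection()
	suite.Require().Equal(types.OPEN, connection.State)
}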
diff --git a/modules/core/03-connection/keeper/keeper_test.go b/modules/core/03-connection/keeper/keeper_test.go
index a6a6807e..d2eb86f6 100644
--- a/modules/core/03-connection/keeper/keeper_test.go
+++ b/modules/core/03-connection/keeper/keeper_test.go
@@ -7,7 +7,6 @@ import (
"github.com/stretchr/testify/suite"
"github.com/cosmos/ibc-go/modules/core/03-connection/types"
- "github.com/cosmos/ibc-go/modules/core/exported"
ibctesting "github.com/cosmos/ibc-go/testing"
)
@@ -32,62 +31,80 @@ func TestKeeperTestSuite(t *testing.T) {
}
func (suite *KeeperTestSuite) TestSetAndGetConnection() {
- clientA, clientB := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
- connA := suite.chainA.GetFirstTestConnection(clientA, clientB)
- _, existed := suite.chainA.App.IBCKeeper.ConnectionKeeper.GetConnection(suite.chainA.GetContext(), connA.ID)
+ path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.SetupClients(path)
+ firstConnection := "connection-0"
+
+ // check first connection does not exist
+ _, existed := suite.chainA.App.GetIBCKeeper().ConnectionKeeper.GetConnection(suite.chainA.GetContext(), firstConnection)
suite.Require().False(existed)
- suite.coordinator.CreateConnection(suite.chainA, suite.chainB, clientA, clientB)
- _, existed = suite.chainA.App.IBCKeeper.ConnectionKeeper.GetConnection(suite.chainA.GetContext(), connA.ID)
+ suite.coordinator.CreateConnections(path)
+ _, existed = suite.chainA.App.GetIBCKeeper().ConnectionKeeper.GetConnection(suite.chainA.GetContext(), firstConnection)
suite.Require().True(existed)
}
func (suite *KeeperTestSuite) TestSetAndGetClientConnectionPaths() {
- clientA, _ := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
- _, existed := suite.chainA.App.IBCKeeper.ConnectionKeeper.GetClientConnectionPaths(suite.chainA.GetContext(), clientA)
+
+ path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.SetupClients(path)
+
+ _, existed := suite.chainA.App.GetIBCKeeper().ConnectionKeeper.GetClientConnectionPaths(suite.chainA.GetContext(), path.EndpointA.ClientID)
suite.False(existed)
connections := []string{"connectionA", "connectionB"}
- suite.chainA.App.IBCKeeper.ConnectionKeeper.SetClientConnectionPaths(suite.chainA.GetContext(), clientA, connections)
- paths, existed := suite.chainA.App.IBCKeeper.ConnectionKeeper.GetClientConnectionPaths(suite.chainA.GetContext(), clientA)
+ suite.chainA.App.GetIBCKeeper().ConnectionKeeper.SetClientConnectionPaths(suite.chainA.GetContext(), path.EndpointA.ClientID, connections)
+ paths, existed := suite.chainA.App.GetIBCKeeper().ConnectionKeeper.GetClientConnectionPaths(suite.chainA.GetContext(), path.EndpointA.ClientID)
suite.True(existed)
suite.EqualValues(connections, paths)
}
// create 2 connections: A0 - B0, A1 - B1
func (suite KeeperTestSuite) TestGetAllConnections() {
- clientA, clientB, connA0, connB0 := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
- connA1, connB1 := suite.coordinator.CreateConnection(suite.chainA, suite.chainB, clientA, clientB)
+ path1 := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.SetupConnections(path1)
- counterpartyB0 := types.NewCounterparty(clientB, connB0.ID, suite.chainB.GetPrefix()) // connection B0
- counterpartyB1 := types.NewCounterparty(clientB, connB1.ID, suite.chainB.GetPrefix()) // connection B1
+ path2 := ibctesting.NewPath(suite.chainA, suite.chainB)
+ path2.EndpointA.ClientID = path1.EndpointA.ClientID
+ path2.EndpointB.ClientID = path1.EndpointB.ClientID
- conn1 := types.NewConnectionEnd(types.OPEN, clientA, counterpartyB0, types.ExportedVersionsToProto(types.GetCompatibleVersions()), 0) // A0 - B0
- conn2 := types.NewConnectionEnd(types.OPEN, clientA, counterpartyB1, types.ExportedVersionsToProto(types.GetCompatibleVersions()), 0) // A1 - B1
+ suite.coordinator.CreateConnections(path2)
- iconn1 := types.NewIdentifiedConnection(connA0.ID, conn1)
- iconn2 := types.NewIdentifiedConnection(connA1.ID, conn2)
+ counterpartyB0 := types.NewCounterparty(path1.EndpointB.ClientID, path1.EndpointB.ConnectionID, suite.chainB.GetPrefix()) // connection B0
+ counterpartyB1 := types.NewCounterparty(path2.EndpointB.ClientID, path2.EndpointB.ConnectionID, suite.chainB.GetPrefix()) // connection B1
+
+ conn1 := types.NewConnectionEnd(types.OPEN, path1.EndpointA.ClientID, counterpartyB0, types.ExportedVersionsToProto(types.GetCompatibleVersions()), 0) // A0 - B0
+ conn2 := types.NewConnectionEnd(types.OPEN, path2.EndpointA.ClientID, counterpartyB1, types.ExportedVersionsToProto(types.GetCompatibleVersions()), 0) // A1 - B1
+
+ iconn1 := types.NewIdentifiedConnection(path1.EndpointA.ConnectionID, conn1)
+ iconn2 := types.NewIdentifiedConnection(path2.EndpointA.ConnectionID, conn2)
expConnections := []types.IdentifiedConnection{iconn1, iconn2}
- connections := suite.chainA.App.IBCKeeper.ConnectionKeeper.GetAllConnections(suite.chainA.GetContext())
+ connections := suite.chainA.App.GetIBCKeeper().ConnectionKeeper.GetAllConnections(suite.chainA.GetContext())
suite.Require().Len(connections, len(expConnections))
suite.Require().Equal(expConnections, connections)
}
-// the test creates 2 clients clientA0 and clientA1. clientA0 has a single
-// connection and clientA1 has 2 connections.
+// the test creates 2 clients on chainA. the first client has a single
+// connection and the second client has 2 connections.
func (suite KeeperTestSuite) TestGetAllClientConnectionPaths() {
- clientA0, _, connA0, _ := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
- clientA1, clientB1, connA1, _ := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
- connA2, _ := suite.coordinator.CreateConnection(suite.chainA, suite.chainB, clientA1, clientB1)
+ path1 := ibctesting.NewPath(suite.chainA, suite.chainB)
+ path2 := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.SetupConnections(path1)
+ suite.coordinator.SetupConnections(path2)
+
+ path3 := ibctesting.NewPath(suite.chainA, suite.chainB)
+ path3.EndpointA.ClientID = path2.EndpointA.ClientID
+ path3.EndpointB.ClientID = path2.EndpointB.ClientID
+ suite.coordinator.CreateConnections(path3)
expPaths := []types.ConnectionPaths{
- types.NewConnectionPaths(clientA0, []string{connA0.ID}),
- types.NewConnectionPaths(clientA1, []string{connA1.ID, connA2.ID}),
+ types.NewConnectionPaths(path1.EndpointA.ClientID, []string{path1.EndpointA.ConnectionID}),
+ types.NewConnectionPaths(path2.EndpointA.ClientID, []string{path2.EndpointA.ConnectionID, path3.EndpointA.ConnectionID}),
}
- connPaths := suite.chainA.App.IBCKeeper.ConnectionKeeper.GetAllClientConnectionPaths(suite.chainA.GetContext())
+ connPaths := suite.chainA.App.GetIBCKeeper().ConnectionKeeper.GetAllClientConnectionPaths(suite.chainA.GetContext())
suite.Require().Len(connPaths, 2)
suite.Require().Equal(expPaths, connPaths)
}
@@ -103,8 +120,9 @@ func (suite *KeeperTestSuite) TestGetTimestampAtHeight() {
expPass bool
}{
{"verification success", func() {
- _, _, connA, _ := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
- connection = suite.chainA.GetConnection(connA)
+ path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.SetupConnections(path)
+ connection = path.EndpointA.GetConnection()
}, true},
{"consensus state not found", func() {
// any non-nil value of connection is valid
@@ -118,7 +136,7 @@ func (suite *KeeperTestSuite) TestGetTimestampAtHeight() {
tc.malleate()
- actualTimestamp, err := suite.chainA.App.IBCKeeper.ConnectionKeeper.GetTimestampAtHeight(
+ actualTimestamp, err := suite.chainA.App.GetIBCKeeper().ConnectionKeeper.GetTimestampAtHeight(
suite.chainA.GetContext(), connection, suite.chainB.LastHeader.GetHeight(),
)
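Where the old API created a second connection by passing the same client identifiers back to CreateConnection, the rewritten keeper tests copy the client IDs from the first path onto a second path before calling CreateConnections, as in the TestGetAllConnections hunk above. A sketch of that pattern (exampleSecondConnection is a hypothetical name using this file's fixtures):

func (suite *KeeperTestSuite) exampleSecondConnection() {
	path1 := ibctesting.NewPath(suite.chainA, suite.chainB)
	suite.coordinator.SetupConnections(path1) // clients plus first connection

	// reuse the clients created for path1 so only a new connection is opened
	path2 := ibctesting.NewPath(suite.chainA, suite.chainB)
	path2.EndpointA.ClientID = path1.EndpointA.ClientID
	path2.EndpointB.ClientID = path1.EndpointB.ClientID
	suite.coordinator.CreateConnections(path2)

	// both connections are now visible through the connection keeper
	connections := suite.chainA.App.GetIBCKeeper().ConnectionKeeper.GetAllConnections(suite.chainA.GetContext())
	suite.Require().Len(connections, 2)
}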
diff --git a/modules/core/03-connection/keeper/verify_test.go b/modules/core/03-connection/keeper/verify_test.go
index 9afd4816..2c63ea97 100644
--- a/modules/core/03-connection/keeper/verify_test.go
+++ b/modules/core/03-connection/keeper/verify_test.go
@@ -17,7 +17,7 @@ import (
var defaultTimeoutHeight = clienttypes.NewHeight(0, 100000)
// TestVerifyClientState verifies a client state of chainA
-// stored on clientB (which is on chainB)
+// stored on path.EndpointB.ClientID (which is on chainB)
func (suite *KeeperTestSuite) TestVerifyClientState() {
cases := []struct {
msg string
@@ -38,9 +38,10 @@ func (suite *KeeperTestSuite) TestVerifyClientState() {
suite.Run(tc.msg, func() {
suite.SetupTest() // reset
- _, clientB, connA, _ := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
+ path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.SetupConnections(path)
- counterpartyClient, clientProof := suite.chainB.QueryClientStateProof(clientB)
+ counterpartyClient, clientProof := path.EndpointB.QueryClientStateProof()
proofHeight := clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()-1))
if tc.malleateCounterparty {
@@ -48,12 +49,12 @@ func (suite *KeeperTestSuite) TestVerifyClientState() {
tmClient.ChainId = "wrongChainID"
}
- connection := suite.chainA.GetConnection(connA)
+ connection := path.EndpointA.GetConnection()
if tc.changeClientID {
connection.ClientId = ibctesting.InvalidID
}
- err := suite.chainA.App.IBCKeeper.ConnectionKeeper.VerifyClientState(
+ err := suite.chainA.App.GetIBCKeeper().ConnectionKeeper.VerifyClientState(
suite.chainA.GetContext(), connection,
malleateHeight(proofHeight, tc.heightDiff), clientProof, counterpartyClient,
)
@@ -68,12 +69,11 @@ func (suite *KeeperTestSuite) TestVerifyClientState() {
}
// TestVerifyClientConsensusState verifies that the consensus state of
-// chainA stored on clientB (which is on chainB) matches the consensus
+// chainA stored on path.EndpointB.ClientID (which is on chainB) matches the consensus
// state for chainA at that height.
func (suite *KeeperTestSuite) TestVerifyClientConsensusState() {
var (
- connA *ibctesting.TestConnection
- connB *ibctesting.TestConnection
+ path *ibctesting.Path
changeClientID bool
heightDiff uint64
)
@@ -83,32 +83,25 @@ func (suite *KeeperTestSuite) TestVerifyClientConsensusState() {
expPass bool
}{
{"verification success", func() {
- _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
}, true},
{"client state not found", func() {
- _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
-
changeClientID = true
}, false},
{"consensus state not found", func() {
- _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
-
heightDiff = 5
}, false},
{"verification failed", func() {
- _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
- clientB := connB.ClientID
- clientState := suite.chainB.GetClientState(clientB)
+ clientState := suite.chainB.GetClientState(path.EndpointB.ClientID)
// give chainB wrong consensus state for chainA
- consState, found := suite.chainB.App.IBCKeeper.ClientKeeper.GetLatestClientConsensusState(suite.chainB.GetContext(), clientB)
+ consState, found := suite.chainB.App.GetIBCKeeper().ClientKeeper.GetLatestClientConsensusState(suite.chainB.GetContext(), path.EndpointB.ClientID)
suite.Require().True(found)
tmConsState, ok := consState.(*ibctmtypes.ConsensusState)
suite.Require().True(ok)
tmConsState.Timestamp = time.Now()
- suite.chainB.App.IBCKeeper.ClientKeeper.SetClientConsensusState(suite.chainB.GetContext(), clientB, clientState.GetLatestHeight(), tmConsState)
+ suite.chainB.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(suite.chainB.GetContext(), path.EndpointB.ClientID, clientState.GetLatestHeight(), tmConsState)
suite.coordinator.CommitBlock(suite.chainB)
}, false},
@@ -121,20 +114,22 @@ func (suite *KeeperTestSuite) TestVerifyClientConsensusState() {
suite.SetupTest() // reset
heightDiff = 0 // must be explicitly changed in malleate
changeClientID = false // must be explicitly changed in malleate
+ path = ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.SetupConnections(path)
tc.malleate()
- connection := suite.chainA.GetConnection(connA)
+ connection := path.EndpointA.GetConnection()
if changeClientID {
connection.ClientId = ibctesting.InvalidID
}
- proof, consensusHeight := suite.chainB.QueryConsensusStateProof(connB.ClientID)
+ proof, consensusHeight := suite.chainB.QueryConsensusStateProof(path.EndpointB.ClientID)
proofHeight := clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()-1))
- consensusState, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetSelfConsensusState(suite.chainA.GetContext(), consensusHeight)
+ consensusState, found := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetSelfConsensusState(suite.chainA.GetContext(), consensusHeight)
suite.Require().True(found)
- err := suite.chainA.App.IBCKeeper.ConnectionKeeper.VerifyClientConsensusState(
+ err := suite.chainA.App.GetIBCKeeper().ConnectionKeeper.VerifyClientConsensusState(
suite.chainA.GetContext(), connection,
malleateHeight(proofHeight, heightDiff), consensusHeight, proof, consensusState,
)
@@ -170,24 +165,25 @@ func (suite *KeeperTestSuite) TestVerifyConnectionState() {
suite.Run(tc.msg, func() {
suite.SetupTest() // reset
- _, _, connA, connB := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
+ path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.SetupConnections(path)
- connection := suite.chainA.GetConnection(connA)
+ connection := path.EndpointA.GetConnection()
if tc.changeClientID {
connection.ClientId = ibctesting.InvalidID
}
- expectedConnection := suite.chainB.GetConnection(connB)
+ expectedConnection := path.EndpointB.GetConnection()
- connectionKey := host.ConnectionKey(connB.ID)
+ connectionKey := host.ConnectionKey(path.EndpointB.ConnectionID)
proof, proofHeight := suite.chainB.QueryProof(connectionKey)
if tc.changeConnectionState {
expectedConnection.State = types.TRYOPEN
}
- err := suite.chainA.App.IBCKeeper.ConnectionKeeper.VerifyConnectionState(
+ err := suite.chainA.App.GetIBCKeeper().ConnectionKeeper.VerifyConnectionState(
suite.chainA.GetContext(), connection,
- malleateHeight(proofHeight, tc.heightDiff), proof, connB.ID, expectedConnection,
+ malleateHeight(proofHeight, tc.heightDiff), proof, path.EndpointB.ConnectionID, expectedConnection,
)
if tc.expPass {
@@ -221,23 +217,24 @@ func (suite *KeeperTestSuite) TestVerifyChannelState() {
suite.Run(fmt.Sprintf("Case %s", tc.msg), func() {
suite.SetupTest() // reset
- _, _, connA, _, _, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
- connection := suite.chainA.GetConnection(connA)
+ path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.Setup(path)
+ connection := path.EndpointA.GetConnection()
if tc.changeClientID {
connection.ClientId = ibctesting.InvalidID
}
- channelKey := host.ChannelKey(channelB.PortID, channelB.ID)
+ channelKey := host.ChannelKey(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID)
proof, proofHeight := suite.chainB.QueryProof(channelKey)
- channel := suite.chainB.GetChannel(channelB)
+ channel := path.EndpointB.GetChannel()
if tc.changeChannelState {
channel.State = channeltypes.TRYOPEN
}
- err := suite.chainA.App.IBCKeeper.ConnectionKeeper.VerifyChannelState(
+ err := suite.chainA.App.GetIBCKeeper().ConnectionKeeper.VerifyChannelState(
suite.chainA.GetContext(), connection, malleateHeight(proofHeight, tc.heightDiff), proof,
- channelB.PortID, channelB.ID, channel,
+ path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, channel,
)
if tc.expPass {
@@ -275,16 +272,17 @@ func (suite *KeeperTestSuite) TestVerifyPacketCommitment() {
suite.Run(tc.msg, func() {
suite.SetupTest() // reset
- _, clientB, _, connB, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
+ path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.Setup(path)
- connection := suite.chainB.GetConnection(connB)
+ connection := path.EndpointB.GetConnection()
connection.DelayPeriod = tc.delayPeriod
if tc.changeClientID {
connection.ClientId = ibctesting.InvalidID
}
- packet := channeltypes.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, defaultTimeoutHeight, 0)
- err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ packet := channeltypes.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, defaultTimeoutHeight, 0)
+ err := path.EndpointA.SendPacket(packet)
suite.Require().NoError(err)
commitmentKey := host.PacketCommitmentKey(packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence())
@@ -294,8 +292,8 @@ func (suite *KeeperTestSuite) TestVerifyPacketCommitment() {
packet.Data = []byte(ibctesting.InvalidID)
}
- commitment := channeltypes.CommitPacket(suite.chainB.App.IBCKeeper.Codec(), packet)
- err = suite.chainB.App.IBCKeeper.ConnectionKeeper.VerifyPacketCommitment(
+ commitment := channeltypes.CommitPacket(suite.chainB.App.GetIBCKeeper().Codec(), packet)
+ err = suite.chainB.App.GetIBCKeeper().ConnectionKeeper.VerifyPacketCommitment(
suite.chainB.GetContext(), connection, malleateHeight(proofHeight, tc.heightDiff), proof,
packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence(), commitment,
)
@@ -335,24 +333,25 @@ func (suite *KeeperTestSuite) TestVerifyPacketAcknowledgement() {
suite.Run(tc.msg, func() {
suite.SetupTest() // reset
- clientA, clientB, connA, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
+ path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.Setup(path)
- connection := suite.chainA.GetConnection(connA)
+ connection := path.EndpointA.GetConnection()
connection.DelayPeriod = tc.delayPeriod
if tc.changeClientID {
connection.ClientId = ibctesting.InvalidID
}
// send and receive packet
- packet := channeltypes.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, defaultTimeoutHeight, 0)
- err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ packet := channeltypes.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, defaultTimeoutHeight, 0)
+ err := path.EndpointA.SendPacket(packet)
suite.Require().NoError(err)
// increment receiving chain's (chainB) time by 2 hours to always pass receive
suite.coordinator.IncrementTimeBy(time.Hour * 2)
suite.coordinator.CommitBlock(suite.chainB)
- err = suite.coordinator.RecvPacket(suite.chainA, suite.chainB, clientA, packet)
+ err = path.EndpointB.RecvPacket(packet)
suite.Require().NoError(err)
packetAckKey := host.PacketAcknowledgementKey(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence())
@@ -363,7 +362,7 @@ func (suite *KeeperTestSuite) TestVerifyPacketAcknowledgement() {
ack = ibcmock.MockFailAcknowledgement
}
- err = suite.chainA.App.IBCKeeper.ConnectionKeeper.VerifyPacketAcknowledgement(
+ err = suite.chainA.App.GetIBCKeeper().ConnectionKeeper.VerifyPacketAcknowledgement(
suite.chainA.GetContext(), connection, malleateHeight(proofHeight, tc.heightDiff), proof,
packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence(), ack.Acknowledgement(),
)
@@ -403,17 +402,18 @@ func (suite *KeeperTestSuite) TestVerifyPacketReceiptAbsence() {
suite.Run(tc.msg, func() {
suite.SetupTest() // reset
- clientA, clientB, connA, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
+ path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.Setup(path)
- connection := suite.chainA.GetConnection(connA)
+ connection := path.EndpointA.GetConnection()
connection.DelayPeriod = tc.delayPeriod
if tc.changeClientID {
connection.ClientId = ibctesting.InvalidID
}
// send, only receive if specified
- packet := channeltypes.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, defaultTimeoutHeight, 0)
- err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ packet := channeltypes.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, defaultTimeoutHeight, 0)
+ err := path.EndpointA.SendPacket(packet)
suite.Require().NoError(err)
if tc.recvAck {
@@ -421,18 +421,18 @@ func (suite *KeeperTestSuite) TestVerifyPacketReceiptAbsence() {
suite.coordinator.IncrementTimeBy(time.Hour * 2)
suite.coordinator.CommitBlock(suite.chainB)
- err = suite.coordinator.RecvPacket(suite.chainA, suite.chainB, clientA, packet)
+ err = path.EndpointB.RecvPacket(packet)
suite.Require().NoError(err)
} else {
// need to update height to prove absence
suite.coordinator.CommitBlock(suite.chainA, suite.chainB)
- suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ path.EndpointA.UpdateClient()
}
packetReceiptKey := host.PacketReceiptKey(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence())
proof, proofHeight := suite.chainB.QueryProof(packetReceiptKey)
- err = suite.chainA.App.IBCKeeper.ConnectionKeeper.VerifyPacketReceiptAbsence(
+ err = suite.chainA.App.GetIBCKeeper().ConnectionKeeper.VerifyPacketReceiptAbsence(
suite.chainA.GetContext(), connection, malleateHeight(proofHeight, tc.heightDiff), proof,
packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence(),
)
@@ -472,30 +472,31 @@ func (suite *KeeperTestSuite) TestVerifyNextSequenceRecv() {
suite.Run(tc.msg, func() {
suite.SetupTest() // reset
- clientA, clientB, connA, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
+ path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.Setup(path)
- connection := suite.chainA.GetConnection(connA)
+ connection := path.EndpointA.GetConnection()
connection.DelayPeriod = tc.delayPeriod
if tc.changeClientID {
connection.ClientId = ibctesting.InvalidID
}
// send and receive packet
- packet := channeltypes.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, defaultTimeoutHeight, 0)
- err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ packet := channeltypes.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, defaultTimeoutHeight, 0)
+ err := path.EndpointA.SendPacket(packet)
suite.Require().NoError(err)
// increment receiving chain's (chainB) time by 2 hours to always pass receive
suite.coordinator.IncrementTimeBy(time.Hour * 2)
suite.coordinator.CommitBlock(suite.chainB)
- err = suite.coordinator.RecvPacket(suite.chainA, suite.chainB, clientA, packet)
+ err = path.EndpointB.RecvPacket(packet)
suite.Require().NoError(err)
nextSeqRecvKey := host.NextSequenceRecvKey(packet.GetDestPort(), packet.GetDestChannel())
proof, proofHeight := suite.chainB.QueryProof(nextSeqRecvKey)
- err = suite.chainA.App.IBCKeeper.ConnectionKeeper.VerifyNextSequenceRecv(
+ err = suite.chainA.App.GetIBCKeeper().ConnectionKeeper.VerifyNextSequenceRecv(
suite.chainA.GetContext(), connection, malleateHeight(proofHeight, tc.heightDiff), proof,
packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence()+tc.offsetSeq,
)
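For orientation (again outside the patch itself), a short sketch of the send/receive flow the verify_test.go hunks above now follow, assuming the endpoint helpers shown in this diff (SendPacket, RecvPacket) and the defaultTimeoutHeight declared at the top of the file; the method name examplePacketRelay is hypothetical.

func (suite *KeeperTestSuite) examplePacketRelay() {
	path := ibctesting.NewPath(suite.chainA, suite.chainB)
	suite.coordinator.Setup(path) // clients, connection and channel opened on both chains

	packet := channeltypes.NewPacket(
		ibctesting.MockPacketData, 1,
		path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID,
		path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID,
		defaultTimeoutHeight, 0,
	)
	err := path.EndpointA.SendPacket(packet) // commits the packet on chainA
	suite.Require().NoError(err)

	// advance chainB's time so the receive cannot hit the packet timeout, then relay
	suite.coordinator.IncrementTimeBy(time.Hour * 2)
	suite.coordinator.CommitBlock(suite.chainB)

	err = path.EndpointB.RecvPacket(packet) // writes the receipt and acknowledgement on chainB
	suite.Require().NoError(err)
}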
diff --git a/modules/core/04-channel/keeper/grpc_query_test.go b/modules/core/04-channel/keeper/grpc_query_test.go
index c8af41fe..88e3a717 100644
--- a/modules/core/04-channel/keeper/grpc_query_test.go
+++ b/modules/core/04-channel/keeper/grpc_query_test.go
@@ -62,16 +62,19 @@ func (suite *KeeperTestSuite) TestQueryChannel() {
{
"success",
func() {
- _, _, connA, connB := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
+ path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.SetupConnections(path)
+ path.SetChannelOrdered()
+
// init channel
- channelA, _, err := suite.coordinator.ChanOpenInit(suite.chainA, suite.chainB, connA, connB, ibctesting.MockPort, ibctesting.MockPort, types.ORDERED)
+ err := path.EndpointA.ChanOpenInit()
suite.Require().NoError(err)
- expChannel = suite.chainA.GetChannel(channelA)
+ expChannel = path.EndpointA.GetChannel()
req = &types.QueryChannelRequest{
- PortId: channelA.PortID,
- ChannelId: channelA.ID,
+ PortId: path.EndpointA.ChannelConfig.PortID,
+ ChannelId: path.EndpointA.ChannelID,
}
},
true,
@@ -126,31 +129,39 @@ func (suite *KeeperTestSuite) TestQueryChannels() {
{
"success",
func() {
- _, _, connA0, connB0, testchannel0, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.Setup(path)
// channel0 on first connection on chainA
counterparty0 := types.Counterparty{
- PortId: connB0.Channels[0].PortID,
- ChannelId: connB0.Channels[0].ID,
+ PortId: path.EndpointB.ChannelConfig.PortID,
+ ChannelId: path.EndpointB.ChannelID,
}
- // channel1 is second channel on first connection on chainA
- testchannel1, _ := suite.coordinator.CreateMockChannels(suite.chainA, suite.chainB, connA0, connB0, types.ORDERED)
+ // path1 creates a second channel on first connection on chainA
+ path1 := ibctesting.NewPath(suite.chainA, suite.chainB)
+ path1.SetChannelOrdered()
+ path1.EndpointA.ClientID = path.EndpointA.ClientID
+ path1.EndpointB.ClientID = path.EndpointB.ClientID
+ path1.EndpointA.ConnectionID = path.EndpointA.ConnectionID
+ path1.EndpointB.ConnectionID = path.EndpointB.ConnectionID
+
+ suite.coordinator.CreateMockChannels(path1)
counterparty1 := types.Counterparty{
- PortId: connB0.Channels[1].PortID,
- ChannelId: connB0.Channels[1].ID,
+ PortId: path1.EndpointB.ChannelConfig.PortID,
+ ChannelId: path1.EndpointB.ChannelID,
}
channel0 := types.NewChannel(
types.OPEN, types.UNORDERED,
- counterparty0, []string{connA0.ID}, testchannel0.Version,
+ counterparty0, []string{path.EndpointA.ConnectionID}, path.EndpointA.ChannelConfig.Version,
)
channel1 := types.NewChannel(
types.OPEN, types.ORDERED,
- counterparty1, []string{connA0.ID}, testchannel1.Version,
+ counterparty1, []string{path.EndpointA.ConnectionID}, path1.EndpointA.ChannelConfig.Version,
)
- idCh0 := types.NewIdentifiedChannel(testchannel0.PortID, testchannel0.ID, channel0)
- idCh1 := types.NewIdentifiedChannel(testchannel1.PortID, testchannel1.ID, channel1)
+ idCh0 := types.NewIdentifiedChannel(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, channel0)
+ idCh1 := types.NewIdentifiedChannel(path1.EndpointA.ChannelConfig.PortID, path1.EndpointA.ChannelID, channel1)
expChannels = []*types.IdentifiedChannel{&idCh0, &idCh1}
@@ -217,36 +228,44 @@ func (suite *KeeperTestSuite) TestQueryConnectionChannels() {
{
"success",
func() {
- _, _, connA0, connB0, testchannel0, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.Setup(path)
// channel0 on first connection on chainA
counterparty0 := types.Counterparty{
- PortId: connB0.Channels[0].PortID,
- ChannelId: connB0.Channels[0].ID,
+ PortId: path.EndpointB.ChannelConfig.PortID,
+ ChannelId: path.EndpointB.ChannelID,
}
- // channel1 is second channel on first connection on chainA
- testchannel1, _ := suite.coordinator.CreateMockChannels(suite.chainA, suite.chainB, connA0, connB0, types.ORDERED)
+ // path1 creates a second channel on first connection on chainA
+ path1 := ibctesting.NewPath(suite.chainA, suite.chainB)
+ path1.SetChannelOrdered()
+ path1.EndpointA.ClientID = path.EndpointA.ClientID
+ path1.EndpointB.ClientID = path.EndpointB.ClientID
+ path1.EndpointA.ConnectionID = path.EndpointA.ConnectionID
+ path1.EndpointB.ConnectionID = path.EndpointB.ConnectionID
+
+ suite.coordinator.CreateMockChannels(path1)
counterparty1 := types.Counterparty{
- PortId: connB0.Channels[1].PortID,
- ChannelId: connB0.Channels[1].ID,
+ PortId: path1.EndpointB.ChannelConfig.PortID,
+ ChannelId: path1.EndpointB.ChannelID,
}
channel0 := types.NewChannel(
types.OPEN, types.UNORDERED,
- counterparty0, []string{connA0.ID}, testchannel0.Version,
+ counterparty0, []string{path.EndpointA.ConnectionID}, path.EndpointA.ChannelConfig.Version,
)
channel1 := types.NewChannel(
types.OPEN, types.ORDERED,
- counterparty1, []string{connA0.ID}, testchannel1.Version,
+ counterparty1, []string{path.EndpointA.ConnectionID}, path.EndpointA.ChannelConfig.Version,
)
- idCh0 := types.NewIdentifiedChannel(testchannel0.PortID, testchannel0.ID, channel0)
- idCh1 := types.NewIdentifiedChannel(testchannel1.PortID, testchannel1.ID, channel1)
+ idCh0 := types.NewIdentifiedChannel(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, channel0)
+ idCh1 := types.NewIdentifiedChannel(path1.EndpointA.ChannelConfig.PortID, path1.EndpointA.ChannelID, channel1)
expChannels = []*types.IdentifiedChannel{&idCh0, &idCh1}
req = &types.QueryConnectionChannelsRequest{
- Connection: connA0.ID,
+ Connection: path.EndpointA.ConnectionID,
Pagination: &query.PageRequest{
Key: nil,
Limit: 2,
@@ -259,7 +278,8 @@ func (suite *KeeperTestSuite) TestQueryConnectionChannels() {
{
"success, empty response",
func() {
- suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.Setup(path)
expChannels = []*types.IdentifiedChannel{}
req = &types.QueryConnectionChannelsRequest{
Connection: "externalConnID",
@@ -345,49 +365,54 @@ func (suite *KeeperTestSuite) TestQueryChannelClientState() {
{
"connection not found",
func() {
- _, _, _, _, channelA, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.Setup(path)
- channel := suite.chainA.GetChannel(channelA)
+ channel := path.EndpointA.GetChannel()
// update channel to reference a connection that does not exist
channel.ConnectionHops[0] = "doesnotexist"
// set connection hops to wrong connection ID
- suite.chainA.App.IBCKeeper.ChannelKeeper.SetChannel(suite.chainA.GetContext(), channelA.PortID, channelA.ID, channel)
+ suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetChannel(suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, channel)
req = &types.QueryChannelClientStateRequest{
- PortId: channelA.PortID,
- ChannelId: channelA.ID,
+ PortId: path.EndpointA.ChannelConfig.PortID,
+ ChannelId: path.EndpointA.ChannelID,
}
}, false,
},
{
"client state for channel's connection not found",
func() {
- _, _, connA, _, channelA, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.Setup(path)
// set connection to empty so clientID is empty
- suite.chainA.App.IBCKeeper.ConnectionKeeper.SetConnection(suite.chainA.GetContext(), connA.ID, connectiontypes.ConnectionEnd{})
+ suite.chainA.App.GetIBCKeeper().ConnectionKeeper.SetConnection(suite.chainA.GetContext(), path.EndpointA.ConnectionID, connectiontypes.ConnectionEnd{})
req = &types.QueryChannelClientStateRequest{
- PortId: channelA.PortID,
- ChannelId: channelA.ID,
+ PortId: path.EndpointA.ChannelConfig.PortID,
+ ChannelId: path.EndpointA.ChannelID,
}
}, false,
},
{
"success",
func() {
- clientA, _, connA, connB := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
+ path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.SetupConnections(path)
+ path.SetChannelOrdered()
+
// init channel
- channelA, _, err := suite.coordinator.ChanOpenInit(suite.chainA, suite.chainB, connA, connB, ibctesting.MockPort, ibctesting.MockPort, types.ORDERED)
+ err := path.EndpointA.ChanOpenInit()
suite.Require().NoError(err)
- expClientState := suite.chainA.GetClientState(clientA)
- expIdentifiedClientState = clienttypes.NewIdentifiedClientState(clientA, expClientState)
+ expClientState := suite.chainA.GetClientState(path.EndpointA.ClientID)
+ expIdentifiedClientState = clienttypes.NewIdentifiedClientState(path.EndpointA.ClientID, expClientState)
req = &types.QueryChannelClientStateRequest{
- PortId: channelA.PortID,
- ChannelId: channelA.ID,
+ PortId: path.EndpointA.ChannelConfig.PortID,
+ ChannelId: path.EndpointA.ChannelID,
}
},
true,
@@ -476,18 +501,19 @@ func (suite *KeeperTestSuite) TestQueryChannelConsensusState() {
{
"connection not found",
func() {
- _, _, _, _, channelA, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.Setup(path)
- channel := suite.chainA.GetChannel(channelA)
+ channel := path.EndpointA.GetChannel()
// update channel to reference a connection that does not exist
channel.ConnectionHops[0] = "doesnotexist"
// set connection hops to wrong connection ID
- suite.chainA.App.IBCKeeper.ChannelKeeper.SetChannel(suite.chainA.GetContext(), channelA.PortID, channelA.ID, channel)
+ suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetChannel(suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, channel)
req = &types.QueryChannelConsensusStateRequest{
- PortId: channelA.PortID,
- ChannelId: channelA.ID,
+ PortId: path.EndpointA.ChannelConfig.PortID,
+ ChannelId: path.EndpointA.ChannelID,
RevisionNumber: 0,
RevisionHeight: 1,
}
@@ -496,11 +522,12 @@ func (suite *KeeperTestSuite) TestQueryChannelConsensusState() {
{
"consensus state for channel's connection not found",
func() {
- _, _, _, _, channelA, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.Setup(path)
req = &types.QueryChannelConsensusStateRequest{
- PortId: channelA.PortID,
- ChannelId: channelA.ID,
+ PortId: path.EndpointA.ChannelConfig.PortID,
+ ChannelId: path.EndpointA.ChannelID,
RevisionNumber: 0,
RevisionHeight: uint64(suite.chainA.GetContext().BlockHeight()), // use current height
}
@@ -509,19 +536,22 @@ func (suite *KeeperTestSuite) TestQueryChannelConsensusState() {
{
"success",
func() {
- clientA, _, connA, connB := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
+ path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.SetupConnections(path)
+ path.SetChannelOrdered()
+
// init channel
- channelA, _, err := suite.coordinator.ChanOpenInit(suite.chainA, suite.chainB, connA, connB, ibctesting.MockPort, ibctesting.MockPort, types.ORDERED)
+ err := path.EndpointA.ChanOpenInit()
suite.Require().NoError(err)
- clientState := suite.chainA.GetClientState(clientA)
- expConsensusState, _ = suite.chainA.GetConsensusState(clientA, clientState.GetLatestHeight())
+ clientState := suite.chainA.GetClientState(path.EndpointA.ClientID)
+ expConsensusState, _ = suite.chainA.GetConsensusState(path.EndpointA.ClientID, clientState.GetLatestHeight())
suite.Require().NotNil(expConsensusState)
- expClientID = clientA
+ expClientID = path.EndpointA.ClientID
req = &types.QueryChannelConsensusStateRequest{
- PortId: channelA.PortID,
- ChannelId: channelA.ID,
+ PortId: path.EndpointA.ChannelConfig.PortID,
+ ChannelId: path.EndpointA.ChannelID,
RevisionNumber: clientState.GetLatestHeight().GetRevisionNumber(),
RevisionHeight: clientState.GetLatestHeight().GetRevisionHeight(),
}
@@ -620,13 +650,14 @@ func (suite *KeeperTestSuite) TestQueryPacketCommitment() {
{
"success",
func() {
- _, _, _, _, channelA, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.Setup(path)
expCommitment = []byte("hash")
- suite.chainA.App.IBCKeeper.ChannelKeeper.SetPacketCommitment(suite.chainA.GetContext(), channelA.PortID, channelA.ID, 1, expCommitment)
+ suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetPacketCommitment(suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, 1, expCommitment)
req = &types.QueryPacketCommitmentRequest{
- PortId: channelA.PortID,
- ChannelId: channelA.ID,
+ PortId: path.EndpointA.ChannelConfig.PortID,
+ ChannelId: path.EndpointA.ChannelID,
Sequence: 1,
}
},
@@ -702,19 +733,20 @@ func (suite *KeeperTestSuite) TestQueryPacketCommitments() {
{
"success",
func() {
- _, _, _, _, channelA, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.Setup(path)
expCommitments = make([]*types.PacketState, 9)
for i := uint64(0); i < 9; i++ {
- commitment := types.NewPacketState(channelA.PortID, channelA.ID, i, []byte(fmt.Sprintf("hash_%d", i)))
- suite.chainA.App.IBCKeeper.ChannelKeeper.SetPacketCommitment(suite.chainA.GetContext(), commitment.PortId, commitment.ChannelId, commitment.Sequence, commitment.Data)
+ commitment := types.NewPacketState(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, i, []byte(fmt.Sprintf("hash_%d", i)))
+ suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetPacketCommitment(suite.chainA.GetContext(), commitment.PortId, commitment.ChannelId, commitment.Sequence, commitment.Data)
expCommitments[i] = &commitment
}
req = &types.QueryPacketCommitmentsRequest{
- PortId: channelA.PortID,
- ChannelId: channelA.ID,
+ PortId: path.EndpointA.ChannelConfig.PortID,
+ ChannelId: path.EndpointA.ChannelID,
Pagination: &query.PageRequest{
Key: nil,
Limit: 11,
@@ -799,12 +831,13 @@ func (suite *KeeperTestSuite) TestQueryPacketReceipt() {
{
"success: receipt not found",
func() {
- _, _, _, _, channelA, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- suite.chainA.App.IBCKeeper.ChannelKeeper.SetPacketReceipt(suite.chainA.GetContext(), channelA.PortID, channelA.ID, 1)
+ path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.Setup(path)
+ suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetPacketReceipt(suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, 1)
req = &types.QueryPacketReceiptRequest{
- PortId: channelA.PortID,
- ChannelId: channelA.ID,
+ PortId: path.EndpointA.ChannelConfig.PortID,
+ ChannelId: path.EndpointA.ChannelID,
Sequence: 3,
}
expReceived = false
@@ -814,12 +847,13 @@ func (suite *KeeperTestSuite) TestQueryPacketReceipt() {
{
"success: receipt found",
func() {
- _, _, _, _, channelA, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- suite.chainA.App.IBCKeeper.ChannelKeeper.SetPacketReceipt(suite.chainA.GetContext(), channelA.PortID, channelA.ID, 1)
+ path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.Setup(path)
+ suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetPacketReceipt(suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, 1)
req = &types.QueryPacketReceiptRequest{
- PortId: channelA.PortID,
- ChannelId: channelA.ID,
+ PortId: path.EndpointA.ChannelConfig.PortID,
+ ChannelId: path.EndpointA.ChannelID,
Sequence: 1,
}
expReceived = true
@@ -911,13 +945,14 @@ func (suite *KeeperTestSuite) TestQueryPacketAcknowledgement() {
{
"success",
func() {
- _, _, _, _, channelA, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.Setup(path)
expAck = []byte("hash")
- suite.chainA.App.IBCKeeper.ChannelKeeper.SetPacketAcknowledgement(suite.chainA.GetContext(), channelA.PortID, channelA.ID, 1, expAck)
+ suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetPacketAcknowledgement(suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, 1, expAck)
req = &types.QueryPacketAcknowledgementRequest{
- PortId: channelA.PortID,
- ChannelId: channelA.ID,
+ PortId: path.EndpointA.ChannelConfig.PortID,
+ ChannelId: path.EndpointA.ChannelID,
Sequence: 1,
}
},
@@ -993,19 +1028,20 @@ func (suite *KeeperTestSuite) TestQueryPacketAcknowledgements() {
{
"success",
func() {
- _, _, _, _, channelA, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.Setup(path)
expAcknowledgements = make([]*types.PacketState, 9)
for i := uint64(0); i < 9; i++ {
- ack := types.NewPacketState(channelA.PortID, channelA.ID, i, []byte(fmt.Sprintf("hash_%d", i)))
- suite.chainA.App.IBCKeeper.ChannelKeeper.SetPacketAcknowledgement(suite.chainA.GetContext(), ack.PortId, ack.ChannelId, ack.Sequence, ack.Data)
+ ack := types.NewPacketState(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, i, []byte(fmt.Sprintf("hash_%d", i)))
+ suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetPacketAcknowledgement(suite.chainA.GetContext(), ack.PortId, ack.ChannelId, ack.Sequence, ack.Data)
expAcknowledgements[i] = &ack
}
req = &types.QueryPacketAcknowledgementsRequest{
- PortId: channelA.PortID,
- ChannelId: channelA.ID,
+ PortId: path.EndpointA.ChannelConfig.PortID,
+ ChannelId: path.EndpointA.ChannelID,
Pagination: &query.PageRequest{
Key: nil,
Limit: 11,
@@ -1089,14 +1125,15 @@ func (suite *KeeperTestSuite) TestQueryUnreceivedPackets() {
{
"basic success unreceived packet commitments",
func() {
- _, _, _, _, channelA, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.Setup(path)
// no ack exists
expSeq = []uint64{1}
req = &types.QueryUnreceivedPacketsRequest{
- PortId: channelA.PortID,
- ChannelId: channelA.ID,
+ PortId: path.EndpointA.ChannelConfig.PortID,
+ ChannelId: path.EndpointA.ChannelID,
PacketCommitmentSequences: []uint64{1},
}
},
@@ -1105,14 +1142,15 @@ func (suite *KeeperTestSuite) TestQueryUnreceivedPackets() {
{
"basic success unreceived packet commitments, nothing to relay",
func() {
- _, _, _, _, channelA, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.Setup(path)
- suite.chainA.App.IBCKeeper.ChannelKeeper.SetPacketReceipt(suite.chainA.GetContext(), channelA.PortID, channelA.ID, 1)
+ suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetPacketReceipt(suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, 1)
expSeq = []uint64{}
req = &types.QueryUnreceivedPacketsRequest{
- PortId: channelA.PortID,
- ChannelId: channelA.ID,
+ PortId: path.EndpointA.ChannelConfig.PortID,
+ ChannelId: path.EndpointA.ChannelID,
PacketCommitmentSequences: []uint64{1},
}
},
@@ -1121,7 +1159,8 @@ func (suite *KeeperTestSuite) TestQueryUnreceivedPackets() {
{
"success multiple unreceived packet commitments",
func() {
- _, _, _, _, channelA, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.Setup(path)
expSeq = []uint64{} // reset
packetCommitments := []uint64{}
@@ -1130,15 +1169,15 @@ func (suite *KeeperTestSuite) TestQueryUnreceivedPackets() {
packetCommitments = append(packetCommitments, seq)
if seq%2 == 0 {
- suite.chainA.App.IBCKeeper.ChannelKeeper.SetPacketReceipt(suite.chainA.GetContext(), channelA.PortID, channelA.ID, seq)
+ suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetPacketReceipt(suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, seq)
} else {
expSeq = append(expSeq, seq)
}
}
req = &types.QueryUnreceivedPacketsRequest{
- PortId: channelA.PortID,
- ChannelId: channelA.ID,
+ PortId: path.EndpointA.ChannelConfig.PortID,
+ ChannelId: path.EndpointA.ChannelID,
PacketCommitmentSequences: packetCommitments,
}
},
@@ -1218,14 +1257,15 @@ func (suite *KeeperTestSuite) TestQueryUnreceivedAcks() {
{
"basic success unreceived packet acks",
func() {
- _, _, _, _, channelA, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.Setup(path)
- suite.chainA.App.IBCKeeper.ChannelKeeper.SetPacketCommitment(suite.chainA.GetContext(), channelA.PortID, channelA.ID, 1, []byte("commitment"))
+ suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetPacketCommitment(suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, 1, []byte("commitment"))
expSeq = []uint64{1}
req = &types.QueryUnreceivedAcksRequest{
- PortId: channelA.PortID,
- ChannelId: channelA.ID,
+ PortId: path.EndpointA.ChannelConfig.PortID,
+ ChannelId: path.EndpointA.ChannelID,
PacketAckSequences: []uint64{1},
}
},
@@ -1234,12 +1274,13 @@ func (suite *KeeperTestSuite) TestQueryUnreceivedAcks() {
{
"basic success unreceived packet acknowledgements, nothing to relay",
func() {
- _, _, _, _, channelA, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.Setup(path)
expSeq = []uint64{}
req = &types.QueryUnreceivedAcksRequest{
- PortId: channelA.PortID,
- ChannelId: channelA.ID,
+ PortId: path.EndpointA.ChannelConfig.PortID,
+ ChannelId: path.EndpointA.ChannelID,
PacketAckSequences: []uint64{1},
}
},
@@ -1248,7 +1289,8 @@ func (suite *KeeperTestSuite) TestQueryUnreceivedAcks() {
{
"success multiple unreceived packet acknowledgements",
func() {
- _, _, _, _, channelA, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.Setup(path)
expSeq = []uint64{} // reset
packetAcks := []uint64{}
@@ -1257,14 +1299,14 @@ func (suite *KeeperTestSuite) TestQueryUnreceivedAcks() {
packetAcks = append(packetAcks, seq)
if seq%2 == 0 {
- suite.chainA.App.IBCKeeper.ChannelKeeper.SetPacketCommitment(suite.chainA.GetContext(), channelA.PortID, channelA.ID, seq, []byte("commitement"))
+ suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetPacketCommitment(suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, seq, []byte("commitement"))
expSeq = append(expSeq, seq)
}
}
req = &types.QueryUnreceivedAcksRequest{
- PortId: channelA.PortID,
- ChannelId: channelA.ID,
+ PortId: path.EndpointA.ChannelConfig.PortID,
+ ChannelId: path.EndpointA.ChannelID,
PacketAckSequences: packetAcks,
}
},
@@ -1342,13 +1384,14 @@ func (suite *KeeperTestSuite) TestQueryNextSequenceReceive() {
{
"success",
func() {
- _, _, _, _, channelA, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.Setup(path)
expSeq = 1
- suite.chainA.App.IBCKeeper.ChannelKeeper.SetNextSequenceRecv(suite.chainA.GetContext(), channelA.PortID, channelA.ID, expSeq)
+ suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetNextSequenceRecv(suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, expSeq)
req = &types.QueryNextSequenceReceiveRequest{
- PortId: channelA.PortID,
- ChannelId: channelA.ID,
+ PortId: path.EndpointA.ChannelConfig.PortID,
+ ChannelId: path.EndpointA.ChannelID,
}
},
true,
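The grpc_query_test.go hunks above open a second channel over an already-established connection by copying identifiers into a fresh path. A minimal sketch of that idiom, using only the fields and helpers visible in this diff (SetChannelOrdered, ClientID, ConnectionID, CreateMockChannels); the method name exampleSecondChannel is hypothetical.

func (suite *KeeperTestSuite) exampleSecondChannel(path *ibctesting.Path) *ibctesting.Path {
	path1 := ibctesting.NewPath(suite.chainA, suite.chainB)
	path1.SetChannelOrdered() // the second channel is ORDERED in the tests above

	// reuse the client and connection identifiers already established for the first path
	path1.EndpointA.ClientID = path.EndpointA.ClientID
	path1.EndpointB.ClientID = path.EndpointB.ClientID
	path1.EndpointA.ConnectionID = path.EndpointA.ConnectionID
	path1.EndpointB.ConnectionID = path.EndpointB.ConnectionID

	// open a second mock channel over the same connection
	suite.coordinator.CreateMockChannels(path1)
	return path1
}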
diff --git a/modules/core/04-channel/keeper/handshake_test.go b/modules/core/04-channel/keeper/handshake_test.go
index a79de391..19535792 100644
--- a/modules/core/04-channel/keeper/handshake_test.go
+++ b/modules/core/04-channel/keeper/handshake_test.go
@@ -24,66 +24,65 @@ type testCase = struct {
// can succeed.
func (suite *KeeperTestSuite) TestChanOpenInit() {
var (
- connA *ibctesting.TestConnection
- connB *ibctesting.TestConnection
+ path *ibctesting.Path
features []string
portCap *capabilitytypes.Capability
)
testCases := []testCase{
{"success", func() {
- _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
+ suite.coordinator.SetupConnections(path)
features = []string{"ORDER_ORDERED", "ORDER_UNORDERED"}
- suite.chainA.CreatePortCapability(suite.chainA.NextTestChannel(connA, ibctesting.MockPort).PortID)
- portCap = suite.chainA.GetPortCapability(suite.chainA.NextTestChannel(connA, ibctesting.MockPort).PortID)
+ suite.chainA.CreatePortCapability(suite.chainA.GetSimApp().ScopedIBCMockKeeper, ibctesting.MockPort)
+ portCap = suite.chainA.GetPortCapability(ibctesting.MockPort)
}, true},
{"channel already exists", func() {
- _, _, connA, connB, _, _ = suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ suite.coordinator.Setup(path)
}, false},
{"connection doesn't exist", func() {
- // any non-nil values of connA and connB are acceptable
- suite.Require().NotNil(connA)
- suite.Require().NotNil(connB)
+ // any non-empty values
+ path.EndpointA.ConnectionID = "connection-0"
+ path.EndpointB.ConnectionID = "connection-0"
}, false},
{"capability is incorrect", func() {
- _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
+ suite.coordinator.SetupConnections(path)
features = []string{"ORDER_ORDERED", "ORDER_UNORDERED"}
portCap = capabilitytypes.NewCapability(3)
}, false},
{"connection version not negotiated", func() {
- _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
+ suite.coordinator.SetupConnections(path)
// modify connA versions
- conn := suite.chainA.GetConnection(connA)
+ conn := path.EndpointA.GetConnection()
version := connectiontypes.NewVersion("2", []string{"ORDER_ORDERED", "ORDER_UNORDERED"})
conn.Versions = append(conn.Versions, version)
- suite.chainA.App.IBCKeeper.ConnectionKeeper.SetConnection(
+ suite.chainA.App.GetIBCKeeper().ConnectionKeeper.SetConnection(
suite.chainA.GetContext(),
- connA.ID, conn,
+ path.EndpointA.ConnectionID, conn,
)
features = []string{"ORDER_ORDERED", "ORDER_UNORDERED"}
- suite.chainA.CreatePortCapability(suite.chainA.NextTestChannel(connA, ibctesting.MockPort).PortID)
- portCap = suite.chainA.GetPortCapability(suite.chainA.NextTestChannel(connA, ibctesting.MockPort).PortID)
+ suite.chainA.CreatePortCapability(suite.chainA.GetSimApp().ScopedIBCMockKeeper, ibctesting.MockPort)
+ portCap = suite.chainA.GetPortCapability(ibctesting.MockPort)
}, false},
{"connection does not support ORDERED channels", func() {
- _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
+ suite.coordinator.SetupConnections(path)
// modify connA versions to only support UNORDERED channels
- conn := suite.chainA.GetConnection(connA)
+ conn := path.EndpointA.GetConnection()
version := connectiontypes.NewVersion("1", []string{"ORDER_UNORDERED"})
conn.Versions = []*connectiontypes.Version{version}
- suite.chainA.App.IBCKeeper.ConnectionKeeper.SetConnection(
+ suite.chainA.App.GetIBCKeeper().ConnectionKeeper.SetConnection(
suite.chainA.GetContext(),
- connA.ID, conn,
+ path.EndpointA.ConnectionID, conn,
)
// NOTE: Opening UNORDERED channels is still expected to pass but ORDERED channels should fail
features = []string{"ORDER_UNORDERED"}
- suite.chainA.CreatePortCapability(suite.chainA.NextTestChannel(connA, ibctesting.MockPort).PortID)
- portCap = suite.chainA.GetPortCapability(suite.chainA.NextTestChannel(connA, ibctesting.MockPort).PortID)
+ suite.chainA.CreatePortCapability(suite.chainA.GetSimApp().ScopedIBCMockKeeper, ibctesting.MockPort)
+ portCap = suite.chainA.GetPortCapability(ibctesting.MockPort)
}, true},
}
@@ -93,14 +92,17 @@ func (suite *KeeperTestSuite) TestChanOpenInit() {
// run test for all types of ordering
for _, order := range []types.Order{types.UNORDERED, types.ORDERED} {
suite.SetupTest() // reset
+ path = ibctesting.NewPath(suite.chainA, suite.chainB)
+ path.EndpointA.ChannelConfig.Order = order
+ path.EndpointB.ChannelConfig.Order = order
+
tc.malleate()
- counterparty := types.NewCounterparty(connB.FirstOrNextTestChannel(ibctesting.MockPort).PortID, connB.FirstOrNextTestChannel(ibctesting.MockPort).ID)
- channelA := connA.FirstOrNextTestChannel(ibctesting.MockPort)
+ counterparty := types.NewCounterparty(ibctesting.MockPort, ibctesting.FirstChannelID)
- channelID, cap, err := suite.chainA.App.IBCKeeper.ChannelKeeper.ChanOpenInit(
- suite.chainA.GetContext(), order, []string{connA.ID},
- channelA.PortID, portCap, counterparty, channelA.Version,
+ channelID, cap, err := suite.chainA.App.GetIBCKeeper().ChannelKeeper.ChanOpenInit(
+ suite.chainA.GetContext(), path.EndpointA.ChannelConfig.Order, []string{path.EndpointA.ConnectionID},
+ path.EndpointA.ChannelConfig.PortID, portCap, counterparty, path.EndpointA.ChannelConfig.Version,
)
// check if order is supported by channel to determine expected behaviour
@@ -118,9 +120,9 @@ func (suite *KeeperTestSuite) TestChanOpenInit() {
suite.Require().NotNil(cap)
suite.Require().Equal(types.FormatChannelIdentifier(0), channelID)
- chanCap, ok := suite.chainA.App.ScopedIBCKeeper.GetCapability(
+ chanCap, ok := suite.chainA.App.GetScopedIBCKeeper().GetCapability(
suite.chainA.GetContext(),
- host.ChannelCapabilityPath(channelA.PortID, channelA.ID),
+ host.ChannelCapabilityPath(path.EndpointA.ChannelConfig.PortID, channelID),
)
suite.Require().True(ok, "could not retrieve channel capability after successful ChanOpenInit")
suite.Require().Equal(chanCap.String(), cap.String(), "channel capability is not correct")
@@ -140,8 +142,7 @@ func (suite *KeeperTestSuite) TestChanOpenInit() {
// ChanOpenTry can succeed.
func (suite *KeeperTestSuite) TestChanOpenTry() {
var (
- connA *ibctesting.TestConnection
- connB *ibctesting.TestConnection
+ path *ibctesting.Path
previousChannelID string
portCap *capabilitytypes.Capability
heightDiff uint64
@@ -149,98 +150,102 @@ func (suite *KeeperTestSuite) TestChanOpenTry() {
testCases := []testCase{
{"success", func() {
- _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
- suite.coordinator.ChanOpenInit(suite.chainA, suite.chainB, connA, connB, ibctesting.MockPort, ibctesting.MockPort, types.ORDERED)
+ suite.coordinator.SetupConnections(path)
+ path.SetChannelOrdered()
+ path.EndpointA.ChanOpenInit()
- suite.chainB.CreatePortCapability(suite.chainB.NextTestChannel(connB, ibctesting.MockPort).PortID)
- portCap = suite.chainB.GetPortCapability(suite.chainB.NextTestChannel(connB, ibctesting.MockPort).PortID)
+ suite.chainB.CreatePortCapability(suite.chainB.GetSimApp().ScopedIBCMockKeeper, ibctesting.MockPort)
+ portCap = suite.chainB.GetPortCapability(ibctesting.MockPort)
}, true},
{"success with crossing hello", func() {
- _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
- _, channelB, err := suite.coordinator.ChanOpenInitOnBothChains(suite.chainA, suite.chainB, connA, connB, ibctesting.MockPort, ibctesting.MockPort, types.ORDERED)
+ suite.coordinator.SetupConnections(path)
+ path.SetChannelOrdered()
+ err := suite.coordinator.ChanOpenInitOnBothChains(path)
suite.Require().NoError(err)
- previousChannelID = channelB.ID
- portCap = suite.chainB.GetPortCapability(suite.chainB.NextTestChannel(connB, ibctesting.MockPort).PortID)
+ previousChannelID = path.EndpointB.ChannelID
+ portCap = suite.chainB.GetPortCapability(ibctesting.MockPort)
}, true},
{"previous channel with invalid state", func() {
- _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
+ suite.coordinator.SetupConnections(path)
// make previous channel have wrong ordering
- suite.coordinator.ChanOpenInit(suite.chainB, suite.chainA, connB, connA, ibctesting.MockPort, ibctesting.MockPort, types.UNORDERED)
+ path.EndpointA.ChanOpenInit()
}, false},
{"connection doesn't exist", func() {
- // any non-nil values of connA and connB are acceptable
- suite.Require().NotNil(connA)
- suite.Require().NotNil(connB)
+ path.EndpointA.ConnectionID = ibctesting.FirstConnectionID
+ path.EndpointB.ConnectionID = ibctesting.FirstConnectionID
// pass capability check
- suite.chainB.CreatePortCapability(connB.FirstOrNextTestChannel(ibctesting.MockPort).PortID)
- portCap = suite.chainB.GetPortCapability(connB.FirstOrNextTestChannel(ibctesting.MockPort).PortID)
+ suite.chainB.CreatePortCapability(suite.chainB.GetSimApp().ScopedIBCMockKeeper, ibctesting.MockPort)
+ portCap = suite.chainB.GetPortCapability(ibctesting.MockPort)
}, false},
{"connection is not OPEN", func() {
- clientA, clientB := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+ suite.coordinator.SetupClients(path)
// pass capability check
- suite.chainB.CreatePortCapability(connB.FirstOrNextTestChannel(ibctesting.MockPort).PortID)
- portCap = suite.chainB.GetPortCapability(connB.FirstOrNextTestChannel(ibctesting.MockPort).PortID)
+ suite.chainB.CreatePortCapability(suite.chainB.GetSimApp().ScopedIBCMockKeeper, ibctesting.MockPort)
+ portCap = suite.chainB.GetPortCapability(ibctesting.MockPort)
- var err error
- connB, connA, err = suite.coordinator.ConnOpenInit(suite.chainB, suite.chainA, clientB, clientA)
+ err := path.EndpointB.ConnOpenInit()
suite.Require().NoError(err)
}, false},
{"consensus state not found", func() {
- _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
- suite.coordinator.ChanOpenInit(suite.chainA, suite.chainB, connA, connB, ibctesting.MockPort, ibctesting.MockPort, types.ORDERED)
+ suite.coordinator.SetupConnections(path)
+ path.SetChannelOrdered()
+ path.EndpointA.ChanOpenInit()
- suite.chainB.CreatePortCapability(suite.chainB.NextTestChannel(connB, ibctesting.MockPort).PortID)
- portCap = suite.chainB.GetPortCapability(suite.chainB.NextTestChannel(connB, ibctesting.MockPort).PortID)
+ suite.chainB.CreatePortCapability(suite.chainB.GetSimApp().ScopedIBCMockKeeper, ibctesting.MockPort)
+ portCap = suite.chainB.GetPortCapability(ibctesting.MockPort)
heightDiff = 3 // consensus state doesn't exist at this height
}, false},
{"channel verification failed", func() {
// not creating a channel on chainA will result in an invalid proof of existence
- _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
- portCap = suite.chainB.GetPortCapability(suite.chainB.NextTestChannel(connB, ibctesting.MockPort).PortID)
+ suite.coordinator.SetupConnections(path)
+ portCap = suite.chainB.GetPortCapability(ibctesting.MockPort)
}, false},
{"port capability not found", func() {
- _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
- suite.coordinator.ChanOpenInit(suite.chainA, suite.chainB, connA, connB, ibctesting.MockPort, ibctesting.MockPort, types.ORDERED)
+ suite.coordinator.SetupConnections(path)
+ path.SetChannelOrdered()
+ path.EndpointA.ChanOpenInit()
portCap = capabilitytypes.NewCapability(3)
}, false},
{"connection version not negotiated", func() {
- _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
- suite.coordinator.ChanOpenInit(suite.chainA, suite.chainB, connA, connB, ibctesting.MockPort, ibctesting.MockPort, types.ORDERED)
+ suite.coordinator.SetupConnections(path)
+ path.SetChannelOrdered()
+ path.EndpointA.ChanOpenInit()
// modify connB versions
- conn := suite.chainB.GetConnection(connB)
+ conn := path.EndpointB.GetConnection()
version := connectiontypes.NewVersion("2", []string{"ORDER_ORDERED", "ORDER_UNORDERED"})
conn.Versions = append(conn.Versions, version)
- suite.chainB.App.IBCKeeper.ConnectionKeeper.SetConnection(
+ suite.chainB.App.GetIBCKeeper().ConnectionKeeper.SetConnection(
suite.chainB.GetContext(),
- connB.ID, conn,
+ path.EndpointB.ConnectionID, conn,
)
- suite.chainB.CreatePortCapability(suite.chainB.NextTestChannel(connB, ibctesting.MockPort).PortID)
- portCap = suite.chainB.GetPortCapability(suite.chainB.NextTestChannel(connB, ibctesting.MockPort).PortID)
+ suite.chainB.CreatePortCapability(suite.chainB.GetSimApp().ScopedIBCMockKeeper, ibctesting.MockPort)
+ portCap = suite.chainB.GetPortCapability(ibctesting.MockPort)
}, false},
{"connection does not support ORDERED channels", func() {
- _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
- suite.coordinator.ChanOpenInit(suite.chainA, suite.chainB, connA, connB, ibctesting.MockPort, ibctesting.MockPort, types.ORDERED)
+ suite.coordinator.SetupConnections(path)
+ path.SetChannelOrdered()
+ path.EndpointA.ChanOpenInit()
// modify connA versions to only support UNORDERED channels
- conn := suite.chainA.GetConnection(connA)
+ conn := path.EndpointA.GetConnection()
version := connectiontypes.NewVersion("1", []string{"ORDER_UNORDERED"})
conn.Versions = []*connectiontypes.Version{version}
- suite.chainA.App.IBCKeeper.ConnectionKeeper.SetConnection(
+ suite.chainA.App.GetIBCKeeper().ConnectionKeeper.SetConnection(
suite.chainA.GetContext(),
- connA.ID, conn,
+ path.EndpointA.ConnectionID, conn,
)
- suite.chainA.CreatePortCapability(suite.chainA.NextTestChannel(connA, ibctesting.MockPort).PortID)
- portCap = suite.chainA.GetPortCapability(suite.chainA.NextTestChannel(connA, ibctesting.MockPort).PortID)
+ suite.chainA.CreatePortCapability(suite.chainA.GetSimApp().ScopedIBCMockKeeper, ibctesting.MockPort)
+ portCap = suite.chainA.GetPortCapability(ibctesting.MockPort)
}, false},
}
@@ -250,18 +255,24 @@ func (suite *KeeperTestSuite) TestChanOpenTry() {
suite.SetupTest() // reset
heightDiff = 0 // must be explicitly changed in malleate
previousChannelID = ""
+ path = ibctesting.NewPath(suite.chainA, suite.chainB)
tc.malleate()
- channelA := connA.FirstOrNextTestChannel(ibctesting.MockPort)
- channelB := connB.FirstOrNextTestChannel(ibctesting.MockPort)
- counterparty := types.NewCounterparty(channelA.PortID, channelA.ID)
+
+ if path.EndpointB.ClientID != "" {
+ // ensure client is up to date
+ err := path.EndpointB.UpdateClient()
+ suite.Require().NoError(err)
+ }
+
+ counterparty := types.NewCounterparty(path.EndpointB.ChannelConfig.PortID, ibctesting.FirstChannelID)
channelKey := host.ChannelKey(counterparty.PortId, counterparty.ChannelId)
proof, proofHeight := suite.chainA.QueryProof(channelKey)
- channelID, cap, err := suite.chainB.App.IBCKeeper.ChannelKeeper.ChanOpenTry(
- suite.chainB.GetContext(), types.ORDERED, []string{connB.ID},
- channelB.PortID, previousChannelID, portCap, counterparty, channelB.Version, connA.FirstOrNextTestChannel(ibctesting.MockPort).Version,
+ channelID, cap, err := suite.chainB.App.GetIBCKeeper().ChannelKeeper.ChanOpenTry(
+ suite.chainB.GetContext(), types.ORDERED, []string{path.EndpointB.ConnectionID},
+ path.EndpointB.ChannelConfig.PortID, previousChannelID, portCap, counterparty, path.EndpointB.ChannelConfig.Version, path.EndpointA.ChannelConfig.Version,
proof, malleateHeight(proofHeight, heightDiff),
)
@@ -269,9 +280,9 @@ func (suite *KeeperTestSuite) TestChanOpenTry() {
suite.Require().NoError(err)
suite.Require().NotNil(cap)
- chanCap, ok := suite.chainB.App.ScopedIBCKeeper.GetCapability(
+ chanCap, ok := suite.chainB.App.GetScopedIBCKeeper().GetCapability(
suite.chainB.GetContext(),
- host.ChannelCapabilityPath(channelB.PortID, channelID),
+ host.ChannelCapabilityPath(path.EndpointB.ChannelConfig.PortID, channelID),
)
suite.Require().True(ok, "could not retrieve channel capability after successful ChanOpenTry")
suite.Require().Equal(chanCap.String(), cap.String(), "channel capability is not correct")
@@ -287,8 +298,7 @@ func (suite *KeeperTestSuite) TestChanOpenTry() {
// call is occurring on chainA.
func (suite *KeeperTestSuite) TestChanOpenAck() {
var (
- connA *ibctesting.TestConnection
- connB *ibctesting.TestConnection
+ path *ibctesting.Path
counterpartyChannelID string
channelCap *capabilitytypes.Capability
heightDiff uint64
@@ -296,111 +306,122 @@ func (suite *KeeperTestSuite) TestChanOpenAck() {
testCases := []testCase{
{"success", func() {
- _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
- channelA, channelB, err := suite.coordinator.ChanOpenInit(suite.chainA, suite.chainB, connA, connB, ibctesting.MockPort, ibctesting.MockPort, types.ORDERED)
+ suite.coordinator.SetupConnections(path)
+ path.SetChannelOrdered()
+ err := path.EndpointA.ChanOpenInit()
suite.Require().NoError(err)
- err = suite.coordinator.ChanOpenTry(suite.chainB, suite.chainA, channelB, channelA, connB, types.ORDERED)
+ err = path.EndpointB.ChanOpenTry()
suite.Require().NoError(err)
- channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ channelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
}, true},
{"success with empty stored counterparty channel ID", func() {
- _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
- channelA, channelB, err := suite.coordinator.ChanOpenInit(suite.chainA, suite.chainB, connA, connB, ibctesting.MockPort, ibctesting.MockPort, types.ORDERED)
+ suite.coordinator.SetupConnections(path)
+ path.SetChannelOrdered()
+
+ err := path.EndpointA.ChanOpenInit()
suite.Require().NoError(err)
- err = suite.coordinator.ChanOpenTry(suite.chainB, suite.chainA, channelB, channelA, connB, types.ORDERED)
+ err = path.EndpointB.ChanOpenTry()
suite.Require().NoError(err)
// set the channel's counterparty channel identifier to empty string
- channel := suite.chainA.GetChannel(channelA)
+ channel := path.EndpointA.GetChannel()
channel.Counterparty.ChannelId = ""
// use a different channel identifier
- counterpartyChannelID = channelB.ID
+ counterpartyChannelID = path.EndpointB.ChannelID
- suite.chainA.App.IBCKeeper.ChannelKeeper.SetChannel(suite.chainA.GetContext(), channelA.PortID, channelA.ID, channel)
+ suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetChannel(suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, channel)
- channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ channelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
}, true},
{"channel doesn't exist", func() {}, false},
{"channel state is not INIT or TRYOPEN", func() {
// create fully open channels on both chains
- _, _, connA, connB, _, _ = suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- channelA := connA.Channels[0]
- channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ suite.coordinator.Setup(path)
+ channelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
}, false},
{"connection not found", func() {
- _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
- channelA, channelB, err := suite.coordinator.ChanOpenInit(suite.chainA, suite.chainB, connA, connB, ibctesting.MockPort, ibctesting.MockPort, types.ORDERED)
+ suite.coordinator.SetupConnections(path)
+ path.SetChannelOrdered()
+ err := path.EndpointA.ChanOpenInit()
suite.Require().NoError(err)
- err = suite.coordinator.ChanOpenTry(suite.chainB, suite.chainA, channelB, channelA, connB, types.ORDERED)
+ err = path.EndpointB.ChanOpenTry()
suite.Require().NoError(err)
- channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ channelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
// set the channel's connection hops to wrong connection ID
- channel := suite.chainA.GetChannel(channelA)
+ channel := path.EndpointA.GetChannel()
channel.ConnectionHops[0] = "doesnotexist"
- suite.chainA.App.IBCKeeper.ChannelKeeper.SetChannel(suite.chainA.GetContext(), channelA.PortID, channelA.ID, channel)
+ suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetChannel(suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, channel)
}, false},
{"connection is not OPEN", func() {
- clientA, clientB := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+ suite.coordinator.SetupClients(path)
- var err error
- connA, connB, err = suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB)
+ err := path.EndpointA.ConnOpenInit()
suite.Require().NoError(err)
// create channel in init
- channelA, _, err := suite.coordinator.ChanOpenInit(suite.chainA, suite.chainB, connA, connB, ibctesting.MockPort, ibctesting.MockPort, types.ORDERED)
+ path.SetChannelOrdered()
+
+ err = path.EndpointA.ChanOpenInit()
suite.Require().NoError(err)
- suite.chainA.CreateChannelCapability(channelA.PortID, channelA.ID)
- channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ suite.chainA.CreateChannelCapability(suite.chainA.GetSimApp().ScopedIBCMockKeeper, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
+ channelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
}, false},
{"consensus state not found", func() {
- _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
- channelA, channelB, err := suite.coordinator.ChanOpenInit(suite.chainA, suite.chainB, connA, connB, ibctesting.MockPort, ibctesting.MockPort, types.ORDERED)
+ suite.coordinator.SetupConnections(path)
+ path.SetChannelOrdered()
+
+ err := path.EndpointA.ChanOpenInit()
suite.Require().NoError(err)
- err = suite.coordinator.ChanOpenTry(suite.chainB, suite.chainA, channelB, channelA, connB, types.ORDERED)
+ err = path.EndpointB.ChanOpenTry()
suite.Require().NoError(err)
- channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ channelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
heightDiff = 3 // consensus state doesn't exist at this height
}, false},
{"invalid counterparty channel identifier", func() {
- _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
- channelA, channelB, err := suite.coordinator.ChanOpenInit(suite.chainA, suite.chainB, connA, connB, ibctesting.MockPort, ibctesting.MockPort, types.ORDERED)
+ suite.coordinator.SetupConnections(path)
+ path.SetChannelOrdered()
+
+ err := path.EndpointA.ChanOpenInit()
suite.Require().NoError(err)
- err = suite.coordinator.ChanOpenTry(suite.chainB, suite.chainA, channelB, channelA, connB, types.ORDERED)
+ err = path.EndpointB.ChanOpenTry()
suite.Require().NoError(err)
counterpartyChannelID = "otheridentifier"
- channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ channelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
}, false},
{"channel verification failed", func() {
// chainB is INIT, chainA in TRYOPEN
- _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
- channelB, channelA, err := suite.coordinator.ChanOpenInit(suite.chainB, suite.chainA, connB, connA, ibctesting.MockPort, ibctesting.MockPort, types.ORDERED)
+ suite.coordinator.SetupConnections(path)
+ path.SetChannelOrdered()
+
+ err := path.EndpointB.ChanOpenInit()
suite.Require().NoError(err)
- err = suite.coordinator.ChanOpenTry(suite.chainA, suite.chainB, channelA, channelB, connA, types.ORDERED)
+ err = path.EndpointA.ChanOpenTry()
suite.Require().NoError(err)
- channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ channelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
}, false},
{"channel capability not found", func() {
- _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
- channelA, channelB, err := suite.coordinator.ChanOpenInit(suite.chainA, suite.chainB, connA, connB, ibctesting.MockPort, ibctesting.MockPort, types.ORDERED)
+ suite.coordinator.SetupConnections(path)
+ path.SetChannelOrdered()
+ err := path.EndpointA.ChanOpenInit()
suite.Require().NoError(err)
- suite.coordinator.ChanOpenTry(suite.chainB, suite.chainA, channelB, channelA, connB, types.ORDERED)
+ path.EndpointB.ChanOpenTry()
channelCap = capabilitytypes.NewCapability(6)
}, false},
@@ -412,21 +433,25 @@ func (suite *KeeperTestSuite) TestChanOpenAck() {
suite.SetupTest() // reset
counterpartyChannelID = "" // must be explicitly changed in malleate
heightDiff = 0 // must be explicitly changed
+ path = ibctesting.NewPath(suite.chainA, suite.chainB)
tc.malleate()
- channelA := connA.FirstOrNextTestChannel(ibctesting.MockPort)
- channelB := connB.FirstOrNextTestChannel(ibctesting.MockPort)
-
if counterpartyChannelID == "" {
- counterpartyChannelID = channelB.ID
+ counterpartyChannelID = ibctesting.FirstChannelID
+ }
+
+ if path.EndpointA.ClientID != "" {
+ // ensure client is up to date
+ err := path.EndpointA.UpdateClient()
+ suite.Require().NoError(err)
}
- channelKey := host.ChannelKey(channelB.PortID, channelB.ID)
+ channelKey := host.ChannelKey(path.EndpointB.ChannelConfig.PortID, ibctesting.FirstChannelID)
proof, proofHeight := suite.chainB.QueryProof(channelKey)
- err := suite.chainA.App.IBCKeeper.ChannelKeeper.ChanOpenAck(
- suite.chainA.GetContext(), channelA.PortID, channelA.ID, channelCap, channelB.Version, counterpartyChannelID,
+ err := suite.chainA.App.GetIBCKeeper().ChannelKeeper.ChanOpenAck(
+ suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, channelCap, path.EndpointB.ChannelConfig.Version, counterpartyChannelID,
proof, malleateHeight(proofHeight, heightDiff),
)
@@ -444,95 +469,102 @@ func (suite *KeeperTestSuite) TestChanOpenAck() {
// call is occurring on chainB.
func (suite *KeeperTestSuite) TestChanOpenConfirm() {
var (
- connA *ibctesting.TestConnection
- connB *ibctesting.TestConnection
+ path *ibctesting.Path
channelCap *capabilitytypes.Capability
heightDiff uint64
)
testCases := []testCase{
{"success", func() {
- _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
- channelA, channelB, err := suite.coordinator.ChanOpenInit(suite.chainA, suite.chainB, connA, connB, ibctesting.MockPort, ibctesting.MockPort, types.ORDERED)
+ suite.coordinator.SetupConnections(path)
+ path.SetChannelOrdered()
+
+ err := path.EndpointA.ChanOpenInit()
suite.Require().NoError(err)
- err = suite.coordinator.ChanOpenTry(suite.chainB, suite.chainA, channelB, channelA, connB, types.ORDERED)
+ err = path.EndpointB.ChanOpenTry()
suite.Require().NoError(err)
- err = suite.coordinator.ChanOpenAck(suite.chainA, suite.chainB, channelA, channelB)
+ err = path.EndpointA.ChanOpenAck()
suite.Require().NoError(err)
- channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
+ channelCap = suite.chainB.GetChannelCapability(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID)
}, true},
{"channel doesn't exist", func() {}, false},
{"channel state is not TRYOPEN", func() {
// create fully open channels on both chains
- _, _, connA, connB, _, _ = suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- channelB := connB.Channels[0]
- channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
+ suite.coordinator.Setup(path)
+ channelCap = suite.chainB.GetChannelCapability(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID)
}, false},
{"connection not found", func() {
- _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
- channelA, channelB, err := suite.coordinator.ChanOpenInit(suite.chainA, suite.chainB, connA, connB, ibctesting.MockPort, ibctesting.MockPort, types.ORDERED)
+ suite.coordinator.SetupConnections(path)
+ path.SetChannelOrdered()
+
+ err := path.EndpointA.ChanOpenInit()
suite.Require().NoError(err)
- err = suite.coordinator.ChanOpenTry(suite.chainB, suite.chainA, channelB, channelA, connB, types.ORDERED)
+ err = path.EndpointB.ChanOpenTry()
suite.Require().NoError(err)
- err = suite.coordinator.ChanOpenAck(suite.chainA, suite.chainB, channelA, channelB)
+ err = path.EndpointA.ChanOpenAck()
suite.Require().NoError(err)
- channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
+ channelCap = suite.chainB.GetChannelCapability(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID)
// set the channel's connection hops to wrong connection ID
- channel := suite.chainB.GetChannel(channelB)
+ channel := path.EndpointB.GetChannel()
channel.ConnectionHops[0] = "doesnotexist"
- suite.chainB.App.IBCKeeper.ChannelKeeper.SetChannel(suite.chainB.GetContext(), channelB.PortID, channelB.ID, channel)
+ suite.chainB.App.GetIBCKeeper().ChannelKeeper.SetChannel(suite.chainB.GetContext(), path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, channel)
}, false},
{"connection is not OPEN", func() {
- clientA, clientB := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+ suite.coordinator.SetupClients(path)
- var err error
- connA, connB, err = suite.coordinator.ConnOpenInit(suite.chainB, suite.chainA, clientB, clientA)
+ err := path.EndpointB.ConnOpenInit()
suite.Require().NoError(err)
- channelB := connB.FirstOrNextTestChannel(ibctesting.MockPort)
- suite.chainB.CreateChannelCapability(channelB.PortID, channelB.ID)
- channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
+
+ suite.chainB.CreateChannelCapability(suite.chainB.GetSimApp().ScopedIBCMockKeeper, path.EndpointB.ChannelConfig.PortID, ibctesting.FirstChannelID)
+ channelCap = suite.chainB.GetChannelCapability(path.EndpointB.ChannelConfig.PortID, ibctesting.FirstChannelID)
}, false},
{"consensus state not found", func() {
- _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
- channelA, channelB, err := suite.coordinator.ChanOpenInit(suite.chainA, suite.chainB, connA, connB, ibctesting.MockPort, ibctesting.MockPort, types.ORDERED)
+ suite.coordinator.SetupConnections(path)
+ path.SetChannelOrdered()
+
+ err := path.EndpointA.ChanOpenInit()
suite.Require().NoError(err)
- err = suite.coordinator.ChanOpenTry(suite.chainB, suite.chainA, channelB, channelA, connB, types.ORDERED)
+ err = path.EndpointB.ChanOpenTry()
suite.Require().NoError(err)
- err = suite.coordinator.ChanOpenAck(suite.chainA, suite.chainB, channelA, channelB)
+ err = path.EndpointA.ChanOpenAck()
suite.Require().NoError(err)
- channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
+ channelCap = suite.chainB.GetChannelCapability(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID)
heightDiff = 3
}, false},
{"channel verification failed", func() {
// chainA is INIT, chainB in TRYOPEN
- _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
- channelA, channelB, err := suite.coordinator.ChanOpenInit(suite.chainA, suite.chainB, connA, connB, ibctesting.MockPort, ibctesting.MockPort, types.ORDERED)
+ suite.coordinator.SetupConnections(path)
+ path.SetChannelOrdered()
+
+ err := path.EndpointA.ChanOpenInit()
suite.Require().NoError(err)
- err = suite.coordinator.ChanOpenTry(suite.chainB, suite.chainA, channelB, channelA, connB, types.ORDERED)
+ err = path.EndpointB.ChanOpenTry()
suite.Require().NoError(err)
- channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
+ channelCap = suite.chainB.GetChannelCapability(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID)
}, false},
{"channel capability not found", func() {
- _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
- channelA, channelB, err := suite.coordinator.ChanOpenInit(suite.chainA, suite.chainB, connA, connB, ibctesting.MockPort, ibctesting.MockPort, types.ORDERED)
+ suite.coordinator.SetupConnections(path)
+ path.SetChannelOrdered()
+
+ err := path.EndpointA.ChanOpenInit()
suite.Require().NoError(err)
- err = suite.coordinator.ChanOpenTry(suite.chainB, suite.chainA, channelB, channelA, connB, types.ORDERED)
+ err = path.EndpointB.ChanOpenTry()
suite.Require().NoError(err)
- err = suite.coordinator.ChanOpenAck(suite.chainA, suite.chainB, channelA, channelB)
+ err = path.EndpointA.ChanOpenAck()
suite.Require().NoError(err)
channelCap = capabilitytypes.NewCapability(6)
@@ -544,17 +576,22 @@ func (suite *KeeperTestSuite) TestChanOpenConfirm() {
suite.Run(fmt.Sprintf("Case %s", tc.msg), func() {
suite.SetupTest() // reset
heightDiff = 0 // must be explicitly changed
+ path = ibctesting.NewPath(suite.chainA, suite.chainB)
tc.malleate()
- channelA := connA.FirstOrNextTestChannel(ibctesting.MockPort)
- channelB := connB.FirstOrNextTestChannel(ibctesting.MockPort)
+ if path.EndpointB.ClientID != "" {
+ // ensure client is up to date
+ err := path.EndpointB.UpdateClient()
+ suite.Require().NoError(err)
+ }
- channelKey := host.ChannelKey(channelA.PortID, channelA.ID)
+ channelKey := host.ChannelKey(path.EndpointA.ChannelConfig.PortID, ibctesting.FirstChannelID)
proof, proofHeight := suite.chainA.QueryProof(channelKey)
- err := suite.chainB.App.IBCKeeper.ChannelKeeper.ChanOpenConfirm(
- suite.chainB.GetContext(), channelB.PortID, channelB.ID,
+ err := suite.chainB.App.GetIBCKeeper().ChannelKeeper.ChanOpenConfirm(
+ suite.chainB.GetContext(), path.EndpointB.ChannelConfig.PortID, ibctesting.FirstChannelID,
channelCap, proof, malleateHeight(proofHeight, heightDiff),
)
@@ -571,62 +608,60 @@ func (suite *KeeperTestSuite) TestChanOpenConfirm() {
// ChanCloseInit. Both chains will use message passing to setup OPEN channels.
func (suite *KeeperTestSuite) TestChanCloseInit() {
var (
- connA *ibctesting.TestConnection
- connB *ibctesting.TestConnection
+ path *ibctesting.Path
channelCap *capabilitytypes.Capability
)
testCases := []testCase{
{"success", func() {
- _, _, connA, connB, _, _ = suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- channelA := connA.Channels[0]
- channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ suite.coordinator.Setup(path)
+ channelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
}, true},
{"channel doesn't exist", func() {
// any non-nil values work for connections
- suite.Require().NotNil(connA)
- suite.Require().NotNil(connB)
- channelA := connA.FirstOrNextTestChannel(ibctesting.MockPort)
+ path.EndpointA.ConnectionID = ibctesting.FirstConnectionID
+ path.EndpointB.ConnectionID = ibctesting.FirstConnectionID
+
+ path.EndpointA.ChannelID = ibctesting.FirstChannelID
+ path.EndpointB.ChannelID = ibctesting.FirstChannelID
// ensure channel capability check passes
- suite.chainA.CreateChannelCapability(channelA.PortID, channelA.ID)
- channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ suite.chainA.CreateChannelCapability(suite.chainA.GetSimApp().ScopedIBCMockKeeper, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
+ channelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
}, false},
{"channel state is CLOSED", func() {
- _, _, connA, connB, _, _ = suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- channelA := connA.Channels[0]
- channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ suite.coordinator.Setup(path)
+ channelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
// close channel
- err := suite.coordinator.SetChannelClosed(suite.chainA, suite.chainB, channelA)
+ err := path.EndpointA.SetChannelClosed()
suite.Require().NoError(err)
}, false},
{"connection not found", func() {
- _, _, connA, connB, _, _ = suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- channelA := connA.Channels[0]
- channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ suite.coordinator.Setup(path)
+ channelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
// set the channel's connection hops to wrong connection ID
- channel := suite.chainA.GetChannel(channelA)
+ channel := path.EndpointA.GetChannel()
channel.ConnectionHops[0] = "doesnotexist"
- suite.chainA.App.IBCKeeper.ChannelKeeper.SetChannel(suite.chainA.GetContext(), channelA.PortID, channelA.ID, channel)
+ suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetChannel(suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, channel)
}, false},
{"connection is not OPEN", func() {
- clientA, clientB := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+ suite.coordinator.SetupClients(path)
- var err error
- connA, connB, err = suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB)
+ err := path.EndpointA.ConnOpenInit()
suite.Require().NoError(err)
// create channel in init
- channelA, _, err := suite.coordinator.ChanOpenInit(suite.chainA, suite.chainB, connA, connB, ibctesting.MockPort, ibctesting.MockPort, types.ORDERED)
+ path.SetChannelOrdered()
+ err = path.EndpointA.ChanOpenInit()
// ensure channel capability check passes
- suite.chainA.CreateChannelCapability(channelA.PortID, channelA.ID)
- channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ suite.chainA.CreateChannelCapability(suite.chainA.GetSimApp().ScopedIBCMockKeeper, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
+ channelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
}, false},
{"channel capability not found", func() {
- _, _, connA, connB, _, _ = suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ suite.coordinator.Setup(path)
channelCap = capabilitytypes.NewCapability(3)
}, false},
}
@@ -635,13 +670,12 @@ func (suite *KeeperTestSuite) TestChanCloseInit() {
tc := tc
suite.Run(fmt.Sprintf("Case %s", tc.msg), func() {
suite.SetupTest() // reset
+ path = ibctesting.NewPath(suite.chainA, suite.chainB)
tc.malleate()
- channelA := connA.FirstOrNextTestChannel(ibctesting.MockPort)
-
- err := suite.chainA.App.IBCKeeper.ChannelKeeper.ChanCloseInit(
- suite.chainA.GetContext(), channelA.PortID, channelA.ID, channelCap,
+ err := suite.chainA.App.GetIBCKeeper().ChannelKeeper.ChanCloseInit(
+ suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, ibctesting.FirstChannelID, channelCap,
)
if tc.expPass {
@@ -658,82 +692,78 @@ func (suite *KeeperTestSuite) TestChanCloseInit() {
// bypassed on chainA by setting the channel state in the ChannelKeeper.
func (suite *KeeperTestSuite) TestChanCloseConfirm() {
var (
- connA *ibctesting.TestConnection
- connB *ibctesting.TestConnection
- channelA ibctesting.TestChannel
- channelB ibctesting.TestChannel
+ path *ibctesting.Path
channelCap *capabilitytypes.Capability
heightDiff uint64
)
testCases := []testCase{
{"success", func() {
- _, _, connA, connB, channelA, channelB = suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
+ suite.coordinator.Setup(path)
+ channelCap = suite.chainB.GetChannelCapability(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID)
- err := suite.coordinator.SetChannelClosed(suite.chainA, suite.chainB, channelA)
+ err := path.EndpointA.SetChannelClosed()
suite.Require().NoError(err)
}, true},
{"channel doesn't exist", func() {
// any non-nil values work for connections
- suite.Require().NotNil(connA)
- suite.Require().NotNil(connB)
- channelB = connB.FirstOrNextTestChannel(ibctesting.MockPort)
+ path.EndpointA.ChannelID = ibctesting.FirstChannelID
+ path.EndpointB.ChannelID = ibctesting.FirstChannelID
// ensure channel capability check passes
- suite.chainB.CreateChannelCapability(channelB.PortID, channelB.ID)
- channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
+ suite.chainB.CreateChannelCapability(suite.chainB.GetSimApp().ScopedIBCMockKeeper, path.EndpointB.ChannelConfig.PortID, ibctesting.FirstChannelID)
+ channelCap = suite.chainB.GetChannelCapability(path.EndpointB.ChannelConfig.PortID, ibctesting.FirstChannelID)
}, false},
{"channel state is CLOSED", func() {
- _, _, connA, connB, _, channelB = suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
+ suite.coordinator.Setup(path)
+ channelCap = suite.chainB.GetChannelCapability(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID)
- err := suite.coordinator.SetChannelClosed(suite.chainB, suite.chainA, channelB)
+ err := path.EndpointB.SetChannelClosed()
suite.Require().NoError(err)
}, false},
{"connection not found", func() {
- _, _, connA, connB, _, channelB = suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
+ suite.coordinator.Setup(path)
+ channelCap = suite.chainB.GetChannelCapability(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID)
// set the channel's connection hops to wrong connection ID
- channel := suite.chainB.GetChannel(channelB)
+ channel := path.EndpointB.GetChannel()
channel.ConnectionHops[0] = "doesnotexist"
- suite.chainB.App.IBCKeeper.ChannelKeeper.SetChannel(suite.chainB.GetContext(), channelB.PortID, channelB.ID, channel)
+ suite.chainB.App.GetIBCKeeper().ChannelKeeper.SetChannel(suite.chainB.GetContext(), path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, channel)
}, false},
{"connection is not OPEN", func() {
- clientA, clientB := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+ suite.coordinator.SetupClients(path)
- var err error
- connB, connA, err = suite.coordinator.ConnOpenInit(suite.chainB, suite.chainA, clientB, clientA)
+ err := path.EndpointB.ConnOpenInit()
suite.Require().NoError(err)
// create channel in init
- channelB, _, err := suite.coordinator.ChanOpenInit(suite.chainB, suite.chainA, connB, connA, ibctesting.MockPort, ibctesting.MockPort, types.ORDERED)
+ path.SetChannelOrdered()
+ err = path.EndpointB.ChanOpenInit()
suite.Require().NoError(err)
// ensure channel capability check passes
- suite.chainB.CreateChannelCapability(channelB.PortID, channelB.ID)
- channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
+ suite.chainB.CreateChannelCapability(suite.chainB.GetSimApp().ScopedIBCMockKeeper, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID)
+ channelCap = suite.chainB.GetChannelCapability(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID)
}, false},
{"consensus state not found", func() {
- _, _, connA, connB, channelA, channelB = suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
+ suite.coordinator.Setup(path)
+ channelCap = suite.chainB.GetChannelCapability(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID)
- err := suite.coordinator.SetChannelClosed(suite.chainA, suite.chainB, channelA)
+ err := path.EndpointA.SetChannelClosed()
suite.Require().NoError(err)
heightDiff = 3
}, false},
{"channel verification failed", func() {
// channel not closed
- _, _, connA, connB, _, channelB = suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
+ suite.coordinator.Setup(path)
+ channelCap = suite.chainB.GetChannelCapability(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID)
}, false},
{"channel capability not found", func() {
- _, _, connA, connB, channelA, channelB = suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
+ suite.coordinator.Setup(path)
+ channelCap = suite.chainB.GetChannelCapability(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID)
- err := suite.coordinator.SetChannelClosed(suite.chainA, suite.chainB, channelA)
+ err := path.EndpointA.SetChannelClosed()
suite.Require().NoError(err)
channelCap = capabilitytypes.NewCapability(3)
@@ -745,17 +775,15 @@ func (suite *KeeperTestSuite) TestChanCloseConfirm() {
suite.Run(fmt.Sprintf("Case %s", tc.msg), func() {
suite.SetupTest() // reset
heightDiff = 0 // must explicitly be changed
+ path = ibctesting.NewPath(suite.chainA, suite.chainB)
tc.malleate()
- channelA = connA.FirstOrNextTestChannel(ibctesting.MockPort)
- channelB = connB.FirstOrNextTestChannel(ibctesting.MockPort)
-
- channelKey := host.ChannelKey(channelA.PortID, channelA.ID)
+ channelKey := host.ChannelKey(path.EndpointA.ChannelConfig.PortID, ibctesting.FirstChannelID)
proof, proofHeight := suite.chainA.QueryProof(channelKey)
- err := suite.chainB.App.IBCKeeper.ChannelKeeper.ChanCloseConfirm(
- suite.chainB.GetContext(), channelB.PortID, channelB.ID, channelCap,
+ err := suite.chainB.App.GetIBCKeeper().ChannelKeeper.ChanCloseConfirm(
+ suite.chainB.GetContext(), path.EndpointB.ChannelConfig.PortID, ibctesting.FirstChannelID, channelCap,
proof, malleateHeight(proofHeight, heightDiff),
)
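The handshake hunks above all converge on the same Path-based setup: build a fresh ibctesting.Path inside each test run, drive the handshake through the path's endpoints, and read port and channel identifiers from the endpoint configuration instead of TestChannel values. The following Go sketch condenses that shape into a hypothetical suite helper; the helper name is invented for illustration, while the ibctesting calls are the ones the hunks above actually use.

// setupOrderedInitTryopen is a hypothetical helper sketching the Path-based
// setup these handshake tests converge on: clients plus an OPEN connection,
// then an ORDERED channel left in INIT on chainA and TRYOPEN on chainB.
func (suite *KeeperTestSuite) setupOrderedInitTryopen() (*ibctesting.Path, *capabilitytypes.Capability) {
	path := ibctesting.NewPath(suite.chainA, suite.chainB)
	suite.coordinator.SetupConnections(path) // creates clients and an OPEN connection on both endpoints
	path.SetChannelOrdered()                 // paths default to UNORDERED channels

	err := path.EndpointA.ChanOpenInit() // chainA channel end moves to INIT
	suite.Require().NoError(err)
	err = path.EndpointB.ChanOpenTry() // chainB channel end moves to TRYOPEN
	suite.Require().NoError(err)

	// identifiers now live on the endpoints rather than on TestChannel values
	channelCap := suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
	return path, channelCap
}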
diff --git a/modules/core/04-channel/keeper/keeper_test.go b/modules/core/04-channel/keeper/keeper_test.go
index 531cbb50..74899f4a 100644
--- a/modules/core/04-channel/keeper/keeper_test.go
+++ b/modules/core/04-channel/keeper/keeper_test.go
@@ -6,7 +6,6 @@ import (
"github.com/stretchr/testify/suite"
"github.com/cosmos/ibc-go/modules/core/04-channel/types"
- "github.com/cosmos/ibc-go/modules/core/exported"
ibctesting "github.com/cosmos/ibc-go/testing"
)
@@ -40,20 +39,22 @@ func (suite *KeeperTestSuite) SetupTest() {
// and existence of a channel in INIT on chainA.
func (suite *KeeperTestSuite) TestSetChannel() {
// create client and connections on both chains
- _, _, connA, connB := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
+ path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.SetupConnections(path)
// check for channel to be created on chainA
- channelA := suite.chainA.NextTestChannel(connA, ibctesting.MockPort)
- _, found := suite.chainA.App.IBCKeeper.ChannelKeeper.GetChannel(suite.chainA.GetContext(), channelA.PortID, channelA.ID)
+ _, found := suite.chainA.App.GetIBCKeeper().ChannelKeeper.GetChannel(suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
suite.False(found)
+ path.SetChannelOrdered()
+
// init channel
- channelA, channelB, err := suite.coordinator.ChanOpenInit(suite.chainA, suite.chainB, connA, connB, ibctesting.MockPort, ibctesting.MockPort, types.ORDERED)
+ err := path.EndpointA.ChanOpenInit()
suite.NoError(err)
- storedChannel, found := suite.chainA.App.IBCKeeper.ChannelKeeper.GetChannel(suite.chainA.GetContext(), channelA.PortID, channelA.ID)
+ storedChannel, found := suite.chainA.App.GetIBCKeeper().ChannelKeeper.GetChannel(suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
// counterparty channel id is empty after open init
- expectedCounterparty := types.NewCounterparty(channelB.PortID, "")
+ expectedCounterparty := types.NewCounterparty(path.EndpointB.ChannelConfig.PortID, "")
suite.True(found)
suite.Equal(types.INIT, storedChannel.State)
@@ -64,54 +65,63 @@ func (suite *KeeperTestSuite) TestSetChannel() {
// TestGetAllChannels creates multiple channels on chain A through various connections
// and tests their retrieval. 2 channels are on connA0 and 1 channel is on connA1
func (suite KeeperTestSuite) TestGetAllChannels() {
- clientA, clientB, connA0, connB0, testchannel0, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.Setup(path)
// channel0 on first connection on chainA
counterparty0 := types.Counterparty{
- PortId: connB0.Channels[0].PortID,
- ChannelId: connB0.Channels[0].ID,
+ PortId: path.EndpointB.ChannelConfig.PortID,
+ ChannelId: path.EndpointB.ChannelID,
}
- // channel1 is second channel on first connection on chainA
- testchannel1, _ := suite.coordinator.CreateMockChannels(suite.chainA, suite.chainB, connA0, connB0, types.ORDERED)
+ // path1 creates a second channel on first connection on chainA
+ path1 := ibctesting.NewPath(suite.chainA, suite.chainB)
+ path1.SetChannelOrdered()
+ path1.EndpointA.ClientID = path.EndpointA.ClientID
+ path1.EndpointB.ClientID = path.EndpointB.ClientID
+ path1.EndpointA.ConnectionID = path.EndpointA.ConnectionID
+ path1.EndpointB.ConnectionID = path.EndpointB.ConnectionID
+
+ suite.coordinator.CreateMockChannels(path1)
counterparty1 := types.Counterparty{
- PortId: connB0.Channels[1].PortID,
- ChannelId: connB0.Channels[1].ID,
+ PortId: path1.EndpointB.ChannelConfig.PortID,
+ ChannelId: path1.EndpointB.ChannelID,
}
- connA1, connB1 := suite.coordinator.CreateConnection(suite.chainA, suite.chainB, clientA, clientB)
+ path2 := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.SetupConnections(path2)
- // channel2 is on a second connection on chainA
- testchannel2, _, err := suite.coordinator.ChanOpenInit(suite.chainA, suite.chainB, connA1, connB1, ibctesting.MockPort, ibctesting.MockPort, types.UNORDERED)
+ // path2 creates a channel on a second connection on chainA
+ err := path2.EndpointA.ChanOpenInit()
suite.Require().NoError(err)
// counterparty channel id is empty after open init
counterparty2 := types.Counterparty{
- PortId: connB1.Channels[0].PortID,
+ PortId: path2.EndpointB.ChannelConfig.PortID,
ChannelId: "",
}
channel0 := types.NewChannel(
types.OPEN, types.UNORDERED,
- counterparty0, []string{connA0.ID}, testchannel0.Version,
+ counterparty0, []string{path.EndpointA.ConnectionID}, path.EndpointA.ChannelConfig.Version,
)
channel1 := types.NewChannel(
types.OPEN, types.ORDERED,
- counterparty1, []string{connA0.ID}, testchannel1.Version,
+ counterparty1, []string{path1.EndpointA.ConnectionID}, path1.EndpointA.ChannelConfig.Version,
)
channel2 := types.NewChannel(
types.INIT, types.UNORDERED,
- counterparty2, []string{connA1.ID}, testchannel2.Version,
+ counterparty2, []string{path2.EndpointA.ConnectionID}, path2.EndpointA.ChannelConfig.Version,
)
expChannels := []types.IdentifiedChannel{
- types.NewIdentifiedChannel(testchannel0.PortID, testchannel0.ID, channel0),
- types.NewIdentifiedChannel(testchannel1.PortID, testchannel1.ID, channel1),
- types.NewIdentifiedChannel(testchannel2.PortID, testchannel2.ID, channel2),
+ types.NewIdentifiedChannel(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, channel0),
+ types.NewIdentifiedChannel(path1.EndpointA.ChannelConfig.PortID, path1.EndpointA.ChannelID, channel1),
+ types.NewIdentifiedChannel(path2.EndpointA.ChannelConfig.PortID, path2.EndpointA.ChannelID, channel2),
}
ctxA := suite.chainA.GetContext()
- channels := suite.chainA.App.IBCKeeper.ChannelKeeper.GetAllChannels(ctxA)
+ channels := suite.chainA.App.GetIBCKeeper().ChannelKeeper.GetAllChannels(ctxA)
suite.Require().Len(channels, len(expChannels))
suite.Require().Equal(expChannels, channels)
}
@@ -119,12 +129,21 @@ func (suite KeeperTestSuite) TestGetAllChannels() {
// TestGetAllSequences sets all packet sequences for two different channels on chain A and
// tests their retrieval.
func (suite KeeperTestSuite) TestGetAllSequences() {
- _, _, connA, connB, channelA0, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- channelA1, _ := suite.coordinator.CreateMockChannels(suite.chainA, suite.chainB, connA, connB, types.UNORDERED)
+ path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.Setup(path)
+
+ path1 := ibctesting.NewPath(suite.chainA, suite.chainB)
+ path1.SetChannelOrdered()
+ path1.EndpointA.ClientID = path.EndpointA.ClientID
+ path1.EndpointB.ClientID = path.EndpointB.ClientID
+ path1.EndpointA.ConnectionID = path.EndpointA.ConnectionID
+ path1.EndpointB.ConnectionID = path.EndpointB.ConnectionID
+
+ suite.coordinator.CreateMockChannels(path1)
- seq1 := types.NewPacketSequence(channelA0.PortID, channelA0.ID, 1)
- seq2 := types.NewPacketSequence(channelA0.PortID, channelA0.ID, 2)
- seq3 := types.NewPacketSequence(channelA1.PortID, channelA1.ID, 3)
+ seq1 := types.NewPacketSequence(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, 1)
+ seq2 := types.NewPacketSequence(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, 2)
+ seq3 := types.NewPacketSequence(path1.EndpointA.ChannelConfig.PortID, path1.EndpointA.ChannelID, 3)
// seq1 should be overwritten by seq2
expSeqs := []types.PacketSequence{seq2, seq3}
@@ -132,14 +151,14 @@ func (suite KeeperTestSuite) TestGetAllSequences() {
ctxA := suite.chainA.GetContext()
for _, seq := range []types.PacketSequence{seq1, seq2, seq3} {
- suite.chainA.App.IBCKeeper.ChannelKeeper.SetNextSequenceSend(ctxA, seq.PortId, seq.ChannelId, seq.Sequence)
- suite.chainA.App.IBCKeeper.ChannelKeeper.SetNextSequenceRecv(ctxA, seq.PortId, seq.ChannelId, seq.Sequence)
- suite.chainA.App.IBCKeeper.ChannelKeeper.SetNextSequenceAck(ctxA, seq.PortId, seq.ChannelId, seq.Sequence)
+ suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetNextSequenceSend(ctxA, seq.PortId, seq.ChannelId, seq.Sequence)
+ suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetNextSequenceRecv(ctxA, seq.PortId, seq.ChannelId, seq.Sequence)
+ suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetNextSequenceAck(ctxA, seq.PortId, seq.ChannelId, seq.Sequence)
}
- sendSeqs := suite.chainA.App.IBCKeeper.ChannelKeeper.GetAllPacketSendSeqs(ctxA)
- recvSeqs := suite.chainA.App.IBCKeeper.ChannelKeeper.GetAllPacketRecvSeqs(ctxA)
- ackSeqs := suite.chainA.App.IBCKeeper.ChannelKeeper.GetAllPacketAckSeqs(ctxA)
+ sendSeqs := suite.chainA.App.GetIBCKeeper().ChannelKeeper.GetAllPacketSendSeqs(ctxA)
+ recvSeqs := suite.chainA.App.GetIBCKeeper().ChannelKeeper.GetAllPacketRecvSeqs(ctxA)
+ ackSeqs := suite.chainA.App.GetIBCKeeper().ChannelKeeper.GetAllPacketAckSeqs(ctxA)
suite.Len(sendSeqs, 2)
suite.Len(recvSeqs, 2)
suite.Len(ackSeqs, 2)
@@ -152,35 +171,43 @@ func (suite KeeperTestSuite) TestGetAllSequences() {
// TestGetAllPacketState creates a set of acks, packet commitments, and receipts on two different
// channels on chain A and tests their retrieval.
func (suite KeeperTestSuite) TestGetAllPacketState() {
- _, _, connA, connB, channelA0, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- channelA1, _ := suite.coordinator.CreateMockChannels(suite.chainA, suite.chainB, connA, connB, types.UNORDERED)
+ path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.Setup(path)
+
+ path1 := ibctesting.NewPath(suite.chainA, suite.chainB)
+ path1.EndpointA.ClientID = path.EndpointA.ClientID
+ path1.EndpointB.ClientID = path.EndpointB.ClientID
+ path1.EndpointA.ConnectionID = path.EndpointA.ConnectionID
+ path1.EndpointB.ConnectionID = path.EndpointB.ConnectionID
+
+ suite.coordinator.CreateMockChannels(path1)
// channel 0 acks
- ack1 := types.NewPacketState(channelA0.PortID, channelA0.ID, 1, []byte("ack"))
- ack2 := types.NewPacketState(channelA0.PortID, channelA0.ID, 2, []byte("ack"))
+ ack1 := types.NewPacketState(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, 1, []byte("ack"))
+ ack2 := types.NewPacketState(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, 2, []byte("ack"))
// duplicate ack
- ack2dup := types.NewPacketState(channelA0.PortID, channelA0.ID, 2, []byte("ack"))
+ ack2dup := types.NewPacketState(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, 2, []byte("ack"))
// channel 1 acks
- ack3 := types.NewPacketState(channelA1.PortID, channelA1.ID, 1, []byte("ack"))
+ ack3 := types.NewPacketState(path1.EndpointA.ChannelConfig.PortID, path1.EndpointA.ChannelID, 1, []byte("ack"))
// create channel 0 receipts
receipt := string([]byte{byte(1)})
- rec1 := types.NewPacketState(channelA0.PortID, channelA0.ID, 1, []byte(receipt))
- rec2 := types.NewPacketState(channelA0.PortID, channelA0.ID, 2, []byte(receipt))
+ rec1 := types.NewPacketState(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, 1, []byte(receipt))
+ rec2 := types.NewPacketState(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, 2, []byte(receipt))
// channel 1 receipts
- rec3 := types.NewPacketState(channelA1.PortID, channelA1.ID, 1, []byte(receipt))
- rec4 := types.NewPacketState(channelA1.PortID, channelA1.ID, 2, []byte(receipt))
+ rec3 := types.NewPacketState(path1.EndpointA.ChannelConfig.PortID, path1.EndpointA.ChannelID, 1, []byte(receipt))
+ rec4 := types.NewPacketState(path1.EndpointA.ChannelConfig.PortID, path1.EndpointA.ChannelID, 2, []byte(receipt))
// channel 0 packet commitments
- comm1 := types.NewPacketState(channelA0.PortID, channelA0.ID, 1, []byte("hash"))
- comm2 := types.NewPacketState(channelA0.PortID, channelA0.ID, 2, []byte("hash"))
+ comm1 := types.NewPacketState(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, 1, []byte("hash"))
+ comm2 := types.NewPacketState(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, 2, []byte("hash"))
// channel 1 packet commitments
- comm3 := types.NewPacketState(channelA1.PortID, channelA1.ID, 1, []byte("hash"))
- comm4 := types.NewPacketState(channelA1.PortID, channelA1.ID, 2, []byte("hash"))
+ comm3 := types.NewPacketState(path1.EndpointA.ChannelConfig.PortID, path1.EndpointA.ChannelID, 1, []byte("hash"))
+ comm4 := types.NewPacketState(path1.EndpointA.ChannelConfig.PortID, path1.EndpointA.ChannelID, 2, []byte("hash"))
expAcks := []types.PacketState{ack1, ack2, ack3}
expReceipts := []types.PacketState{rec1, rec2, rec3, rec4}
@@ -190,22 +217,22 @@ func (suite KeeperTestSuite) TestGetAllPacketState() {
// set acknowledgements
for _, ack := range []types.PacketState{ack1, ack2, ack2dup, ack3} {
- suite.chainA.App.IBCKeeper.ChannelKeeper.SetPacketAcknowledgement(ctxA, ack.PortId, ack.ChannelId, ack.Sequence, ack.Data)
+ suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetPacketAcknowledgement(ctxA, ack.PortId, ack.ChannelId, ack.Sequence, ack.Data)
}
// set packet receipts
for _, rec := range expReceipts {
- suite.chainA.App.IBCKeeper.ChannelKeeper.SetPacketReceipt(ctxA, rec.PortId, rec.ChannelId, rec.Sequence)
+ suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetPacketReceipt(ctxA, rec.PortId, rec.ChannelId, rec.Sequence)
}
// set packet commitments
for _, comm := range expCommitments {
- suite.chainA.App.IBCKeeper.ChannelKeeper.SetPacketCommitment(ctxA, comm.PortId, comm.ChannelId, comm.Sequence, comm.Data)
+ suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetPacketCommitment(ctxA, comm.PortId, comm.ChannelId, comm.Sequence, comm.Data)
}
- acks := suite.chainA.App.IBCKeeper.ChannelKeeper.GetAllPacketAcks(ctxA)
- receipts := suite.chainA.App.IBCKeeper.ChannelKeeper.GetAllPacketReceipts(ctxA)
- commitments := suite.chainA.App.IBCKeeper.ChannelKeeper.GetAllPacketCommitments(ctxA)
+ acks := suite.chainA.App.GetIBCKeeper().ChannelKeeper.GetAllPacketAcks(ctxA)
+ receipts := suite.chainA.App.GetIBCKeeper().ChannelKeeper.GetAllPacketReceipts(ctxA)
+ commitments := suite.chainA.App.GetIBCKeeper().ChannelKeeper.GetAllPacketCommitments(ctxA)
suite.Require().Len(acks, len(expAcks))
suite.Require().Len(commitments, len(expCommitments))
@@ -218,40 +245,41 @@ func (suite KeeperTestSuite) TestGetAllPacketState() {
// TestSetSequence verifies that the keeper correctly sets the sequence counters.
func (suite *KeeperTestSuite) TestSetSequence() {
- _, _, _, _, channelA, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.Setup(path)
ctxA := suite.chainA.GetContext()
one := uint64(1)
// initialized channel has next send seq of 1
- seq, found := suite.chainA.App.IBCKeeper.ChannelKeeper.GetNextSequenceSend(ctxA, channelA.PortID, channelA.ID)
+ seq, found := suite.chainA.App.GetIBCKeeper().ChannelKeeper.GetNextSequenceSend(ctxA, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
suite.True(found)
suite.Equal(one, seq)
// initialized channel has next seq recv of 1
- seq, found = suite.chainA.App.IBCKeeper.ChannelKeeper.GetNextSequenceRecv(ctxA, channelA.PortID, channelA.ID)
+ seq, found = suite.chainA.App.GetIBCKeeper().ChannelKeeper.GetNextSequenceRecv(ctxA, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
suite.True(found)
suite.Equal(one, seq)
// initialized channel has next seq ack of 1
- seq, found = suite.chainA.App.IBCKeeper.ChannelKeeper.GetNextSequenceAck(ctxA, channelA.PortID, channelA.ID)
+ seq, found = suite.chainA.App.GetIBCKeeper().ChannelKeeper.GetNextSequenceAck(ctxA, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
suite.True(found)
suite.Equal(one, seq)
nextSeqSend, nextSeqRecv, nextSeqAck := uint64(10), uint64(10), uint64(10)
- suite.chainA.App.IBCKeeper.ChannelKeeper.SetNextSequenceSend(ctxA, channelA.PortID, channelA.ID, nextSeqSend)
- suite.chainA.App.IBCKeeper.ChannelKeeper.SetNextSequenceRecv(ctxA, channelA.PortID, channelA.ID, nextSeqRecv)
- suite.chainA.App.IBCKeeper.ChannelKeeper.SetNextSequenceAck(ctxA, channelA.PortID, channelA.ID, nextSeqAck)
+ suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetNextSequenceSend(ctxA, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, nextSeqSend)
+ suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetNextSequenceRecv(ctxA, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, nextSeqRecv)
+ suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetNextSequenceAck(ctxA, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, nextSeqAck)
- storedNextSeqSend, found := suite.chainA.App.IBCKeeper.ChannelKeeper.GetNextSequenceSend(ctxA, channelA.PortID, channelA.ID)
+ storedNextSeqSend, found := suite.chainA.App.GetIBCKeeper().ChannelKeeper.GetNextSequenceSend(ctxA, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
suite.True(found)
suite.Equal(nextSeqSend, storedNextSeqSend)
- storedNextSeqRecv, found := suite.chainA.App.IBCKeeper.ChannelKeeper.GetNextSequenceSend(ctxA, channelA.PortID, channelA.ID)
+ storedNextSeqRecv, found := suite.chainA.App.GetIBCKeeper().ChannelKeeper.GetNextSequenceRecv(ctxA, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
suite.True(found)
suite.Equal(nextSeqRecv, storedNextSeqRecv)
- storedNextSeqAck, found := suite.chainA.App.IBCKeeper.ChannelKeeper.GetNextSequenceAck(ctxA, channelA.PortID, channelA.ID)
+ storedNextSeqAck, found := suite.chainA.App.GetIBCKeeper().ChannelKeeper.GetNextSequenceAck(ctxA, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
suite.True(found)
suite.Equal(nextSeqAck, storedNextSeqAck)
}
@@ -261,10 +289,18 @@ func (suite *KeeperTestSuite) TestSetSequence() {
// value of "seq" and then add non-consecutive up to the value of "maxSeq". A final commitment
// with the value maxSeq + 1 is set on a different channel.
func (suite *KeeperTestSuite) TestGetAllPacketCommitmentsAtChannel() {
- _, _, connA, connB, channelA, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.Setup(path)
// create second channel
- channelA1, _ := suite.coordinator.CreateMockChannels(suite.chainA, suite.chainB, connA, connB, types.UNORDERED)
+ path1 := ibctesting.NewPath(suite.chainA, suite.chainB)
+ path1.SetChannelOrdered()
+ path1.EndpointA.ClientID = path.EndpointA.ClientID
+ path1.EndpointB.ClientID = path.EndpointB.ClientID
+ path1.EndpointA.ConnectionID = path.EndpointA.ConnectionID
+ path1.EndpointB.ConnectionID = path.EndpointB.ConnectionID
+
+ suite.coordinator.CreateMockChannels(path1)
ctxA := suite.chainA.GetContext()
expectedSeqs := make(map[uint64]bool)
@@ -276,20 +312,20 @@ func (suite *KeeperTestSuite) TestGetAllPacketCommitmentsAtChannel() {
// create consecutive commitments
for i := uint64(1); i < seq; i++ {
- suite.chainA.App.IBCKeeper.ChannelKeeper.SetPacketCommitment(ctxA, channelA.PortID, channelA.ID, i, hash)
+ suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetPacketCommitment(ctxA, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, i, hash)
expectedSeqs[i] = true
}
// add non-consecutive commitments
for i := seq; i < maxSeq; i += 2 {
- suite.chainA.App.IBCKeeper.ChannelKeeper.SetPacketCommitment(ctxA, channelA.PortID, channelA.ID, i, hash)
+ suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetPacketCommitment(ctxA, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, i, hash)
expectedSeqs[i] = true
}
// add sequence on different channel/port
- suite.chainA.App.IBCKeeper.ChannelKeeper.SetPacketCommitment(ctxA, channelA1.PortID, channelA1.ID, maxSeq+1, hash)
+ suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetPacketCommitment(ctxA, path1.EndpointA.ChannelConfig.PortID, path1.EndpointA.ChannelID, maxSeq+1, hash)
- commitments := suite.chainA.App.IBCKeeper.ChannelKeeper.GetAllPacketCommitmentsAtChannel(ctxA, channelA.PortID, channelA.ID)
+ commitments := suite.chainA.App.GetIBCKeeper().ChannelKeeper.GetAllPacketCommitmentsAtChannel(ctxA, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
suite.Equal(len(expectedSeqs), len(commitments))
// ensure above for loops occurred
@@ -298,8 +334,8 @@ func (suite *KeeperTestSuite) TestGetAllPacketCommitmentsAtChannel() {
// verify that all the packet commitments were stored
for _, packet := range commitments {
suite.True(expectedSeqs[packet.Sequence])
- suite.Equal(channelA.PortID, packet.PortId)
- suite.Equal(channelA.ID, packet.ChannelId)
+ suite.Equal(path.EndpointA.ChannelConfig.PortID, packet.PortId)
+ suite.Equal(path.EndpointA.ChannelID, packet.ChannelId)
suite.Equal(hash, packet.Data)
// prevent duplicates from passing checks
@@ -310,20 +346,21 @@ func (suite *KeeperTestSuite) TestGetAllPacketCommitmentsAtChannel() {
// TestSetPacketAcknowledgement verifies that packet acknowledgements are correctly
// set in the keeper.
func (suite *KeeperTestSuite) TestSetPacketAcknowledgement() {
- _, _, _, _, channelA, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.Setup(path)
ctxA := suite.chainA.GetContext()
seq := uint64(10)
- storedAckHash, found := suite.chainA.App.IBCKeeper.ChannelKeeper.GetPacketAcknowledgement(ctxA, channelA.PortID, channelA.ID, seq)
+ storedAckHash, found := suite.chainA.App.GetIBCKeeper().ChannelKeeper.GetPacketAcknowledgement(ctxA, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, seq)
suite.Require().False(found)
suite.Require().Nil(storedAckHash)
ackHash := []byte("ackhash")
- suite.chainA.App.IBCKeeper.ChannelKeeper.SetPacketAcknowledgement(ctxA, channelA.PortID, channelA.ID, seq, ackHash)
+ suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetPacketAcknowledgement(ctxA, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, seq, ackHash)
- storedAckHash, found = suite.chainA.App.IBCKeeper.ChannelKeeper.GetPacketAcknowledgement(ctxA, channelA.PortID, channelA.ID, seq)
+ storedAckHash, found = suite.chainA.App.GetIBCKeeper().ChannelKeeper.GetPacketAcknowledgement(ctxA, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, seq)
suite.Require().True(found)
suite.Require().Equal(ackHash, storedAckHash)
- suite.Require().True(suite.chainA.App.IBCKeeper.ChannelKeeper.HasPacketAcknowledgement(ctxA, channelA.PortID, channelA.ID, seq))
+ suite.Require().True(suite.chainA.App.GetIBCKeeper().ChannelKeeper.HasPacketAcknowledgement(ctxA, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, seq))
}
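Several keeper_test.go hunks above (TestGetAllChannels, TestGetAllSequences, TestGetAllPacketState, TestGetAllPacketCommitmentsAtChannel) repeat one recipe for opening an extra channel over an already established connection: copy the client and connection identifiers from the first path onto a fresh one and let CreateMockChannels perform only the channel handshake. A hedged sketch of that recipe as a suite helper follows; the helper name is invented, and the field assignments mirror the hunks above.

// newMockChannelOnSameConnection is a hypothetical helper sketching how the
// tests above open an additional mock channel without creating new clients
// or connections: the second path simply shares the first path's identifiers.
func (suite *KeeperTestSuite) newMockChannelOnSameConnection(path *ibctesting.Path, ordered bool) *ibctesting.Path {
	path1 := ibctesting.NewPath(suite.chainA, suite.chainB)
	if ordered {
		path1.SetChannelOrdered()
	}

	// reuse the existing light clients and connection ends
	path1.EndpointA.ClientID = path.EndpointA.ClientID
	path1.EndpointB.ClientID = path.EndpointB.ClientID
	path1.EndpointA.ConnectionID = path.EndpointA.ConnectionID
	path1.EndpointB.ConnectionID = path.EndpointB.ConnectionID

	// performs only the channel handshake for the mock application ports
	suite.coordinator.CreateMockChannels(path1)
	return path1
}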
diff --git a/modules/core/04-channel/keeper/packet_test.go b/modules/core/04-channel/keeper/packet_test.go
index 9a503e34..4916ae9d 100644
--- a/modules/core/04-channel/keeper/packet_test.go
+++ b/modules/core/04-channel/keeper/packet_test.go
@@ -31,143 +31,143 @@ var (
// TestSendPacket tests SendPacket from chainA to chainB
func (suite *KeeperTestSuite) TestSendPacket() {
var (
+ path *ibctesting.Path
packet exported.PacketI
channelCap *capabilitytypes.Capability
)
testCases := []testCase{
{"success: UNORDERED channel", func() {
- _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
- channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ suite.coordinator.Setup(path)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
+ channelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
}, true},
{"success: ORDERED channel", func() {
- _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED)
- packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
- channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ path.SetChannelOrdered()
+ suite.coordinator.Setup(path)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
+ channelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
}, true},
{"sending packet out of order on UNORDERED channel", func() {
// setup creates an unordered channel
- _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- packet = types.NewPacket(ibctesting.MockPacketData, 5, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
- channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ suite.coordinator.Setup(path)
+ packet = types.NewPacket(ibctesting.MockPacketData, 5, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
+ channelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
}, false},
{"sending packet out of order on ORDERED channel", func() {
- _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED)
- packet = types.NewPacket(ibctesting.MockPacketData, 5, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
- channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ path.SetChannelOrdered()
+ suite.coordinator.Setup(path)
+ packet = types.NewPacket(ibctesting.MockPacketData, 5, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
+ channelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
}, false},
{"packet basic validation failed, empty packet data", func() {
- _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- packet = types.NewPacket([]byte{}, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
- channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ suite.coordinator.Setup(path)
+ packet = types.NewPacket([]byte{}, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
+ channelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
}, false},
{"channel not found", func() {
// use wrong channel naming
- _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- packet = types.NewPacket(ibctesting.MockPacketData, 1, ibctesting.InvalidID, ibctesting.InvalidID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
- channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ suite.coordinator.Setup(path)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, ibctesting.InvalidID, ibctesting.InvalidID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
+ channelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
}, false},
{"channel closed", func() {
- _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ suite.coordinator.Setup(path)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
- err := suite.coordinator.SetChannelClosed(suite.chainA, suite.chainB, channelA)
+ err := path.EndpointA.SetChannelClosed()
suite.Require().NoError(err)
}, false},
{"packet dest port ≠ channel counterparty port", func() {
- _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ suite.coordinator.Setup(path)
// use wrong port for dest
- packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, ibctesting.InvalidID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
- channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, ibctesting.InvalidID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
+ channelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
}, false},
{"packet dest channel ID ≠ channel counterparty channel ID", func() {
- _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ suite.coordinator.Setup(path)
// use wrong channel for dest
- packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, ibctesting.InvalidID, timeoutHeight, disabledTimeoutTimestamp)
- channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, ibctesting.InvalidID, timeoutHeight, disabledTimeoutTimestamp)
+ channelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
}, false},
{"connection not found", func() {
- channelA := ibctesting.TestChannel{PortID: portID, ID: channelIDA}
- channelB := ibctesting.TestChannel{PortID: portID, ID: channelIDB}
// pass channel check
- suite.chainA.App.IBCKeeper.ChannelKeeper.SetChannel(
+ suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetChannel(
suite.chainA.GetContext(),
- channelA.PortID, channelA.ID,
- types.NewChannel(types.OPEN, types.ORDERED, types.NewCounterparty(channelB.PortID, channelB.ID), []string{connIDA}, channelA.Version),
+ path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID,
+ types.NewChannel(types.OPEN, types.ORDERED, types.NewCounterparty(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID), []string{connIDA}, path.EndpointA.ChannelConfig.Version),
)
- packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
- suite.chainA.CreateChannelCapability(channelA.PortID, channelA.ID)
- channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
+ suite.chainA.CreateChannelCapability(suite.chainA.GetSimApp().ScopedIBCMockKeeper, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
+ channelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
}, false},
{"client state not found", func() {
- _, _, connA, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ suite.coordinator.Setup(path)
// change connection client ID
- connection := suite.chainA.GetConnection(connA)
+ connection := path.EndpointA.GetConnection()
connection.ClientId = ibctesting.InvalidID
- suite.chainA.App.IBCKeeper.ConnectionKeeper.SetConnection(suite.chainA.GetContext(), connA.ID, connection)
+ suite.chainA.App.GetIBCKeeper().ConnectionKeeper.SetConnection(suite.chainA.GetContext(), path.EndpointA.ConnectionID, connection)
- packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
- channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
+ channelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
}, false},
{"client state is frozen", func() {
- _, _, connA, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ suite.coordinator.Setup(path)
- connection := suite.chainA.GetConnection(connA)
- clientState := suite.chainA.GetClientState(connection.ClientId)
+ connection := path.EndpointA.GetConnection()
+ clientState := path.EndpointA.GetClientState()
cs, ok := clientState.(*ibctmtypes.ClientState)
suite.Require().True(ok)
// freeze client
cs.FrozenHeight = clienttypes.NewHeight(0, 1)
- suite.chainA.App.IBCKeeper.ClientKeeper.SetClientState(suite.chainA.GetContext(), connection.ClientId, cs)
+ suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(suite.chainA.GetContext(), connection.ClientId, cs)
- packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
- channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
+ channelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
}, false},
{"timeout height passed", func() {
- clientA, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ suite.coordinator.Setup(path)
// use client state latest height for timeout
- clientState := suite.chainA.GetClientState(clientA)
- packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clientState.GetLatestHeight().(clienttypes.Height), disabledTimeoutTimestamp)
- channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ clientState := path.EndpointA.GetClientState()
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, clientState.GetLatestHeight().(clienttypes.Height), disabledTimeoutTimestamp)
+ channelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
}, false},
{"timeout timestamp passed", func() {
- clientA, _, connA, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ suite.coordinator.Setup(path)
// use latest time on client state
- clientState := suite.chainA.GetClientState(clientA)
- connection := suite.chainA.GetConnection(connA)
- timestamp, err := suite.chainA.App.IBCKeeper.ConnectionKeeper.GetTimestampAtHeight(suite.chainA.GetContext(), connection, clientState.GetLatestHeight())
+ clientState := path.EndpointA.GetClientState()
+ connection := path.EndpointA.GetConnection()
+ timestamp, err := suite.chainA.App.GetIBCKeeper().ConnectionKeeper.GetTimestampAtHeight(suite.chainA.GetContext(), connection, clientState.GetLatestHeight())
suite.Require().NoError(err)
- packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, disabledTimeoutHeight, timestamp)
- channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, disabledTimeoutHeight, timestamp)
+ channelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
}, false},
{"next sequence send not found", func() {
- _, _, connA, connB := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
- channelA := suite.chainA.NextTestChannel(connA, ibctesting.TransferPort)
- channelB := suite.chainB.NextTestChannel(connB, ibctesting.TransferPort)
- packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.SetupConnections(path)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
// manually creating channel prevents next sequence from being set
- suite.chainA.App.IBCKeeper.ChannelKeeper.SetChannel(
+ suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetChannel(
suite.chainA.GetContext(),
- channelA.PortID, channelA.ID,
- types.NewChannel(types.OPEN, types.ORDERED, types.NewCounterparty(channelB.PortID, channelB.ID), []string{connA.ID}, channelA.Version),
+ path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID,
+ types.NewChannel(types.OPEN, types.ORDERED, types.NewCounterparty(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID), []string{path.EndpointA.ConnectionID}, path.EndpointA.ChannelConfig.Version),
)
- suite.chainA.CreateChannelCapability(channelA.PortID, channelA.ID)
- channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ suite.chainA.CreateChannelCapability(suite.chainA.GetSimApp().ScopedIBCMockKeeper, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
+ channelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
}, false},
{"next sequence wrong", func() {
- _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
- suite.chainA.App.IBCKeeper.ChannelKeeper.SetNextSequenceSend(suite.chainA.GetContext(), channelA.PortID, channelA.ID, 5)
- channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ suite.coordinator.Setup(path)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
+ suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetNextSequenceSend(suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, 5)
+ channelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
}, false},
{"channel capability not found", func() {
- _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ suite.coordinator.Setup(path)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
channelCap = capabilitytypes.NewCapability(5)
}, false},
}
@@ -176,10 +176,11 @@ func (suite *KeeperTestSuite) TestSendPacket() {
tc := tc
suite.Run(fmt.Sprintf("Case %s, %d/%d tests", tc.msg, i, len(testCases)), func() {
suite.SetupTest() // reset
+ path = ibctesting.NewPath(suite.chainA, suite.chainB)
tc.malleate()
- err := suite.chainA.App.IBCKeeper.ChannelKeeper.SendPacket(suite.chainA.GetContext(), channelCap, packet)
+ err := suite.chainA.App.GetIBCKeeper().ChannelKeeper.SendPacket(suite.chainA.GetContext(), channelCap, packet)
if tc.expPass {
suite.Require().NoError(err)
@@ -196,161 +197,162 @@ func (suite *KeeperTestSuite) TestSendPacket() {
// verification tests need to simulate sending a packet from chainA to chainB.
func (suite *KeeperTestSuite) TestRecvPacket() {
var (
+ path *ibctesting.Path
packet exported.PacketI
channelCap *capabilitytypes.Capability
)
testCases := []testCase{
{"success: ORDERED channel", func() {
- _, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED)
- packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
- err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ path.SetChannelOrdered()
+ suite.coordinator.Setup(path)
+
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
+ err := path.EndpointA.SendPacket(packet)
suite.Require().NoError(err)
- channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
+ channelCap = suite.chainB.GetChannelCapability(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID)
}, true},
{"success UNORDERED channel", func() {
// setup uses an UNORDERED channel
- _, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
- err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ suite.coordinator.Setup(path)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
+ err := path.EndpointA.SendPacket(packet)
suite.Require().NoError(err)
- channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
+ channelCap = suite.chainB.GetChannelCapability(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID)
}, true},
{"success with out of order packet: UNORDERED channel", func() {
// setup uses an UNORDERED channel
- _, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ suite.coordinator.Setup(path)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
// send 2 packets
- err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ err := path.EndpointA.SendPacket(packet)
suite.Require().NoError(err)
// set sequence to 2
- packet = types.NewPacket(ibctesting.MockPacketData, 2, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
- err = suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ packet = types.NewPacket(ibctesting.MockPacketData, 2, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
+ err = path.EndpointA.SendPacket(packet)
suite.Require().NoError(err)
// attempts to receive packet 2 without receiving packet 1
- channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
+ channelCap = suite.chainB.GetChannelCapability(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID)
}, true},
{"out of order packet failure with ORDERED channel", func() {
- _, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED)
- packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ path.SetChannelOrdered()
+ suite.coordinator.Setup(path)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
// send 2 packets
- err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ err := path.EndpointA.SendPacket(packet)
suite.Require().NoError(err)
// set sequence to 2
- packet = types.NewPacket(ibctesting.MockPacketData, 2, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
- err = suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ packet = types.NewPacket(ibctesting.MockPacketData, 2, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
+ err = path.EndpointA.SendPacket(packet)
suite.Require().NoError(err)
// attempts to receive packet 2 without receiving packet 1
- channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
+ channelCap = suite.chainB.GetChannelCapability(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID)
}, false},
{"channel not found", func() {
// use wrong channel naming
- _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, ibctesting.InvalidID, ibctesting.InvalidID, timeoutHeight, disabledTimeoutTimestamp)
- channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
+ suite.coordinator.Setup(path)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, ibctesting.InvalidID, ibctesting.InvalidID, timeoutHeight, disabledTimeoutTimestamp)
+ channelCap = suite.chainB.GetChannelCapability(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID)
}, false},
{"channel not open", func() {
- _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ suite.coordinator.Setup(path)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
- err := suite.coordinator.SetChannelClosed(suite.chainB, suite.chainA, channelB)
+ err := path.EndpointB.SetChannelClosed()
suite.Require().NoError(err)
- channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
+ channelCap = suite.chainB.GetChannelCapability(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID)
}, false},
- {"capability cannot authenticate", func() {
- _, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED)
- packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
- err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ {"capability cannot authenticate ORDERED", func() {
+ path.SetChannelOrdered()
+ suite.coordinator.Setup(path)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
+ err := path.EndpointA.SendPacket(packet)
suite.Require().NoError(err)
channelCap = capabilitytypes.NewCapability(3)
}, false},
{"packet source port ≠ channel counterparty port", func() {
- _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ suite.coordinator.Setup(path)
// use wrong port for source
- packet = types.NewPacket(ibctesting.MockPacketData, 1, ibctesting.InvalidID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
- channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, ibctesting.InvalidID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
+ channelCap = suite.chainB.GetChannelCapability(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID)
}, false},
{"packet source channel ID ≠ channel counterparty channel ID", func() {
- _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ suite.coordinator.Setup(path)
// use wrong channel for source
- packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, ibctesting.InvalidID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
- channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, ibctesting.InvalidID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
+ channelCap = suite.chainB.GetChannelCapability(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID)
}, false},
{"connection not found", func() {
- channelA := ibctesting.TestChannel{PortID: portID, ID: channelIDA}
- channelB := ibctesting.TestChannel{PortID: portID, ID: channelIDB}
// pass channel check
- suite.chainB.App.IBCKeeper.ChannelKeeper.SetChannel(
+ suite.chainB.App.GetIBCKeeper().ChannelKeeper.SetChannel(
suite.chainB.GetContext(),
- channelB.PortID, channelB.ID,
- types.NewChannel(types.OPEN, types.ORDERED, types.NewCounterparty(channelA.PortID, channelA.ID), []string{connIDB}, channelB.Version),
+ path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID,
+ types.NewChannel(types.OPEN, types.ORDERED, types.NewCounterparty(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID), []string{connIDB}, path.EndpointB.ChannelConfig.Version),
)
- packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
- suite.chainB.CreateChannelCapability(channelB.PortID, channelB.ID)
- channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
+ suite.chainB.CreateChannelCapability(suite.chainB.GetSimApp().ScopedIBCMockKeeper, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID)
+ channelCap = suite.chainB.GetChannelCapability(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID)
}, false},
{"connection not OPEN", func() {
- clientA, clientB := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+ suite.coordinator.SetupClients(path)
+
// connection on chainB is in INIT
- connB, connA, err := suite.coordinator.ConnOpenInit(suite.chainB, suite.chainA, clientB, clientA)
+ err := path.EndpointB.ConnOpenInit()
suite.Require().NoError(err)
- channelA := suite.chainA.NextTestChannel(connA, ibctesting.TransferPort)
- channelB := suite.chainB.NextTestChannel(connB, ibctesting.TransferPort)
// pass channel check
- suite.chainB.App.IBCKeeper.ChannelKeeper.SetChannel(
+ suite.chainB.App.GetIBCKeeper().ChannelKeeper.SetChannel(
suite.chainB.GetContext(),
- channelB.PortID, channelB.ID,
- types.NewChannel(types.OPEN, types.ORDERED, types.NewCounterparty(channelA.PortID, channelA.ID), []string{connB.ID}, channelB.Version),
+ path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID,
+ types.NewChannel(types.OPEN, types.ORDERED, types.NewCounterparty(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID), []string{path.EndpointB.ConnectionID}, path.EndpointB.ChannelConfig.Version),
)
- packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
- suite.chainB.CreateChannelCapability(channelB.PortID, channelB.ID)
- channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
+ suite.chainB.CreateChannelCapability(suite.chainB.GetSimApp().ScopedIBCMockKeeper, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID)
+ channelCap = suite.chainB.GetChannelCapability(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID)
}, false},
{"timeout height passed", func() {
- _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), disabledTimeoutTimestamp)
- channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
+ suite.coordinator.Setup(path)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), disabledTimeoutTimestamp)
+ channelCap = suite.chainB.GetChannelCapability(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID)
}, false},
{"timeout timestamp passed", func() {
- _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, disabledTimeoutHeight, uint64(suite.chainB.GetContext().BlockTime().UnixNano()))
- channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
+ suite.coordinator.Setup(path)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, disabledTimeoutHeight, uint64(suite.chainB.GetContext().BlockTime().UnixNano()))
+ channelCap = suite.chainB.GetChannelCapability(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID)
}, false},
{"next receive sequence is not found", func() {
- _, _, connA, connB := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
- channelA := suite.chainA.NextTestChannel(connA, ibctesting.TransferPort)
- channelB := suite.chainB.NextTestChannel(connB, ibctesting.TransferPort)
+ path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.SetupConnections(path)
// manually creating channel prevents next recv sequence from being set
- suite.chainB.App.IBCKeeper.ChannelKeeper.SetChannel(
+ suite.chainB.App.GetIBCKeeper().ChannelKeeper.SetChannel(
suite.chainB.GetContext(),
- channelB.PortID, channelB.ID,
- types.NewChannel(types.OPEN, types.ORDERED, types.NewCounterparty(channelA.PortID, channelA.ID), []string{connB.ID}, channelB.Version),
+ path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID,
+ types.NewChannel(types.OPEN, types.ORDERED, types.NewCounterparty(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID), []string{path.EndpointB.ConnectionID}, path.EndpointB.ChannelConfig.Version),
)
- packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
// manually set packet commitment
- suite.chainA.App.IBCKeeper.ChannelKeeper.SetPacketCommitment(suite.chainA.GetContext(), channelA.PortID, channelA.ID, packet.GetSequence(), ibctesting.MockPacketData)
- suite.chainB.CreateChannelCapability(channelB.PortID, channelB.ID)
+ suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetPacketCommitment(suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, packet.GetSequence(), ibctesting.MockPacketData)
+ suite.chainB.CreateChannelCapability(suite.chainB.GetSimApp().ScopedIBCMockKeeper, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID)
- channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
+ channelCap = suite.chainB.GetChannelCapability(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID)
}, false},
{"receipt already stored", func() {
- _, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
- suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
- suite.chainB.App.IBCKeeper.ChannelKeeper.SetPacketReceipt(suite.chainB.GetContext(), channelB.PortID, channelB.ID, 1)
- channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
+ suite.coordinator.Setup(path)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
+ path.EndpointA.SendPacket(packet)
+ suite.chainB.App.GetIBCKeeper().ChannelKeeper.SetPacketReceipt(suite.chainB.GetContext(), path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, 1)
+ channelCap = suite.chainB.GetChannelCapability(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID)
}, false},
{"validation failed", func() {
// packet commitment not set resulting in invalid proof
- _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
- channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
+ suite.coordinator.Setup(path)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
+ channelCap = suite.chainB.GetChannelCapability(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID)
}, false},
}
@@ -358,21 +360,23 @@ func (suite *KeeperTestSuite) TestRecvPacket() {
tc := tc
suite.Run(fmt.Sprintf("Case %s, %d/%d tests", tc.msg, i, len(testCases)), func() {
suite.SetupTest() // reset
+ path = ibctesting.NewPath(suite.chainA, suite.chainB)
+
tc.malleate()
// get proof of packet commitment from chainA
packetKey := host.PacketCommitmentKey(packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence())
proof, proofHeight := suite.chainA.QueryProof(packetKey)
- err := suite.chainB.App.IBCKeeper.ChannelKeeper.RecvPacket(suite.chainB.GetContext(), channelCap, packet, proof, proofHeight)
+ err := suite.chainB.App.GetIBCKeeper().ChannelKeeper.RecvPacket(suite.chainB.GetContext(), channelCap, packet, proof, proofHeight)
if tc.expPass {
suite.Require().NoError(err)
- channelB, _ := suite.chainB.App.IBCKeeper.ChannelKeeper.GetChannel(suite.chainB.GetContext(), packet.GetDestPort(), packet.GetDestChannel())
- nextSeqRecv, found := suite.chainB.App.IBCKeeper.ChannelKeeper.GetNextSequenceRecv(suite.chainB.GetContext(), packet.GetDestPort(), packet.GetDestChannel())
+ channelB, _ := suite.chainB.App.GetIBCKeeper().ChannelKeeper.GetChannel(suite.chainB.GetContext(), packet.GetDestPort(), packet.GetDestChannel())
+ nextSeqRecv, found := suite.chainB.App.GetIBCKeeper().ChannelKeeper.GetNextSequenceRecv(suite.chainB.GetContext(), packet.GetDestPort(), packet.GetDestChannel())
suite.Require().True(found)
- receipt, receiptStored := suite.chainB.App.IBCKeeper.ChannelKeeper.GetPacketReceipt(suite.chainB.GetContext(), packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence())
+ receipt, receiptStored := suite.chainB.App.GetIBCKeeper().ChannelKeeper.GetPacketReceipt(suite.chainB.GetContext(), packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence())
if channelB.Ordering == types.ORDERED {
suite.Require().Equal(packet.GetSequence()+1, nextSeqRecv, "sequence not incremented in ordered channel")
@@ -392,6 +396,7 @@ func (suite *KeeperTestSuite) TestRecvPacket() {
func (suite *KeeperTestSuite) TestWriteAcknowledgement() {
var (
+ path *ibctesting.Path
ack []byte
packet exported.PacketI
channelCap *capabilitytypes.Capability
@@ -401,34 +406,34 @@ func (suite *KeeperTestSuite) TestWriteAcknowledgement() {
{
"success",
func() {
- _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ suite.coordinator.Setup(path)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
ack = ibctesting.MockAcknowledgement
- channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
+ channelCap = suite.chainB.GetChannelCapability(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID)
},
true,
},
{"channel not found", func() {
// use wrong channel naming
- _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, ibctesting.InvalidID, ibctesting.InvalidID, timeoutHeight, disabledTimeoutTimestamp)
+ suite.coordinator.Setup(path)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, ibctesting.InvalidID, ibctesting.InvalidID, timeoutHeight, disabledTimeoutTimestamp)
ack = ibctesting.MockAcknowledgement
- channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
+ channelCap = suite.chainB.GetChannelCapability(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID)
}, false},
{"channel not open", func() {
- _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ suite.coordinator.Setup(path)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
ack = ibctesting.MockAcknowledgement
- err := suite.coordinator.SetChannelClosed(suite.chainB, suite.chainA, channelB)
+ err := path.EndpointB.SetChannelClosed()
suite.Require().NoError(err)
- channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
+ channelCap = suite.chainB.GetChannelCapability(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID)
}, false},
{
"capability authentication failed",
func() {
- _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ suite.coordinator.Setup(path)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
ack = ibctesting.MockAcknowledgement
channelCap = capabilitytypes.NewCapability(3)
},
@@ -437,21 +442,21 @@ func (suite *KeeperTestSuite) TestWriteAcknowledgement() {
{
"no-op, already acked",
func() {
- _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ suite.coordinator.Setup(path)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
ack = ibctesting.MockAcknowledgement
- suite.chainB.App.IBCKeeper.ChannelKeeper.SetPacketAcknowledgement(suite.chainB.GetContext(), packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence(), ack)
- channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
+ suite.chainB.App.GetIBCKeeper().ChannelKeeper.SetPacketAcknowledgement(suite.chainB.GetContext(), packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence(), ack)
+ channelCap = suite.chainB.GetChannelCapability(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID)
},
false,
},
{
"empty acknowledgement",
func() {
- _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ suite.coordinator.Setup(path)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
ack = nil
- channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID)
+ channelCap = suite.chainB.GetChannelCapability(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID)
},
false,
},
@@ -460,10 +465,11 @@ func (suite *KeeperTestSuite) TestWriteAcknowledgement() {
tc := tc
suite.Run(fmt.Sprintf("Case %s, %d/%d tests", tc.msg, i, len(testCases)), func() {
suite.SetupTest() // reset
+ path = ibctesting.NewPath(suite.chainA, suite.chainB)
tc.malleate()
- err := suite.chainB.App.IBCKeeper.ChannelKeeper.WriteAcknowledgement(suite.chainB.GetContext(), channelCap, packet, ack)
+ err := suite.chainB.App.GetIBCKeeper().ChannelKeeper.WriteAcknowledgement(suite.chainB.GetContext(), channelCap, packet, ack)
if tc.expPass {
suite.Require().NoError(err)
@@ -477,6 +483,7 @@ func (suite *KeeperTestSuite) TestWriteAcknowledgement() {
// TestAcknowledgePacket tests the call AcknowledgePacket on chainA.
func (suite *KeeperTestSuite) TestAcknowledgePacket() {
var (
+ path *ibctesting.Path
packet types.Packet
ack = ibcmock.MockAcknowledgement
@@ -485,150 +492,148 @@ func (suite *KeeperTestSuite) TestAcknowledgePacket() {
testCases := []testCase{
{"success on ordered channel", func() {
- clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED)
- packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ path.SetChannelOrdered()
+ suite.coordinator.Setup(path)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
// create packet commitment
- err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ err := path.EndpointA.SendPacket(packet)
suite.Require().NoError(err)
// create packet receipt and acknowledgement
- err = suite.coordinator.RecvPacket(suite.chainA, suite.chainB, clientA, packet)
+ err = path.EndpointB.RecvPacket(packet)
suite.Require().NoError(err)
- channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ channelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
}, true},
{"success on unordered channel", func() {
// setup uses an UNORDERED channel
- clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ suite.coordinator.Setup(path)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
// create packet commitment
- err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ err := path.EndpointA.SendPacket(packet)
suite.Require().NoError(err)
// create packet receipt and acknowledgement
- err = suite.coordinator.RecvPacket(suite.chainA, suite.chainB, clientA, packet)
+ err = path.EndpointB.RecvPacket(packet)
suite.Require().NoError(err)
- channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ channelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
}, true},
{"channel not found", func() {
// use wrong channel naming
- _, _, _, _, _, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- packet = types.NewPacket(ibctesting.MockPacketData, 1, ibctesting.InvalidID, ibctesting.InvalidID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ suite.coordinator.Setup(path)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, ibctesting.InvalidID, ibctesting.InvalidID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
}, false},
{"channel not open", func() {
- _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ suite.coordinator.Setup(path)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
- err := suite.coordinator.SetChannelClosed(suite.chainA, suite.chainB, channelA)
+ err := path.EndpointA.SetChannelClosed()
suite.Require().NoError(err)
- channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ channelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
}, false},
- {"capability authentication failed", func() {
- clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED)
- packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ {"capability authentication failed ORDERED", func() {
+ path.SetChannelOrdered()
+ suite.coordinator.Setup(path)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
// create packet commitment
- err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ err := path.EndpointA.SendPacket(packet)
suite.Require().NoError(err)
// create packet receipt and acknowledgement
- err = suite.coordinator.RecvPacket(suite.chainA, suite.chainB, clientA, packet)
+ err = path.EndpointB.RecvPacket(packet)
suite.Require().NoError(err)
channelCap = capabilitytypes.NewCapability(3)
}, false},
{"packet destination port ≠ channel counterparty port", func() {
- _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ suite.coordinator.Setup(path)
// use wrong port for dest
- packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, ibctesting.InvalidID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
- channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, ibctesting.InvalidID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
+ channelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
}, false},
{"packet destination channel ID ≠ channel counterparty channel ID", func() {
- _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ suite.coordinator.Setup(path)
// use wrong channel for dest
- packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, ibctesting.InvalidID, timeoutHeight, disabledTimeoutTimestamp)
- channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, ibctesting.InvalidID, timeoutHeight, disabledTimeoutTimestamp)
+ channelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
}, false},
{"connection not found", func() {
- channelA := ibctesting.TestChannel{PortID: portID, ID: channelIDA}
- channelB := ibctesting.TestChannel{PortID: portID, ID: channelIDB}
// pass channel check
- suite.chainB.App.IBCKeeper.ChannelKeeper.SetChannel(
+ suite.chainB.App.GetIBCKeeper().ChannelKeeper.SetChannel(
suite.chainB.GetContext(),
- channelB.PortID, channelB.ID,
- types.NewChannel(types.OPEN, types.ORDERED, types.NewCounterparty(channelA.PortID, channelA.ID), []string{connIDB}, channelB.Version),
+ path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID,
+ types.NewChannel(types.OPEN, types.ORDERED, types.NewCounterparty(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID), []string{connIDB}, path.EndpointB.ChannelConfig.Version),
)
- packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
- suite.chainA.CreateChannelCapability(channelA.PortID, channelA.ID)
- channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
+ suite.chainA.CreateChannelCapability(suite.chainA.GetSimApp().ScopedIBCMockKeeper, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
+ channelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
}, false},
{"connection not OPEN", func() {
- clientA, clientB := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+ suite.coordinator.SetupClients(path)
// connection on chainA is in INIT
- connA, connB, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB)
+ err := path.EndpointA.ConnOpenInit()
suite.Require().NoError(err)
- channelA := suite.chainA.NextTestChannel(connA, ibctesting.TransferPort)
- channelB := suite.chainB.NextTestChannel(connB, ibctesting.TransferPort)
// pass channel check
- suite.chainA.App.IBCKeeper.ChannelKeeper.SetChannel(
+ suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetChannel(
suite.chainA.GetContext(),
- channelA.PortID, channelA.ID,
- types.NewChannel(types.OPEN, types.ORDERED, types.NewCounterparty(channelB.PortID, channelB.ID), []string{connA.ID}, channelA.Version),
+ path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID,
+ types.NewChannel(types.OPEN, types.ORDERED, types.NewCounterparty(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID), []string{path.EndpointA.ConnectionID}, path.EndpointA.ChannelConfig.Version),
)
- packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
- suite.chainA.CreateChannelCapability(channelA.PortID, channelA.ID)
- channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
+ suite.chainA.CreateChannelCapability(suite.chainA.GetSimApp().ScopedIBCMockKeeper, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
+ channelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
}, false},
{"packet hasn't been sent", func() {
// packet commitment never written
- _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
- channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ suite.coordinator.Setup(path)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
+ channelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
}, false},
{"packet ack verification failed", func() {
// ack never written
- _, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ suite.coordinator.Setup(path)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
// create packet commitment
- suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
- channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ path.EndpointA.SendPacket(packet)
+ channelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
}, false},
{"next ack sequence not found", func() {
- _, _, connA, connB := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
- channelA := suite.chainA.NextTestChannel(connA, ibctesting.TransferPort)
- channelB := suite.chainB.NextTestChannel(connB, ibctesting.TransferPort)
- packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.SetupConnections(path)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
// manually creating channel prevents next sequence acknowledgement from being set
- suite.chainA.App.IBCKeeper.ChannelKeeper.SetChannel(
+ suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetChannel(
suite.chainA.GetContext(),
- channelA.PortID, channelA.ID,
- types.NewChannel(types.OPEN, types.ORDERED, types.NewCounterparty(channelB.PortID, channelB.ID), []string{connA.ID}, channelA.Version),
+ path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID,
+ types.NewChannel(types.OPEN, types.ORDERED, types.NewCounterparty(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID), []string{path.EndpointA.ConnectionID}, path.EndpointA.ChannelConfig.Version),
)
// manually set packet commitment
- suite.chainA.App.IBCKeeper.ChannelKeeper.SetPacketCommitment(suite.chainA.GetContext(), channelA.PortID, channelA.ID, packet.GetSequence(), ibctesting.MockPacketData)
+ suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetPacketCommitment(suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, packet.GetSequence(), ibctesting.MockPacketData)
// manually set packet acknowledgement and capability
- suite.chainB.App.IBCKeeper.ChannelKeeper.SetPacketAcknowledgement(suite.chainB.GetContext(), channelB.PortID, channelB.ID, packet.GetSequence(), ibctesting.MockAcknowledgement)
- suite.chainA.CreateChannelCapability(channelA.PortID, channelA.ID)
- channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
- }, false},
- {"next ack sequence mismatch", func() {
- clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED)
- packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ suite.chainB.App.GetIBCKeeper().ChannelKeeper.SetPacketAcknowledgement(suite.chainB.GetContext(), path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, packet.GetSequence(), ibctesting.MockAcknowledgement)
+ suite.chainA.CreateChannelCapability(suite.chainA.GetSimApp().ScopedIBCMockKeeper, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
+ channelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
+ }, false},
+ {"next ack sequence mismatch ORDERED", func() {
+ path.SetChannelOrdered()
+ suite.coordinator.Setup(path)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
// create packet commitment
- err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ err := path.EndpointA.SendPacket(packet)
suite.Require().NoError(err)
// create packet acknowledgement
- err = suite.coordinator.RecvPacket(suite.chainA, suite.chainB, clientA, packet)
+ err = path.EndpointB.RecvPacket(packet)
suite.Require().NoError(err)
// set next sequence ack wrong
- suite.chainA.App.IBCKeeper.ChannelKeeper.SetNextSequenceAck(suite.chainA.GetContext(), channelA.PortID, channelA.ID, 10)
- channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetNextSequenceAck(suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, 10)
+ channelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
}, false},
}
@@ -636,16 +641,18 @@ func (suite *KeeperTestSuite) TestAcknowledgePacket() {
tc := tc
suite.Run(fmt.Sprintf("Case %s, %d/%d tests", tc.msg, i, len(testCases)), func() {
suite.SetupTest() // reset
+ path = ibctesting.NewPath(suite.chainA, suite.chainB)
+
tc.malleate()
packetKey := host.PacketAcknowledgementKey(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence())
proof, proofHeight := suite.chainB.QueryProof(packetKey)
- err := suite.chainA.App.IBCKeeper.ChannelKeeper.AcknowledgePacket(suite.chainA.GetContext(), channelCap, packet, ack.Acknowledgement(), proof, proofHeight)
- pc := suite.chainA.App.IBCKeeper.ChannelKeeper.GetPacketCommitment(suite.chainA.GetContext(), packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence())
+ err := suite.chainA.App.GetIBCKeeper().ChannelKeeper.AcknowledgePacket(suite.chainA.GetContext(), channelCap, packet, ack.Acknowledgement(), proof, proofHeight)
+ pc := suite.chainA.App.GetIBCKeeper().ChannelKeeper.GetPacketCommitment(suite.chainA.GetContext(), packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence())
- channelA, _ := suite.chainA.App.IBCKeeper.ChannelKeeper.GetChannel(suite.chainA.GetContext(), packet.GetSourcePort(), packet.GetSourceChannel())
- sequenceAck, _ := suite.chainA.App.IBCKeeper.ChannelKeeper.GetNextSequenceAck(suite.chainA.GetContext(), packet.GetSourcePort(), packet.GetSourceChannel())
+ channelA, _ := suite.chainA.App.GetIBCKeeper().ChannelKeeper.GetChannel(suite.chainA.GetContext(), packet.GetSourcePort(), packet.GetSourceChannel())
+ sequenceAck, _ := suite.chainA.App.GetIBCKeeper().ChannelKeeper.GetNextSequenceAck(suite.chainA.GetContext(), packet.GetSourcePort(), packet.GetSourceChannel())
if tc.expPass {
suite.NoError(err)
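
For orientation only (not part of the patch): a minimal sketch of the path-based flow these tests migrate to, built solely from helpers that already appear in the hunks above (NewPath, SetChannelOrdered, coordinator.Setup, Endpoint.SendPacket/RecvPacket).

	// Illustrative sketch of the new ibctesting path API used throughout this diff.
	path := ibctesting.NewPath(suite.chainA, suite.chainB)
	path.SetChannelOrdered()      // omit this call for an UNORDERED channel
	suite.coordinator.Setup(path) // sets up clients, connection and channel

	packet := types.NewPacket(ibctesting.MockPacketData, 1,
		path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID,
		path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID,
		timeoutHeight, disabledTimeoutTimestamp)

	err := path.EndpointA.SendPacket(packet) // writes the packet commitment on chainA
	suite.Require().NoError(err)
	err = path.EndpointB.RecvPacket(packet) // receives and acknowledges on chainB
	suite.Require().NoError(err)
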
diff --git a/modules/core/04-channel/keeper/timeout_test.go b/modules/core/04-channel/keeper/timeout_test.go
index 94c4b6a0..ab6c4e49 100644
--- a/modules/core/04-channel/keeper/timeout_test.go
+++ b/modules/core/04-channel/keeper/timeout_test.go
@@ -16,6 +16,7 @@ import (
// verification must specify which proof to use via the ordered bool.
func (suite *KeeperTestSuite) TestTimeoutPacket() {
var (
+ path *ibctesting.Path
packet types.Packet
nextSeqRecv uint64
ordered bool
@@ -24,92 +25,98 @@ func (suite *KeeperTestSuite) TestTimeoutPacket() {
testCases := []testCase{
{"success: ORDERED", func() {
ordered = true
+ path.SetChannelOrdered()
- clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED)
- packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), uint64(suite.chainB.GetContext().BlockTime().UnixNano()))
- suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ suite.coordinator.Setup(path)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), uint64(suite.chainB.GetContext().BlockTime().UnixNano()))
+ path.EndpointA.SendPacket(packet)
// need to update chainA's client representing chainB to prove missing ack
- suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ path.EndpointA.UpdateClient()
}, true},
{"success: UNORDERED", func() {
ordered = false
- clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), disabledTimeoutTimestamp)
- suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ suite.coordinator.Setup(path)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), disabledTimeoutTimestamp)
+ path.EndpointA.SendPacket(packet)
// need to update chainA's client representing chainB to prove missing ack
- suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ path.EndpointA.UpdateClient()
}, true},
{"channel not found", func() {
// use wrong channel naming
- _, _, _, _, _, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- packet = types.NewPacket(ibctesting.MockPacketData, 1, ibctesting.InvalidID, ibctesting.InvalidID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ suite.coordinator.Setup(path)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, ibctesting.InvalidID, ibctesting.InvalidID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
}, false},
{"channel not open", func() {
- _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ suite.coordinator.Setup(path)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
- err := suite.coordinator.SetChannelClosed(suite.chainA, suite.chainB, channelA)
+ err := path.EndpointA.SetChannelClosed()
suite.Require().NoError(err)
}, false},
{"packet destination port ≠ channel counterparty port", func() {
- _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ suite.coordinator.Setup(path)
// use wrong port for dest
- packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, ibctesting.InvalidID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, ibctesting.InvalidID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
}, false},
{"packet destination channel ID ≠ channel counterparty channel ID", func() {
- _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ suite.coordinator.Setup(path)
// use wrong channel for dest
- packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, ibctesting.InvalidID, timeoutHeight, disabledTimeoutTimestamp)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, ibctesting.InvalidID, timeoutHeight, disabledTimeoutTimestamp)
}, false},
{"connection not found", func() {
- channelA := ibctesting.TestChannel{PortID: portID, ID: channelIDA}
- channelB := ibctesting.TestChannel{PortID: portID, ID: channelIDB}
// pass channel check
- suite.chainA.App.IBCKeeper.ChannelKeeper.SetChannel(
+ suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetChannel(
suite.chainA.GetContext(),
- channelA.PortID, channelA.ID,
- types.NewChannel(types.OPEN, types.ORDERED, types.NewCounterparty(channelB.PortID, channelB.ID), []string{connIDA}, channelA.Version),
+ path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID,
+ types.NewChannel(types.OPEN, types.ORDERED, types.NewCounterparty(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID), []string{connIDA}, path.EndpointA.ChannelConfig.Version),
)
- packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
}, false},
{"timeout", func() {
- clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED)
- packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
- suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
- suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ suite.coordinator.Setup(path)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
+ path.EndpointA.SendPacket(packet)
+ path.EndpointA.UpdateClient()
}, false},
{"packet already received ", func() {
ordered = true
+ path.SetChannelOrdered()
+
nextSeqRecv = 2
- clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED)
- packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, uint64(suite.chainB.GetContext().BlockTime().UnixNano()))
- suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
- suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ suite.coordinator.Setup(path)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, uint64(suite.chainB.GetContext().BlockTime().UnixNano()))
+ path.EndpointA.SendPacket(packet)
+ path.EndpointA.UpdateClient()
}, false},
{"packet hasn't been sent", func() {
- clientA, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED)
- packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, uint64(suite.chainB.GetContext().BlockTime().UnixNano()))
- suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ ordered = true
+ path.SetChannelOrdered()
+
+ suite.coordinator.Setup(path)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, uint64(suite.chainB.GetContext().BlockTime().UnixNano()))
+ path.EndpointA.UpdateClient()
}, false},
{"next seq receive verification failed", func() {
// set ordered to false, resulting in the wrong proof being provided
ordered = false
- clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED)
- packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), disabledTimeoutTimestamp)
- suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
- suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ path.SetChannelOrdered()
+
+ suite.coordinator.Setup(path)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), disabledTimeoutTimestamp)
+ path.EndpointA.SendPacket(packet)
+ path.EndpointA.UpdateClient()
}, false},
{"packet ack verification failed", func() {
// set ordered to true, resulting in the wrong proof being provided
ordered = true
- clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), disabledTimeoutTimestamp)
- suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
- suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ suite.coordinator.Setup(path)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), disabledTimeoutTimestamp)
+ path.EndpointA.SendPacket(packet)
+ path.EndpointA.UpdateClient()
}, false},
}
@@ -123,6 +130,8 @@ func (suite *KeeperTestSuite) TestTimeoutPacket() {
suite.SetupTest() // reset
nextSeqRecv = 1 // must be explicitly changed
+ path = ibctesting.NewPath(suite.chainA, suite.chainB)
+
tc.malleate()
orderedPacketKey := host.NextSequenceRecvKey(packet.GetDestPort(), packet.GetDestChannel())
@@ -134,7 +143,7 @@ func (suite *KeeperTestSuite) TestTimeoutPacket() {
proof, proofHeight = suite.chainB.QueryProof(unorderedPacketKey)
}
- err := suite.chainA.App.IBCKeeper.ChannelKeeper.TimeoutPacket(suite.chainA.GetContext(), packet, proof, proofHeight, nextSeqRecv)
+ err := suite.chainA.App.GetIBCKeeper().ChannelKeeper.TimeoutPacket(suite.chainA.GetContext(), packet, proof, proofHeight, nextSeqRecv)
if tc.expPass {
suite.Require().NoError(err)
@@ -149,27 +158,32 @@ func (suite *KeeperTestSuite) TestTimeoutPacket() {
// channel capabilities are verified.
func (suite *KeeperTestSuite) TestTimeoutExecuted() {
var (
+ path *ibctesting.Path
packet types.Packet
chanCap *capabilitytypes.Capability
)
testCases := []testCase{
{"success ORDERED", func() {
- _, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED)
- packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), uint64(suite.chainB.GetContext().BlockTime().UnixNano()))
- suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ path.SetChannelOrdered()
+ suite.coordinator.Setup(path)
+
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), uint64(suite.chainB.GetContext().BlockTime().UnixNano()))
+ path.EndpointA.SendPacket(packet)
- chanCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ chanCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
}, true},
{"channel not found", func() {
// use wrong channel naming
- _, _, _, _, _, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- packet = types.NewPacket(ibctesting.MockPacketData, 1, ibctesting.InvalidID, ibctesting.InvalidID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ suite.coordinator.Setup(path)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, ibctesting.InvalidID, ibctesting.InvalidID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
}, false},
- {"incorrect capability", func() {
- _, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED)
- packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), uint64(suite.chainB.GetContext().BlockTime().UnixNano()))
- suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ {"incorrect capability ORDERED", func() {
+ path.SetChannelOrdered()
+ suite.coordinator.Setup(path)
+
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), uint64(suite.chainB.GetContext().BlockTime().UnixNano()))
+ path.EndpointA.SendPacket(packet)
chanCap = capabilitytypes.NewCapability(100)
}, false},
@@ -179,11 +193,12 @@ func (suite *KeeperTestSuite) TestTimeoutExecuted() {
tc := tc
suite.Run(fmt.Sprintf("Case %s, %d/%d tests", tc.msg, i, len(testCases)), func() {
suite.SetupTest() // reset
+ path = ibctesting.NewPath(suite.chainA, suite.chainB)
tc.malleate()
- err := suite.chainA.App.IBCKeeper.ChannelKeeper.TimeoutExecuted(suite.chainA.GetContext(), chanCap, packet)
- pc := suite.chainA.App.IBCKeeper.ChannelKeeper.GetPacketCommitment(suite.chainA.GetContext(), packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence())
+ err := suite.chainA.App.GetIBCKeeper().ChannelKeeper.TimeoutExecuted(suite.chainA.GetContext(), chanCap, packet)
+ pc := suite.chainA.App.GetIBCKeeper().ChannelKeeper.GetPacketCommitment(suite.chainA.GetContext(), packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence())
if tc.expPass {
suite.NoError(err)
@@ -199,6 +214,7 @@ func (suite *KeeperTestSuite) TestTimeoutExecuted() {
// channel on chainB after the packet commitment has been created.
func (suite *KeeperTestSuite) TestTimeoutOnClose() {
var (
+ path *ibctesting.Path
packet types.Packet
chanCap *capabilitytypes.Capability
nextSeqRecv uint64
@@ -208,110 +224,119 @@ func (suite *KeeperTestSuite) TestTimeoutOnClose() {
testCases := []testCase{
{"success: ORDERED", func() {
ordered = true
- clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED)
- packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), uint64(suite.chainB.GetContext().BlockTime().UnixNano()))
- suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
- suite.coordinator.SetChannelClosed(suite.chainB, suite.chainA, channelB)
+ path.SetChannelOrdered()
+ suite.coordinator.Setup(path)
+
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), uint64(suite.chainB.GetContext().BlockTime().UnixNano()))
+ path.EndpointA.SendPacket(packet)
+ path.EndpointB.SetChannelClosed()
// need to update chainA's client representing chainB to prove missing ack
- suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ path.EndpointA.UpdateClient()
- chanCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ chanCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
}, true},
{"success: UNORDERED", func() {
ordered = false
- clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), disabledTimeoutTimestamp)
- suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
- suite.coordinator.SetChannelClosed(suite.chainB, suite.chainA, channelB)
+ suite.coordinator.Setup(path)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), disabledTimeoutTimestamp)
+ path.EndpointA.SendPacket(packet)
+ path.EndpointB.SetChannelClosed()
// need to update chainA's client representing chainB to prove missing ack
- suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ path.EndpointA.UpdateClient()
- chanCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ chanCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
}, true},
{"channel not found", func() {
// use wrong channel naming
- _, _, _, _, _, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- packet = types.NewPacket(ibctesting.MockPacketData, 1, ibctesting.InvalidID, ibctesting.InvalidID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ suite.coordinator.Setup(path)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, ibctesting.InvalidID, ibctesting.InvalidID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
}, false},
{"packet dest port ≠ channel counterparty port", func() {
- _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ suite.coordinator.Setup(path)
// use wrong port for dest
- packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, ibctesting.InvalidID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
- chanCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, ibctesting.InvalidID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
+ chanCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
}, false},
{"packet dest channel ID ≠ channel counterparty channel ID", func() {
- _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
+ suite.coordinator.Setup(path)
// use wrong channel for dest
- packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, ibctesting.InvalidID, timeoutHeight, disabledTimeoutTimestamp)
- chanCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, ibctesting.InvalidID, timeoutHeight, disabledTimeoutTimestamp)
+ chanCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
}, false},
{"connection not found", func() {
- channelA := ibctesting.TestChannel{PortID: portID, ID: channelIDA}
- channelB := ibctesting.TestChannel{PortID: portID, ID: channelIDB}
// pass channel check
- suite.chainA.App.IBCKeeper.ChannelKeeper.SetChannel(
+ suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetChannel(
suite.chainA.GetContext(),
- channelA.PortID, channelA.ID,
- types.NewChannel(types.OPEN, types.ORDERED, types.NewCounterparty(channelB.PortID, channelB.ID), []string{connIDA}, channelA.Version),
+ path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID,
+ types.NewChannel(types.OPEN, types.ORDERED, types.NewCounterparty(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID), []string{connIDA}, path.EndpointA.ChannelConfig.Version),
)
- packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
// create chancap
- suite.chainA.CreateChannelCapability(channelA.PortID, channelA.ID)
- chanCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ suite.chainA.CreateChannelCapability(suite.chainA.GetSimApp().ScopedIBCMockKeeper, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
+ chanCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
}, false},
- {"packet hasn't been sent", func() {
- _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED)
- packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), uint64(suite.chainB.GetContext().BlockTime().UnixNano()))
- chanCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ {"packet hasn't been sent ORDERED", func() {
+ path.SetChannelOrdered()
+ suite.coordinator.Setup(path)
+
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), uint64(suite.chainB.GetContext().BlockTime().UnixNano()))
+ chanCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
}, false},
- {"packet already received", func() {
+ {"packet already received ORDERED", func() {
+ path.SetChannelOrdered()
nextSeqRecv = 2
ordered = true
- clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED)
- packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), uint64(suite.chainB.GetContext().BlockTime().UnixNano()))
- suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
- suite.coordinator.SetChannelClosed(suite.chainB, suite.chainA, channelB)
+ suite.coordinator.Setup(path)
+
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), uint64(suite.chainB.GetContext().BlockTime().UnixNano()))
+ path.EndpointA.SendPacket(packet)
+ path.EndpointB.SetChannelClosed()
// need to update chainA's client representing chainB to prove missing ack
- suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ path.EndpointA.UpdateClient()
- chanCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ chanCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
}, false},
- {"channel verification failed", func() {
+ {"channel verification failed ORDERED", func() {
ordered = true
- _, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED)
- packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), uint64(suite.chainB.GetContext().BlockTime().UnixNano()))
- suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
- chanCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ path.SetChannelOrdered()
+ suite.coordinator.Setup(path)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), uint64(suite.chainB.GetContext().BlockTime().UnixNano()))
+ path.EndpointA.SendPacket(packet)
+ chanCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
}, false},
- {"next seq receive verification failed", func() {
+ {"next seq receive verification failed ORDERED", func() {
// set ordered to false providing the wrong proof for ORDERED case
ordered = false
- clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED)
- packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), uint64(suite.chainB.GetContext().BlockTime().UnixNano()))
- suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
- suite.coordinator.SetChannelClosed(suite.chainB, suite.chainA, channelB)
- suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
- chanCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+
+ path.SetChannelOrdered()
+ suite.coordinator.Setup(path)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), uint64(suite.chainB.GetContext().BlockTime().UnixNano()))
+ path.EndpointA.SendPacket(packet)
+ path.EndpointB.SetChannelClosed()
+ path.EndpointA.UpdateClient()
+ chanCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
}, false},
{"packet ack verification failed", func() {
// set ordered to true providing the wrong proof for UNORDERED case
ordered = true
- clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED)
- packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), disabledTimeoutTimestamp)
- suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
- suite.coordinator.SetChannelClosed(suite.chainB, suite.chainA, channelB)
- suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
- chanCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID)
+ suite.coordinator.Setup(path)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), disabledTimeoutTimestamp)
+ path.EndpointA.SendPacket(packet)
+ path.EndpointB.SetChannelClosed()
+ path.EndpointA.UpdateClient()
+ chanCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
}, false},
- {"channel capability not found", func() {
+ {"channel capability not found ORDERED", func() {
ordered = true
- clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED)
- packet = types.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), uint64(suite.chainB.GetContext().BlockTime().UnixNano()))
- suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
- suite.coordinator.SetChannelClosed(suite.chainB, suite.chainA, channelB)
+ path.SetChannelOrdered()
+ suite.coordinator.Setup(path)
+
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), uint64(suite.chainB.GetContext().BlockTime().UnixNano()))
+ path.EndpointA.SendPacket(packet)
+ path.EndpointB.SetChannelClosed()
// need to update chainA's client representing chainB to prove missing ack
- suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ path.EndpointA.UpdateClient()
chanCap = capabilitytypes.NewCapability(100)
}, false},
@@ -324,6 +349,8 @@ func (suite *KeeperTestSuite) TestTimeoutOnClose() {
suite.SetupTest() // reset
nextSeqRecv = 1 // must be explicitly changed
+ path = ibctesting.NewPath(suite.chainA, suite.chainB)
+
tc.malleate()
channelKey := host.ChannelKey(packet.GetDestPort(), packet.GetDestChannel())
@@ -338,7 +365,7 @@ func (suite *KeeperTestSuite) TestTimeoutOnClose() {
proof, _ = suite.chainB.QueryProof(unorderedPacketKey)
}
- err := suite.chainA.App.IBCKeeper.ChannelKeeper.TimeoutOnClose(suite.chainA.GetContext(), chanCap, packet, proof, proofClosed, proofHeight, nextSeqRecv)
+ err := suite.chainA.App.GetIBCKeeper().ChannelKeeper.TimeoutOnClose(suite.chainA.GetContext(), chanCap, packet, proof, proofClosed, proofHeight, nextSeqRecv)
if tc.expPass {
suite.Require().NoError(err)
diff --git a/modules/core/genesis_test.go b/modules/core/genesis_test.go
index 0e17f6cd..924fb42e 100644
--- a/modules/core/genesis_test.go
+++ b/modules/core/genesis_test.go
@@ -8,7 +8,6 @@ import (
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
"github.com/cosmos/cosmos-sdk/codec"
- "github.com/cosmos/ibc-go/testing/simapp"
ibc "github.com/cosmos/ibc-go/modules/core"
clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
connectiontypes "github.com/cosmos/ibc-go/modules/core/03-connection/types"
@@ -19,6 +18,7 @@ import (
ibctmtypes "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types"
localhosttypes "github.com/cosmos/ibc-go/modules/light-clients/09-localhost/types"
ibctesting "github.com/cosmos/ibc-go/testing"
+ "github.com/cosmos/ibc-go/testing/simapp"
)
const (
@@ -331,10 +331,10 @@ func (suite *IBCTestSuite) TestExportGenesis() {
"success",
func() {
// creates clients
- suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
+ suite.coordinator.Setup(ibctesting.NewPath(suite.chainA, suite.chainB))
// create extra clients
- suite.coordinator.CreateClient(suite.chainA, suite.chainB, exported.Tendermint)
- suite.coordinator.CreateClient(suite.chainA, suite.chainB, exported.Tendermint)
+ suite.coordinator.SetupClients(ibctesting.NewPath(suite.chainA, suite.chainB))
+ suite.coordinator.SetupClients(ibctesting.NewPath(suite.chainA, suite.chainB))
},
},
}
@@ -347,23 +347,23 @@ func (suite *IBCTestSuite) TestExportGenesis() {
var gs *types.GenesisState
suite.NotPanics(func() {
- gs = ibc.ExportGenesis(suite.chainA.GetContext(), *suite.chainA.App.IBCKeeper)
+ gs = ibc.ExportGenesis(suite.chainA.GetContext(), *suite.chainA.App.GetIBCKeeper())
})
// init genesis based on export
suite.NotPanics(func() {
- ibc.InitGenesis(suite.chainA.GetContext(), *suite.chainA.App.IBCKeeper, true, gs)
+ ibc.InitGenesis(suite.chainA.GetContext(), *suite.chainA.App.GetIBCKeeper(), true, gs)
})
suite.NotPanics(func() {
- cdc := codec.NewProtoCodec(suite.chainA.App.InterfaceRegistry())
+ cdc := codec.NewProtoCodec(suite.chainA.GetSimApp().InterfaceRegistry())
genState := cdc.MustMarshalJSON(gs)
cdc.MustUnmarshalJSON(genState, gs)
})
// init genesis based on marshal and unmarshal
suite.NotPanics(func() {
- ibc.InitGenesis(suite.chainA.GetContext(), *suite.chainA.App.IBCKeeper, true, gs)
+ ibc.InitGenesis(suite.chainA.GetContext(), *suite.chainA.App.GetIBCKeeper(), true, gs)
})
})
}
diff --git a/modules/core/keeper/msg_server_test.go b/modules/core/keeper/msg_server_test.go
index 97d2c4ab..5660c32d 100644
--- a/modules/core/keeper/msg_server_test.go
+++ b/modules/core/keeper/msg_server_test.go
@@ -40,9 +40,12 @@ func (suite *KeeperTestSuite) SetupTest() {
suite.chainA = suite.coordinator.GetChain(ibctesting.GetChainID(0))
suite.chainB = suite.coordinator.GetChain(ibctesting.GetChainID(1))
+
+ // TODO: remove
// commit some blocks so that QueryProof returns valid proof (cannot return valid query if height <= 1)
suite.coordinator.CommitNBlocks(suite.chainA, 2)
suite.coordinator.CommitNBlocks(suite.chainB, 2)
+
}
func TestIBCTestSuite(t *testing.T) {
@@ -57,6 +60,7 @@ func TestIBCTestSuite(t *testing.T) {
func (suite *KeeperTestSuite) TestHandleRecvPacket() {
var (
packet channeltypes.Packet
+ path *ibctesting.Path
async bool // indicate no ack written
)
@@ -67,62 +71,65 @@ func (suite *KeeperTestSuite) TestHandleRecvPacket() {
expRevert bool
}{
{"success: ORDERED", func() {
- _, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.ORDERED)
- packet = channeltypes.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
+ path.SetChannelOrdered()
+ suite.coordinator.Setup(path)
+ packet = channeltypes.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, 0)
- err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ err := path.EndpointA.SendPacket(packet)
suite.Require().NoError(err)
}, true, false},
{"success: UNORDERED", func() {
- _, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
- packet = channeltypes.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
+ suite.coordinator.Setup(path)
+ packet = channeltypes.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, 0)
- err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ err := path.EndpointA.SendPacket(packet)
suite.Require().NoError(err)
}, true, false},
{"success: UNORDERED out of order packet", func() {
// setup uses an UNORDERED channel
- _, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
+ suite.coordinator.Setup(path)
// attempts to receive packet with sequence 10 without receiving packet with sequence 1
for i := uint64(1); i < 10; i++ {
- packet = channeltypes.NewPacket(ibctesting.MockPacketData, i, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
+ packet = channeltypes.NewPacket(ibctesting.MockPacketData, i, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, 0)
- err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ err := path.EndpointA.SendPacket(packet)
suite.Require().NoError(err)
}
}, true, false},
{"success: OnRecvPacket callback returns revert=true", func() {
- _, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
- packet = channeltypes.NewPacket(ibctesting.MockFailPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
+ suite.coordinator.Setup(path)
+ packet = channeltypes.NewPacket(ibctesting.MockFailPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, 0)
- err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ err := path.EndpointA.SendPacket(packet)
suite.Require().NoError(err)
}, true, true},
{"success: ORDERED - async acknowledgement", func() {
+ path.SetChannelOrdered()
+ suite.coordinator.Setup(path)
async = true
- _, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.ORDERED)
- packet = channeltypes.NewPacket(ibcmock.MockAsyncPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
+ packet = channeltypes.NewPacket(ibcmock.MockAsyncPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, 0)
- err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ err := path.EndpointA.SendPacket(packet)
suite.Require().NoError(err)
}, true, false},
{"success: UNORDERED - async acknowledgement", func() {
+ suite.coordinator.Setup(path)
async = true
- _, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
- packet = channeltypes.NewPacket(ibcmock.MockAsyncPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
+ packet = channeltypes.NewPacket(ibcmock.MockAsyncPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, 0)
- err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ err := path.EndpointA.SendPacket(packet)
suite.Require().NoError(err)
}, true, false},
{"failure: ORDERED out of order packet", func() {
- _, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.ORDERED)
+ path.SetChannelOrdered()
+ suite.coordinator.Setup(path)
// attempts to receive packet with sequence 10 without receiving packet with sequence 1
for i := uint64(1); i < 10; i++ {
- packet = channeltypes.NewPacket(ibctesting.MockPacketData, i, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
+ packet = channeltypes.NewPacket(ibctesting.MockPacketData, i, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, 0)
- err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ err := path.EndpointA.SendPacket(packet)
suite.Require().NoError(err)
}
}, false, false},
@@ -131,28 +138,28 @@ func (suite *KeeperTestSuite) TestHandleRecvPacket() {
suite.Require().NotNil(packet)
}, false, false},
{"packet not sent", func() {
- _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
- packet = channeltypes.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
+ suite.coordinator.Setup(path)
+ packet = channeltypes.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, 0)
}, false, false},
{"ORDERED: packet already received (replay)", func() {
- clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.ORDERED)
- packet = channeltypes.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
+ path.SetChannelOrdered()
+ suite.coordinator.Setup(path)
+ packet = channeltypes.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, 0)
- err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ err := path.EndpointA.SendPacket(packet)
suite.Require().NoError(err)
- err = suite.coordinator.RecvPacket(suite.chainA, suite.chainB, clientA, packet)
+ err = path.EndpointB.RecvPacket(packet)
suite.Require().NoError(err)
}, false, false},
{"UNORDERED: packet already received (replay)", func() {
- clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
-
- packet = channeltypes.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
+ suite.coordinator.Setup(path)
+ packet = channeltypes.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, 0)
- err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ err := path.EndpointA.SendPacket(packet)
suite.Require().NoError(err)
- err = suite.coordinator.RecvPacket(suite.chainA, suite.chainB, clientA, packet)
+ err = path.EndpointB.RecvPacket(packet)
suite.Require().NoError(err)
}, false, false},
}
@@ -163,6 +170,7 @@ func (suite *KeeperTestSuite) TestHandleRecvPacket() {
suite.Run(tc.name, func() {
suite.SetupTest() // reset
async = false // reset
+ path = ibctesting.NewPath(suite.chainA, suite.chainB)
tc.malleate()
@@ -172,17 +180,17 @@ func (suite *KeeperTestSuite) TestHandleRecvPacket() {
msg := channeltypes.NewMsgRecvPacket(packet, proof, proofHeight, suite.chainB.SenderAccount.GetAddress().String())
- _, err := keeper.Keeper.RecvPacket(*suite.chainB.App.IBCKeeper, sdk.WrapSDKContext(suite.chainB.GetContext()), msg)
+ _, err := keeper.Keeper.RecvPacket(*suite.chainB.App.GetIBCKeeper(), sdk.WrapSDKContext(suite.chainB.GetContext()), msg)
if tc.expPass {
suite.Require().NoError(err)
// replay should fail since state changes occur
- _, err := keeper.Keeper.RecvPacket(*suite.chainB.App.IBCKeeper, sdk.WrapSDKContext(suite.chainB.GetContext()), msg)
+ _, err := keeper.Keeper.RecvPacket(*suite.chainB.App.GetIBCKeeper(), sdk.WrapSDKContext(suite.chainB.GetContext()), msg)
suite.Require().Error(err)
// check that callback state was handled correctly
- _, exists := suite.chainB.App.ScopedIBCMockKeeper.GetCapability(suite.chainB.GetContext(), ibctesting.MockCanaryCapabilityName)
+ _, exists := suite.chainB.GetSimApp().ScopedIBCMockKeeper.GetCapability(suite.chainB.GetContext(), ibctesting.MockCanaryCapabilityName)
if tc.expRevert {
suite.Require().False(exists, "capability exists in store even after callback reverted")
} else {
@@ -190,7 +198,7 @@ func (suite *KeeperTestSuite) TestHandleRecvPacket() {
}
// verify if ack was written
- ack, found := suite.chainB.App.IBCKeeper.ChannelKeeper.GetPacketAcknowledgement(suite.chainB.GetContext(), packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence())
+ ack, found := suite.chainB.App.GetIBCKeeper().ChannelKeeper.GetPacketAcknowledgement(suite.chainB.GetContext(), packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence())
if async {
suite.Require().Nil(ack)
@@ -215,6 +223,7 @@ func (suite *KeeperTestSuite) TestHandleRecvPacket() {
func (suite *KeeperTestSuite) TestHandleAcknowledgePacket() {
var (
packet channeltypes.Packet
+ path *ibctesting.Path
)
testCases := []struct {
@@ -223,51 +232,53 @@ func (suite *KeeperTestSuite) TestHandleAcknowledgePacket() {
expPass bool
}{
{"success: ORDERED", func() {
- clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.ORDERED)
- packet = channeltypes.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
+ path.SetChannelOrdered()
+ suite.coordinator.Setup(path)
+ packet = channeltypes.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, 0)
- err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ err := path.EndpointA.SendPacket(packet)
suite.Require().NoError(err)
- err = suite.coordinator.RecvPacket(suite.chainA, suite.chainB, clientA, packet)
+ err = path.EndpointB.RecvPacket(packet)
suite.Require().NoError(err)
}, true},
{"success: UNORDERED", func() {
- clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
- packet = channeltypes.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
+ suite.coordinator.Setup(path)
+ packet = channeltypes.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, 0)
- err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ err := path.EndpointA.SendPacket(packet)
suite.Require().NoError(err)
- err = suite.coordinator.RecvPacket(suite.chainA, suite.chainB, clientA, packet)
+ err = path.EndpointB.RecvPacket(packet)
suite.Require().NoError(err)
}, true},
{"success: UNORDERED acknowledge out of order packet", func() {
// setup uses an UNORDERED channel
- clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
+ suite.coordinator.Setup(path)
// attempts to acknowledge ack with sequence 10 without acknowledging ack with sequence 1 (removing packet commitment)
for i := uint64(1); i < 10; i++ {
- packet = channeltypes.NewPacket(ibctesting.MockPacketData, i, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
+ packet = channeltypes.NewPacket(ibctesting.MockPacketData, i, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, 0)
- err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ err := path.EndpointA.SendPacket(packet)
suite.Require().NoError(err)
- err = suite.coordinator.RecvPacket(suite.chainA, suite.chainB, clientA, packet)
+ err = path.EndpointB.RecvPacket(packet)
suite.Require().NoError(err)
}
}, true},
{"failure: ORDERED acknowledge out of order packet", func() {
- clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.ORDERED)
+ path.SetChannelOrdered()
+ suite.coordinator.Setup(path)
// attempts to acknowledge ack with sequence 10 without acknowledging ack with sequence 1 (removing packet commitment)
for i := uint64(1); i < 10; i++ {
- packet = channeltypes.NewPacket(ibctesting.MockPacketData, i, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
+ packet = channeltypes.NewPacket(ibctesting.MockPacketData, i, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, 0)
- err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ err := path.EndpointA.SendPacket(packet)
suite.Require().NoError(err)
- err = suite.coordinator.RecvPacket(suite.chainA, suite.chainB, clientA, packet)
+ err = path.EndpointB.RecvPacket(packet)
suite.Require().NoError(err)
}
}, false},
@@ -276,37 +287,37 @@ func (suite *KeeperTestSuite) TestHandleAcknowledgePacket() {
suite.Require().NotNil(packet)
}, false},
{"packet not received", func() {
- _, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
- packet = channeltypes.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
+ suite.coordinator.Setup(path)
+ packet = channeltypes.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, 0)
- err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ err := path.EndpointA.SendPacket(packet)
suite.Require().NoError(err)
}, false},
{"ORDERED: packet already acknowledged (replay)", func() {
- clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.ORDERED)
- packet = channeltypes.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
+ suite.coordinator.Setup(path)
+ packet = channeltypes.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, 0)
- err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ err := path.EndpointA.SendPacket(packet)
suite.Require().NoError(err)
- err = suite.coordinator.RecvPacket(suite.chainA, suite.chainB, clientA, packet)
+ err = path.EndpointB.RecvPacket(packet)
suite.Require().NoError(err)
- err = suite.coordinator.AcknowledgePacket(suite.chainA, suite.chainB, clientB, packet, ibctesting.MockAcknowledgement)
+ err = path.EndpointA.AcknowledgePacket(packet, ibctesting.MockAcknowledgement)
suite.Require().NoError(err)
}, false},
{"UNORDERED: packet already acknowledged (replay)", func() {
- clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
+ suite.coordinator.Setup(path)
- packet = channeltypes.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
+ packet = channeltypes.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, 0)
- err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ err := path.EndpointA.SendPacket(packet)
suite.Require().NoError(err)
- err = suite.coordinator.RecvPacket(suite.chainA, suite.chainB, clientA, packet)
+ err = path.EndpointB.RecvPacket(packet)
suite.Require().NoError(err)
- err = suite.coordinator.AcknowledgePacket(suite.chainA, suite.chainB, clientB, packet, ibctesting.MockAcknowledgement)
+ err = path.EndpointA.AcknowledgePacket(packet, ibctesting.MockAcknowledgement)
suite.Require().NoError(err)
}, false},
}
@@ -316,6 +327,7 @@ func (suite *KeeperTestSuite) TestHandleAcknowledgePacket() {
suite.Run(tc.name, func() {
suite.SetupTest() // reset
+ path = ibctesting.NewPath(suite.chainA, suite.chainB)
tc.malleate()
@@ -324,17 +336,17 @@ func (suite *KeeperTestSuite) TestHandleAcknowledgePacket() {
msg := channeltypes.NewMsgAcknowledgement(packet, ibcmock.MockAcknowledgement.Acknowledgement(), proof, proofHeight, suite.chainA.SenderAccount.GetAddress().String())
- _, err := keeper.Keeper.Acknowledgement(*suite.chainA.App.IBCKeeper, sdk.WrapSDKContext(suite.chainA.GetContext()), msg)
+ _, err := keeper.Keeper.Acknowledgement(*suite.chainA.App.GetIBCKeeper(), sdk.WrapSDKContext(suite.chainA.GetContext()), msg)
if tc.expPass {
suite.Require().NoError(err)
// replay should return an error
- _, err := keeper.Keeper.Acknowledgement(*suite.chainA.App.IBCKeeper, sdk.WrapSDKContext(suite.chainA.GetContext()), msg)
+ _, err := keeper.Keeper.Acknowledgement(*suite.chainA.App.GetIBCKeeper(), sdk.WrapSDKContext(suite.chainA.GetContext()), msg)
suite.Require().Error(err)
// verify packet commitment was deleted on source chain
- has := suite.chainA.App.IBCKeeper.ChannelKeeper.HasPacketCommitment(suite.chainA.GetContext(), packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence())
+ has := suite.chainA.App.GetIBCKeeper().ChannelKeeper.HasPacketCommitment(suite.chainA.GetContext(), packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence())
suite.Require().False(has)
} else {
@@ -353,6 +365,7 @@ func (suite *KeeperTestSuite) TestHandleTimeoutPacket() {
var (
packet channeltypes.Packet
packetKey []byte
+ path *ibctesting.Path
)
testCases := []struct {
@@ -361,62 +374,64 @@ func (suite *KeeperTestSuite) TestHandleTimeoutPacket() {
expPass bool
}{
{"success: ORDERED", func() {
- clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.ORDERED)
- packet = channeltypes.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), uint64(suite.chainB.GetContext().BlockTime().UnixNano()))
+ path.SetChannelOrdered()
+ suite.coordinator.Setup(path)
+ packet = channeltypes.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), uint64(suite.chainB.GetContext().BlockTime().UnixNano()))
// create packet commitment
- err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ err := path.EndpointA.SendPacket(packet)
suite.Require().NoError(err)
// need to update chainA client to prove missing ack
- suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ path.EndpointA.UpdateClient()
packetKey = host.NextSequenceRecvKey(packet.GetDestPort(), packet.GetDestChannel())
}, true},
{"success: UNORDERED", func() {
- clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
- packet = channeltypes.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), uint64(suite.chainB.GetContext().BlockTime().UnixNano()))
+ suite.coordinator.Setup(path)
+ packet = channeltypes.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), uint64(suite.chainB.GetContext().BlockTime().UnixNano()))
// create packet commitment
- err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ err := path.EndpointA.SendPacket(packet)
suite.Require().NoError(err)
// need to update chainA client to prove missing ack
- suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ path.EndpointA.UpdateClient()
packetKey = host.PacketReceiptKey(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence())
}, true},
{"success: UNORDERED timeout out of order packet", func() {
// setup uses an UNORDERED channel
- clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
+ suite.coordinator.Setup(path)
// attempts to timeout the last packet sent without timing out the first packet
// packet sequences begin at 1
for i := uint64(1); i < maxSequence; i++ {
- packet = channeltypes.NewPacket(ibctesting.MockPacketData, i, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), 0)
+ packet = channeltypes.NewPacket(ibctesting.MockPacketData, i, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), 0)
// create packet commitment
- err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ err := path.EndpointA.SendPacket(packet)
suite.Require().NoError(err)
}
- suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ path.EndpointA.UpdateClient()
packetKey = host.PacketReceiptKey(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence())
}, true},
{"success: ORDERED timeout out of order packet", func() {
- clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.ORDERED)
+ path.SetChannelOrdered()
+ suite.coordinator.Setup(path)
// attempts to timeout the last packet sent without timing out the first packet
// packet sequences begin at 1
for i := uint64(1); i < maxSequence; i++ {
- packet = channeltypes.NewPacket(ibctesting.MockPacketData, i, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), 0)
+ packet = channeltypes.NewPacket(ibctesting.MockPacketData, i, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), 0)
// create packet commitment
- err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ err := path.EndpointA.SendPacket(packet)
suite.Require().NoError(err)
}
- suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ path.EndpointA.UpdateClient()
packetKey = host.NextSequenceRecvKey(packet.GetDestPort(), packet.GetDestChannel())
}, true},
@@ -427,8 +442,8 @@ func (suite *KeeperTestSuite) TestHandleTimeoutPacket() {
packetKey = host.NextSequenceRecvKey(packet.GetDestPort(), packet.GetDestChannel())
}, false},
{"UNORDERED: packet not sent", func() {
- _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
- packet = channeltypes.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
+ suite.coordinator.Setup(path)
+ packet = channeltypes.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, 0)
packetKey = host.PacketReceiptKey(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence())
}, false},
}
@@ -438,6 +453,7 @@ func (suite *KeeperTestSuite) TestHandleTimeoutPacket() {
suite.Run(tc.name, func() {
suite.SetupTest() // reset
+ path = ibctesting.NewPath(suite.chainA, suite.chainB)
tc.malleate()
@@ -445,17 +461,17 @@ func (suite *KeeperTestSuite) TestHandleTimeoutPacket() {
msg := channeltypes.NewMsgTimeout(packet, 1, proof, proofHeight, suite.chainA.SenderAccount.GetAddress().String())
- _, err := keeper.Keeper.Timeout(*suite.chainA.App.IBCKeeper, sdk.WrapSDKContext(suite.chainA.GetContext()), msg)
+ _, err := keeper.Keeper.Timeout(*suite.chainA.App.GetIBCKeeper(), sdk.WrapSDKContext(suite.chainA.GetContext()), msg)
if tc.expPass {
suite.Require().NoError(err)
// replay should return an error
- _, err := keeper.Keeper.Timeout(*suite.chainA.App.IBCKeeper, sdk.WrapSDKContext(suite.chainA.GetContext()), msg)
+ _, err := keeper.Keeper.Timeout(*suite.chainA.App.GetIBCKeeper(), sdk.WrapSDKContext(suite.chainA.GetContext()), msg)
suite.Require().Error(err)
// verify packet commitment was deleted on source chain
- has := suite.chainA.App.IBCKeeper.ChannelKeeper.HasPacketCommitment(suite.chainA.GetContext(), packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence())
+ has := suite.chainA.App.GetIBCKeeper().ChannelKeeper.HasPacketCommitment(suite.chainA.GetContext(), packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence())
suite.Require().False(has)
} else {
@@ -472,9 +488,9 @@ func (suite *KeeperTestSuite) TestHandleTimeoutPacket() {
// 'TimeoutExecuted' can be found in 04-channel/keeper/timeout_test.go.
func (suite *KeeperTestSuite) TestHandleTimeoutOnClosePacket() {
var (
- packet channeltypes.Packet
- packetKey []byte
- counterpartyChannel ibctesting.TestChannel
+ packet channeltypes.Packet
+ packetKey []byte
+ path *ibctesting.Path
)
testCases := []struct {
@@ -483,95 +499,77 @@ func (suite *KeeperTestSuite) TestHandleTimeoutOnClosePacket() {
expPass bool
}{
{"success: ORDERED", func() {
- clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.ORDERED)
- packet = channeltypes.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
- counterpartyChannel = ibctesting.TestChannel{
- PortID: channelB.PortID,
- ID: channelB.ID,
- CounterpartyClientID: clientA,
- }
+ path.SetChannelOrdered()
+ suite.coordinator.Setup(path)
+ packet = channeltypes.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, 0)
// create packet commitment
- err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ err := path.EndpointA.SendPacket(packet)
suite.Require().NoError(err)
// need to update chainA client to prove missing ack
- suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ path.EndpointA.UpdateClient()
packetKey = host.NextSequenceRecvKey(packet.GetDestPort(), packet.GetDestChannel())
// close counterparty channel
- suite.coordinator.SetChannelClosed(suite.chainB, suite.chainA, counterpartyChannel)
+ path.EndpointB.SetChannelClosed()
}, true},
{"success: UNORDERED", func() {
- clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
- packet = channeltypes.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
- counterpartyChannel = ibctesting.TestChannel{
- PortID: channelB.PortID,
- ID: channelB.ID,
- CounterpartyClientID: clientA,
- }
+ suite.coordinator.Setup(path)
+ packet = channeltypes.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, 0)
// create packet commitment
- err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ err := path.EndpointA.SendPacket(packet)
suite.Require().NoError(err)
// need to update chainA client to prove missing ack
- suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ path.EndpointA.UpdateClient()
packetKey = host.PacketReceiptKey(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence())
// close counterparty channel
- suite.coordinator.SetChannelClosed(suite.chainB, suite.chainA, counterpartyChannel)
+ path.EndpointB.SetChannelClosed()
}, true},
{"success: UNORDERED timeout out of order packet", func() {
// setup uses an UNORDERED channel
- clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
- counterpartyChannel = ibctesting.TestChannel{
- PortID: channelB.PortID,
- ID: channelB.ID,
- CounterpartyClientID: clientA,
- }
+ suite.coordinator.Setup(path)
// attempts to timeout the last packet sent without timing out the first packet
// packet sequences begin at 1
for i := uint64(1); i < maxSequence; i++ {
- packet = channeltypes.NewPacket(ibctesting.MockPacketData, i, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
+ packet = channeltypes.NewPacket(ibctesting.MockPacketData, i, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, 0)
// create packet commitment
- err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ err := path.EndpointA.SendPacket(packet)
suite.Require().NoError(err)
}
- suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ path.EndpointA.UpdateClient()
packetKey = host.PacketReceiptKey(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence())
// close counterparty channel
- suite.coordinator.SetChannelClosed(suite.chainB, suite.chainA, counterpartyChannel)
+ path.EndpointB.SetChannelClosed()
}, true},
{"success: ORDERED timeout out of order packet", func() {
- clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.ORDERED)
- counterpartyChannel = ibctesting.TestChannel{
- PortID: channelB.PortID,
- ID: channelB.ID,
- CounterpartyClientID: clientA,
- }
+ path.SetChannelOrdered()
+ suite.coordinator.Setup(path)
// attempts to timeout the last packet sent without timing out the first packet
// packet sequences begin at 1
for i := uint64(1); i < maxSequence; i++ {
- packet = channeltypes.NewPacket(ibctesting.MockPacketData, i, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
+ packet = channeltypes.NewPacket(ibctesting.MockPacketData, i, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, 0)
// create packet commitment
- err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ err := path.EndpointA.SendPacket(packet)
suite.Require().NoError(err)
}
- suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ path.EndpointA.UpdateClient()
packetKey = host.NextSequenceRecvKey(packet.GetDestPort(), packet.GetDestChannel())
// close counterparty channel
- suite.coordinator.SetChannelClosed(suite.chainB, suite.chainA, counterpartyChannel)
+ path.EndpointB.SetChannelClosed()
}, true},
{"channel does not exist", func() {
// any non-nil value of packet is valid
@@ -580,33 +578,24 @@ func (suite *KeeperTestSuite) TestHandleTimeoutOnClosePacket() {
packetKey = host.NextSequenceRecvKey(packet.GetDestPort(), packet.GetDestChannel())
}, false},
{"UNORDERED: packet not sent", func() {
- clientA, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
- packet = channeltypes.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
+ suite.coordinator.Setup(path)
+ packet = channeltypes.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, 0)
packetKey = host.PacketAcknowledgementKey(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence())
- counterpartyChannel = ibctesting.TestChannel{
- PortID: channelB.PortID,
- ID: channelB.ID,
- CounterpartyClientID: clientA,
- }
// close counterparty channel
- suite.coordinator.SetChannelClosed(suite.chainB, suite.chainA, counterpartyChannel)
+ path.EndpointB.SetChannelClosed()
}, false},
{"ORDERED: channel not closed", func() {
- clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.ORDERED)
- packet = channeltypes.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)
- counterpartyChannel = ibctesting.TestChannel{
- PortID: channelB.PortID,
- ID: channelB.ID,
- CounterpartyClientID: clientA,
- }
+ path.SetChannelOrdered()
+ suite.coordinator.Setup(path)
+ packet = channeltypes.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, 0)
// create packet commitment
- err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ err := path.EndpointA.SendPacket(packet)
suite.Require().NoError(err)
// need to update chainA client to prove missing ack
- suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ path.EndpointA.UpdateClient()
packetKey = host.NextSequenceRecvKey(packet.GetDestPort(), packet.GetDestChannel())
}, false},
@@ -617,27 +606,28 @@ func (suite *KeeperTestSuite) TestHandleTimeoutOnClosePacket() {
suite.Run(tc.name, func() {
suite.SetupTest() // reset
+ path = ibctesting.NewPath(suite.chainA, suite.chainB)
tc.malleate()
proof, proofHeight := suite.chainB.QueryProof(packetKey)
- channelKey := host.ChannelKey(counterpartyChannel.PortID, counterpartyChannel.ID)
+ channelKey := host.ChannelKey(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID)
proofClosed, _ := suite.chainB.QueryProof(channelKey)
msg := channeltypes.NewMsgTimeoutOnClose(packet, 1, proof, proofClosed, proofHeight, suite.chainA.SenderAccount.GetAddress().String())
- _, err := keeper.Keeper.TimeoutOnClose(*suite.chainA.App.IBCKeeper, sdk.WrapSDKContext(suite.chainA.GetContext()), msg)
+ _, err := keeper.Keeper.TimeoutOnClose(*suite.chainA.App.GetIBCKeeper(), sdk.WrapSDKContext(suite.chainA.GetContext()), msg)
if tc.expPass {
suite.Require().NoError(err)
// replay should return an error
- _, err := keeper.Keeper.TimeoutOnClose(*suite.chainA.App.IBCKeeper, sdk.WrapSDKContext(suite.chainA.GetContext()), msg)
+ _, err := keeper.Keeper.TimeoutOnClose(*suite.chainA.App.GetIBCKeeper(), sdk.WrapSDKContext(suite.chainA.GetContext()), msg)
suite.Require().Error(err)
// verify packet commitment was deleted on source chain
- has := suite.chainA.App.IBCKeeper.ChannelKeeper.HasPacketCommitment(suite.chainA.GetContext(), packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence())
+ has := suite.chainA.App.GetIBCKeeper().ChannelKeeper.HasPacketCommitment(suite.chainA.GetContext(), packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence())
suite.Require().False(has)
} else {
@@ -649,7 +639,7 @@ func (suite *KeeperTestSuite) TestHandleTimeoutOnClosePacket() {
func (suite *KeeperTestSuite) TestUpgradeClient() {
var (
- clientA string
+ path *ibctesting.Path
upgradedClient exported.ClientState
upgradedConsState exported.ConsensusState
lastHeight exported.Height
@@ -684,21 +674,21 @@ func (suite *KeeperTestSuite) TestUpgradeClient() {
suite.Require().NoError(err)
// zero custom fields and store in upgrade store
- suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz)
- suite.chainB.App.UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz)
+ suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz)
+ suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz)
// commit upgrade store changes and update clients
suite.coordinator.CommitBlock(suite.chainB)
- err = suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ err = path.EndpointA.UpdateClient()
suite.Require().NoError(err)
- cs, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), clientA)
+ cs, found := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(suite.chainA.GetContext(), path.EndpointA.ClientID)
suite.Require().True(found)
proofUpgradeClient, _ := suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight())
proofUpgradedConsState, _ := suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight())
- msg, err = clienttypes.NewMsgUpgradeClient(clientA, upgradedClient, upgradedConsState,
+ msg, err = clienttypes.NewMsgUpgradeClient(path.EndpointA.ClientID, upgradedClient, upgradedConsState,
proofUpgradeClient, proofUpgradedConsState, suite.chainA.SenderAccount.GetAddress().String())
suite.Require().NoError(err)
},
@@ -725,15 +715,15 @@ func (suite *KeeperTestSuite) TestUpgradeClient() {
suite.Require().NoError(err)
// zero custom fields and store in upgrade store
- suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz)
- suite.chainB.App.UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz)
+ suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz)
+ suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz)
// commit upgrade store changes and update clients
suite.coordinator.CommitBlock(suite.chainB)
- err = suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ err = path.EndpointA.UpdateClient()
suite.Require().NoError(err)
- msg, err = clienttypes.NewMsgUpgradeClient(clientA, upgradedClient, upgradedConsState, nil, nil, suite.chainA.SenderAccount.GetAddress().String())
+ msg, err = clienttypes.NewMsgUpgradeClient(path.EndpointA.ClientID, upgradedClient, upgradedConsState, nil, nil, suite.chainA.SenderAccount.GetAddress().String())
suite.Require().NoError(err)
},
expPass: false,
@@ -742,15 +732,16 @@ func (suite *KeeperTestSuite) TestUpgradeClient() {
for _, tc := range cases {
tc := tc
- clientA, _ = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+ path = ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.SetupClients(path)
tc.setup()
- _, err := keeper.Keeper.UpgradeClient(*suite.chainA.App.IBCKeeper, sdk.WrapSDKContext(suite.chainA.GetContext()), msg)
+ _, err := keeper.Keeper.UpgradeClient(*suite.chainA.App.GetIBCKeeper(), sdk.WrapSDKContext(suite.chainA.GetContext()), msg)
if tc.expPass {
suite.Require().NoError(err, "upgrade handler failed on valid case: %s", tc.name)
- newClient, ok := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), clientA)
+ newClient, ok := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(suite.chainA.GetContext(), path.EndpointA.ClientID)
suite.Require().True(ok)
newChainSpecifiedClient := newClient.ZeroCustomFields()
suite.Require().Equal(upgradedClient, newChainSpecifiedClient)
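Taken together, the hunks above show the shape of the migration in these message-server tests: the six-value tuple returned by suite.coordinator.Setup is replaced by a single ibctesting.Path whose endpoints carry the client, connection and channel identifiers, and keeper access goes through App.GetIBCKeeper() or GetSimApp() instead of concrete SimApp fields. The following is an illustrative, uncompiled sketch assembled only from calls that appear in the diff; exact signatures may differ.

    // A Path replaces the clientA/clientB/connA/connB/channelA/channelB tuple.
    path := ibctesting.NewPath(suite.chainA, suite.chainB)
    path.SetChannelOrdered()      // only needed for the ORDERED test cases
    suite.coordinator.Setup(path) // clients, connection and channel on both endpoints

    packet := channeltypes.NewPacket(ibctesting.MockPacketData, 1,
        path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID,
        path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID,
        timeoutHeight, 0)

    // Endpoint methods replace the coordinator helpers that took chains and client IDs.
    err := path.EndpointA.SendPacket(packet)
    suite.Require().NoError(err)
    err = path.EndpointA.UpdateClient()
    suite.Require().NoError(err)

    // Keeper access now goes through App.GetIBCKeeper() rather than a concrete SimApp field.
    has := suite.chainA.App.GetIBCKeeper().ChannelKeeper.HasPacketCommitment(
        suite.chainA.GetContext(), packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence())
    suite.Require().False(has)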
diff --git a/modules/light-clients/06-solomachine/types/client_state_test.go b/modules/light-clients/06-solomachine/types/client_state_test.go
index 654ab1ba..6bab40ff 100644
--- a/modules/light-clients/06-solomachine/types/client_state_test.go
+++ b/modules/light-clients/06-solomachine/types/client_state_test.go
@@ -119,7 +119,7 @@ func (suite *SoloMachineTestSuite) TestInitialize() {
for _, tc := range testCases {
err := solomachine.ClientState().Initialize(
suite.chainA.GetContext(), suite.chainA.Codec,
- suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), "solomachine"),
+ suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), "solomachine"),
tc.consState,
)
@@ -134,8 +134,9 @@ func (suite *SoloMachineTestSuite) TestInitialize() {
func (suite *SoloMachineTestSuite) TestVerifyClientState() {
// create client for tendermint so we can use client state for verification
- clientA, _ := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
- clientState := suite.chainA.GetClientState(clientA)
+ tmPath := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.SetupClients(tmPath)
+ clientState := suite.chainA.GetClientState(tmPath.EndpointA.ClientID)
path := suite.solomachine.GetClientStatePath(counterpartyClientIdentifier)
// test singlesig and multisig public keys
@@ -261,9 +262,10 @@ func (suite *SoloMachineTestSuite) TestVerifyClientState() {
func (suite *SoloMachineTestSuite) TestVerifyClientConsensusState() {
// create client for tendermint so we can use consensus state for verification
- clientA, _ := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
- clientState := suite.chainA.GetClientState(clientA)
- consensusState, found := suite.chainA.GetConsensusState(clientA, clientState.GetLatestHeight())
+ tmPath := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.SetupClients(tmPath)
+ clientState := suite.chainA.GetClientState(tmPath.EndpointA.ClientID)
+ consensusState, found := suite.chainA.GetConsensusState(tmPath.EndpointA.ClientID, clientState.GetLatestHeight())
suite.Require().True(found)
path := suite.solomachine.GetConsensusStatePath(counterpartyClientIdentifier, consensusHeight)
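In the solomachine client tests the substantive change is that the auxiliary Tendermint client used for verification is now created through a dedicated path rather than SetupClients on raw chains. A minimal sketch of that setup, using only identifiers from the hunks above:

    tmPath := ibctesting.NewPath(suite.chainA, suite.chainB)
    suite.coordinator.SetupClients(tmPath) // Tendermint clients on both endpoints
    clientState := suite.chainA.GetClientState(tmPath.EndpointA.ClientID)
    consensusState, found := suite.chainA.GetConsensusState(tmPath.EndpointA.ClientID, clientState.GetLatestHeight())
    suite.Require().True(found)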
diff --git a/modules/light-clients/06-solomachine/types/proposal_handle_test.go b/modules/light-clients/06-solomachine/types/proposal_handle_test.go
index 4b797e6d..db99bbe2 100644
--- a/modules/light-clients/06-solomachine/types/proposal_handle_test.go
+++ b/modules/light-clients/06-solomachine/types/proposal_handle_test.go
@@ -67,8 +67,8 @@ func (suite *SoloMachineTestSuite) TestCheckSubstituteAndUpdateState() {
tc.malleate()
- subjectClientStore := suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), solomachine.ClientID)
- substituteClientStore := suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), substitute.ClientID)
+ subjectClientStore := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), solomachine.ClientID)
+ substituteClientStore := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), substitute.ClientID)
updatedClient, err := subjectClientState.CheckSubstituteAndUpdateState(suite.chainA.GetContext(), suite.chainA.App.AppCodec(), subjectClientStore, substituteClientStore, substituteClientState, nil)
diff --git a/modules/light-clients/06-solomachine/types/solomachine_test.go b/modules/light-clients/06-solomachine/types/solomachine_test.go
index 42d1a384..2f8e559a 100644
--- a/modules/light-clients/06-solomachine/types/solomachine_test.go
+++ b/modules/light-clients/06-solomachine/types/solomachine_test.go
@@ -40,7 +40,7 @@ func (suite *SoloMachineTestSuite) SetupTest() {
suite.solomachine = ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "solomachinesingle", "testing", 1)
suite.solomachineMulti = ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "solomachinemulti", "testing", 4)
- suite.store = suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), exported.Solomachine)
+ suite.store = suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), exported.Solomachine)
}
func TestSoloMachineTestSuite(t *testing.T) {
diff --git a/modules/light-clients/07-tendermint/types/client_state_test.go b/modules/light-clients/07-tendermint/types/client_state_test.go
index 2838ebb4..94de0199 100644
--- a/modules/light-clients/07-tendermint/types/client_state_test.go
+++ b/modules/light-clients/07-tendermint/types/client_state_test.go
@@ -125,11 +125,12 @@ func (suite *TendermintTestSuite) TestInitialize() {
},
}
- clientA, err := suite.coordinator.CreateClient(suite.chainA, suite.chainB, exported.Tendermint)
+ path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ err := path.EndpointA.CreateClient()
suite.Require().NoError(err)
- clientState := suite.chainA.GetClientState(clientA)
- store := suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), clientA)
+ clientState := suite.chainA.GetClientState(path.EndpointA.ClientID)
+ store := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), path.EndpointA.ClientID)
for _, tc := range testCases {
err := clientState.Initialize(suite.chainA.GetContext(), suite.chainA.Codec, store, tc.consensusState)
@@ -262,26 +263,27 @@ func (suite *TendermintTestSuite) TestVerifyConnectionState() {
suite.SetupTest() // reset
// setup testing conditions
- clientA, _, _, connB, _, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
- connection := suite.chainB.GetConnection(connB)
+ path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.Setup(path)
+ connection := path.EndpointB.GetConnection()
var ok bool
- clientStateI := suite.chainA.GetClientState(clientA)
+ clientStateI := suite.chainA.GetClientState(path.EndpointA.ClientID)
clientState, ok = clientStateI.(*types.ClientState)
suite.Require().True(ok)
prefix = suite.chainB.GetPrefix()
// make connection proof
- connectionKey := host.ConnectionKey(connB.ID)
+ connectionKey := host.ConnectionKey(path.EndpointB.ConnectionID)
proof, proofHeight = suite.chainB.QueryProof(connectionKey)
tc.malleate() // make changes as necessary
- store := suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), clientA)
+ store := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), path.EndpointA.ClientID)
err := clientState.VerifyConnectionState(
- store, suite.chainA.Codec, proofHeight, &prefix, proof, connB.ID, connection,
+ store, suite.chainA.Codec, proofHeight, &prefix, proof, path.EndpointB.ConnectionID, connection,
)
if tc.expPass {
@@ -340,27 +342,28 @@ func (suite *TendermintTestSuite) TestVerifyChannelState() {
suite.SetupTest() // reset
// setup testing conditions
- clientA, _, _, _, _, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
- channel := suite.chainB.GetChannel(channelB)
+ path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.Setup(path)
+ channel := path.EndpointB.GetChannel()
var ok bool
- clientStateI := suite.chainA.GetClientState(clientA)
+ clientStateI := suite.chainA.GetClientState(path.EndpointA.ClientID)
clientState, ok = clientStateI.(*types.ClientState)
suite.Require().True(ok)
prefix = suite.chainB.GetPrefix()
// make channel proof
- channelKey := host.ChannelKey(channelB.PortID, channelB.ID)
+ channelKey := host.ChannelKey(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID)
proof, proofHeight = suite.chainB.QueryProof(channelKey)
tc.malleate() // make changes as necessary
- store := suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), clientA)
+ store := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), path.EndpointA.ClientID)
err := clientState.VerifyChannelState(
store, suite.chainA.Codec, proofHeight, &prefix, proof,
- channelB.PortID, channelB.ID, channel,
+ path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, channel,
)
if tc.expPass {
@@ -434,13 +437,14 @@ func (suite *TendermintTestSuite) TestVerifyPacketCommitment() {
suite.SetupTest() // reset
// setup testing conditions
- clientA, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
- packet := channeltypes.NewPacket(ibctesting.MockPacketData, 1, channelB.PortID, channelB.ID, channelA.PortID, channelA.ID, clienttypes.NewHeight(0, 100), 0)
- err := suite.coordinator.SendPacket(suite.chainB, suite.chainA, packet, clientA)
+ path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.Setup(path)
+ packet := channeltypes.NewPacket(ibctesting.MockPacketData, 1, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, clienttypes.NewHeight(0, 100), 0)
+ err := path.EndpointB.SendPacket(packet)
suite.Require().NoError(err)
var ok bool
- clientStateI := suite.chainA.GetClientState(clientA)
+ clientStateI := suite.chainA.GetClientState(path.EndpointA.ClientID)
clientState, ok = clientStateI.(*types.ClientState)
suite.Require().True(ok)
@@ -448,14 +452,14 @@ func (suite *TendermintTestSuite) TestVerifyPacketCommitment() {
// make packet commitment proof
packetKey := host.PacketCommitmentKey(packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence())
- proof, proofHeight = suite.chainB.QueryProof(packetKey)
+ proof, proofHeight = path.EndpointB.QueryProof(packetKey)
tc.malleate() // make changes as necessary
- store := suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), clientA)
+ store := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), path.EndpointA.ClientID)
currentTime := uint64(suite.chainA.GetContext().BlockTime().UnixNano())
- commitment := channeltypes.CommitPacket(suite.chainA.App.IBCKeeper.Codec(), packet)
+ commitment := channeltypes.CommitPacket(suite.chainA.App.GetIBCKeeper().Codec(), packet)
err = clientState.VerifyPacketCommitment(
store, suite.chainA.Codec, proofHeight, currentTime, delayPeriod, &prefix, proof,
packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence(), commitment,
@@ -533,19 +537,20 @@ func (suite *TendermintTestSuite) TestVerifyPacketAcknowledgement() {
suite.SetupTest() // reset
// setup testing conditions
- clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
- packet := channeltypes.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.NewHeight(0, 100), 0)
+ path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.Setup(path)
+ packet := channeltypes.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, clienttypes.NewHeight(0, 100), 0)
// send packet
- err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ err := path.EndpointA.SendPacket(packet)
suite.Require().NoError(err)
// write receipt and ack
- err = suite.coordinator.RecvPacket(suite.chainA, suite.chainB, clientA, packet)
+ err = path.EndpointB.RecvPacket(packet)
suite.Require().NoError(err)
var ok bool
- clientStateI := suite.chainA.GetClientState(clientA)
+ clientStateI := suite.chainA.GetClientState(path.EndpointA.ClientID)
clientState, ok = clientStateI.(*types.ClientState)
suite.Require().True(ok)
@@ -557,7 +562,7 @@ func (suite *TendermintTestSuite) TestVerifyPacketAcknowledgement() {
tc.malleate() // make changes as necessary
- store := suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), clientA)
+ store := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), path.EndpointA.ClientID)
currentTime := uint64(suite.chainA.GetContext().BlockTime().UnixNano())
err = clientState.VerifyPacketAcknowledgement(
@@ -637,18 +642,16 @@ func (suite *TendermintTestSuite) TestVerifyPacketReceiptAbsence() {
suite.SetupTest() // reset
// setup testing conditions
- clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
- packet := channeltypes.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.NewHeight(0, 100), 0)
+ path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.Setup(path)
+ packet := channeltypes.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, clienttypes.NewHeight(0, 100), 0)
// send packet, but no recv
- err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ err := path.EndpointA.SendPacket(packet)
suite.Require().NoError(err)
- // need to update chainA's client representing chainB to prove missing ack
- suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
-
var ok bool
- clientStateI := suite.chainA.GetClientState(clientA)
+ clientStateI := suite.chainA.GetClientState(path.EndpointA.ClientID)
clientState, ok = clientStateI.(*types.ClientState)
suite.Require().True(ok)
@@ -656,11 +659,11 @@ func (suite *TendermintTestSuite) TestVerifyPacketReceiptAbsence() {
// make packet receipt absence proof
receiptKey := host.PacketReceiptKey(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence())
- proof, proofHeight = suite.chainB.QueryProof(receiptKey)
+ proof, proofHeight = path.EndpointB.QueryProof(receiptKey)
tc.malleate() // make changes as necessary
- store := suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), clientA)
+ store := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), path.EndpointA.ClientID)
currentTime := uint64(suite.chainA.GetContext().BlockTime().UnixNano())
err = clientState.VerifyPacketReceiptAbsence(
@@ -740,22 +743,21 @@ func (suite *TendermintTestSuite) TestVerifyNextSeqRecv() {
suite.SetupTest() // reset
// setup testing conditions
- clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.ORDERED)
- packet := channeltypes.NewPacket(ibctesting.MockPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.NewHeight(0, 100), 0)
+ path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ path.SetChannelOrdered()
+ suite.coordinator.Setup(path)
+ packet := channeltypes.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, clienttypes.NewHeight(0, 100), 0)
// send packet
- err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+ err := path.EndpointA.SendPacket(packet)
suite.Require().NoError(err)
// next seq recv incremented
- err = suite.coordinator.RecvPacket(suite.chainA, suite.chainB, clientA, packet)
+ err = path.EndpointB.RecvPacket(packet)
suite.Require().NoError(err)
- // need to update chainA's client representing chainB
- suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
-
var ok bool
- clientStateI := suite.chainA.GetClientState(clientA)
+ clientStateI := suite.chainA.GetClientState(path.EndpointA.ClientID)
clientState, ok = clientStateI.(*types.ClientState)
suite.Require().True(ok)
@@ -767,7 +769,7 @@ func (suite *TendermintTestSuite) TestVerifyNextSeqRecv() {
tc.malleate() // make changes as necessary
- store := suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), clientA)
+ store := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), path.EndpointA.ClientID)
currentTime := uint64(suite.chainA.GetContext().BlockTime().UnixNano())
err = clientState.VerifyNextSequenceRecv(
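The verification tests in this file repeat the same per-case path setup; the additions beyond the earlier hunks are that packets are received via the counterparty endpoint and proofs are queried from the endpoint rather than the chain. A hedged sketch of those two steps, with identifiers taken from the hunks above:

    suite.Require().NoError(path.EndpointA.SendPacket(packet))
    suite.Require().NoError(path.EndpointB.RecvPacket(packet)) // writes the receipt/ack on chainB

    receiptKey := host.PacketReceiptKey(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence())
    proof, proofHeight := path.EndpointB.QueryProof(receiptKey) // was suite.chainB.QueryProof(receiptKey)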
diff --git a/modules/light-clients/07-tendermint/types/genesis_test.go b/modules/light-clients/07-tendermint/types/genesis_test.go
index 9699c669..72b876e0 100644
--- a/modules/light-clients/07-tendermint/types/genesis_test.go
+++ b/modules/light-clients/07-tendermint/types/genesis_test.go
@@ -11,12 +11,12 @@ import (
func (suite *TendermintTestSuite) TestExportMetadata() {
clientState := types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
- suite.chainA.App.IBCKeeper.ClientKeeper.SetClientState(suite.chainA.GetContext(), "clientA", clientState)
+ suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(suite.chainA.GetContext(), "clientA", clientState)
- gm := clientState.ExportMetadata(suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), "clientA"))
+ gm := clientState.ExportMetadata(suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), "clientA"))
suite.Require().Nil(gm, "client with no metadata returned non-nil exported metadata")
- clientStore := suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), "clientA")
+ clientStore := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), "clientA")
// set some processed times
timestamp1 := uint64(time.Now().UnixNano())
@@ -26,7 +26,7 @@ func (suite *TendermintTestSuite) TestExportMetadata() {
types.SetProcessedTime(clientStore, clienttypes.NewHeight(0, 1), timestamp1)
types.SetProcessedTime(clientStore, clienttypes.NewHeight(0, 2), timestamp2)
- gm = clientState.ExportMetadata(suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), "clientA"))
+ gm = clientState.ExportMetadata(suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), "clientA"))
suite.Require().NotNil(gm, "client with metadata returned nil exported metadata")
suite.Require().Len(gm, 2, "exported metadata has unexpected length")
diff --git a/modules/light-clients/07-tendermint/types/misbehaviour_handle_test.go b/modules/light-clients/07-tendermint/types/misbehaviour_handle_test.go
index da1cd6fb..ee811847 100644
--- a/modules/light-clients/07-tendermint/types/misbehaviour_handle_test.go
+++ b/modules/light-clients/07-tendermint/types/misbehaviour_handle_test.go
@@ -344,16 +344,16 @@ func (suite *TendermintTestSuite) TestCheckMisbehaviourAndUpdateState() {
// Set trusted consensus states in client store
if tc.consensusState1 != nil {
- suite.chainA.App.IBCKeeper.ClientKeeper.SetClientConsensusState(ctx, clientID, tc.height1, tc.consensusState1)
+ suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(ctx, clientID, tc.height1, tc.consensusState1)
}
if tc.consensusState2 != nil {
- suite.chainA.App.IBCKeeper.ClientKeeper.SetClientConsensusState(ctx, clientID, tc.height2, tc.consensusState2)
+ suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(ctx, clientID, tc.height2, tc.consensusState2)
}
clientState, err := tc.clientState.CheckMisbehaviourAndUpdateState(
ctx,
suite.cdc,
- suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(ctx, clientID), // pass in clientID prefixed clientStore
+ suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(ctx, clientID), // pass in clientID prefixed clientStore
tc.misbehaviour,
)
diff --git a/modules/light-clients/07-tendermint/types/proposal_handle_test.go b/modules/light-clients/07-tendermint/types/proposal_handle_test.go
index c9d3e71e..6208aec3 100644
--- a/modules/light-clients/07-tendermint/types/proposal_handle_test.go
+++ b/modules/light-clients/07-tendermint/types/proposal_handle_test.go
@@ -15,9 +15,9 @@ var (
func (suite *TendermintTestSuite) TestCheckSubstituteUpdateStateBasic() {
var (
- substitute string
substituteClientState exported.ClientState
initialHeight clienttypes.Height
+ substitutePath *ibctesting.Path
)
testCases := []struct {
name string
@@ -35,8 +35,8 @@ func (suite *TendermintTestSuite) TestCheckSubstituteUpdateStateBasic() {
},
{
"non-matching substitute", func() {
- substitute, _ := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
- substituteClientState = suite.chainA.GetClientState(substitute).(*types.ClientState)
+ suite.coordinator.SetupClients(substitutePath)
+ substituteClientState = suite.chainA.GetClientState(substitutePath.EndpointA.ClientID).(*types.ClientState)
tmClientState, ok := substituteClientState.(*types.ClientState)
suite.Require().True(ok)
@@ -45,8 +45,8 @@ func (suite *TendermintTestSuite) TestCheckSubstituteUpdateStateBasic() {
},
{
"updated client is invalid - revision height is zero", func() {
- substitute, _ := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
- substituteClientState = suite.chainA.GetClientState(substitute).(*types.ClientState)
+ suite.coordinator.SetupClients(substitutePath)
+ substituteClientState = suite.chainA.GetClientState(substitutePath.EndpointA.ClientID).(*types.ClientState)
tmClientState, ok := substituteClientState.(*types.ClientState)
suite.Require().True(ok)
// match subject
@@ -60,8 +60,8 @@ func (suite *TendermintTestSuite) TestCheckSubstituteUpdateStateBasic() {
},
{
"updated client is expired", func() {
- substitute, _ = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
- substituteClientState = suite.chainA.GetClientState(substitute).(*types.ClientState)
+ suite.coordinator.SetupClients(substitutePath)
+ substituteClientState = suite.chainA.GetClientState(substitutePath.EndpointA.ClientID).(*types.ClientState)
tmClientState, ok := substituteClientState.(*types.ClientState)
suite.Require().True(ok)
initialHeight = tmClientState.LatestHeight
@@ -69,21 +69,21 @@ func (suite *TendermintTestSuite) TestCheckSubstituteUpdateStateBasic() {
// match subject
tmClientState.AllowUpdateAfterMisbehaviour = true
tmClientState.AllowUpdateAfterExpiry = true
- suite.chainA.App.IBCKeeper.ClientKeeper.SetClientState(suite.chainA.GetContext(), substitute, tmClientState)
+ suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(suite.chainA.GetContext(), substitutePath.EndpointA.ClientID, tmClientState)
// update substitute a few times
- err := suite.coordinator.UpdateClient(suite.chainA, suite.chainB, substitute, exported.Tendermint)
+ err := substitutePath.EndpointA.UpdateClient()
suite.Require().NoError(err)
- substituteClientState = suite.chainA.GetClientState(substitute)
+ substituteClientState = suite.chainA.GetClientState(substitutePath.EndpointA.ClientID)
- err = suite.coordinator.UpdateClient(suite.chainA, suite.chainB, substitute, exported.Tendermint)
+ err = substitutePath.EndpointA.UpdateClient()
suite.Require().NoError(err)
- suite.chainA.ExpireClient(tmClientState.TrustingPeriod)
- suite.chainB.ExpireClient(tmClientState.TrustingPeriod)
+ // expire client
+ suite.coordinator.IncrementTimeBy(tmClientState.TrustingPeriod)
suite.coordinator.CommitBlock(suite.chainA, suite.chainB)
- substituteClientState = suite.chainA.GetClientState(substitute)
+ substituteClientState = suite.chainA.GetClientState(substitutePath.EndpointA.ClientID)
},
},
}
@@ -94,21 +94,22 @@ func (suite *TendermintTestSuite) TestCheckSubstituteUpdateStateBasic() {
suite.Run(tc.name, func() {
suite.SetupTest() // reset
+ subjectPath := ibctesting.NewPath(suite.chainA, suite.chainB)
+ substitutePath = ibctesting.NewPath(suite.chainA, suite.chainB)
- subject, _ := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
- subjectClientState := suite.chainA.GetClientState(subject).(*types.ClientState)
+ suite.coordinator.SetupClients(subjectPath)
+ subjectClientState := suite.chainA.GetClientState(subjectPath.EndpointA.ClientID).(*types.ClientState)
subjectClientState.AllowUpdateAfterMisbehaviour = true
subjectClientState.AllowUpdateAfterExpiry = true
- // expire subject
- suite.chainA.ExpireClient(subjectClientState.TrustingPeriod)
- suite.chainB.ExpireClient(subjectClientState.TrustingPeriod)
+ // expire subject client
+ suite.coordinator.IncrementTimeBy(subjectClientState.TrustingPeriod)
suite.coordinator.CommitBlock(suite.chainA, suite.chainB)
tc.malleate()
- subjectClientStore := suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), subject)
- substituteClientStore := suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), substitute)
+ subjectClientStore := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), subjectPath.EndpointA.ClientID)
+ substituteClientStore := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), substitutePath.EndpointA.ClientID)
updatedClient, err := subjectClientState.CheckSubstituteAndUpdateState(suite.chainA.GetContext(), suite.chainA.App.AppCodec(), subjectClientStore, substituteClientStore, substituteClientState, initialHeight)
suite.Require().Error(err)
@@ -270,8 +271,9 @@ func (suite *TendermintTestSuite) TestCheckSubstituteAndUpdateState() {
suite.SetupTest() // reset
// construct subject using test case parameters
- subject, _ := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
- subjectClientState := suite.chainA.GetClientState(subject).(*types.ClientState)
+ subjectPath := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.SetupClients(subjectPath)
+ subjectClientState := suite.chainA.GetClientState(subjectPath.EndpointA.ClientID).(*types.ClientState)
subjectClientState.AllowUpdateAfterExpiry = tc.AllowUpdateAfterExpiry
subjectClientState.AllowUpdateAfterMisbehaviour = tc.AllowUpdateAfterMisbehaviour
@@ -280,8 +282,8 @@ func (suite *TendermintTestSuite) TestCheckSubstituteAndUpdateState() {
subjectClientState.FrozenHeight = frozenHeight
}
if tc.ExpireClient {
- suite.chainA.ExpireClient(subjectClientState.TrustingPeriod)
- suite.chainB.ExpireClient(subjectClientState.TrustingPeriod)
+ // expire subject client
+ suite.coordinator.IncrementTimeBy(subjectClientState.TrustingPeriod)
suite.coordinator.CommitBlock(suite.chainA, suite.chainB)
}
@@ -291,27 +293,28 @@ func (suite *TendermintTestSuite) TestCheckSubstituteAndUpdateState() {
// the natural flow of events in practice. The subject will become frozen/expired
// and a substitute will be created along with a governance proposal as a response
- substitute, _ := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
- substituteClientState := suite.chainA.GetClientState(substitute).(*types.ClientState)
+ substitutePath := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.SetupClients(substitutePath)
+ substituteClientState := suite.chainA.GetClientState(substitutePath.EndpointA.ClientID).(*types.ClientState)
substituteClientState.AllowUpdateAfterExpiry = tc.AllowUpdateAfterExpiry
substituteClientState.AllowUpdateAfterMisbehaviour = tc.AllowUpdateAfterMisbehaviour
- suite.chainA.App.IBCKeeper.ClientKeeper.SetClientState(suite.chainA.GetContext(), substitute, substituteClientState)
+ suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(suite.chainA.GetContext(), substitutePath.EndpointA.ClientID, substituteClientState)
initialHeight := substituteClientState.GetLatestHeight()
// update substitute a few times
for i := 0; i < 3; i++ {
- err := suite.coordinator.UpdateClient(suite.chainA, suite.chainB, substitute, exported.Tendermint)
+ err := substitutePath.EndpointA.UpdateClient()
suite.Require().NoError(err)
// skip a block
suite.coordinator.CommitBlock(suite.chainA, suite.chainB)
}
// get updated substitute
- substituteClientState = suite.chainA.GetClientState(substitute).(*types.ClientState)
+ substituteClientState = suite.chainA.GetClientState(substitutePath.EndpointA.ClientID).(*types.ClientState)
- subjectClientStore := suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), subject)
- substituteClientStore := suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), substitute)
+ subjectClientStore := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), subjectPath.EndpointA.ClientID)
+ substituteClientStore := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), substitutePath.EndpointA.ClientID)
updatedClient, err := subjectClientState.CheckSubstituteAndUpdateState(suite.chainA.GetContext(), suite.chainA.App.AppCodec(), subjectClientStore, substituteClientStore, substituteClientState, initialHeight)
if tc.expPass {
@@ -328,7 +331,7 @@ func (suite *TendermintTestSuite) TestCheckSubstituteAndUpdateState() {
func (suite *TendermintTestSuite) TestIsMatchingClientState() {
var (
- subject, substitute string
+ subjectPath, substitutePath *ibctesting.Path
subjectClientState, substituteClientState *types.ClientState
)
@@ -339,8 +342,8 @@ func (suite *TendermintTestSuite) TestIsMatchingClientState() {
}{
{
"matching clients", func() {
- subjectClientState = suite.chainA.GetClientState(subject).(*types.ClientState)
- substituteClientState = suite.chainA.GetClientState(substitute).(*types.ClientState)
+ subjectClientState = suite.chainA.GetClientState(subjectPath.EndpointA.ClientID).(*types.ClientState)
+ substituteClientState = suite.chainA.GetClientState(substitutePath.EndpointA.ClientID).(*types.ClientState)
}, true,
},
{
@@ -375,8 +378,10 @@ func (suite *TendermintTestSuite) TestIsMatchingClientState() {
suite.Run(tc.name, func() {
suite.SetupTest() // reset
- subject, _ = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
- substitute, _ = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+ subjectPath = ibctesting.NewPath(suite.chainA, suite.chainB)
+ substitutePath = ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.SetupClients(subjectPath)
+ suite.coordinator.SetupClients(substitutePath)
tc.malleate()
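For the client-substitution tests the subject and substitute clients are now tracked as two independent paths, and a client is expired by advancing the shared coordinator clock rather than expiring each chain separately. A rough sketch, again limited to calls that appear in these hunks:

    subjectPath := ibctesting.NewPath(suite.chainA, suite.chainB)
    substitutePath := ibctesting.NewPath(suite.chainA, suite.chainB)
    suite.coordinator.SetupClients(subjectPath)
    suite.coordinator.SetupClients(substitutePath)

    subjectClientState := suite.chainA.GetClientState(subjectPath.EndpointA.ClientID).(*types.ClientState)

    // expire the subject client: one coordinator time jump replaces the two per-chain ExpireClient calls
    suite.coordinator.IncrementTimeBy(subjectClientState.TrustingPeriod)
    suite.coordinator.CommitBlock(suite.chainA, suite.chainB)

    subjectClientStore := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), subjectPath.EndpointA.ClientID)
    substituteClientStore := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), substitutePath.EndpointA.ClientID)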
diff --git a/modules/light-clients/07-tendermint/types/store_test.go b/modules/light-clients/07-tendermint/types/store_test.go
index ed9dc5d9..e0230e08 100644
--- a/modules/light-clients/07-tendermint/types/store_test.go
+++ b/modules/light-clients/07-tendermint/types/store_test.go
@@ -2,7 +2,6 @@ package types_test
import (
clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
- channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
host "github.com/cosmos/ibc-go/modules/core/24-host"
"github.com/cosmos/ibc-go/modules/core/exported"
solomachinetypes "github.com/cosmos/ibc-go/modules/light-clients/06-solomachine/types"
@@ -12,8 +11,8 @@ import (
func (suite *TendermintTestSuite) TestGetConsensusState() {
var (
- height exported.Height
- clientA string
+ height exported.Height
+ path *ibctesting.Path
)
testCases := []struct {
@@ -33,16 +32,16 @@ func (suite *TendermintTestSuite) TestGetConsensusState() {
{
"not a consensus state interface", func() {
// marshal an empty client state and set as consensus state
- store := suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), clientA)
- clientStateBz := suite.chainA.App.IBCKeeper.ClientKeeper.MustMarshalClientState(&types.ClientState{})
+ store := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), path.EndpointA.ClientID)
+ clientStateBz := suite.chainA.App.GetIBCKeeper().ClientKeeper.MustMarshalClientState(&types.ClientState{})
store.Set(host.ConsensusStateKey(height), clientStateBz)
}, false,
},
{
"invalid consensus state (solomachine)", func() {
// marshal and set solomachine consensus state
- store := suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), clientA)
- consensusStateBz := suite.chainA.App.IBCKeeper.ClientKeeper.MustMarshalConsensusState(&solomachinetypes.ConsensusState{})
+ store := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), path.EndpointA.ClientID)
+ consensusStateBz := suite.chainA.App.GetIBCKeeper().ClientKeeper.MustMarshalConsensusState(&solomachinetypes.ConsensusState{})
store.Set(host.ConsensusStateKey(height), consensusStateBz)
}, false,
},
@@ -53,19 +52,20 @@ func (suite *TendermintTestSuite) TestGetConsensusState() {
suite.Run(tc.name, func() {
suite.SetupTest()
+ path = ibctesting.NewPath(suite.chainA, suite.chainB)
- clientA, _, _, _, _, _ = suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
- clientState := suite.chainA.GetClientState(clientA)
+ suite.coordinator.Setup(path)
+ clientState := suite.chainA.GetClientState(path.EndpointA.ClientID)
height = clientState.GetLatestHeight()
tc.malleate() // change vars as necessary
- store := suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), clientA)
+ store := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), path.EndpointA.ClientID)
consensusState, err := types.GetConsensusState(store, suite.chainA.Codec, height)
if tc.expPass {
suite.Require().NoError(err)
- expConsensusState, found := suite.chainA.GetConsensusState(clientA, height)
+ expConsensusState, found := suite.chainA.GetConsensusState(path.EndpointA.ClientID, height)
suite.Require().True(found)
suite.Require().Equal(expConsensusState, consensusState)
} else {
@@ -77,32 +77,37 @@ func (suite *TendermintTestSuite) TestGetConsensusState() {
}
func (suite *TendermintTestSuite) TestGetProcessedTime() {
- // Verify ProcessedTime on CreateClient
+ // setup
+ path := ibctesting.NewPath(suite.chainA, suite.chainB)
+
+ suite.coordinator.UpdateTime()
// coordinator increments time before creating client
expectedTime := suite.chainA.CurrentHeader.Time.Add(ibctesting.TimeIncrement)
- clientA, err := suite.coordinator.CreateClient(suite.chainA, suite.chainB, exported.Tendermint)
+ // Verify ProcessedTime on CreateClient
+ err := path.EndpointA.CreateClient()
suite.Require().NoError(err)
- clientState := suite.chainA.GetClientState(clientA)
+ clientState := suite.chainA.GetClientState(path.EndpointA.ClientID)
height := clientState.GetLatestHeight()
- store := suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), clientA)
+ store := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), path.EndpointA.ClientID)
actualTime, ok := types.GetProcessedTime(store, height)
suite.Require().True(ok, "could not retrieve processed time for stored consensus state")
suite.Require().Equal(uint64(expectedTime.UnixNano()), actualTime, "retrieved processed time is not expected value")
- // Verify ProcessedTime on UpdateClient
+ suite.coordinator.UpdateTime()
// coordinator increments time before updating client
expectedTime = suite.chainA.CurrentHeader.Time.Add(ibctesting.TimeIncrement)
- err = suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ // Verify ProcessedTime on UpdateClient
+ err = path.EndpointA.UpdateClient()
suite.Require().NoError(err)
- clientState = suite.chainA.GetClientState(clientA)
+ clientState = suite.chainA.GetClientState(path.EndpointA.ClientID)
height = clientState.GetLatestHeight()
- store = suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), clientA)
+ store = suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), path.EndpointA.ClientID)
actualTime, ok = types.GetProcessedTime(store, height)
suite.Require().True(ok, "could not retrieve processed time for stored consensus state")
suite.Require().Equal(uint64(expectedTime.UnixNano()), actualTime, "retrieved processed time is not expected value")
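The processed-time test now advances the coordinator clock explicitly with UpdateTime before computing the expected timestamp, and creates and updates the client through the endpoint. A condensed sketch of the create-client half, with identifiers taken from the hunk above:

    path := ibctesting.NewPath(suite.chainA, suite.chainB)

    suite.coordinator.UpdateTime() // coordinator increments time before creating the client
    expectedTime := suite.chainA.CurrentHeader.Time.Add(ibctesting.TimeIncrement)

    suite.Require().NoError(path.EndpointA.CreateClient())

    clientState := suite.chainA.GetClientState(path.EndpointA.ClientID)
    store := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), path.EndpointA.ClientID)
    actualTime, ok := types.GetProcessedTime(store, clientState.GetLatestHeight())
    suite.Require().True(ok)
    suite.Require().Equal(uint64(expectedTime.UnixNano()), actualTime)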
diff --git a/modules/light-clients/07-tendermint/types/update_test.go b/modules/light-clients/07-tendermint/types/update_test.go
index f72d6fba..672defe7 100644
--- a/modules/light-clients/07-tendermint/types/update_test.go
+++ b/modules/light-clients/07-tendermint/types/update_test.go
@@ -242,7 +242,7 @@ func (suite *TendermintTestSuite) TestCheckHeaderAndUpdateState() {
ctx := suite.chainA.GetContext().WithBlockTime(currentTime)
// Set trusted consensus state in client store
- suite.chainA.App.IBCKeeper.ClientKeeper.SetClientConsensusState(ctx, clientID, consStateHeight, consensusState)
+ suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(ctx, clientID, consStateHeight, consensusState)
height := newHeader.GetHeight()
expectedConsensus := &types.ConsensusState{
@@ -254,7 +254,7 @@ func (suite *TendermintTestSuite) TestCheckHeaderAndUpdateState() {
newClientState, consensusState, err := clientState.CheckHeaderAndUpdateState(
ctx,
suite.cdc,
- suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), clientID), // pass in clientID prefixed clientStore
+ suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), clientID), // pass in clientID prefixed clientStore
newHeader,
)
diff --git a/modules/light-clients/07-tendermint/types/upgrade_test.go b/modules/light-clients/07-tendermint/types/upgrade_test.go
index 263c11cd..6c1baef6 100644
--- a/modules/light-clients/07-tendermint/types/upgrade_test.go
+++ b/modules/light-clients/07-tendermint/types/upgrade_test.go
@@ -1,11 +1,13 @@
package types_test
import (
+ upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
+
clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
commitmenttypes "github.com/cosmos/ibc-go/modules/core/23-commitment/types"
"github.com/cosmos/ibc-go/modules/core/exported"
"github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types"
- upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
+ ibctesting "github.com/cosmos/ibc-go/testing"
)
func (suite *TendermintTestSuite) TestVerifyUpgrade() {
@@ -13,7 +15,7 @@ func (suite *TendermintTestSuite) TestVerifyUpgrade() {
upgradedClient exported.ClientState
upgradedConsState exported.ConsensusState
lastHeight clienttypes.Height
- clientA string
+ path *ibctesting.Path
proofUpgradedClient, proofUpgradedConsState []byte
upgradedClientBz, upgradedConsStateBz []byte
err error
@@ -31,16 +33,16 @@ func (suite *TendermintTestSuite) TestVerifyUpgrade() {
lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1))
// zero custom fields and store in upgrade store
- suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz)
- suite.chainB.App.UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz)
+ suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz)
+ suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz)
// commit upgrade store changes and update clients
suite.coordinator.CommitBlock(suite.chainB)
- err := suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ err := path.EndpointA.UpdateClient()
suite.Require().NoError(err)
- cs, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), clientA)
+ cs, found := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(suite.chainA.GetContext(), path.EndpointA.ClientID)
suite.Require().True(found)
proofUpgradedClient, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight())
@@ -61,16 +63,16 @@ func (suite *TendermintTestSuite) TestVerifyUpgrade() {
lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1))
// zero custom fields and store in upgrade store
- suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz)
- suite.chainB.App.UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz)
+ suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz)
+ suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz)
// commit upgrade store changes and update clients
suite.coordinator.CommitBlock(suite.chainB)
- err := suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ err := path.EndpointA.UpdateClient()
suite.Require().NoError(err)
- cs, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), clientA)
+ cs, found := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(suite.chainA.GetContext(), path.EndpointA.ClientID)
suite.Require().True(found)
proofUpgradedClient, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight())
@@ -86,16 +88,16 @@ func (suite *TendermintTestSuite) TestVerifyUpgrade() {
lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+10))
// zero custom fields and store in upgrade store
- suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz)
- suite.chainB.App.UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz)
+ suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz)
+ suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz)
// commit upgrade store changes and update clients
suite.coordinator.CommitBlock(suite.chainB)
- err := suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ err := path.EndpointA.UpdateClient()
suite.Require().NoError(err)
- cs, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), clientA)
+ cs, found := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(suite.chainA.GetContext(), path.EndpointA.ClientID)
suite.Require().True(found)
proofUpgradedClient, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight())
@@ -115,16 +117,16 @@ func (suite *TendermintTestSuite) TestVerifyUpgrade() {
lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1))
// zero custom fields and store in upgrade store
- suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz)
- suite.chainB.App.UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz)
+ suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz)
+ suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz)
// commit upgrade store changes and update clients
suite.coordinator.CommitBlock(suite.chainB)
- err := suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ err := path.EndpointA.UpdateClient()
suite.Require().NoError(err)
- cs, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), clientA)
+ cs, found := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(suite.chainA.GetContext(), path.EndpointA.ClientID)
suite.Require().True(found)
proofUpgradedClient, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight())
@@ -139,17 +141,17 @@ func (suite *TendermintTestSuite) TestVerifyUpgrade() {
lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1))
// zero custom fields and store in upgrade store
- suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz)
- suite.chainB.App.UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz)
+ suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz)
+ suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz)
// change upgradedClient client-specified parameters
upgradedClient = types.NewClientState("wrongchainID", types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), upgradePath, true, true)
suite.coordinator.CommitBlock(suite.chainB)
- err := suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ err := path.EndpointA.UpdateClient()
suite.Require().NoError(err)
- cs, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), clientA)
+ cs, found := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(suite.chainA.GetContext(), path.EndpointA.ClientID)
suite.Require().True(found)
proofUpgradedClient, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight())
@@ -161,17 +163,17 @@ func (suite *TendermintTestSuite) TestVerifyUpgrade() {
name: "unsuccessful upgrade: client-specified parameters do not match previous client",
setup: func() {
// zero custom fields and store in upgrade store
- suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz)
- suite.chainB.App.UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz)
+ suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz)
+ suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz)
// change upgradedClient client-specified parameters
upgradedClient = types.NewClientState("newChainId", types.DefaultTrustLevel, ubdPeriod, ubdPeriod+trustingPeriod, maxClockDrift+5, lastHeight, commitmenttypes.GetSDKSpecs(), upgradePath, true, false)
suite.coordinator.CommitBlock(suite.chainB)
- err := suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ err := path.EndpointA.UpdateClient()
suite.Require().NoError(err)
- cs, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), clientA)
+ cs, found := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(suite.chainA.GetContext(), path.EndpointA.ClientID)
suite.Require().True(found)
proofUpgradedClient, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight())
@@ -186,8 +188,8 @@ func (suite *TendermintTestSuite) TestVerifyUpgrade() {
lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1))
// zero custom fields and store in upgrade store
- suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz)
- suite.chainB.App.UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz)
+ suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz)
+ suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz)
// change submitted upgradedConsensusState
upgradedConsState = &types.ConsensusState{
@@ -197,10 +199,10 @@ func (suite *TendermintTestSuite) TestVerifyUpgrade() {
// commit upgrade store changes and update clients
suite.coordinator.CommitBlock(suite.chainB)
- err := suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ err := path.EndpointA.UpdateClient()
suite.Require().NoError(err)
- cs, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), clientA)
+ cs, found := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(suite.chainA.GetContext(), path.EndpointA.ClientID)
suite.Require().True(found)
proofUpgradedClient, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight())
@@ -211,9 +213,9 @@ func (suite *TendermintTestSuite) TestVerifyUpgrade() {
{
name: "unsuccessful upgrade: client proof unmarshal failed",
setup: func() {
- suite.chainB.App.UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz)
+ suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz)
- cs, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), clientA)
+ cs, found := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(suite.chainA.GetContext(), path.EndpointA.ClientID)
suite.Require().True(found)
proofUpgradedConsState, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight())
@@ -225,9 +227,9 @@ func (suite *TendermintTestSuite) TestVerifyUpgrade() {
{
name: "unsuccessful upgrade: consensus state proof unmarshal failed",
setup: func() {
- suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz)
+ suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz)
- cs, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), clientA)
+ cs, found := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(suite.chainA.GetContext(), path.EndpointA.ClientID)
suite.Require().True(found)
proofUpgradedClient, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight())
@@ -244,9 +246,9 @@ func (suite *TendermintTestSuite) TestVerifyUpgrade() {
// upgrade Height is at next block
lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1))
- suite.chainB.App.UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz)
+ suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz)
- cs, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), clientA)
+ cs, found := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(suite.chainA.GetContext(), path.EndpointA.ClientID)
suite.Require().True(found)
proofUpgradedClient, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight())
@@ -262,9 +264,9 @@ func (suite *TendermintTestSuite) TestVerifyUpgrade() {
// upgrade Height is at next block
lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1))
- suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz)
+ suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz)
- cs, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), clientA)
+ cs, found := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(suite.chainA.GetContext(), path.EndpointA.ClientID)
suite.Require().True(found)
proofUpgradedClient, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight())
@@ -279,15 +281,15 @@ func (suite *TendermintTestSuite) TestVerifyUpgrade() {
lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1))
// zero custom fields and store in upgrade store
- suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz)
+ suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz)
// commit upgrade store changes and update clients
suite.coordinator.CommitBlock(suite.chainB)
- err := suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ err := path.EndpointA.UpdateClient()
suite.Require().NoError(err)
- cs, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), clientA)
+ cs, found := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(suite.chainA.GetContext(), path.EndpointA.ClientID)
suite.Require().True(found)
proofUpgradedClient, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight())
@@ -296,7 +298,7 @@ func (suite *TendermintTestSuite) TestVerifyUpgrade() {
// SetClientState with empty upgrade path
tmClient, _ := cs.(*types.ClientState)
tmClient.UpgradePath = []string{""}
- suite.chainA.App.IBCKeeper.ClientKeeper.SetClientState(suite.chainA.GetContext(), clientA, tmClient)
+ suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(suite.chainA.GetContext(), path.EndpointA.ClientID, tmClient)
},
expPass: false,
},
@@ -307,15 +309,15 @@ func (suite *TendermintTestSuite) TestVerifyUpgrade() {
lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1))
// zero custom fields and store in upgrade store
- suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz)
+ suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz)
// commit upgrade store changes and update clients
suite.coordinator.CommitBlock(suite.chainB)
- err := suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ err := path.EndpointA.UpdateClient()
suite.Require().NoError(err)
- cs, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), clientA)
+ cs, found := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(suite.chainA.GetContext(), path.EndpointA.ClientID)
suite.Require().True(found)
proofUpgradedClient, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight())
@@ -330,15 +332,15 @@ func (suite *TendermintTestSuite) TestVerifyUpgrade() {
lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+100))
// zero custom fields and store in upgrade store
- suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz)
+ suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz)
// commit upgrade store changes and update clients
suite.coordinator.CommitBlock(suite.chainB)
- err := suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ err := path.EndpointA.UpdateClient()
suite.Require().NoError(err)
- cs, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), clientA)
+ cs, found := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(suite.chainA.GetContext(), path.EndpointA.ClientID)
suite.Require().True(found)
proofUpgradedClient, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight())
@@ -350,18 +352,18 @@ func (suite *TendermintTestSuite) TestVerifyUpgrade() {
name: "unsuccessful upgrade: client is expired",
setup: func() {
// zero custom fields and store in upgrade store
- suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz)
+ suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz)
// commit upgrade store changes and update clients
suite.coordinator.CommitBlock(suite.chainB)
- err := suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ err := path.EndpointA.UpdateClient()
suite.Require().NoError(err)
// expire chainB's client
suite.chainA.ExpireClient(ubdPeriod)
- cs, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), clientA)
+ cs, found := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(suite.chainA.GetContext(), path.EndpointA.ClientID)
suite.Require().True(found)
proofUpgradedClient, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight())
@@ -376,15 +378,15 @@ func (suite *TendermintTestSuite) TestVerifyUpgrade() {
lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1))
// zero custom fields and store in upgrade store
- suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz)
+ suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz)
// commit upgrade store changes and update clients
suite.coordinator.CommitBlock(suite.chainB)
- err := suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ err := path.EndpointA.UpdateClient()
suite.Require().NoError(err)
- cs, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), clientA)
+ cs, found := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(suite.chainA.GetContext(), path.EndpointA.ClientID)
suite.Require().True(found)
proofUpgradedClient, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight())
@@ -404,16 +406,16 @@ func (suite *TendermintTestSuite) TestVerifyUpgrade() {
lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1))
// zero custom fields and store in upgrade store
- suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz)
- suite.chainB.App.UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz)
+ suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz)
+ suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz)
// commit upgrade store changes and update clients
suite.coordinator.CommitBlock(suite.chainB)
- err := suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+ err := path.EndpointA.UpdateClient()
suite.Require().NoError(err)
- cs, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), clientA)
+ cs, found := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(suite.chainA.GetContext(), path.EndpointA.ClientID)
suite.Require().True(found)
proofUpgradedClient, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight())
@@ -428,8 +430,9 @@ func (suite *TendermintTestSuite) TestVerifyUpgrade() {
// reset suite
suite.SetupTest()
+ path = ibctesting.NewPath(suite.chainA, suite.chainB)
- clientA, _ = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
+ suite.coordinator.SetupClients(path)
upgradedClient = types.NewClientState("newChainId", types.DefaultTrustLevel, trustingPeriod, ubdPeriod+trustingPeriod, maxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
upgradedClient = upgradedClient.ZeroCustomFields()
upgradedClientBz, err = clienttypes.MarshalClientState(suite.chainA.App.AppCodec(), upgradedClient)
@@ -443,8 +446,8 @@ func (suite *TendermintTestSuite) TestVerifyUpgrade() {
tc.setup()
- cs := suite.chainA.GetClientState(clientA)
- clientStore := suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), clientA)
+ cs := suite.chainA.GetClientState(path.EndpointA.ClientID)
+ clientStore := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), path.EndpointA.ClientID)
// Call ZeroCustomFields on upgraded clients to clear any client-chosen parameters in test-case upgradedClient
upgradedClient = upgradedClient.ZeroCustomFields()
diff --git a/testing/README.md b/testing/README.md
new file mode 100644
index 00000000..7808980b
--- /dev/null
+++ b/testing/README.md
@@ -0,0 +1,289 @@
+# IBC Testing Package
+
+## Components
+
+The testing package comprises four parts, constructed as a stack:
+- coordinator
+- chain
+- path
+- endpoint
+
+A coordinator sits at the highest level and contains all the chains that have been initialized.
+It also stores and updates the current global time, which is manually incremented by a `TimeIncrement`.
+This keeps all chains in sync and avoids the issue of a counterparty being perceived to be
+in the future. The coordinator also provides functions for basic setup of clients, connections, and channels
+between two chains.
+
+A chain is an SDK application (as represented by an app.go file). Inside the chain is a `TestingApp`, which allows
+the chain to simulate block production and transaction processing. By default, the chain contains a single Tendermint
+validator. A chain is used to process SDK messages.
+
+A path connects two channel endpoints. It contains all the information needed to relay between two endpoints.
+
+An endpoint represents a channel (and its associated client and connection) on a specific chain. It contains
+references to the chain it is on and to the counterparty endpoint it is connected to. The endpoint provides functions
+to initialize and update its associated clients, connections, and channels. It can also send, receive,
+and acknowledge packets.
+
+In general:
+- endpoints are used to initialize and execute IBC logic on one side of an IBC connection
+- paths are used to relay packets
+- chains are used to commit SDK messages
+- the coordinator is used to set up a path between two chains
+
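+As a quick orientation, here is a minimal sketch of how these pieces fit together, using only functions documented
+later in this README (`t` is the test's `*testing.T`):
+
+```go
+coord := ibctesting.NewCoordinator(t, 2)           // the coordinator owns both chains and the global time
+chainA := coord.GetChain(ibctesting.GetChainID(0)) // each chain wraps a TestingApp
+chainB := coord.GetChain(ibctesting.GetChainID(1))
+
+path := ibctesting.NewPath(chainA, chainB) // a path holds EndpointA and EndpointB
+coord.Setup(path)                          // creates the clients, connections, and channels on both endpoints
+```
+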
+## Integration
+
+To integrate the testing package into your tests, you will need to define:
+- a testing application
+- a function to initialize the testing application
+
+### TestingApp
+
+Your project will likely already have an application defined. This application
+will need to be extended to fulfill the `TestingApp` interface.
+
+```go
+type TestingApp interface {
+ abci.Application
+
+ // ibc-go additions
+ GetBaseApp() *baseapp.BaseApp
+ GetStakingKeeper() stakingkeeper.Keeper
+ GetIBCKeeper() *keeper.Keeper
+ GetScopedIBCKeeper() capabilitykeeper.ScopedKeeper
+ GetTxConfig() client.TxConfig
+
+ // Implemented by SimApp
+ AppCodec() codec.Marshaler
+
+ // Implemented by BaseApp
+ LastCommitID() sdk.CommitID
+ LastBlockHeight() int64
+}
+```
+
+To begin, you will need to extend your application by adding the following functions:
+
+```go
+// TestingApp functions
+// Example using SimApp to implement TestingApp
+
+// GetBaseApp implements the TestingApp interface.
+func (app *SimApp) GetBaseApp() *baseapp.BaseApp {
+ return app.BaseApp
+}
+
+// GetStakingKeeper implements the TestingApp interface.
+func (app *SimApp) GetStakingKeeper() stakingkeeper.Keeper {
+ return app.StakingKeeper
+}
+
+// GetIBCKeeper implements the TestingApp interface.
+func (app *SimApp) GetIBCKeeper() *ibckeeper.Keeper {
+ return app.IBCKeeper
+}
+
+// GetScopedIBCKeeper implements the TestingApp interface.
+func (app *SimApp) GetScopedIBCKeeper() capabilitykeeper.ScopedKeeper {
+ return app.ScopedIBCKeeper
+}
+
+// GetTxConfig implements the TestingApp interface.
+func (app *SimApp) GetTxConfig() client.TxConfig {
+ return MakeTestEncodingConfig().TxConfig
+}
+```
+
+Your application may need to define `AppCodec()` if it does not already exist:
+
+```go
+// AppCodec returns SimApp's app codec.
+//
+// NOTE: This is solely to be used for testing purposes as it may be desirable
+// for modules to register their own custom testing types.
+func (app *SimApp) AppCodec() codec.Marshaler {
+ return app.appCodec
+}
+```
+
+It is assumed that your application contains an embedded `BaseApp` and thus implements the `abci.Application` interface, as well as `LastCommitID()` and `LastBlockHeight()`.
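+
+For reference, a minimal sketch of such an embedding (`MyApp` is a placeholder name, not a type provided by ibc-go):
+
+```go
+// baseapp is "github.com/cosmos/cosmos-sdk/baseapp"
+type MyApp struct {
+ *baseapp.BaseApp // embedding BaseApp provides LastCommitID() and LastBlockHeight()
+
+ // module keepers, scoped keepers, codecs, etc.
+}
+```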
+
+### Initialize TestingApp
+
+The testing package requires that you provide a function to initialize your TestingApp. This is how ibc-go implements this function for its `SimApp`:
+
+```go
+func SetupTestingApp() (TestingApp, map[string]json.RawMessage) {
+ db := dbm.NewMemDB()
+ encCdc := simapp.MakeTestEncodingConfig()
+ app := simapp.NewSimApp(log.NewNopLogger(), db, nil, true, map[int64]bool{}, simapp.DefaultNodeHome, 5, encCdc, simapp.EmptyAppOptions{})
+ return app, simapp.NewDefaultGenesisState(encCdc.Marshaler)
+}
+```
+
+This function returns the TestingApp and the default genesis state used to initialize the testing app.
+
+Change the value of `DefaultTestingAppInit` to use your function:
+```go
+func init() {
+ ibctesting.DefaultTestingAppInit = MySetupTestingAppFunction
+}
+```
+
+## Example
+
+Here is an example of how to set up your testing environment in every package you are testing:
+```go
+// KeeperTestSuite is a testing suite to test keeper functions.
+type KeeperTestSuite struct {
+ suite.Suite
+
+ coordinator *ibctesting.Coordinator
+
+ // testing chains used for convenience and readability
+ chainA *ibctesting.TestChain
+ chainB *ibctesting.TestChain
+}
+
+// TestKeeperTestSuite runs all the tests within this package.
+func TestKeeperTestSuite(t *testing.T) {
+ suite.Run(t, new(KeeperTestSuite))
+}
+
+// SetupTest creates a coordinator with 2 test chains.
+func (suite *KeeperTestSuite) SetupTest() {
+ suite.coordinator = ibctesting.NewCoordinator(suite.T(), 2) // initializes 2 test chains
+ suite.chainA = suite.coordinator.GetChain(ibctesting.GetChainID(0)) // convenience and readability
+ suite.chainB = suite.coordinator.GetChain(ibctesting.GetChainID(1)) // convenience and readability
+}
+```
+
+To create interactions between chainA and chainB, we need to construct a `Path` that these chains will use.
+A path contains two endpoints, `EndpointA` and `EndpointB` (corresponding to the order of the chains passed
+into the `NewPath` function). A path is a pointer and its values will be filled in as necessary during the
+setup portion of testing.
+
+Endpoint Struct:
+```go
+// Endpoint represents a channel endpoint and its associated
+// client and connections. It contains client, connection, and channel
+// configuration parameters. Endpoint functions will utilize the parameters
+// set in the configuration structs when executing IBC messages.
+type Endpoint struct {
+ Chain *TestChain
+ Counterparty *Endpoint
+ ClientID string
+ ConnectionID string
+ ChannelID string
+
+ ClientConfig ClientConfig
+ ConnectionConfig *ConnectionConfig
+ ChannelConfig *ChannelConfig
+}
+```
+
+The fields left empty after `NewPath` is called are `ClientID`, `ConnectionID` and
+`ChannelID`, as the clients, connections, and channels for these endpoints have not yet been created. The
+`ClientConfig`, `ConnectionConfig` and `ChannelConfig` fields contain all the information necessary for clients,
+connections, and channels to be initialized. If you would like to use endpoints which are initialized to
+use your own port IDs, you might add a helper function similar to the one found in transfer:
+
+```go
+func NewTransferPath(chainA, chainB *ibctesting.TestChain) *ibctesting.Path {
+ path := ibctesting.NewPath(chainA, chainB)
+ path.EndpointA.ChannelConfig.PortID = ibctesting.TransferPort
+ path.EndpointB.ChannelConfig.PortID = ibctesting.TransferPort
+
+ return path
+}
+```
+
+Path configurations should be set to the desired values before calling any `Setup` coordinator functions.
+
+To initialize the clients, connections, and channels for a path, we can call the `Setup` functions of the coordinator:
+- `Setup()` -> sets up clients, connections, and channels
+- `SetupClients()` -> sets up clients only
+- `SetupConnections()` -> sets up clients and connections only
+
+Here is a basic example of the testing package being used to simulate IBC functionality:
+
+```go
+ path := ibctesting.NewPath(suite.chainA, suite.chainB) // clientID, connectionID, channelID empty
+ suite.coordinator.Setup(path) // clientID, connectionID, channelID filled
+ suite.Require().Equal("07-tendermint-0", path.EndpointA.ClientID)
+ suite.Require().Equal("connection-0", path.EndpointA.ConnectionID)
+ suite.Require().Equal("channel-0", path.EndpointA.ChannelID)
+
+ // create packet 1
+ packet1 := NewPacket() // NewPacket would construct your packet
+
+ // send on endpointA
+ path.EndpointA.SendPacket(packet1)
+
+ // receive on endpointB
+ path.EndpointB.RecvPacket(packet1)
+
+ // acknowledge the receipt of the packet
+ path.EndpointA.AcknowledgePacket(packet1, ack)
+
+ // we can also relay
+ packet2 := NewPacket()
+
+ path.EndpointA.SendPacket(packet2)
+
+ path.Relay(packet2, expectedAck)
+
+ // if needed we can update our clients
+ path.EndpointB.UpdateClient()
+```
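+
+`NewPacket()`, `ack`, and `expectedAck` above are placeholders for whatever your application produces. Purely as an
+illustration (not a helper provided by this package), a packet can be constructed with the core `04-channel` types,
+reading the identifiers filled in by `Setup` from the path (`clienttypes` and `channeltypes` below refer to the core
+`02-client` and `04-channel` types packages):
+
+```go
+timeoutHeight := clienttypes.NewHeight(0, 110) // illustrative timeout height on the counterparty chain
+packet := channeltypes.NewPacket(
+ []byte("arbitrary packet data"),     // application-specific packet data bytes
+ 1,                                   // sequence
+ path.EndpointA.ChannelConfig.PortID, // source port
+ path.EndpointA.ChannelID,            // source channel
+ path.EndpointB.ChannelConfig.PortID, // destination port
+ path.EndpointB.ChannelID,            // destination channel
+ timeoutHeight,
+ 0,                                   // timeout timestamp (disabled)
+)
+```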
+
+### Transfer Testing Example
+
+If ICS 20 had its own simapp, its testing setup might include a `testing/app.go` file with the following contents:
+
+```go
+package transfertesting
+
+import (
+ "encoding/json"
+
+ "github.com/tendermint/tendermint/libs/log"
+ dbm "github.com/tendermint/tm-db"
+
+ "github.com/cosmos/ibc-go/modules/apps/transfer/simapp"
+ ibctesting "github.com/cosmos/ibc-go/testing"
+)
+
+func SetupTransferTestingApp() (ibctesting.TestingApp, map[string]json.RawMessage) {
+ db := dbm.NewMemDB()
+ encCdc := simapp.MakeTestEncodingConfig()
+ app := simapp.NewSimApp(log.NewNopLogger(), db, nil, true, map[int64]bool{}, simapp.DefaultNodeHome, 5, encCdc, simapp.EmptyAppOptions{})
+ return app, simapp.NewDefaultGenesisState(encCdc.Marshaler)
+}
+
+func init() {
+ ibctesting.DefaultTestingAppInit = SetupTransferTestingApp
+}
+
+func NewTransferPath(chainA, chainB *ibctesting.TestChain) *ibctesting.Path {
+ path := ibctesting.NewPath(chainA, chainB)
+ path.EndpointA.ChannelConfig.PortID = ibctesting.TransferPort
+ path.EndpointB.ChannelConfig.PortID = ibctesting.TransferPort
+
+ return path
+}
+
+func GetTransferSimApp(chain *ibctesting.TestChain) *simapp.SimApp {
+ app, ok := chain.App.(*simapp.SimApp)
+ if !ok {
+ panic("not transfer app")
+ }
+
+ return app
+}
+```
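+
+With these helpers in place, a test of the transfer simapp could (hypothetically) set up its environment and open a
+transfer channel as follows, assuming a suite struct shaped like the `KeeperTestSuite` shown earlier:
+
+```go
+// SetupTest creates a coordinator with two chains backed by the transfer simapp
+// (DefaultTestingAppInit is set by the init function above).
+func (suite *TransferTestSuite) SetupTest() {
+ suite.coordinator = ibctesting.NewCoordinator(suite.T(), 2)
+ suite.chainA = suite.coordinator.GetChain(ibctesting.GetChainID(0))
+ suite.chainB = suite.coordinator.GetChain(ibctesting.GetChainID(1))
+}
+
+func (suite *TransferTestSuite) TestOpenTransferChannel() {
+ path := transfertesting.NewTransferPath(suite.chainA, suite.chainB)
+ suite.coordinator.Setup(path) // clients, connections, and the transfer channel created on both ends
+
+ // access non-interface fields of the transfer simapp when needed
+ transferApp := transfertesting.GetTransferSimApp(suite.chainA)
+ suite.Require().NotNil(transferApp)
+}
+```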
diff --git a/testing/app.go b/testing/app.go
new file mode 100644
index 00000000..fb4ecbcb
--- /dev/null
+++ b/testing/app.go
@@ -0,0 +1,136 @@
+package ibctesting
+
+import (
+ "encoding/json"
+ "testing"
+ "time"
+
+ "github.com/cosmos/cosmos-sdk/baseapp"
+ "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/codec"
+ codectypes "github.com/cosmos/cosmos-sdk/codec/types"
+ cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
+ banktypes "github.com/cosmos/cosmos-sdk/x/bank/types"
+ capabilitykeeper "github.com/cosmos/cosmos-sdk/x/capability/keeper"
+ stakingkeeper "github.com/cosmos/cosmos-sdk/x/staking/keeper"
+ stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types"
+ "github.com/stretchr/testify/require"
+ abci "github.com/tendermint/tendermint/abci/types"
+ "github.com/tendermint/tendermint/libs/log"
+ tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
+ tmtypes "github.com/tendermint/tendermint/types"
+ dbm "github.com/tendermint/tm-db"
+
+ "github.com/cosmos/ibc-go/modules/core/keeper"
+ "github.com/cosmos/ibc-go/testing/simapp"
+)
+
+var DefaultTestingAppInit func() (TestingApp, map[string]json.RawMessage) = SetupTestingApp
+
+type TestingApp interface {
+ abci.Application
+
+ // ibc-go additions
+ GetBaseApp() *baseapp.BaseApp
+ GetStakingKeeper() stakingkeeper.Keeper
+ GetIBCKeeper() *keeper.Keeper
+ GetScopedIBCKeeper() capabilitykeeper.ScopedKeeper
+ GetTxConfig() client.TxConfig
+
+ // Implemented by SimApp
+ AppCodec() codec.Marshaler
+
+ // Implemented by BaseApp
+ LastCommitID() sdk.CommitID
+ LastBlockHeight() int64
+}
+
+func SetupTestingApp() (TestingApp, map[string]json.RawMessage) {
+ db := dbm.NewMemDB()
+ encCdc := simapp.MakeTestEncodingConfig()
+ app := simapp.NewSimApp(log.NewNopLogger(), db, nil, true, map[int64]bool{}, simapp.DefaultNodeHome, 5, encCdc, simapp.EmptyAppOptions{})
+ return app, simapp.NewDefaultGenesisState(encCdc.Marshaler)
+}
+
+// SetupWithGenesisValSet initializes a new SimApp with a validator set and genesis accounts
+// that also act as delegators. For simplicity, each validator is bonded with a delegation
+// of one consensus engine unit (10^6) in the default token of the simapp from the first genesis
+// account. A Nop logger is set in SimApp.
+func SetupWithGenesisValSet(t *testing.T, valSet *tmtypes.ValidatorSet, genAccs []authtypes.GenesisAccount, balances ...banktypes.Balance) TestingApp {
+ app, genesisState := DefaultTestingAppInit()
+ // set genesis accounts
+ authGenesis := authtypes.NewGenesisState(authtypes.DefaultParams(), genAccs)
+ genesisState[authtypes.ModuleName] = app.AppCodec().MustMarshalJSON(authGenesis)
+
+ validators := make([]stakingtypes.Validator, 0, len(valSet.Validators))
+ delegations := make([]stakingtypes.Delegation, 0, len(valSet.Validators))
+
+ bondAmt := sdk.NewInt(1000000)
+
+ for _, val := range valSet.Validators {
+ pk, err := cryptocodec.FromTmPubKeyInterface(val.PubKey)
+ require.NoError(t, err)
+ pkAny, err := codectypes.NewAnyWithValue(pk)
+ require.NoError(t, err)
+ validator := stakingtypes.Validator{
+ OperatorAddress: sdk.ValAddress(val.Address).String(),
+ ConsensusPubkey: pkAny,
+ Jailed: false,
+ Status: stakingtypes.Bonded,
+ Tokens: bondAmt,
+ DelegatorShares: sdk.OneDec(),
+ Description: stakingtypes.Description{},
+ UnbondingHeight: int64(0),
+ UnbondingTime: time.Unix(0, 0).UTC(),
+ Commission: stakingtypes.NewCommission(sdk.ZeroDec(), sdk.ZeroDec(), sdk.ZeroDec()),
+ MinSelfDelegation: sdk.ZeroInt(),
+ }
+ validators = append(validators, validator)
+ delegations = append(delegations, stakingtypes.NewDelegation(genAccs[0].GetAddress(), val.Address.Bytes(), sdk.OneDec()))
+
+ }
+ // set validators and delegations
+ stakingGenesis := stakingtypes.NewGenesisState(stakingtypes.DefaultParams(), validators, delegations)
+ genesisState[stakingtypes.ModuleName] = app.AppCodec().MustMarshalJSON(stakingGenesis)
+
+ totalSupply := sdk.NewCoins()
+ for _, b := range balances {
+ // add genesis acc tokens and delegated tokens to total supply
+ totalSupply = totalSupply.Add(b.Coins.Add(sdk.NewCoin(sdk.DefaultBondDenom, bondAmt))...)
+ }
+
+ // add bonded amount to bonded pool module account
+ balances = append(balances, banktypes.Balance{
+ Address: authtypes.NewModuleAddress(stakingtypes.BondedPoolName).String(),
+ Coins: sdk.Coins{sdk.NewCoin(sdk.DefaultBondDenom, bondAmt)},
+ })
+
+ // update total supply
+ bankGenesis := banktypes.NewGenesisState(banktypes.DefaultGenesisState().Params, balances, totalSupply, []banktypes.Metadata{})
+ genesisState[banktypes.ModuleName] = app.AppCodec().MustMarshalJSON(bankGenesis)
+
+ stateBytes, err := json.MarshalIndent(genesisState, "", " ")
+ require.NoError(t, err)
+
+ // init chain will set the validator set and initialize the genesis accounts
+ app.InitChain(
+ abci.RequestInitChain{
+ Validators: []abci.ValidatorUpdate{},
+ ConsensusParams: simapp.DefaultConsensusParams,
+ AppStateBytes: stateBytes,
+ },
+ )
+
+ // commit genesis changes
+ app.Commit()
+ app.BeginBlock(abci.RequestBeginBlock{Header: tmproto.Header{
+ Height: app.LastBlockHeight() + 1,
+ AppHash: app.LastCommitID().Hash,
+ ValidatorsHash: valSet.Hash(),
+ NextValidatorsHash: valSet.Hash(),
+ }})
+
+ return app
+}
diff --git a/testing/chain.go b/testing/chain.go
index 19aa1f2c..19ee2183 100644
--- a/testing/chain.go
+++ b/testing/chain.go
@@ -3,18 +3,9 @@ package ibctesting
import (
"bytes"
"fmt"
- "strconv"
"testing"
"time"
- "github.com/stretchr/testify/require"
- abci "github.com/tendermint/tendermint/abci/types"
- "github.com/tendermint/tendermint/crypto/tmhash"
- tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
- tmprotoversion "github.com/tendermint/tendermint/proto/tendermint/version"
- tmtypes "github.com/tendermint/tendermint/types"
- tmversion "github.com/tendermint/tendermint/version"
-
"github.com/cosmos/cosmos-sdk/client"
"github.com/cosmos/cosmos-sdk/codec"
"github.com/cosmos/cosmos-sdk/crypto/keys/secp256k1"
@@ -23,13 +14,19 @@ import (
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
banktypes "github.com/cosmos/cosmos-sdk/x/bank/types"
+ capabilitykeeper "github.com/cosmos/cosmos-sdk/x/capability/keeper"
capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"
"github.com/cosmos/cosmos-sdk/x/staking/teststaking"
stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types"
- ibctransfertypes "github.com/cosmos/ibc-go/modules/apps/transfer/types"
+ "github.com/stretchr/testify/require"
+ abci "github.com/tendermint/tendermint/abci/types"
+ "github.com/tendermint/tendermint/crypto/tmhash"
+ tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
+ tmprotoversion "github.com/tendermint/tendermint/proto/tendermint/version"
+ tmtypes "github.com/tendermint/tendermint/types"
+ tmversion "github.com/tendermint/tendermint/version"
+
clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
- connectiontypes "github.com/cosmos/ibc-go/modules/core/03-connection/types"
- channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
commitmenttypes "github.com/cosmos/ibc-go/modules/core/23-commitment/types"
host "github.com/cosmos/ibc-go/modules/core/24-host"
"github.com/cosmos/ibc-go/modules/core/exported"
@@ -39,44 +36,6 @@ import (
"github.com/cosmos/ibc-go/testing/simapp"
)
-const (
- // Default params constants used to create a TM client
- TrustingPeriod time.Duration = time.Hour * 24 * 7 * 2
- UnbondingPeriod time.Duration = time.Hour * 24 * 7 * 3
- MaxClockDrift time.Duration = time.Second * 10
- DefaultDelayPeriod uint64 = 0
-
- DefaultChannelVersion = ibctransfertypes.Version
- InvalidID = "IDisInvalid"
-
- ConnectionIDPrefix = "conn"
- ChannelIDPrefix = "chan"
-
- TransferPort = ibctransfertypes.ModuleName
- MockPort = mock.ModuleName
-
- // used for testing UpdateClientProposal
- Title = "title"
- Description = "description"
-)
-
-var (
- DefaultOpenInitVersion *connectiontypes.Version
-
- // Default params variables used to create a TM client
- DefaultTrustLevel ibctmtypes.Fraction = ibctmtypes.DefaultTrustLevel
- TestCoin = sdk.NewCoin(sdk.DefaultBondDenom, sdk.NewInt(100))
-
- UpgradePath = []string{"upgrade", "upgradedIBCState"}
-
- ConnectionVersion = connectiontypes.ExportedVersionsToProto(connectiontypes.GetCompatibleVersions())[0]
-
- MockAcknowledgement = mock.MockAcknowledgement.Acknowledgement()
- MockPacketData = mock.MockPacketData
- MockFailPacketData = mock.MockFailPacketData
- MockCanaryCapabilityName = mock.MockCanaryCapabilityName
-)
-
// TestChain is a testing struct that wraps a simapp with the last TM Header, the current ABCI
// header and the validators of the TestChain. It also contains a field called ChainID. This
// is the clientID that *other* chains use to refer to this TestChain. The SenderAccount
@@ -85,7 +44,8 @@ var (
type TestChain struct {
t *testing.T
- App *simapp.SimApp
+ Coordinator *Coordinator
+ App TestingApp
ChainID string
LastHeader *ibctmtypes.Header // header for last block height committed
CurrentHeader tmproto.Header // header for current block height
@@ -98,10 +58,6 @@ type TestChain struct {
senderPrivKey cryptotypes.PrivKey
SenderAccount authtypes.AccountI
-
- // IBC specific helpers
- ClientIDs []string // ClientID's used on this chain
- Connections []*TestConnection // track connectionID's created for this chain
}
// NewTestChain initializes a new TestChain instance with a single validator set using a
@@ -112,7 +68,7 @@ type TestChain struct {
//
// Time management is handled by the Coordinator in order to ensure synchrony between chains.
// Each update of any chain increments the block header time for all chains by 5 seconds.
-func NewTestChain(t *testing.T, chainID string) *TestChain {
+func NewTestChain(t *testing.T, coord *Coordinator, chainID string) *TestChain {
// generate validator private/public key
privVal := mock.NewPV()
pubKey, err := privVal.GetPubKey()
@@ -131,54 +87,65 @@ func NewTestChain(t *testing.T, chainID string) *TestChain {
Coins: sdk.NewCoins(sdk.NewCoin(sdk.DefaultBondDenom, sdk.NewInt(100000000000000))),
}
- app := simapp.SetupWithGenesisValSet(t, valSet, []authtypes.GenesisAccount{acc}, balance)
+ app := SetupWithGenesisValSet(t, valSet, []authtypes.GenesisAccount{acc}, balance)
// create current header and call begin block
header := tmproto.Header{
ChainID: chainID,
Height: 1,
- Time: globalStartTime,
+ Time: coord.CurrentTime.UTC(),
}
- txConfig := simapp.MakeTestEncodingConfig().TxConfig
+ txConfig := app.GetTxConfig()
// create an account to send transactions from
chain := &TestChain{
t: t,
+ Coordinator: coord,
ChainID: chainID,
App: app,
CurrentHeader: header,
- QueryServer: app.IBCKeeper,
+ QueryServer: app.GetIBCKeeper(),
TxConfig: txConfig,
Codec: app.AppCodec(),
Vals: valSet,
Signers: signers,
senderPrivKey: senderPrivKey,
SenderAccount: acc,
- ClientIDs: make([]string, 0),
- Connections: make([]*TestConnection, 0),
}
- cap := chain.App.IBCKeeper.PortKeeper.BindPort(chain.GetContext(), MockPort)
- err = chain.App.ScopedIBCMockKeeper.ClaimCapability(chain.GetContext(), cap, host.PortPath(MockPort))
- require.NoError(t, err)
-
- chain.NextBlock()
+ coord.CommitBlock(chain)
return chain
}
// GetContext returns the current context for the application.
func (chain *TestChain) GetContext() sdk.Context {
- return chain.App.BaseApp.NewContext(false, chain.CurrentHeader)
+ return chain.App.GetBaseApp().NewContext(false, chain.CurrentHeader)
+}
+
+// GetSimApp returns the SimApp to allow usage of non-interface fields.
+// CONTRACT: This function should not be called by third parties implementing
+// their own SimApp.
+func (chain *TestChain) GetSimApp() *simapp.SimApp {
+ app, ok := chain.App.(*simapp.SimApp)
+ require.True(chain.t, ok)
+
+ return app
}
// QueryProof performs an abci query with the given key and returns the proto encoded merkle proof
// for the query and the height at which the proof will succeed on a tendermint verifier.
func (chain *TestChain) QueryProof(key []byte) ([]byte, clienttypes.Height) {
+ return chain.QueryProofAtHeight(key, chain.App.LastBlockHeight())
+}
+
+// QueryProofAtHeight performs an abci query with the given key and returns the proto encoded merkle proof
+// for the query and the height at which the proof will succeed on a tendermint verifier.
+func (chain *TestChain) QueryProofAtHeight(key []byte, height int64) ([]byte, clienttypes.Height) {
res := chain.App.Query(abci.RequestQuery{
Path: fmt.Sprintf("store/%s/key", host.StoreKey),
- Height: chain.App.LastBlockHeight() - 1,
+ Height: height - 1,
Data: key,
Prove: true,
})
@@ -221,19 +188,6 @@ func (chain *TestChain) QueryUpgradeProof(key []byte, height uint64) ([]byte, cl
return proof, clienttypes.NewHeight(revision, uint64(res.Height+1))
}
-// QueryClientStateProof performs and abci query for a client state
-// stored with a given clientID and returns the ClientState along with the proof
-func (chain *TestChain) QueryClientStateProof(clientID string) (exported.ClientState, []byte) {
- // retrieve client state to provide proof for
- clientState, found := chain.App.IBCKeeper.ClientKeeper.GetClientState(chain.GetContext(), clientID)
- require.True(chain.t, found)
-
- clientKey := host.FullClientStateKey(clientID)
- proofClient, _ := chain.QueryProof(clientKey)
-
- return clientState, proofClient
-}
-
// QueryConsensusStateProof performs an abci query for a consensus state
// stored on the given clientID. The proof and consensusHeight are returned.
func (chain *TestChain) QueryConsensusStateProof(clientID string) ([]byte, clienttypes.Height) {
@@ -268,7 +222,6 @@ func (chain *TestChain) NextBlock() {
}
chain.App.BeginBlock(abci.RequestBeginBlock{Header: chain.CurrentHeader})
-
}
// sendMsgs delivers a transaction through the application without returning the result.
@@ -281,10 +234,14 @@ func (chain *TestChain) sendMsgs(msgs ...sdk.Msg) error {
// number and updates the TestChain's headers. It returns the result and error if one
// occurred.
func (chain *TestChain) SendMsgs(msgs ...sdk.Msg) (*sdk.Result, error) {
- _, r, err := simapp.SignCheckDeliver(
+
+ // ensure the chain has the latest time
+ chain.Coordinator.UpdateTimeForChain(chain)
+
+ _, r, err := simapp.SignAndDeliver(
chain.t,
chain.TxConfig,
- chain.App.BaseApp,
+ chain.App.GetBaseApp(),
chain.GetContext().BlockHeader(),
msgs,
chain.ChainID,
@@ -296,19 +253,21 @@ func (chain *TestChain) SendMsgs(msgs ...sdk.Msg) (*sdk.Result, error) {
return nil, err
}
- // SignCheckDeliver calls app.Commit()
+ // SignAndDeliver calls app.Commit()
chain.NextBlock()
// increment sequence for successful transaction execution
chain.SenderAccount.SetSequence(chain.SenderAccount.GetSequence() + 1)
+ chain.Coordinator.IncrementTime()
+
return r, nil
}
// GetClientState retrieves the client state for the provided clientID. The client is
// expected to exist otherwise testing will fail.
func (chain *TestChain) GetClientState(clientID string) exported.ClientState {
- clientState, found := chain.App.IBCKeeper.ClientKeeper.GetClientState(chain.GetContext(), clientID)
+ clientState, found := chain.App.GetIBCKeeper().ClientKeeper.GetClientState(chain.GetContext(), clientID)
require.True(chain.t, found)
return clientState
@@ -317,13 +276,13 @@ func (chain *TestChain) GetClientState(clientID string) exported.ClientState {
// GetConsensusState retrieves the consensus state for the provided clientID and height.
// It will return a success boolean depending on if consensus state exists or not.
func (chain *TestChain) GetConsensusState(clientID string, height exported.Height) (exported.ConsensusState, bool) {
- return chain.App.IBCKeeper.ClientKeeper.GetClientConsensusState(chain.GetContext(), clientID, height)
+ return chain.App.GetIBCKeeper().ClientKeeper.GetClientConsensusState(chain.GetContext(), clientID, height)
}
// GetValsAtHeight will return the validator set of the chain at a given height. It will return
// a success boolean depending on if the validator set exists or not at that height.
func (chain *TestChain) GetValsAtHeight(height int64) (*tmtypes.ValidatorSet, bool) {
- histInfo, ok := chain.App.StakingKeeper.GetHistoricalInfo(chain.GetContext(), height)
+ histInfo, ok := chain.App.GetStakingKeeper().GetHistoricalInfo(chain.GetContext(), height)
if !ok {
return nil, false
}
@@ -337,28 +296,10 @@ func (chain *TestChain) GetValsAtHeight(height int64) (*tmtypes.ValidatorSet, bo
return tmtypes.NewValidatorSet(tmValidators), true
}
-// GetConnection retrieves an IBC Connection for the provided TestConnection. The
-// connection is expected to exist otherwise testing will fail.
-func (chain *TestChain) GetConnection(testConnection *TestConnection) connectiontypes.ConnectionEnd {
- connection, found := chain.App.IBCKeeper.ConnectionKeeper.GetConnection(chain.GetContext(), testConnection.ID)
- require.True(chain.t, found)
-
- return connection
-}
-
-// GetChannel retrieves an IBC Channel for the provided TestChannel. The channel
-// is expected to exist otherwise testing will fail.
-func (chain *TestChain) GetChannel(testChannel TestChannel) channeltypes.Channel {
- channel, found := chain.App.IBCKeeper.ChannelKeeper.GetChannel(chain.GetContext(), testChannel.PortID, testChannel.ID)
- require.True(chain.t, found)
-
- return channel
-}
-
// GetAcknowledgement retrieves an acknowledgement for the provided packet. If the
// acknowledgement does not exist then testing will fail.
func (chain *TestChain) GetAcknowledgement(packet exported.PacketI) []byte {
- ack, found := chain.App.IBCKeeper.ChannelKeeper.GetPacketAcknowledgement(chain.GetContext(), packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence())
+ ack, found := chain.App.GetIBCKeeper().ChannelKeeper.GetPacketAcknowledgement(chain.GetContext(), packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence())
require.True(chain.t, found)
return ack
@@ -366,130 +307,7 @@ func (chain *TestChain) GetAcknowledgement(packet exported.PacketI) []byte {
// GetPrefix returns the prefix for used by a chain in connection creation
func (chain *TestChain) GetPrefix() commitmenttypes.MerklePrefix {
- return commitmenttypes.NewMerklePrefix(chain.App.IBCKeeper.ConnectionKeeper.GetCommitmentPrefix().Bytes())
-}
-
-// NewClientID appends a new clientID string in the format:
-// ClientFor
-func (chain *TestChain) NewClientID(clientType string) string {
- clientID := fmt.Sprintf("%s-%s", clientType, strconv.Itoa(len(chain.ClientIDs)))
- chain.ClientIDs = append(chain.ClientIDs, clientID)
- return clientID
-}
-
-// AddTestConnection appends a new TestConnection which contains references
-// to the connection id, client id and counterparty client id.
-func (chain *TestChain) AddTestConnection(clientID, counterpartyClientID string) *TestConnection {
- conn := chain.ConstructNextTestConnection(clientID, counterpartyClientID)
-
- chain.Connections = append(chain.Connections, conn)
- return conn
-}
-
-// ConstructNextTestConnection constructs the next test connection to be
-// created given a clientID and counterparty clientID. The connection id
-// format: -conn
-func (chain *TestChain) ConstructNextTestConnection(clientID, counterpartyClientID string) *TestConnection {
- connectionID := connectiontypes.FormatConnectionIdentifier(uint64(len(chain.Connections)))
- return &TestConnection{
- ID: connectionID,
- ClientID: clientID,
- NextChannelVersion: DefaultChannelVersion,
- CounterpartyClientID: counterpartyClientID,
- }
-}
-
-// GetFirstTestConnection returns the first test connection for a given clientID.
-// The connection may or may not exist in the chain state.
-func (chain *TestChain) GetFirstTestConnection(clientID, counterpartyClientID string) *TestConnection {
- if len(chain.Connections) > 0 {
- return chain.Connections[0]
- }
-
- return chain.ConstructNextTestConnection(clientID, counterpartyClientID)
-}
-
-// AddTestChannel appends a new TestChannel which contains references to the port and channel ID
-// used for channel creation and interaction. See 'NextTestChannel' for channel ID naming format.
-func (chain *TestChain) AddTestChannel(conn *TestConnection, portID string) TestChannel {
- channel := chain.NextTestChannel(conn, portID)
- conn.Channels = append(conn.Channels, channel)
- return channel
-}
-
-// NextTestChannel returns the next test channel to be created on this connection, but does not
-// add it to the list of created channels. This function is expected to be used when the caller
-// has not created the associated channel in app state, but would still like to refer to the
-// non-existent channel usually to test for its non-existence.
-//
-// channel ID format: -chan
-//
-// The port is passed in by the caller.
-func (chain *TestChain) NextTestChannel(conn *TestConnection, portID string) TestChannel {
- nextChanSeq := chain.App.IBCKeeper.ChannelKeeper.GetNextChannelSequence(chain.GetContext())
- channelID := channeltypes.FormatChannelIdentifier(nextChanSeq)
- return TestChannel{
- PortID: portID,
- ID: channelID,
- ClientID: conn.ClientID,
- CounterpartyClientID: conn.CounterpartyClientID,
- Version: conn.NextChannelVersion,
- }
-}
-
-// ConstructMsgCreateClient constructs a message to create a new client state (tendermint or solomachine).
-// NOTE: a solo machine client will be created with an empty diversifier.
-func (chain *TestChain) ConstructMsgCreateClient(counterparty *TestChain, clientID string, clientType string) *clienttypes.MsgCreateClient {
- var (
- clientState exported.ClientState
- consensusState exported.ConsensusState
- )
-
- switch clientType {
- case exported.Tendermint:
- height := counterparty.LastHeader.GetHeight().(clienttypes.Height)
- clientState = ibctmtypes.NewClientState(
- counterparty.ChainID, DefaultTrustLevel, TrustingPeriod, UnbondingPeriod, MaxClockDrift,
- height, commitmenttypes.GetSDKSpecs(), UpgradePath, false, false,
- )
- consensusState = counterparty.LastHeader.ConsensusState()
- case exported.Solomachine:
- solo := NewSolomachine(chain.t, chain.Codec, clientID, "", 1)
- clientState = solo.ClientState()
- consensusState = solo.ConsensusState()
- default:
- chain.t.Fatalf("unsupported client state type %s", clientType)
- }
-
- msg, err := clienttypes.NewMsgCreateClient(
- clientState, consensusState, chain.SenderAccount.GetAddress().String(),
- )
- require.NoError(chain.t, err)
- return msg
-}
-
-// CreateTMClient will construct and execute a 07-tendermint MsgCreateClient. A counterparty
-// client will be created on the (target) chain.
-func (chain *TestChain) CreateTMClient(counterparty *TestChain, clientID string) error {
- // construct MsgCreateClient using counterparty
- msg := chain.ConstructMsgCreateClient(counterparty, clientID, exported.Tendermint)
- return chain.sendMsgs(msg)
-}
-
-// UpdateTMClient will construct and execute a 07-tendermint MsgUpdateClient. The counterparty
-// client will be updated on the (target) chain. UpdateTMClient mocks the relayer flow
-// necessary for updating a Tendermint client.
-func (chain *TestChain) UpdateTMClient(counterparty *TestChain, clientID string) error {
- header, err := chain.ConstructUpdateTMClientHeader(counterparty, clientID)
- require.NoError(chain.t, err)
-
- msg, err := clienttypes.NewMsgUpdateClient(
- clientID, header,
- chain.SenderAccount.GetAddress().String(),
- )
- require.NoError(chain.t, err)
-
- return chain.sendMsgs(msg)
+ return commitmenttypes.NewMerklePrefix(chain.App.GetIBCKeeper().ConnectionKeeper.GetCommitmentPrefix().Bytes())
}
// ConstructUpdateTMClientHeader will construct a valid 07-tendermint Header to update the
@@ -534,7 +352,7 @@ func (chain *TestChain) ConstructUpdateTMClientHeader(counterparty *TestChain, c
// ExpireClient fast forwards the chain's block time by the provided amount of time which will
// expire any clients with a trusting period less than or equal to this amount of time.
func (chain *TestChain) ExpireClient(amount time.Duration) {
- chain.CurrentHeader.Time = chain.CurrentHeader.Time.Add(amount)
+ chain.Coordinator.IncrementTimeBy(amount)
}
// CurrentTMClientHeader creates a TM header using the current header parameters
@@ -637,105 +455,21 @@ func CreateSortedSignerArray(altPrivVal, suitePrivVal tmtypes.PrivValidator,
}
}
-// ConnectionOpenInit will construct and execute a MsgConnectionOpenInit.
-func (chain *TestChain) ConnectionOpenInit(
- counterparty *TestChain,
- connection, counterpartyConnection *TestConnection,
-) error {
- msg := connectiontypes.NewMsgConnectionOpenInit(
- connection.ClientID,
- connection.CounterpartyClientID,
- counterparty.GetPrefix(), DefaultOpenInitVersion, DefaultDelayPeriod,
- chain.SenderAccount.GetAddress().String(),
- )
- return chain.sendMsgs(msg)
-}
-
-// ConnectionOpenTry will construct and execute a MsgConnectionOpenTry.
-func (chain *TestChain) ConnectionOpenTry(
- counterparty *TestChain,
- connection, counterpartyConnection *TestConnection,
-) error {
- counterpartyClient, proofClient := counterparty.QueryClientStateProof(counterpartyConnection.ClientID)
-
- connectionKey := host.ConnectionKey(counterpartyConnection.ID)
- proofInit, proofHeight := counterparty.QueryProof(connectionKey)
-
- proofConsensus, consensusHeight := counterparty.QueryConsensusStateProof(counterpartyConnection.ClientID)
-
- msg := connectiontypes.NewMsgConnectionOpenTry(
- "", connection.ClientID, // does not support handshake continuation
- counterpartyConnection.ID, counterpartyConnection.ClientID,
- counterpartyClient, counterparty.GetPrefix(), []*connectiontypes.Version{ConnectionVersion}, DefaultDelayPeriod,
- proofInit, proofClient, proofConsensus,
- proofHeight, consensusHeight,
- chain.SenderAccount.GetAddress().String(),
- )
- return chain.sendMsgs(msg)
-}
-
-// ConnectionOpenAck will construct and execute a MsgConnectionOpenAck.
-func (chain *TestChain) ConnectionOpenAck(
- counterparty *TestChain,
- connection, counterpartyConnection *TestConnection,
-) error {
- counterpartyClient, proofClient := counterparty.QueryClientStateProof(counterpartyConnection.ClientID)
-
- connectionKey := host.ConnectionKey(counterpartyConnection.ID)
- proofTry, proofHeight := counterparty.QueryProof(connectionKey)
-
- proofConsensus, consensusHeight := counterparty.QueryConsensusStateProof(counterpartyConnection.ClientID)
-
- msg := connectiontypes.NewMsgConnectionOpenAck(
- connection.ID, counterpartyConnection.ID, counterpartyClient, // testing doesn't use flexible selection
- proofTry, proofClient, proofConsensus,
- proofHeight, consensusHeight,
- ConnectionVersion,
- chain.SenderAccount.GetAddress().String(),
- )
- return chain.sendMsgs(msg)
-}
-
-// ConnectionOpenConfirm will construct and execute a MsgConnectionOpenConfirm.
-func (chain *TestChain) ConnectionOpenConfirm(
- counterparty *TestChain,
- connection, counterpartyConnection *TestConnection,
-) error {
- connectionKey := host.ConnectionKey(counterpartyConnection.ID)
- proof, height := counterparty.QueryProof(connectionKey)
-
- msg := connectiontypes.NewMsgConnectionOpenConfirm(
- connection.ID,
- proof, height,
- chain.SenderAccount.GetAddress().String(),
- )
- return chain.sendMsgs(msg)
-}
-
// CreatePortCapability binds and claims a capability for the given portID if it does not
// already exist. This function will fail testing on any resulting error.
// NOTE: only creation of a capability for a transfer or mock port is supported
// Other applications must bind to the port in InitGenesis or modify this code.
-func (chain *TestChain) CreatePortCapability(portID string) {
+func (chain *TestChain) CreatePortCapability(scopedKeeper capabilitykeeper.ScopedKeeper, portID string) {
// check if the portId is already bound, if not bind it
- _, ok := chain.App.ScopedIBCKeeper.GetCapability(chain.GetContext(), host.PortPath(portID))
+ _, ok := chain.App.GetScopedIBCKeeper().GetCapability(chain.GetContext(), host.PortPath(portID))
if !ok {
// create capability using the IBC capability keeper
- cap, err := chain.App.ScopedIBCKeeper.NewCapability(chain.GetContext(), host.PortPath(portID))
+ cap, err := chain.App.GetScopedIBCKeeper().NewCapability(chain.GetContext(), host.PortPath(portID))
require.NoError(chain.t, err)
- switch portID {
- case MockPort:
- // claim capability using the mock capability keeper
- err = chain.App.ScopedIBCMockKeeper.ClaimCapability(chain.GetContext(), cap, host.PortPath(portID))
- require.NoError(chain.t, err)
- case TransferPort:
- // claim capability using the transfer capability keeper
- err = chain.App.ScopedTransferKeeper.ClaimCapability(chain.GetContext(), cap, host.PortPath(portID))
- require.NoError(chain.t, err)
- default:
- panic(fmt.Sprintf("unsupported ibc testing package port ID %s", portID))
- }
+ // claim capability using the scopedKeeper
+ err = scopedKeeper.ClaimCapability(chain.GetContext(), cap, host.PortPath(portID))
+ require.NoError(chain.t, err)
}
chain.App.Commit()
@@ -746,22 +480,23 @@ func (chain *TestChain) CreatePortCapability(portID string) {
// GetPortCapability returns the port capability for the given portID. The capability must
// exist, otherwise testing will fail.
func (chain *TestChain) GetPortCapability(portID string) *capabilitytypes.Capability {
- cap, ok := chain.App.ScopedIBCKeeper.GetCapability(chain.GetContext(), host.PortPath(portID))
+ cap, ok := chain.App.GetScopedIBCKeeper().GetCapability(chain.GetContext(), host.PortPath(portID))
require.True(chain.t, ok)
return cap
}
// CreateChannelCapability binds and claims a capability for the given portID and channelID
-// if it does not already exist. This function will fail testing on any resulting error.
-func (chain *TestChain) CreateChannelCapability(portID, channelID string) {
+// if it does not already exist. This function will fail testing on any resulting error. The
+// scoped keeper passed in will claim the new capability.
+func (chain *TestChain) CreateChannelCapability(scopedKeeper capabilitykeeper.ScopedKeeper, portID, channelID string) {
capName := host.ChannelCapabilityPath(portID, channelID)
// check if the portId is already bound, if not bind it
- _, ok := chain.App.ScopedIBCKeeper.GetCapability(chain.GetContext(), capName)
+ _, ok := chain.App.GetScopedIBCKeeper().GetCapability(chain.GetContext(), capName)
if !ok {
- cap, err := chain.App.ScopedIBCKeeper.NewCapability(chain.GetContext(), capName)
+ cap, err := chain.App.GetScopedIBCKeeper().NewCapability(chain.GetContext(), capName)
require.NoError(chain.t, err)
- err = chain.App.ScopedTransferKeeper.ClaimCapability(chain.GetContext(), cap, capName)
+ err = scopedKeeper.ClaimCapability(chain.GetContext(), cap, capName)
require.NoError(chain.t, err)
}
@@ -773,139 +508,8 @@ func (chain *TestChain) CreateChannelCapability(portID, channelID string) {
// GetChannelCapability returns the channel capability for the given portID and channelID.
// The capability must exist, otherwise testing will fail.
func (chain *TestChain) GetChannelCapability(portID, channelID string) *capabilitytypes.Capability {
- cap, ok := chain.App.ScopedIBCKeeper.GetCapability(chain.GetContext(), host.ChannelCapabilityPath(portID, channelID))
+ cap, ok := chain.App.GetScopedIBCKeeper().GetCapability(chain.GetContext(), host.ChannelCapabilityPath(portID, channelID))
require.True(chain.t, ok)
return cap
}
-
-// ChanOpenInit will construct and execute a MsgChannelOpenInit.
-func (chain *TestChain) ChanOpenInit(
- ch, counterparty TestChannel,
- order channeltypes.Order,
- connectionID string,
-) error {
- msg := channeltypes.NewMsgChannelOpenInit(
- ch.PortID,
- ch.Version, order, []string{connectionID},
- counterparty.PortID,
- chain.SenderAccount.GetAddress().String(),
- )
- return chain.sendMsgs(msg)
-}
-
-// ChanOpenTry will construct and execute a MsgChannelOpenTry.
-func (chain *TestChain) ChanOpenTry(
- counterparty *TestChain,
- ch, counterpartyCh TestChannel,
- order channeltypes.Order,
- connectionID string,
-) error {
- proof, height := counterparty.QueryProof(host.ChannelKey(counterpartyCh.PortID, counterpartyCh.ID))
-
- msg := channeltypes.NewMsgChannelOpenTry(
- ch.PortID, "", // does not support handshake continuation
- ch.Version, order, []string{connectionID},
- counterpartyCh.PortID, counterpartyCh.ID, counterpartyCh.Version,
- proof, height,
- chain.SenderAccount.GetAddress().String(),
- )
- return chain.sendMsgs(msg)
-}
-
-// ChanOpenAck will construct and execute a MsgChannelOpenAck.
-func (chain *TestChain) ChanOpenAck(
- counterparty *TestChain,
- ch, counterpartyCh TestChannel,
-) error {
- proof, height := counterparty.QueryProof(host.ChannelKey(counterpartyCh.PortID, counterpartyCh.ID))
-
- msg := channeltypes.NewMsgChannelOpenAck(
- ch.PortID, ch.ID,
- counterpartyCh.ID, counterpartyCh.Version, // testing doesn't use flexible selection
- proof, height,
- chain.SenderAccount.GetAddress().String(),
- )
- return chain.sendMsgs(msg)
-}
-
-// ChanOpenConfirm will construct and execute a MsgChannelOpenConfirm.
-func (chain *TestChain) ChanOpenConfirm(
- counterparty *TestChain,
- ch, counterpartyCh TestChannel,
-) error {
- proof, height := counterparty.QueryProof(host.ChannelKey(counterpartyCh.PortID, counterpartyCh.ID))
-
- msg := channeltypes.NewMsgChannelOpenConfirm(
- ch.PortID, ch.ID,
- proof, height,
- chain.SenderAccount.GetAddress().String(),
- )
- return chain.sendMsgs(msg)
-}
-
-// ChanCloseInit will construct and execute a MsgChannelCloseInit.
-//
-// NOTE: does not work with ibc-transfer module
-func (chain *TestChain) ChanCloseInit(
- counterparty *TestChain,
- channel TestChannel,
-) error {
- msg := channeltypes.NewMsgChannelCloseInit(
- channel.PortID, channel.ID,
- chain.SenderAccount.GetAddress().String(),
- )
- return chain.sendMsgs(msg)
-}
-
-// GetPacketData returns a ibc-transfer marshalled packet to be used for
-// callback testing.
-func (chain *TestChain) GetPacketData(counterparty *TestChain) []byte {
- packet := ibctransfertypes.FungibleTokenPacketData{
- Denom: TestCoin.Denom,
- Amount: TestCoin.Amount.Uint64(),
- Sender: chain.SenderAccount.GetAddress().String(),
- Receiver: counterparty.SenderAccount.GetAddress().String(),
- }
-
- return packet.GetBytes()
-}
-
-// SendPacket simulates sending a packet through the channel keeper. No message needs to be
-// passed since this call is made from a module.
-func (chain *TestChain) SendPacket(
- packet exported.PacketI,
-) error {
- channelCap := chain.GetChannelCapability(packet.GetSourcePort(), packet.GetSourceChannel())
-
- // no need to send message, acting as a module
- err := chain.App.IBCKeeper.ChannelKeeper.SendPacket(chain.GetContext(), channelCap, packet)
- if err != nil {
- return err
- }
-
- // commit changes
- chain.App.Commit()
- chain.NextBlock()
-
- return nil
-}
-
-// WriteAcknowledgement simulates writing an acknowledgement to the chain.
-func (chain *TestChain) WriteAcknowledgement(
- packet exported.PacketI,
-) error {
- channelCap := chain.GetChannelCapability(packet.GetDestPort(), packet.GetDestChannel())
-
- // no need to send message, acting as a handler
- err := chain.App.IBCKeeper.ChannelKeeper.WriteAcknowledgement(chain.GetContext(), channelCap, packet, MockAcknowledgement)
- if err != nil {
- return err
- }
-
- // commit changes
- chain.App.Commit()
- chain.NextBlock()
-
- return nil
-}
diff --git a/testing/config.go b/testing/config.go
new file mode 100644
index 00000000..7db94150
--- /dev/null
+++ b/testing/config.go
@@ -0,0 +1,65 @@
+package ibctesting
+
+import (
+ "time"
+
+ connectiontypes "github.com/cosmos/ibc-go/modules/core/03-connection/types"
+ channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
+ "github.com/cosmos/ibc-go/modules/core/exported"
+ ibctmtypes "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types"
+ "github.com/cosmos/ibc-go/testing/mock"
+)
+
+// ClientConfig is implemented by the per-client-type configuration structs consumed by Endpoint.
+type ClientConfig interface {
+ GetClientType() string
+}
+
+// TendermintConfig holds the parameters used when creating a 07-tendermint client.
+type TendermintConfig struct {
+ TrustLevel ibctmtypes.Fraction
+ TrustingPeriod time.Duration
+ UnbondingPeriod time.Duration
+ MaxClockDrift time.Duration
+ AllowUpdateAfterExpiry bool
+ AllowUpdateAfterMisbehaviour bool
+}
+
+func NewTendermintConfig() *TendermintConfig {
+ return &TendermintConfig{
+ TrustLevel: DefaultTrustLevel,
+ TrustingPeriod: TrustingPeriod,
+ UnbondingPeriod: UnbondingPeriod,
+ MaxClockDrift: MaxClockDrift,
+ AllowUpdateAfterExpiry: false,
+ AllowUpdateAfterMisbehaviour: false,
+ }
+}
+
+func (tmcfg *TendermintConfig) GetClientType() string {
+ return exported.Tendermint
+}
+
+// ConnectionConfig holds the parameters used during the connection handshake.
+type ConnectionConfig struct {
+ DelayPeriod uint64
+ Version *connectiontypes.Version
+}
+
+func NewConnectionConfig() *ConnectionConfig {
+ return &ConnectionConfig{
+ DelayPeriod: DefaultDelayPeriod,
+ Version: ConnectionVersion,
+ }
+}
+
+// ChannelConfig holds the parameters used during the channel handshake.
+type ChannelConfig struct {
+ PortID string
+ Version string
+ Order channeltypes.Order
+}
+
+func NewChannelConfig() *ChannelConfig {
+ return &ChannelConfig{
+ PortID: mock.ModuleName,
+ Version: DefaultChannelVersion,
+ Order: channeltypes.UNORDERED,
+ }
+}
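+
+// Illustrative usage (a sketch, assuming a *TestChain named chain obtained from the
+// coordinator by the calling test): the constructors above supply the defaults that
+// Endpoint consumes, and individual fields can be overridden before the handshakes run.
+//
+//   ep := NewEndpoint(chain, NewTendermintConfig(), NewConnectionConfig(), NewChannelConfig())
+//   ep.ChannelConfig.PortID = TransferPort // override the default mock port for ibc-transfer tests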
diff --git a/testing/coordinator.go b/testing/coordinator.go
index 9bf6f040..c7a5d6ed 100644
--- a/testing/coordinator.go
+++ b/testing/coordinator.go
@@ -8,15 +8,11 @@ import (
"github.com/stretchr/testify/require"
abci "github.com/tendermint/tendermint/abci/types"
-
- sdk "github.com/cosmos/cosmos-sdk/types"
- channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
- host "github.com/cosmos/ibc-go/modules/core/24-host"
- "github.com/cosmos/ibc-go/modules/core/exported"
)
+const ChainIDPrefix = "testchain"
+
var (
- ChainIDPrefix = "testchain"
globalStartTime = time.Date(2020, 1, 2, 0, 0, 0, 0, time.UTC)
TimeIncrement = time.Second * 5
)
@@ -26,334 +22,146 @@ var (
type Coordinator struct {
t *testing.T
- Chains map[string]*TestChain
+ CurrentTime time.Time
+ Chains map[string]*TestChain
}
// NewCoordinator initializes Coordinator with N TestChain's
func NewCoordinator(t *testing.T, n int) *Coordinator {
chains := make(map[string]*TestChain)
+ coord := &Coordinator{
+ t: t,
+ CurrentTime: globalStartTime,
+ }
for i := 0; i < n; i++ {
chainID := GetChainID(i)
- chains[chainID] = NewTestChain(t, chainID)
+ chains[chainID] = NewTestChain(t, coord, chainID)
}
- return &Coordinator{
- t: t,
- Chains: chains,
+ coord.Chains = chains
+
+ return coord
+}
+
+// IncrementTime increments the coordinator's current time by 5 seconds and updates the
+// current header time of every TestChain accordingly.
+//
+// CONTRACT: this function must be called after every Commit on any TestChain.
+func (coord *Coordinator) IncrementTime() {
+ coord.IncrementTimeBy(TimeIncrement)
+}
+
+// IncrementTimeBy increments the coordinator's current time by the specified duration and
+// updates the current header time of every TestChain accordingly.
+func (coord *Coordinator) IncrementTimeBy(increment time.Duration) {
+ coord.CurrentTime = coord.CurrentTime.Add(increment).UTC()
+ coord.UpdateTime()
+
+}
+
+// UpdateTime updates all clocks for the TestChains to the current global time.
+func (coord *Coordinator) UpdateTime() {
+ for _, chain := range coord.Chains {
+ coord.UpdateTimeForChain(chain)
}
}
+// UpdateTimeForChain updates the clock for a specific chain.
+func (coord *Coordinator) UpdateTimeForChain(chain *TestChain) {
+ chain.CurrentHeader.Time = coord.CurrentTime.UTC()
+ chain.App.BeginBlock(abci.RequestBeginBlock{Header: chain.CurrentHeader})
+}
+
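+// Illustrative sketch of how the time helpers above are used (assumes a Coordinator named
+// coord and a chain committed by the calling test):
+//
+//   chain.App.Commit()
+//   coord.IncrementTime()                 // advance every chain by the 5s TimeIncrement
+//   coord.IncrementTimeBy(TrustingPeriod) // or jump ahead far enough to expire clients
+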
// Setup constructs a TM client, connection, and channel on both chains provided. It will
// fail if any error occurs. The client, connection, and channel identifiers are stored on
// the path's endpoints. The channels created use the mock application module by default.
-func (coord *Coordinator) Setup(
- chainA, chainB *TestChain, order channeltypes.Order,
-) (string, string, *TestConnection, *TestConnection, TestChannel, TestChannel) {
- clientA, clientB, connA, connB := coord.SetupClientConnections(chainA, chainB, exported.Tendermint)
+func (coord *Coordinator) Setup(path *Path) {
+ coord.SetupConnections(path)
// the channel identifiers are stored on the path's endpoints
- channelA, channelB := coord.CreateMockChannels(chainA, chainB, connA, connB, order)
-
- return clientA, clientB, connA, connB, channelA, channelB
+ coord.CreateChannels(path)
}
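+// Illustrative end-to-end sketch of the path-based flow (wiring the Path is a sketch; it
+// assumes the EndpointA/EndpointB fields are *Endpoint and must point at each other):
+//
+//   coord := NewCoordinator(t, 2)
+//   path := &Path{
+//       EndpointA: NewDefaultEndpoint(coord.Chains[GetChainID(0)]),
+//       EndpointB: NewDefaultEndpoint(coord.Chains[GetChainID(1)]),
+//   }
+//   path.EndpointA.Counterparty, path.EndpointB.Counterparty = path.EndpointB, path.EndpointA
+//   coord.Setup(path) // runs the client, connection, and channel handshakes on both chains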
// SetupClients is a helper function to create clients on both chains. It assumes the
// caller does not anticipate any errors.
-func (coord *Coordinator) SetupClients(
- chainA, chainB *TestChain,
- clientType string,
-) (string, string) {
-
- clientA, err := coord.CreateClient(chainA, chainB, clientType)
+func (coord *Coordinator) SetupClients(path *Path) {
+ err := path.EndpointA.CreateClient()
require.NoError(coord.t, err)
- clientB, err := coord.CreateClient(chainB, chainA, clientType)
+ err = path.EndpointB.CreateClient()
require.NoError(coord.t, err)
-
- return clientA, clientB
}
// SetupConnections is a helper function to create clients and the appropriate
// connections on both the source and counterparty chain. It assumes the caller does not
// anticipate any errors.
-func (coord *Coordinator) SetupClientConnections(
- chainA, chainB *TestChain,
- clientType string,
-) (string, string, *TestConnection, *TestConnection) {
-
- clientA, clientB := coord.SetupClients(chainA, chainB, clientType)
-
- connA, connB := coord.CreateConnection(chainA, chainB, clientA, clientB)
-
- return clientA, clientB, connA, connB
-}
-
-// CreateClient creates a counterparty client on the source chain and returns the clientID.
-func (coord *Coordinator) CreateClient(
- source, counterparty *TestChain,
- clientType string,
-) (clientID string, err error) {
- coord.CommitBlock(source, counterparty)
-
- clientID = source.NewClientID(clientType)
-
- switch clientType {
- case exported.Tendermint:
- err = source.CreateTMClient(counterparty, clientID)
-
- default:
- err = fmt.Errorf("client type %s is not supported", clientType)
- }
-
- if err != nil {
- return "", err
- }
-
- coord.IncrementTime()
-
- return clientID, nil
-}
-
-// UpdateClient updates a counterparty client on the source chain.
-func (coord *Coordinator) UpdateClient(
- source, counterparty *TestChain,
- clientID string,
- clientType string,
-) (err error) {
- coord.CommitBlock(source, counterparty)
-
- switch clientType {
- case exported.Tendermint:
- err = source.UpdateTMClient(counterparty, clientID)
-
- default:
- err = fmt.Errorf("client type %s is not supported", clientType)
- }
-
- if err != nil {
- return err
- }
+func (coord *Coordinator) SetupConnections(path *Path) {
+ coord.SetupClients(path)
- coord.IncrementTime()
-
- return nil
+ coord.CreateConnections(path)
}
// CreateConnections constructs and executes connection handshake messages in order to create
// OPEN connections on chainA and chainB. The connection identifiers for chainA and chainB
// are stored on the path's endpoints. The function expects the connections to be
// successfully opened otherwise testing will fail.
-func (coord *Coordinator) CreateConnection(
- chainA, chainB *TestChain,
- clientA, clientB string,
-) (*TestConnection, *TestConnection) {
+func (coord *Coordinator) CreateConnections(path *Path) {
- connA, connB, err := coord.ConnOpenInit(chainA, chainB, clientA, clientB)
+ err := path.EndpointA.ConnOpenInit()
require.NoError(coord.t, err)
- err = coord.ConnOpenTry(chainB, chainA, connB, connA)
+ err = path.EndpointB.ConnOpenTry()
require.NoError(coord.t, err)
- err = coord.ConnOpenAck(chainA, chainB, connA, connB)
+ err = path.EndpointA.ConnOpenAck()
require.NoError(coord.t, err)
- err = coord.ConnOpenConfirm(chainB, chainA, connB, connA)
+ err = path.EndpointB.ConnOpenConfirm()
require.NoError(coord.t, err)
- return connA, connB
+ // ensure counterparty is up to date
+ path.EndpointA.UpdateClient()
}
// CreateMockChannels constructs and executes channel handshake messages to create OPEN
// channels that use a mock application module that returns nil on all callbacks. This
// function expects the channels to be successfully opened otherwise testing will
// fail.
-func (coord *Coordinator) CreateMockChannels(
- chainA, chainB *TestChain,
- connA, connB *TestConnection,
- order channeltypes.Order,
-) (TestChannel, TestChannel) {
- return coord.CreateChannel(chainA, chainB, connA, connB, MockPort, MockPort, order)
+func (coord *Coordinator) CreateMockChannels(path *Path) {
+ path.EndpointA.ChannelConfig.PortID = MockPort
+ path.EndpointB.ChannelConfig.PortID = MockPort
+
+ coord.CreateChannels(path)
}
// CreateTransferChannels constructs and executes channel handshake messages to create OPEN
// ibc-transfer channels on chainA and chainB. The function expects the channels to be
// successfully opened otherwise testing will fail.
-func (coord *Coordinator) CreateTransferChannels(
- chainA, chainB *TestChain,
- connA, connB *TestConnection,
- order channeltypes.Order,
-) (TestChannel, TestChannel) {
- return coord.CreateChannel(chainA, chainB, connA, connB, TransferPort, TransferPort, order)
+func (coord *Coordinator) CreateTransferChannels(path *Path) {
+ path.EndpointA.ChannelConfig.PortID = TransferPort
+ path.EndpointB.ChannelConfig.PortID = TransferPort
+
+ coord.CreateChannels(path)
}
// CreateChannels constructs and executes channel handshake messages in order to create
// OPEN channels on chainA and chainB. The function expects the channels to be successfully
// opened otherwise testing will fail.
-func (coord *Coordinator) CreateChannel(
- chainA, chainB *TestChain,
- connA, connB *TestConnection,
- sourcePortID, counterpartyPortID string,
- order channeltypes.Order,
-) (TestChannel, TestChannel) {
-
- channelA, channelB, err := coord.ChanOpenInit(chainA, chainB, connA, connB, sourcePortID, counterpartyPortID, order)
+func (coord *Coordinator) CreateChannels(path *Path) {
+ err := path.EndpointA.ChanOpenInit()
require.NoError(coord.t, err)
- err = coord.ChanOpenTry(chainB, chainA, channelB, channelA, connB, order)
+ err = path.EndpointB.ChanOpenTry()
require.NoError(coord.t, err)
- err = coord.ChanOpenAck(chainA, chainB, channelA, channelB)
+ err = path.EndpointA.ChanOpenAck()
require.NoError(coord.t, err)
- err = coord.ChanOpenConfirm(chainB, chainA, channelB, channelA)
+ err = path.EndpointB.ChanOpenConfirm()
require.NoError(coord.t, err)
- return channelA, channelB
-}
-
-// SendPacket sends a packet through the channel keeper on the source chain and updates the
-// counterparty client for the source chain.
-func (coord *Coordinator) SendPacket(
- source, counterparty *TestChain,
- packet exported.PacketI,
- counterpartyClientID string,
-) error {
- if err := source.SendPacket(packet); err != nil {
- return err
- }
- coord.IncrementTime()
-
- // update source client on counterparty connection
- return coord.UpdateClient(
- counterparty, source,
- counterpartyClientID, exported.Tendermint,
- )
-}
-
-// RecvPacket receives a channel packet on the counterparty chain and updates
-// the client on the source chain representing the counterparty.
-func (coord *Coordinator) RecvPacket(
- source, counterparty *TestChain,
- sourceClient string,
- packet channeltypes.Packet,
-) error {
- // get proof of packet commitment on source
- packetKey := host.PacketCommitmentKey(packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence())
- proof, proofHeight := source.QueryProof(packetKey)
-
- // Increment time and commit block so that 5 second delay period passes between send and receive
- coord.IncrementTime()
- coord.CommitBlock(source, counterparty)
-
- recvMsg := channeltypes.NewMsgRecvPacket(packet, proof, proofHeight, counterparty.SenderAccount.GetAddress().String())
-
- // receive on counterparty and update source client
- return coord.SendMsgs(counterparty, source, sourceClient, []sdk.Msg{recvMsg})
-}
-
-// WriteAcknowledgement writes an acknowledgement to the channel keeper on the source chain and updates the
-// counterparty client for the source chain.
-func (coord *Coordinator) WriteAcknowledgement(
- source, counterparty *TestChain,
- packet exported.PacketI,
- counterpartyClientID string,
-) error {
- if err := source.WriteAcknowledgement(packet); err != nil {
- return err
- }
- coord.IncrementTime()
-
- // update source client on counterparty connection
- return coord.UpdateClient(
- counterparty, source,
- counterpartyClientID, exported.Tendermint,
- )
-}
-
-// AcknowledgePacket acknowledges on the source chain the packet received on
-// the counterparty chain and updates the client on the counterparty representing
-// the source chain.
-// TODO: add a query for the acknowledgement by events
-// - https://github.com/cosmos/cosmos-sdk/issues/6509
-func (coord *Coordinator) AcknowledgePacket(
- source, counterparty *TestChain,
- counterpartyClient string,
- packet channeltypes.Packet, ack []byte,
-) error {
- // get proof of acknowledgement on counterparty
- packetKey := host.PacketAcknowledgementKey(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence())
- proof, proofHeight := counterparty.QueryProof(packetKey)
-
- // Increment time and commit block so that 5 second delay period passes between send and receive
- coord.IncrementTime()
- coord.CommitBlock(source, counterparty)
-
- ackMsg := channeltypes.NewMsgAcknowledgement(packet, ack, proof, proofHeight, source.SenderAccount.GetAddress().String())
- return coord.SendMsgs(source, counterparty, counterpartyClient, []sdk.Msg{ackMsg})
-}
-
-// RelayPacket receives a channel packet on counterparty, queries the ack
-// and acknowledges the packet on source. The clients are updated as needed.
-func (coord *Coordinator) RelayPacket(
- source, counterparty *TestChain,
- sourceClient, counterpartyClient string,
- packet channeltypes.Packet, ack []byte,
-) error {
- // Increment time and commit block so that 5 second delay period passes between send and receive
- coord.IncrementTime()
- coord.CommitBlock(counterparty)
-
- if err := coord.RecvPacket(source, counterparty, sourceClient, packet); err != nil {
- return err
- }
-
- // Increment time and commit block so that 5 second delay period passes between send and receive
- coord.IncrementTime()
- coord.CommitBlock(source)
-
- return coord.AcknowledgePacket(source, counterparty, counterpartyClient, packet, ack)
-}
-
-// IncrementTime iterates through all the TestChain's and increments their current header time
-// by 5 seconds.
-//
-// CONTRACT: this function must be called after every commit on any TestChain.
-func (coord *Coordinator) IncrementTime() {
- for _, chain := range coord.Chains {
- chain.CurrentHeader.Time = chain.CurrentHeader.Time.Add(TimeIncrement)
- chain.App.BeginBlock(abci.RequestBeginBlock{Header: chain.CurrentHeader})
- }
-}
-
-// IncrementTimeBy iterates through all the TestChain's and increments their current header time
-// by specified time.
-func (coord *Coordinator) IncrementTimeBy(increment time.Duration) {
- for _, chain := range coord.Chains {
- chain.CurrentHeader.Time = chain.CurrentHeader.Time.Add(increment)
- chain.App.BeginBlock(abci.RequestBeginBlock{Header: chain.CurrentHeader})
- }
-}
-
-// SendMsg delivers a single provided message to the chain. The counterparty
-// client is update with the new source consensus state.
-func (coord *Coordinator) SendMsg(source, counterparty *TestChain, counterpartyClientID string, msg sdk.Msg) error {
- return coord.SendMsgs(source, counterparty, counterpartyClientID, []sdk.Msg{msg})
-}
-
-// SendMsgs delivers the provided messages to the chain. The counterparty
-// client is updated with the new source consensus state.
-func (coord *Coordinator) SendMsgs(source, counterparty *TestChain, counterpartyClientID string, msgs []sdk.Msg) error {
- if err := source.sendMsgs(msgs...); err != nil {
- return err
- }
-
- coord.IncrementTime()
-
- // update source client on counterparty connection
- return coord.UpdateClient(
- counterparty, source,
- counterpartyClientID, exported.Tendermint,
- )
+ // ensure counterparty is up to date
+ path.EndpointA.UpdateClient()
}
// GetChain returns the TestChain using the given chainID and returns an error if it does
@@ -390,311 +198,49 @@ func (coord *Coordinator) CommitNBlocks(chain *TestChain, n uint64) {
}
}
-// ConnOpenInit initializes a connection on the source chain with the state INIT
-// using the OpenInit handshake call.
-//
-// NOTE: The counterparty testing connection will be created even if it is not created in the
-// application state.
-func (coord *Coordinator) ConnOpenInit(
- source, counterparty *TestChain,
- clientID, counterpartyClientID string,
-) (*TestConnection, *TestConnection, error) {
- sourceConnection := source.AddTestConnection(clientID, counterpartyClientID)
- counterpartyConnection := counterparty.AddTestConnection(counterpartyClientID, clientID)
-
- // initialize connection on source
- if err := source.ConnectionOpenInit(counterparty, sourceConnection, counterpartyConnection); err != nil {
- return sourceConnection, counterpartyConnection, err
- }
- coord.IncrementTime()
-
- // update source client on counterparty connection
- if err := coord.UpdateClient(
- counterparty, source,
- counterpartyClientID, exported.Tendermint,
- ); err != nil {
- return sourceConnection, counterpartyConnection, err
- }
-
- return sourceConnection, counterpartyConnection, nil
-}
-
-// ConnOpenInitOnBothChains initializes a connection on the source chain with the state INIT
+// ConnOpenInitOnBothChains initializes a connection on both endpoints with the state INIT
// using the OpenInit handshake call.
-func (coord *Coordinator) ConnOpenInitOnBothChains(
- source, counterparty *TestChain,
- clientID, counterpartyClientID string,
-) (*TestConnection, *TestConnection, error) {
- sourceConnection := source.AddTestConnection(clientID, counterpartyClientID)
- counterpartyConnection := counterparty.AddTestConnection(counterpartyClientID, clientID)
-
- // initialize connection on source
- if err := source.ConnectionOpenInit(counterparty, sourceConnection, counterpartyConnection); err != nil {
- return sourceConnection, counterpartyConnection, err
- }
- coord.IncrementTime()
-
- // initialize connection on counterparty
- if err := counterparty.ConnectionOpenInit(source, counterpartyConnection, sourceConnection); err != nil {
- return sourceConnection, counterpartyConnection, err
- }
- coord.IncrementTime()
-
- // update counterparty client on source connection
- if err := coord.UpdateClient(
- source, counterparty,
- clientID, exported.Tendermint,
- ); err != nil {
- return sourceConnection, counterpartyConnection, err
- }
-
- // update source client on counterparty connection
- if err := coord.UpdateClient(
- counterparty, source,
- counterpartyClientID, exported.Tendermint,
- ); err != nil {
- return sourceConnection, counterpartyConnection, err
- }
-
- return sourceConnection, counterpartyConnection, nil
-}
-
-// ConnOpenTry initializes a connection on the source chain with the state TRYOPEN
-// using the OpenTry handshake call.
-func (coord *Coordinator) ConnOpenTry(
- source, counterparty *TestChain,
- sourceConnection, counterpartyConnection *TestConnection,
-) error {
- // initialize TRYOPEN connection on source
- if err := source.ConnectionOpenTry(counterparty, sourceConnection, counterpartyConnection); err != nil {
+func (coord *Coordinator) ConnOpenInitOnBothChains(path *Path) error {
+ if err := path.EndpointA.ConnOpenInit(); err != nil {
return err
}
- coord.IncrementTime()
-
- // update source client on counterparty connection
- return coord.UpdateClient(
- counterparty, source,
- counterpartyConnection.ClientID, exported.Tendermint,
- )
-}
-// ConnOpenAck initializes a connection on the source chain with the state OPEN
-// using the OpenAck handshake call.
-func (coord *Coordinator) ConnOpenAck(
- source, counterparty *TestChain,
- sourceConnection, counterpartyConnection *TestConnection,
-) error {
- // set OPEN connection on source using OpenAck
- if err := source.ConnectionOpenAck(counterparty, sourceConnection, counterpartyConnection); err != nil {
+ if err := path.EndpointB.ConnOpenInit(); err != nil {
return err
}
- coord.IncrementTime()
- // update source client on counterparty connection
- return coord.UpdateClient(
- counterparty, source,
- counterpartyConnection.ClientID, exported.Tendermint,
- )
-}
-
-// ConnOpenConfirm initializes a connection on the source chain with the state OPEN
-// using the OpenConfirm handshake call.
-func (coord *Coordinator) ConnOpenConfirm(
- source, counterparty *TestChain,
- sourceConnection, counterpartyConnection *TestConnection,
-) error {
- if err := source.ConnectionOpenConfirm(counterparty, sourceConnection, counterpartyConnection); err != nil {
+ if err := path.EndpointA.UpdateClient(); err != nil {
return err
}
- coord.IncrementTime()
- // update source client on counterparty connection
- return coord.UpdateClient(
- counterparty, source,
- counterpartyConnection.ClientID, exported.Tendermint,
- )
-}
-
-// ChanOpenInit initializes a channel on the source chain with the state INIT
-// using the OpenInit handshake call.
-//
-// NOTE: The counterparty testing channel will be created even if it is not created in the
-// application state.
-func (coord *Coordinator) ChanOpenInit(
- source, counterparty *TestChain,
- connection, counterpartyConnection *TestConnection,
- sourcePortID, counterpartyPortID string,
- order channeltypes.Order,
-) (TestChannel, TestChannel, error) {
- sourceChannel := source.AddTestChannel(connection, sourcePortID)
- counterpartyChannel := counterparty.AddTestChannel(counterpartyConnection, counterpartyPortID)
-
- // NOTE: only creation of a capability for a transfer or mock port is supported
- // Other applications must bind to the port in InitGenesis or modify this code.
- source.CreatePortCapability(sourceChannel.PortID)
- coord.IncrementTime()
-
- // initialize channel on source
- if err := source.ChanOpenInit(sourceChannel, counterpartyChannel, order, connection.ID); err != nil {
- return sourceChannel, counterpartyChannel, err
- }
- coord.IncrementTime()
-
- // update source client on counterparty connection
- if err := coord.UpdateClient(
- counterparty, source,
- counterpartyConnection.ClientID, exported.Tendermint,
- ); err != nil {
- return sourceChannel, counterpartyChannel, err
+ if err := path.EndpointB.UpdateClient(); err != nil {
+ return err
}
- return sourceChannel, counterpartyChannel, nil
+ return nil
}
// ChanOpenInitOnBothChains initializes a channel on the source chain and counterparty chain
// with the state INIT using the OpenInit handshake call.
-func (coord *Coordinator) ChanOpenInitOnBothChains(
- source, counterparty *TestChain,
- connection, counterpartyConnection *TestConnection,
- sourcePortID, counterpartyPortID string,
- order channeltypes.Order,
-) (TestChannel, TestChannel, error) {
- sourceChannel := source.AddTestChannel(connection, sourcePortID)
- counterpartyChannel := counterparty.AddTestChannel(counterpartyConnection, counterpartyPortID)
-
+func (coord *Coordinator) ChanOpenInitOnBothChains(path *Path) error {
// NOTE: only creation of a capability for a transfer or mock port is supported
// Other applications must bind to the port in InitGenesis or modify this code.
- source.CreatePortCapability(sourceChannel.PortID)
- counterparty.CreatePortCapability(counterpartyChannel.PortID)
- coord.IncrementTime()
-
- // initialize channel on source
- if err := source.ChanOpenInit(sourceChannel, counterpartyChannel, order, connection.ID); err != nil {
- return sourceChannel, counterpartyChannel, err
- }
- coord.IncrementTime()
-
- // initialize channel on counterparty
- if err := counterparty.ChanOpenInit(counterpartyChannel, sourceChannel, order, counterpartyConnection.ID); err != nil {
- return sourceChannel, counterpartyChannel, err
- }
- coord.IncrementTime()
-
- // update counterparty client on source connection
- if err := coord.UpdateClient(
- source, counterparty,
- connection.ClientID, exported.Tendermint,
- ); err != nil {
- return sourceChannel, counterpartyChannel, err
- }
-
- // update source client on counterparty connection
- if err := coord.UpdateClient(
- counterparty, source,
- counterpartyConnection.ClientID, exported.Tendermint,
- ); err != nil {
- return sourceChannel, counterpartyChannel, err
- }
- return sourceChannel, counterpartyChannel, nil
-}
-
-// ChanOpenTry initializes a channel on the source chain with the state TRYOPEN
-// using the OpenTry handshake call.
-func (coord *Coordinator) ChanOpenTry(
- source, counterparty *TestChain,
- sourceChannel, counterpartyChannel TestChannel,
- connection *TestConnection,
- order channeltypes.Order,
-) error {
-
- // initialize channel on source
- if err := source.ChanOpenTry(counterparty, sourceChannel, counterpartyChannel, order, connection.ID); err != nil {
+ if err := path.EndpointA.ChanOpenInit(); err != nil {
return err
}
- coord.IncrementTime()
-
- // update source client on counterparty connection
- return coord.UpdateClient(
- counterparty, source,
- connection.CounterpartyClientID, exported.Tendermint,
- )
-}
-
-// ChanOpenAck initializes a channel on the source chain with the state OPEN
-// using the OpenAck handshake call.
-func (coord *Coordinator) ChanOpenAck(
- source, counterparty *TestChain,
- sourceChannel, counterpartyChannel TestChannel,
-) error {
- if err := source.ChanOpenAck(counterparty, sourceChannel, counterpartyChannel); err != nil {
+ if err := path.EndpointB.ChanOpenInit(); err != nil {
return err
}
- coord.IncrementTime()
-
- // update source client on counterparty connection
- return coord.UpdateClient(
- counterparty, source,
- sourceChannel.CounterpartyClientID, exported.Tendermint,
- )
-}
-// ChanOpenConfirm initializes a channel on the source chain with the state OPEN
-// using the OpenConfirm handshake call.
-func (coord *Coordinator) ChanOpenConfirm(
- source, counterparty *TestChain,
- sourceChannel, counterpartyChannel TestChannel,
-) error {
-
- if err := source.ChanOpenConfirm(counterparty, sourceChannel, counterpartyChannel); err != nil {
+ if err := path.EndpointA.UpdateClient(); err != nil {
return err
}
- coord.IncrementTime()
- // update source client on counterparty connection
- return coord.UpdateClient(
- counterparty, source,
- sourceChannel.CounterpartyClientID, exported.Tendermint,
- )
-}
-
-// ChanCloseInit closes a channel on the source chain resulting in the channels state
-// being set to CLOSED.
-//
-// NOTE: does not work with ibc-transfer module
-func (coord *Coordinator) ChanCloseInit(
- source, counterparty *TestChain,
- channel TestChannel,
-) error {
-
- if err := source.ChanCloseInit(counterparty, channel); err != nil {
+ if err := path.EndpointB.UpdateClient(); err != nil {
return err
}
- coord.IncrementTime()
- // update source client on counterparty connection
- return coord.UpdateClient(
- counterparty, source,
- channel.CounterpartyClientID, exported.Tendermint,
- )
-}
-
-// SetChannelClosed sets a channel state to CLOSED.
-func (coord *Coordinator) SetChannelClosed(
- source, counterparty *TestChain,
- testChannel TestChannel,
-) error {
- channel := source.GetChannel(testChannel)
-
- channel.State = channeltypes.CLOSED
- source.App.IBCKeeper.ChannelKeeper.SetChannel(source.GetContext(), testChannel.PortID, testChannel.ID, channel)
-
- coord.CommitBlock(source)
-
- // update source client on counterparty connection
- return coord.UpdateClient(
- counterparty, source,
- testChannel.CounterpartyClientID, exported.Tendermint,
- )
+ return nil
}
diff --git a/testing/endpoint.go b/testing/endpoint.go
new file mode 100644
index 00000000..3a9e8dbd
--- /dev/null
+++ b/testing/endpoint.go
@@ -0,0 +1,464 @@
+package ibctesting
+
+import (
+ "fmt"
+
+ // sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/stretchr/testify/require"
+
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ connectiontypes "github.com/cosmos/ibc-go/modules/core/03-connection/types"
+ channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
+ commitmenttypes "github.com/cosmos/ibc-go/modules/core/23-commitment/types"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
+ "github.com/cosmos/ibc-go/modules/core/exported"
+ ibctmtypes "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types"
+)
+
+// Endpoint represents a channel endpoint and its associated
+// client and connections. It contains client, connection, and channel
+// configuration parameters. Endpoint functions will utilize the parameters
+// set in the configuration structs when executing IBC messages.
+type Endpoint struct {
+ Chain *TestChain
+ Counterparty *Endpoint
+ ClientID string
+ ConnectionID string
+ ChannelID string
+
+ ClientConfig ClientConfig
+ ConnectionConfig *ConnectionConfig
+ ChannelConfig *ChannelConfig
+}
+
+// NewEndpoint constructs a new endpoint without the counterparty.
+// CONTRACT: the counterparty endpoint must be set by the caller.
+func NewEndpoint(
+ chain *TestChain, clientConfig ClientConfig,
+ connectionConfig *ConnectionConfig, channelConfig *ChannelConfig,
+) *Endpoint {
+ return &Endpoint{
+ Chain: chain,
+ ClientConfig: clientConfig,
+ ConnectionConfig: connectionConfig,
+ ChannelConfig: channelConfig,
+ }
+}
+
+// NewDefaultEndpoint constructs a new endpoint using default values.
+// CONTRACT: the counterparty endpoint must be set by the caller.
+func NewDefaultEndpoint(chain *TestChain) *Endpoint {
+ return &Endpoint{
+ Chain: chain,
+ ClientConfig: NewTendermintConfig(),
+ ConnectionConfig: NewConnectionConfig(),
+ ChannelConfig: NewChannelConfig(),
+ }
+}
+
+// QueryProof queries the proof associated with this endpoint using the latest client state
+// height on the counterparty chain.
+func (endpoint *Endpoint) QueryProof(key []byte) ([]byte, clienttypes.Height) {
+ // obtain the counterparty client representing the chain associated with the endpoint
+ clientState := endpoint.Counterparty.Chain.GetClientState(endpoint.Counterparty.ClientID)
+
+ // query proof on the counterparty using the latest height of the IBC client
+ return endpoint.QueryProofAtHeight(key, clientState.GetLatestHeight().GetRevisionHeight())
+}
+
+// QueryProofAtHeight queries the proof associated with this endpoint at the provided
+// proof height.
+func (endpoint *Endpoint) QueryProofAtHeight(key []byte, height uint64) ([]byte, clienttypes.Height) {
+ // query proof on this endpoint's chain at the provided height
+ return endpoint.Chain.QueryProofAtHeight(key, int64(height))
+}
+
+// CreateClient creates an IBC client on the endpoint. It will update the
+// clientID for the endpoint if the message is successfully executed.
+// NOTE: a solo machine client will be created with an empty diversifier.
+func (endpoint *Endpoint) CreateClient() (err error) {
+ // ensure counterparty has committed state
+ endpoint.Chain.Coordinator.CommitBlock(endpoint.Counterparty.Chain)
+
+ var (
+ clientState exported.ClientState
+ consensusState exported.ConsensusState
+ )
+
+ switch endpoint.ClientConfig.GetClientType() {
+ case exported.Tendermint:
+ tmConfig, ok := endpoint.ClientConfig.(*TendermintConfig)
+ require.True(endpoint.Chain.t, ok)
+
+ height := endpoint.Counterparty.Chain.LastHeader.GetHeight().(clienttypes.Height)
+ clientState = ibctmtypes.NewClientState(
+ endpoint.Counterparty.Chain.ChainID, tmConfig.TrustLevel, tmConfig.TrustingPeriod, tmConfig.UnbondingPeriod, tmConfig.MaxClockDrift,
+ height, commitmenttypes.GetSDKSpecs(), UpgradePath, tmConfig.AllowUpdateAfterExpiry, tmConfig.AllowUpdateAfterMisbehaviour,
+ )
+ consensusState = endpoint.Counterparty.Chain.LastHeader.ConsensusState()
+ case exported.Solomachine:
+ // TODO
+ // solo := NewSolomachine(chain.t, endpoint.Chain.Codec, clientID, "", 1)
+ // clientState = solo.ClientState()
+ // consensusState = solo.ConsensusState()
+
+ default:
+ err = fmt.Errorf("client type %s is not supported", endpoint.ClientConfig.GetClientType())
+ }
+
+ if err != nil {
+ return err
+ }
+
+ msg, err := clienttypes.NewMsgCreateClient(
+ clientState, consensusState, endpoint.Chain.SenderAccount.GetAddress().String(),
+ )
+ require.NoError(endpoint.Chain.t, err)
+
+ res, err := endpoint.Chain.SendMsgs(msg)
+ if err != nil {
+ return err
+ }
+
+ endpoint.ClientID, err = ParseClientIDFromEvents(res.GetEvents())
+ require.NoError(endpoint.Chain.t, err)
+
+ return nil
+}
+
+// UpdateClient updates the IBC client associated with the endpoint.
+func (endpoint *Endpoint) UpdateClient() (err error) {
+ // ensure counterparty has committed state
+ endpoint.Chain.Coordinator.CommitBlock(endpoint.Counterparty.Chain)
+
+ var (
+ header exported.Header
+ )
+
+ switch endpoint.ClientConfig.GetClientType() {
+ case exported.Tendermint:
+ header, err = endpoint.Chain.ConstructUpdateTMClientHeader(endpoint.Counterparty.Chain, endpoint.ClientID)
+
+ default:
+ err = fmt.Errorf("client type %s is not supported", endpoint.ClientConfig.GetClientType())
+ }
+
+ if err != nil {
+ return err
+ }
+
+ msg, err := clienttypes.NewMsgUpdateClient(
+ endpoint.ClientID, header,
+ endpoint.Chain.SenderAccount.GetAddress().String(),
+ )
+ require.NoError(endpoint.Chain.t, err)
+
+ return endpoint.Chain.sendMsgs(msg)
+
+}
+
+// ConnOpenInit will construct and execute a MsgConnectionOpenInit on the associated endpoint.
+func (endpoint *Endpoint) ConnOpenInit() error {
+ msg := connectiontypes.NewMsgConnectionOpenInit(
+ endpoint.ClientID,
+ endpoint.Counterparty.ClientID,
+ endpoint.Counterparty.Chain.GetPrefix(), DefaultOpenInitVersion, endpoint.ConnectionConfig.DelayPeriod,
+ endpoint.Chain.SenderAccount.GetAddress().String(),
+ )
+ res, err := endpoint.Chain.SendMsgs(msg)
+ if err != nil {
+ return err
+ }
+
+ endpoint.ConnectionID, err = ParseConnectionIDFromEvents(res.GetEvents())
+ require.NoError(endpoint.Chain.t, err)
+
+ return nil
+}
+
+// ConnOpenTry will construct and execute a MsgConnectionOpenTry on the associated endpoint.
+func (endpoint *Endpoint) ConnOpenTry() error {
+ endpoint.UpdateClient()
+
+ counterpartyClient, proofClient, proofConsensus, consensusHeight, proofInit, proofHeight := endpoint.QueryConnectionHandshakeProof()
+
+ msg := connectiontypes.NewMsgConnectionOpenTry(
+ "", endpoint.ClientID, // does not support handshake continuation
+ endpoint.Counterparty.ConnectionID, endpoint.Counterparty.ClientID,
+ counterpartyClient, endpoint.Counterparty.Chain.GetPrefix(), []*connectiontypes.Version{ConnectionVersion}, endpoint.ConnectionConfig.DelayPeriod,
+ proofInit, proofClient, proofConsensus,
+ proofHeight, consensusHeight,
+ endpoint.Chain.SenderAccount.GetAddress().String(),
+ )
+ res, err := endpoint.Chain.SendMsgs(msg)
+ if err != nil {
+ return err
+ }
+
+ if endpoint.ConnectionID == "" {
+ endpoint.ConnectionID, err = ParseConnectionIDFromEvents(res.GetEvents())
+ require.NoError(endpoint.Chain.t, err)
+ }
+
+ return nil
+}
+
+// ConnOpenAck will construct and execute a MsgConnectionOpenAck on the associated endpoint.
+func (endpoint *Endpoint) ConnOpenAck() error {
+ endpoint.UpdateClient()
+
+ counterpartyClient, proofClient, proofConsensus, consensusHeight, proofTry, proofHeight := endpoint.QueryConnectionHandshakeProof()
+
+ msg := connectiontypes.NewMsgConnectionOpenAck(
+ endpoint.ConnectionID, endpoint.Counterparty.ConnectionID, counterpartyClient, // testing doesn't use flexible selection
+ proofTry, proofClient, proofConsensus,
+ proofHeight, consensusHeight,
+ ConnectionVersion,
+ endpoint.Chain.SenderAccount.GetAddress().String(),
+ )
+ return endpoint.Chain.sendMsgs(msg)
+}
+
+// ConnOpenConfirm will construct and execute a MsgConnectionOpenConfirm on the associated endpoint.
+func (endpoint *Endpoint) ConnOpenConfirm() error {
+ endpoint.UpdateClient()
+
+ connectionKey := host.ConnectionKey(endpoint.Counterparty.ConnectionID)
+ proof, height := endpoint.Counterparty.Chain.QueryProof(connectionKey)
+
+ msg := connectiontypes.NewMsgConnectionOpenConfirm(
+ endpoint.ConnectionID,
+ proof, height,
+ endpoint.Chain.SenderAccount.GetAddress().String(),
+ )
+ return endpoint.Chain.sendMsgs(msg)
+}
+
+// QueryConnectionHandshakeProof returns all the proofs necessary to execute OpenTry or OpenAck of
+// the connection handshake. It returns the counterparty client state, proof of the counterparty
+// client state, proof of the counterparty consensus state, the consensus state height, proof of
+// the counterparty connection, and the proof height for all the proofs returned.
+func (endpoint *Endpoint) QueryConnectionHandshakeProof() (
+ clientState exported.ClientState, proofClient,
+ proofConsensus []byte, consensusHeight clienttypes.Height,
+ proofConnection []byte, proofHeight clienttypes.Height,
+) {
+ // obtain the client state on the counterparty chain
+ clientState = endpoint.Counterparty.Chain.GetClientState(endpoint.Counterparty.ClientID)
+
+ // query proof for the client state on the counterparty
+ clientKey := host.FullClientStateKey(endpoint.Counterparty.ClientID)
+ proofClient, proofHeight = endpoint.Counterparty.QueryProof(clientKey)
+
+ consensusHeight = clientState.GetLatestHeight().(clienttypes.Height)
+
+ // query proof for the consensus state on the counterparty
+ consensusKey := host.FullConsensusStateKey(endpoint.Counterparty.ClientID, consensusHeight)
+ proofConsensus, _ = endpoint.Counterparty.QueryProofAtHeight(consensusKey, proofHeight.GetRevisionHeight())
+
+ // query proof for the connection on the counterparty
+ connectionKey := host.ConnectionKey(endpoint.Counterparty.ConnectionID)
+ proofConnection, _ = endpoint.Counterparty.QueryProofAtHeight(connectionKey, proofHeight.GetRevisionHeight())
+
+ return
+}
+
+// ChanOpenInit will construct and execute a MsgChannelOpenInit on the associated endpoint.
+func (endpoint *Endpoint) ChanOpenInit() error {
+ msg := channeltypes.NewMsgChannelOpenInit(
+ endpoint.ChannelConfig.PortID,
+ endpoint.ChannelConfig.Version, endpoint.ChannelConfig.Order, []string{endpoint.ConnectionID},
+ endpoint.Counterparty.ChannelConfig.PortID,
+ endpoint.Chain.SenderAccount.GetAddress().String(),
+ )
+ res, err := endpoint.Chain.SendMsgs(msg)
+ if err != nil {
+ return err
+ }
+
+ endpoint.ChannelID, err = ParseChannelIDFromEvents(res.GetEvents())
+ require.NoError(endpoint.Chain.t, err)
+
+ return nil
+}
+
+// ChanOpenTry will construct and execute a MsgChannelOpenTry on the associated endpoint.
+func (endpoint *Endpoint) ChanOpenTry() error {
+ endpoint.UpdateClient()
+
+ channelKey := host.ChannelKey(endpoint.Counterparty.ChannelConfig.PortID, endpoint.Counterparty.ChannelID)
+ proof, height := endpoint.Counterparty.Chain.QueryProof(channelKey)
+
+ msg := channeltypes.NewMsgChannelOpenTry(
+ endpoint.ChannelConfig.PortID, "", // does not support handshake continuation
+ endpoint.ChannelConfig.Version, endpoint.ChannelConfig.Order, []string{endpoint.ConnectionID},
+ endpoint.Counterparty.ChannelConfig.PortID, endpoint.Counterparty.ChannelID, endpoint.Counterparty.ChannelConfig.Version,
+ proof, height,
+ endpoint.Chain.SenderAccount.GetAddress().String(),
+ )
+ res, err := endpoint.Chain.SendMsgs(msg)
+ if err != nil {
+ return err
+ }
+
+ if endpoint.ChannelID == "" {
+ endpoint.ChannelID, err = ParseChannelIDFromEvents(res.GetEvents())
+ require.NoError(endpoint.Chain.t, err)
+ }
+
+ return nil
+}
+
+// ChanOpenAck will construct and execute a MsgChannelOpenAck on the associated endpoint.
+func (endpoint *Endpoint) ChanOpenAck() error {
+ endpoint.UpdateClient()
+
+ channelKey := host.ChannelKey(endpoint.Counterparty.ChannelConfig.PortID, endpoint.Counterparty.ChannelID)
+ proof, height := endpoint.Counterparty.Chain.QueryProof(channelKey)
+
+ msg := channeltypes.NewMsgChannelOpenAck(
+ endpoint.ChannelConfig.PortID, endpoint.ChannelID,
+ endpoint.Counterparty.ChannelID, endpoint.Counterparty.ChannelConfig.Version, // testing doesn't use flexible selection
+ proof, height,
+ endpoint.Chain.SenderAccount.GetAddress().String(),
+ )
+ return endpoint.Chain.sendMsgs(msg)
+}
+
+// ChanOpenConfirm will construct and execute a MsgChannelOpenConfirm on the associated endpoint.
+func (endpoint *Endpoint) ChanOpenConfirm() error {
+ endpoint.UpdateClient()
+
+ channelKey := host.ChannelKey(endpoint.Counterparty.ChannelConfig.PortID, endpoint.Counterparty.ChannelID)
+ proof, height := endpoint.Counterparty.Chain.QueryProof(channelKey)
+
+ msg := channeltypes.NewMsgChannelOpenConfirm(
+ endpoint.ChannelConfig.PortID, endpoint.ChannelID,
+ proof, height,
+ endpoint.Chain.SenderAccount.GetAddress().String(),
+ )
+ return endpoint.Chain.sendMsgs(msg)
+}
+
+// ChanCloseInit will construct and execute a MsgChannelCloseInit on the associated endpoint.
+//
+// NOTE: does not work with ibc-transfer module
+func (endpoint *Endpoint) ChanCloseInit() error {
+ msg := channeltypes.NewMsgChannelCloseInit(
+ endpoint.ChannelConfig.PortID, endpoint.ChannelID,
+ endpoint.Chain.SenderAccount.GetAddress().String(),
+ )
+ return endpoint.Chain.sendMsgs(msg)
+}
+
+// SendPacket sends a packet through the channel keeper using the associated endpoint
+// The counterparty client is updated so proofs can be sent to the counterparty chain.
+func (endpoint *Endpoint) SendPacket(packet exported.PacketI) error {
+ channelCap := endpoint.Chain.GetChannelCapability(packet.GetSourcePort(), packet.GetSourceChannel())
+
+ // no need to send message, acting as a module
+ err := endpoint.Chain.App.GetIBCKeeper().ChannelKeeper.SendPacket(endpoint.Chain.GetContext(), channelCap, packet)
+ if err != nil {
+ return err
+ }
+
+ // commit changes since no message was sent
+ endpoint.Chain.Coordinator.CommitBlock(endpoint.Chain)
+
+ return endpoint.Counterparty.UpdateClient()
+}
+
+// RecvPacket receives a packet on the associated endpoint.
+// The counterparty client is updated.
+func (endpoint *Endpoint) RecvPacket(packet channeltypes.Packet) error {
+ // get proof of packet commitment on source
+ packetKey := host.PacketCommitmentKey(packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence())
+ proof, proofHeight := endpoint.Counterparty.Chain.QueryProof(packetKey)
+
+ recvMsg := channeltypes.NewMsgRecvPacket(packet, proof, proofHeight, endpoint.Chain.SenderAccount.GetAddress().String())
+
+ // receive on counterparty and update source client
+ if err := endpoint.Chain.sendMsgs(recvMsg); err != nil {
+ return err
+ }
+
+ return endpoint.Counterparty.UpdateClient()
+}
+
+// WriteAcknowledgement writes an acknowledgement on the channel associated with the endpoint.
+// The counterparty client is updated.
+func (endpoint *Endpoint) WriteAcknowledgement(ack exported.Acknowledgement, packet exported.PacketI) error {
+ channelCap := endpoint.Chain.GetChannelCapability(packet.GetDestPort(), packet.GetDestChannel())
+
+ // no need to send message, acting as a handler
+ err := endpoint.Chain.App.GetIBCKeeper().ChannelKeeper.WriteAcknowledgement(endpoint.Chain.GetContext(), channelCap, packet, ack.Acknowledgement())
+ if err != nil {
+ return err
+ }
+
+ // commit changes since no message was sent
+ endpoint.Chain.Coordinator.CommitBlock(endpoint.Chain)
+
+ return endpoint.Counterparty.UpdateClient()
+}
+
+// AcknowledgePacket sends a MsgAcknowledgement to the channel associated with the endpoint.
+// TODO: add a query for the acknowledgement by events
+// - https://github.com/cosmos/cosmos-sdk/issues/6509
+func (endpoint *Endpoint) AcknowledgePacket(packet channeltypes.Packet, ack []byte) error {
+ // get proof of acknowledgement on counterparty
+ packetKey := host.PacketAcknowledgementKey(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence())
+ proof, proofHeight := endpoint.Counterparty.QueryProof(packetKey)
+
+ ackMsg := channeltypes.NewMsgAcknowledgement(packet, ack, proof, proofHeight, endpoint.Chain.SenderAccount.GetAddress().String())
+
+ return endpoint.Chain.sendMsgs(ackMsg)
+}
+
+// SetChannelClosed sets a channel state to CLOSED.
+func (endpoint *Endpoint) SetChannelClosed() error {
+ channel := endpoint.GetChannel()
+
+ channel.State = channeltypes.CLOSED
+ endpoint.Chain.App.GetIBCKeeper().ChannelKeeper.SetChannel(endpoint.Chain.GetContext(), endpoint.ChannelConfig.PortID, endpoint.ChannelID, channel)
+
+ endpoint.Chain.Coordinator.CommitBlock(endpoint.Chain)
+
+ return endpoint.Counterparty.UpdateClient()
+}
+
+// GetClientState retrieves the Client State for this endpoint. The
+// client state is expected to exist otherwise testing will fail.
+func (endpoint *Endpoint) GetClientState() exported.ClientState {
+ return endpoint.Chain.GetClientState(endpoint.ClientID)
+}
+
+// GetConnection retrieves an IBC Connection for the endpoint. The
+// connection is expected to exist otherwise testing will fail.
+func (endpoint *Endpoint) GetConnection() connectiontypes.ConnectionEnd {
+ connection, found := endpoint.Chain.App.GetIBCKeeper().ConnectionKeeper.GetConnection(endpoint.Chain.GetContext(), endpoint.ConnectionID)
+ require.True(endpoint.Chain.t, found)
+
+ return connection
+}
+
+// GetChannel retrieves an IBC Channel for the endpoint. The channel
+// is expected to exist otherwise testing will fail.
+func (endpoint *Endpoint) GetChannel() channeltypes.Channel {
+ channel, found := endpoint.Chain.App.GetIBCKeeper().ChannelKeeper.GetChannel(endpoint.Chain.GetContext(), endpoint.ChannelConfig.PortID, endpoint.ChannelID)
+ require.True(endpoint.Chain.t, found)
+
+ return channel
+}
+
+// QueryClientStateProof performs an abci query for the client state associated
+// with this endpoint and returns the ClientState along with the proof.
+func (endpoint *Endpoint) QueryClientStateProof() (exported.ClientState, []byte) {
+ // retrieve client state to provide proof for
+ clientState := endpoint.GetClientState()
+
+ clientKey := host.FullClientStateKey(endpoint.ClientID)
+ proofClient, _ := endpoint.QueryProof(clientKey)
+
+ return clientState, proofClient
+}
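+
+// Illustrative packet round trip using the helpers above (error handling elided; the packet
+// and ack values are assumed to be built by the calling test):
+//
+//   endpointA.SendPacket(packet)             // commit on chain A and refresh the client of A kept on B
+//   endpointB.RecvPacket(packet)             // prove the commitment on A and receive the packet on B
+//   endpointA.AcknowledgePacket(packet, ack) // prove the ack written on B and process it on A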
diff --git a/testing/events.go b/testing/events.go
new file mode 100644
index 00000000..b8eb2822
--- /dev/null
+++ b/testing/events.go
@@ -0,0 +1,56 @@
+package ibctesting
+
+import (
+ "fmt"
+
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ connectiontypes "github.com/cosmos/ibc-go/modules/core/03-connection/types"
+ channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
+)
+
+// ParseClientIDFromEvents parses events emitted from a MsgCreateClient and returns the
+// client identifier.
+func ParseClientIDFromEvents(events sdk.Events) (string, error) {
+ for _, ev := range events {
+ if ev.Type == clienttypes.EventTypeCreateClient {
+ for _, attr := range ev.Attributes {
+ if string(attr.Key) == clienttypes.AttributeKeyClientID {
+ return string(attr.Value), nil
+ }
+ }
+ }
+ }
+ return "", fmt.Errorf("client identifier event attribute not found")
+}
+
+// ParseConnectionIDFromEvents parses events emitted from a MsgConnectionOpenInit or
+// MsgConnectionOpenTry and returns the connection identifier.
+func ParseConnectionIDFromEvents(events sdk.Events) (string, error) {
+ for _, ev := range events {
+ if ev.Type == connectiontypes.EventTypeConnectionOpenInit ||
+ ev.Type == connectiontypes.EventTypeConnectionOpenTry {
+ for _, attr := range ev.Attributes {
+ if string(attr.Key) == connectiontypes.AttributeKeyConnectionID {
+ return string(attr.Value), nil
+ }
+ }
+ }
+ }
+ return "", fmt.Errorf("connection identifier event attribute not found")
+}
+
+// ParseChannelIDFromEvents parses events emitted from a MsgChannelOpenInit or
+// MsgChannelOpenTry and returns the channel identifier.
+func ParseChannelIDFromEvents(events sdk.Events) (string, error) {
+ for _, ev := range events {
+ if ev.Type == channeltypes.EventTypeChannelOpenInit || ev.Type == channeltypes.EventTypeChannelOpenTry {
+ for _, attr := range ev.Attributes {
+ if string(attr.Key) == channeltypes.AttributeKeyChannelID {
+ return string(attr.Value), nil
+ }
+ }
+ }
+ }
+ return "", fmt.Errorf("channel identifier event attribute not found")
+}
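As a hedged illustration (not part of this patch), the parsers above recover the identifiers assigned by the chain when the corresponding handshake message is delivered; `deliverCreateClient` is a hypothetical helper returning the emitted events:

// Hypothetical usage sketch. deliverCreateClient is assumed to deliver a
// MsgCreateClient on the test chain and return the emitted sdk.Events.
events := deliverCreateClient(chain)

clientID, err := ibctesting.ParseClientIDFromEvents(events)
if err != nil {
	// the create client event or its client_id attribute was not emitted
	return err
}
fmt.Println("new client:", clientID)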
diff --git a/testing/mock/mock.go b/testing/mock/mock.go
index 1ac33f8f..5bd1b123 100644
--- a/testing/mock/mock.go
+++ b/testing/mock/mock.go
@@ -34,6 +34,12 @@ var (
MockCanaryCapabilityName = "mock canary capability name"
)
+// Expected Interface
+// PortKeeper defines the expected IBC port keeper
+type PortKeeper interface {
+ BindPort(ctx sdk.Context, portID string) *capabilitytypes.Capability
+}
+
// AppModuleBasic is the mock AppModuleBasic.
type AppModuleBasic struct{}
@@ -78,12 +84,14 @@ func (AppModuleBasic) GetQueryCmd() *cobra.Command {
type AppModule struct {
AppModuleBasic
scopedKeeper capabilitykeeper.ScopedKeeper
+ portKeeper PortKeeper
}
// NewAppModule returns a mock AppModule instance.
-func NewAppModule(sk capabilitykeeper.ScopedKeeper) AppModule {
+func NewAppModule(sk capabilitykeeper.ScopedKeeper, pk PortKeeper) AppModule {
return AppModule{
scopedKeeper: sk,
+ portKeeper: pk,
}
}
@@ -110,6 +118,10 @@ func (am AppModule) RegisterServices(module.Configurator) {}
// InitGenesis implements the AppModule interface.
func (am AppModule) InitGenesis(ctx sdk.Context, cdc codec.JSONMarshaler, data json.RawMessage) []abci.ValidatorUpdate {
+ // bind mock port ID
+ cap := am.portKeeper.BindPort(ctx, ModuleName)
+ am.scopedKeeper.ClaimCapability(ctx, cap, host.PortPath(ModuleName))
+
return []abci.ValidatorUpdate{}
}
@@ -118,6 +130,9 @@ func (am AppModule) ExportGenesis(ctx sdk.Context, cdc codec.JSONMarshaler) json
return nil
}
+// ConsensusVersion implements AppModule/ConsensusVersion.
+func (AppModule) ConsensusVersion() uint64 { return 1 }
+
// BeginBlock implements the AppModule interface
func (am AppModule) BeginBlock(ctx sdk.Context, req abci.RequestBeginBlock) {
}
diff --git a/testing/path.go b/testing/path.go
new file mode 100644
index 00000000..7b6e0673
--- /dev/null
+++ b/testing/path.go
@@ -0,0 +1,75 @@
+package ibctesting
+
+import (
+ "bytes"
+ "fmt"
+
+ channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
+)
+
+// Path contains two endpoints representing two chains connected over IBC
+type Path struct {
+ EndpointA *Endpoint
+ EndpointB *Endpoint
+}
+
+// NewPath constructs an endpoint for each chain using the default values
+// for the endpoints. Each endpoint is updated to have a pointer to the
+// counterparty endpoint.
+func NewPath(chainA, chainB *TestChain) *Path {
+ endpointA := NewDefaultEndpoint(chainA)
+ endpointB := NewDefaultEndpoint(chainB)
+
+ endpointA.Counterparty = endpointB
+ endpointB.Counterparty = endpointA
+
+ return &Path{
+ EndpointA: endpointA,
+ EndpointB: endpointB,
+ }
+}
+
+// SetChannelOrdered sets the channel order for both endpoints to ORDERED.
+func (path *Path) SetChannelOrdered() {
+ path.EndpointA.ChannelConfig.Order = channeltypes.ORDERED
+ path.EndpointB.ChannelConfig.Order = channeltypes.ORDERED
+}
+
+// RelayPacket relays the packet from EndpointA to EndpointB if EndpointA holds the packet
+// commitment for it; otherwise it attempts the relay from EndpointB to EndpointA. An error is
+// returned if a relay step fails or the packet commitment does not exist on either endpoint.
+func (path *Path) RelayPacket(packet channeltypes.Packet, ack []byte) error {
+ pc := path.EndpointA.Chain.App.GetIBCKeeper().ChannelKeeper.GetPacketCommitment(path.EndpointA.Chain.GetContext(), packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence())
+ if bytes.Equal(pc, channeltypes.CommitPacket(path.EndpointA.Chain.App.AppCodec(), packet)) {
+
+ // packet found, relay from A to B
+ path.EndpointB.UpdateClient()
+
+ if err := path.EndpointB.RecvPacket(packet); err != nil {
+ return err
+ }
+
+ if err := path.EndpointA.AcknowledgePacket(packet, ack); err != nil {
+ return err
+ }
+ return nil
+
+ }
+
+ pc = path.EndpointB.Chain.App.GetIBCKeeper().ChannelKeeper.GetPacketCommitment(path.EndpointB.Chain.GetContext(), packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence())
+ if bytes.Equal(pc, channeltypes.CommitPacket(path.EndpointB.Chain.App.AppCodec(), packet)) {
+
+ // packet found, relay B to A
+ path.EndpointA.UpdateClient()
+
+ if err := path.EndpointA.RecvPacket(packet); err != nil {
+ return err
+ }
+ if err := path.EndpointB.AcknowledgePacket(packet, ack); err != nil {
+ return err
+ }
+ return nil
+ }
+
+ return fmt.Errorf("packet commitment does not exist on either endpoint for provided packet")
+}
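A hedged usage sketch for the relay helper (not part of this patch); the coordinator, test chains, packet and ack values, the `t` testing handle, and the Setup helper that completes the client/connection/channel handshakes are assumptions:

// Hypothetical sketch of a round trip through RelayPacket.
path := ibctesting.NewPath(chainA, chainB)
path.SetChannelOrdered() // optional: request an ORDERED channel instead of the default
coordinator.Setup(path)  // assumed helper: creates clients, connection, and channel

// packet was previously sent by an application on either endpoint; ack is the
// acknowledgement written by the receiving application.
if err := path.RelayPacket(packet, ack); err != nil {
	t.Fatal(err)
}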
diff --git a/testing/simapp/app.go b/testing/simapp/app.go
index 7e4ba042..dc7de557 100644
--- a/testing/simapp/app.go
+++ b/testing/simapp/app.go
@@ -131,6 +131,7 @@ var (
upgrade.AppModuleBasic{},
evidence.AppModuleBasic{},
transfer.AppModuleBasic{},
+ ibcmock.AppModuleBasic{},
authz.AppModuleBasic{},
vesting.AppModuleBasic{},
)
@@ -328,7 +329,7 @@ func NewSimApp(
// NOTE: the IBC mock keeper and application module is used only for testing core IBC. Do
not replicate if you do not need to test core IBC or light clients.
- mockModule := ibcmock.NewAppModule(scopedIBCMockKeeper)
+ mockModule := ibcmock.NewAppModule(scopedIBCMockKeeper, &app.IBCKeeper.PortKeeper)
// Create static IBC router, add transfer route, then set and seal it
ibcRouter := porttypes.NewRouter()
@@ -373,6 +374,7 @@ func NewSimApp(
params.NewAppModule(app.ParamsKeeper),
authz.NewAppModule(appCodec, app.AuthzKeeper, app.AccountKeeper, app.BankKeeper, app.interfaceRegistry),
transferModule,
+ mockModule,
)
// During begin block slashing happens after distr.BeginBlocker so that
@@ -394,7 +396,7 @@ func NewSimApp(
capabilitytypes.ModuleName, authtypes.ModuleName, banktypes.ModuleName, distrtypes.ModuleName, stakingtypes.ModuleName,
slashingtypes.ModuleName, govtypes.ModuleName, minttypes.ModuleName, crisistypes.ModuleName,
ibchost.ModuleName, genutiltypes.ModuleName, evidencetypes.ModuleName, authztypes.ModuleName, ibctransfertypes.ModuleName,
- feegranttypes.ModuleName,
+ ibcmock.ModuleName, feegranttypes.ModuleName,
)
app.mm.RegisterInvariants(&app.CrisisKeeper)
@@ -568,6 +570,33 @@ func (app *SimApp) GetSubspace(moduleName string) paramstypes.Subspace {
return subspace
}
+// TestingApp functions
+
+// GetBaseApp implements the TestingApp interface.
+func (app *SimApp) GetBaseApp() *baseapp.BaseApp {
+ return app.BaseApp
+}
+
+// GetStakingKeeper implements the TestingApp interface.
+func (app *SimApp) GetStakingKeeper() stakingkeeper.Keeper {
+ return app.StakingKeeper
+}
+
+// GetIBCKeeper implements the TestingApp interface.
+func (app *SimApp) GetIBCKeeper() *ibckeeper.Keeper {
+ return app.IBCKeeper
+}
+
+// GetScopedIBCKeeper implements the TestingApp interface.
+func (app *SimApp) GetScopedIBCKeeper() capabilitykeeper.ScopedKeeper {
+ return app.ScopedIBCKeeper
+}
+
+// GetTxConfig implements the TestingApp interface.
+func (app *SimApp) GetTxConfig() client.TxConfig {
+ return MakeTestEncodingConfig().TxConfig
+}
+
// SimulationManager implements the SimulationApp interface
func (app *SimApp) SimulationManager() *module.SimulationManager {
return app.sm
diff --git a/testing/simapp/sim_test.go b/testing/simapp/sim_test.go
index acac8731..6a297c11 100644
--- a/testing/simapp/sim_test.go
+++ b/testing/simapp/sim_test.go
@@ -260,7 +260,7 @@ func TestAppSimulationAfterImport(t *testing.T) {
_, _, err = simulation.SimulateFromSeed(
t,
os.Stdout,
- newApp.BaseApp,
+ newApp.GetBaseApp(),
AppStateFn(app.AppCodec(), app.SimulationManager()),
simtypes.RandomAccounts, // Replace with own random account function if using keys other than secp256k1
SimulationOperations(newApp, newApp.AppCodec(), config),
diff --git a/testing/simapp/test_helpers.go b/testing/simapp/test_helpers.go
index 69af7433..8d92dbde 100644
--- a/testing/simapp/test_helpers.go
+++ b/testing/simapp/test_helpers.go
@@ -18,8 +18,6 @@ import (
bam "github.com/cosmos/cosmos-sdk/baseapp"
"github.com/cosmos/cosmos-sdk/client"
- codectypes "github.com/cosmos/cosmos-sdk/codec/types"
- cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec"
"github.com/cosmos/cosmos-sdk/crypto/keys/ed25519"
cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types"
sdk "github.com/cosmos/cosmos-sdk/types"
@@ -27,7 +25,6 @@ import (
authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
banktypes "github.com/cosmos/cosmos-sdk/x/bank/types"
minttypes "github.com/cosmos/cosmos-sdk/x/mint/types"
- stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types"
"github.com/cosmos/ibc-go/testing/simapp/helpers"
)
@@ -83,87 +80,6 @@ func Setup(isCheckTx bool) *SimApp {
return app
}
-// SetupWithGenesisValSet initializes a new SimApp with a validator set and genesis accounts
-// that also act as delegators. For simplicity, each validator is bonded with a delegation
-// of one consensus engine unit (10^6) in the default token of the simapp from first genesis
-// account. A Nop logger is set in SimApp.
-func SetupWithGenesisValSet(t *testing.T, valSet *tmtypes.ValidatorSet, genAccs []authtypes.GenesisAccount, balances ...banktypes.Balance) *SimApp {
- app, genesisState := setup(true, 5)
- // set genesis accounts
- authGenesis := authtypes.NewGenesisState(authtypes.DefaultParams(), genAccs)
- genesisState[authtypes.ModuleName] = app.AppCodec().MustMarshalJSON(authGenesis)
-
- validators := make([]stakingtypes.Validator, 0, len(valSet.Validators))
- delegations := make([]stakingtypes.Delegation, 0, len(valSet.Validators))
-
- bondAmt := sdk.NewInt(1000000)
-
- for _, val := range valSet.Validators {
- pk, err := cryptocodec.FromTmPubKeyInterface(val.PubKey)
- require.NoError(t, err)
- pkAny, err := codectypes.NewAnyWithValue(pk)
- require.NoError(t, err)
- validator := stakingtypes.Validator{
- OperatorAddress: sdk.ValAddress(val.Address).String(),
- ConsensusPubkey: pkAny,
- Jailed: false,
- Status: stakingtypes.Bonded,
- Tokens: bondAmt,
- DelegatorShares: sdk.OneDec(),
- Description: stakingtypes.Description{},
- UnbondingHeight: int64(0),
- UnbondingTime: time.Unix(0, 0).UTC(),
- Commission: stakingtypes.NewCommission(sdk.ZeroDec(), sdk.ZeroDec(), sdk.ZeroDec()),
- MinSelfDelegation: sdk.ZeroInt(),
- }
- validators = append(validators, validator)
- delegations = append(delegations, stakingtypes.NewDelegation(genAccs[0].GetAddress(), val.Address.Bytes(), sdk.OneDec()))
-
- }
- // set validators and delegations
- stakingGenesis := stakingtypes.NewGenesisState(stakingtypes.DefaultParams(), validators, delegations)
- genesisState[stakingtypes.ModuleName] = app.AppCodec().MustMarshalJSON(stakingGenesis)
-
- totalSupply := sdk.NewCoins()
- for _, b := range balances {
- // add genesis acc tokens and delegated tokens to total supply
- totalSupply = totalSupply.Add(b.Coins.Add(sdk.NewCoin(sdk.DefaultBondDenom, bondAmt))...)
- }
-
- // add bonded amount to bonded pool module account
- balances = append(balances, banktypes.Balance{
- Address: authtypes.NewModuleAddress(stakingtypes.BondedPoolName).String(),
- Coins: sdk.Coins{sdk.NewCoin(sdk.DefaultBondDenom, bondAmt)},
- })
-
- // update total supply
- bankGenesis := banktypes.NewGenesisState(banktypes.DefaultGenesisState().Params, balances, totalSupply, []banktypes.Metadata{})
- genesisState[banktypes.ModuleName] = app.AppCodec().MustMarshalJSON(bankGenesis)
-
- stateBytes, err := json.MarshalIndent(genesisState, "", " ")
- require.NoError(t, err)
-
- // init chain will set the validator set and initialize the genesis accounts
- app.InitChain(
- abci.RequestInitChain{
- Validators: []abci.ValidatorUpdate{},
- ConsensusParams: DefaultConsensusParams,
- AppStateBytes: stateBytes,
- },
- )
-
- // commit genesis changes
- app.Commit()
- app.BeginBlock(abci.RequestBeginBlock{Header: tmproto.Header{
- Height: app.LastBlockHeight() + 1,
- AppHash: app.LastCommitID().Hash,
- ValidatorsHash: valSet.Hash(),
- NextValidatorsHash: valSet.Hash(),
- }})
-
- return app
-}
-
// SetupWithGenesisAccounts initializes a new SimApp with the provided genesis
// accounts and possible balances.
func SetupWithGenesisAccounts(genAccs []authtypes.GenesisAccount, balances ...banktypes.Balance) *SimApp {
@@ -316,11 +232,9 @@ func CheckBalance(t *testing.T, app *SimApp, addr sdk.AccAddress, balances sdk.C
require.True(t, balances.IsEqual(app.BankKeeper.GetAllBalances(ctxCheck, addr)))
}
-// SignCheckDeliver checks a generated signed transaction and simulates a
-// block commitment with the given transaction. A test assertion is made using
-// the parameter 'expPass' against the result. A corresponding result is
-// returned.
-func SignCheckDeliver(
+// SignAndDeliver signs and delivers a transaction. No simulation occurs as the
+// ibc testing package causes checkState and deliverState to diverge in block time.
+func SignAndDeliver(
t *testing.T, txCfg client.TxConfig, app *bam.BaseApp, header tmproto.Header, msgs []sdk.Msg,
chainID string, accNums, accSeqs []uint64, expSimPass, expPass bool, priv ...cryptotypes.PrivKey,
) (sdk.GasInfo, *sdk.Result, error) {
@@ -336,19 +250,6 @@ func SignCheckDeliver(
priv...,
)
require.NoError(t, err)
- txBytes, err := txCfg.TxEncoder()(tx)
- require.Nil(t, err)
-
- // Must simulate now as CheckTx doesn't run Msgs anymore
- _, res, err := app.Simulate(txBytes)
-
- if expSimPass {
- require.NoError(t, err)
- require.NotNil(t, res)
- } else {
- require.Error(t, err)
- require.Nil(t, res)
- }
// Simulate sending a transaction and committing a block
app.BeginBlock(abci.RequestBeginBlock{Header: header})
diff --git a/testing/solomachine.go b/testing/solomachine.go
index c418a15a..ff4cc651 100644
--- a/testing/solomachine.go
+++ b/testing/solomachine.go
@@ -19,8 +19,6 @@ import (
solomachinetypes "github.com/cosmos/ibc-go/modules/light-clients/06-solomachine/types"
)
-var prefix = commitmenttypes.NewMerklePrefix([]byte("ibc"))
-
// Solomachine is a testing helper used to simulate a counterparty
// solo machine client.
type Solomachine struct {
diff --git a/testing/types.go b/testing/types.go
deleted file mode 100644
index 9712a951..00000000
--- a/testing/types.go
+++ /dev/null
@@ -1,44 +0,0 @@
-package ibctesting
-
-import (
- channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
-)
-
-// TestConnection is a testing helper struct to keep track of the connectionID, source clientID,
-// counterparty clientID, and the next channel version used in creating and interacting with a
-// connection.
-type TestConnection struct {
- ID string
- ClientID string
- CounterpartyClientID string
- NextChannelVersion string
- Channels []TestChannel
-}
-
-// FirstOrNextTestChannel returns the first test channel if it exists, otherwise it
-// returns the next test channel to be created. This function is expected to be used
-// when the caller does not know if the channel has or has not been created in app
-// state, but would still like to refer to it to test existence or non-existence.
-func (conn *TestConnection) FirstOrNextTestChannel(portID string) TestChannel {
- if len(conn.Channels) > 0 {
- return conn.Channels[0]
- }
- return TestChannel{
- PortID: portID,
- ID: channeltypes.FormatChannelIdentifier(0),
- ClientID: conn.ClientID,
- CounterpartyClientID: conn.CounterpartyClientID,
- Version: conn.NextChannelVersion,
- }
-}
-
-// TestChannel is a testing helper struct to keep track of the portID and channelID
-// used in creating and interacting with a channel. The clientID and counterparty
-// client ID are also tracked to cut down on querying and argument passing.
-type TestChannel struct {
- PortID string
- ID string
- ClientID string
- CounterpartyClientID string
- Version string
-}
diff --git a/testing/values.go b/testing/values.go
new file mode 100644
index 00000000..d71356ec
--- /dev/null
+++ b/testing/values.go
@@ -0,0 +1,58 @@
+/*
+ This file contains the variables, constants, and default values
+ used in the testing package and commonly defined in tests.
+*/
+package ibctesting
+
+import (
+ "time"
+
+ sdk "github.com/cosmos/cosmos-sdk/types"
+
+ ibctransfertypes "github.com/cosmos/ibc-go/modules/apps/transfer/types"
+ connectiontypes "github.com/cosmos/ibc-go/modules/core/03-connection/types"
+ commitmenttypes "github.com/cosmos/ibc-go/modules/core/23-commitment/types"
+ ibctmtypes "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types"
+ "github.com/cosmos/ibc-go/testing/mock"
+)
+
+const (
+ FirstChannelID = "channel-0"
+ FirstConnectionID = "connection-0"
+
+ // Default params constants used to create a TM client
+ TrustingPeriod time.Duration = time.Hour * 24 * 7 * 2
+ UnbondingPeriod time.Duration = time.Hour * 24 * 7 * 3
+ MaxClockDrift time.Duration = time.Second * 10
+ DefaultDelayPeriod uint64 = 0
+
+ DefaultChannelVersion = ibctransfertypes.Version
+ InvalidID = "IDisInvalid"
+
+ // Application Ports
+ TransferPort = ibctransfertypes.ModuleName
+ MockPort = mock.ModuleName
+
+ // used for testing proposals
+ Title = "title"
+ Description = "description"
+)
+
+var (
+ DefaultOpenInitVersion *connectiontypes.Version
+
+ // Default params variables used to create a TM client
+ DefaultTrustLevel ibctmtypes.Fraction = ibctmtypes.DefaultTrustLevel
+ TestCoin = sdk.NewCoin(sdk.DefaultBondDenom, sdk.NewInt(100))
+
+ UpgradePath = []string{"upgrade", "upgradedIBCState"}
+
+ ConnectionVersion = connectiontypes.ExportedVersionsToProto(connectiontypes.GetCompatibleVersions())[0]
+
+ MockAcknowledgement = mock.MockAcknowledgement.Acknowledgement()
+ MockPacketData = mock.MockPacketData
+ MockFailPacketData = mock.MockFailPacketData
+ MockCanaryCapabilityName = mock.MockCanaryCapabilityName
+
+ prefix = commitmenttypes.NewMerklePrefix([]byte("ibc"))
+)
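To show how these defaults are typically combined, here is a hedged sketch matching the NewClientState call signature used in the 07-tendermint tests later in this series; the chain ID and latest height are assumptions, and clienttypes refers to the 02-client types package:

// Hypothetical sketch: constructing a Tendermint client state from the
// testing defaults defined above.
clientState := ibctmtypes.NewClientState(
	"testchain-1", // chain ID (assumed)
	DefaultTrustLevel, TrustingPeriod, UnbondingPeriod, MaxClockDrift,
	clienttypes.NewHeight(0, 1), // latest height (assumed)
	commitmenttypes.GetSDKSpecs(), UpgradePath, false, false,
)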
From 239301e2599dea9949200138074e44a4df31ba62 Mon Sep 17 00:00:00 2001
From: Aditya
Date: Fri, 23 Apr 2021 15:46:02 -0400
Subject: [PATCH 040/393] Efficient Consensus State Iteration (#125)
* start with efficient consensus state lookup
* writeup pruning logic
* fix tests
* add documentation
* improve byte efficiency
* actually fix tests and bug
* deduplicate
* fix return
* edit changelog
---
CHANGELOG.md | 1 +
.../07-tendermint/types/store.go | 154 +++++++++++++-
.../07-tendermint/types/store_test.go | 76 +++++++
.../07-tendermint/types/update.go | 36 ++++
.../07-tendermint/types/update_test.go | 193 ++++++++++++------
5 files changed, 396 insertions(+), 64 deletions(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 1df5346f..a4fbd915 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -57,6 +57,7 @@ Ref: https://keepachangelog.com/en/1.0.0/
### Improvements
* (modules/core/04-channel) [\#7949](https://github.com/cosmos/cosmos-sdk/issues/7949) Standardized channel `Acknowledgement` moved to its own file. Codec registration redundancy removed.
+* (modules/light-clients/07-tendermint) [\#125](https://github.com/cosmos/ibc-go/pull/125) Implement efficient iteration of consensus states and pruning of earliest expired consensus state on UpdateClient.
## IBC in the Cosmos SDK Repository
diff --git a/modules/light-clients/07-tendermint/types/store.go b/modules/light-clients/07-tendermint/types/store.go
index ea2fce2c..726fdfd6 100644
--- a/modules/light-clients/07-tendermint/types/store.go
+++ b/modules/light-clients/07-tendermint/types/store.go
@@ -1,9 +1,11 @@
package types
import (
+ "encoding/binary"
"strings"
"github.com/cosmos/cosmos-sdk/codec"
+ "github.com/cosmos/cosmos-sdk/store/prefix"
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
@@ -11,8 +13,30 @@ import (
"github.com/cosmos/ibc-go/modules/core/exported"
)
-// KeyProcessedTime is appended to consensus state key to store the processed time
-var KeyProcessedTime = []byte("/processedTime")
+/*
+This file contains the logic for storage and iteration over `IterationKey` metadata that is stored
+for each consensus state. The consensus state key specified in ICS-24 and expected by counterparty chains
+stores the consensus state under the key: `consensusStates/{revision_number}-{revision_height}`, with each number
+represented as a string.
+While this works fine for IBC proof verification, it makes efficient iteration difficult since the lexicographic order
+of the consensus state keys does not match the height order of the consensus states. This makes consensus state pruning and
+monotonic time enforcement difficult, since it is inefficient to find the earliest consensus state or the neighboring
+consensus states given a consensus state height.
+Changing the ICS-24 representation would be a major breaking change that requires counterparty chains to accept a new key format.
+Thus, to avoid breaking IBC, we store a lookup from a more efficiently formatted key, the `iterationKey`, to the consensus state key which
+stores the underlying consensus state. This efficient iteration key is formatted as: `iterateConsensusStates{BigEndianRevisionBytes}{BigEndianHeightBytes}`.
+This ensures that the lexicographic order of iteration keys matches the height order of the consensus states. Thus, we can use the SDK store's
+Iterators to iterate over the consensus states in ascending/descending order by providing a mapping from `iterationKey -> consensusStateKey -> ConsensusState`.
+A future version of IBC may choose to replace the ICS24 ConsensusState path with the more efficient format and make this indirection unnecessary.
+*/
+
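+// KeyIterateConsensusStatePrefix is the prefix under which iteration keys are stored; each iteration key maps to the ICS-24 consensus state key for that height.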
+const KeyIterateConsensusStatePrefix = "iterateConsensusStates"
+
+var (
+ // KeyProcessedTime is appended to consensus state key to store the processed time
+ KeyProcessedTime = []byte("/processedTime")
+ KeyIteration = []byte("/iterationKey")
+)
// SetConsensusState stores the consensus state at the given height.
func SetConsensusState(clientStore sdk.KVStore, cdc codec.BinaryMarshaler, consensusState *ConsensusState, height exported.Height) {
@@ -48,6 +72,12 @@ func GetConsensusState(store sdk.KVStore, cdc codec.BinaryMarshaler, height expo
return consensusState, nil
}
+// deleteConsensusState deletes the consensus state at the given height
+func deleteConsensusState(clientStore sdk.KVStore, height exported.Height) {
+ key := host.ConsensusStateKey(height)
+ clientStore.Delete(key)
+}
+
// IterateProcessedTime iterates through the prefix store and applies the callback.
// If the cb returns true, then iterator will close and stop.
func IterateProcessedTime(store sdk.KVStore, cb func(key, val []byte) bool) {
@@ -94,3 +124,123 @@ func GetProcessedTime(clientStore sdk.KVStore, height exported.Height) (uint64,
}
return sdk.BigEndianToUint64(bz), true
}
+
+// deleteProcessedTime deletes the processedTime for a given height
+func deleteProcessedTime(clientStore sdk.KVStore, height exported.Height) {
+ key := ProcessedTimeKey(height)
+ clientStore.Delete(key)
+}
+
+// Iteration Code
+
+// IterationKey returns the key under which the consensus state key will be stored.
+// The iteration key is the iteration prefix followed by a BigEndian encoding of the height, so lexicographic order matches height order.
+func IterationKey(height exported.Height) []byte {
+ heightBytes := bigEndianHeightBytes(height)
+ return append([]byte(KeyIterateConsensusStatePrefix), heightBytes...)
+}
+
+// SetIterationKey stores the consensus state key under a key that is more efficient for ordered iteration
+func SetIterationKey(clientStore sdk.KVStore, height exported.Height) {
+ key := IterationKey(height)
+ val := host.ConsensusStateKey(height)
+ clientStore.Set(key, val)
+}
+
+// GetIterationKey returns the consensus state key stored under the efficient iteration key.
+// NOTE: This function is currently only used for testing purposes
+func GetIterationKey(clientStore sdk.KVStore, height exported.Height) []byte {
+ key := IterationKey(height)
+ return clientStore.Get(key)
+}
+
+// deleteIterationKey deletes the iteration key for a given height
+func deleteIterationKey(clientStore sdk.KVStore, height exported.Height) {
+ key := IterationKey(height)
+ clientStore.Delete(key)
+}
+
+// GetHeightFromIterationKey takes an iteration key and returns the height that it references
+func GetHeightFromIterationKey(iterKey []byte) exported.Height {
+ bigEndianBytes := iterKey[len([]byte(KeyIterateConsensusStatePrefix)):]
+ revisionBytes := bigEndianBytes[0:8]
+ heightBytes := bigEndianBytes[8:]
+ revision := binary.BigEndian.Uint64(revisionBytes)
+ height := binary.BigEndian.Uint64(heightBytes)
+ return clienttypes.NewHeight(revision, height)
+}
+
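+// IterateConsensusStateAscending iterates over the stored iteration keys in ascending height order,
+// invoking cb with each height; iteration stops early when cb returns true.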
+func IterateConsensusStateAscending(clientStore sdk.KVStore, cb func(height exported.Height) (stop bool)) error {
+ iterator := sdk.KVStorePrefixIterator(clientStore, []byte(KeyIterateConsensusStatePrefix))
+ defer iterator.Close()
+
+ for ; iterator.Valid(); iterator.Next() {
+ iterKey := iterator.Key()
+ height := GetHeightFromIterationKey(iterKey)
+ if cb(height) {
+ return nil
+ }
+ }
+ return nil
+}
+
+// GetNextConsensusState returns the consensus state at the lowest height strictly greater than the given height.
+// The Iterator returns a storetypes.Iterator which iterates from start (inclusive) to end (exclusive).
+// Thus, to get the next consensus state, we must first call iterator.Next() and then get the value.
+func GetNextConsensusState(clientStore sdk.KVStore, cdc codec.BinaryMarshaler, height exported.Height) (*ConsensusState, bool) {
+ iterateStore := prefix.NewStore(clientStore, []byte(KeyIterateConsensusStatePrefix))
+ iterator := iterateStore.Iterator(bigEndianHeightBytes(height), nil)
+ defer iterator.Close()
+ // ignore the consensus state at current height and get next height
+ iterator.Next()
+ if !iterator.Valid() {
+ return nil, false
+ }
+
+ csKey := iterator.Value()
+
+ return getTmConsensusState(clientStore, cdc, csKey)
+}
+
+// GetPreviousConsensusState returns the consensus state at the highest height strictly lower than the given height.
+// The ReverseIterator iterates from the end (exclusive) to the start (inclusive), so its first entry is already
+// the previous consensus state; thus we call iterator.Value() immediately.
+func GetPreviousConsensusState(clientStore sdk.KVStore, cdc codec.BinaryMarshaler, height exported.Height) (*ConsensusState, bool) {
+ iterateStore := prefix.NewStore(clientStore, []byte(KeyIterateConsensusStatePrefix))
+ iterator := iterateStore.ReverseIterator(nil, bigEndianHeightBytes(height))
+ defer iterator.Close()
+
+ if !iterator.Valid() {
+ return nil, false
+ }
+
+ csKey := iterator.Value()
+
+ return getTmConsensusState(clientStore, cdc, csKey)
+}
+
+// Helper function for GetNextConsensusState and GetPreviousConsensusState
+func getTmConsensusState(clientStore sdk.KVStore, cdc codec.BinaryMarshaler, key []byte) (*ConsensusState, bool) {
+ bz := clientStore.Get(key)
+ if bz == nil {
+ return nil, false
+ }
+
+ consensusStateI, err := clienttypes.UnmarshalConsensusState(cdc, bz)
+ if err != nil {
+ return nil, false
+ }
+
+ consensusState, ok := consensusStateI.(*ConsensusState)
+ if !ok {
+ return nil, false
+ }
+ return consensusState, true
+}
+
+func bigEndianHeightBytes(height exported.Height) []byte {
+ heightBytes := make([]byte, 16)
+ binary.BigEndian.PutUint64(heightBytes, height.GetRevisionNumber())
+ binary.BigEndian.PutUint64(heightBytes[8:], height.GetRevisionHeight())
+ return heightBytes
+}
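To make the ordering argument from the file comment concrete, here is a small hedged, standalone sketch (not part of the patch) showing that lexicographic comparison of iteration keys agrees with numeric height order, while the ICS-24 string keys do not:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// iterKey mirrors IterationKey above: the iteration prefix followed by the
// big-endian revision number and revision height.
func iterKey(revision, height uint64) []byte {
	key := make([]byte, 16)
	binary.BigEndian.PutUint64(key, revision)
	binary.BigEndian.PutUint64(key[8:], height)
	return append([]byte("iterateConsensusStates"), key...)
}

func main() {
	// Numerically, height (0,4) < (0,10) < (4,9); the iteration keys compare
	// in the same order, which lets a prefix-store iterator walk consensus
	// states in ascending height order.
	fmt.Println(bytes.Compare(iterKey(0, 4), iterKey(0, 10)) < 0) // true
	fmt.Println(bytes.Compare(iterKey(0, 10), iterKey(4, 9)) < 0) // true

	// The ICS-24 string keys do not have this property: "0-10" sorts before
	// "0-4" lexicographically even though height 10 is greater than height 4.
	fmt.Println("0-10" < "0-4") // true
}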
diff --git a/modules/light-clients/07-tendermint/types/store_test.go b/modules/light-clients/07-tendermint/types/store_test.go
index e0230e08..9ba7823b 100644
--- a/modules/light-clients/07-tendermint/types/store_test.go
+++ b/modules/light-clients/07-tendermint/types/store_test.go
@@ -1,7 +1,11 @@
package types_test
import (
+ "math"
+ "time"
+
clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ commitmenttypes "github.com/cosmos/ibc-go/modules/core/23-commitment/types"
host "github.com/cosmos/ibc-go/modules/core/24-host"
"github.com/cosmos/ibc-go/modules/core/exported"
solomachinetypes "github.com/cosmos/ibc-go/modules/light-clients/06-solomachine/types"
@@ -116,3 +120,75 @@ func (suite *TendermintTestSuite) TestGetProcessedTime() {
_, ok = types.GetProcessedTime(store, clienttypes.NewHeight(1, 1))
suite.Require().False(ok, "retrieved processed time for a non-existent consensus state")
}
+
+func (suite *TendermintTestSuite) TestIterationKey() {
+ testHeights := []exported.Height{
+ clienttypes.NewHeight(0, 1),
+ clienttypes.NewHeight(0, 1234),
+ clienttypes.NewHeight(7890, 4321),
+ clienttypes.NewHeight(math.MaxUint64, math.MaxUint64),
+ }
+ for _, h := range testHeights {
+ k := types.IterationKey(h)
+ retrievedHeight := types.GetHeightFromIterationKey(k)
+ suite.Require().Equal(h, retrievedHeight, "retrieving height from iteration key failed")
+ }
+}
+
+func (suite *TendermintTestSuite) TestIterateConsensusStates() {
+ nextValsHash := []byte("nextVals")
+
+ // Set iteration keys and consensus states
+ types.SetIterationKey(suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), "testClient"), clienttypes.NewHeight(0, 1))
+ suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(suite.chainA.GetContext(), "testClient", clienttypes.NewHeight(0, 1), types.NewConsensusState(time.Now(), commitmenttypes.NewMerkleRoot([]byte("hash0-1")), nextValsHash))
+ types.SetIterationKey(suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), "testClient"), clienttypes.NewHeight(4, 9))
+ suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(suite.chainA.GetContext(), "testClient", clienttypes.NewHeight(4, 9), types.NewConsensusState(time.Now(), commitmenttypes.NewMerkleRoot([]byte("hash4-9")), nextValsHash))
+ types.SetIterationKey(suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), "testClient"), clienttypes.NewHeight(0, 10))
+ suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(suite.chainA.GetContext(), "testClient", clienttypes.NewHeight(0, 10), types.NewConsensusState(time.Now(), commitmenttypes.NewMerkleRoot([]byte("hash0-10")), nextValsHash))
+ types.SetIterationKey(suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), "testClient"), clienttypes.NewHeight(0, 4))
+ suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(suite.chainA.GetContext(), "testClient", clienttypes.NewHeight(0, 4), types.NewConsensusState(time.Now(), commitmenttypes.NewMerkleRoot([]byte("hash0-4")), nextValsHash))
+ types.SetIterationKey(suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), "testClient"), clienttypes.NewHeight(40, 1))
+ suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(suite.chainA.GetContext(), "testClient", clienttypes.NewHeight(40, 1), types.NewConsensusState(time.Now(), commitmenttypes.NewMerkleRoot([]byte("hash40-1")), nextValsHash))
+
+ var testArr []string
+ cb := func(height exported.Height) bool {
+ testArr = append(testArr, height.String())
+ return false
+ }
+
+ types.IterateConsensusStateAscending(suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), "testClient"), cb)
+ expectedArr := []string{"0-1", "0-4", "0-10", "4-9", "40-1"}
+ suite.Require().Equal(expectedArr, testArr)
+}
+
+func (suite *TendermintTestSuite) TestGetNeighboringConsensusStates() {
+ nextValsHash := []byte("nextVals")
+ cs01 := types.NewConsensusState(time.Now().UTC(), commitmenttypes.NewMerkleRoot([]byte("hash0-1")), nextValsHash)
+ cs04 := types.NewConsensusState(time.Now().UTC(), commitmenttypes.NewMerkleRoot([]byte("hash0-4")), nextValsHash)
+ cs49 := types.NewConsensusState(time.Now().UTC(), commitmenttypes.NewMerkleRoot([]byte("hash4-9")), nextValsHash)
+ height01 := clienttypes.NewHeight(0, 1)
+ height04 := clienttypes.NewHeight(0, 4)
+ height49 := clienttypes.NewHeight(4, 9)
+
+ // Set iteration keys and consensus states
+ types.SetIterationKey(suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), "testClient"), height01)
+ suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(suite.chainA.GetContext(), "testClient", height01, cs01)
+ types.SetIterationKey(suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), "testClient"), height04)
+ suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(suite.chainA.GetContext(), "testClient", height04, cs04)
+ types.SetIterationKey(suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), "testClient"), height49)
+ suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(suite.chainA.GetContext(), "testClient", height49, cs49)
+
+ prevCs01, ok := types.GetPreviousConsensusState(suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), "testClient"), suite.chainA.Codec, height01)
+ suite.Require().Nil(prevCs01, "consensus state exists before lowest consensus state")
+ suite.Require().False(ok)
+ prevCs49, ok := types.GetPreviousConsensusState(suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), "testClient"), suite.chainA.Codec, height49)
+ suite.Require().Equal(cs04, prevCs49, "previous consensus state is not returned correctly")
+ suite.Require().True(ok)
+
+ nextCs01, ok := types.GetNextConsensusState(suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), "testClient"), suite.chainA.Codec, height01)
+ suite.Require().Equal(cs04, nextCs01, "next consensus state not returned correctly")
+ suite.Require().True(ok)
+ nextCs49, ok := types.GetNextConsensusState(suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), "testClient"), suite.chainA.Codec, height49)
+ suite.Require().Nil(nextCs49, "next consensus state exists after highest consensus state")
+ suite.Require().False(ok)
+}
diff --git a/modules/light-clients/07-tendermint/types/update.go b/modules/light-clients/07-tendermint/types/update.go
index f1183cdc..18cda8fb 100644
--- a/modules/light-clients/07-tendermint/types/update.go
+++ b/modules/light-clients/07-tendermint/types/update.go
@@ -37,6 +37,11 @@ import (
// number must be the same. To update to a new revision, use a separate upgrade path
// Tendermint client validity checking uses the bisection algorithm described
// in the [Tendermint spec](https://github.com/tendermint/spec/blob/master/spec/consensus/light-client.md).
+//
+// Pruning:
+// UpdateClient will additionally retrieve the earliest consensus state for this clientID and check if it is expired. If it is,
+// that consensus state will be pruned from the store along with all associated metadata. This will prevent the client store from
+// becoming bloated with expired consensus states that can no longer be used for updates and packet verification.
func (cs ClientState) CheckHeaderAndUpdateState(
ctx sdk.Context, cdc codec.BinaryMarshaler, clientStore sdk.KVStore,
header exported.Header,
@@ -60,6 +65,35 @@ func (cs ClientState) CheckHeaderAndUpdateState(
return nil, nil, err
}
+ // Check the earliest consensus state to see if it is expired, if so then set the prune height
+ // so that we can delete consensus state and all associated metadata.
+ var (
+ pruneHeight exported.Height
+ pruneError error
+ )
+ pruneCb := func(height exported.Height) bool {
+ consState, err := GetConsensusState(clientStore, cdc, height)
+ // this error should never occur
+ if err != nil {
+ pruneError = err
+ return true
+ }
+ if cs.IsExpired(consState.Timestamp, ctx.BlockTime()) {
+ pruneHeight = height
+ }
+ return true
+ }
+ IterateConsensusStateAscending(clientStore, pruneCb)
+ if pruneError != nil {
+ return nil, nil, pruneError
+ }
+ // if pruneHeight is set, delete consensus state and metadata
+ if pruneHeight != nil {
+ deleteConsensusState(clientStore, pruneHeight)
+ deleteProcessedTime(clientStore, pruneHeight)
+ deleteIterationKey(clientStore, pruneHeight)
+ }
+
newClientState, consensusState := update(ctx, clientStore, &cs, tmHeader)
return newClientState, consensusState, nil
}
@@ -180,7 +214,9 @@ func update(ctx sdk.Context, clientStore sdk.KVStore, clientState *ClientState,
// set context time as processed time as this is state internal to tendermint client logic.
// client state and consensus state will be set by client keeper
+ // set iteration key to provide ability for efficient ordered iteration of consensus states.
SetProcessedTime(clientStore, header.GetHeight(), uint64(ctx.BlockTime().UnixNano()))
+ SetIterationKey(clientStore, header.GetHeight())
return clientState, consensusState
}
diff --git a/modules/light-clients/07-tendermint/types/update_test.go b/modules/light-clients/07-tendermint/types/update_test.go
index 672defe7..14c34645 100644
--- a/modules/light-clients/07-tendermint/types/update_test.go
+++ b/modules/light-clients/07-tendermint/types/update_test.go
@@ -19,6 +19,9 @@ func (suite *TendermintTestSuite) TestCheckHeaderAndUpdateState() {
consStateHeight clienttypes.Height
newHeader *types.Header
currentTime time.Time
+ bothValSet *tmtypes.ValidatorSet
+ signers []tmtypes.PrivValidator
+ bothSigners []tmtypes.PrivValidator
)
// Setup different validators and signers for testing different types of updates
@@ -35,28 +38,19 @@ func (suite *TendermintTestSuite) TestCheckHeaderAndUpdateState() {
heightPlus5 := clienttypes.NewHeight(height.RevisionNumber, height.RevisionHeight+5)
altVal := tmtypes.NewValidator(altPubKey, revisionHeight)
-
- // Create bothValSet with both suite validator and altVal. Would be valid update
- bothValSet := tmtypes.NewValidatorSet(append(suite.valSet.Validators, altVal))
// Create alternative validator set with only altVal, invalid update (too much change in valSet)
altValSet := tmtypes.NewValidatorSet([]*tmtypes.Validator{altVal})
- signers := []tmtypes.PrivValidator{suite.privVal}
-
- // Create signer array and ensure it is in same order as bothValSet
- _, suiteVal := suite.valSet.GetByIndex(0)
- bothSigners := ibctesting.CreateSortedSignerArray(altPrivVal, suite.privVal, altVal, suiteVal)
-
altSigners := []tmtypes.PrivValidator{altPrivVal}
testCases := []struct {
name string
- setup func()
+ setup func(suite *TendermintTestSuite)
expPass bool
}{
{
name: "successful update with next height and same validator set",
- setup: func() {
+ setup: func(suite *TendermintTestSuite) {
clientState = types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
consensusState = types.NewConsensusState(suite.clientTime, commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), suite.valsHash)
newHeader = suite.chainA.CreateTMClientHeader(chainID, int64(heightPlus1.RevisionHeight), height, suite.headerTime, suite.valSet, suite.valSet, signers)
@@ -66,7 +60,7 @@ func (suite *TendermintTestSuite) TestCheckHeaderAndUpdateState() {
},
{
name: "successful update with future height and different validator set",
- setup: func() {
+ setup: func(suite *TendermintTestSuite) {
clientState = types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
consensusState = types.NewConsensusState(suite.clientTime, commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), suite.valsHash)
newHeader = suite.chainA.CreateTMClientHeader(chainID, int64(heightPlus5.RevisionHeight), height, suite.headerTime, bothValSet, suite.valSet, bothSigners)
@@ -76,7 +70,7 @@ func (suite *TendermintTestSuite) TestCheckHeaderAndUpdateState() {
},
{
name: "successful update with next height and different validator set",
- setup: func() {
+ setup: func(suite *TendermintTestSuite) {
clientState = types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
consensusState = types.NewConsensusState(suite.clientTime, commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), bothValSet.Hash())
newHeader = suite.chainA.CreateTMClientHeader(chainID, int64(heightPlus1.RevisionHeight), height, suite.headerTime, bothValSet, bothValSet, bothSigners)
@@ -86,7 +80,7 @@ func (suite *TendermintTestSuite) TestCheckHeaderAndUpdateState() {
},
{
name: "successful update for a previous height",
- setup: func() {
+ setup: func(suite *TendermintTestSuite) {
clientState = types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
consensusState = types.NewConsensusState(suite.clientTime, commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), suite.valsHash)
consStateHeight = heightMinus3
@@ -97,9 +91,10 @@ func (suite *TendermintTestSuite) TestCheckHeaderAndUpdateState() {
},
{
name: "successful update for a previous revision",
- setup: func() {
+ setup: func(suite *TendermintTestSuite) {
clientState = types.NewClientState(chainIDRevision1, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
consensusState = types.NewConsensusState(suite.clientTime, commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), suite.valsHash)
+ consStateHeight = heightMinus3
newHeader = suite.chainA.CreateTMClientHeader(chainIDRevision0, int64(height.RevisionHeight), heightMinus3, suite.headerTime, bothValSet, suite.valSet, bothSigners)
currentTime = suite.now
},
@@ -107,7 +102,7 @@ func (suite *TendermintTestSuite) TestCheckHeaderAndUpdateState() {
},
{
name: "unsuccessful update with incorrect header chain-id",
- setup: func() {
+ setup: func(suite *TendermintTestSuite) {
clientState = types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
consensusState = types.NewConsensusState(suite.clientTime, commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), suite.valsHash)
newHeader = suite.chainA.CreateTMClientHeader("ethermint", int64(heightPlus1.RevisionHeight), height, suite.headerTime, suite.valSet, suite.valSet, signers)
@@ -117,7 +112,7 @@ func (suite *TendermintTestSuite) TestCheckHeaderAndUpdateState() {
},
{
name: "unsuccessful update to a future revision",
- setup: func() {
+ setup: func(suite *TendermintTestSuite) {
clientState = types.NewClientState(chainIDRevision0, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
consensusState = types.NewConsensusState(suite.clientTime, commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), suite.valsHash)
newHeader = suite.chainA.CreateTMClientHeader(chainIDRevision1, 1, height, suite.headerTime, suite.valSet, suite.valSet, signers)
@@ -127,7 +122,7 @@ func (suite *TendermintTestSuite) TestCheckHeaderAndUpdateState() {
},
{
name: "unsuccessful update: header height revision and trusted height revision mismatch",
- setup: func() {
+ setup: func(suite *TendermintTestSuite) {
clientState = types.NewClientState(chainIDRevision1, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, clienttypes.NewHeight(1, 1), commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
consensusState = types.NewConsensusState(suite.clientTime, commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), suite.valsHash)
newHeader = suite.chainA.CreateTMClientHeader(chainIDRevision1, 3, height, suite.headerTime, suite.valSet, suite.valSet, signers)
@@ -137,7 +132,7 @@ func (suite *TendermintTestSuite) TestCheckHeaderAndUpdateState() {
},
{
name: "unsuccessful update with next height: update header mismatches nextValSetHash",
- setup: func() {
+ setup: func(suite *TendermintTestSuite) {
clientState = types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
consensusState = types.NewConsensusState(suite.clientTime, commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), suite.valsHash)
newHeader = suite.chainA.CreateTMClientHeader(chainID, int64(heightPlus1.RevisionHeight), height, suite.headerTime, bothValSet, suite.valSet, bothSigners)
@@ -147,7 +142,7 @@ func (suite *TendermintTestSuite) TestCheckHeaderAndUpdateState() {
},
{
name: "unsuccessful update with next height: update header mismatches different nextValSetHash",
- setup: func() {
+ setup: func(suite *TendermintTestSuite) {
clientState = types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
consensusState = types.NewConsensusState(suite.clientTime, commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), bothValSet.Hash())
newHeader = suite.chainA.CreateTMClientHeader(chainID, int64(heightPlus1.RevisionHeight), height, suite.headerTime, suite.valSet, bothValSet, signers)
@@ -157,7 +152,7 @@ func (suite *TendermintTestSuite) TestCheckHeaderAndUpdateState() {
},
{
name: "unsuccessful update with future height: too much change in validator set",
- setup: func() {
+ setup: func(suite *TendermintTestSuite) {
clientState = types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
consensusState = types.NewConsensusState(suite.clientTime, commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), suite.valsHash)
newHeader = suite.chainA.CreateTMClientHeader(chainID, int64(heightPlus5.RevisionHeight), height, suite.headerTime, altValSet, suite.valSet, altSigners)
@@ -167,7 +162,7 @@ func (suite *TendermintTestSuite) TestCheckHeaderAndUpdateState() {
},
{
name: "unsuccessful updates, passed in incorrect trusted validators for given consensus state",
- setup: func() {
+ setup: func(suite *TendermintTestSuite) {
clientState = types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
consensusState = types.NewConsensusState(suite.clientTime, commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), suite.valsHash)
newHeader = suite.chainA.CreateTMClientHeader(chainID, int64(heightPlus5.RevisionHeight), height, suite.headerTime, bothValSet, bothValSet, bothSigners)
@@ -177,7 +172,7 @@ func (suite *TendermintTestSuite) TestCheckHeaderAndUpdateState() {
},
{
name: "unsuccessful update: trusting period has passed since last client timestamp",
- setup: func() {
+ setup: func(suite *TendermintTestSuite) {
clientState = types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
consensusState = types.NewConsensusState(suite.clientTime, commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), suite.valsHash)
newHeader = suite.chainA.CreateTMClientHeader(chainID, int64(heightPlus1.RevisionHeight), height, suite.headerTime, suite.valSet, suite.valSet, signers)
@@ -188,7 +183,7 @@ func (suite *TendermintTestSuite) TestCheckHeaderAndUpdateState() {
},
{
name: "unsuccessful update: header timestamp is past current timestamp",
- setup: func() {
+ setup: func(suite *TendermintTestSuite) {
clientState = types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
consensusState = types.NewConsensusState(suite.clientTime, commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), suite.valsHash)
newHeader = suite.chainA.CreateTMClientHeader(chainID, int64(heightPlus1.RevisionHeight), height, suite.now.Add(time.Minute), suite.valSet, suite.valSet, signers)
@@ -198,7 +193,7 @@ func (suite *TendermintTestSuite) TestCheckHeaderAndUpdateState() {
},
{
name: "unsuccessful update: header timestamp is not past last client timestamp",
- setup: func() {
+ setup: func(suite *TendermintTestSuite) {
clientState = types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
consensusState = types.NewConsensusState(suite.clientTime, commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), suite.valsHash)
newHeader = suite.chainA.CreateTMClientHeader(chainID, int64(heightPlus1.RevisionHeight), height, suite.clientTime, suite.valSet, suite.valSet, signers)
@@ -208,7 +203,7 @@ func (suite *TendermintTestSuite) TestCheckHeaderAndUpdateState() {
},
{
name: "header basic validation failed",
- setup: func() {
+ setup: func(suite *TendermintTestSuite) {
clientState = types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
consensusState = types.NewConsensusState(suite.clientTime, commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), suite.valsHash)
newHeader = suite.chainA.CreateTMClientHeader(chainID, int64(heightPlus1.RevisionHeight), height, suite.headerTime, suite.valSet, suite.valSet, signers)
@@ -220,7 +215,7 @@ func (suite *TendermintTestSuite) TestCheckHeaderAndUpdateState() {
},
{
name: "header height < consensus height",
- setup: func() {
+ setup: func(suite *TendermintTestSuite) {
clientState = types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, clienttypes.NewHeight(height.RevisionNumber, heightPlus5.RevisionHeight), commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
consensusState = types.NewConsensusState(suite.clientTime, commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), suite.valsHash)
// Make new header at height less than latest client state
@@ -233,49 +228,123 @@ func (suite *TendermintTestSuite) TestCheckHeaderAndUpdateState() {
for i, tc := range testCases {
tc := tc
+ suite.Run(tc.name, func() {
+ suite.SetupTest() // reset metadata writes
+ // Create bothValSet with both suite validator and altVal. Would be valid update
+ bothValSet = tmtypes.NewValidatorSet(append(suite.valSet.Validators, altVal))
+ signers = []tmtypes.PrivValidator{suite.privVal}
+
+ // Create signer array and ensure it is in same order as bothValSet
+ _, suiteVal := suite.valSet.GetByIndex(0)
+ bothSigners = ibctesting.CreateSortedSignerArray(altPrivVal, suite.privVal, altVal, suiteVal)
- consStateHeight = height // must be explicitly changed
- // setup test
- tc.setup()
+ consStateHeight = height // must be explicitly changed
+ // setup test
+ tc.setup(suite)
- // Set current timestamp in context
- ctx := suite.chainA.GetContext().WithBlockTime(currentTime)
+ // Set current timestamp in context
+ ctx := suite.chainA.GetContext().WithBlockTime(currentTime)
- // Set trusted consensus state in client store
- suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(ctx, clientID, consStateHeight, consensusState)
+ // Set trusted consensus state in client store
+ suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(ctx, clientID, consStateHeight, consensusState)
- height := newHeader.GetHeight()
- expectedConsensus := &types.ConsensusState{
- Timestamp: newHeader.GetTime(),
- Root: commitmenttypes.NewMerkleRoot(newHeader.Header.GetAppHash()),
- NextValidatorsHash: newHeader.Header.NextValidatorsHash,
- }
+ height := newHeader.GetHeight()
+ expectedConsensus := &types.ConsensusState{
+ Timestamp: newHeader.GetTime(),
+ Root: commitmenttypes.NewMerkleRoot(newHeader.Header.GetAppHash()),
+ NextValidatorsHash: newHeader.Header.NextValidatorsHash,
+ }
+
+ newClientState, consensusState, err := clientState.CheckHeaderAndUpdateState(
+ ctx,
+ suite.cdc,
+ suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), clientID), // pass in clientID prefixed clientStore
+ newHeader,
+ )
- newClientState, consensusState, err := clientState.CheckHeaderAndUpdateState(
- ctx,
- suite.cdc,
- suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), clientID), // pass in clientID prefixed clientStore
- newHeader,
- )
+ if tc.expPass {
+ suite.Require().NoError(err, "valid test case %d failed: %s", i, tc.name)
- if tc.expPass {
- suite.Require().NoError(err, "valid test case %d failed: %s", i, tc.name)
+ // Determine if clientState should be updated or not
+ // TODO: check the entire Height struct once GetLatestHeight returns clienttypes.Height
+ if height.GT(clientState.LatestHeight) {
+ // Header Height is greater than clientState latest Height, clientState should be updated with header.GetHeight()
+ suite.Require().Equal(height, newClientState.GetLatestHeight(), "clientstate height did not update")
+ } else {
+ // Update will add past consensus state, clientState should not be updated at all
+ suite.Require().Equal(clientState.LatestHeight, newClientState.GetLatestHeight(), "client state height updated for past header")
+ }
- // Determine if clientState should be updated or not
- // TODO: check the entire Height struct once GetLatestHeight returns clienttypes.Height
- if height.GT(clientState.LatestHeight) {
- // Header Height is greater than clientState latest Height, clientState should be updated with header.GetHeight()
- suite.Require().Equal(height, newClientState.GetLatestHeight(), "clientstate height did not update")
+ suite.Require().Equal(expectedConsensus, consensusState, "valid test case %d failed: %s", i, tc.name)
} else {
- // Update will add past consensus state, clientState should not be updated at all
- suite.Require().Equal(clientState.LatestHeight, newClientState.GetLatestHeight(), "client state height updated for past header")
+ suite.Require().Error(err, "invalid test case %d passed: %s", i, tc.name)
+ suite.Require().Nil(newClientState, "invalid test case %d passed: %s", i, tc.name)
+ suite.Require().Nil(consensusState, "invalid test case %d passed: %s", i, tc.name)
}
-
- suite.Require().Equal(expectedConsensus, consensusState, "valid test case %d failed: %s", i, tc.name)
- } else {
- suite.Require().Error(err, "invalid test case %d passed: %s", i, tc.name)
- suite.Require().Nil(newClientState, "invalid test case %d passed: %s", i, tc.name)
- suite.Require().Nil(consensusState, "invalid test case %d passed: %s", i, tc.name)
- }
+ })
}
}
+
+func (suite *TendermintTestSuite) TestPruneConsensusState() {
+ // create path and setup clients
+ path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.SetupClients(path)
+
+ // call update client twice. When pruning occurs, only first consensus state should be pruned.
+ // this height will be pruned
+ path.EndpointA.UpdateClient()
+ pruneHeight := path.EndpointA.GetClientState().GetLatestHeight()
+
+ // this height will be expired but not pruned
+ path.EndpointA.UpdateClient()
+ expiredHeight := path.EndpointA.GetClientState().GetLatestHeight()
+
+ // expected values that must still remain in store after pruning
+ expectedConsState, ok := path.EndpointA.Chain.GetConsensusState(path.EndpointA.ClientID, expiredHeight)
+ suite.Require().True(ok)
+ ctx := path.EndpointA.Chain.GetContext()
+ clientStore := path.EndpointA.Chain.App.GetIBCKeeper().ClientKeeper.ClientStore(ctx, path.EndpointA.ClientID)
+ expectedProcessTime, ok := types.GetProcessedTime(clientStore, expiredHeight)
+ suite.Require().True(ok)
+ expectedConsKey := types.GetIterationKey(clientStore, expiredHeight)
+ suite.Require().NotNil(expectedConsKey)
+
+ // Increment the time by a week
+ suite.coordinator.IncrementTimeBy(7 * 24 * time.Hour)
+
+ // create the consensus state that can be used as trusted height for next update
+ path.EndpointA.UpdateClient()
+
+ // Increment the time by another week, then update the client.
+ // This will cause the first two consensus states to become expired.
+ suite.coordinator.IncrementTimeBy(7 * 24 * time.Hour)
+ path.EndpointA.UpdateClient()
+
+ ctx = path.EndpointA.Chain.GetContext()
+ clientStore = path.EndpointA.Chain.App.GetIBCKeeper().ClientKeeper.ClientStore(ctx, path.EndpointA.ClientID)
+
+ // check that the first expired consensus state got deleted along with all associated metadata
+ consState, ok := path.EndpointA.Chain.GetConsensusState(path.EndpointA.ClientID, pruneHeight)
+ suite.Require().Nil(consState, "expired consensus state not pruned")
+ suite.Require().False(ok)
+ // check processed time metadata is pruned
+ processTime, ok := types.GetProcessedTime(clientStore, pruneHeight)
+ suite.Require().Equal(uint64(0), processTime, "processed time metadata not pruned")
+ suite.Require().False(ok)
+ // check iteration key metadata is pruned
+ consKey := types.GetIterationKey(clientStore, pruneHeight)
+ suite.Require().Nil(consKey, "iteration key not pruned")
+
+ // check that second expired consensus state doesn't get deleted
+ // this ensures that there is a cap on gas cost of UpdateClient
+ consState, ok = path.EndpointA.Chain.GetConsensusState(path.EndpointA.ClientID, expiredHeight)
+ suite.Require().Equal(expectedConsState, consState, "consensus state incorrectly pruned")
+ suite.Require().True(ok)
+ // check processed time metadata is not pruned
+ processTime, ok = types.GetProcessedTime(clientStore, expiredHeight)
+ suite.Require().Equal(expectedProcessTime, processTime, "processed time metadata incorrectly pruned")
+ suite.Require().True(ok)
+ // check iteration key metadata is not pruned
+ consKey = types.GetIterationKey(clientStore, expiredHeight)
+ suite.Require().Equal(expectedConsKey, consKey, "iteration key incorrectly pruned")
+}
From 3561ddf7336f71cac3fe0f4cc5dd5a78f0e2769f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?colin=20axn=C3=A9r?=
<25233464+colin-axner@users.noreply.github.com>
Date: Mon, 26 Apr 2021 15:42:28 +0200
Subject: [PATCH 041/393] add ADRs from SDK (#139)
---
docs/architecture/README.md | 34 ++
.../adr-001-coin-source-tracing.md | 376 ++++++++++++++++++
.../adr-015-ibc-packet-receiver.md | 299 ++++++++++++++
.../adr-025-ibc-passive-channels.md | 141 +++++++
.../adr-026-ibc-client-recovery-mechanisms.md | 82 ++++
docs/architecture/adr-template.md | 37 ++
6 files changed, 969 insertions(+)
create mode 100644 docs/architecture/README.md
create mode 100644 docs/architecture/adr-001-coin-source-tracing.md
create mode 100644 docs/architecture/adr-015-ibc-packet-receiver.md
create mode 100644 docs/architecture/adr-025-ibc-passive-channels.md
create mode 100644 docs/architecture/adr-026-ibc-client-recovery-mechanisms.md
create mode 100644 docs/architecture/adr-template.md
diff --git a/docs/architecture/README.md b/docs/architecture/README.md
new file mode 100644
index 00000000..0cc93bae
--- /dev/null
+++ b/docs/architecture/README.md
@@ -0,0 +1,34 @@
+# Architecture Decision Records (ADR)
+
+This is a location to record all high-level architecture decisions in the ibc-go project.
+
+You can read more about the ADR concept in this [blog post](https://product.reverb.com/documenting-architecture-decisions-the-reverb-way-a3563bb24bd0#.78xhdix6t).
+
+An ADR should provide:
+
+- Context on the relevant goals and the current state
+- Proposed changes to achieve the goals
+- Summary of pros and cons
+- References
+- Changelog
+
+Note the distinction between an ADR and a spec. The ADR provides the context, intuition, reasoning, and
+justification for a change in architecture, or for the architecture of something
+new. The spec is a much more compressed and streamlined summary of everything as
+it is or should be.
+
+If recorded decisions turned out to be lacking, convene a discussion, record the new decisions here, and then modify the code to match.
+
+Note the context/background should be written in the present tense.
+
+To suggest an ADR, please make use of the [ADR template](./adr-template.md) provided.
+
+## Table of Contents
+
+| ADR \# | Description | Status |
+| ------ | ----------- | ------ |
+| [001](./adr-001-coin-source-tracing.md) | ICS-20 coin denomination format | Accepted, Implemented |
+| [015](./adr-015-ibc-packet-receiver.md) | IBC Packet Routing | Accepted |
+| [025](./adr-025-ibc-passive-channels.md) | IBC passive channels | Deprecated |
+| [026](./adr-026-ibc-client-recovery-mechanisms.md) | IBC client recovery mechanisms | Accepted |
+
diff --git a/docs/architecture/adr-001-coin-source-tracing.md b/docs/architecture/adr-001-coin-source-tracing.md
new file mode 100644
index 00000000..d5364d1c
--- /dev/null
+++ b/docs/architecture/adr-001-coin-source-tracing.md
@@ -0,0 +1,376 @@
+# ADR 001: Coin Source Tracing
+
+## Changelog
+
+- 2020-07-09: Initial Draft
+- 2020-08-11: Implementation changes
+
+## Status
+
+Accepted, Implemented
+
+## Context
+
+The specification for IBC cross-chain fungible token transfers
+([ICS20](https://github.com/cosmos/ics/tree/master/spec/ics-020-fungible-token-transfer)) needs to
+be aware of the origin of any token denomination in order to relay a `Packet` which contains the sender
+and recipient addresses in the
+[`FungibleTokenPacketData`](https://github.com/cosmos/ibc/tree/master/spec/app/ics-020-fungible-token-transfer#data-structures).
+
+The Packet relay sending works based on 2 cases (per
+[specification](https://github.com/cosmos/ibc/tree/master/spec/app/ics-020-fungible-token-transfer#packet-relay) and [Colin Axnér](https://github.com/colin-axner)'s description):
+
+1. Sender chain is acting as the source zone. The coins are transferred
+to an escrow address (i.e locked) on the sender chain and then transferred
+to the receiving chain through IBC TAO logic. It is expected that the
+receiving chain will mint vouchers to the receiving address.
+
+2. Sender chain is acting as the sink zone. The coins (vouchers) are burned
+on the sender chain and then transferred to the receiving chain though IBC
+TAO logic. It is expected that the receiving chain, which had previously
+sent the original denomination, will unescrow the fungible token and send
+it to the receiving address.
+
+Another way of thinking of source and sink zones is through the token's
+timeline. Each send to any chain other than the one it was previously
+received from is a movement forwards in the token's timeline. This causes
+trace to be added to the token's history and the destination port and
+destination channel to be prefixed to the denomination. In these instances
+the sender chain is acting as the source zone. When the token is sent back
+to the chain it previously received from, the prefix is removed. This is
+a backwards movement in the token's timeline and the sender chain
+is acting as the sink zone.
+
+### Example
+
+Assume the following channel connections exist and that all channels use the port ID `transfer`:
+
+- chain `A` has channels with chain `B` and chain `C` with the IDs `channelToB` and `channelToC`, respectively
+- chain `B` has channels with chain `A` and chain `C` with the IDs `channelToA` and `channelToC`, respectively
+- chain `C` has channels with chain `A` and chain `B` with the IDs `channelToA` and `channelToB`, respectively
+
+These steps of transfer between chains occur in the following order: `A -> B -> C -> A -> C`. In particular:
+
+1. `A -> B`: sender chain is source zone. `A` sends packet with `denom` (escrowed on `A`), `B` receives `denom` and mints and sends voucher `transfer/channelToA/denom` to recipient.
+2. `B -> C`: sender chain is source zone. `B` sends packet with `transfer/channelToA/denom` (escrowed on `B`), `C` receives `transfer/channelToA/denom` and mints and sends voucher `transfer/channelToB/transfer/channelToA/denom` to recipient.
+3. `C -> A`: sender chain is source zone. `C` sends packet with `transfer/channelToB/transfer/channelToA/denom` (escrowed on `C`), `A` receives `transfer/channelToB/transfer/channelToA/denom` and mints and sends voucher `transfer/channelToC/transfer/channelToB/transfer/channelToA/denom` to recipient.
+4. `A -> C`: sender chain is sink zone. `A` sends packet with `transfer/channelToC/transfer/channelToB/transfer/channelToA/denom` (burned on `A`), `C` receives `transfer/channelToC/transfer/channelToB/transfer/channelToA/denom`, and unescrows and sends `transfer/channelToB/transfer/channelToA/denom` to recipient.
+
+The token has a final denomination on chain `C` of `transfer/channelToB/transfer/channelToA/denom`, where `transfer/channelToB/transfer/channelToA` is the trace information.
+
+In this context, upon a receive of a cross-chain fungible token transfer, if the sender chain is the source of the token, the protocol prefixes the denomination with the port and channel identifiers in the following format:
+
+```typescript
+prefix + denom = {destPortN}/{destChannelN}/.../{destPort0}/{destChannel0}/denom
+```
+
+Example: transferring `100 uatom` from port `HubPort` and channel `HubChannel` on the Hub to
+Ethermint's port `EthermintPort` and channel `EthermintChannel` results in `100
+EthermintPort/EthermintChannel/uatom`, where `EthermintPort/EthermintChannel/uatom` is the new
+denomination on the receiving chain.
+
+In the case those tokens are transferred back to the Hub (i.e the **source** chain), the prefix is
+trimmed and the token denomination updated to the original one.
+
+### Problem
+
+The problem of adding additional information to the coin denomination is twofold:
+
+1. The ever-increasing length if tokens are transferred to zones other than the source:
+
+If a token is transferred `n` times via IBC to a sink chain, the token denom will contain `n` pairs
+of prefixes, as shown on the format example above. This poses a problem because, while port and
+channel identifiers have a maximum length of 64 each, the SDK `Coin` type only accepts denoms up to
+64 characters. Thus, a single cross-chain token, which, again, is composed of the port and channel
+identifiers plus the base denomination, can exceed the length validation for the SDK `Coins`.
+
+This can result in undesired behaviours such as tokens not being able to be transferred to multiple
+sink chains if the denomination exceeds the length or unexpected `panics` due to denomination
+validation failing on the receiving chain.
+
+2. The existence of special characters and uppercase letters on the denomination:
+
+In the SDK every time a `Coin` is initialized through the constructor function `NewCoin`, a validation
+of a coin's denom is performed according to a
+[Regex](https://github.com/cosmos/cosmos-sdk/blob/a940214a4923a3bf9a9161cd14bd3072299cd0c9/types/coin.go#L583),
+where only lowercase alphanumeric characters are accepted. While this is desirable for native denominations
+to keep a clean UX, it presents a challenge for IBC as ports and channels might be randomly
+generated with special and uppercase characters as per the [ICS 024 - Host
+Requirements](https://github.com/cosmos/ibc/tree/master/spec/core/ics-024-host-requirements#paths-identifiers-separators)
+specification.
+
+## Decision
+
+The issues outlined above are applicable only to SDK-based chains, and thus the proposed solution
+does not require specification changes that would result in modifications to other implementations
+of the ICS20 spec.
+
+Instead of adding the identifiers on the coin denomination directly, the proposed solution hashes
+the denomination prefix in order to get a consistent length for all the cross-chain fungible tokens.
+
+This will be used for internal storage only, and when transferred via IBC to a different chain, the
+denomination specified on the packet data will be the full prefix path of the identifiers needed to
+trace the token back to the originating chain, as specified on ICS20.
+
+The new proposed format will be the following:
+
+```golang
+ibcDenom = "ibc/" + hash(trace path + "/" + base denom)
+```
+
+The hash function will be a SHA256 hash of the fields of the `DenomTrace`:
+
+```protobuf
+// DenomTrace contains the base denomination for ICS20 fungible tokens and the source tracing
+// information
+message DenomTrace {
+ // chain of port/channel identifiers used for tracing the source of the fungible token
+ string path = 1;
+ // base denomination of the relayed fungible token
+ string base_denom = 2;
+}
+```
+
+The `IBCDenom` function constructs the `Coin` denomination used when creating the ICS20 fungible token packet data:
+
+```golang
+// Hash returns the hex bytes of the SHA256 hash of the DenomTrace fields using the following formula:
+//
+// hash = sha256(tracePath + "/" + baseDenom)
+func (dt DenomTrace) Hash() tmbytes.HexBytes {
+ return tmhash.Sum(dt.Path + "/" + dt.BaseDenom)
+}
+
+// IBCDenom returns a coin denomination for an ICS20 fungible token in the format 'ibc/{hash(tracePath + baseDenom)}'.
+// If the trace is empty, it will return the base denomination.
+func (dt DenomTrace) IBCDenom() string {
+ if dt.Path != "" {
+ return fmt.Sprintf("ibc/%s", dt.Hash())
+ }
+ return dt.BaseDenom
+}
+```
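+
+For intuition, here is a minimal, self-contained sketch (not the module's code) of how a trace path and base
+denomination map to an `ibc/{hash}` denomination, assuming `tmhash.Sum` is a plain SHA-256 and the hash is
+rendered as uppercase hex, as `tmbytes.HexBytes` does:
+
+```golang
+package main
+
+import (
+	"crypto/sha256"
+	"encoding/hex"
+	"fmt"
+	"strings"
+)
+
+// ibcDenom sketches the "ibc/{hash}" construction described above using only
+// the standard library; tmhash.Sum is a SHA-256 sum, so the output format matches.
+func ibcDenom(tracePath, baseDenom string) string {
+	if tracePath == "" {
+		// native tokens keep their base denomination
+		return baseDenom
+	}
+	hash := sha256.Sum256([]byte(tracePath + "/" + baseDenom))
+	return "ibc/" + strings.ToUpper(hex.EncodeToString(hash[:]))
+}
+
+func main() {
+	// e.g. a token received over port "transfer" and channel "channelToA"
+	fmt.Println(ibcDenom("transfer/channelToA", "uatom"))
+	fmt.Println(ibcDenom("", "uatom"))
+}
+```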
+
+### `x/ibc-transfer` Changes
+
+In order to retrieve the trace information from an IBC denomination, a lookup table needs to be
+added to the `ibc-transfer` module. These values also need to be persisted between upgrades, meaning
+that a new `[]DenomTrace` field needs to be added to the module's `GenesisState`:
+
+```golang
+// GetDenomTrace retrieves the full identifiers trace and base denomination from the store.
+func (k Keeper) GetDenomTrace(ctx Context, denomTraceHash []byte) (DenomTrace, bool) {
+ store := ctx.KVStore(k.storeKey)
+ bz := store.Get(types.KeyDenomTrace(denomTraceHash))
+ if bz == nil {
+ return DenomTrace{}, false
+ }
+
+ var denomTrace DenomTrace
+ k.cdc.MustUnmarshalBinaryBare(bz, &denomTrace)
+ return denomTrace, true
+}
+
+// HasDenomTrace checks if the key with the given trace hash exists in the store.
+func (k Keeper) HasDenomTrace(ctx Context, denomTraceHash []byte) bool {
+ store := ctx.KVStore(k.storeKey)
+ return store.Has(types.KeyTrace(denomTraceHash))
+}
+
+// SetDenomTrace sets a new {trace hash -> trace} pair to the store.
+func (k Keeper) SetDenomTrace(ctx Context, denomTrace DenomTrace) {
+ store := ctx.KVStore(k.storeKey)
+ bz := k.cdc.MustMarshalBinaryBare(&denomTrace)
+ store.Set(types.KeyTrace(denomTrace.Hash()), bz)
+}
+```
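+
+For reference, a minimal sketch of the genesis additions (field names here are illustrative; the final
+type lives in the transfer module's proto definitions):
+
+```golang
+// GenesisState sketch: the denomination traces must be exported and restored
+// across upgrades so that the hash -> trace lookup table survives.
+// Field names are illustrative only.
+type GenesisState struct {
+	PortID      string       `json:"port_id"`
+	DenomTraces []DenomTrace `json:"denom_traces"`
+}
+```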
+
+The `MsgTransfer` will validate that the `Coin` denomination from the `Token` field contains a valid
+hash, if the trace info is provided, or that the base denomination matches:
+
+```golang
+func (msg MsgTransfer) ValidateBasic() error {
+ // ...
+ return ValidateIBCDenom(msg.Token.Denom)
+}
+```
+
+```golang
+// ValidateIBCDenom validates that the given denomination is either:
+//
+// - A valid base denomination (eg: 'uatom')
+// - A valid fungible token representation (i.e 'ibc/{hash}') per ADR 001 https://github.com/cosmos/cosmos-sdk/blob/master/docs/architecture/adr-001-coin-source-tracing.md
+func ValidateIBCDenom(denom string) error {
+ denomSplit := strings.SplitN(denom, "/", 2)
+
+ switch {
+ case strings.TrimSpace(denom) == "",
+ len(denomSplit) == 1 && denomSplit[0] == "ibc",
+ len(denomSplit) == 2 && (denomSplit[0] != "ibc" || strings.TrimSpace(denomSplit[1]) == ""):
+ return sdkerrors.Wrapf(ErrInvalidDenomForTransfer, "denomination should be prefixed with the format 'ibc/{hash(trace + \"/\" + %s)}'", denom)
+
+ case denomSplit[0] == denom && strings.TrimSpace(denom) != "":
+ return sdk.ValidateDenom(denom)
+ }
+
+ if _, err := ParseHexHash(denomSplit[1]); err != nil {
+ return sdkerrors.Wrapf(err, "invalid denom trace hash %s", denomSplit[1])
+ }
+
+ return nil
+}
+```
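+
+For example, a hedged usage sketch (the import path is illustrative, and the hash below is just an
+arbitrary 64-character hex string rather than the hash of a real trace):
+
+```golang
+package main
+
+import (
+	"fmt"
+
+	// illustrative import path for the transfer module's types package
+	transfertypes "github.com/cosmos/cosmos-sdk/x/ibc-transfer/types"
+)
+
+func main() {
+	for _, denom := range []string{
+		"uatom", // valid base denomination
+		"ibc/27A6394C3F9FF9C9DCF5DFFADF9BB5FE9A37C7E92B006199894CF1824DF9AC7C", // valid 'ibc/{hash}' form
+		"ibc/",          // invalid: empty hash
+		"notibc/ABC123", // invalid: prefix must be "ibc"
+	} {
+		fmt.Println(denom, "=>", transfertypes.ValidateIBCDenom(denom))
+	}
+}
+```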
+
+The denomination trace info only needs to be updated when the token is received:
+
+- Receiver is **source** chain: The receiver created the token and must have the trace lookup already stored (if necessary; _i.e._ the native token case wouldn't need a lookup).
+- Receiver is **not source** chain: Store the received info. For example, during step 1, when chain `B` receives `transfer/channelToA/denom`.
+
+```golang
+// SendTransfer
+// ...
+
+ fullDenomPath := token.Denom
+
+// deconstruct the token denomination into the denomination trace info
+// to determine if the sender is the source chain
+if strings.HasPrefix(token.Denom, "ibc/") {
+ fullDenomPath, err = k.DenomPathFromHash(ctx, token.Denom)
+ if err != nil {
+ return err
+ }
+}
+
+if types.SenderChainIsSource(sourcePort, sourceChannel, fullDenomPath) {
+//...
+```
+
+```golang
+// DenomPathFromHash returns the full denomination path prefix from an ibc denom with a hash
+// component.
+func (k Keeper) DenomPathFromHash(ctx sdk.Context, denom string) (string, error) {
+ hexHash := denom[4:]
+ hash, err := ParseHexHash(hexHash)
+ if err != nil {
+ return "", Wrap(ErrInvalidDenomForTransfer, err.Error())
+ }
+
+ denomTrace, found := k.GetDenomTrace(ctx, hash)
+ if !found {
+ return "", Wrap(ErrTraceNotFound, hexHash)
+ }
+
+ fullDenomPath := denomTrace.GetFullDenomPath()
+ return fullDenomPath, nil
+}
+```
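+
+`GetFullDenomPath` is referenced above; a minimal sketch of its behaviour (joining the trace path and the
+base denomination, assuming that is all it does) looks like:
+
+```golang
+// GetFullDenomPath returns the trace path joined with the base denomination,
+// or just the base denomination when the trace path is empty.
+// (Sketch of the behaviour referenced above, not necessarily the module's code.)
+func (dt DenomTrace) GetFullDenomPath() string {
+	if dt.Path == "" {
+		return dt.BaseDenom
+	}
+	return dt.Path + "/" + dt.BaseDenom
+}
+```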
+
+
+```golang
+// OnRecvPacket
+// ...
+
+// This is the prefix that would have been prefixed to the denomination
+// on sender chain IF and only if the token originally came from the
+// receiving chain.
+//
+// NOTE: We use SourcePort and SourceChannel here, because the counterparty
+// chain would have prefixed with DestPort and DestChannel when originally
+// receiving this coin as seen in the "sender chain is the source" condition.
+if ReceiverChainIsSource(packet.GetSourcePort(), packet.GetSourceChannel(), data.Denom) {
+ // sender chain is not the source, unescrow tokens
+
+ // remove prefix added by sender chain
+ voucherPrefix := types.GetDenomPrefix(packet.GetSourcePort(), packet.GetSourceChannel())
+ unprefixedDenom := data.Denom[len(voucherPrefix):]
+ token := sdk.NewCoin(unprefixedDenom, sdk.NewIntFromUint64(data.Amount))
+
+ // unescrow tokens
+ escrowAddress := types.GetEscrowAddress(packet.GetDestPort(), packet.GetDestChannel())
+ return k.bankKeeper.SendCoins(ctx, escrowAddress, receiver, sdk.NewCoins(token))
+}
+
+// sender chain is the source, mint vouchers
+
+// since SendPacket did not prefix the denomination, we must prefix denomination here
+sourcePrefix := types.GetDenomPrefix(packet.GetDestPort(), packet.GetDestChannel())
+// NOTE: sourcePrefix contains the trailing "/"
+prefixedDenom := sourcePrefix + data.Denom
+
+// construct the denomination trace from the full raw denomination
+denomTrace := types.ParseDenomTrace(prefixedDenom)
+
+// set the value to the lookup table if not stored already
+traceHash := denomTrace.Hash()
+if !k.HasDenomTrace(ctx, traceHash) {
+ k.SetDenomTrace(ctx, denomTrace)
+}
+
+voucherDenom := denomTrace.IBCDenom()
+voucher := sdk.NewCoin(voucherDenom, sdk.NewIntFromUint64(data.Amount))
+
+// mint new tokens if the source of the transfer is the same chain
+if err := k.bankKeeper.MintCoins(
+ ctx, types.ModuleName, sdk.NewCoins(voucher),
+); err != nil {
+ return err
+}
+
+// send to receiver
+return k.bankKeeper.SendCoinsFromModuleToAccount(
+ ctx, types.ModuleName, receiver, sdk.NewCoins(voucher),
+)
+```
+
+```golang
+func NewDenomTraceFromRawDenom(denom string) DenomTrace {
+ denomSplit := strings.Split(denom, "/")
+ trace := ""
+ if len(denomSplit) > 1 {
+ trace = strings.Join(denomSplit[:len(denomSplit)-1], "/")
+ }
+ return DenomTrace{
+ BaseDenom: denomSplit[len(denomSplit)-1],
+ Path: trace,
+ }
+}
+```
+
+One final remark is that the `FungibleTokenPacketData` will remain the same, i.e. with the prefixed full denomination, since the receiving chain may not be an SDK-based chain.
+
+### Coin Changes
+
+The coin denomination validation will need to be updated to reflect these changes. In particular, the denomination validation
+function will now:
+
+- Accept slash separators (`"/"`) and uppercase characters (due to the `HexBytes` format)
+- Bump the maximum character length to 128, as the hex representation used by Tendermint's
+ `HexBytes` type contains 64 characters.
+
+Additional validation logic, such as verifying the length of the hash, may be added to the bank module in the future if the [custom base denomination validation](https://github.com/cosmos/cosmos-sdk/pull/6755) is integrated into the SDK.
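+
+As a rough sketch (illustrative only, not necessarily the SDK's exact pattern), a validation regex
+accepting these changes could look like:
+
+```golang
+package main
+
+import (
+	"fmt"
+	"regexp"
+)
+
+// reDenom sketches a denomination pattern that accepts '/' separators,
+// uppercase characters, and up to 128 total characters.
+var reDenom = regexp.MustCompile(`^[a-zA-Z][a-zA-Z0-9/]{2,127}$`)
+
+func main() {
+	fmt.Println(reDenom.MatchString("ibc/27A6394C3F9FF9C9DCF5DFFADF9BB5FE9A37C7E92B006199894CF1824DF9AC7C")) // true
+	fmt.Println(reDenom.MatchString("uatom"))                                                                 // true
+}
+```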
+
+## Consequences
+
+### Positive
+
+- Clearer separation of the source tracing behaviour of the token (transfer prefix) from the original
+ `Coin` denomination
+- Consistent validation of `Coin` fields (i.e no special characters, fixed max length)
+- Cleaner `Coin` and standard denominations for IBC
+- No additional fields to SDK `Coin`
+
+### Negative
+
+- Store each set of tracing denomination identifiers on the `ibc-transfer` module store
+- Clients will have to fetch the base denomination every time they receive a new relayed fungible token over IBC. This can be mitigated using a map/cache for already seen hashes on the client side. Another form of mitigation would be opening a websocket connection to subscribe to incoming events.
+
+### Neutral
+
+- Slight difference with the ICS20 spec
+- Additional validation logic for IBC coins on the `ibc-transfer` module
+- Additional genesis fields
+- Slightly increases the gas usage on cross-chain transfers due to access to the store. This should
+ be inter-block cached if transfers are frequent.
+
+## References
+
+- [ICS 20 - Fungible token transfer](https://github.com/cosmos/ibc/tree/master/spec/app/ics-020-fungible-token-transfer)
+- [Custom Coin Denomination validation](https://github.com/cosmos/cosmos-sdk/pull/6755)
diff --git a/docs/architecture/adr-015-ibc-packet-receiver.md b/docs/architecture/adr-015-ibc-packet-receiver.md
new file mode 100644
index 00000000..77059fd8
--- /dev/null
+++ b/docs/architecture/adr-015-ibc-packet-receiver.md
@@ -0,0 +1,299 @@
+# ADR 015: IBC Packet Receiver
+
+## Changelog
+
+- 2019 Oct 22: Initial Draft
+
+## Context
+
+[ICS 26 - Routing Module](https://github.com/cosmos/ibc/tree/master/spec/core/ics-026-routing-module) defines a function [`handlePacketRecv`](https://github.com/cosmos/ibc/tree/master/spec/core/ics-026-routing-module#packet-relay).
+
+In ICS 26, the routing module is defined as a layer above each application module
+which verifies and routes messages to the destination modules. It is possible to
+implement it as a separate module, however, we already have functionality to route
+messages upon the destination identifiers in the baseapp. This ADR suggests
+to utilize existing `baseapp.router` to route packets to application modules.
+
+Generally, routing module callbacks have two separate steps in them,
+verification and execution. This corresponds to the `AnteHandler`-`Handler`
+model inside the SDK. We can do the verification inside the `AnteHandler`
+in order to increase developer ergonomics by reducing boilerplate
+verification code.
+
+For an atomic multi-message transaction, we want the IBC-related
+state modifications to be preserved even if the application-side state change
+reverts. One example might be an IBC token sending message followed by a
+stake delegation which uses the tokens received by the previous packet message.
+If the token receiving fails for any reason, we might not want to keep
+executing the transaction, but we also don't want to abort the transaction,
+or the sequence and commitment will be reverted and the channel will be stuck.
+This ADR suggests a new `CodeType`, `CodeTxBreak`, to fix this problem.
+
+## Decision
+
+`PortKeeper` will have the capability key that is able to access only the
+channels bound to the port. Entities that hold a `PortKeeper` will be
+able to call the methods on it which correspond to the methods with
+the same names on the `ChannelKeeper`, but only for the
+allowed port. `ChannelKeeper.Port(string, ChannelChecker)` will be defined to
+easily construct a capability-safe `PortKeeper`. This will be addressed in
+another ADR and we will use the insecure `ChannelKeeper` for now.
+
+`baseapp.runMsgs` will break the loop over the messages if one of the handlers
+returns `!Result.IsOK()`. However, the outer logic will write the cached
+store if `Result.IsOK() || Result.Code.IsBreak()`. `Result.Code.IsBreak()` is true if
+`Result.Code == CodeTxBreak`.
+
+```go
+func (app *BaseApp) runTx(tx Tx) (result Result) {
+ msgs := tx.GetMsgs()
+
+ // AnteHandler
+ if app.anteHandler != nil {
+ anteCtx, msCache := app.cacheTxContext(ctx)
+ newCtx, err := app.anteHandler(anteCtx, tx)
+ if !newCtx.IsZero() {
+ ctx = newCtx.WithMultiStore(ms)
+ }
+
+ if err != nil {
+ // error handling logic
+ return res
+ }
+
+ msCache.Write()
+ }
+
+ // Main Handler
+ runMsgCtx, msCache := app.cacheTxContext(ctx)
+ result = app.runMsgs(runMsgCtx, msgs)
+ // BEGIN modification made in this ADR
+ if result.IsOK() || result.IsBreak() {
+ // END
+ msCache.Write()
+ }
+
+ return result
+}
+```
+
+The Cosmos SDK will define an `AnteDecorator` for IBC packet receiving. The
+`AnteDecorator` will iterate over the messages included in the transaction, type
+`switch` to check whether the message contains an incoming IBC packet, and if so
+verify the Merkle proof.
+
+```go
+type ProofVerificationDecorator struct {
+ clientKeeper ClientKeeper
+ channelKeeper ChannelKeeper
+}
+
+func (pvr ProofVerificationDecorator) AnteHandle(ctx Context, tx Tx, simulate bool, next AnteHandler) (Context, error) {
+ for _, msg := range tx.GetMsgs() {
+ var err error
+ switch msg := msg.(type) {
+ case client.MsgUpdateClient:
+ err = pvr.clientKeeper.UpdateClient(msg.ClientID, msg.Header)
+ case channel.MsgPacket:
+ err = pvr.channelKeeper.RecvPacket(msg.Packet, msg.Proofs, msg.ProofHeight)
+ case channel.MsgAcknowledgement:
+ err = pvr.channelKeeper.AcknowledgementPacket(msg.Acknowledgement, msg.Proof, msg.ProofHeight)
+ case channel.MsgTimeoutPacket:
+ err = pvr.channelKeeper.TimeoutPacket(msg.Packet, msg.Proof, msg.ProofHeight, msg.NextSequenceRecv)
+ case channel.MsgChannelOpenInit:
+ err = pvr.channelKeeper.CheckOpen(msg.PortID, msg.ChannelID, msg.Channel)
+ default:
+ continue
+ }
+
+ if err != nil {
+ return ctx, err
+ }
+ }
+
+ return next(ctx, tx, simulate)
+}
+```
+
+Where `MsgUpdateClient`, `MsgPacket`, `MsgAcknowledgement`, `MsgTimeoutPacket`
+are `sdk.Msg` types corresponding to `handleUpdateClient`, `handleRecvPacket`,
+`handleAcknowledgementPacket`, `handleTimeoutPacket` of the routing module,
+respectively.
+
+The side effects of `RecvPacket`, `VerifyAcknowledgement`,
+`VerifyTimeout` will be extracted out into separate functions,
+`WriteAcknowledgement`, `DeleteCommitment`, `DeleteCommitmentTimeout`, respectively,
+which will be called by the application handlers after the execution.
+
+`WriteAcknowledgement` writes the acknowledgement to the state that can be
+verified by the counter-party chain and increments the sequence to prevent
+double execution. `DeleteCommitment` will delete the commitment stored,
+`DeleteCommitmentTimeout` will delete the commitment and close the channel in the case
+of an ordered channel.
+
+```go
+func (keeper ChannelKeeper) WriteAcknowledgement(ctx Context, packet Packet, ack []byte) {
+ keeper.SetPacketAcknowledgement(ctx, packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence(), ack)
+ keeper.SetNextSequenceRecv(ctx, packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence())
+}
+
+func (keeper ChannelKeeper) DeleteCommitment(ctx Context, packet Packet) {
+ keeper.deletePacketCommitment(ctx, packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence())
+}
+
+func (keeper ChannelKeeper) DeleteCommitmentTimeout(ctx Context, packet Packet) {
+ keeper.deletePacketCommitment(ctx, packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence())
+
+ // fetch the channel so its state can be updated for ordered channels
+ channel, _ := keeper.GetChannel(ctx, packet.GetSourcePort(), packet.GetSourceChannel())
+ if channel.Ordering == types.ORDERED {
+ channel.State = types.CLOSED
+ keeper.SetChannel(ctx, packet.GetSourcePort(), packet.GetSourceChannel(), channel)
+ }
+}
+```
+
+Each application handler should call the respective finalization methods on the `PortKeeper`
+in order to increase the sequence (in the case of a packet) or remove the commitment
+(in the case of an acknowledgement or timeout).
+Calling those functions implies that the application logic has successfully executed.
+However, the handlers can return a `Result` with `CodeTxBreak` after calling those methods,
+which will persist the state changes that have already been made but prevent any further
+messages from being executed in the case of a semantically invalid packet. This will keep the sequence
+increased by the previous IBC packets (thus preventing double execution) without
+proceeding to the following messages.
+In any case, the application modules should never return a state-reverting result,
+which would make the channel unable to proceed.
+
+A `ChannelKeeper.CheckOpen` method will be introduced. This will replace `onChanOpen*` defined
+under the routing module specification. Instead of defining each channel handshake callback
+function, application modules can provide a `ChannelChecker` function with the `AppModule`,
+which will be injected into `ChannelKeeper.Port()` at the top-level application.
+`CheckOpen` will find the correct `ChannelChecker` using the
+`PortID` and call it, which will return an error if the channel is unacceptable to the application.
+
+The `ProofVerificationDecorator` will be inserted into the top-level application.
+It is not safe to make each module responsible for calling the proof verification
+logic, as an application could misbehave (in terms of the IBC protocol) by
+mistake.
+
+The `ProofVerificationDecorator` should come right after the default Sybil attack
+resistant layer from the current `auth.NewAnteHandler`:
+
+```go
+// add the IBC ProofVerificationDecorator to the chain of AnteDecorators
+func NewAnteHandler(
+ ak keeper.AccountKeeper, supplyKeeper types.SupplyKeeper, ibcKeeper ibc.Keeper,
+ sigGasConsumer SignatureVerificationGasConsumer) sdk.AnteHandler {
+ return sdk.ChainAnteDecorators(
+ NewSetUpContextDecorator(), // outermost AnteDecorator. SetUpContext must be called first
+ ...
+ NewIncrementSequenceDecorator(ak),
+ ibcante.ProofVerificationDecorator(ibcKeeper.ClientKeeper, ibcKeeper.ChannelKeeper), // innermost AnteDecorator
+ )
+}
+```
+
+The implementation of this ADR will also create a `Data` field of the `Packet` of type `[]byte`, which can be deserialised by the receiving module into its own private type. It is up to the application modules to do this according to their own interpretation, rather than the IBC keeper. This is crucial for dynamic IBC.
+
+Example application-side usage:
+
+```go
+type AppModule struct {}
+
+// CheckChannel will be provided to the ChannelKeeper as ChannelKeeper.Port(module.CheckChannel)
+func (module AppModule) CheckChannel(portID, channelID string, channel Channel) error {
+ if channel.Ordering != UNORDERED {
+ return ErrUncompatibleOrdering()
+ }
+ if channel.CounterpartyPort != "bank" {
+ return ErrUncompatiblePort()
+ }
+ if channel.Version != "" {
+ return ErrUncompatibleVersion()
+ }
+ return nil
+}
+
+func NewHandler(k Keeper) Handler {
+ return func(ctx Context, msg Msg) Result {
+ switch msg := msg.(type) {
+ case MsgTransfer:
+ return handleMsgTransfer(ctx, k, msg)
+ case ibc.MsgPacket:
+ var data PacketDataTransfer
+ if err := types.ModuleCodec.UnmarshalBinaryBare(msg.GetData(), &data); err != nil {
+ return err
+ }
+ return handlePacketDataTransfer(ctx, k, msg, data)
+ case ibc.MsgTimeoutPacket:
+ var data PacketDataTransfer
+ if err := types.ModuleCodec.UnmarshalBinaryBare(msg.GetData(), &data); err != nil {
+ return err
+ }
+ return handleTimeoutPacketDataTransfer(ctx, k, msg, data)
+ // interface { PortID() string; ChannelID() string; Channel() ibc.Channel }
+ // MsgChanInit, MsgChanTry implements ibc.MsgChannelOpen
+ case ibc.MsgChannelOpen:
+ return handleMsgChannelOpen(ctx, k, msg)
+ }
+ }
+}
+
+func handleMsgTransfer(ctx Context, k Keeper, msg MsgTransfer) Result {
+ err := k.SendTransfer(ctx, msg.PortID, msg.ChannelID, msg.Amount, msg.Sender, msg.Receiver)
+ if err != nil {
+ return sdk.ResultFromError(err)
+ }
+
+ return sdk.Result{}
+}
+
+func handlePacketDataTransfer(ctx Context, k Keeper, packet Packet, data PacketDataTransfer) Result {
+ err := k.ReceiveTransfer(ctx, packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetDestinationPort(), packet.GetDestinationChannel(), data)
+ if err != nil {
+ // TODO: Source chain sent invalid packet, shutdown channel
+ }
+ k.ChannelKeeper.WriteAcknowledgement(ctx, packet, []byte{0x00}) // WriteAcknowledgement increases the sequence, preventing double spending
+ return sdk.Result{}
+}
+
+func handleCustomTimeoutPacket(ctx Context, k Keeper, packet CustomPacket) Result {
+ err := k.RecoverTransfer(ctx, packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetDestinationPort(), packet.GetDestinationChannel(), data)
+ if err != nil {
+ // This chain sent invalid packet or cannot recover the funds
+ panic(err)
+ }
+ k.ChannelKeeper.DeleteCommitmentTimeout(ctx, packet)
+ // packet timeout should not fail
+ return sdk.Result{}
+}
+
+func handleMsgChannelOpen(ctx sdk.Context, k Keeper, msg MsgOpenChannel) Result {
+ k.AllocateEscrowAddress(ctx, msg.ChannelID())
+ return sdk.Result{}
+}
+```
+
+## Status
+
+Proposed
+
+## Consequences
+
+### Positive
+
+- Intuitive interface for developers - IBC handlers do not need to care about IBC authentication
+- State change commitment logic is embedded into `baseapp.runTx` logic
+
+### Negative
+
+- Cannot support dynamic ports, routing is tied to the baseapp router
+
+### Neutral
+
+- Introduces a new `AnteHandler` decorator.
+- Dynamic ports can be supported using hierarchical port identifiers, see #5290 for details
+
+## References
+
+- Relevant comment: [cosmos/ics#289](https://github.com/cosmos/ics/issues/289#issuecomment-544533583)
+- [ICS26 - Routing Module](https://github.com/cosmos/ics/blob/master/spec/ics-026-routing-module)
diff --git a/docs/architecture/adr-025-ibc-passive-channels.md b/docs/architecture/adr-025-ibc-passive-channels.md
new file mode 100644
index 00000000..9b39da98
--- /dev/null
+++ b/docs/architecture/adr-025-ibc-passive-channels.md
@@ -0,0 +1,141 @@
+# ADR 025: IBC Passive Channels
+
+## Changelog
+
+- 2021-04-23: Change status to "deprecated"
+- 2020-05-23: Provide sample Go code and more details
+- 2020-05-18: Initial Draft
+
+## Status
+
+*deprecated*
+
+## Context
+
+The current "naive" IBC Relayer strategy establishes a single predetermined IBC channel atop a single connection between two clients (each potentially of a different chain). This strategy then detects packets to be relayed by watching for `send_packet` and `recv_packet` events matching that channel, and sends the necessary transactions to relay those packets.
+
+We wish to expand this "naive" strategy to a "passive" one which detects and relays both channel handshake messages and packets on a given connection, without the need to know each channel in advance of relaying it.
+
+In order to accomplish this, we propose adding more comprehensive events to expose channel metadata for each transaction sent from the `x/ibc/core/04-channel/keeper/handshake.go` and `x/ibc/core/04-channel/keeper/packet.go` modules.
+
+Here is an example of what would be in `ChanOpenInit`:
+
+```go
+const (
+ EventTypeChannelMeta = "channel_meta"
+ AttributeKeyAction = "action"
+ AttributeKeySrcConnection = "src_connection"
+ AttributeKeyHops = "hops"
+ AttributeKeyOrder = "order"
+ AttributeKeySrcPort = "src_port"
+ AttributeKeySrcChannel = "src_channel"
+ AttributeKeySrcVersion = "src_version"
+ AttributeKeyDstPort = "dst_port"
+ AttributeKeyDstChannel = "dst_channel"
+ AttributeKeyDstVersion = "dst_version"
+)
+// ...
+ // Emit Event with Channel metadata for the relayer to pick up and
+ // relay to the other chain
+ // This appears immediately before the successful return statement.
+ ctx.EventManager().EmitEvents(sdk.Events{
+ sdk.NewEvent(
+ types.EventTypeChannelMeta,
+ sdk.NewAttribute(types.AttributeKeyAction, "open_init"),
+ sdk.NewAttribute(types.AttributeKeySrcConnection, connectionHops[0]),
+ sdk.NewAttribute(types.AttributeKeyHops, strings.Join(connectionHops, ",")),
+ sdk.NewAttribute(types.AttributeKeyOrder, order.String()),
+ sdk.NewAttribute(types.AttributeKeySrcPort, portID),
+ sdk.NewAttribute(types.AttributeKeySrcChannel, channelID),
+ sdk.NewAttribute(types.AttributeKeySrcVersion, version),
+ sdk.NewAttribute(types.AttributeKeyDstPort, counterparty.GetPortID()),
+ sdk.NewAttribute(types.AttributeKeyDstChannel, counterparty.GetChannelID()),
+ // The destination version is not yet known, but a value is necessary to pad
+ // the event attribute offsets
+ sdk.NewAttribute(types.AttributeKeyDstVersion, ""),
+ ),
+ })
+```
+
+These metadata events capture all the "header" information needed to route IBC channel handshake transactions without requiring the client to query any data except that of the connection ID that it is willing to relay. It is intended that `channel_meta.src_connection` is the only event key that needs to be indexed for a passive relayer to function.
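+
+As an illustrative sketch (the connection ID and the subscription mechanism are assumptions, not part of
+this ADR), a passive relayer could build its event query against that single key as follows:
+
+```go
+package main
+
+import "fmt"
+
+func main() {
+	// Tendermint event query filtering on the single indexed key described above.
+	// The resulting string would be passed to a Tendermint RPC event subscription
+	// (e.g. over the /websocket endpoint) to receive matching transactions.
+	connectionID := "connection-0"
+	query := fmt.Sprintf("tm.event='Tx' AND channel_meta.src_connection='%s'", connectionID)
+	fmt.Println(query)
+}
+```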
+
+### Handling Channel Open Attempts
+
+In the case of the passive relayer, when one chain sends a `ChanOpenInit`, the relayer should inform the other chain of this open attempt and allow that chain to decide how (and if) it continues the handshake. Once both chains have actively approved the channel opening, then the rest of the handshake can happen as it does with the current "naive" relayer.
+
+To implement this behavior, we propose replacing the `cbs.OnChanOpenTry` callback with a new `cbs.OnAttemptChanOpenTry` callback which explicitly handles the `MsgChannelOpenTry`, usually by resulting in a call to `keeper.ChanOpenTry`. The typical implementation, in `x/ibc-transfer/module.go`, would be compatible with the current "naive" relayer, as follows:
+
+```go
+func (am AppModule) OnAttemptChanOpenTry(
+ ctx sdk.Context,
+ chanKeeper channel.Keeper,
+ portCap *capability.Capability,
+ msg channel.MsgChannelOpenTry,
+) (*sdk.Result, error) {
+ // Require portID is the portID transfer module is bound to
+ boundPort := am.keeper.GetPort(ctx)
+ if boundPort != msg.PortID {
+ return nil, sdkerrors.Wrapf(porttypes.ErrInvalidPort, "invalid port: %s, expected %s", msg.PortID, boundPort)
+ }
+
+ // BEGIN NEW CODE
+ // Assert our protocol version, overriding the relayer's suggestion.
+ msg.Version = types.Version
+ // Continue the ChanOpenTry.
+ res, chanCap, err := channel.HandleMsgChannelOpenTry(ctx, chanKeeper, portCap, msg)
+ if err != nil {
+ return nil, err
+ }
+ // END OF NEW CODE
+
+ // ... the rest of the callback is similar to the existing OnChanOpenTry
+ // but uses msg.* directly.
+```
+
+Here is how this callback would be used, in the implementation of `x/ibc/handler.go`:
+
+```go
+// ...
+ case channel.MsgChannelOpenTry:
+ // Lookup module by port capability
+ module, portCap, err := k.PortKeeper.LookupModuleByPort(ctx, msg.PortID)
+ if err != nil {
+ return nil, sdkerrors.Wrap(err, "could not retrieve module from port-id")
+ }
+ // Retrieve callbacks from router
+ cbs, ok := k.Router.GetRoute(module)
+ if !ok {
+ return nil, sdkerrors.Wrapf(port.ErrInvalidRoute, "route not found to module: %s", module)
+ }
+ // Delegate to the module's OnAttemptChanOpenTry.
+ return cbs.OnAttemptChanOpenTry(ctx, k.ChannelKeeper, portCap, msg)
+```
+
+The reason we do not have a more structured interaction between `x/ibc/handler.go` and the port's module (to explicitly negotiate versions, etc) is that we do not wish to constrain the app module to have to finish handling the `MsgChannelOpenTry` during this transaction or even this block.
+
+## Decision
+
+- Expose events to allow "passive" connection relayers.
+- Enable application-initiated channels via such passive relayers.
+- Allow port modules to control how to handle open-try messages.
+
+## Consequences
+
+### Positive
+
+Makes channels into a complete application-level abstraction.
+
+Applications have full control over initiating and accepting channels, rather than expecting a relayer to tell them when to do so.
+
+A passive relayer does not have to know what kind of channel (version string, ordering constraints, firewalling logic) the application supports. These are negotiated directly between applications.
+
+### Negative
+
+Increased event size for IBC messages.
+
+### Neutral
+
+More IBC events are exposed.
+
+## References
+
+- The Agoric VM's IBC handler currently [accommodates `attemptChanOpenTry`](https://github.com/Agoric/agoric-sdk/blob/904b3a0423222a1b32893453e44bbde598473960/packages/cosmic-swingset/lib/ag-solo/vats/ibc.js#L546)
diff --git a/docs/architecture/adr-026-ibc-client-recovery-mechanisms.md b/docs/architecture/adr-026-ibc-client-recovery-mechanisms.md
new file mode 100644
index 00000000..2e33bf58
--- /dev/null
+++ b/docs/architecture/adr-026-ibc-client-recovery-mechanisms.md
@@ -0,0 +1,82 @@
+# ADR 026: IBC Client Recovery Mechanisms
+
+## Changelog
+
+- 2020/06/23: Initial version
+- 2020/08/06: Revisions per review & to reference version
+- 2021/01/15: Revision to support substitute clients for unfreezing
+
+## Status
+
+*Accepted*
+
+## Context
+
+### Summary
+
+At launch, IBC will be a novel protocol, without an experienced user-base. At the protocol layer, it is not possible to distinguish between client expiry or misbehaviour due to genuine faults (Byzantine behavior) and client expiry or misbehaviour due to user mistakes (failing to update a client, or accidentally double-signing). In the base IBC protocol and ICS 20 fungible token transfer implementation, if a client can no longer be updated, funds in that channel will be permanently locked and can no longer be transferred. To the degree that it is safe to do so, it would be preferable to provide users with a recovery mechanism which can be utilised in these exceptional cases.
+
+### Exceptional cases
+
+The state of concern is where a client associated with connection(s) and channel(s) can no longer be updated. This can happen for several reasons:
+
+1. The chain which the client is following has halted and is no longer producing blocks/headers, so no updates can be made to the client
+1. The chain which the client is following has continued to operate, but no relayer has submitted a new header within the unbonding period, and the client has expired
+ 1. This could be due to real misbehaviour (intentional Byzantine behaviour) or merely a mistake by validators, but the client cannot distinguish these two cases
+1. The chain which the client is following has experienced a misbehaviour event, and the client has been frozen & thus can no longer be updated
+
+### Security model
+
+Two-thirds of the validator set (the quorum for governance, module participation) can already sign arbitrary data, so allowing governance to manually force-update a client with a new header after a delay period does not substantially alter the security model.
+
+## Decision
+
+We elect not to deal with chains which have actually halted, which is necessarily Byzantine behaviour and in which case token recovery is not likely possible anyways (in-flight packets cannot be timed-out, but the relative impact of that is minor).
+
+1. Require Tendermint light clients (ICS 07) to be created with the following additional flags
+ 1. `allow_governance_override_after_expiry` (boolean, default false)
+1. Require Tendermint light clients (ICS 07) to expose the following additional internal query functions
+ 1. `Expired() boolean`, which returns whether or not the client has passed the trusting period since the last update (in which case no headers can be validated)
+1. Require Tendermint light clients (ICS 07) & solo machine clients (ICS 06) to be created with the following additional flags
+ 1. `allow_governance_override_after_misbehaviour` (boolean, default false)
+1. Require Tendermint light clients (ICS 07) to expose the following additional state mutation functions
+ 1. `Unfreeze()`, which unfreezes a light client after misbehaviour and clears any frozen height previously set
+1. Add a new governance proposal type, `ClientUpdateProposal`, in the `x/ibc` module
+ 1. Extend the base `Proposal` with two client identifiers (`string`) and an initial height (`exported.Height`).
+ 1. The first client identifier is the proposed client to be updated. This client must be either frozen or expired.
+ 1. The second client is a substitute client. It carries all the state for the client which may be updated. It must have identical client and chain parameters to the client which may be updated (except for latest height, frozen height, and chain-id). It should be continually updated during the voting period.
+ 1. The initial height represents the starting height of the consensus states which will be copied from the substitute client to the frozen/expired client.
+ 1. If this governance proposal passes, the client on trial will be updated with all the state of the substitute, if and only if:
+ 1. `allow_governance_override_after_expiry` is true and the client has expired (`Expired()` returns true)
+ 1. `allow_governance_override_after_misbehaviour` is true and the client has been frozen (`Frozen()` returns true)
+ 1. In this case, additionally, the client is unfrozen by calling `Unfreeze()`
+
+
+Note that clients frozen due to misbehaviour must wait for the evidence to expire to avoid becoming refrozen.
+
+This ADR does not address planned upgrades, which are handled separately as per the [specification](https://github.com/cosmos/ibc/tree/master/spec/client/ics-007-tendermint-client#upgrades).
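+
+For reference, a minimal Go sketch of the proposal content described above (field names are illustrative;
+the canonical definition lives in the `x/ibc` protobuf files):
+
+```go
+// Height is a simplified stand-in for the IBC client height type (`exported.Height`).
+type Height struct {
+	RevisionNumber uint64
+	RevisionHeight uint64
+}
+
+// ClientUpdateProposal sketches the governance proposal content described above.
+type ClientUpdateProposal struct {
+	Title       string // standard governance proposal title
+	Description string // standard governance proposal description
+
+	SubjectClientID    string // the frozen or expired client to be updated
+	SubstituteClientID string // the client whose state is copied to the subject client
+	InitialHeight      Height // starting height of the consensus states to copy
+}
+```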
+
+## Consequences
+
+### Positive
+
+- Establishes a mechanism for client recovery in the case of expiry
+- Establishes a mechanism for client recovery in the case of misbehaviour
+- Clients can elect to disallow this recovery mechanism if they do not wish to allow for it
+- Constructing a ClientUpdate Proposal is as difficult as creating a new client
+
+### Negative
+
+- Additional complexity in client creation which must be understood by the user
+- Copying the state of the substitute adds complexity
+- Governance participants must vote on a substitute client
+
+### Neutral
+
+No neutral consequences.
+
+## References
+
+- [Prior discussion](https://github.com/cosmos/ics/issues/421)
+- [Epoch number discussion](https://github.com/cosmos/ics/issues/439)
+- [Upgrade plan discussion](https://github.com/cosmos/ics/issues/445)
diff --git a/docs/architecture/adr-template.md b/docs/architecture/adr-template.md
new file mode 100644
index 00000000..129ddfa8
--- /dev/null
+++ b/docs/architecture/adr-template.md
@@ -0,0 +1,37 @@
+# ADR {ADR-NUMBER}: {TITLE}
+
+## Changelog
+* {date}: {changelog}
+
+## Status
+
+> A decision may be "proposed" if it hasn't been agreed upon yet, or "accepted" once it is agreed upon. If a later ADR changes or reverses a decision, it may be marked as "deprecated" or "superseded" with a reference to its replacement.
+
+{Deprecated|Proposed|Accepted}
+
+## Context
+
+> This section contains all the context one needs to understand the current state, and why there is a problem. It should be as succinct as possible and introduce the high level idea behind the solution.
+
+## Decision
+
+> This section explains all of the details of the proposed solution, including implementation details.
+It should also describe effects / corollary items that may need to be changed as a part of this.
+If the proposed change will be large, please also indicate a way to do the change to maximize ease of review.
+(e.g. the optimal split of things to do between separate PRs)
+
+## Consequences
+
+> This section describes the consequences, after applying the decision. All consequences should be summarized here, not just the "positive" ones.
+
+### Positive
+
+### Negative
+
+### Neutral
+
+## References
+
+> Are there any relevant PR comments, issues that led up to this, or articles referenced for why we made the given design choice? If so link them here!
+
+* {reference link}
From cd7077e3ff63ad146773bc3a2ec9c92d20bd8bc1 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?colin=20axn=C3=A9r?=
<25233464+colin-axner@users.noreply.github.com>
Date: Mon, 26 Apr 2021 16:24:10 +0200
Subject: [PATCH 042/393] contributing docs (#137)
* add contribution docs
* add squash part
---
CONTRIBUTING.md | 252 ++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 252 insertions(+)
create mode 100644 CONTRIBUTING.md
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 00000000..b1111fc6
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,252 @@
+# Contributing
+
+- [Contributing](#contributing)
+ - [Architecture Decision Records (ADR)](#architecture-decision-records-adr)
+ - [Pull Requests](#pull-requests)
+ - [Process for reviewing PRs](#process-for-reviewing-prs)
+ - [Updating Documentation](#updating-documentation)
+ - [Forking](#forking)
+ - [Dependencies](#dependencies)
+ - [Protobuf](#protobuf)
+ - [Testing](#testing)
+ - [Branching Model and Release](#branching-model-and-release)
+ - [PR Targeting](#pr-targeting)
+ - [Development Procedure](#development-procedure)
+ - [Pull Merge Procedure](#pull-merge-procedure)
+ - [Release Procedure](#release-procedure)
+ - [Point Release Procedure](#point-release-procedure)
+
+Thank you for considering making contributions to ibc-go!
+
+Contributing to this repo can mean many things such as participating in
+discussion or proposing code changes. To ensure a smooth workflow for all
+contributors, the general procedure for contributing has been established:
+
+1. Either [open](https://github.com/cosmos/ibc-go/issues/new/choose) or
+ [find](https://github.com/cosmos/ibc-go/issues) an issue you'd like to help with
+2. Participate in thoughtful discussion on that issue
+3. If you would like to contribute:
+ 1. If the issue is a proposal, ensure that the proposal has been accepted
+ 2. Ensure that nobody else has already begun working on this issue. If they have,
+ make sure to contact them to collaborate
+ 3. If nobody has been assigned for the issue and you would like to work on it,
+ make a comment on the issue to inform the community of your intentions
+ to begin work
+ 4. Follow standard Github best practices: fork the repo, branch from the
+ HEAD of `main`, make some commits, and submit a PR to `main`
+ - For core developers working within the ibc-go repo, to ensure a clear
+ ownership of branches, branches must be named with the convention
+ `{moniker}/{issue#}-branch-name`
+ 5. Be sure to submit the PR in `Draft` mode. Submit your PR early, even if
+ it's incomplete, as this indicates to the community that you're working on
+ something and allows them to provide comments early in the development process
+ 6. When the code is complete it can be marked `Ready for Review`
+ 7. Be sure to include a relevant change log entry in the `Unreleased` section
+ of `CHANGELOG.md` (see file for log format)
+
+Note that for very small or blatantly obvious problems (such as typos) it is
+not required to open an issue to submit a PR, but be aware that for more complex
+problems/features, if a PR is opened before an adequate design discussion has
+taken place in a github issue, that PR runs a high likelihood of being rejected.
+
+Other notes:
+
+- Looking for a good place to start contributing? How about checking out some
+ [good first issues](https://github.com/cosmos/ibc-go/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22)
+- Please make sure to run `make format` before every commit - the easiest way
+ to do this is to have your editor run it for you upon saving a file. Additionally,
+ please ensure that your code is lint compliant by running `golangci-lint run`.
+
+## Architecture Decision Records (ADR)
+
+When proposing an architecture decision for ibc-go, please create an [ADR](./docs/architecture/README.md)
+so that further discussions can be made. We are following this process so all involved parties are in
+agreement before any party begins coding the proposed implementation. If you would like to see some examples
+of how these are written, refer to the [Cosmos SDK ADRs](https://github.com/cosmos/cosmos-sdk/tree/master/docs/architecture).
+
+## Pull Requests
+
+To accommodate the review process, we suggest that PRs are categorically broken up.
+Ideally each PR addresses only a single issue. Additionally, as much as possible,
+code refactoring and cleanup should be submitted as separate PRs from bugfixes/feature additions.
+
+### Process for reviewing PRs
+
+All PRs require an approval from at least one CODEOWNER before merge. PRs which cause significant changes require two approvals from CODEOWNERS. When reviewing PRs, please use the following review explanations:
+
+- `LGTM` without an explicit approval means that the changes look good, but you haven't pulled down the code, run tests locally and thoroughly reviewed it.
+- `Approval` through the GH UI means that you understand the code, documentation/spec is updated in the right places, you have pulled down and tested the code locally. In addition:
+ - You must also think through anything which ought to be included but is not
+ - You must think through whether any added code could be partially combined (DRYed) with existing code
+ - You must think through any potential security issues or incentive-compatibility flaws introduced by the changes
+ - Naming must be consistent with conventions and the rest of the codebase
+ - Code must live in a reasonable location, considering dependency structures (e.g. not importing testing modules in production code, or including example code modules in production code).
+ - if you approve of the PR, you are responsible for fixing any of the issues mentioned here and more
+- If you sat down with the PR submitter and did a pairing review please note that in the `Approval`, or your PR comments.
+- If you are only making "surface level" reviews, submit any notes as `Comments` without adding a review.
+
+### Updating Documentation
+
+If you open a PR on ibc-go, it is mandatory to update the relevant documentation in /docs.
+
+## Forking
+
+Please note that Go requires code to live under absolute paths, which complicates forking.
+While my fork lives at `https://github.com/colin-axner/ibc-go`,
+the code should never exist at `$GOPATH/src/github.com/colin-axner/ibc-go`.
+Instead, we use `git remote` to add the fork as a new remote for the original repo,
+`$GOPATH/src/github.com/cosmos/ibc-go`, and do all the work there.
+
+For instance, to create a fork and work on a branch of it, I would:
+
+- Create the fork on github, using the fork button.
+- Go to the original repo checked out locally (i.e. `$GOPATH/src/github.com/cosmos/ibc-go`)
+- `git remote add fork git@github.com:colin-axner/ibc-go.git`
+
+Now `fork` refers to my fork and `origin` refers to the ibc-go version.
+So I can `git push -u fork main` to update my fork, and make pull requests to ibc-go from there.
+Of course, replace `colin-axner` with your git handle.
+
+To pull in updates from the origin repo, run
+
+- `git fetch origin`
+- `git rebase origin/main` (or whatever branch you want)
+
+Please don't make Pull Requests from `main`.
+
+## Dependencies
+
+We use [Go 1.14 Modules](https://github.com/golang/go/wiki/Modules) to manage
+dependency versions.
+
+The main branch of every Cosmos repository should just build with `go get`,
+which means they should be kept up-to-date with their dependencies, so we can
+get away with telling people they can just `go get` our software.
+
+Since some dependencies are not under our control, a third party may break our
+build, in which case we can fall back on `go mod tidy -v`.
+
+## Protobuf
+
+We use [Protocol Buffers](https://developers.google.com/protocol-buffers) along with [gogoproto](https://github.com/gogo/protobuf) to generate code for use in ibc-go.
+
+For deterministic behavior around Protobuf tooling, everything is containerized using Docker. Make sure to have Docker installed on your machine, or head to [Docker's website](https://docs.docker.com/get-docker/) to install it.
+
+For formatting code in `.proto` files, you can run the `make proto-format` command.
+
+For linting and checking breaking changes, we use [buf](https://buf.build/). You can use the commands `make proto-lint` and `make proto-check-breaking` to respectively lint your proto files and check for breaking changes.
+
+To generate the protobuf stubs, you can run `make proto-gen`.
+
+We also added the `make proto-all` command to run all the above commands sequentially.
+
+In order for imports to properly compile in your IDE, you may need to manually set your protobuf path in your IDE's workspace settings/config.
+
+For example, in vscode your `.vscode/settings.json` should look like:
+
+```json
+{
+ "protoc": {
+ "options": [
+ "--proto_path=${workspaceRoot}/proto",
+ "--proto_path=${workspaceRoot}/third_party/proto"
+ ]
+ }
+}
+```
+
+## Testing
+
+All Go tests in ibc-go can be run with `make test`.
+
+When testing a function under a variety of different inputs, we prefer to use
+[table driven tests](https://github.com/golang/go/wiki/TableDrivenTests).
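+
+A minimal sketch of the pattern (illustrative only; the function under test and its cases are hypothetical, not taken from ibc-go):
+
+```go
+package example
+
+import "testing"
+
+// abs is a hypothetical function under test.
+func abs(x int) int {
+	if x < 0 {
+		return -x
+	}
+	return x
+}
+
+func TestAbs(t *testing.T) {
+	testCases := []struct {
+		name     string
+		input    int
+		expected int
+	}{
+		{"positive", 5, 5},
+		{"negative", -5, 5},
+		{"zero", 0, 0},
+	}
+
+	for _, tc := range testCases {
+		tc := tc // capture range variable for the subtest closure
+		t.Run(tc.name, func(t *testing.T) {
+			if got := abs(tc.input); got != tc.expected {
+				t.Errorf("abs(%d) = %d, expected %d", tc.input, got, tc.expected)
+			}
+		})
+	}
+}
+```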
+
+All tests should use the testing package. Please see the testing package [README](./testing/README.md) for more information.
+
+
+## Branching Model and Release
+
+User-facing repos should adhere to the trunk-based development branching model: https://trunkbaseddevelopment.com/.
+
+ibc-go utilizes [semantic versioning](https://semver.org/).
+
+### PR Targeting
+
+Ensure that you base and target your PR on the `main` branch.
+
+All development should be targeted against `main`. Bug fixes which are required for outstanding releases should be backported if the CODEOWNERS decide it is applicable.
+
+### Development Procedure
+
+- the latest state of development is on `main`
+- `main` must never fail `make test`
+- no `--force` onto `main` (except when reverting a broken commit, which should seldom happen)
+- create a development branch either on github.com/cosmos/ibc-go, or your fork (using `git remote add fork`)
+- before submitting a pull request, rebase your branch on top of `main` (e.g. `git rebase origin/main`)
+
+### Pull Merge Procedure
+
+- ensure all github requirements pass
+- squash and merge pull request
+
+### Release Procedure
+
+- Start on `main`
+- Create the release candidate branch `rc/v*` (going forward known as **RC**)
+ and ensure it's protected against pushing from anyone except the release
+ manager/coordinator
+ - **no PRs targeting this branch should be merged unless exceptional circumstances arise**
+- On the `RC` branch, prepare a new version section in the `CHANGELOG.md`
+ - All links must be link-ified: `$ python ./scripts/linkify_changelog.py CHANGELOG.md`
+- Run external relayer tests against the prepared RC
+- If errors are found during the relayer testing, commit the fixes to `main`
+ and create a new `RC` branch (making sure to increment the `rcN`)
+- After relayer testing has successfully completed, create the release branch
+ (`release/vX.XX.X`) from the `RC` branch
+- Create a PR to `main` to incorporate the `CHANGELOG.md` updates
+- Tag the release (use `git tag -a`) and create a release in Github
+- Delete the `RC` branches
+
+### Point Release Procedure
+
+At the moment, only a single major release will be supported, so all point releases will be based
+off of that release.
+
+In order to alleviate the burden on a single person having to cherry-pick and handle merge conflicts
+for all desired backport PRs to a point release, we instead maintain a living backport branch,
+into which all desired features and bug fixes are merged as separate PRs.
+
+Example:
+
+The current release is `v1.0.2`. We then maintain a (living) branch `release/v1.0.x`, where `x` is
+the next patch release number (currently `1.0.3`) for the `1.0` release series. As bugs are fixed
+and PRs are merged into `main`, if a contributor wishes a PR to be released in the
+`v1.0.x` point release, the contributor must:
+
+1. Add the `1.0.x-backport` label
+2. Pull the latest changes on the desired `release/v1.0.x` branch
+3. Create a second PR merging the respective PR into `release/v1.0.x`
+4. Update the PR's description and ensure it contains the following information:
+ - **[Impact]** Explanation of how the bug affects users or developers.
+ - **[Test Case]** section with detailed instructions on how to reproduce the bug.
+ - **[Regression Potential]** section with a discussion of how regressions are most likely to manifest (or might
+ manifest, even if unlikely) as a result of the change. **It is assumed that any backport PR is
+ well-tested before it is merged in and has an overall low risk of regression**. This section should discuss
+ the potential for state-breaking changes to occur, such as through out-of-gas errors.
+
+It is the PR author's responsibility to fix merge conflicts, update changelog entries, and
+ensure CI passes. If a PR originates from an external contributor, it may be a core team member's
+responsibility to perform this process instead of the original author.
+Lastly, it is the core team's responsibility to ensure that the PR meets all the backport criteria.
+
+Finally, when a point release is ready to be made:
+
+1. Checkout `release/v1.0.x` branch
+2. Ensure changelog entries are verified
+3. Add release version date to the changelog
+4. Push release branch along with the annotated tag: **git tag -a**
+5. Create a PR into `main` containing ONLY `CHANGELOG.md` updates
+
+Note, although we aim to support only a single release at a time, the process stated above could be
+used for multiple previous versions.
From 5b9e3c07f385fe1126c9a2ad89cc7e4f72664e42 Mon Sep 17 00:00:00 2001
From: Aditya
Date: Wed, 28 Apr 2021 10:46:17 -0400
Subject: [PATCH 043/393] Hex Packet Data (#144)
* add non-breaking event attribute
* CHANGELOG
* add migration doc
---
CHANGELOG.md | 1 +
docs/migrations/ibc-migration-043.md | 4 ++++
modules/core/04-channel/keeper/packet.go | 10 +++++++---
modules/core/04-channel/types/events.go | 5 ++++-
4 files changed, 16 insertions(+), 4 deletions(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index a4fbd915..c536f16e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -57,6 +57,7 @@ Ref: https://keepachangelog.com/en/1.0.0/
### Improvements
* (modules/core/04-channel) [\#7949](https://github.com/cosmos/cosmos-sdk/issues/7949) Standardized channel `Acknowledgement` moved to its own file. Codec registration redundancy removed.
+* (modules/core/04-channel) [\#144](https://github.com/cosmos/ibc-go/pull/144) Introduced a `packet_data_hex` attribute to emit the hex-encoded packet data in events. This allows raw binary (proto-encoded) packet data to be sent over events and decoded correctly by relayers. The original `packet_data` attribute is DEPRECATED. All relayers and IBC event consumers are encouraged to switch to `packet_data_hex` as soon as possible.
* (modules/light-clients/07-tendermint) [\#125](https://github.com/cosmos/ibc-go/pull/125) Implement efficient iteration of consensus states and pruning of earliest expired consensus state on UpdateClient.
## IBC in the Cosmos SDK Repository
diff --git a/docs/migrations/ibc-migration-043.md b/docs/migrations/ibc-migration-043.md
index 6d6b9cff..f343afc2 100644
--- a/docs/migrations/ibc-migration-043.md
+++ b/docs/migrations/ibc-migration-043.md
@@ -99,3 +99,7 @@ The gRPC querier service endpoints have changed slightly. The previous files use
Application developers need to update their `OnRecvPacket` callback logic.
The `OnRecvPacket` callback has been modified to only return the acknowledgement. The acknowledgement returned must implement the `Acknowledgement` interface. The acknowledgement should indicate if it represents a successful processing of a packet by returning true on `Success()` and false in all other cases. A return value of false on `Success()` will result in all state changes which occurred in the callback being discarded. More information can be found in the [documentation](https://github.com/cosmos/ibc-go/blob/main/docs/custom.md#receiving-packets).
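+
+For illustration only, a minimal sketch of an acknowledgement type satisfying this contract might look as follows. The exact interface lives in `modules/core/exported`; only `Success()` is described above, and the `Acknowledgement() []byte` serialization method shown here is an assumption about the full method set.
+
+```go
+package example
+
+// exampleAck is an illustrative acknowledgement carrying an optional error string.
+type exampleAck struct {
+	Error string
+}
+
+// Success reports whether the packet was processed successfully; returning
+// false causes the callback's state changes to be discarded.
+func (a exampleAck) Success() bool {
+	return a.Error == ""
+}
+
+// Acknowledgement returns the bytes written to state and relayed back to the
+// counterparty (method name and shape assumed, see note above).
+func (a exampleAck) Acknowledgement() []byte {
+	return []byte(a.Error)
+}
+```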
+
+## IBC Event changes
+
+The `packet_data` attribute has been deprecated in favor of `packet_data_hex`, in order to provide standardized encoding/decoding of packet data in events. While the `packet_data` attribute is still emitted, all relayers and IBC event consumers are strongly encouraged to switch over to `packet_data_hex` as soon as possible.
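+
+As a minimal, illustrative sketch (not part of the official migration tooling; the attribute value shown is a made-up placeholder), an event consumer can recover the raw packet bytes from `packet_data_hex` with the standard library:
+
+```go
+package main
+
+import (
+	"encoding/hex"
+	"fmt"
+)
+
+func main() {
+	// hypothetical value of the packet_data_hex attribute from a send_packet event
+	attrValue := "7b22616d6f756e74223a2231303030227d"
+
+	// decode the hex string back into the raw packet data bytes
+	data, err := hex.DecodeString(attrValue)
+	if err != nil {
+		panic(err)
+	}
+
+	fmt.Printf("raw packet data: %s\n", data)
+}
+```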
diff --git a/modules/core/04-channel/keeper/packet.go b/modules/core/04-channel/keeper/packet.go
index 957a5a2f..7634ebf4 100644
--- a/modules/core/04-channel/keeper/packet.go
+++ b/modules/core/04-channel/keeper/packet.go
@@ -2,6 +2,7 @@ package keeper
import (
"bytes"
+ "encoding/hex"
"fmt"
"time"
@@ -120,7 +121,8 @@ func (k Keeper) SendPacket(
ctx.EventManager().EmitEvents(sdk.Events{
sdk.NewEvent(
types.EventTypeSendPacket,
- sdk.NewAttribute(types.AttributeKeyData, string(packet.GetData())),
+ sdk.NewAttribute(types.AttributeKeyData, string(packet.GetData())), // DEPRECATED
+ sdk.NewAttribute(types.AttributeKeyDataHex, hex.EncodeToString(packet.GetData())),
sdk.NewAttribute(types.AttributeKeyTimeoutHeight, timeoutHeight.String()),
sdk.NewAttribute(types.AttributeKeyTimeoutTimestamp, fmt.Sprintf("%d", packet.GetTimeoutTimestamp())),
sdk.NewAttribute(types.AttributeKeySequence, fmt.Sprintf("%d", packet.GetSequence())),
@@ -283,7 +285,8 @@ func (k Keeper) RecvPacket(
ctx.EventManager().EmitEvents(sdk.Events{
sdk.NewEvent(
types.EventTypeRecvPacket,
- sdk.NewAttribute(types.AttributeKeyData, string(packet.GetData())),
+ sdk.NewAttribute(types.AttributeKeyData, string(packet.GetData())), // DEPRECATED
+ sdk.NewAttribute(types.AttributeKeyDataHex, hex.EncodeToString(packet.GetData())),
sdk.NewAttribute(types.AttributeKeyTimeoutHeight, packet.GetTimeoutHeight().String()),
sdk.NewAttribute(types.AttributeKeyTimeoutTimestamp, fmt.Sprintf("%d", packet.GetTimeoutTimestamp())),
sdk.NewAttribute(types.AttributeKeySequence, fmt.Sprintf("%d", packet.GetSequence())),
@@ -367,7 +370,8 @@ func (k Keeper) WriteAcknowledgement(
ctx.EventManager().EmitEvents(sdk.Events{
sdk.NewEvent(
types.EventTypeWriteAck,
- sdk.NewAttribute(types.AttributeKeyData, string(packet.GetData())),
+ sdk.NewAttribute(types.AttributeKeyData, string(packet.GetData())), // DEPRECATED
+ sdk.NewAttribute(types.AttributeKeyDataHex, hex.EncodeToString(packet.GetData())),
sdk.NewAttribute(types.AttributeKeyTimeoutHeight, packet.GetTimeoutHeight().String()),
sdk.NewAttribute(types.AttributeKeyTimeoutTimestamp, fmt.Sprintf("%d", packet.GetTimeoutTimestamp())),
sdk.NewAttribute(types.AttributeKeySequence, fmt.Sprintf("%d", packet.GetSequence())),
diff --git a/modules/core/04-channel/types/events.go b/modules/core/04-channel/types/events.go
index 9667a130..6229ebaa 100644
--- a/modules/core/04-channel/types/events.go
+++ b/modules/core/04-channel/types/events.go
@@ -20,7 +20,10 @@ const (
EventTypeAcknowledgePacket = "acknowledge_packet"
EventTypeTimeoutPacket = "timeout_packet"
- AttributeKeyData = "packet_data"
+ // NOTE: DEPRECATED in favor of AttributeKeyDataHex
+ AttributeKeyData = "packet_data"
+
+ AttributeKeyDataHex = "packet_data_hex"
AttributeKeyAck = "packet_ack"
AttributeKeyTimeoutHeight = "packet_timeout_height"
AttributeKeyTimeoutTimestamp = "packet_timeout_timestamp"
From 094fa31cae75c143ed68cffcffd2a679cc8a8493 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 4 May 2021 04:13:23 -0400
Subject: [PATCH 044/393] Bump codecov/codecov-action from v1.4.1 to v1.5.0
(#151)
Bumps [codecov/codecov-action](https://github.com/codecov/codecov-action) from v1.4.1 to v1.5.0.
- [Release notes](https://github.com/codecov/codecov-action/releases)
- [Changelog](https://github.com/codecov/codecov-action/blob/master/CHANGELOG.md)
- [Commits](https://github.com/codecov/codecov-action/compare/v1.4.1...a1ed4b322b4b38cb846afb5a0ebfa17086917d27)
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
.github/workflows/test.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 7e70a0c4..b2057a57 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -150,7 +150,7 @@ jobs:
sed -i.bak "/$(echo $filename | sed 's/\//\\\//g')/d" coverage.txt
done
if: env.GIT_DIFF
- - uses: codecov/codecov-action@v1.4.1
+ - uses: codecov/codecov-action@v1.5.0
with:
file: ./coverage.txt
if: env.GIT_DIFF
From 2c880a22e9f9cc75f62b527ca94aa75ce1106001 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?colin=20axn=C3=A9r?=
<25233464+colin-axner@users.noreply.github.com>
Date: Tue, 4 May 2021 11:24:52 +0200
Subject: [PATCH 045/393] IsFrozen() changed to Status() (#140)
* initial work, pause for feedback
* IsFrozen() -> Status()
* fix bug
* fix tests
* remove typo
* add verify tests
* error message and code cleanup
* self review fixes
* Update modules/core/02-client/keeper/client.go
* add gRPC route to proto
* add gRPC route and tests
* update changelog
* apply review suggestions
* Update modules/light-clients/06-solomachine/types/client_state_test.go
* code ordering
* add set consensus state helper function
* use typed string for status
---
CHANGELOG.md | 1 +
docs/ibc/proto-docs.md | 73 +++
modules/core/02-client/keeper/client.go | 26 +-
modules/core/02-client/keeper/client_test.go | 183 +++----
modules/core/02-client/keeper/grpc_query.go | 27 +
.../core/02-client/keeper/grpc_query_test.go | 168 +++++-
modules/core/02-client/keeper/keeper.go | 2 +-
modules/core/02-client/keeper/keeper_test.go | 2 +
modules/core/02-client/types/errors.go | 1 +
modules/core/02-client/types/query.pb.go | 518 +++++++++++++++---
modules/core/02-client/types/query.pb.gw.go | 98 ++++
modules/core/03-connection/keeper/verify.go | 110 +++-
.../core/03-connection/keeper/verify_test.go | 465 ++++++++++------
modules/core/04-channel/keeper/packet.go | 5 +-
.../core/04-channel/types/expected_keepers.go | 1 +
modules/core/exported/client.go | 25 +-
modules/core/keeper/grpc_query.go | 5 +
.../06-solomachine/types/client_state.go | 12 +
.../06-solomachine/types/client_state_test.go | 12 +
.../types/misbehaviour_handle_test.go | 2 +-
.../07-tendermint/types/client_state.go | 35 +-
.../07-tendermint/types/client_state_test.go | 79 +--
.../types/misbehaviour_handle.go | 6 +-
.../types/misbehaviour_handle_test.go | 2 +-
.../07-tendermint/types/proposal_handle.go | 2 +-
.../09-localhost/types/client_state.go | 7 +-
.../09-localhost/types/client_state_test.go | 8 +
proto/ibc/apps/transfer/v1/genesis.proto | 8 +-
proto/ibc/apps/transfer/v1/query.proto | 3 +-
proto/ibc/apps/transfer/v1/transfer.proto | 5 +-
proto/ibc/apps/transfer/v1/tx.proto | 18 +-
proto/ibc/core/channel/v1/channel.proto | 59 +-
proto/ibc/core/channel/v1/genesis.proto | 38 +-
proto/ibc/core/channel/v1/query.proto | 77 ++-
proto/ibc/core/channel/v1/tx.proto | 158 +++---
proto/ibc/core/client/v1/client.proto | 53 +-
proto/ibc/core/client/v1/genesis.proto | 32 +-
proto/ibc/core/client/v1/query.proto | 65 ++-
proto/ibc/core/client/v1/tx.proto | 35 +-
proto/ibc/core/commitment/v1/commitment.proto | 8 +-
proto/ibc/core/connection/v1/connection.proto | 32 +-
proto/ibc/core/connection/v1/genesis.proto | 12 +-
proto/ibc/core/connection/v1/query.proto | 37 +-
proto/ibc/core/connection/v1/tx.proto | 103 ++--
proto/ibc/core/types/v1/genesis.proto | 18 +-
.../lightclients/localhost/v1/localhost.proto | 4 +-
.../solomachine/v1/solomachine.proto | 113 ++--
.../tendermint/v1/tendermint.proto | 91 +--
testing/chain.go | 12 +-
testing/endpoint.go | 29 +
testing/values.go | 1 +
51 files changed, 1831 insertions(+), 1055 deletions(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index c536f16e..458dc36a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -44,6 +44,7 @@ Ref: https://keepachangelog.com/en/1.0.0/
### API Breaking
+* (modules) [\#140](https://github.com/cosmos/ibc-go/pull/140) IsFrozen() client state interface changed to Status(). gRPC `ClientStatus` route added.
* (modules/core) [\#109](https://github.com/cosmos/ibc-go/pull/109) Remove connection and channel handshake CLI commands.
* (modules) [\#107](https://github.com/cosmos/ibc-go/pull/107) Modify OnRecvPacket callback to return an acknowledgement which indicates if it is successful or not. Callback state changes are discarded for unsuccessful acknowledgements only.
* (modules) [\#108](https://github.com/cosmos/ibc-go/pull/108) All message constructors take the signer as a string to prevent upstream bugs. The `String()` function for an SDK Acc Address relies on external context.
diff --git a/docs/ibc/proto-docs.md b/docs/ibc/proto-docs.md
index 5cf16d3f..14fc96fa 100644
--- a/docs/ibc/proto-docs.md
+++ b/docs/ibc/proto-docs.md
@@ -640,6 +640,14 @@
M QueryClientStatesResponse
+
+ M QueryClientStatusRequest
+
+
+
+ M QueryClientStatusResponse
+
+
M QueryConsensusStateRequest
@@ -4547,6 +4555,54 @@ MsgChannelCloseConfirm.
+ QueryClientStatusRequest
+ QueryClientStatusRequest is the request type for the Query/ClientStatus RPC method.
+
+ Field | Type | Label | Description
+ client_id | string | | client unique identifier
+
+ QueryClientStatusResponse
+ QueryClientStatusResponse is the response type for the Query/ClientStatus RPC method. It returns the current status of the IBC client.
+
+ Field | Type | Label | Description
+ status | string | |
+
QueryConsensusStateRequest
QueryConsensusStateRequest is the request type for the Query/ConsensusState
RPC method. Besides the consensus state, it includes a proof and the height
from which the proof was retrieved.
@@ -4799,6 +4855,13 @@ a given height.
client.
+
+ ClientStatus
+ QueryClientStatusRequest
+ QueryClientStatusResponse
+ Status queries the status of an IBC client.
+
+
ClientParams
QueryClientParamsRequest
@@ -4880,6 +4943,16 @@ client.
+
+ ClientStatus
+ GET
+ /ibc/core/client/v1/client_status/{client_id}
+
+
+
+
+
+
ClientParams
GET
diff --git a/modules/core/02-client/keeper/client.go b/modules/core/02-client/keeper/client.go
index e8288da5..c5416b9e 100644
--- a/modules/core/02-client/keeper/client.go
+++ b/modules/core/02-client/keeper/client.go
@@ -61,12 +61,13 @@ func (k Keeper) UpdateClient(ctx sdk.Context, clientID string, header exported.H
return sdkerrors.Wrapf(types.ErrClientNotFound, "cannot update client with ID %s", clientID)
}
- // prevent update if the client is frozen before or at header height
- if clientState.IsFrozen() && clientState.GetFrozenHeight().LTE(header.GetHeight()) {
- return sdkerrors.Wrapf(types.ErrClientFrozen, "cannot update client with ID %s", clientID)
+ clientStore := k.ClientStore(ctx, clientID)
+
+ if status := clientState.Status(ctx, clientStore, k.cdc); status != exported.Active {
+ return sdkerrors.Wrapf(types.ErrClientNotActive, "cannot update client (%s) with status %s", clientID, status)
}
- clientState, consensusState, err := clientState.CheckHeaderAndUpdateState(ctx, k.cdc, k.ClientStore(ctx, clientID), header)
+ clientState, consensusState, err := clientState.CheckHeaderAndUpdateState(ctx, k.cdc, clientStore, header)
if err != nil {
return sdkerrors.Wrapf(err, "cannot update client with ID %s", clientID)
}
@@ -130,12 +131,13 @@ func (k Keeper) UpgradeClient(ctx sdk.Context, clientID string, upgradedClient e
return sdkerrors.Wrapf(types.ErrClientNotFound, "cannot update client with ID %s", clientID)
}
- // prevent upgrade if current client is frozen
- if clientState.IsFrozen() {
- return sdkerrors.Wrapf(types.ErrClientFrozen, "cannot update client with ID %s", clientID)
+ clientStore := k.ClientStore(ctx, clientID)
+
+ if status := clientState.Status(ctx, clientStore, k.cdc); status != exported.Active {
+ return sdkerrors.Wrapf(types.ErrClientNotActive, "cannot upgrade client (%s) with status %s", clientID, status)
}
- updatedClientState, updatedConsState, err := clientState.VerifyUpgradeAndUpdateState(ctx, k.cdc, k.ClientStore(ctx, clientID),
+ updatedClientState, updatedConsState, err := clientState.VerifyUpgradeAndUpdateState(ctx, k.cdc, clientStore,
upgradedClient, upgradedConsState, proofUpgradeClient, proofUpgradeConsState)
if err != nil {
return sdkerrors.Wrapf(err, "cannot upgrade client with ID %s", clientID)
@@ -178,11 +180,13 @@ func (k Keeper) CheckMisbehaviourAndUpdateState(ctx sdk.Context, misbehaviour ex
return sdkerrors.Wrapf(types.ErrClientNotFound, "cannot check misbehaviour for client with ID %s", misbehaviour.GetClientID())
}
- if clientState.IsFrozen() && clientState.GetFrozenHeight().LTE(misbehaviour.GetHeight()) {
- return sdkerrors.Wrapf(types.ErrInvalidMisbehaviour, "client is already frozen at height ≤ misbehaviour height (%s ≤ %s)", clientState.GetFrozenHeight(), misbehaviour.GetHeight())
+ clientStore := k.ClientStore(ctx, misbehaviour.GetClientID())
+
+ if status := clientState.Status(ctx, clientStore, k.cdc); status != exported.Active {
+ return sdkerrors.Wrapf(types.ErrClientNotActive, "cannot process misbehaviour for client (%s) with status %s", misbehaviour.GetClientID(), status)
}
- clientState, err := clientState.CheckMisbehaviourAndUpdateState(ctx, k.cdc, k.ClientStore(ctx, misbehaviour.GetClientID()), misbehaviour)
+ clientState, err := clientState.CheckMisbehaviourAndUpdateState(ctx, k.cdc, clientStore, misbehaviour)
if err != nil {
return err
}
diff --git a/modules/core/02-client/keeper/client_test.go b/modules/core/02-client/keeper/client_test.go
index b42322d9..1e1f393d 100644
--- a/modules/core/02-client/keeper/client_test.go
+++ b/modules/core/02-client/keeper/client_test.go
@@ -42,141 +42,101 @@ func (suite *KeeperTestSuite) TestCreateClient() {
}
func (suite *KeeperTestSuite) TestUpdateClientTendermint() {
- // Must create header creation functions since suite.header gets recreated on each test case
- createFutureUpdateFn := func(s *KeeperTestSuite) *ibctmtypes.Header {
- heightPlus3 := clienttypes.NewHeight(suite.header.GetHeight().GetRevisionNumber(), suite.header.GetHeight().GetRevisionHeight()+3)
- height := suite.header.GetHeight().(clienttypes.Height)
-
- return suite.chainA.CreateTMClientHeader(testChainID, int64(heightPlus3.RevisionHeight), height, suite.header.Header.Time.Add(time.Hour),
- suite.valSet, suite.valSet, []tmtypes.PrivValidator{suite.privVal})
- }
- createPastUpdateFn := func(s *KeeperTestSuite) *ibctmtypes.Header {
- heightMinus2 := clienttypes.NewHeight(suite.header.GetHeight().GetRevisionNumber(), suite.header.GetHeight().GetRevisionHeight()-2)
- heightMinus4 := clienttypes.NewHeight(suite.header.GetHeight().GetRevisionNumber(), suite.header.GetHeight().GetRevisionHeight()-4)
-
- return suite.chainA.CreateTMClientHeader(testChainID, int64(heightMinus2.RevisionHeight), heightMinus4, suite.header.Header.Time,
- suite.valSet, suite.valSet, []tmtypes.PrivValidator{suite.privVal})
- }
var (
+ path *ibctesting.Path
updateHeader *ibctmtypes.Header
- clientState *ibctmtypes.ClientState
- clientID string
- err error
)
+ // Must create header creation functions since suite.header gets recreated on each test case
+ createFutureUpdateFn := func(trustedHeight clienttypes.Height) *ibctmtypes.Header {
+ header, err := suite.chainA.ConstructUpdateTMClientHeaderWithTrustedHeight(path.EndpointB.Chain, path.EndpointA.ClientID, trustedHeight)
+ suite.Require().NoError(err)
+ return header
+ }
+
+ createPastUpdateFn := func(fillHeight, trustedHeight clienttypes.Height) *ibctmtypes.Header {
+ consState, found := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetClientConsensusState(suite.chainA.GetContext(), path.EndpointA.ClientID, trustedHeight)
+ suite.Require().True(found)
+
+ return suite.chainB.CreateTMClientHeader(suite.chainB.ChainID, int64(fillHeight.RevisionHeight), trustedHeight, consState.(*ibctmtypes.ConsensusState).Timestamp.Add(time.Second*5),
+ suite.chainB.Vals, suite.chainB.Vals, suite.chainB.Signers)
+ }
cases := []struct {
name string
- malleate func() error
+ malleate func()
expPass bool
}{
- {"valid update", func() error {
- clientState = ibctmtypes.NewClientState(testChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, testClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false)
- clientID, err = suite.keeper.CreateClient(suite.ctx, clientState, suite.consensusState)
+ {"valid update", func() {
+ clientState := path.EndpointA.GetClientState().(*ibctmtypes.ClientState)
+ trustHeight := clientState.GetLatestHeight().(types.Height)
// store intermediate consensus state to check that trustedHeight does not need to be highest consensus state before header height
- incrementedClientHeight := testClientHeight.Increment().(types.Height)
- intermediateConsState := &ibctmtypes.ConsensusState{
- Timestamp: suite.now.Add(time.Minute),
- NextValidatorsHash: suite.valSetHash,
- }
- suite.keeper.SetClientConsensusState(suite.ctx, clientID, incrementedClientHeight, intermediateConsState)
-
- clientState.LatestHeight = incrementedClientHeight
- suite.keeper.SetClientState(suite.ctx, clientID, clientState)
+ path.EndpointA.UpdateClient()
- updateHeader = createFutureUpdateFn(suite)
- return err
+ updateHeader = createFutureUpdateFn(trustHeight)
}, true},
- {"valid past update", func() error {
- clientState = ibctmtypes.NewClientState(testChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, testClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false)
- clientID, err = suite.keeper.CreateClient(suite.ctx, clientState, suite.consensusState)
- suite.Require().NoError(err)
+ {"valid past update", func() {
+ clientState := path.EndpointA.GetClientState()
+ trustedHeight := clientState.GetLatestHeight().(types.Height)
- height1 := types.NewHeight(0, 1)
+ currHeight := suite.chainB.CurrentHeader.Height
+ fillHeight := types.NewHeight(clientState.GetLatestHeight().GetRevisionNumber(), uint64(currHeight))
- // store previous consensus state
- prevConsState := &ibctmtypes.ConsensusState{
- Timestamp: suite.past,
- NextValidatorsHash: suite.valSetHash,
- }
- suite.keeper.SetClientConsensusState(suite.ctx, clientID, height1, prevConsState)
+ // commit a couple blocks to allow client to fill in gaps
+ suite.coordinator.CommitBlock(suite.chainB) // this height is not filled in yet
+ suite.coordinator.CommitBlock(suite.chainB) // this height is filled in by the update below
- height2 := types.NewHeight(0, 2)
+ path.EndpointA.UpdateClient()
- // store intermediate consensus state to check that trustedHeight does not need to be hightest consensus state before header height
- intermediateConsState := &ibctmtypes.ConsensusState{
- Timestamp: suite.past.Add(time.Minute),
- NextValidatorsHash: suite.valSetHash,
- }
- suite.keeper.SetClientConsensusState(suite.ctx, clientID, height2, intermediateConsState)
+ // ensure fill height not set
+ _, found := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetClientConsensusState(suite.chainA.GetContext(), path.EndpointA.ClientID, fillHeight)
+ suite.Require().False(found)
// updateHeader will fill in consensus state between prevConsState and suite.consState
// clientState should not be updated
- updateHeader = createPastUpdateFn(suite)
- return nil
+ updateHeader = createPastUpdateFn(fillHeight, trustedHeight)
}, true},
- {"client state not found", func() error {
- updateHeader = createFutureUpdateFn(suite)
+ {"client state not found", func() {
+ updateHeader = createFutureUpdateFn(path.EndpointA.GetClientState().GetLatestHeight().(types.Height))
- return nil
+ path.EndpointA.ClientID = ibctesting.InvalidID
}, false},
- {"consensus state not found", func() error {
- clientState = ibctmtypes.NewClientState(testChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, testClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false)
- suite.keeper.SetClientState(suite.ctx, testClientID, clientState)
- updateHeader = createFutureUpdateFn(suite)
-
- return nil
+ {"consensus state not found", func() {
+ clientState := path.EndpointA.GetClientState()
+ tmClient, ok := clientState.(*ibctmtypes.ClientState)
+ suite.Require().True(ok)
+ tmClient.LatestHeight = tmClient.LatestHeight.Increment().(types.Height)
+
+ suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(suite.chainA.GetContext(), path.EndpointA.ClientID, clientState)
+ updateHeader = createFutureUpdateFn(clientState.GetLatestHeight().(types.Height))
}, false},
- {"frozen client before update", func() error {
- clientState = &ibctmtypes.ClientState{FrozenHeight: types.NewHeight(0, 1), LatestHeight: testClientHeight}
- suite.keeper.SetClientState(suite.ctx, testClientID, clientState)
- updateHeader = createFutureUpdateFn(suite)
-
- return nil
+ {"client is not active", func() {
+ clientState := path.EndpointA.GetClientState().(*ibctmtypes.ClientState)
+ clientState.FrozenHeight = types.NewHeight(0, 1)
+ suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(suite.chainA.GetContext(), path.EndpointA.ClientID, clientState)
+ updateHeader = createFutureUpdateFn(clientState.GetLatestHeight().(types.Height))
}, false},
- {"valid past update before client was frozen", func() error {
- clientState = ibctmtypes.NewClientState(testChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, testClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false)
- clientState.FrozenHeight = types.NewHeight(0, testClientHeight.RevisionHeight-1)
- clientID, err = suite.keeper.CreateClient(suite.ctx, clientState, suite.consensusState)
- suite.Require().NoError(err)
-
- height1 := types.NewHeight(0, 1)
-
- // store previous consensus state
- prevConsState := &ibctmtypes.ConsensusState{
- Timestamp: suite.past,
- NextValidatorsHash: suite.valSetHash,
- }
- suite.keeper.SetClientConsensusState(suite.ctx, clientID, height1, prevConsState)
-
- // updateHeader will fill in consensus state between prevConsState and suite.consState
- // clientState should not be updated
- updateHeader = createPastUpdateFn(suite)
- return nil
- }, true},
- {"invalid header", func() error {
- clientState = ibctmtypes.NewClientState(testChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, testClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false)
- _, err := suite.keeper.CreateClient(suite.ctx, clientState, suite.consensusState)
- suite.Require().NoError(err)
- updateHeader = createPastUpdateFn(suite)
-
- return nil
+ {"invalid header", func() {
+ updateHeader = createFutureUpdateFn(path.EndpointA.GetClientState().GetLatestHeight().(types.Height))
+ updateHeader.TrustedHeight = updateHeader.TrustedHeight.Increment().(types.Height)
}, false},
}
- for i, tc := range cases {
+ for _, tc := range cases {
tc := tc
- i := i
suite.Run(fmt.Sprintf("Case %s", tc.name), func() {
suite.SetupTest()
- clientID = testClientID // must be explicitly changed
+ path = ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.SetupClients(path)
- err := tc.malleate()
- suite.Require().NoError(err)
+ tc.malleate()
- suite.ctx = suite.ctx.WithBlockTime(updateHeader.Header.Time.Add(time.Minute))
+ var clientState exported.ClientState
+ if tc.expPass {
+ clientState = path.EndpointA.GetClientState()
+ }
- err = suite.keeper.UpdateClient(suite.ctx, clientID, updateHeader)
+ err := suite.chainA.App.GetIBCKeeper().ClientKeeper.UpdateClient(suite.chainA.GetContext(), path.EndpointA.ClientID, updateHeader)
if tc.expPass {
suite.Require().NoError(err, err)
@@ -187,11 +147,10 @@ func (suite *KeeperTestSuite) TestUpdateClientTendermint() {
NextValidatorsHash: updateHeader.Header.NextValidatorsHash,
}
- newClientState, found := suite.keeper.GetClientState(suite.ctx, clientID)
- suite.Require().True(found, "valid test case %d failed: %s", i, tc.name)
+ newClientState := path.EndpointA.GetClientState()
- consensusState, found := suite.keeper.GetClientConsensusState(suite.ctx, clientID, updateHeader.GetHeight())
- suite.Require().True(found, "valid test case %d failed: %s", i, tc.name)
+ consensusState, found := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetClientConsensusState(suite.chainA.GetContext(), path.EndpointA.ClientID, updateHeader.GetHeight())
+ suite.Require().True(found)
// Determine if clientState should be updated or not
if updateHeader.GetHeight().GT(clientState.GetLatestHeight()) {
@@ -202,10 +161,10 @@ func (suite *KeeperTestSuite) TestUpdateClientTendermint() {
suite.Require().Equal(clientState.GetLatestHeight(), newClientState.GetLatestHeight(), "client state height updated for past header")
}
- suite.Require().NoError(err, "valid test case %d failed: %s", i, tc.name)
+ suite.Require().NoError(err)
suite.Require().Equal(expConsensusState, consensusState, "consensus state should have been updated on case %s", tc.name)
} else {
- suite.Require().Error(err, "invalid test case %d passed: %s", i, tc.name)
+ suite.Require().Error(err)
}
})
}
@@ -291,8 +250,10 @@ func (suite *KeeperTestSuite) TestUpgradeClient() {
expPass: false,
},
{
- name: "client state frozen",
+ name: "client state is not active",
setup: func() {
+ // client is frozen
+
// last Height is at next block
lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1))
@@ -524,7 +485,7 @@ func (suite *KeeperTestSuite) TestCheckMisbehaviourAndUpdateState() {
false,
},
{
- "client already frozen at earlier height",
+ "client already is not active - client is frozen",
&ibctmtypes.Misbehaviour{
Header1: suite.chainA.CreateTMClientHeader(testChainID, int64(testClientHeight.RevisionHeight), testClientHeight, altTime, bothValSet, bothValSet, bothSigners),
Header2: suite.chainA.CreateTMClientHeader(testChainID, int64(testClientHeight.RevisionHeight), testClientHeight, suite.ctx.BlockTime(), bothValSet, bothValSet, bothSigners),
@@ -582,7 +543,7 @@ func (suite *KeeperTestSuite) TestCheckMisbehaviourAndUpdateState() {
clientState, found := suite.keeper.GetClientState(suite.ctx, clientID)
suite.Require().True(found, "valid test case %d failed: %s", i, tc.name)
- suite.Require().True(clientState.IsFrozen(), "valid test case %d failed: %s", i, tc.name)
+ suite.Require().True(!clientState.(*ibctmtypes.ClientState).FrozenHeight.IsZero(), "valid test case %d failed: %s", i, tc.name)
suite.Require().Equal(tc.misbehaviour.GetHeight(), clientState.GetFrozenHeight(),
"valid test case %d failed: %s. Expected FrozenHeight %s got %s", tc.misbehaviour.GetHeight(), clientState.GetFrozenHeight())
} else {
diff --git a/modules/core/02-client/keeper/grpc_query.go b/modules/core/02-client/keeper/grpc_query.go
index 556ccad2..4ebfadb5 100644
--- a/modules/core/02-client/keeper/grpc_query.go
+++ b/modules/core/02-client/keeper/grpc_query.go
@@ -188,6 +188,33 @@ func (q Keeper) ConsensusStates(c context.Context, req *types.QueryConsensusStat
}, nil
}
+// ClientStatus implements the Query/ClientStatus gRPC method
+func (q Keeper) ClientStatus(c context.Context, req *types.QueryClientStatusRequest) (*types.QueryClientStatusResponse, error) {
+ if req == nil {
+ return nil, status.Error(codes.InvalidArgument, "empty request")
+ }
+
+ if err := host.ClientIdentifierValidator(req.ClientId); err != nil {
+ return nil, status.Error(codes.InvalidArgument, err.Error())
+ }
+
+ ctx := sdk.UnwrapSDKContext(c)
+ clientState, found := q.GetClientState(ctx, req.ClientId)
+ if !found {
+ return nil, status.Error(
+ codes.NotFound,
+ sdkerrors.Wrap(types.ErrClientNotFound, req.ClientId).Error(),
+ )
+ }
+
+ clientStore := q.ClientStore(ctx, req.ClientId)
+ status := clientState.Status(ctx, clientStore, q.cdc)
+
+ return &types.QueryClientStatusResponse{
+ Status: status.String(),
+ }, nil
+}
+
// ClientParams implements the Query/ClientParams gRPC method
func (q Keeper) ClientParams(c context.Context, _ *types.QueryClientParamsRequest) (*types.QueryClientParamsResponse, error) {
ctx := sdk.UnwrapSDKContext(c)
diff --git a/modules/core/02-client/keeper/grpc_query_test.go b/modules/core/02-client/keeper/grpc_query_test.go
index b80fd57f..a2531c4d 100644
--- a/modules/core/02-client/keeper/grpc_query_test.go
+++ b/modules/core/02-client/keeper/grpc_query_test.go
@@ -28,6 +28,12 @@ func (suite *KeeperTestSuite) TestQueryClientState() {
malleate func()
expPass bool
}{
+ {"req is nil",
+ func() {
+ req = nil
+ },
+ false,
+ },
{"invalid clientID",
func() {
req = &types.QueryClientStateRequest{}
@@ -45,15 +51,15 @@ func (suite *KeeperTestSuite) TestQueryClientState() {
{
"success",
func() {
- clientState := ibctmtypes.NewClientState(testChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, types.ZeroHeight(), commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false)
- suite.keeper.SetClientState(suite.ctx, testClientID, clientState)
+ path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.SetupClients(path)
var err error
- expClientState, err = types.PackClientState(clientState)
+ expClientState, err = types.PackClientState(path.EndpointA.GetClientState())
suite.Require().NoError(err)
req = &types.QueryClientStateRequest{
- ClientId: testClientID,
+ ClientId: path.EndpointA.ClientID,
}
},
true,
@@ -65,8 +71,8 @@ func (suite *KeeperTestSuite) TestQueryClientState() {
suite.SetupTest() // reset
tc.malleate()
- ctx := sdk.WrapSDKContext(suite.ctx)
- res, err := suite.queryClient.ClientState(ctx, req)
+ ctx := sdk.WrapSDKContext(suite.chainA.GetContext())
+ res, err := suite.chainA.QueryServer.ClientState(ctx, req)
if tc.expPass {
suite.Require().NoError(err)
@@ -94,6 +100,12 @@ func (suite *KeeperTestSuite) TestQueryClientStates() {
malleate func()
expPass bool
}{
+ {"req is nil",
+ func() {
+ req = nil
+ },
+ false,
+ },
{
"empty pagination",
func() {
@@ -179,6 +191,12 @@ func (suite *KeeperTestSuite) TestQueryConsensusState() {
malleate func()
expPass bool
}{
+ {"req is nil",
+ func() {
+ req = nil
+ },
+ false,
+ },
{
"invalid clientID",
func() {
@@ -202,7 +220,7 @@ func (suite *KeeperTestSuite) TestQueryConsensusState() {
"consensus state not found",
func() {
req = &types.QueryConsensusStateRequest{
- ClientId: testClientID,
+ ClientId: ibctesting.FirstClientID,
LatestHeight: true,
}
},
@@ -211,19 +229,16 @@ func (suite *KeeperTestSuite) TestQueryConsensusState() {
{
"success latest height",
func() {
- clientState := ibctmtypes.NewClientState(testChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, testClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false)
- cs := ibctmtypes.NewConsensusState(
- suite.consensusState.Timestamp, commitmenttypes.NewMerkleRoot([]byte("hash1")), nil,
- )
- suite.keeper.SetClientState(suite.ctx, testClientID, clientState)
- suite.keeper.SetClientConsensusState(suite.ctx, testClientID, testClientHeight, cs)
+ path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.SetupClients(path)
+ cs := path.EndpointA.GetConsensusState(path.EndpointA.GetClientState().GetLatestHeight())
var err error
expConsensusState, err = types.PackConsensusState(cs)
suite.Require().NoError(err)
req = &types.QueryConsensusStateRequest{
- ClientId: testClientID,
+ ClientId: path.EndpointA.ClientID,
LatestHeight: true,
}
},
@@ -232,19 +247,22 @@ func (suite *KeeperTestSuite) TestQueryConsensusState() {
{
"success with height",
func() {
- cs := ibctmtypes.NewConsensusState(
- suite.consensusState.Timestamp, commitmenttypes.NewMerkleRoot([]byte("hash1")), nil,
- )
- suite.keeper.SetClientConsensusState(suite.ctx, testClientID, testClientHeight, cs)
+ path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.SetupClients(path)
+ height := path.EndpointA.GetClientState().GetLatestHeight()
+ cs := path.EndpointA.GetConsensusState(height)
var err error
expConsensusState, err = types.PackConsensusState(cs)
suite.Require().NoError(err)
+ // update client to new height
+ path.EndpointA.UpdateClient()
+
req = &types.QueryConsensusStateRequest{
- ClientId: testClientID,
- RevisionNumber: 0,
- RevisionHeight: height,
+ ClientId: path.EndpointA.ClientID,
+ RevisionNumber: height.GetRevisionNumber(),
+ RevisionHeight: height.GetRevisionHeight(),
}
},
true,
@@ -256,8 +274,8 @@ func (suite *KeeperTestSuite) TestQueryConsensusState() {
suite.SetupTest() // reset
tc.malleate()
- ctx := sdk.WrapSDKContext(suite.ctx)
- res, err := suite.queryClient.ConsensusState(ctx, req)
+ ctx := sdk.WrapSDKContext(suite.chainA.GetContext())
+ res, err := suite.chainA.QueryServer.ConsensusState(ctx, req)
if tc.expPass {
suite.Require().NoError(err)
@@ -378,6 +396,102 @@ func (suite *KeeperTestSuite) TestQueryConsensusStates() {
}
}
+func (suite *KeeperTestSuite) TestQueryClientStatus() {
+ var (
+ req *types.QueryClientStatusRequest
+ )
+
+ testCases := []struct {
+ msg string
+ malleate func()
+ expPass bool
+ expStatus string
+ }{
+ {"req is nil",
+ func() {
+ req = nil
+ },
+ false, "",
+ },
+ {"invalid clientID",
+ func() {
+ req = &types.QueryClientStatusRequest{}
+ },
+ false, "",
+ },
+ {"client not found",
+ func() {
+ req = &types.QueryClientStatusRequest{
+ ClientId: ibctesting.InvalidID,
+ }
+ },
+ false, "",
+ },
+ {
+ "Active client status",
+ func() {
+ path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.SetupClients(path)
+ req = &types.QueryClientStatusRequest{
+ ClientId: path.EndpointA.ClientID,
+ }
+ },
+ true, exported.Active.String(),
+ },
+ {
+ "Unknown client status",
+ func() {
+ path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.SetupClients(path)
+ clientState := path.EndpointA.GetClientState().(*ibctmtypes.ClientState)
+
+ // increment latest height so no consensus state is stored
+ clientState.LatestHeight = clientState.LatestHeight.Increment().(types.Height)
+ path.EndpointA.SetClientState(clientState)
+
+ req = &types.QueryClientStatusRequest{
+ ClientId: path.EndpointA.ClientID,
+ }
+ },
+ true, exported.Unknown.String(),
+ },
+ {
+ "Frozen client status",
+ func() {
+ path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.SetupClients(path)
+ clientState := path.EndpointA.GetClientState().(*ibctmtypes.ClientState)
+
+ clientState.FrozenHeight = types.NewHeight(0, 1)
+ path.EndpointA.SetClientState(clientState)
+
+ req = &types.QueryClientStatusRequest{
+ ClientId: path.EndpointA.ClientID,
+ }
+ },
+ true, exported.Frozen.String(),
+ },
+ }
+
+ for _, tc := range testCases {
+ suite.Run(fmt.Sprintf("Case %s", tc.msg), func() {
+ suite.SetupTest() // reset
+
+ tc.malleate()
+ ctx := sdk.WrapSDKContext(suite.chainA.GetContext())
+ res, err := suite.chainA.QueryServer.ClientStatus(ctx, req)
+
+ if tc.expPass {
+ suite.Require().NoError(err)
+ suite.Require().NotNil(res)
+ suite.Require().Equal(tc.expStatus, res.Status)
+ } else {
+ suite.Require().Error(err)
+ }
+ })
+ }
+}
+
func (suite *KeeperTestSuite) TestQueryUpgradedConsensusStates() {
var (
req *types.QueryUpgradedConsensusStateRequest
@@ -390,6 +504,12 @@ func (suite *KeeperTestSuite) TestQueryUpgradedConsensusStates() {
malleate func()
expPass bool
}{
+ {"req is nil",
+ func() {
+ req = nil
+ },
+ false,
+ },
{
"no plan",
func() {
@@ -437,6 +557,6 @@ func (suite *KeeperTestSuite) TestQueryUpgradedConsensusStates() {
func (suite *KeeperTestSuite) TestQueryParams() {
ctx := sdk.WrapSDKContext(suite.chainA.GetContext())
expParams := types.DefaultParams()
- res, _ := suite.queryClient.ClientParams(ctx, &types.QueryClientParamsRequest{})
+ res, _ := suite.chainA.QueryServer.ClientParams(ctx, &types.QueryClientParamsRequest{})
suite.Require().Equal(&expParams, res.Params)
}
diff --git a/modules/core/02-client/keeper/keeper.go b/modules/core/02-client/keeper/keeper.go
index aa3369bc..d19360dc 100644
--- a/modules/core/02-client/keeper/keeper.go
+++ b/modules/core/02-client/keeper/keeper.go
@@ -274,7 +274,7 @@ func (k Keeper) ValidateSelfClient(ctx sdk.Context, clientState exported.ClientS
&ibctmtypes.ClientState{}, tmClient)
}
- if clientState.IsFrozen() {
+ if !tmClient.FrozenHeight.IsZero() {
return types.ErrClientFrozen
}
diff --git a/modules/core/02-client/keeper/keeper_test.go b/modules/core/02-client/keeper/keeper_test.go
index d3e1cef5..676967cb 100644
--- a/modules/core/02-client/keeper/keeper_test.go
+++ b/modules/core/02-client/keeper/keeper_test.go
@@ -66,6 +66,7 @@ type KeeperTestSuite struct {
now time.Time
past time.Time
+ // TODO: deprecate
queryClient types.QueryClient
}
@@ -122,6 +123,7 @@ func (suite *KeeperTestSuite) SetupTest() {
)
suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(suite.chainA.GetContext(), exported.Localhost, localHostClient)
+ // TODO: deprecate
queryHelper := baseapp.NewQueryServerTestHelper(suite.ctx, app.InterfaceRegistry())
types.RegisterQueryServer(queryHelper, app.IBCKeeper.ClientKeeper)
suite.queryClient = types.NewQueryClient(queryHelper)
diff --git a/modules/core/02-client/types/errors.go b/modules/core/02-client/types/errors.go
index 8a956f86..c40bae87 100644
--- a/modules/core/02-client/types/errors.go
+++ b/modules/core/02-client/types/errors.go
@@ -33,4 +33,5 @@ var (
ErrInvalidHeight = sdkerrors.Register(SubModuleName, 26, "invalid height")
ErrInvalidSubstitute = sdkerrors.Register(SubModuleName, 27, "invalid client state substitute")
ErrInvalidUpgradeProposal = sdkerrors.Register(SubModuleName, 28, "invalid upgrade proposal")
+ ErrClientNotActive = sdkerrors.Register(SubModuleName, 29, "client is not active")
)
diff --git a/modules/core/02-client/types/query.pb.go b/modules/core/02-client/types/query.pb.go
index 57908e13..0e624312 100644
--- a/modules/core/02-client/types/query.pb.go
+++ b/modules/core/02-client/types/query.pb.go
@@ -500,6 +500,99 @@ func (m *QueryConsensusStatesResponse) GetPagination() *query.PageResponse {
return nil
}
+// QueryClientStatusRequest is the request type for the Query/ClientStatus RPC
+// method
+type QueryClientStatusRequest struct {
+ // client unique identifier
+ ClientId string `protobuf:"bytes,1,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty"`
+}
+
+func (m *QueryClientStatusRequest) Reset() { *m = QueryClientStatusRequest{} }
+func (m *QueryClientStatusRequest) String() string { return proto.CompactTextString(m) }
+func (*QueryClientStatusRequest) ProtoMessage() {}
+func (*QueryClientStatusRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_dc42cdfd1d52d76e, []int{8}
+}
+func (m *QueryClientStatusRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *QueryClientStatusRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_QueryClientStatusRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *QueryClientStatusRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QueryClientStatusRequest.Merge(m, src)
+}
+func (m *QueryClientStatusRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *QueryClientStatusRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_QueryClientStatusRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QueryClientStatusRequest proto.InternalMessageInfo
+
+func (m *QueryClientStatusRequest) GetClientId() string {
+ if m != nil {
+ return m.ClientId
+ }
+ return ""
+}
+
+// QueryClientStatusResponse is the response type for the Query/ClientStatus RPC
+// method. It returns the current status of the IBC client.
+type QueryClientStatusResponse struct {
+ Status string `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"`
+}
+
+func (m *QueryClientStatusResponse) Reset() { *m = QueryClientStatusResponse{} }
+func (m *QueryClientStatusResponse) String() string { return proto.CompactTextString(m) }
+func (*QueryClientStatusResponse) ProtoMessage() {}
+func (*QueryClientStatusResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_dc42cdfd1d52d76e, []int{9}
+}
+func (m *QueryClientStatusResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *QueryClientStatusResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_QueryClientStatusResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *QueryClientStatusResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QueryClientStatusResponse.Merge(m, src)
+}
+func (m *QueryClientStatusResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *QueryClientStatusResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_QueryClientStatusResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QueryClientStatusResponse proto.InternalMessageInfo
+
+func (m *QueryClientStatusResponse) GetStatus() string {
+ if m != nil {
+ return m.Status
+ }
+ return ""
+}
+
// QueryClientParamsRequest is the request type for the Query/ClientParams RPC
// method.
type QueryClientParamsRequest struct {
@@ -509,7 +602,7 @@ func (m *QueryClientParamsRequest) Reset() { *m = QueryClientParamsReque
func (m *QueryClientParamsRequest) String() string { return proto.CompactTextString(m) }
func (*QueryClientParamsRequest) ProtoMessage() {}
func (*QueryClientParamsRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_dc42cdfd1d52d76e, []int{8}
+ return fileDescriptor_dc42cdfd1d52d76e, []int{10}
}
func (m *QueryClientParamsRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -549,7 +642,7 @@ func (m *QueryClientParamsResponse) Reset() { *m = QueryClientParamsResp
func (m *QueryClientParamsResponse) String() string { return proto.CompactTextString(m) }
func (*QueryClientParamsResponse) ProtoMessage() {}
func (*QueryClientParamsResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_dc42cdfd1d52d76e, []int{9}
+ return fileDescriptor_dc42cdfd1d52d76e, []int{11}
}
func (m *QueryClientParamsResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -594,7 +687,7 @@ func (m *QueryUpgradedClientStateRequest) Reset() { *m = QueryUpgradedCl
func (m *QueryUpgradedClientStateRequest) String() string { return proto.CompactTextString(m) }
func (*QueryUpgradedClientStateRequest) ProtoMessage() {}
func (*QueryUpgradedClientStateRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_dc42cdfd1d52d76e, []int{10}
+ return fileDescriptor_dc42cdfd1d52d76e, []int{12}
}
func (m *QueryUpgradedClientStateRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -634,7 +727,7 @@ func (m *QueryUpgradedClientStateResponse) Reset() { *m = QueryUpgradedC
func (m *QueryUpgradedClientStateResponse) String() string { return proto.CompactTextString(m) }
func (*QueryUpgradedClientStateResponse) ProtoMessage() {}
func (*QueryUpgradedClientStateResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_dc42cdfd1d52d76e, []int{11}
+ return fileDescriptor_dc42cdfd1d52d76e, []int{13}
}
func (m *QueryUpgradedClientStateResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -679,7 +772,7 @@ func (m *QueryUpgradedConsensusStateRequest) Reset() { *m = QueryUpgrade
func (m *QueryUpgradedConsensusStateRequest) String() string { return proto.CompactTextString(m) }
func (*QueryUpgradedConsensusStateRequest) ProtoMessage() {}
func (*QueryUpgradedConsensusStateRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_dc42cdfd1d52d76e, []int{12}
+ return fileDescriptor_dc42cdfd1d52d76e, []int{14}
}
func (m *QueryUpgradedConsensusStateRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -719,7 +812,7 @@ func (m *QueryUpgradedConsensusStateResponse) Reset() { *m = QueryUpgrad
func (m *QueryUpgradedConsensusStateResponse) String() string { return proto.CompactTextString(m) }
func (*QueryUpgradedConsensusStateResponse) ProtoMessage() {}
func (*QueryUpgradedConsensusStateResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_dc42cdfd1d52d76e, []int{13}
+ return fileDescriptor_dc42cdfd1d52d76e, []int{15}
}
func (m *QueryUpgradedConsensusStateResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -764,6 +857,8 @@ func init() {
proto.RegisterType((*QueryConsensusStateResponse)(nil), "ibc.core.client.v1.QueryConsensusStateResponse")
proto.RegisterType((*QueryConsensusStatesRequest)(nil), "ibc.core.client.v1.QueryConsensusStatesRequest")
proto.RegisterType((*QueryConsensusStatesResponse)(nil), "ibc.core.client.v1.QueryConsensusStatesResponse")
+ proto.RegisterType((*QueryClientStatusRequest)(nil), "ibc.core.client.v1.QueryClientStatusRequest")
+ proto.RegisterType((*QueryClientStatusResponse)(nil), "ibc.core.client.v1.QueryClientStatusResponse")
proto.RegisterType((*QueryClientParamsRequest)(nil), "ibc.core.client.v1.QueryClientParamsRequest")
proto.RegisterType((*QueryClientParamsResponse)(nil), "ibc.core.client.v1.QueryClientParamsResponse")
proto.RegisterType((*QueryUpgradedClientStateRequest)(nil), "ibc.core.client.v1.QueryUpgradedClientStateRequest")
@@ -775,66 +870,69 @@ func init() {
func init() { proto.RegisterFile("ibc/core/client/v1/query.proto", fileDescriptor_dc42cdfd1d52d76e) }
var fileDescriptor_dc42cdfd1d52d76e = []byte{
- // 942 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x57, 0xcf, 0x6f, 0xdc, 0x44,
- 0x14, 0xce, 0xa4, 0x69, 0xd5, 0xbc, 0xdd, 0x26, 0x68, 0x9a, 0xa6, 0x5b, 0xb7, 0x38, 0x5b, 0x07,
- 0xd1, 0xb4, 0x24, 0x9e, 0x64, 0x4b, 0x5b, 0x2e, 0x1c, 0x48, 0xa5, 0xd2, 0x5e, 0xda, 0x62, 0x84,
- 0x90, 0x90, 0x50, 0x64, 0x7b, 0x27, 0x8e, 0xa5, 0x5d, 0x8f, 0xeb, 0xb1, 0x23, 0x45, 0x55, 0x2e,
- 0x3d, 0x72, 0x42, 0x42, 0xe2, 0xca, 0x9d, 0x43, 0xc5, 0x01, 0x89, 0x2b, 0x27, 0xd4, 0x63, 0x25,
- 0x38, 0x70, 0x40, 0x04, 0x25, 0xfc, 0x21, 0xc8, 0x33, 0xe3, 0x8d, 0xbd, 0x3b, 0xcb, 0x7a, 0x11,
- 0xbd, 0x6d, 0xde, 0xcf, 0xef, 0x7d, 0xef, 0xcd, 0x67, 0x05, 0xcc, 0xd0, 0xf3, 0x89, 0xcf, 0x12,
- 0x4a, 0xfc, 0x5e, 0x48, 0xa3, 0x94, 0xec, 0x6f, 0x91, 0x67, 0x19, 0x4d, 0x0e, 0xec, 0x38, 0x61,
- 0x29, 0xc3, 0x38, 0xf4, 0x7c, 0x3b, 0xf7, 0xdb, 0xd2, 0x6f, 0xef, 0x6f, 0x19, 0xb7, 0x7c, 0xc6,
- 0xfb, 0x8c, 0x13, 0xcf, 0xe5, 0x54, 0x06, 0x93, 0xfd, 0x2d, 0x8f, 0xa6, 0xee, 0x16, 0x89, 0xdd,
- 0x20, 0x8c, 0xdc, 0x34, 0x64, 0x91, 0xcc, 0x37, 0x56, 0x34, 0xf5, 0x55, 0x25, 0x19, 0x70, 0x25,
- 0x60, 0x2c, 0xe8, 0x51, 0x22, 0xfe, 0xf2, 0xb2, 0x5d, 0xe2, 0x46, 0xaa, 0xb7, 0x71, 0x4d, 0xb9,
- 0xdc, 0x38, 0x24, 0x6e, 0x14, 0xb1, 0x54, 0x14, 0xe6, 0xca, 0xbb, 0x14, 0xb0, 0x80, 0x89, 0x9f,
- 0x24, 0xff, 0x25, 0xad, 0xd6, 0x5d, 0xb8, 0xfc, 0x49, 0x8e, 0xe8, 0xbe, 0xe8, 0xf1, 0x69, 0xea,
- 0xa6, 0xd4, 0xa1, 0xcf, 0x32, 0xca, 0x53, 0x7c, 0x15, 0xe6, 0x65, 0xe7, 0x9d, 0xb0, 0xdb, 0x42,
- 0x6d, 0xb4, 0x36, 0xef, 0x9c, 0x97, 0x86, 0x47, 0x5d, 0xeb, 0x25, 0x82, 0xd6, 0x68, 0x22, 0x8f,
- 0x59, 0xc4, 0x29, 0xbe, 0x07, 0x4d, 0x95, 0xc9, 0x73, 0xbb, 0x48, 0x6e, 0x74, 0x96, 0x6c, 0x89,
- 0xcf, 0x2e, 0xa0, 0xdb, 0x1f, 0x45, 0x07, 0x4e, 0xc3, 0x3f, 0x2d, 0x80, 0x97, 0xe0, 0x6c, 0x9c,
- 0x30, 0xb6, 0xdb, 0x9a, 0x6d, 0xa3, 0xb5, 0xa6, 0x23, 0xff, 0xc0, 0xf7, 0xa1, 0x29, 0x7e, 0xec,
- 0xec, 0xd1, 0x30, 0xd8, 0x4b, 0x5b, 0x67, 0x44, 0x39, 0xc3, 0x1e, 0xa5, 0xda, 0x7e, 0x28, 0x22,
- 0xb6, 0xe7, 0x5e, 0xfd, 0xb9, 0x32, 0xe3, 0x34, 0x44, 0x96, 0x34, 0x59, 0xde, 0x28, 0x5e, 0x5e,
- 0x4c, 0xfa, 0x00, 0xe0, 0x74, 0x11, 0x0a, 0xed, 0xbb, 0xb6, 0xdc, 0x9a, 0x9d, 0x6f, 0xcd, 0x96,
- 0x2b, 0x56, 0x5b, 0xb3, 0x9f, 0xba, 0x41, 0xc1, 0x92, 0x53, 0xca, 0xb4, 0x7e, 0x43, 0x70, 0x45,
- 0xd3, 0x44, 0xb1, 0x12, 0xc1, 0x85, 0x32, 0x2b, 0xbc, 0x85, 0xda, 0x67, 0xd6, 0x1a, 0x9d, 0x9b,
- 0xba, 0x39, 0x1e, 0x75, 0x69, 0x94, 0x86, 0xbb, 0x21, 0xed, 0x96, 0x4a, 0x6d, 0x9b, 0xf9, 0x58,
- 0xdf, 0x1f, 0xad, 0x2c, 0x6b, 0xdd, 0xdc, 0x69, 0x96, 0xb8, 0xe4, 0xf8, 0xe3, 0xca, 0x54, 0xb3,
- 0x62, 0xaa, 0x1b, 0x13, 0xa7, 0x92, 0x60, 0x2b, 0x63, 0xfd, 0x80, 0xc0, 0x90, 0x63, 0xe5, 0xae,
- 0x88, 0x67, 0xbc, 0xf6, 0x9d, 0xe0, 0x1b, 0xb0, 0x98, 0xd0, 0xfd, 0x90, 0x87, 0x2c, 0xda, 0x89,
- 0xb2, 0xbe, 0x47, 0x13, 0x81, 0x64, 0xce, 0x59, 0x28, 0xcc, 0x8f, 0x85, 0xb5, 0x12, 0x58, 0xda,
- 0x73, 0x29, 0x50, 0x2e, 0x12, 0xaf, 0xc2, 0x85, 0x5e, 0x3e, 0x5f, 0x5a, 0x84, 0xcd, 0xb5, 0xd1,
- 0xda, 0x79, 0xa7, 0x29, 0x8d, 0x6a, 0xdb, 0x3f, 0x21, 0xb8, 0xaa, 0x85, 0xac, 0x76, 0xf1, 0x21,
- 0x2c, 0xfa, 0x85, 0xa7, 0xc6, 0x91, 0x2e, 0xf8, 0x95, 0x32, 0x6f, 0xf2, 0x4e, 0x5f, 0xe8, 0x91,
- 0xf3, 0x5a, 0x6c, 0x3f, 0xd0, 0xac, 0xfc, 0xbf, 0x1c, 0xf2, 0x2f, 0x08, 0xae, 0xe9, 0x41, 0x28,
- 0xfe, 0xbe, 0x84, 0xb7, 0x86, 0xf8, 0x2b, 0xce, 0x79, 0x5d, 0x37, 0x6e, 0xb5, 0xcc, 0xe7, 0x61,
- 0xba, 0x57, 0x21, 0x60, 0xb1, 0x4a, 0xef, 0xff, 0x78, 0xba, 0x46, 0xe5, 0xd5, 0x3f, 0x75, 0x13,
- 0xb7, 0x5f, 0x30, 0x69, 0x3d, 0xa9, 0x3c, 0xd6, 0xc2, 0xa7, 0x06, 0xec, 0xc0, 0xb9, 0x58, 0x58,
- 0xd4, 0x5d, 0x68, 0xb7, 0xa8, 0x72, 0x54, 0xa4, 0x75, 0x1d, 0x56, 0x44, 0xc1, 0xcf, 0xe2, 0x20,
- 0x71, 0xbb, 0x95, 0xb7, 0x59, 0xf4, 0xec, 0x41, 0x7b, 0x7c, 0x88, 0x6a, 0xfd, 0x10, 0x2e, 0x65,
- 0xca, 0xbd, 0x53, 0x5b, 0x46, 0x2f, 0x66, 0xa3, 0x15, 0xad, 0x77, 0xc0, 0xaa, 0x76, 0xd3, 0xbd,
- 0x5f, 0x2b, 0x83, 0xd5, 0x7f, 0x8d, 0x52, 0xb0, 0x1e, 0x43, 0xeb, 0x14, 0xd6, 0x14, 0x6f, 0x67,
- 0x39, 0xd3, 0xd6, 0xed, 0xfc, 0x31, 0x0f, 0x67, 0x45, 0x5f, 0xfc, 0x1d, 0x82, 0x46, 0x09, 0x36,
- 0x7e, 0x4f, 0xc7, 0xf5, 0x98, 0xaf, 0x94, 0xb1, 0x5e, 0x2f, 0x58, 0x0e, 0x61, 0xdd, 0x79, 0xf1,
- 0xeb, 0xdf, 0xdf, 0xcc, 0x12, 0xbc, 0x41, 0xc6, 0x7e, 0x67, 0xd5, 0x39, 0x93, 0xe7, 0x83, 0x67,
- 0x76, 0x88, 0xbf, 0x45, 0xd0, 0x2c, 0x2b, 0x2d, 0xae, 0xd5, 0xb5, 0xb8, 0x34, 0x63, 0xa3, 0x66,
- 0xb4, 0x02, 0x79, 0x53, 0x80, 0x5c, 0xc5, 0xd7, 0x27, 0x82, 0xc4, 0x47, 0x08, 0x16, 0xaa, 0xbc,
- 0x62, 0x7b, 0x7c, 0x33, 0xdd, 0xfa, 0x0d, 0x52, 0x3b, 0x5e, 0xc1, 0xeb, 0x09, 0x78, 0xbb, 0xb8,
- 0xab, 0x85, 0x37, 0xa4, 0x0a, 0x65, 0x1a, 0x49, 0xa1, 0xe4, 0xe4, 0xf9, 0xd0, 0x37, 0xe1, 0x90,
- 0x48, 0xc9, 0x2c, 0x39, 0xa4, 0xe1, 0x10, 0xbf, 0x44, 0xb0, 0x38, 0xa4, 0x42, 0xb8, 0x2e, 0xe4,
- 0xc1, 0x02, 0x36, 0xeb, 0x27, 0xa8, 0x21, 0x3f, 0x10, 0x43, 0x76, 0xf0, 0xe6, 0xb4, 0x43, 0xe2,
- 0xaf, 0x06, 0xb7, 0x22, 0xe5, 0x61, 0xe2, 0xad, 0x54, 0x54, 0x69, 0xe2, 0xad, 0x54, 0x75, 0xca,
- 0x7a, 0x5b, 0xe0, 0xbc, 0x8c, 0x2f, 0x49, 0x9c, 0x03, 0x88, 0x52, 0x92, 0xf0, 0x8f, 0x08, 0x2e,
- 0x6a, 0xb4, 0x06, 0xdf, 0x1e, 0xdb, 0x65, 0xbc, 0x78, 0x19, 0xef, 0x4f, 0x97, 0xa4, 0x10, 0x76,
- 0x04, 0xc2, 0x75, 0x7c, 0x4b, 0xc7, 0xa4, 0x56, 0xe8, 0x38, 0xfe, 0x19, 0xc1, 0xb2, 0x5e, 0x8e,
- 0xf0, 0xdd, 0xc9, 0x20, 0xb4, 0x67, 0x7e, 0x6f, 0xea, 0xbc, 0x3a, 0x92, 0x31, 0x4e, 0x11, 0xf9,
- 0xf6, 0x93, 0x57, 0xc7, 0x26, 0x7a, 0x7d, 0x6c, 0xa2, 0xbf, 0x8e, 0x4d, 0xf4, 0xf5, 0x89, 0x39,
- 0xf3, 0xfa, 0xc4, 0x9c, 0xf9, 0xfd, 0xc4, 0x9c, 0xf9, 0xe2, 0x4e, 0x10, 0xa6, 0x7b, 0x99, 0x67,
- 0xfb, 0xac, 0x4f, 0xd4, 0x7f, 0x06, 0xa1, 0xe7, 0x6f, 0x04, 0x8c, 0xf4, 0x59, 0x37, 0xeb, 0x51,
- 0x2e, 0x9b, 0x6c, 0x76, 0x36, 0x54, 0x9f, 0xf4, 0x20, 0xa6, 0xdc, 0x3b, 0x27, 0x54, 0xf5, 0xf6,
- 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xf6, 0x5d, 0xec, 0x67, 0x82, 0x0c, 0x00, 0x00,
+ // 981 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x57, 0xcd, 0x6f, 0x1b, 0x45,
+ 0x14, 0xcf, 0xa4, 0x69, 0xd4, 0x3e, 0xbb, 0x09, 0x9a, 0xa6, 0xa9, 0xbb, 0x2d, 0x8e, 0xbb, 0x41,
+ 0x34, 0x2d, 0xc9, 0x4e, 0xe2, 0xd0, 0x86, 0x0b, 0x07, 0x52, 0xa9, 0xb4, 0x97, 0xb6, 0x2c, 0x42,
+ 0x48, 0x48, 0x28, 0xda, 0x5d, 0x4f, 0x36, 0x2b, 0xd9, 0x3b, 0xae, 0x67, 0x27, 0x52, 0x54, 0xe5,
+ 0x40, 0x8f, 0x9c, 0x90, 0x90, 0xb8, 0x22, 0x71, 0xe4, 0x50, 0x71, 0x40, 0xe2, 0xca, 0x09, 0xf5,
+ 0x58, 0x09, 0x0e, 0x9c, 0x08, 0x4a, 0xf8, 0x43, 0x90, 0x67, 0x66, 0xe3, 0x9d, 0x78, 0x8c, 0xd7,
+ 0x08, 0x6e, 0x3b, 0xef, 0xf3, 0xf7, 0x3e, 0xe6, 0x37, 0x5a, 0xa8, 0x27, 0x61, 0x44, 0x22, 0xd6,
+ 0xa3, 0x24, 0x6a, 0x27, 0x34, 0xcd, 0xc8, 0xfe, 0x06, 0x79, 0x26, 0x68, 0xef, 0xc0, 0xeb, 0xf6,
+ 0x58, 0xc6, 0x30, 0x4e, 0xc2, 0xc8, 0xeb, 0xeb, 0x3d, 0xa5, 0xf7, 0xf6, 0x37, 0x9c, 0x3b, 0x11,
+ 0xe3, 0x1d, 0xc6, 0x49, 0x18, 0x70, 0xaa, 0x8c, 0xc9, 0xfe, 0x46, 0x48, 0xb3, 0x60, 0x83, 0x74,
+ 0x83, 0x38, 0x49, 0x83, 0x2c, 0x61, 0xa9, 0xf2, 0x77, 0x96, 0x2c, 0xf1, 0x75, 0x24, 0x65, 0x70,
+ 0x2d, 0x66, 0x2c, 0x6e, 0x53, 0x22, 0x4f, 0xa1, 0xd8, 0x25, 0x41, 0xaa, 0x73, 0x3b, 0x37, 0xb4,
+ 0x2a, 0xe8, 0x26, 0x24, 0x48, 0x53, 0x96, 0xc9, 0xc0, 0x5c, 0x6b, 0x17, 0x62, 0x16, 0x33, 0xf9,
+ 0x49, 0xfa, 0x5f, 0x4a, 0xea, 0xde, 0x83, 0xab, 0x1f, 0xf5, 0x11, 0xdd, 0x97, 0x39, 0x3e, 0xce,
+ 0x82, 0x8c, 0xfa, 0xf4, 0x99, 0xa0, 0x3c, 0xc3, 0xd7, 0xe1, 0xa2, 0xca, 0xbc, 0x93, 0xb4, 0x6a,
+ 0xa8, 0x81, 0x56, 0x2e, 0xfa, 0x17, 0x94, 0xe0, 0x51, 0xcb, 0x7d, 0x89, 0xa0, 0x36, 0xec, 0xc8,
+ 0xbb, 0x2c, 0xe5, 0x14, 0x6f, 0x41, 0x55, 0x7b, 0xf2, 0xbe, 0x5c, 0x3a, 0x57, 0x9a, 0x0b, 0x9e,
+ 0xc2, 0xe7, 0xe5, 0xd0, 0xbd, 0x0f, 0xd2, 0x03, 0xbf, 0x12, 0x0d, 0x02, 0xe0, 0x05, 0x38, 0xdf,
+ 0xed, 0x31, 0xb6, 0x5b, 0x9b, 0x6e, 0xa0, 0x95, 0xaa, 0xaf, 0x0e, 0xf8, 0x3e, 0x54, 0xe5, 0xc7,
+ 0xce, 0x1e, 0x4d, 0xe2, 0xbd, 0xac, 0x76, 0x4e, 0x86, 0x73, 0xbc, 0xe1, 0x56, 0x7b, 0x0f, 0xa5,
+ 0xc5, 0xf6, 0xcc, 0xab, 0x3f, 0x96, 0xa6, 0xfc, 0x8a, 0xf4, 0x52, 0x22, 0x37, 0x1c, 0xc6, 0xcb,
+ 0xf3, 0x4a, 0x1f, 0x00, 0x0c, 0x06, 0xa1, 0xd1, 0xbe, 0xed, 0xa9, 0xa9, 0x79, 0xfd, 0xa9, 0x79,
+ 0x6a, 0xc4, 0x7a, 0x6a, 0xde, 0xd3, 0x20, 0xce, 0xbb, 0xe4, 0x17, 0x3c, 0xdd, 0xdf, 0x10, 0x5c,
+ 0xb3, 0x24, 0xd1, 0x5d, 0x49, 0xe1, 0x52, 0xb1, 0x2b, 0xbc, 0x86, 0x1a, 0xe7, 0x56, 0x2a, 0xcd,
+ 0xdb, 0xb6, 0x3a, 0x1e, 0xb5, 0x68, 0x9a, 0x25, 0xbb, 0x09, 0x6d, 0x15, 0x42, 0x6d, 0xd7, 0xfb,
+ 0x65, 0x7d, 0x7f, 0xb4, 0xb4, 0x68, 0x55, 0x73, 0xbf, 0x5a, 0xe8, 0x25, 0xc7, 0x1f, 0x1a, 0x55,
+ 0x4d, 0xcb, 0xaa, 0x6e, 0x8d, 0xad, 0x4a, 0x81, 0x35, 0xca, 0xfa, 0x01, 0x81, 0xa3, 0xca, 0xea,
+ 0xab, 0x52, 0x2e, 0x78, 0xe9, 0x3d, 0xc1, 0xb7, 0x60, 0xbe, 0x47, 0xf7, 0x13, 0x9e, 0xb0, 0x74,
+ 0x27, 0x15, 0x9d, 0x90, 0xf6, 0x24, 0x92, 0x19, 0x7f, 0x2e, 0x17, 0x3f, 0x96, 0x52, 0xc3, 0xb0,
+ 0x30, 0xe7, 0x82, 0xa1, 0x1a, 0x24, 0x5e, 0x86, 0x4b, 0xed, 0x7e, 0x7d, 0x59, 0x6e, 0x36, 0xd3,
+ 0x40, 0x2b, 0x17, 0xfc, 0xaa, 0x12, 0xea, 0x69, 0xff, 0x84, 0xe0, 0xba, 0x15, 0xb2, 0x9e, 0xc5,
+ 0xfb, 0x30, 0x1f, 0xe5, 0x9a, 0x12, 0x4b, 0x3a, 0x17, 0x19, 0x61, 0xfe, 0xcf, 0x3d, 0x7d, 0x61,
+ 0x47, 0xce, 0x4b, 0x75, 0xfb, 0x81, 0x65, 0xe4, 0xff, 0x66, 0x91, 0x7f, 0x41, 0x70, 0xc3, 0x0e,
+ 0x42, 0xf7, 0xef, 0x73, 0x78, 0xe3, 0x4c, 0xff, 0xf2, 0x75, 0x5e, 0xb5, 0x95, 0x6b, 0x86, 0xf9,
+ 0x34, 0xc9, 0xf6, 0x8c, 0x06, 0xcc, 0x9b, 0xed, 0xfd, 0x0f, 0x57, 0x77, 0x6b, 0xe8, 0xd6, 0x8b,
+ 0x52, 0x9d, 0x74, 0x37, 0x87, 0x6e, 0xb2, 0x18, 0x54, 0xbf, 0x08, 0xb3, 0x5c, 0x4a, 0xb4, 0x9b,
+ 0x3e, 0xb9, 0x8e, 0x91, 0xed, 0x69, 0xd0, 0x0b, 0x3a, 0x79, 0x36, 0xf7, 0x89, 0x11, 0x30, 0xd7,
+ 0xe9, 0x80, 0x4d, 0x98, 0xed, 0x4a, 0x89, 0xde, 0x42, 0xeb, 0xce, 0x68, 0x1f, 0x6d, 0xe9, 0xde,
+ 0x84, 0x25, 0x19, 0xf0, 0x93, 0x6e, 0xdc, 0x0b, 0x5a, 0x06, 0x13, 0xe4, 0x39, 0xdb, 0xd0, 0x18,
+ 0x6d, 0xa2, 0x53, 0x3f, 0x84, 0x2b, 0x42, 0xab, 0x77, 0x4a, 0x93, 0xf6, 0x65, 0x31, 0x1c, 0xd1,
+ 0x7d, 0x0b, 0x5c, 0x33, 0x9b, 0x8d, 0x2d, 0x5c, 0x01, 0xcb, 0xff, 0x68, 0xa5, 0x61, 0x3d, 0x86,
+ 0xda, 0x00, 0xd6, 0x04, 0x37, 0x75, 0x51, 0x58, 0xe3, 0x36, 0xbf, 0xa8, 0xc0, 0x79, 0x99, 0x17,
+ 0x7f, 0x8b, 0xa0, 0x52, 0x80, 0x8d, 0xdf, 0xb1, 0xf5, 0x7a, 0xc4, 0x9b, 0xe8, 0xac, 0x96, 0x33,
+ 0x56, 0x45, 0xb8, 0x77, 0x5f, 0xfc, 0xfa, 0xd7, 0xd7, 0xd3, 0x04, 0xaf, 0x91, 0x91, 0xaf, 0xba,
+ 0xbe, 0x3c, 0xe4, 0xf9, 0xe9, 0x2a, 0x1e, 0xe2, 0x6f, 0x10, 0x54, 0x8b, 0xbc, 0x8e, 0x4b, 0x65,
+ 0xcd, 0x37, 0xcd, 0x59, 0x2b, 0x69, 0xad, 0x41, 0xde, 0x96, 0x20, 0x97, 0xf1, 0xcd, 0xb1, 0x20,
+ 0xf1, 0x11, 0x82, 0x39, 0xb3, 0xaf, 0xd8, 0x1b, 0x9d, 0xcc, 0x36, 0x7e, 0x87, 0x94, 0xb6, 0xd7,
+ 0xf0, 0xda, 0x12, 0xde, 0x2e, 0x6e, 0x59, 0xe1, 0x9d, 0xe1, 0xa0, 0x62, 0x1b, 0x49, 0xfe, 0x6e,
+ 0x90, 0xe7, 0x67, 0x5e, 0xa0, 0x43, 0xa2, 0x08, 0xba, 0xa0, 0x50, 0x82, 0x43, 0xfc, 0x12, 0xc1,
+ 0xfc, 0x19, 0xce, 0xc3, 0x65, 0x21, 0x9f, 0x0e, 0x60, 0xbd, 0xbc, 0x83, 0x2e, 0xf2, 0x3d, 0x59,
+ 0x64, 0x13, 0xaf, 0x4f, 0x5a, 0x24, 0xfe, 0xce, 0xd8, 0x15, 0x51, 0x6e, 0x57, 0xc4, 0x44, 0xbb,
+ 0x32, 0x20, 0xbe, 0xd2, 0x0b, 0x2d, 0x4c, 0x90, 0x5f, 0x9e, 0x82, 0x54, 0x1c, 0x36, 0x16, 0xa4,
+ 0x41, 0x9d, 0x63, 0x41, 0x9a, 0x64, 0xea, 0xbe, 0x29, 0x41, 0x5e, 0xc5, 0x57, 0x14, 0xc8, 0x53,
+ 0x7c, 0x8a, 0x37, 0xf1, 0x8f, 0x08, 0x2e, 0x5b, 0x08, 0x11, 0x6f, 0x8e, 0xcc, 0x32, 0x9a, 0x61,
+ 0x9d, 0x77, 0x27, 0x73, 0xd2, 0x08, 0x9b, 0x12, 0xe1, 0x2a, 0xbe, 0x63, 0x6b, 0xa3, 0x95, 0x8d,
+ 0x39, 0xfe, 0x19, 0xc1, 0xa2, 0x9d, 0x33, 0xf1, 0xbd, 0xf1, 0x20, 0xac, 0x77, 0x71, 0x6b, 0x62,
+ 0xbf, 0x32, 0x6b, 0x30, 0x8a, 0xb6, 0xf9, 0xf6, 0x93, 0x57, 0xc7, 0x75, 0xf4, 0xfa, 0xb8, 0x8e,
+ 0xfe, 0x3c, 0xae, 0xa3, 0xaf, 0x4e, 0xea, 0x53, 0xaf, 0x4f, 0xea, 0x53, 0xbf, 0x9f, 0xd4, 0xa7,
+ 0x3e, 0xbb, 0x1b, 0x27, 0xd9, 0x9e, 0x08, 0xbd, 0x88, 0x75, 0x88, 0xfe, 0x59, 0x4a, 0xc2, 0x68,
+ 0x2d, 0x66, 0xa4, 0xc3, 0x5a, 0xa2, 0x4d, 0xb9, 0x4a, 0xb2, 0xde, 0x5c, 0xd3, 0x79, 0xb2, 0x83,
+ 0x2e, 0xe5, 0xe1, 0xac, 0xa4, 0xfe, 0xcd, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0x50, 0xa9, 0x40,
+ 0x56, 0x95, 0x0d, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
@@ -859,6 +957,8 @@ type QueryClient interface {
// ConsensusStates queries all the consensus state associated with a given
// client.
ConsensusStates(ctx context.Context, in *QueryConsensusStatesRequest, opts ...grpc.CallOption) (*QueryConsensusStatesResponse, error)
+ // Status queries the status of an IBC client.
+ ClientStatus(ctx context.Context, in *QueryClientStatusRequest, opts ...grpc.CallOption) (*QueryClientStatusResponse, error)
// ClientParams queries all parameters of the ibc client.
ClientParams(ctx context.Context, in *QueryClientParamsRequest, opts ...grpc.CallOption) (*QueryClientParamsResponse, error)
// UpgradedClientState queries an Upgraded IBC light client.
@@ -911,6 +1011,15 @@ func (c *queryClient) ConsensusStates(ctx context.Context, in *QueryConsensusSta
return out, nil
}
+func (c *queryClient) ClientStatus(ctx context.Context, in *QueryClientStatusRequest, opts ...grpc.CallOption) (*QueryClientStatusResponse, error) {
+ out := new(QueryClientStatusResponse)
+ err := c.cc.Invoke(ctx, "/ibc.core.client.v1.Query/ClientStatus", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
func (c *queryClient) ClientParams(ctx context.Context, in *QueryClientParamsRequest, opts ...grpc.CallOption) (*QueryClientParamsResponse, error) {
out := new(QueryClientParamsResponse)
err := c.cc.Invoke(ctx, "/ibc.core.client.v1.Query/ClientParams", in, out, opts...)
@@ -950,6 +1059,8 @@ type QueryServer interface {
// ConsensusStates queries all the consensus state associated with a given
// client.
ConsensusStates(context.Context, *QueryConsensusStatesRequest) (*QueryConsensusStatesResponse, error)
+ // Status queries the status of an IBC client.
+ ClientStatus(context.Context, *QueryClientStatusRequest) (*QueryClientStatusResponse, error)
// ClientParams queries all parameters of the ibc client.
ClientParams(context.Context, *QueryClientParamsRequest) (*QueryClientParamsResponse, error)
// UpgradedClientState queries an Upgraded IBC light client.
@@ -974,6 +1085,9 @@ func (*UnimplementedQueryServer) ConsensusState(ctx context.Context, req *QueryC
func (*UnimplementedQueryServer) ConsensusStates(ctx context.Context, req *QueryConsensusStatesRequest) (*QueryConsensusStatesResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method ConsensusStates not implemented")
}
+func (*UnimplementedQueryServer) ClientStatus(ctx context.Context, req *QueryClientStatusRequest) (*QueryClientStatusResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ClientStatus not implemented")
+}
func (*UnimplementedQueryServer) ClientParams(ctx context.Context, req *QueryClientParamsRequest) (*QueryClientParamsResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method ClientParams not implemented")
}
@@ -1060,6 +1174,24 @@ func _Query_ConsensusStates_Handler(srv interface{}, ctx context.Context, dec fu
return interceptor(ctx, in, info, handler)
}
+func _Query_ClientStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(QueryClientStatusRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(QueryServer).ClientStatus(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/ibc.core.client.v1.Query/ClientStatus",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(QueryServer).ClientStatus(ctx, req.(*QueryClientStatusRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
func _Query_ClientParams_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(QueryClientParamsRequest)
if err := dec(in); err != nil {
@@ -1134,6 +1266,10 @@ var _Query_serviceDesc = grpc.ServiceDesc{
MethodName: "ConsensusStates",
Handler: _Query_ConsensusStates_Handler,
},
+ {
+ MethodName: "ClientStatus",
+ Handler: _Query_ClientStatus_Handler,
+ },
{
MethodName: "ClientParams",
Handler: _Query_ClientParams_Handler,
@@ -1510,6 +1646,66 @@ func (m *QueryConsensusStatesResponse) MarshalToSizedBuffer(dAtA []byte) (int, e
return len(dAtA) - i, nil
}
+func (m *QueryClientStatusRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *QueryClientStatusRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *QueryClientStatusRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.ClientId) > 0 {
+ i -= len(m.ClientId)
+ copy(dAtA[i:], m.ClientId)
+ i = encodeVarintQuery(dAtA, i, uint64(len(m.ClientId)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *QueryClientStatusResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *QueryClientStatusResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *QueryClientStatusResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Status) > 0 {
+ i -= len(m.Status)
+ copy(dAtA[i:], m.Status)
+ i = encodeVarintQuery(dAtA, i, uint64(len(m.Status)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
func (m *QueryClientParamsRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@@ -1836,6 +2032,32 @@ func (m *QueryConsensusStatesResponse) Size() (n int) {
return n
}
+func (m *QueryClientStatusRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.ClientId)
+ if l > 0 {
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ return n
+}
+
+func (m *QueryClientStatusResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Status)
+ if l > 0 {
+ n += 1 + l + sovQuery(uint64(l))
+ }
+ return n
+}
+
func (m *QueryClientParamsRequest) Size() (n int) {
if m == nil {
return 0
@@ -2880,6 +3102,170 @@ func (m *QueryConsensusStatesResponse) Unmarshal(dAtA []byte) error {
}
return nil
}
+func (m *QueryClientStatusRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: QueryClientStatusRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: QueryClientStatusRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClientId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ClientId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipQuery(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *QueryClientStatusResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: QueryClientStatusResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: QueryClientStatusResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowQuery
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthQuery
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Status = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipQuery(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthQuery
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
func (m *QueryClientParamsRequest) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
diff --git a/modules/core/02-client/types/query.pb.gw.go b/modules/core/02-client/types/query.pb.gw.go
index ed3df357..3704fb07 100644
--- a/modules/core/02-client/types/query.pb.gw.go
+++ b/modules/core/02-client/types/query.pb.gw.go
@@ -309,6 +309,60 @@ func local_request_Query_ConsensusStates_0(ctx context.Context, marshaler runtim
}
+func request_Query_ClientStatus_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq QueryClientStatusRequest
+ var metadata runtime.ServerMetadata
+
+ var (
+ val string
+ ok bool
+ err error
+ _ = err
+ )
+
+ val, ok = pathParams["client_id"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "client_id")
+ }
+
+ protoReq.ClientId, err = runtime.String(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "client_id", err)
+ }
+
+ msg, err := client.ClientStatus(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func local_request_Query_ClientStatus_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq QueryClientStatusRequest
+ var metadata runtime.ServerMetadata
+
+ var (
+ val string
+ ok bool
+ err error
+ _ = err
+ )
+
+ val, ok = pathParams["client_id"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "client_id")
+ }
+
+ protoReq.ClientId, err = runtime.String(val)
+
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "client_id", err)
+ }
+
+ msg, err := server.ClientStatus(ctx, &protoReq)
+ return msg, metadata, err
+
+}
+
func request_Query_ClientParams_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq QueryClientParamsRequest
var metadata runtime.ServerMetadata
@@ -449,6 +503,26 @@ func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, serv
})
+ mux.Handle("GET", pattern_Query_ClientStatus_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Query_ClientStatus_0(rctx, inboundMarshaler, server, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Query_ClientStatus_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
mux.Handle("GET", pattern_Query_ClientParams_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
@@ -630,6 +704,26 @@ func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, clie
})
+ mux.Handle("GET", pattern_Query_ClientStatus_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Query_ClientStatus_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Query_ClientStatus_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
mux.Handle("GET", pattern_Query_ClientParams_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
@@ -702,6 +796,8 @@ var (
pattern_Query_ConsensusStates_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 1, 0, 4, 1, 5, 5}, []string{"ibc", "core", "client", "v1", "consensus_states", "client_id"}, "", runtime.AssumeColonVerbOpt(true)))
+ pattern_Query_ClientStatus_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 1, 0, 4, 1, 5, 5}, []string{"ibc", "core", "client", "v1", "client_status", "client_id"}, "", runtime.AssumeColonVerbOpt(true)))
+
pattern_Query_ClientParams_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"ibc", "client", "v1", "params"}, "", runtime.AssumeColonVerbOpt(true)))
pattern_Query_UpgradedClientState_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"ibc", "core", "client", "v1", "upgraded_client_states"}, "", runtime.AssumeColonVerbOpt(true)))
@@ -718,6 +814,8 @@ var (
forward_Query_ConsensusStates_0 = runtime.ForwardResponseMessage
+ forward_Query_ClientStatus_0 = runtime.ForwardResponseMessage
+
forward_Query_ClientParams_0 = runtime.ForwardResponseMessage
forward_Query_UpgradedClientState_0 = runtime.ForwardResponseMessage
diff --git a/modules/core/03-connection/keeper/verify.go b/modules/core/03-connection/keeper/verify.go
index 9355d4ef..38722f98 100644
--- a/modules/core/03-connection/keeper/verify.go
+++ b/modules/core/03-connection/keeper/verify.go
@@ -17,15 +17,21 @@ func (k Keeper) VerifyClientState(
clientState exported.ClientState,
) error {
clientID := connection.GetClientID()
+ clientStore := k.clientKeeper.ClientStore(ctx, clientID)
+
targetClient, found := k.clientKeeper.GetClientState(ctx, clientID)
if !found {
return sdkerrors.Wrap(clienttypes.ErrClientNotFound, clientID)
}
+ if status := targetClient.Status(ctx, clientStore, k.cdc); status != exported.Active {
+ return sdkerrors.Wrapf(clienttypes.ErrClientNotActive, "client (%s) status is %s", clientID, status)
+ }
+
if err := targetClient.VerifyClientState(
- k.clientKeeper.ClientStore(ctx, clientID), k.cdc, height,
+ clientStore, k.cdc, height,
connection.GetCounterparty().GetPrefix(), connection.GetCounterparty().GetClientID(), proof, clientState); err != nil {
- return sdkerrors.Wrapf(err, "failed client state verification for target client: %s", connection.GetClientID())
+ return sdkerrors.Wrapf(err, "failed client state verification for target client: %s", clientID)
}
return nil
@@ -42,16 +48,22 @@ func (k Keeper) VerifyClientConsensusState(
consensusState exported.ConsensusState,
) error {
clientID := connection.GetClientID()
+ clientStore := k.clientKeeper.ClientStore(ctx, clientID)
+
clientState, found := k.clientKeeper.GetClientState(ctx, clientID)
if !found {
return sdkerrors.Wrap(clienttypes.ErrClientNotFound, clientID)
}
+ if status := clientState.Status(ctx, clientStore, k.cdc); status != exported.Active {
+ return sdkerrors.Wrapf(clienttypes.ErrClientNotActive, "client (%s) status is %s", clientID, status)
+ }
+
if err := clientState.VerifyClientConsensusState(
- k.clientKeeper.ClientStore(ctx, clientID), k.cdc, height,
+ clientStore, k.cdc, height,
connection.GetCounterparty().GetClientID(), consensusHeight, connection.GetCounterparty().GetPrefix(), proof, consensusState,
); err != nil {
- return sdkerrors.Wrapf(err, "failed consensus state verification for client (%s)", connection.GetClientID())
+ return sdkerrors.Wrapf(err, "failed consensus state verification for client (%s)", clientID)
}
return nil
@@ -67,16 +79,23 @@ func (k Keeper) VerifyConnectionState(
connectionID string,
connectionEnd exported.ConnectionI, // opposite connection
) error {
- clientState, found := k.clientKeeper.GetClientState(ctx, connection.GetClientID())
+ clientID := connection.GetClientID()
+ clientStore := k.clientKeeper.ClientStore(ctx, clientID)
+
+ clientState, found := k.clientKeeper.GetClientState(ctx, clientID)
if !found {
- return sdkerrors.Wrap(clienttypes.ErrClientNotFound, connection.GetClientID())
+ return sdkerrors.Wrap(clienttypes.ErrClientNotFound, clientID)
+ }
+
+ if status := clientState.Status(ctx, clientStore, k.cdc); status != exported.Active {
+ return sdkerrors.Wrapf(clienttypes.ErrClientNotActive, "client (%s) status is %s", clientID, status)
}
if err := clientState.VerifyConnectionState(
- k.clientKeeper.ClientStore(ctx, connection.GetClientID()), k.cdc, height,
+ clientStore, k.cdc, height,
connection.GetCounterparty().GetPrefix(), proof, connectionID, connectionEnd,
); err != nil {
- return sdkerrors.Wrapf(err, "failed connection state verification for client (%s)", connection.GetClientID())
+ return sdkerrors.Wrapf(err, "failed connection state verification for client (%s)", clientID)
}
return nil
@@ -93,17 +112,24 @@ func (k Keeper) VerifyChannelState(
channelID string,
channel exported.ChannelI,
) error {
- clientState, found := k.clientKeeper.GetClientState(ctx, connection.GetClientID())
+ clientID := connection.GetClientID()
+ clientStore := k.clientKeeper.ClientStore(ctx, clientID)
+
+ clientState, found := k.clientKeeper.GetClientState(ctx, clientID)
if !found {
- return sdkerrors.Wrap(clienttypes.ErrClientNotFound, connection.GetClientID())
+ return sdkerrors.Wrap(clienttypes.ErrClientNotFound, clientID)
+ }
+
+ if status := clientState.Status(ctx, clientStore, k.cdc); status != exported.Active {
+ return sdkerrors.Wrapf(clienttypes.ErrClientNotActive, "client (%s) status is %s", clientID, status)
}
if err := clientState.VerifyChannelState(
- k.clientKeeper.ClientStore(ctx, connection.GetClientID()), k.cdc, height,
+ clientStore, k.cdc, height,
connection.GetCounterparty().GetPrefix(), proof,
portID, channelID, channel,
); err != nil {
- return sdkerrors.Wrapf(err, "failed channel state verification for client (%s)", connection.GetClientID())
+ return sdkerrors.Wrapf(err, "failed channel state verification for client (%s)", clientID)
}
return nil
@@ -121,18 +147,25 @@ func (k Keeper) VerifyPacketCommitment(
sequence uint64,
commitmentBytes []byte,
) error {
- clientState, found := k.clientKeeper.GetClientState(ctx, connection.GetClientID())
+ clientID := connection.GetClientID()
+ clientStore := k.clientKeeper.ClientStore(ctx, clientID)
+
+ clientState, found := k.clientKeeper.GetClientState(ctx, clientID)
if !found {
- return sdkerrors.Wrap(clienttypes.ErrClientNotFound, connection.GetClientID())
+ return sdkerrors.Wrap(clienttypes.ErrClientNotFound, clientID)
+ }
+
+ if status := clientState.Status(ctx, clientStore, k.cdc); status != exported.Active {
+ return sdkerrors.Wrapf(clienttypes.ErrClientNotActive, "client (%s) status is %s", clientID, status)
}
if err := clientState.VerifyPacketCommitment(
- k.clientKeeper.ClientStore(ctx, connection.GetClientID()), k.cdc, height,
+ clientStore, k.cdc, height,
uint64(ctx.BlockTime().UnixNano()), connection.GetDelayPeriod(),
connection.GetCounterparty().GetPrefix(), proof, portID, channelID,
sequence, commitmentBytes,
); err != nil {
- return sdkerrors.Wrapf(err, "failed packet commitment verification for client (%s)", connection.GetClientID())
+ return sdkerrors.Wrapf(err, "failed packet commitment verification for client (%s)", clientID)
}
return nil
@@ -150,18 +183,25 @@ func (k Keeper) VerifyPacketAcknowledgement(
sequence uint64,
acknowledgement []byte,
) error {
- clientState, found := k.clientKeeper.GetClientState(ctx, connection.GetClientID())
+ clientID := connection.GetClientID()
+ clientStore := k.clientKeeper.ClientStore(ctx, clientID)
+
+ clientState, found := k.clientKeeper.GetClientState(ctx, clientID)
if !found {
- return sdkerrors.Wrap(clienttypes.ErrClientNotFound, connection.GetClientID())
+ return sdkerrors.Wrap(clienttypes.ErrClientNotFound, clientID)
+ }
+
+ if status := clientState.Status(ctx, clientStore, k.cdc); status != exported.Active {
+ return sdkerrors.Wrapf(clienttypes.ErrClientNotActive, "client (%s) status is %s", clientID, status)
}
if err := clientState.VerifyPacketAcknowledgement(
- k.clientKeeper.ClientStore(ctx, connection.GetClientID()), k.cdc, height,
+ clientStore, k.cdc, height,
uint64(ctx.BlockTime().UnixNano()), connection.GetDelayPeriod(),
connection.GetCounterparty().GetPrefix(), proof, portID, channelID,
sequence, acknowledgement,
); err != nil {
- return sdkerrors.Wrapf(err, "failed packet acknowledgement verification for client (%s)", connection.GetClientID())
+ return sdkerrors.Wrapf(err, "failed packet acknowledgement verification for client (%s)", clientID)
}
return nil
@@ -179,18 +219,25 @@ func (k Keeper) VerifyPacketReceiptAbsence(
channelID string,
sequence uint64,
) error {
- clientState, found := k.clientKeeper.GetClientState(ctx, connection.GetClientID())
+ clientID := connection.GetClientID()
+ clientStore := k.clientKeeper.ClientStore(ctx, clientID)
+
+ clientState, found := k.clientKeeper.GetClientState(ctx, clientID)
if !found {
- return sdkerrors.Wrap(clienttypes.ErrClientNotFound, connection.GetClientID())
+ return sdkerrors.Wrap(clienttypes.ErrClientNotFound, clientID)
+ }
+
+ if status := clientState.Status(ctx, clientStore, k.cdc); status != exported.Active {
+ return sdkerrors.Wrapf(clienttypes.ErrClientNotActive, "client (%s) status is %s", clientID, status)
}
if err := clientState.VerifyPacketReceiptAbsence(
- k.clientKeeper.ClientStore(ctx, connection.GetClientID()), k.cdc, height,
+ clientStore, k.cdc, height,
uint64(ctx.BlockTime().UnixNano()), connection.GetDelayPeriod(),
connection.GetCounterparty().GetPrefix(), proof, portID, channelID,
sequence,
); err != nil {
- return sdkerrors.Wrapf(err, "failed packet receipt absence verification for client (%s)", connection.GetClientID())
+ return sdkerrors.Wrapf(err, "failed packet receipt absence verification for client (%s)", clientID)
}
return nil
@@ -207,18 +254,25 @@ func (k Keeper) VerifyNextSequenceRecv(
channelID string,
nextSequenceRecv uint64,
) error {
- clientState, found := k.clientKeeper.GetClientState(ctx, connection.GetClientID())
+ clientID := connection.GetClientID()
+ clientStore := k.clientKeeper.ClientStore(ctx, clientID)
+
+ clientState, found := k.clientKeeper.GetClientState(ctx, clientID)
if !found {
- return sdkerrors.Wrap(clienttypes.ErrClientNotFound, connection.GetClientID())
+ return sdkerrors.Wrap(clienttypes.ErrClientNotFound, clientID)
+ }
+
+ if status := clientState.Status(ctx, clientStore, k.cdc); status != exported.Active {
+ return sdkerrors.Wrapf(clienttypes.ErrClientNotActive, "client (%s) status is %s", clientID, status)
}
if err := clientState.VerifyNextSequenceRecv(
- k.clientKeeper.ClientStore(ctx, connection.GetClientID()), k.cdc, height,
+ clientStore, k.cdc, height,
uint64(ctx.BlockTime().UnixNano()), connection.GetDelayPeriod(),
connection.GetCounterparty().GetPrefix(), proof, portID, channelID,
nextSequenceRecv,
); err != nil {
- return sdkerrors.Wrapf(err, "failed next sequence receive verification for client (%s)", connection.GetClientID())
+ return sdkerrors.Wrapf(err, "failed next sequence receive verification for client (%s)", clientID)
}
return nil
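
Each Verify* method above now repeats the same three-step guard: load the client's prefixed store, fetch the client state, and require Active status before delegating to the client's proof verification. The sketch below restates that shared pattern as a standalone helper; the helper itself is not part of this patch and only reuses the identifiers already present in verify.go.

```go
// getActiveClientState is a hypothetical helper illustrating the guard each
// Verify* method above performs before proof verification: it loads the
// client's prefixed store, fetches the client state, and rejects any client
// whose status is not Active.
func (k Keeper) getActiveClientState(ctx sdk.Context, clientID string) (exported.ClientState, sdk.KVStore, error) {
	clientStore := k.clientKeeper.ClientStore(ctx, clientID)

	clientState, found := k.clientKeeper.GetClientState(ctx, clientID)
	if !found {
		return nil, nil, sdkerrors.Wrap(clienttypes.ErrClientNotFound, clientID)
	}

	if status := clientState.Status(ctx, clientStore, k.cdc); status != exported.Active {
		return nil, nil, sdkerrors.Wrapf(clienttypes.ErrClientNotActive, "client (%s) status is %s", clientID, status)
	}

	return clientState, clientStore, nil
}
```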
diff --git a/modules/core/03-connection/keeper/verify_test.go b/modules/core/03-connection/keeper/verify_test.go
index 2c63ea97..f57953fe 100644
--- a/modules/core/03-connection/keeper/verify_test.go
+++ b/modules/core/03-connection/keeper/verify_test.go
@@ -19,44 +19,56 @@ var defaultTimeoutHeight = clienttypes.NewHeight(0, 100000)
// TestVerifyClientState verifies a client state of chainA
// stored on path.EndpointB (which is on chainB)
func (suite *KeeperTestSuite) TestVerifyClientState() {
+ var (
+ path *ibctesting.Path
+ heightDiff uint64
+ )
cases := []struct {
- msg string
- changeClientID bool
- heightDiff uint64
- malleateCounterparty bool
- expPass bool
+ name string
+ malleate func()
+ expPass bool
}{
- {"verification success", false, 0, false, true},
- {"client state not found", true, 0, false, false},
- {"consensus state for proof height not found", false, 5, false, false},
- {"verification failed", false, 0, true, false},
+ {"verification success", func() {}, true},
+ {"client state not found", func() {
+ connection := path.EndpointA.GetConnection()
+ connection.ClientId = ibctesting.InvalidID
+ path.EndpointA.SetConnection(connection)
+ }, false},
+ {"consensus state for proof height not found", func() {
+ heightDiff = 5
+ }, false},
+ {"verification failed", func() {
+ counterpartyClient := path.EndpointB.GetClientState().(*ibctmtypes.ClientState)
+ counterpartyClient.ChainId = "wrongChainID"
+ path.EndpointB.SetClientState(counterpartyClient)
+ }, false},
+ {"client status is not active - client is expired", func() {
+ clientState := path.EndpointA.GetClientState().(*ibctmtypes.ClientState)
+ clientState.FrozenHeight = clienttypes.NewHeight(0, 1)
+ path.EndpointA.SetClientState(clientState)
+ }, false},
}
for _, tc := range cases {
tc := tc
- suite.Run(tc.msg, func() {
+ suite.Run(tc.name, func() {
suite.SetupTest() // reset
+ heightDiff = 0 // must be explicitly changed
- path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ path = ibctesting.NewPath(suite.chainA, suite.chainB)
suite.coordinator.SetupConnections(path)
+ tc.malleate()
+
counterpartyClient, clientProof := path.EndpointB.QueryClientStateProof()
proofHeight := clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()-1))
- if tc.malleateCounterparty {
- tmClient, _ := counterpartyClient.(*ibctmtypes.ClientState)
- tmClient.ChainId = "wrongChainID"
- }
-
connection := path.EndpointA.GetConnection()
- if tc.changeClientID {
- connection.ClientId = ibctesting.InvalidID
- }
err := suite.chainA.App.GetIBCKeeper().ConnectionKeeper.VerifyClientState(
suite.chainA.GetContext(), connection,
- malleateHeight(proofHeight, tc.heightDiff), clientProof, counterpartyClient,
+ malleateHeight(proofHeight, heightDiff), clientProof, counterpartyClient,
)
if tc.expPass {
@@ -73,19 +85,19 @@ func (suite *KeeperTestSuite) TestVerifyClientState() {
// state for chainA at that height.
func (suite *KeeperTestSuite) TestVerifyClientConsensusState() {
var (
- path *ibctesting.Path
- changeClientID bool
- heightDiff uint64
+ path *ibctesting.Path
+ heightDiff uint64
)
cases := []struct {
- msg string
+ name string
malleate func()
expPass bool
}{
- {"verification success", func() {
- }, true},
+ {"verification success", func() {}, true},
{"client state not found", func() {
- changeClientID = true
+ connection := path.EndpointA.GetConnection()
+ connection.ClientId = ibctesting.InvalidID
+ path.EndpointA.SetConnection(connection)
}, false},
{"consensus state not found", func() {
heightDiff = 5
@@ -105,24 +117,25 @@ func (suite *KeeperTestSuite) TestVerifyClientConsensusState() {
suite.coordinator.CommitBlock(suite.chainB)
}, false},
+ {"client status is not active - client is expired", func() {
+ clientState := path.EndpointA.GetClientState().(*ibctmtypes.ClientState)
+ clientState.FrozenHeight = clienttypes.NewHeight(0, 1)
+ path.EndpointA.SetClientState(clientState)
+ }, false},
}
for _, tc := range cases {
tc := tc
- suite.Run(tc.msg, func() {
- suite.SetupTest() // reset
- heightDiff = 0 // must be explicitly changed in malleate
- changeClientID = false // must be explicitly changed in malleate
+ suite.Run(tc.name, func() {
+ suite.SetupTest() // reset
+ heightDiff = 0 // must be explicitly changed in malleate
path = ibctesting.NewPath(suite.chainA, suite.chainB)
suite.coordinator.SetupConnections(path)
tc.malleate()
connection := path.EndpointA.GetConnection()
- if changeClientID {
- connection.ClientId = ibctesting.InvalidID
- }
proof, consensusHeight := suite.chainB.QueryConsensusStateProof(path.EndpointB.ClientID)
proofHeight := clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()-1))
@@ -146,44 +159,57 @@ func (suite *KeeperTestSuite) TestVerifyClientConsensusState() {
// TestVerifyConnectionState verifies the connection state of the connection
// on chainB. The connections on chainA and chainB are fully opened.
func (suite *KeeperTestSuite) TestVerifyConnectionState() {
+ var (
+ path *ibctesting.Path
+ heightDiff uint64
+ )
cases := []struct {
- msg string
- changeClientID bool
- changeConnectionState bool
- heightDiff uint64
- expPass bool
+ name string
+ malleate func()
+ expPass bool
}{
- {"verification success", false, false, 0, true},
- {"client state not found - changed client ID", true, false, 0, false},
- {"consensus state not found - increased proof height", false, false, 5, false},
- {"verification failed - connection state is different than proof", false, true, 0, false},
+ {"verification success", func() {}, true},
+ {"client state not found - changed client ID", func() {
+ connection := path.EndpointA.GetConnection()
+ connection.ClientId = ibctesting.InvalidID
+ path.EndpointA.SetConnection(connection)
+ }, false},
+ {"consensus state not found - increased proof height", func() {
+ heightDiff = 5
+ }, false},
+ {"verification failed - connection state is different than proof", func() {
+ connection := path.EndpointA.GetConnection()
+ connection.State = types.TRYOPEN
+ path.EndpointA.SetConnection(connection)
+ }, false},
+ {"client status is not active - client is expired", func() {
+ clientState := path.EndpointA.GetClientState().(*ibctmtypes.ClientState)
+ clientState.FrozenHeight = clienttypes.NewHeight(0, 1)
+ path.EndpointA.SetClientState(clientState)
+ }, false},
}
for _, tc := range cases {
tc := tc
- suite.Run(tc.msg, func() {
+ suite.Run(tc.name, func() {
suite.SetupTest() // reset
- path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ path = ibctesting.NewPath(suite.chainA, suite.chainB)
suite.coordinator.SetupConnections(path)
- connection := path.EndpointA.GetConnection()
- if tc.changeClientID {
- connection.ClientId = ibctesting.InvalidID
- }
- expectedConnection := path.EndpointB.GetConnection()
-
connectionKey := host.ConnectionKey(path.EndpointB.ConnectionID)
proof, proofHeight := suite.chainB.QueryProof(connectionKey)
- if tc.changeConnectionState {
- expectedConnection.State = types.TRYOPEN
- }
+ tc.malleate()
+
+ connection := path.EndpointA.GetConnection()
+
+ expectedConnection := path.EndpointB.GetConnection()
err := suite.chainA.App.GetIBCKeeper().ConnectionKeeper.VerifyConnectionState(
suite.chainA.GetContext(), connection,
- malleateHeight(proofHeight, tc.heightDiff), proof, path.EndpointB.ConnectionID, expectedConnection,
+ malleateHeight(proofHeight, heightDiff), proof, path.EndpointB.ConnectionID, expectedConnection,
)
if tc.expPass {
@@ -198,42 +224,55 @@ func (suite *KeeperTestSuite) TestVerifyConnectionState() {
// TestVerifyChannelState verifies the channel state of the channel on
// chainB. The channels on chainA and chainB are fully opened.
func (suite *KeeperTestSuite) TestVerifyChannelState() {
+ var (
+ path *ibctesting.Path
+ heightDiff uint64
+ )
cases := []struct {
- msg string
- changeClientID bool
- changeChannelState bool
- heightDiff uint64
- expPass bool
+ name string
+ malleate func()
+ expPass bool
}{
- {"verification success", false, false, 0, true},
- {"client state not found- changed client ID", true, false, 0, false},
- {"consensus state not found - increased proof height", false, false, 5, false},
- {"verification failed - changed channel state", false, true, 0, false},
+ {"verification success", func() {}, true},
+ {"client state not found- changed client ID", func() {
+ connection := path.EndpointA.GetConnection()
+ connection.ClientId = ibctesting.InvalidID
+ path.EndpointA.SetConnection(connection)
+ }, false},
+ {"consensus state not found - increased proof height", func() {
+ heightDiff = 5
+ }, false},
+ {"verification failed - changed channel state", func() {
+ channel := path.EndpointA.GetChannel()
+ channel.State = channeltypes.TRYOPEN
+ path.EndpointA.SetChannel(channel)
+ }, false},
+ {"client status is not active - client is expired", func() {
+ clientState := path.EndpointA.GetClientState().(*ibctmtypes.ClientState)
+ clientState.FrozenHeight = clienttypes.NewHeight(0, 1)
+ path.EndpointA.SetClientState(clientState)
+ }, false},
}
for _, tc := range cases {
tc := tc
- suite.Run(fmt.Sprintf("Case %s", tc.msg), func() {
+ suite.Run(fmt.Sprintf("Case %s", tc.name), func() {
suite.SetupTest() // reset
- path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ path = ibctesting.NewPath(suite.chainA, suite.chainB)
suite.coordinator.Setup(path)
- connection := path.EndpointA.GetConnection()
- if tc.changeClientID {
- connection.ClientId = ibctesting.InvalidID
- }
channelKey := host.ChannelKey(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID)
proof, proofHeight := suite.chainB.QueryProof(channelKey)
+ tc.malleate()
+ connection := path.EndpointA.GetConnection()
+
channel := path.EndpointB.GetChannel()
- if tc.changeChannelState {
- channel.State = channeltypes.TRYOPEN
- }
err := suite.chainA.App.GetIBCKeeper().ConnectionKeeper.VerifyChannelState(
- suite.chainA.GetContext(), connection, malleateHeight(proofHeight, tc.heightDiff), proof,
+ suite.chainA.GetContext(), connection, malleateHeight(proofHeight, heightDiff), proof,
path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, channel,
)
@@ -250,51 +289,65 @@ func (suite *KeeperTestSuite) TestVerifyChannelState() {
// on channelA. The channels on chainA and chainB are fully opened and a
// packet is sent from chainA to chainB, but has not been received.
func (suite *KeeperTestSuite) TestVerifyPacketCommitment() {
+ var (
+ path *ibctesting.Path
+ packet channeltypes.Packet
+ heightDiff uint64
+ delayPeriod uint64
+ )
cases := []struct {
- msg string
- changeClientID bool
- changePacketCommitmentState bool
- heightDiff uint64
- delayPeriod uint64
- expPass bool
+ name string
+ malleate func()
+ expPass bool
}{
- {"verification success", false, false, 0, 0, true},
- {"verification success: delay period passed", false, false, 0, uint64(1 * time.Second.Nanoseconds()), true},
- {"delay period has not passed", false, false, 0, uint64(1 * time.Hour.Nanoseconds()), false},
- {"client state not found- changed client ID", true, false, 0, 0, false},
- {"consensus state not found - increased proof height", false, false, 5, 0, false},
- {"verification failed - changed packet commitment state", false, true, 0, 0, false},
+ {"verification success", func() {}, true},
+ {"verification success: delay period passed", func() {
+ delayPeriod = uint64(1 * time.Second.Nanoseconds())
+ }, true},
+ {"delay period has not passed", func() {
+ delayPeriod = uint64(1 * time.Hour.Nanoseconds())
+ }, false},
+ {"client state not found- changed client ID", func() {
+ connection := path.EndpointB.GetConnection()
+ connection.ClientId = ibctesting.InvalidID
+ path.EndpointB.SetConnection(connection)
+ }, false},
+ {"consensus state not found - increased proof height", func() {
+ heightDiff = 5
+ }, false},
+ {"verification failed - changed packet commitment state", func() {
+ packet.Data = []byte(ibctesting.InvalidID)
+ }, false},
+ {"client status is not active - client is expired", func() {
+ clientState := path.EndpointB.GetClientState().(*ibctmtypes.ClientState)
+ clientState.FrozenHeight = clienttypes.NewHeight(0, 1)
+ path.EndpointB.SetClientState(clientState)
+ }, false},
}
for _, tc := range cases {
tc := tc
- suite.Run(tc.msg, func() {
+ suite.Run(tc.name, func() {
suite.SetupTest() // reset
- path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ path = ibctesting.NewPath(suite.chainA, suite.chainB)
suite.coordinator.Setup(path)
- connection := path.EndpointB.GetConnection()
- connection.DelayPeriod = tc.delayPeriod
- if tc.changeClientID {
- connection.ClientId = ibctesting.InvalidID
- }
-
- packet := channeltypes.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, defaultTimeoutHeight, 0)
+ packet = channeltypes.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, defaultTimeoutHeight, 0)
err := path.EndpointA.SendPacket(packet)
suite.Require().NoError(err)
+ tc.malleate()
+
+ connection := path.EndpointB.GetConnection()
+ connection.DelayPeriod = delayPeriod
commitmentKey := host.PacketCommitmentKey(packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence())
proof, proofHeight := suite.chainA.QueryProof(commitmentKey)
- if tc.changePacketCommitmentState {
- packet.Data = []byte(ibctesting.InvalidID)
- }
-
commitment := channeltypes.CommitPacket(suite.chainB.App.GetIBCKeeper().Codec(), packet)
err = suite.chainB.App.GetIBCKeeper().ConnectionKeeper.VerifyPacketCommitment(
- suite.chainB.GetContext(), connection, malleateHeight(proofHeight, tc.heightDiff), proof,
+ suite.chainB.GetContext(), connection, malleateHeight(proofHeight, heightDiff), proof,
packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence(), commitment,
)
@@ -311,37 +364,53 @@ func (suite *KeeperTestSuite) TestVerifyPacketCommitment() {
// channelB. The channels on chainA and chainB are fully opened and a packet
// is sent from chainA to chainB and received.
func (suite *KeeperTestSuite) TestVerifyPacketAcknowledgement() {
+ var (
+ path *ibctesting.Path
+ ack exported.Acknowledgement
+ heightDiff uint64
+ delayPeriod uint64
+ )
+
cases := []struct {
- msg string
- changeClientID bool
- changeAcknowledgement bool
- heightDiff uint64
- delayPeriod uint64
- expPass bool
+ name string
+ malleate func()
+ expPass bool
}{
- {"verification success", false, false, 0, 0, true},
- {"verification success: delay period passed", false, false, 0, uint64(1 * time.Second.Nanoseconds()), true},
- {"delay period has not passed", false, false, 0, uint64(1 * time.Hour.Nanoseconds()), false},
- {"client state not found- changed client ID", true, false, 0, 0, false},
- {"consensus state not found - increased proof height", false, false, 5, 0, false},
- {"verification failed - changed acknowledgement", false, true, 0, 0, false},
+ {"verification success", func() {}, true},
+ {"verification success: delay period passed", func() {
+ delayPeriod = uint64(1 * time.Second.Nanoseconds())
+ }, true},
+ {"delay period has not passed", func() {
+ delayPeriod = uint64(1 * time.Hour.Nanoseconds())
+ }, false},
+ {"client state not found- changed client ID", func() {
+ connection := path.EndpointA.GetConnection()
+ connection.ClientId = ibctesting.InvalidID
+ path.EndpointA.SetConnection(connection)
+ }, false},
+ {"consensus state not found - increased proof height", func() {
+ heightDiff = 5
+ }, false},
+ {"verification failed - changed acknowledgement", func() {
+ ack = ibcmock.MockFailAcknowledgement
+ }, false},
+ {"client status is not active - client is expired", func() {
+ clientState := path.EndpointA.GetClientState().(*ibctmtypes.ClientState)
+ clientState.FrozenHeight = clienttypes.NewHeight(0, 1)
+ path.EndpointA.SetClientState(clientState)
+ }, false},
}
for _, tc := range cases {
tc := tc
- suite.Run(tc.msg, func() {
- suite.SetupTest() // reset
+ suite.Run(tc.name, func() {
+ suite.SetupTest() // reset
+ ack = ibcmock.MockAcknowledgement // must be explicitly changed
- path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ path = ibctesting.NewPath(suite.chainA, suite.chainB)
suite.coordinator.Setup(path)
- connection := path.EndpointA.GetConnection()
- connection.DelayPeriod = tc.delayPeriod
- if tc.changeClientID {
- connection.ClientId = ibctesting.InvalidID
- }
-
// send and receive packet
packet := channeltypes.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, defaultTimeoutHeight, 0)
err := path.EndpointA.SendPacket(packet)
@@ -357,13 +426,13 @@ func (suite *KeeperTestSuite) TestVerifyPacketAcknowledgement() {
packetAckKey := host.PacketAcknowledgementKey(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence())
proof, proofHeight := suite.chainB.QueryProof(packetAckKey)
- ack := ibcmock.MockAcknowledgement
- if tc.changeAcknowledgement {
- ack = ibcmock.MockFailAcknowledgement
- }
+ tc.malleate()
+
+ connection := path.EndpointA.GetConnection()
+ connection.DelayPeriod = delayPeriod
err = suite.chainA.App.GetIBCKeeper().ConnectionKeeper.VerifyPacketAcknowledgement(
- suite.chainA.GetContext(), connection, malleateHeight(proofHeight, tc.heightDiff), proof,
+ suite.chainA.GetContext(), connection, malleateHeight(proofHeight, heightDiff), proof,
packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence(), ack.Acknowledgement(),
)
@@ -380,51 +449,70 @@ func (suite *KeeperTestSuite) TestVerifyPacketAcknowledgement() {
// absence on channelB. The channels on chainA and chainB are fully opened and
// a packet is sent from chainA to chainB and not received.
func (suite *KeeperTestSuite) TestVerifyPacketReceiptAbsence() {
+ var (
+ path *ibctesting.Path
+ packet channeltypes.Packet
+ heightDiff uint64
+ delayPeriod uint64
+ )
+
cases := []struct {
- msg string
- changeClientID bool
- recvAck bool
- heightDiff uint64
- delayPeriod uint64
- expPass bool
+ name string
+ malleate func()
+ expPass bool
}{
- {"verification success", false, false, 0, 0, true},
- {"verification success: delay period passed", false, false, 0, uint64(1 * time.Second.Nanoseconds()), true},
- {"delay period has not passed", false, false, 0, uint64(1 * time.Hour.Nanoseconds()), false},
- {"client state not found - changed client ID", true, false, 0, 0, false},
- {"consensus state not found - increased proof height", false, false, 5, 0, false},
- {"verification failed - acknowledgement was received", false, true, 0, 0, false},
+ {"verification success", func() {}, true},
+ {"verification success: delay period passed", func() {
+ delayPeriod = uint64(1 * time.Second.Nanoseconds())
+ }, true},
+ {"delay period has not passed", func() {
+ delayPeriod = uint64(1 * time.Hour.Nanoseconds())
+ }, false},
+ {"client state not found - changed client ID", func() {
+ connection := path.EndpointA.GetConnection()
+ connection.ClientId = ibctesting.InvalidID
+ path.EndpointA.SetConnection(connection)
+ }, false},
+ {"consensus state not found - increased proof height", func() {
+ heightDiff = 5
+ }, false},
+ {"verification failed - acknowledgement was received", func() {
+ // increment receiving chain's (chainB) time by 2 hours to always pass receive
+ suite.coordinator.IncrementTimeBy(time.Hour * 2)
+ suite.coordinator.CommitBlock(suite.chainB)
+
+ err := path.EndpointB.RecvPacket(packet)
+ suite.Require().NoError(err)
+ }, false},
+ {"client status is not active - client is expired", func() {
+ clientState := path.EndpointA.GetClientState().(*ibctmtypes.ClientState)
+ clientState.FrozenHeight = clienttypes.NewHeight(0, 1)
+ path.EndpointA.SetClientState(clientState)
+ }, false},
}
for _, tc := range cases {
tc := tc
- suite.Run(tc.msg, func() {
+ suite.Run(tc.name, func() {
suite.SetupTest() // reset
- path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ path = ibctesting.NewPath(suite.chainA, suite.chainB)
suite.coordinator.Setup(path)
- connection := path.EndpointA.GetConnection()
- connection.DelayPeriod = tc.delayPeriod
- if tc.changeClientID {
- connection.ClientId = ibctesting.InvalidID
- }
-
- // send, only receive if specified
- packet := channeltypes.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, defaultTimeoutHeight, 0)
+ // send, only receive in malleate if applicable
+ packet = channeltypes.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, defaultTimeoutHeight, 0)
err := path.EndpointA.SendPacket(packet)
suite.Require().NoError(err)
- if tc.recvAck {
- // increment receiving chain's (chainB) time by 2 hour to always pass receive
- suite.coordinator.IncrementTimeBy(time.Hour * 2)
- suite.coordinator.CommitBlock(suite.chainB)
+ tc.malleate()
- err = path.EndpointB.RecvPacket(packet)
- suite.Require().NoError(err)
- } else {
- // need to update height to prove absence
+ connection := path.EndpointA.GetConnection()
+ connection.DelayPeriod = delayPeriod
+
+ clientState := path.EndpointA.GetClientState().(*ibctmtypes.ClientState)
+ if clientState.FrozenHeight.IsZero() {
+ // need to update height to prove absence or receipt
suite.coordinator.CommitBlock(suite.chainA, suite.chainB)
path.EndpointA.UpdateClient()
}
@@ -433,7 +521,7 @@ func (suite *KeeperTestSuite) TestVerifyPacketReceiptAbsence() {
proof, proofHeight := suite.chainB.QueryProof(packetReceiptKey)
err = suite.chainA.App.GetIBCKeeper().ConnectionKeeper.VerifyPacketReceiptAbsence(
- suite.chainA.GetContext(), connection, malleateHeight(proofHeight, tc.heightDiff), proof,
+ suite.chainA.GetContext(), connection, malleateHeight(proofHeight, heightDiff), proof,
packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence(),
)
@@ -450,37 +538,52 @@ func (suite *KeeperTestSuite) TestVerifyPacketReceiptAbsence() {
// channelB. The channels on chainA and chainB are fully opened and a packet
// is sent from chainA to chainB and received.
func (suite *KeeperTestSuite) TestVerifyNextSequenceRecv() {
+ var (
+ path *ibctesting.Path
+ heightDiff uint64
+ delayPeriod uint64
+ offsetSeq uint64
+ )
+
cases := []struct {
- msg string
- changeClientID bool
- offsetSeq uint64
- heightDiff uint64
- delayPeriod uint64
- expPass bool
+ name string
+ malleate func()
+ expPass bool
}{
- {"verification success", false, 0, 0, 0, true},
- {"verification success: delay period passed", false, 0, 0, uint64(1 * time.Second.Nanoseconds()), true},
- {"delay period has not passed", false, 0, 0, uint64(1 * time.Hour.Nanoseconds()), false},
- {"client state not found- changed client ID", true, 0, 0, 0, false},
- {"consensus state not found - increased proof height", false, 0, 5, 0, false},
- {"verification failed - wrong expected next seq recv", false, 1, 0, 0, false},
+ {"verification success", func() {}, true},
+ {"verification success: delay period passed", func() {
+ delayPeriod = uint64(1 * time.Second.Nanoseconds())
+ }, true},
+ {"delay period has not passed", func() {
+ delayPeriod = uint64(1 * time.Hour.Nanoseconds())
+ }, false},
+ {"client state not found- changed client ID", func() {
+ connection := path.EndpointA.GetConnection()
+ connection.ClientId = ibctesting.InvalidID
+ path.EndpointA.SetConnection(connection)
+ }, false},
+ {"consensus state not found - increased proof height", func() {
+ heightDiff = 5
+ }, false},
+ {"verification failed - wrong expected next seq recv", func() {
+ offsetSeq = 1
+ }, false},
+ {"client status is not active - client is expired", func() {
+ clientState := path.EndpointA.GetClientState().(*ibctmtypes.ClientState)
+ clientState.FrozenHeight = clienttypes.NewHeight(0, 1)
+ path.EndpointA.SetClientState(clientState)
+ }, false},
}
for _, tc := range cases {
tc := tc
- suite.Run(tc.msg, func() {
+ suite.Run(tc.name, func() {
suite.SetupTest() // reset
- path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ path = ibctesting.NewPath(suite.chainA, suite.chainB)
suite.coordinator.Setup(path)
- connection := path.EndpointA.GetConnection()
- connection.DelayPeriod = tc.delayPeriod
- if tc.changeClientID {
- connection.ClientId = ibctesting.InvalidID
- }
-
// send and receive packet
packet := channeltypes.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, defaultTimeoutHeight, 0)
err := path.EndpointA.SendPacket(packet)
@@ -496,9 +599,13 @@ func (suite *KeeperTestSuite) TestVerifyNextSequenceRecv() {
nextSeqRecvKey := host.NextSequenceRecvKey(packet.GetDestPort(), packet.GetDestChannel())
proof, proofHeight := suite.chainB.QueryProof(nextSeqRecvKey)
+ tc.malleate()
+
+ connection := path.EndpointA.GetConnection()
+ connection.DelayPeriod = delayPeriod
err = suite.chainA.App.GetIBCKeeper().ConnectionKeeper.VerifyNextSequenceRecv(
- suite.chainA.GetContext(), connection, malleateHeight(proofHeight, tc.heightDiff), proof,
- packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence()+tc.offsetSeq,
+ suite.chainA.GetContext(), connection, malleateHeight(proofHeight, heightDiff), proof,
+ packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence()+offsetSeq,
)
if tc.expPass {
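The test refactor above replaces per-case boolean/field flags with the table-driven "malleate" pattern: shared variables are reset for every case, a per-case closure mutates them, and only then is the verification call made. A minimal, self-contained sketch of that pattern (hypothetical test and variable names, not part of this patch):

```go
package example

import "testing"

// A minimal sketch of the malleate pattern used by the converted tests:
// shared state is reset per case, mutated by the case's closure, then asserted.
func TestMalleatePattern(t *testing.T) {
	var delayPeriod uint64 // shared state mutated by the malleate closures

	cases := []struct {
		name     string
		malleate func()
		expPass  bool
	}{
		{"success", func() {}, true},
		{"delay period not passed", func() { delayPeriod = 1 << 40 }, false},
	}

	for _, tc := range cases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			delayPeriod = 0 // reset shared state for every case
			tc.malleate()   // apply the case-specific mutation

			ok := delayPeriod == 0 // stand-in for the real verification call
			if ok != tc.expPass {
				t.Fatalf("expected pass=%v, got %v", tc.expPass, ok)
			}
		})
	}
}
```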
diff --git a/modules/core/04-channel/keeper/packet.go b/modules/core/04-channel/keeper/packet.go
index 7634ebf4..72a1ff5b 100644
--- a/modules/core/04-channel/keeper/packet.go
+++ b/modules/core/04-channel/keeper/packet.go
@@ -69,8 +69,9 @@ func (k Keeper) SendPacket(
}
// prevent accidental sends with clients that cannot be updated
- if clientState.IsFrozen() {
- return sdkerrors.Wrapf(clienttypes.ErrClientFrozen, "cannot send packet on a frozen client with ID %s", connectionEnd.GetClientID())
+ clientStore := k.clientKeeper.ClientStore(ctx, connectionEnd.GetClientID())
+ if status := clientState.Status(ctx, clientStore, k.cdc); status != exported.Active {
+ return sdkerrors.Wrapf(clienttypes.ErrClientNotActive, "cannot send packet using client (%s) with status %s", connectionEnd.GetClientID(), status)
}
// check if packet timed out on the receiving chain
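With this change SendPacket aborts unless the client backing the connection reports an Active status. A hedged sketch of that guard in isolation (the Status type and helper below are simplified stand-ins, not the keeper's actual API):

```go
package example

import "fmt"

// Status is a simplified stand-in for exported.Status.
type Status string

const (
	Active Status = "Active"
	Frozen Status = "Frozen"
)

// ensureActiveClient is a hypothetical helper with the same shape as the new
// SendPacket guard: any status other than Active aborts the send.
func ensureActiveClient(clientID string, status Status) error {
	if status != Active {
		return fmt.Errorf("cannot send packet using client (%s) with status %s", clientID, status)
	}
	return nil
}

func main() {
	if err := ensureActiveClient("07-tendermint-0", Frozen); err != nil {
		fmt.Println(err) // cannot send packet using client (07-tendermint-0) with status Frozen
	}
}
```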
diff --git a/modules/core/04-channel/types/expected_keepers.go b/modules/core/04-channel/types/expected_keepers.go
index b109b6c1..ec590cd5 100644
--- a/modules/core/04-channel/types/expected_keepers.go
+++ b/modules/core/04-channel/types/expected_keepers.go
@@ -11,6 +11,7 @@ import (
type ClientKeeper interface {
GetClientState(ctx sdk.Context, clientID string) (exported.ClientState, bool)
GetClientConsensusState(ctx sdk.Context, clientID string, height exported.Height) (exported.ConsensusState, bool)
+ ClientStore(ctx sdk.Context, clientID string) sdk.KVStore
}
// ConnectionKeeper expected account IBC connection keeper
diff --git a/modules/core/exported/client.go b/modules/core/exported/client.go
index 3d552b07..890a93d1 100644
--- a/modules/core/exported/client.go
+++ b/modules/core/exported/client.go
@@ -8,6 +8,9 @@ import (
sdk "github.com/cosmos/cosmos-sdk/types"
)
+// Status represents the status of a client
+type Status string
+
const (
// TypeClientMisbehaviour is the shared evidence misbehaviour type
TypeClientMisbehaviour string = "client_misbehaviour"
@@ -21,6 +24,18 @@ const (
// Localhost is the client type for a localhost client. It is also used as the clientID
// for the localhost client.
Localhost string = "09-localhost"
+
+ // Active is a status type of a client. An active client is allowed to be used.
+ Active Status = "Active"
+
+ // Frozen is a status type of a client. A frozen client is not allowed to be used.
+ Frozen Status = "Frozen"
+
+ // Expired is a status type of a client. An expired client is not allowed to be used.
+ Expired Status = "Expired"
+
+ // Unknown indicates there was an error in determining the status of a client.
+ Unknown Status = "Unknown"
)
// ClientState defines the required common functions for light clients.
@@ -29,7 +44,6 @@ type ClientState interface {
ClientType() string
GetLatestHeight() Height
- IsFrozen() bool
GetFrozenHeight() Height
Validate() error
GetProofSpecs() []*ics23.ProofSpec
@@ -39,6 +53,10 @@ type ClientState interface {
// necessary for correct light client operation
Initialize(sdk.Context, codec.BinaryMarshaler, sdk.KVStore, ConsensusState) error
+ // Status function
+ // Clients must return their status. Only Active clients are allowed to process packets.
+ Status(ctx sdk.Context, clientStore sdk.KVStore, cdc codec.BinaryMarshaler) Status
+
// Genesis function
ExportMetadata(sdk.KVStore) []GenesisMetadata
@@ -221,3 +239,8 @@ type GenesisMetadata interface {
// returns metadata value
GetValue() []byte
}
+
+// String returns the string representation of a client status.
+func (s Status) String() string {
+ return string(s)
+}
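exported/client.go now defines a Status type with four values and a String method, and drops IsFrozen from the ClientState interface. A small sketch of how callers can branch on the new type (the describe helper is illustrative only; the constants mirror those added above):

```go
package example

import "fmt"

// Status mirrors the new exported.Status string type.
type Status string

// String returns the string representation of a status, as in the patch.
func (s Status) String() string { return string(s) }

const (
	Active  Status = "Active"
	Frozen  Status = "Frozen"
	Expired Status = "Expired"
	Unknown Status = "Unknown"
)

// describe is an illustrative helper showing how callers branch on a client's
// status now that the boolean IsFrozen has been removed from the interface.
func describe(s Status) string {
	switch s {
	case Active:
		return "client can process packets"
	case Frozen, Expired:
		return "client unusable: " + s.String()
	default:
		return "status could not be determined"
	}
}

func main() {
	fmt.Println(describe(Frozen)) // client unusable: Frozen
}
```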
diff --git a/modules/core/keeper/grpc_query.go b/modules/core/keeper/grpc_query.go
index 365cae03..f647ab5a 100644
--- a/modules/core/keeper/grpc_query.go
+++ b/modules/core/keeper/grpc_query.go
@@ -28,6 +28,11 @@ func (q Keeper) ConsensusStates(c context.Context, req *clienttypes.QueryConsens
return q.ClientKeeper.ConsensusStates(c, req)
}
+// ClientStatus implements the IBC QueryServer interface
+func (q Keeper) ClientStatus(c context.Context, req *clienttypes.QueryClientStatusRequest) (*clienttypes.QueryClientStatusResponse, error) {
+ return q.ClientKeeper.ClientStatus(c, req)
+}
+
// ClientParams implements the IBC QueryServer interface
func (q Keeper) ClientParams(c context.Context, req *clienttypes.QueryClientParamsRequest) (*clienttypes.QueryClientParamsResponse, error) {
return q.ClientKeeper.ClientParams(c, req)
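The core keeper simply forwards ClientStatus queries to the 02-client keeper. A self-contained sketch of the handler's shape (the request/response structs below are simplified stand-ins for the generated proto types):

```go
package example

import "fmt"

// Simplified stand-ins for the generated QueryClientStatus proto types.
type QueryClientStatusRequest struct{ ClientId string }
type QueryClientStatusResponse struct{ Status string }

// clientStatus mimics the keeper method the new QueryServer entry delegates to:
// validate the request, look up the client, and report its status as a string.
func clientStatus(req *QueryClientStatusRequest) (*QueryClientStatusResponse, error) {
	if req == nil || req.ClientId == "" {
		return nil, fmt.Errorf("client ID cannot be empty")
	}
	// a real implementation retrieves the client state and calls Status(ctx, store, cdc)
	return &QueryClientStatusResponse{Status: "Active"}, nil
}

func main() {
	resp, err := clientStatus(&QueryClientStatusRequest{ClientId: "07-tendermint-0"})
	if err != nil {
		panic(err)
	}
	fmt.Println(resp.Status) // Active
}
```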
diff --git a/modules/light-clients/06-solomachine/types/client_state.go b/modules/light-clients/06-solomachine/types/client_state.go
index efa740ca..a45ccc31 100644
--- a/modules/light-clients/06-solomachine/types/client_state.go
+++ b/modules/light-clients/06-solomachine/types/client_state.go
@@ -40,6 +40,18 @@ func (cs ClientState) GetLatestHeight() exported.Height {
return clienttypes.NewHeight(0, cs.Sequence)
}
+// Status returns the status of the solo machine client.
+// The client may be:
+// - Active: if the frozen sequence is 0
+// - Frozen: if the frozen sequence is non-zero
+func (cs ClientState) Status(_ sdk.Context, _ sdk.KVStore, _ codec.BinaryMarshaler) exported.Status {
+ if cs.FrozenSequence != 0 {
+ return exported.Frozen
+ }
+
+ return exported.Active
+}
+
// IsFrozen returns true if the client is frozen.
func (cs ClientState) IsFrozen() bool {
return cs.FrozenSequence != 0
diff --git a/modules/light-clients/06-solomachine/types/client_state_test.go b/modules/light-clients/06-solomachine/types/client_state_test.go
index 6bab40ff..bd2587e3 100644
--- a/modules/light-clients/06-solomachine/types/client_state_test.go
+++ b/modules/light-clients/06-solomachine/types/client_state_test.go
@@ -25,6 +25,18 @@ var (
consensusHeight = clienttypes.ZeroHeight()
)
+func (suite *SoloMachineTestSuite) TestStatus() {
+ clientState := suite.solomachine.ClientState()
+ // solo machine discards arguments
+ status := clientState.Status(suite.chainA.GetContext(), nil, nil)
+ suite.Require().Equal(exported.Active, status)
+
+ // freeze solo machine
+ clientState.FrozenSequence = 1
+ status = clientState.Status(suite.chainA.GetContext(), nil, nil)
+ suite.Require().Equal(exported.Frozen, status)
+}
+
func (suite *SoloMachineTestSuite) TestClientStateValidateBasic() {
// test singlesig and multisig public keys
for _, solomachine := range []*ibctesting.Solomachine{suite.solomachine, suite.solomachineMulti} {
diff --git a/modules/light-clients/06-solomachine/types/misbehaviour_handle_test.go b/modules/light-clients/06-solomachine/types/misbehaviour_handle_test.go
index efdd0722..9f35f7ed 100644
--- a/modules/light-clients/06-solomachine/types/misbehaviour_handle_test.go
+++ b/modules/light-clients/06-solomachine/types/misbehaviour_handle_test.go
@@ -264,7 +264,7 @@ func (suite *SoloMachineTestSuite) TestCheckMisbehaviourAndUpdateState() {
if tc.expPass {
suite.Require().NoError(err)
- suite.Require().True(clientState.IsFrozen(), "client not frozen")
+ suite.Require().True(clientState.(*types.ClientState).FrozenSequence != 0, "client not frozen")
} else {
suite.Require().Error(err)
suite.Require().Nil(clientState)
diff --git a/modules/light-clients/07-tendermint/types/client_state.go b/modules/light-clients/07-tendermint/types/client_state.go
index 060150d9..e16a9754 100644
--- a/modules/light-clients/07-tendermint/types/client_state.go
+++ b/modules/light-clients/07-tendermint/types/client_state.go
@@ -58,9 +58,34 @@ func (cs ClientState) GetLatestHeight() exported.Height {
return cs.LatestHeight
}
-// IsFrozen returns true if the frozen height has been set.
-func (cs ClientState) IsFrozen() bool {
- return !cs.FrozenHeight.IsZero()
+// Status returns the status of the tendermint client.
+// The client may be:
+// - Active: FrozenHeight is zero and client is not expired
+// - Frozen: FrozenHeight is not zero
+// - Expired: the latest consensus state timestamp + trusting period <= current time
+//
+// A frozen client will eventually also expire, so the Frozen status
+// takes precedence.
+func (cs ClientState) Status(
+ ctx sdk.Context,
+ clientStore sdk.KVStore,
+ cdc codec.BinaryMarshaler,
+) exported.Status {
+ if !cs.FrozenHeight.IsZero() {
+ return exported.Frozen
+ }
+
+ // get latest consensus state from clientStore to check for expiry
+ consState, err := GetConsensusState(clientStore, cdc, cs.GetLatestHeight())
+ if err != nil {
+ return exported.Unknown
+ }
+
+ if cs.IsExpired(consState.Timestamp, ctx.BlockTime()) {
+ return exported.Expired
+ }
+
+ return exported.Active
}
// GetFrozenHeight returns the height at which client is frozen
@@ -507,10 +532,6 @@ func produceVerificationArgs(
)
}
- if cs.IsFrozen() && !cs.FrozenHeight.GT(height) {
- return commitmenttypes.MerkleProof{}, nil, clienttypes.ErrClientFrozen
- }
-
if prefix == nil {
return commitmenttypes.MerkleProof{}, nil, sdkerrors.Wrap(commitmenttypes.ErrInvalidPrefix, "prefix cannot be empty")
}
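The tendermint Status method reports Expired once the latest consensus state's timestamp plus the trusting period is no longer in the future. A minimal sketch of that expiry rule (assumed to mirror the client's existing IsExpired helper):

```go
package example

import (
	"fmt"
	"time"
)

// isExpired sketches the expiry rule Status relies on: a client is expired once
// the latest consensus timestamp plus the trusting period is no longer in the
// future (assumed to mirror ClientState.IsExpired).
func isExpired(latestTimestamp, now time.Time, trustingPeriod time.Duration) bool {
	expirationTime := latestTimestamp.Add(trustingPeriod)
	return !expirationTime.After(now)
}

func main() {
	latest := time.Now().Add(-2 * time.Hour)
	fmt.Println(isExpired(latest, time.Now(), time.Hour))   // true: trusting period elapsed
	fmt.Println(isExpired(latest, time.Now(), 3*time.Hour)) // false: still within trusting period
}
```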
diff --git a/modules/light-clients/07-tendermint/types/client_state_test.go b/modules/light-clients/07-tendermint/types/client_state_test.go
index 94de0199..e6b62ddc 100644
--- a/modules/light-clients/07-tendermint/types/client_state_test.go
+++ b/modules/light-clients/07-tendermint/types/client_state_test.go
@@ -28,6 +28,46 @@ var (
invalidProof = []byte("invalid proof")
)
+func (suite *TendermintTestSuite) TestStatus() {
+ var (
+ path *ibctesting.Path
+ clientState *types.ClientState
+ )
+
+ testCases := []struct {
+ name string
+ malleate func()
+ expStatus exported.Status
+ }{
+ {"client is active", func() {}, exported.Active},
+ {"client is frozen", func() {
+ clientState.FrozenHeight = clienttypes.NewHeight(0, 1)
+ path.EndpointA.SetClientState(clientState)
+ }, exported.Frozen},
+ {"client status is unknown", func() {
+ clientState.LatestHeight = clientState.LatestHeight.Increment().(clienttypes.Height)
+ path.EndpointA.SetClientState(clientState)
+ }, exported.Unknown},
+ {"client status is expired", func() {
+ suite.coordinator.IncrementTimeBy(clientState.TrustingPeriod)
+ }, exported.Expired},
+ }
+
+ for _, tc := range testCases {
+ path = ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.SetupClients(path)
+
+ clientStore := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), path.EndpointA.ClientID)
+ clientState = path.EndpointA.GetClientState().(*types.ClientState)
+
+ tc.malleate()
+
+ status := clientState.Status(suite.chainA.GetContext(), clientStore, suite.chainA.App.AppCodec())
+ suite.Require().Equal(tc.expStatus, status)
+
+ }
+}
+
func (suite *TendermintTestSuite) TestValidate() {
testCases := []struct {
name string
@@ -179,15 +219,6 @@ func (suite *TendermintTestSuite) TestVerifyClientConsensusState() {
prefix: commitmenttypes.NewMerklePrefix([]byte("ibc")),
expPass: false,
},
- {
- name: "client is frozen",
- clientState: &types.ClientState{LatestHeight: height, FrozenHeight: clienttypes.NewHeight(height.RevisionNumber, height.RevisionHeight-1)},
- consensusState: &types.ConsensusState{
- Root: commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()),
- },
- prefix: commitmenttypes.NewMerklePrefix([]byte("ibc")),
- expPass: false,
- },
{
name: "proof verification failed",
clientState: types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false),
@@ -244,11 +275,6 @@ func (suite *TendermintTestSuite) TestVerifyConnectionState() {
proofHeight = clientState.LatestHeight.Increment()
}, false,
},
- {
- "client is frozen", func() {
- clientState.FrozenHeight = clienttypes.NewHeight(0, 1)
- }, false,
- },
{
"proof verification failed", func() {
proof = invalidProof
@@ -323,11 +349,6 @@ func (suite *TendermintTestSuite) TestVerifyChannelState() {
proofHeight = clientState.LatestHeight.Increment()
}, false,
},
- {
- "client is frozen", func() {
- clientState.FrozenHeight = clienttypes.NewHeight(0, 1)
- }, false,
- },
{
"proof verification failed", func() {
proof = invalidProof
@@ -418,11 +439,6 @@ func (suite *TendermintTestSuite) TestVerifyPacketCommitment() {
proofHeight = clientState.LatestHeight.Increment()
}, false,
},
- {
- "client is frozen", func() {
- clientState.FrozenHeight = clienttypes.NewHeight(0, 1)
- }, false,
- },
{
"proof verification failed", func() {
proof = invalidProof
@@ -518,11 +534,6 @@ func (suite *TendermintTestSuite) TestVerifyPacketAcknowledgement() {
proofHeight = clientState.LatestHeight.Increment()
}, false,
},
- {
- "client is frozen", func() {
- clientState.FrozenHeight = clienttypes.NewHeight(0, 1)
- }, false,
- },
{
"proof verification failed", func() {
proof = invalidProof
@@ -623,11 +634,6 @@ func (suite *TendermintTestSuite) TestVerifyPacketReceiptAbsence() {
proofHeight = clientState.LatestHeight.Increment()
}, false,
},
- {
- "client is frozen", func() {
- clientState.FrozenHeight = clienttypes.NewHeight(0, 1)
- }, false,
- },
{
"proof verification failed", func() {
proof = invalidProof
@@ -724,11 +730,6 @@ func (suite *TendermintTestSuite) TestVerifyNextSeqRecv() {
proofHeight = clientState.LatestHeight.Increment()
}, false,
},
- {
- "client is frozen", func() {
- clientState.FrozenHeight = clienttypes.NewHeight(0, 1)
- }, false,
- },
{
"proof verification failed", func() {
proof = invalidProof
diff --git a/modules/light-clients/07-tendermint/types/misbehaviour_handle.go b/modules/light-clients/07-tendermint/types/misbehaviour_handle.go
index 0622372a..0cda7858 100644
--- a/modules/light-clients/07-tendermint/types/misbehaviour_handle.go
+++ b/modules/light-clients/07-tendermint/types/misbehaviour_handle.go
@@ -30,11 +30,7 @@ func (cs ClientState) CheckMisbehaviourAndUpdateState(
return nil, sdkerrors.Wrapf(clienttypes.ErrInvalidClientType, "expected type %T, got %T", misbehaviour, &Misbehaviour{})
}
- // If client is already frozen at earlier height than misbehaviour, return with error
- if cs.IsFrozen() && cs.FrozenHeight.LTE(misbehaviour.GetHeight()) {
- return nil, sdkerrors.Wrapf(clienttypes.ErrInvalidMisbehaviour,
- "client is already frozen at earlier height %s than misbehaviour height %s", cs.FrozenHeight, misbehaviour.GetHeight())
- }
+ // The status of the client is checked in 02-client
// Retrieve trusted consensus states for each Header in misbehaviour
// and unmarshal from clientStore
diff --git a/modules/light-clients/07-tendermint/types/misbehaviour_handle_test.go b/modules/light-clients/07-tendermint/types/misbehaviour_handle_test.go
index ee811847..1ce0b154 100644
--- a/modules/light-clients/07-tendermint/types/misbehaviour_handle_test.go
+++ b/modules/light-clients/07-tendermint/types/misbehaviour_handle_test.go
@@ -360,7 +360,7 @@ func (suite *TendermintTestSuite) TestCheckMisbehaviourAndUpdateState() {
if tc.expPass {
suite.Require().NoError(err, "valid test case %d failed: %s", i, tc.name)
suite.Require().NotNil(clientState, "valid test case %d failed: %s", i, tc.name)
- suite.Require().True(clientState.IsFrozen(), "valid test case %d failed: %s", i, tc.name)
+ suite.Require().True(!clientState.(*types.ClientState).FrozenHeight.IsZero(), "valid test case %d failed: %s", i, tc.name)
suite.Require().Equal(tc.misbehaviour.GetHeight(), clientState.GetFrozenHeight(),
"valid test case %d failed: %s. Expected FrozenHeight %s got %s", tc.misbehaviour.GetHeight(), clientState.GetFrozenHeight())
} else {
diff --git a/modules/light-clients/07-tendermint/types/proposal_handle.go b/modules/light-clients/07-tendermint/types/proposal_handle.go
index a4ccaea9..f913ce9b 100644
--- a/modules/light-clients/07-tendermint/types/proposal_handle.go
+++ b/modules/light-clients/07-tendermint/types/proposal_handle.go
@@ -61,7 +61,7 @@ func (cs ClientState) CheckSubstituteAndUpdateState(
switch {
- case cs.IsFrozen():
+ case !cs.FrozenHeight.IsZero():
if !cs.AllowUpdateAfterMisbehaviour {
return nil, sdkerrors.Wrap(clienttypes.ErrUpdateClientFailed, "client is not allowed to be unfrozen")
}
diff --git a/modules/light-clients/09-localhost/types/client_state.go b/modules/light-clients/09-localhost/types/client_state.go
index 6336a213..6ef27ba8 100644
--- a/modules/light-clients/09-localhost/types/client_state.go
+++ b/modules/light-clients/09-localhost/types/client_state.go
@@ -43,9 +43,10 @@ func (cs ClientState) GetLatestHeight() exported.Height {
return cs.Height
}
-// IsFrozen returns false.
-func (cs ClientState) IsFrozen() bool {
- return false
+// Status always returns Active. The localhost status cannot be changed.
+func (cs ClientState) Status(_ sdk.Context, _ sdk.KVStore, _ codec.BinaryMarshaler,
+) exported.Status {
+ return exported.Active
}
// GetFrozenHeight returns an uninitialized IBC Height.
diff --git a/modules/light-clients/09-localhost/types/client_state_test.go b/modules/light-clients/09-localhost/types/client_state_test.go
index 658824df..f9832303 100644
--- a/modules/light-clients/09-localhost/types/client_state_test.go
+++ b/modules/light-clients/09-localhost/types/client_state_test.go
@@ -19,6 +19,14 @@ const (
testSequence = 1
)
+func (suite *LocalhostTestSuite) TestStatus() {
+ clientState := types.NewClientState("chainID", clienttypes.NewHeight(3, 10))
+
+ // localhost should always return active
+ status := clientState.Status(suite.ctx, nil, nil)
+ suite.Require().Equal(exported.Active, status)
+}
+
func (suite *LocalhostTestSuite) TestValidate() {
testCases := []struct {
name string
diff --git a/proto/ibc/apps/transfer/v1/genesis.proto b/proto/ibc/apps/transfer/v1/genesis.proto
index 39cf7a65..84798eb4 100644
--- a/proto/ibc/apps/transfer/v1/genesis.proto
+++ b/proto/ibc/apps/transfer/v1/genesis.proto
@@ -9,11 +9,11 @@ import "gogoproto/gogo.proto";
// GenesisState defines the ibc-transfer genesis state
message GenesisState {
- string port_id = 1 [ (gogoproto.moretags) = "yaml:\"port_id\"" ];
+ string port_id = 1 [(gogoproto.moretags) = "yaml:\"port_id\""];
repeated DenomTrace denom_traces = 2 [
(gogoproto.castrepeated) = "Traces",
- (gogoproto.nullable) = false,
- (gogoproto.moretags) = "yaml:\"denom_traces\""
+ (gogoproto.nullable) = false,
+ (gogoproto.moretags) = "yaml:\"denom_traces\""
];
- Params params = 3 [ (gogoproto.nullable) = false ];
+ Params params = 3 [(gogoproto.nullable) = false];
}
diff --git a/proto/ibc/apps/transfer/v1/query.proto b/proto/ibc/apps/transfer/v1/query.proto
index f4014d62..09e5b458 100644
--- a/proto/ibc/apps/transfer/v1/query.proto
+++ b/proto/ibc/apps/transfer/v1/query.proto
@@ -52,8 +52,7 @@ message QueryDenomTracesRequest {
// method.
message QueryDenomTracesResponse {
// denom_traces returns all denominations trace information.
- repeated DenomTrace denom_traces = 1
- [ (gogoproto.castrepeated) = "Traces", (gogoproto.nullable) = false ];
+ repeated DenomTrace denom_traces = 1 [(gogoproto.castrepeated) = "Traces", (gogoproto.nullable) = false];
// pagination defines the pagination in the response.
cosmos.base.query.v1beta1.PageResponse pagination = 2;
}
diff --git a/proto/ibc/apps/transfer/v1/transfer.proto b/proto/ibc/apps/transfer/v1/transfer.proto
index e43971ee..0aa0224a 100644
--- a/proto/ibc/apps/transfer/v1/transfer.proto
+++ b/proto/ibc/apps/transfer/v1/transfer.proto
@@ -37,9 +37,8 @@ message DenomTrace {
message Params {
// send_enabled enables or disables all cross-chain token transfers from this
// chain.
- bool send_enabled = 1 [ (gogoproto.moretags) = "yaml:\"send_enabled\"" ];
+ bool send_enabled = 1 [(gogoproto.moretags) = "yaml:\"send_enabled\""];
// receive_enabled enables or disables all cross-chain token transfers to this
// chain.
- bool receive_enabled = 2
- [ (gogoproto.moretags) = "yaml:\"receive_enabled\"" ];
+ bool receive_enabled = 2 [(gogoproto.moretags) = "yaml:\"receive_enabled\""];
}
diff --git a/proto/ibc/apps/transfer/v1/tx.proto b/proto/ibc/apps/transfer/v1/tx.proto
index 73887e79..b7464310 100644
--- a/proto/ibc/apps/transfer/v1/tx.proto
+++ b/proto/ibc/apps/transfer/v1/tx.proto
@@ -18,30 +18,26 @@ service Msg {
// ICS20 enabled chains. See ICS Spec here:
// https://github.com/cosmos/ics/tree/master/spec/ics-020-fungible-token-transfer#data-structures
message MsgTransfer {
- option (gogoproto.equal) = false;
+ option (gogoproto.equal) = false;
option (gogoproto.goproto_getters) = false;
// the port on which the packet will be sent
- string source_port = 1 [ (gogoproto.moretags) = "yaml:\"source_port\"" ];
+ string source_port = 1 [(gogoproto.moretags) = "yaml:\"source_port\""];
// the channel by which the packet will be sent
- string source_channel = 2
- [ (gogoproto.moretags) = "yaml:\"source_channel\"" ];
+ string source_channel = 2 [(gogoproto.moretags) = "yaml:\"source_channel\""];
// the tokens to be transferred
- cosmos.base.v1beta1.Coin token = 3 [ (gogoproto.nullable) = false ];
+ cosmos.base.v1beta1.Coin token = 3 [(gogoproto.nullable) = false];
// the sender address
string sender = 4;
// the recipient address on the destination chain
string receiver = 5;
// Timeout height relative to the current block height.
// The timeout is disabled when set to 0.
- ibc.core.client.v1.Height timeout_height = 6 [
- (gogoproto.moretags) = "yaml:\"timeout_height\"",
- (gogoproto.nullable) = false
- ];
+ ibc.core.client.v1.Height timeout_height = 6
+ [(gogoproto.moretags) = "yaml:\"timeout_height\"", (gogoproto.nullable) = false];
// Timeout timestamp (in nanoseconds) relative to the current block timestamp.
// The timeout is disabled when set to 0.
- uint64 timeout_timestamp = 7
- [ (gogoproto.moretags) = "yaml:\"timeout_timestamp\"" ];
+ uint64 timeout_timestamp = 7 [(gogoproto.moretags) = "yaml:\"timeout_timestamp\""];
}
// MsgTransferResponse defines the Msg/Transfer response type.
diff --git a/proto/ibc/core/channel/v1/channel.proto b/proto/ibc/core/channel/v1/channel.proto
index 81a92a73..edb39d04 100644
--- a/proto/ibc/core/channel/v1/channel.proto
+++ b/proto/ibc/core/channel/v1/channel.proto
@@ -18,11 +18,10 @@ message Channel {
// whether the channel is ordered or unordered
Order ordering = 2;
// counterparty channel end
- Counterparty counterparty = 3 [ (gogoproto.nullable) = false ];
+ Counterparty counterparty = 3 [(gogoproto.nullable) = false];
// list of connection identifiers, in order, along which packets sent on
// this channel will travel
- repeated string connection_hops = 4
- [ (gogoproto.moretags) = "yaml:\"connection_hops\"" ];
+ repeated string connection_hops = 4 [(gogoproto.moretags) = "yaml:\"connection_hops\""];
// opaque channel version, which is agreed upon during the handshake
string version = 5;
}
@@ -37,11 +36,10 @@ message IdentifiedChannel {
// whether the channel is ordered or unordered
Order ordering = 2;
// counterparty channel end
- Counterparty counterparty = 3 [ (gogoproto.nullable) = false ];
+ Counterparty counterparty = 3 [(gogoproto.nullable) = false];
// list of connection identifiers, in order, along which packets sent on
// this channel will travel
- repeated string connection_hops = 4
- [ (gogoproto.moretags) = "yaml:\"connection_hops\"" ];
+ repeated string connection_hops = 4 [(gogoproto.moretags) = "yaml:\"connection_hops\""];
// opaque channel version, which is agreed upon during the handshake
string version = 5;
// port identifier
@@ -56,18 +54,17 @@ enum State {
option (gogoproto.goproto_enum_prefix) = false;
// Default State
- STATE_UNINITIALIZED_UNSPECIFIED = 0
- [ (gogoproto.enumvalue_customname) = "UNINITIALIZED" ];
+ STATE_UNINITIALIZED_UNSPECIFIED = 0 [(gogoproto.enumvalue_customname) = "UNINITIALIZED"];
// A channel has just started the opening handshake.
- STATE_INIT = 1 [ (gogoproto.enumvalue_customname) = "INIT" ];
+ STATE_INIT = 1 [(gogoproto.enumvalue_customname) = "INIT"];
// A channel has acknowledged the handshake step on the counterparty chain.
- STATE_TRYOPEN = 2 [ (gogoproto.enumvalue_customname) = "TRYOPEN" ];
+ STATE_TRYOPEN = 2 [(gogoproto.enumvalue_customname) = "TRYOPEN"];
// A channel has completed the handshake. Open channels are
// ready to send and receive packets.
- STATE_OPEN = 3 [ (gogoproto.enumvalue_customname) = "OPEN" ];
+ STATE_OPEN = 3 [(gogoproto.enumvalue_customname) = "OPEN"];
// A channel has been closed and can no longer be used to send or receive
// packets.
- STATE_CLOSED = 4 [ (gogoproto.enumvalue_customname) = "CLOSED" ];
+ STATE_CLOSED = 4 [(gogoproto.enumvalue_customname) = "CLOSED"];
}
// Order defines if a channel is ORDERED or UNORDERED
@@ -75,12 +72,12 @@ enum Order {
option (gogoproto.goproto_enum_prefix) = false;
// zero-value for channel ordering
- ORDER_NONE_UNSPECIFIED = 0 [ (gogoproto.enumvalue_customname) = "NONE" ];
+ ORDER_NONE_UNSPECIFIED = 0 [(gogoproto.enumvalue_customname) = "NONE"];
// packets can be delivered in any order, which may differ from the order in
// which they were sent.
- ORDER_UNORDERED = 1 [ (gogoproto.enumvalue_customname) = "UNORDERED" ];
+ ORDER_UNORDERED = 1 [(gogoproto.enumvalue_customname) = "UNORDERED"];
// packets are delivered exactly in the order which they were sent
- ORDER_ORDERED = 2 [ (gogoproto.enumvalue_customname) = "ORDERED" ];
+ ORDER_ORDERED = 2 [(gogoproto.enumvalue_customname) = "ORDERED"];
}
// Counterparty defines a channel end counterparty
@@ -88,9 +85,9 @@ message Counterparty {
option (gogoproto.goproto_getters) = false;
// port on the counterparty chain which owns the other end of the channel.
- string port_id = 1 [ (gogoproto.moretags) = "yaml:\"port_id\"" ];
+ string port_id = 1 [(gogoproto.moretags) = "yaml:\"port_id\""];
// channel end on the counterparty chain
- string channel_id = 2 [ (gogoproto.moretags) = "yaml:\"channel_id\"" ];
+ string channel_id = 2 [(gogoproto.moretags) = "yaml:\"channel_id\""];
}
// Packet defines a type that carries data across different chains through IBC
@@ -102,26 +99,20 @@ message Packet {
// with a later sequence number.
uint64 sequence = 1;
// identifies the port on the sending chain.
- string source_port = 2 [ (gogoproto.moretags) = "yaml:\"source_port\"" ];
+ string source_port = 2 [(gogoproto.moretags) = "yaml:\"source_port\""];
// identifies the channel end on the sending chain.
- string source_channel = 3
- [ (gogoproto.moretags) = "yaml:\"source_channel\"" ];
+ string source_channel = 3 [(gogoproto.moretags) = "yaml:\"source_channel\""];
// identifies the port on the receiving chain.
- string destination_port = 4
- [ (gogoproto.moretags) = "yaml:\"destination_port\"" ];
+ string destination_port = 4 [(gogoproto.moretags) = "yaml:\"destination_port\""];
// identifies the channel end on the receiving chain.
- string destination_channel = 5
- [ (gogoproto.moretags) = "yaml:\"destination_channel\"" ];
+ string destination_channel = 5 [(gogoproto.moretags) = "yaml:\"destination_channel\""];
// actual opaque bytes transferred directly to the application module
bytes data = 6;
// block height after which the packet times out
- ibc.core.client.v1.Height timeout_height = 7 [
- (gogoproto.moretags) = "yaml:\"timeout_height\"",
- (gogoproto.nullable) = false
- ];
+ ibc.core.client.v1.Height timeout_height = 7
+ [(gogoproto.moretags) = "yaml:\"timeout_height\"", (gogoproto.nullable) = false];
// block timestamp (in nanoseconds) after which the packet times out
- uint64 timeout_timestamp = 8
- [ (gogoproto.moretags) = "yaml:\"timeout_timestamp\"" ];
+ uint64 timeout_timestamp = 8 [(gogoproto.moretags) = "yaml:\"timeout_timestamp\""];
}
// PacketState defines the generic type necessary to retrieve and store
@@ -132,9 +123,9 @@ message PacketState {
option (gogoproto.goproto_getters) = false;
// channel port identifier.
- string port_id = 1 [ (gogoproto.moretags) = "yaml:\"port_id\"" ];
+ string port_id = 1 [(gogoproto.moretags) = "yaml:\"port_id\""];
// channel unique identifier.
- string channel_id = 2 [ (gogoproto.moretags) = "yaml:\"channel_id\"" ];
+ string channel_id = 2 [(gogoproto.moretags) = "yaml:\"channel_id\""];
// packet sequence.
uint64 sequence = 3;
// embedded data that represents packet state.
@@ -151,7 +142,7 @@ message PacketState {
message Acknowledgement {
// response contains either a result or an error and must be non-empty
oneof response {
- bytes result = 21;
- string error = 22;
+ bytes result = 21;
+ string error = 22;
}
}
diff --git a/proto/ibc/core/channel/v1/genesis.proto b/proto/ibc/core/channel/v1/genesis.proto
index 00a7bcbb..75bf1fdb 100644
--- a/proto/ibc/core/channel/v1/genesis.proto
+++ b/proto/ibc/core/channel/v1/genesis.proto
@@ -9,34 +9,24 @@ import "ibc/core/channel/v1/channel.proto";
// GenesisState defines the ibc channel submodule's genesis state.
message GenesisState {
- repeated IdentifiedChannel channels = 1 [
- (gogoproto.casttype) = "IdentifiedChannel",
- (gogoproto.nullable) = false
- ];
- repeated PacketState acknowledgements = 2 [ (gogoproto.nullable) = false ];
- repeated PacketState commitments = 3 [ (gogoproto.nullable) = false ];
- repeated PacketState receipts = 4 [ (gogoproto.nullable) = false ];
- repeated PacketSequence send_sequences = 5 [
- (gogoproto.nullable) = false,
- (gogoproto.moretags) = "yaml:\"send_sequences\""
- ];
- repeated PacketSequence recv_sequences = 6 [
- (gogoproto.nullable) = false,
- (gogoproto.moretags) = "yaml:\"recv_sequences\""
- ];
- repeated PacketSequence ack_sequences = 7 [
- (gogoproto.nullable) = false,
- (gogoproto.moretags) = "yaml:\"ack_sequences\""
- ];
+ repeated IdentifiedChannel channels = 1 [(gogoproto.casttype) = "IdentifiedChannel", (gogoproto.nullable) = false];
+ repeated PacketState acknowledgements = 2 [(gogoproto.nullable) = false];
+ repeated PacketState commitments = 3 [(gogoproto.nullable) = false];
+ repeated PacketState receipts = 4 [(gogoproto.nullable) = false];
+ repeated PacketSequence send_sequences = 5
+ [(gogoproto.nullable) = false, (gogoproto.moretags) = "yaml:\"send_sequences\""];
+ repeated PacketSequence recv_sequences = 6
+ [(gogoproto.nullable) = false, (gogoproto.moretags) = "yaml:\"recv_sequences\""];
+ repeated PacketSequence ack_sequences = 7
+ [(gogoproto.nullable) = false, (gogoproto.moretags) = "yaml:\"ack_sequences\""];
// the sequence for the next generated channel identifier
- uint64 next_channel_sequence = 8
- [ (gogoproto.moretags) = "yaml:\"next_channel_sequence\"" ];
+ uint64 next_channel_sequence = 8 [(gogoproto.moretags) = "yaml:\"next_channel_sequence\""];
}
// PacketSequence defines the genesis type necessary to retrieve and store
// next send and receive sequences.
message PacketSequence {
- string port_id = 1 [ (gogoproto.moretags) = "yaml:\"port_id\"" ];
- string channel_id = 2 [ (gogoproto.moretags) = "yaml:\"channel_id\"" ];
- uint64 sequence = 3;
+ string port_id = 1 [(gogoproto.moretags) = "yaml:\"port_id\""];
+ string channel_id = 2 [(gogoproto.moretags) = "yaml:\"channel_id\""];
+ uint64 sequence = 3;
}
diff --git a/proto/ibc/core/channel/v1/query.proto b/proto/ibc/core/channel/v1/query.proto
index 1c98fe2a..194fe867 100644
--- a/proto/ibc/core/channel/v1/query.proto
+++ b/proto/ibc/core/channel/v1/query.proto
@@ -15,8 +15,7 @@ import "gogoproto/gogo.proto";
service Query {
// Channel queries an IBC Channel.
rpc Channel(QueryChannelRequest) returns (QueryChannelResponse) {
- option (google.api.http).get =
- "/ibc/core/channel/v1/channels/{channel_id}/ports/{port_id}";
+ option (google.api.http).get = "/ibc/core/channel/v1/channels/{channel_id}/ports/{port_id}";
}
// Channels queries all the IBC channels of a chain.
@@ -26,90 +25,76 @@ service Query {
// ConnectionChannels queries all the channels associated with a connection
// end.
- rpc ConnectionChannels(QueryConnectionChannelsRequest)
- returns (QueryConnectionChannelsResponse) {
- option (google.api.http).get =
- "/ibc/core/channel/v1/connections/{connection}/channels";
+ rpc ConnectionChannels(QueryConnectionChannelsRequest) returns (QueryConnectionChannelsResponse) {
+ option (google.api.http).get = "/ibc/core/channel/v1/connections/{connection}/channels";
}
// ChannelClientState queries for the client state for the channel associated
// with the provided channel identifiers.
- rpc ChannelClientState(QueryChannelClientStateRequest)
- returns (QueryChannelClientStateResponse) {
+ rpc ChannelClientState(QueryChannelClientStateRequest) returns (QueryChannelClientStateResponse) {
option (google.api.http).get = "/ibc/core/channel/v1/channels/{channel_id}/"
"ports/{port_id}/client_state";
}
// ChannelConsensusState queries for the consensus state for the channel
// associated with the provided channel identifiers.
- rpc ChannelConsensusState(QueryChannelConsensusStateRequest)
- returns (QueryChannelConsensusStateResponse) {
+ rpc ChannelConsensusState(QueryChannelConsensusStateRequest) returns (QueryChannelConsensusStateResponse) {
option (google.api.http).get = "/ibc/core/channel/v1/channels/{channel_id}/"
"ports/{port_id}/consensus_state/revision/"
"{revision_number}/height/{revision_height}";
}
// PacketCommitment queries a stored packet commitment hash.
- rpc PacketCommitment(QueryPacketCommitmentRequest)
- returns (QueryPacketCommitmentResponse) {
- option (google.api.http).get =
- "/ibc/core/channel/v1/channels/{channel_id}/ports/{port_id}/"
- "packet_commitments/{sequence}";
+ rpc PacketCommitment(QueryPacketCommitmentRequest) returns (QueryPacketCommitmentResponse) {
+ option (google.api.http).get = "/ibc/core/channel/v1/channels/{channel_id}/ports/{port_id}/"
+ "packet_commitments/{sequence}";
}
// PacketCommitments returns all the packet commitments hashes associated
// with a channel.
- rpc PacketCommitments(QueryPacketCommitmentsRequest)
- returns (QueryPacketCommitmentsResponse) {
+ rpc PacketCommitments(QueryPacketCommitmentsRequest) returns (QueryPacketCommitmentsResponse) {
option (google.api.http).get = "/ibc/core/channel/v1/channels/{channel_id}/"
"ports/{port_id}/packet_commitments";
}
// PacketReceipt queries if a given packet sequence has been received on the
// queried chain
- rpc PacketReceipt(QueryPacketReceiptRequest)
- returns (QueryPacketReceiptResponse) {
+ rpc PacketReceipt(QueryPacketReceiptRequest) returns (QueryPacketReceiptResponse) {
option (google.api.http).get = "/ibc/core/channel/v1/channels/{channel_id}/"
"ports/{port_id}/packet_receipts/{sequence}";
}
// PacketAcknowledgement queries a stored packet acknowledgement hash.
- rpc PacketAcknowledgement(QueryPacketAcknowledgementRequest)
- returns (QueryPacketAcknowledgementResponse) {
+ rpc PacketAcknowledgement(QueryPacketAcknowledgementRequest) returns (QueryPacketAcknowledgementResponse) {
option (google.api.http).get = "/ibc/core/channel/v1/channels/{channel_id}/"
"ports/{port_id}/packet_acks/{sequence}";
}
// PacketAcknowledgements returns all the packet acknowledgements associated
// with a channel.
- rpc PacketAcknowledgements(QueryPacketAcknowledgementsRequest)
- returns (QueryPacketAcknowledgementsResponse) {
+ rpc PacketAcknowledgements(QueryPacketAcknowledgementsRequest) returns (QueryPacketAcknowledgementsResponse) {
option (google.api.http).get = "/ibc/core/channel/v1/channels/{channel_id}/"
"ports/{port_id}/packet_acknowledgements";
}
// UnreceivedPackets returns all the unreceived IBC packets associated with a
// channel and sequences.
- rpc UnreceivedPackets(QueryUnreceivedPacketsRequest)
- returns (QueryUnreceivedPacketsResponse) {
- option (google.api.http).get =
- "/ibc/core/channel/v1/channels/{channel_id}/ports/{port_id}/"
- "packet_commitments/"
- "{packet_commitment_sequences}/unreceived_packets";
+ rpc UnreceivedPackets(QueryUnreceivedPacketsRequest) returns (QueryUnreceivedPacketsResponse) {
+ option (google.api.http).get = "/ibc/core/channel/v1/channels/{channel_id}/ports/{port_id}/"
+ "packet_commitments/"
+ "{packet_commitment_sequences}/unreceived_packets";
}
// UnreceivedAcks returns all the unreceived IBC acknowledgements associated
// with a channel and sequences.
- rpc UnreceivedAcks(QueryUnreceivedAcksRequest)
- returns (QueryUnreceivedAcksResponse) {
+ rpc UnreceivedAcks(QueryUnreceivedAcksRequest) returns (QueryUnreceivedAcksResponse) {
option (google.api.http).get = "/ibc/core/channel/v1/channels/{channel_id}/"
"ports/{port_id}/packet_commitments/"
"{packet_ack_sequences}/unreceived_acks";
}
// NextSequenceReceive returns the next receive sequence for a given channel.
- rpc NextSequenceReceive(QueryNextSequenceReceiveRequest)
- returns (QueryNextSequenceReceiveResponse) {
+ rpc NextSequenceReceive(QueryNextSequenceReceiveRequest) returns (QueryNextSequenceReceiveResponse) {
option (google.api.http).get = "/ibc/core/channel/v1/channels/{channel_id}/"
"ports/{port_id}/next_sequence";
}
@@ -132,7 +117,7 @@ message QueryChannelResponse {
// merkle proof of existence
bytes proof = 2;
// height at which the proof was retrieved
- ibc.core.client.v1.Height proof_height = 3 [ (gogoproto.nullable) = false ];
+ ibc.core.client.v1.Height proof_height = 3 [(gogoproto.nullable) = false];
}
// QueryChannelsRequest is the request type for the Query/Channels RPC method
@@ -148,7 +133,7 @@ message QueryChannelsResponse {
// pagination response
cosmos.base.query.v1beta1.PageResponse pagination = 2;
// query block height
- ibc.core.client.v1.Height height = 3 [ (gogoproto.nullable) = false ];
+ ibc.core.client.v1.Height height = 3 [(gogoproto.nullable) = false];
}
// QueryConnectionChannelsRequest is the request type for the
@@ -168,7 +153,7 @@ message QueryConnectionChannelsResponse {
// pagination response
cosmos.base.query.v1beta1.PageResponse pagination = 2;
// query block height
- ibc.core.client.v1.Height height = 3 [ (gogoproto.nullable) = false ];
+ ibc.core.client.v1.Height height = 3 [(gogoproto.nullable) = false];
}
// QueryChannelClientStateRequest is the request type for the Query/ClientState
@@ -188,7 +173,7 @@ message QueryChannelClientStateResponse {
// merkle proof of existence
bytes proof = 2;
// height at which the proof was retrieved
- ibc.core.client.v1.Height proof_height = 3 [ (gogoproto.nullable) = false ];
+ ibc.core.client.v1.Height proof_height = 3 [(gogoproto.nullable) = false];
}
// QueryChannelConsensusStateRequest is the request type for the
@@ -214,7 +199,7 @@ message QueryChannelConsensusStateResponse {
// merkle proof of existence
bytes proof = 3;
// height at which the proof was retrieved
- ibc.core.client.v1.Height proof_height = 4 [ (gogoproto.nullable) = false ];
+ ibc.core.client.v1.Height proof_height = 4 [(gogoproto.nullable) = false];
}
// QueryPacketCommitmentRequest is the request type for the
@@ -237,7 +222,7 @@ message QueryPacketCommitmentResponse {
// merkle proof of existence
bytes proof = 2;
// height at which the proof was retrieved
- ibc.core.client.v1.Height proof_height = 3 [ (gogoproto.nullable) = false ];
+ ibc.core.client.v1.Height proof_height = 3 [(gogoproto.nullable) = false];
}
// QueryPacketCommitmentsRequest is the request type for the
@@ -258,7 +243,7 @@ message QueryPacketCommitmentsResponse {
// pagination response
cosmos.base.query.v1beta1.PageResponse pagination = 2;
// query block height
- ibc.core.client.v1.Height height = 3 [ (gogoproto.nullable) = false ];
+ ibc.core.client.v1.Height height = 3 [(gogoproto.nullable) = false];
}
// QueryPacketReceiptRequest is the request type for the
@@ -281,7 +266,7 @@ message QueryPacketReceiptResponse {
// merkle proof of existence
bytes proof = 3;
// height at which the proof was retrieved
- ibc.core.client.v1.Height proof_height = 4 [ (gogoproto.nullable) = false ];
+ ibc.core.client.v1.Height proof_height = 4 [(gogoproto.nullable) = false];
}
// QueryPacketAcknowledgementRequest is the request type for the
@@ -304,7 +289,7 @@ message QueryPacketAcknowledgementResponse {
// merkle proof of existence
bytes proof = 2;
// height at which the proof was retrieved
- ibc.core.client.v1.Height proof_height = 3 [ (gogoproto.nullable) = false ];
+ ibc.core.client.v1.Height proof_height = 3 [(gogoproto.nullable) = false];
}
// QueryPacketAcknowledgementsRequest is the request type for the
@@ -325,7 +310,7 @@ message QueryPacketAcknowledgementsResponse {
// pagination response
cosmos.base.query.v1beta1.PageResponse pagination = 2;
// query block height
- ibc.core.client.v1.Height height = 3 [ (gogoproto.nullable) = false ];
+ ibc.core.client.v1.Height height = 3 [(gogoproto.nullable) = false];
}
// QueryUnreceivedPacketsRequest is the request type for the
@@ -345,7 +330,7 @@ message QueryUnreceivedPacketsResponse {
// list of unreceived packet sequences
repeated uint64 sequences = 1;
// query block height
- ibc.core.client.v1.Height height = 2 [ (gogoproto.nullable) = false ];
+ ibc.core.client.v1.Height height = 2 [(gogoproto.nullable) = false];
}
// QueryUnreceivedAcks is the request type for the
@@ -365,7 +350,7 @@ message QueryUnreceivedAcksResponse {
// list of unreceived acknowledgement sequences
repeated uint64 sequences = 1;
// query block height
- ibc.core.client.v1.Height height = 2 [ (gogoproto.nullable) = false ];
+ ibc.core.client.v1.Height height = 2 [(gogoproto.nullable) = false];
}
// QueryNextSequenceReceiveRequest is the request type for the
@@ -385,5 +370,5 @@ message QueryNextSequenceReceiveResponse {
// merkle proof of existence
bytes proof = 2;
// height at which the proof was retrieved
- ibc.core.client.v1.Height proof_height = 3 [ (gogoproto.nullable) = false ];
+ ibc.core.client.v1.Height proof_height = 3 [(gogoproto.nullable) = false];
}
diff --git a/proto/ibc/core/channel/v1/tx.proto b/proto/ibc/core/channel/v1/tx.proto
index 8fe5feb5..59af0d3f 100644
--- a/proto/ibc/core/channel/v1/tx.proto
+++ b/proto/ibc/core/channel/v1/tx.proto
@@ -20,17 +20,14 @@ service Msg {
rpc ChannelOpenAck(MsgChannelOpenAck) returns (MsgChannelOpenAckResponse);
// ChannelOpenConfirm defines a rpc handler method for MsgChannelOpenConfirm.
- rpc ChannelOpenConfirm(MsgChannelOpenConfirm)
- returns (MsgChannelOpenConfirmResponse);
+ rpc ChannelOpenConfirm(MsgChannelOpenConfirm) returns (MsgChannelOpenConfirmResponse);
// ChannelCloseInit defines a rpc handler method for MsgChannelCloseInit.
- rpc ChannelCloseInit(MsgChannelCloseInit)
- returns (MsgChannelCloseInitResponse);
+ rpc ChannelCloseInit(MsgChannelCloseInit) returns (MsgChannelCloseInitResponse);
// ChannelCloseConfirm defines a rpc handler method for
// MsgChannelCloseConfirm.
- rpc ChannelCloseConfirm(MsgChannelCloseConfirm)
- returns (MsgChannelCloseConfirmResponse);
+ rpc ChannelCloseConfirm(MsgChannelCloseConfirm) returns (MsgChannelCloseConfirmResponse);
// RecvPacket defines a rpc handler method for MsgRecvPacket.
rpc RecvPacket(MsgRecvPacket) returns (MsgRecvPacketResponse);
@@ -48,12 +45,12 @@ service Msg {
// MsgChannelOpenInit defines an sdk.Msg to initialize a channel handshake. It
// is called by a relayer on Chain A.
message MsgChannelOpenInit {
- option (gogoproto.equal) = false;
+ option (gogoproto.equal) = false;
option (gogoproto.goproto_getters) = false;
- string port_id = 1 [ (gogoproto.moretags) = "yaml:\"port_id\"" ];
- Channel channel = 2 [ (gogoproto.nullable) = false ];
- string signer = 3;
+ string port_id = 1 [(gogoproto.moretags) = "yaml:\"port_id\""];
+ Channel channel = 2 [(gogoproto.nullable) = false];
+ string signer = 3;
}
// MsgChannelOpenInitResponse defines the Msg/ChannelOpenInit response type.
@@ -62,22 +59,18 @@ message MsgChannelOpenInitResponse {}
// MsgChannelOpenInit defines a msg sent by a Relayer to try to open a channel
// on Chain B.
message MsgChannelOpenTry {
- option (gogoproto.equal) = false;
+ option (gogoproto.equal) = false;
option (gogoproto.goproto_getters) = false;
- string port_id = 1 [ (gogoproto.moretags) = "yaml:\"port_id\"" ];
+ string port_id = 1 [(gogoproto.moretags) = "yaml:\"port_id\""];
// in the case of crossing hello's, when both chains call OpenInit, we need
// the channel identifier of the previous channel in state INIT
- string previous_channel_id = 2
- [ (gogoproto.moretags) = "yaml:\"previous_channel_id\"" ];
- Channel channel = 3 [ (gogoproto.nullable) = false ];
- string counterparty_version = 4
- [ (gogoproto.moretags) = "yaml:\"counterparty_version\"" ];
- bytes proof_init = 5 [ (gogoproto.moretags) = "yaml:\"proof_init\"" ];
- ibc.core.client.v1.Height proof_height = 6 [
- (gogoproto.moretags) = "yaml:\"proof_height\"",
- (gogoproto.nullable) = false
- ];
+ string previous_channel_id = 2 [(gogoproto.moretags) = "yaml:\"previous_channel_id\""];
+ Channel channel = 3 [(gogoproto.nullable) = false];
+ string counterparty_version = 4 [(gogoproto.moretags) = "yaml:\"counterparty_version\""];
+ bytes proof_init = 5 [(gogoproto.moretags) = "yaml:\"proof_init\""];
+ ibc.core.client.v1.Height proof_height = 6
+ [(gogoproto.moretags) = "yaml:\"proof_height\"", (gogoproto.nullable) = false];
string signer = 7;
}
@@ -87,20 +80,16 @@ message MsgChannelOpenTryResponse {}
// MsgChannelOpenAck defines a msg sent by a Relayer to Chain A to acknowledge
// the change of channel state to TRYOPEN on Chain B.
message MsgChannelOpenAck {
- option (gogoproto.equal) = false;
+ option (gogoproto.equal) = false;
option (gogoproto.goproto_getters) = false;
- string port_id = 1 [ (gogoproto.moretags) = "yaml:\"port_id\"" ];
- string channel_id = 2 [ (gogoproto.moretags) = "yaml:\"channel_id\"" ];
- string counterparty_channel_id = 3
- [ (gogoproto.moretags) = "yaml:\"counterparty_channel_id\"" ];
- string counterparty_version = 4
- [ (gogoproto.moretags) = "yaml:\"counterparty_version\"" ];
- bytes proof_try = 5 [ (gogoproto.moretags) = "yaml:\"proof_try\"" ];
- ibc.core.client.v1.Height proof_height = 6 [
- (gogoproto.moretags) = "yaml:\"proof_height\"",
- (gogoproto.nullable) = false
- ];
+ string port_id = 1 [(gogoproto.moretags) = "yaml:\"port_id\""];
+ string channel_id = 2 [(gogoproto.moretags) = "yaml:\"channel_id\""];
+ string counterparty_channel_id = 3 [(gogoproto.moretags) = "yaml:\"counterparty_channel_id\""];
+ string counterparty_version = 4 [(gogoproto.moretags) = "yaml:\"counterparty_version\""];
+ bytes proof_try = 5 [(gogoproto.moretags) = "yaml:\"proof_try\""];
+ ibc.core.client.v1.Height proof_height = 6
+ [(gogoproto.moretags) = "yaml:\"proof_height\"", (gogoproto.nullable) = false];
string signer = 7;
}
@@ -110,16 +99,14 @@ message MsgChannelOpenAckResponse {}
// MsgChannelOpenConfirm defines a msg sent by a Relayer to Chain B to
// acknowledge the change of channel state to OPEN on Chain A.
message MsgChannelOpenConfirm {
- option (gogoproto.equal) = false;
+ option (gogoproto.equal) = false;
option (gogoproto.goproto_getters) = false;
- string port_id = 1 [ (gogoproto.moretags) = "yaml:\"port_id\"" ];
- string channel_id = 2 [ (gogoproto.moretags) = "yaml:\"channel_id\"" ];
- bytes proof_ack = 3 [ (gogoproto.moretags) = "yaml:\"proof_ack\"" ];
- ibc.core.client.v1.Height proof_height = 4 [
- (gogoproto.moretags) = "yaml:\"proof_height\"",
- (gogoproto.nullable) = false
- ];
+ string port_id = 1 [(gogoproto.moretags) = "yaml:\"port_id\""];
+ string channel_id = 2 [(gogoproto.moretags) = "yaml:\"channel_id\""];
+ bytes proof_ack = 3 [(gogoproto.moretags) = "yaml:\"proof_ack\""];
+ ibc.core.client.v1.Height proof_height = 4
+ [(gogoproto.moretags) = "yaml:\"proof_height\"", (gogoproto.nullable) = false];
string signer = 5;
}
@@ -130,12 +117,12 @@ message MsgChannelOpenConfirmResponse {}
// MsgChannelCloseInit defines a msg sent by a Relayer to Chain A
// to close a channel with Chain B.
message MsgChannelCloseInit {
- option (gogoproto.equal) = false;
+ option (gogoproto.equal) = false;
option (gogoproto.goproto_getters) = false;
- string port_id = 1 [ (gogoproto.moretags) = "yaml:\"port_id\"" ];
- string channel_id = 2 [ (gogoproto.moretags) = "yaml:\"channel_id\"" ];
- string signer = 3;
+ string port_id = 1 [(gogoproto.moretags) = "yaml:\"port_id\""];
+ string channel_id = 2 [(gogoproto.moretags) = "yaml:\"channel_id\""];
+ string signer = 3;
}
// MsgChannelCloseInitResponse defines the Msg/ChannelCloseInit response type.
@@ -144,16 +131,14 @@ message MsgChannelCloseInitResponse {}
// MsgChannelCloseConfirm defines a msg sent by a Relayer to Chain B
// to acknowledge the change of channel state to CLOSED on Chain A.
message MsgChannelCloseConfirm {
- option (gogoproto.equal) = false;
+ option (gogoproto.equal) = false;
option (gogoproto.goproto_getters) = false;
- string port_id = 1 [ (gogoproto.moretags) = "yaml:\"port_id\"" ];
- string channel_id = 2 [ (gogoproto.moretags) = "yaml:\"channel_id\"" ];
- bytes proof_init = 3 [ (gogoproto.moretags) = "yaml:\"proof_init\"" ];
- ibc.core.client.v1.Height proof_height = 4 [
- (gogoproto.moretags) = "yaml:\"proof_height\"",
- (gogoproto.nullable) = false
- ];
+ string port_id = 1 [(gogoproto.moretags) = "yaml:\"port_id\""];
+ string channel_id = 2 [(gogoproto.moretags) = "yaml:\"channel_id\""];
+ bytes proof_init = 3 [(gogoproto.moretags) = "yaml:\"proof_init\""];
+ ibc.core.client.v1.Height proof_height = 4
+ [(gogoproto.moretags) = "yaml:\"proof_height\"", (gogoproto.nullable) = false];
string signer = 5;
}
@@ -163,16 +148,13 @@ message MsgChannelCloseConfirmResponse {}
// MsgRecvPacket receives incoming IBC packet
message MsgRecvPacket {
- option (gogoproto.equal) = false;
+ option (gogoproto.equal) = false;
option (gogoproto.goproto_getters) = false;
- Packet packet = 1 [ (gogoproto.nullable) = false ];
- bytes proof_commitment = 2
- [ (gogoproto.moretags) = "yaml:\"proof_commitment\"" ];
- ibc.core.client.v1.Height proof_height = 3 [
- (gogoproto.moretags) = "yaml:\"proof_height\"",
- (gogoproto.nullable) = false
- ];
+ Packet packet = 1 [(gogoproto.nullable) = false];
+ bytes proof_commitment = 2 [(gogoproto.moretags) = "yaml:\"proof_commitment\""];
+ ibc.core.client.v1.Height proof_height = 3
+ [(gogoproto.moretags) = "yaml:\"proof_height\"", (gogoproto.nullable) = false];
string signer = 4;
}
@@ -181,19 +163,15 @@ message MsgRecvPacketResponse {}
// MsgTimeout receives timed-out packet
message MsgTimeout {
- option (gogoproto.equal) = false;
+ option (gogoproto.equal) = false;
option (gogoproto.goproto_getters) = false;
- Packet packet = 1 [ (gogoproto.nullable) = false ];
- bytes proof_unreceived = 2
- [ (gogoproto.moretags) = "yaml:\"proof_unreceived\"" ];
- ibc.core.client.v1.Height proof_height = 3 [
- (gogoproto.moretags) = "yaml:\"proof_height\"",
- (gogoproto.nullable) = false
- ];
- uint64 next_sequence_recv = 4
- [ (gogoproto.moretags) = "yaml:\"next_sequence_recv\"" ];
- string signer = 5;
+ Packet packet = 1 [(gogoproto.nullable) = false];
+ bytes proof_unreceived = 2 [(gogoproto.moretags) = "yaml:\"proof_unreceived\""];
+ ibc.core.client.v1.Height proof_height = 3
+ [(gogoproto.moretags) = "yaml:\"proof_height\"", (gogoproto.nullable) = false];
+ uint64 next_sequence_recv = 4 [(gogoproto.moretags) = "yaml:\"next_sequence_recv\""];
+ string signer = 5;
}
// MsgTimeoutResponse defines the Msg/Timeout response type.
@@ -201,20 +179,16 @@ message MsgTimeoutResponse {}
// MsgTimeoutOnClose timed-out packet upon counterparty channel closure.
message MsgTimeoutOnClose {
- option (gogoproto.equal) = false;
+ option (gogoproto.equal) = false;
option (gogoproto.goproto_getters) = false;
- Packet packet = 1 [ (gogoproto.nullable) = false ];
- bytes proof_unreceived = 2
- [ (gogoproto.moretags) = "yaml:\"proof_unreceived\"" ];
- bytes proof_close = 3 [ (gogoproto.moretags) = "yaml:\"proof_close\"" ];
- ibc.core.client.v1.Height proof_height = 4 [
- (gogoproto.moretags) = "yaml:\"proof_height\"",
- (gogoproto.nullable) = false
- ];
- uint64 next_sequence_recv = 5
- [ (gogoproto.moretags) = "yaml:\"next_sequence_recv\"" ];
- string signer = 6;
+ Packet packet = 1 [(gogoproto.nullable) = false];
+ bytes proof_unreceived = 2 [(gogoproto.moretags) = "yaml:\"proof_unreceived\""];
+ bytes proof_close = 3 [(gogoproto.moretags) = "yaml:\"proof_close\""];
+ ibc.core.client.v1.Height proof_height = 4
+ [(gogoproto.moretags) = "yaml:\"proof_height\"", (gogoproto.nullable) = false];
+ uint64 next_sequence_recv = 5 [(gogoproto.moretags) = "yaml:\"next_sequence_recv\""];
+ string signer = 6;
}
// MsgTimeoutOnCloseResponse defines the Msg/TimeoutOnClose response type.
@@ -222,16 +196,14 @@ message MsgTimeoutOnCloseResponse {}
// MsgAcknowledgement receives incoming IBC acknowledgement
message MsgAcknowledgement {
- option (gogoproto.equal) = false;
+ option (gogoproto.equal) = false;
option (gogoproto.goproto_getters) = false;
- Packet packet = 1 [ (gogoproto.nullable) = false ];
- bytes acknowledgement = 2;
- bytes proof_acked = 3 [ (gogoproto.moretags) = "yaml:\"proof_acked\"" ];
- ibc.core.client.v1.Height proof_height = 4 [
- (gogoproto.moretags) = "yaml:\"proof_height\"",
- (gogoproto.nullable) = false
- ];
+ Packet packet = 1 [(gogoproto.nullable) = false];
+ bytes acknowledgement = 2;
+ bytes proof_acked = 3 [(gogoproto.moretags) = "yaml:\"proof_acked\""];
+ ibc.core.client.v1.Height proof_height = 4
+ [(gogoproto.moretags) = "yaml:\"proof_height\"", (gogoproto.nullable) = false];
string signer = 5;
}
diff --git a/proto/ibc/core/client/v1/client.proto b/proto/ibc/core/client/v1/client.proto
index 92728700..a4a2cc85 100644
--- a/proto/ibc/core/client/v1/client.proto
+++ b/proto/ibc/core/client/v1/client.proto
@@ -12,32 +12,28 @@ import "cosmos/upgrade/v1beta1/upgrade.proto";
// identifier field.
message IdentifiedClientState {
// client identifier
- string client_id = 1 [ (gogoproto.moretags) = "yaml:\"client_id\"" ];
+ string client_id = 1 [(gogoproto.moretags) = "yaml:\"client_id\""];
// client state
- google.protobuf.Any client_state = 2
- [ (gogoproto.moretags) = "yaml:\"client_state\"" ];
+ google.protobuf.Any client_state = 2 [(gogoproto.moretags) = "yaml:\"client_state\""];
}
// ConsensusStateWithHeight defines a consensus state with an additional height
// field.
message ConsensusStateWithHeight {
// consensus state height
- Height height = 1 [ (gogoproto.nullable) = false ];
+ Height height = 1 [(gogoproto.nullable) = false];
// consensus state
- google.protobuf.Any consensus_state = 2
- [ (gogoproto.moretags) = "yaml\"consensus_state\"" ];
+ google.protobuf.Any consensus_state = 2 [(gogoproto.moretags) = "yaml\"consensus_state\""];
}
// ClientConsensusStates defines all the stored consensus states for a given
// client.
message ClientConsensusStates {
// client identifier
- string client_id = 1 [ (gogoproto.moretags) = "yaml:\"client_id\"" ];
+ string client_id = 1 [(gogoproto.moretags) = "yaml:\"client_id\""];
// consensus states and their heights associated with the client
- repeated ConsensusStateWithHeight consensus_states = 2 [
- (gogoproto.moretags) = "yaml:\"consensus_states\"",
- (gogoproto.nullable) = false
- ];
+ repeated ConsensusStateWithHeight consensus_states = 2
+ [(gogoproto.moretags) = "yaml:\"consensus_states\"", (gogoproto.nullable) = false];
}
// ClientUpdateProposal is a governance proposal. If it passes, the substitute
@@ -53,30 +49,25 @@ message ClientUpdateProposal {
// the description of the proposal
string description = 2;
// the client identifier for the client to be updated if the proposal passes
- string subject_client_id = 3
- [ (gogoproto.moretags) = "yaml:\"subject_client_id\"" ];
+ string subject_client_id = 3 [(gogoproto.moretags) = "yaml:\"subject_client_id\""];
// the substitute client identifier for the client standing in for the subject
// client
- string substitute_client_id = 4
- [ (gogoproto.moretags) = "yaml:\"susbtitute_client_id\"" ];
+ string substitute_client_id = 4 [(gogoproto.moretags) = "yaml:\"susbtitute_client_id\""];
// the initial height to copy consensus states from the substitute to the
// subject
- Height initial_height = 5 [
- (gogoproto.moretags) = "yaml:\"initial_height\"",
- (gogoproto.nullable) = false
- ];
+ Height initial_height = 5 [(gogoproto.moretags) = "yaml:\"initial_height\"", (gogoproto.nullable) = false];
}
// UpgradeProposal is a gov Content type for initiating an IBC breaking
// upgrade.
message UpgradeProposal {
- option (gogoproto.goproto_getters) = false;
+ option (gogoproto.goproto_getters) = false;
option (gogoproto.goproto_stringer) = false;
- option (gogoproto.equal) = true;
+ option (gogoproto.equal) = true;
- string title = 1;
- string description = 2;
- cosmos.upgrade.v1beta1.Plan plan = 3 [ (gogoproto.nullable) = false ];
+ string title = 1;
+ string description = 2;
+ cosmos.upgrade.v1beta1.Plan plan = 3 [(gogoproto.nullable) = false];
// An UpgradedClientState must be provided to perform an IBC breaking upgrade.
// This will make the chain commit to the correct upgraded (self) client state
@@ -84,8 +75,7 @@ message UpgradeProposal {
// new upgraded client is valid by verifying a proof on the previous version
// of the chain. This will allow IBC connections to persist smoothly across
// planned chain upgrades
- google.protobuf.Any upgraded_client_state = 4
- [ (gogoproto.moretags) = "yaml:\"upgraded_client_state\"" ];
+ google.protobuf.Any upgraded_client_state = 4 [(gogoproto.moretags) = "yaml:\"upgraded_client_state\""];
}
// Height is a monotonically increasing data type
@@ -99,20 +89,17 @@ message UpgradeProposal {
 // height continues to be monotonically increasing even as the RevisionHeight
// gets reset
message Height {
- option (gogoproto.goproto_getters) = false;
+ option (gogoproto.goproto_getters) = false;
option (gogoproto.goproto_stringer) = false;
// the revision that the client is currently on
- uint64 revision_number = 1
- [ (gogoproto.moretags) = "yaml:\"revision_number\"" ];
+ uint64 revision_number = 1 [(gogoproto.moretags) = "yaml:\"revision_number\""];
// the height within the given revision
- uint64 revision_height = 2
- [ (gogoproto.moretags) = "yaml:\"revision_height\"" ];
+ uint64 revision_height = 2 [(gogoproto.moretags) = "yaml:\"revision_height\""];
}
// Params defines the set of IBC light client parameters.
message Params {
// allowed_clients defines the list of allowed client state types.
- repeated string allowed_clients = 1
- [ (gogoproto.moretags) = "yaml:\"allowed_clients\"" ];
+ repeated string allowed_clients = 1 [(gogoproto.moretags) = "yaml:\"allowed_clients\""];
}
diff --git a/proto/ibc/core/client/v1/genesis.proto b/proto/ibc/core/client/v1/genesis.proto
index d6a74256..30592cf7 100644
--- a/proto/ibc/core/client/v1/genesis.proto
+++ b/proto/ibc/core/client/v1/genesis.proto
@@ -10,28 +10,22 @@ import "gogoproto/gogo.proto";
// GenesisState defines the ibc client submodule's genesis state.
message GenesisState {
// client states with their corresponding identifiers
- repeated IdentifiedClientState clients = 1 [
- (gogoproto.nullable) = false,
- (gogoproto.castrepeated) = "IdentifiedClientStates"
- ];
+ repeated IdentifiedClientState clients = 1
+ [(gogoproto.nullable) = false, (gogoproto.castrepeated) = "IdentifiedClientStates"];
// consensus states from each client
repeated ClientConsensusStates clients_consensus = 2 [
- (gogoproto.nullable) = false,
+ (gogoproto.nullable) = false,
(gogoproto.castrepeated) = "ClientsConsensusStates",
- (gogoproto.moretags) = "yaml:\"clients_consensus\""
+ (gogoproto.moretags) = "yaml:\"clients_consensus\""
];
// metadata from each client
- repeated IdentifiedGenesisMetadata clients_metadata = 3 [
- (gogoproto.nullable) = false,
- (gogoproto.moretags) = "yaml:\"clients_metadata\""
- ];
- Params params = 4 [ (gogoproto.nullable) = false ];
+ repeated IdentifiedGenesisMetadata clients_metadata = 3
+ [(gogoproto.nullable) = false, (gogoproto.moretags) = "yaml:\"clients_metadata\""];
+ Params params = 4 [(gogoproto.nullable) = false];
// create localhost on initialization
- bool create_localhost = 5
- [ (gogoproto.moretags) = "yaml:\"create_localhost\"" ];
+ bool create_localhost = 5 [(gogoproto.moretags) = "yaml:\"create_localhost\""];
// the sequence for the next generated client identifier
- uint64 next_client_sequence = 6
- [ (gogoproto.moretags) = "yaml:\"next_client_sequence\"" ];
+ uint64 next_client_sequence = 6 [(gogoproto.moretags) = "yaml:\"next_client_sequence\""];
}
// GenesisMetadata defines the genesis type for metadata that clients may return
@@ -48,9 +42,7 @@ message GenesisMetadata {
// IdentifiedGenesisMetadata has the client metadata with the corresponding
// client id.
message IdentifiedGenesisMetadata {
- string client_id = 1 [ (gogoproto.moretags) = "yaml:\"client_id\"" ];
- repeated GenesisMetadata client_metadata = 2 [
- (gogoproto.nullable) = false,
- (gogoproto.moretags) = "yaml:\"client_metadata\""
- ];
+ string client_id = 1 [(gogoproto.moretags) = "yaml:\"client_id\""];
+ repeated GenesisMetadata client_metadata = 2
+ [(gogoproto.nullable) = false, (gogoproto.moretags) = "yaml:\"client_metadata\""];
}
diff --git a/proto/ibc/core/client/v1/query.proto b/proto/ibc/core/client/v1/query.proto
index 0aa988ba..36a51357 100644
--- a/proto/ibc/core/client/v1/query.proto
+++ b/proto/ibc/core/client/v1/query.proto
@@ -14,20 +14,17 @@ import "gogoproto/gogo.proto";
service Query {
// ClientState queries an IBC light client.
rpc ClientState(QueryClientStateRequest) returns (QueryClientStateResponse) {
- option (google.api.http).get =
- "/ibc/core/client/v1/client_states/{client_id}";
+ option (google.api.http).get = "/ibc/core/client/v1/client_states/{client_id}";
}
// ClientStates queries all the IBC light clients of a chain.
- rpc ClientStates(QueryClientStatesRequest)
- returns (QueryClientStatesResponse) {
+ rpc ClientStates(QueryClientStatesRequest) returns (QueryClientStatesResponse) {
option (google.api.http).get = "/ibc/core/client/v1/client_states";
}
// ConsensusState queries a consensus state associated with a client state at
// a given height.
- rpc ConsensusState(QueryConsensusStateRequest)
- returns (QueryConsensusStateResponse) {
+ rpc ConsensusState(QueryConsensusStateRequest) returns (QueryConsensusStateResponse) {
option (google.api.http).get = "/ibc/core/client/v1/consensus_states/"
"{client_id}/revision/{revision_number}/"
"height/{revision_height}";
@@ -35,30 +32,28 @@ service Query {
// ConsensusStates queries all the consensus state associated with a given
// client.
- rpc ConsensusStates(QueryConsensusStatesRequest)
- returns (QueryConsensusStatesResponse) {
- option (google.api.http).get =
- "/ibc/core/client/v1/consensus_states/{client_id}";
+ rpc ConsensusStates(QueryConsensusStatesRequest) returns (QueryConsensusStatesResponse) {
+ option (google.api.http).get = "/ibc/core/client/v1/consensus_states/{client_id}";
+ }
+
+ // Status queries the status of an IBC client.
+ rpc ClientStatus(QueryClientStatusRequest) returns (QueryClientStatusResponse) {
+ option (google.api.http).get = "/ibc/core/client/v1/client_status/{client_id}";
}
// ClientParams queries all parameters of the ibc client.
- rpc ClientParams(QueryClientParamsRequest)
- returns (QueryClientParamsResponse) {
+ rpc ClientParams(QueryClientParamsRequest) returns (QueryClientParamsResponse) {
option (google.api.http).get = "/ibc/client/v1/params";
}
// UpgradedClientState queries an Upgraded IBC light client.
- rpc UpgradedClientState(QueryUpgradedClientStateRequest)
- returns (QueryUpgradedClientStateResponse) {
- option (google.api.http).get =
- "/ibc/core/client/v1/upgraded_client_states";
+ rpc UpgradedClientState(QueryUpgradedClientStateRequest) returns (QueryUpgradedClientStateResponse) {
+ option (google.api.http).get = "/ibc/core/client/v1/upgraded_client_states";
}
// UpgradedConsensusState queries an Upgraded IBC consensus state.
- rpc UpgradedConsensusState(QueryUpgradedConsensusStateRequest)
- returns (QueryUpgradedConsensusStateResponse) {
- option (google.api.http).get =
- "/ibc/core/client/v1/upgraded_consensus_states";
+ rpc UpgradedConsensusState(QueryUpgradedConsensusStateRequest) returns (QueryUpgradedConsensusStateResponse) {
+ option (google.api.http).get = "/ibc/core/client/v1/upgraded_consensus_states";
}
}
@@ -78,7 +73,7 @@ message QueryClientStateResponse {
// merkle proof of existence
bytes proof = 2;
// height at which the proof was retrieved
- ibc.core.client.v1.Height proof_height = 3 [ (gogoproto.nullable) = false ];
+ ibc.core.client.v1.Height proof_height = 3 [(gogoproto.nullable) = false];
}
// QueryClientStatesRequest is the request type for the Query/ClientStates RPC
@@ -92,10 +87,8 @@ message QueryClientStatesRequest {
// method.
message QueryClientStatesResponse {
// list of stored ClientStates of the chain.
- repeated IdentifiedClientState client_states = 1 [
- (gogoproto.nullable) = false,
- (gogoproto.castrepeated) = "IdentifiedClientStates"
- ];
+ repeated IdentifiedClientState client_states = 1
+ [(gogoproto.nullable) = false, (gogoproto.castrepeated) = "IdentifiedClientStates"];
// pagination response
cosmos.base.query.v1beta1.PageResponse pagination = 2;
}
@@ -123,7 +116,7 @@ message QueryConsensusStateResponse {
// merkle proof of existence
bytes proof = 2;
// height at which the proof was retrieved
- ibc.core.client.v1.Height proof_height = 3 [ (gogoproto.nullable) = false ];
+ ibc.core.client.v1.Height proof_height = 3 [(gogoproto.nullable) = false];
}
// QueryConsensusStatesRequest is the request type for the Query/ConsensusStates
@@ -139,12 +132,24 @@ message QueryConsensusStatesRequest {
// Query/ConsensusStates RPC method
message QueryConsensusStatesResponse {
// consensus states associated with the identifier
- repeated ConsensusStateWithHeight consensus_states = 1
- [ (gogoproto.nullable) = false ];
+ repeated ConsensusStateWithHeight consensus_states = 1 [(gogoproto.nullable) = false];
// pagination response
cosmos.base.query.v1beta1.PageResponse pagination = 2;
}
+// QueryClientStatusRequest is the request type for the Query/ClientStatus RPC
+// method
+message QueryClientStatusRequest {
+ // client unique identifier
+ string client_id = 1;
+}
+
+// QueryClientStatusResponse is the response type for the Query/ClientStatus RPC
+// method. It returns the current status of the IBC client.
+message QueryClientStatusResponse {
+ string status = 1;
+}
+
// QueryClientParamsRequest is the request type for the Query/ClientParams RPC
// method.
message QueryClientParamsRequest {}
@@ -158,7 +163,7 @@ message QueryClientParamsResponse {
// QueryUpgradedClientStateRequest is the request type for the
// Query/UpgradedClientState RPC method
-message QueryUpgradedClientStateRequest { }
+message QueryUpgradedClientStateRequest {}
// QueryUpgradedClientStateResponse is the response type for the
// Query/UpgradedClientState RPC method.
@@ -169,7 +174,7 @@ message QueryUpgradedClientStateResponse {
// QueryUpgradedConsensusStateRequest is the request type for the
// Query/UpgradedConsensusState RPC method
-message QueryUpgradedConsensusStateRequest { }
+message QueryUpgradedConsensusStateRequest {}
// QueryUpgradedConsensusStateResponse is the response type for the
// Query/UpgradedConsensusState RPC method.
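
The ClientStatus RPC and the QueryClientStatus request/response messages added in the query.proto hunks above expose a light client's current status over gRPC. A minimal sketch of calling it from Go, assuming the gogo/grpc-generated bindings in the 02-client types package; the import path, node address and client identifier below are illustrative and not part of this patch:

    package main

    import (
        "context"
        "fmt"
        "log"

        "google.golang.org/grpc"

        // assumed import path for the generated 02-client query types
        clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
    )

    func main() {
        // Dial a node's gRPC endpoint (address is illustrative).
        conn, err := grpc.Dial("localhost:9090", grpc.WithInsecure())
        if err != nil {
            log.Fatal(err)
        }
        defer conn.Close()

        queryClient := clienttypes.NewQueryClient(conn)

        // Query the status of a single light client by its identifier.
        res, err := queryClient.ClientStatus(context.Background(), &clienttypes.QueryClientStatusRequest{
            ClientId: "07-tendermint-0",
        })
        if err != nil {
            log.Fatal(err)
        }

        fmt.Println("client status:", res.Status) // e.g. "Active", "Expired" or "Frozen"
    }

Returning the status as a plain string keeps the query independent of any particular light client implementation.
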
diff --git a/proto/ibc/core/client/v1/tx.proto b/proto/ibc/core/client/v1/tx.proto
index 56f12911..35386adb 100644
--- a/proto/ibc/core/client/v1/tx.proto
+++ b/proto/ibc/core/client/v1/tx.proto
@@ -20,22 +20,19 @@ service Msg {
rpc UpgradeClient(MsgUpgradeClient) returns (MsgUpgradeClientResponse);
// SubmitMisbehaviour defines a rpc handler method for MsgSubmitMisbehaviour.
- rpc SubmitMisbehaviour(MsgSubmitMisbehaviour)
- returns (MsgSubmitMisbehaviourResponse);
+ rpc SubmitMisbehaviour(MsgSubmitMisbehaviour) returns (MsgSubmitMisbehaviourResponse);
}
// MsgCreateClient defines a message to create an IBC client
message MsgCreateClient {
- option (gogoproto.equal) = false;
+ option (gogoproto.equal) = false;
option (gogoproto.goproto_getters) = false;
// light client state
- google.protobuf.Any client_state = 1
- [ (gogoproto.moretags) = "yaml:\"client_state\"" ];
+ google.protobuf.Any client_state = 1 [(gogoproto.moretags) = "yaml:\"client_state\""];
// consensus state associated with the client that corresponds to a given
// height.
- google.protobuf.Any consensus_state = 2
- [ (gogoproto.moretags) = "yaml:\"consensus_state\"" ];
+ google.protobuf.Any consensus_state = 2 [(gogoproto.moretags) = "yaml:\"consensus_state\""];
// signer address
string signer = 3;
}
@@ -46,11 +43,11 @@ message MsgCreateClientResponse {}
 // MsgUpdateClient defines an sdk.Msg to update an IBC client state using
// the given header.
message MsgUpdateClient {
- option (gogoproto.equal) = false;
+ option (gogoproto.equal) = false;
option (gogoproto.goproto_getters) = false;
// client unique identifier
- string client_id = 1 [ (gogoproto.moretags) = "yaml:\"client_id\"" ];
+ string client_id = 1 [(gogoproto.moretags) = "yaml:\"client_id\""];
// header to update the light client
google.protobuf.Any header = 2;
// signer address
@@ -63,24 +60,20 @@ message MsgUpdateClientResponse {}
// MsgUpgradeClient defines an sdk.Msg to upgrade an IBC client to a new client
// state
message MsgUpgradeClient {
- option (gogoproto.equal) = false;
+ option (gogoproto.equal) = false;
option (gogoproto.goproto_getters) = false;
// client unique identifier
- string client_id = 1 [ (gogoproto.moretags) = "yaml:\"client_id\"" ];
+ string client_id = 1 [(gogoproto.moretags) = "yaml:\"client_id\""];
// upgraded client state
- google.protobuf.Any client_state = 2
- [ (gogoproto.moretags) = "yaml:\"client_state\"" ];
+ google.protobuf.Any client_state = 2 [(gogoproto.moretags) = "yaml:\"client_state\""];
// upgraded consensus state, only contains enough information to serve as a
// basis of trust in update logic
- google.protobuf.Any consensus_state = 3
- [ (gogoproto.moretags) = "yaml:\"consensus_state\"" ];
+ google.protobuf.Any consensus_state = 3 [(gogoproto.moretags) = "yaml:\"consensus_state\""];
// proof that old chain committed to new client
- bytes proof_upgrade_client = 4
- [ (gogoproto.moretags) = "yaml:\"proof_upgrade_client\"" ];
+ bytes proof_upgrade_client = 4 [(gogoproto.moretags) = "yaml:\"proof_upgrade_client\""];
// proof that old chain committed to new consensus state
- bytes proof_upgrade_consensus_state = 5
- [ (gogoproto.moretags) = "yaml:\"proof_upgrade_consensus_state\"" ];
+ bytes proof_upgrade_consensus_state = 5 [(gogoproto.moretags) = "yaml:\"proof_upgrade_consensus_state\""];
// signer address
string signer = 6;
}
@@ -91,11 +84,11 @@ message MsgUpgradeClientResponse {}
// MsgSubmitMisbehaviour defines an sdk.Msg type that submits Evidence for
// light client misbehaviour.
message MsgSubmitMisbehaviour {
- option (gogoproto.equal) = false;
+ option (gogoproto.equal) = false;
option (gogoproto.goproto_getters) = false;
// client unique identifier
- string client_id = 1 [ (gogoproto.moretags) = "yaml:\"client_id\"" ];
+ string client_id = 1 [(gogoproto.moretags) = "yaml:\"client_id\""];
// misbehaviour used for freezing the light client
google.protobuf.Any misbehaviour = 2;
// signer address
diff --git a/proto/ibc/core/commitment/v1/commitment.proto b/proto/ibc/core/commitment/v1/commitment.proto
index 4fc56fe8..47d82394 100644
--- a/proto/ibc/core/commitment/v1/commitment.proto
+++ b/proto/ibc/core/commitment/v1/commitment.proto
@@ -19,7 +19,7 @@ message MerkleRoot {
// The constructed key from the Path and the key will be append(Path.KeyPath,
// append(Path.KeyPrefix, key...))
message MerklePrefix {
- bytes key_prefix = 1 [ (gogoproto.moretags) = "yaml:\"key_prefix\"" ];
+ bytes key_prefix = 1 [(gogoproto.moretags) = "yaml:\"key_prefix\""];
}
// MerklePath is the path used to verify commitment proofs, which can be an
@@ -28,7 +28,7 @@ message MerklePrefix {
message MerklePath {
option (gogoproto.goproto_stringer) = false;
- repeated string key_path = 1 [ (gogoproto.moretags) = "yaml:\"key_path\"" ];
+ repeated string key_path = 1 [(gogoproto.moretags) = "yaml:\"key_path\""];
}
// MerkleProof is a wrapper type over a chain of CommitmentProofs.
@@ -36,4 +36,6 @@ message MerklePath {
// elements, verifiable in conjunction with a known commitment root. Proofs
// should be succinct.
// MerkleProofs are ordered from leaf-to-root
-message MerkleProof { repeated ics23.CommitmentProof proofs = 1; }
+message MerkleProof {
+ repeated ics23.CommitmentProof proofs = 1;
+}
diff --git a/proto/ibc/core/connection/v1/connection.proto b/proto/ibc/core/connection/v1/connection.proto
index c9112710..5b4e32bf 100644
--- a/proto/ibc/core/connection/v1/connection.proto
+++ b/proto/ibc/core/connection/v1/connection.proto
@@ -17,18 +17,18 @@ import "ibc/core/commitment/v1/commitment.proto";
message ConnectionEnd {
option (gogoproto.goproto_getters) = false;
// client associated with this connection.
- string client_id = 1 [ (gogoproto.moretags) = "yaml:\"client_id\"" ];
+ string client_id = 1 [(gogoproto.moretags) = "yaml:\"client_id\""];
// IBC version which can be utilised to determine encodings or protocols for
// channels or packets utilising this connection.
repeated Version versions = 2;
// current state of the connection end.
State state = 3;
// counterparty chain associated with this connection.
- Counterparty counterparty = 4 [ (gogoproto.nullable) = false ];
+ Counterparty counterparty = 4 [(gogoproto.nullable) = false];
// delay period that must pass before a consensus state can be used for
// packet-verification NOTE: delay period logic is only implemented by some
// clients.
- uint64 delay_period = 5 [ (gogoproto.moretags) = "yaml:\"delay_period\"" ];
+ uint64 delay_period = 5 [(gogoproto.moretags) = "yaml:\"delay_period\""];
}
// IdentifiedConnection defines a connection with additional connection
@@ -36,18 +36,18 @@ message ConnectionEnd {
message IdentifiedConnection {
option (gogoproto.goproto_getters) = false;
// connection identifier.
- string id = 1 [ (gogoproto.moretags) = "yaml:\"id\"" ];
+ string id = 1 [(gogoproto.moretags) = "yaml:\"id\""];
// client associated with this connection.
- string client_id = 2 [ (gogoproto.moretags) = "yaml:\"client_id\"" ];
+ string client_id = 2 [(gogoproto.moretags) = "yaml:\"client_id\""];
// IBC version which can be utilised to determine encodings or protocols for
// channels or packets utilising this connection
repeated Version versions = 3;
// current state of the connection end.
State state = 4;
// counterparty chain associated with this connection.
- Counterparty counterparty = 5 [ (gogoproto.nullable) = false ];
+ Counterparty counterparty = 5 [(gogoproto.nullable) = false];
// delay period associated with this connection.
- uint64 delay_period = 6 [ (gogoproto.moretags) = "yaml:\"delay_period\"" ];
+ uint64 delay_period = 6 [(gogoproto.moretags) = "yaml:\"delay_period\""];
}
// State defines if a connection is in one of the following states:
@@ -56,15 +56,14 @@ enum State {
option (gogoproto.goproto_enum_prefix) = false;
// Default State
- STATE_UNINITIALIZED_UNSPECIFIED = 0
- [ (gogoproto.enumvalue_customname) = "UNINITIALIZED" ];
+ STATE_UNINITIALIZED_UNSPECIFIED = 0 [(gogoproto.enumvalue_customname) = "UNINITIALIZED"];
// A connection end has just started the opening handshake.
- STATE_INIT = 1 [ (gogoproto.enumvalue_customname) = "INIT" ];
+ STATE_INIT = 1 [(gogoproto.enumvalue_customname) = "INIT"];
// A connection end has acknowledged the handshake step on the counterparty
// chain.
- STATE_TRYOPEN = 2 [ (gogoproto.enumvalue_customname) = "TRYOPEN" ];
+ STATE_TRYOPEN = 2 [(gogoproto.enumvalue_customname) = "TRYOPEN"];
// A connection end has completed the handshake.
- STATE_OPEN = 3 [ (gogoproto.enumvalue_customname) = "OPEN" ];
+ STATE_OPEN = 3 [(gogoproto.enumvalue_customname) = "OPEN"];
}
// Counterparty defines the counterparty chain associated with a connection end.
@@ -73,13 +72,12 @@ message Counterparty {
// identifies the client on the counterparty chain associated with a given
// connection.
- string client_id = 1 [ (gogoproto.moretags) = "yaml:\"client_id\"" ];
+ string client_id = 1 [(gogoproto.moretags) = "yaml:\"client_id\""];
// identifies the connection end on the counterparty chain associated with a
// given connection.
- string connection_id = 2 [ (gogoproto.moretags) = "yaml:\"connection_id\"" ];
+ string connection_id = 2 [(gogoproto.moretags) = "yaml:\"connection_id\""];
// commitment merkle prefix of the counterparty chain.
- ibc.core.commitment.v1.MerklePrefix prefix = 3
- [ (gogoproto.nullable) = false ];
+ ibc.core.commitment.v1.MerklePrefix prefix = 3 [(gogoproto.nullable) = false];
}
// ClientPaths define all the connection paths for a client state.
@@ -91,7 +89,7 @@ message ClientPaths {
// ConnectionPaths define all the connection paths for a given client state.
message ConnectionPaths {
// client state unique identifier
- string client_id = 1 [ (gogoproto.moretags) = "yaml:\"client_id\"" ];
+ string client_id = 1 [(gogoproto.moretags) = "yaml:\"client_id\""];
// list of connection paths
repeated string paths = 2;
}
diff --git a/proto/ibc/core/connection/v1/genesis.proto b/proto/ibc/core/connection/v1/genesis.proto
index 8a9d9fd0..62296e1e 100644
--- a/proto/ibc/core/connection/v1/genesis.proto
+++ b/proto/ibc/core/connection/v1/genesis.proto
@@ -9,13 +9,9 @@ import "ibc/core/connection/v1/connection.proto";
// GenesisState defines the ibc connection submodule's genesis state.
message GenesisState {
- repeated IdentifiedConnection connections = 1
- [ (gogoproto.nullable) = false ];
- repeated ConnectionPaths client_connection_paths = 2 [
- (gogoproto.nullable) = false,
- (gogoproto.moretags) = "yaml:\"client_connection_paths\""
- ];
+ repeated IdentifiedConnection connections = 1 [(gogoproto.nullable) = false];
+ repeated ConnectionPaths client_connection_paths = 2
+ [(gogoproto.nullable) = false, (gogoproto.moretags) = "yaml:\"client_connection_paths\""];
// the sequence for the next generated connection identifier
- uint64 next_connection_sequence = 3
- [ (gogoproto.moretags) = "yaml:\"next_connection_sequence\"" ];
+ uint64 next_connection_sequence = 3 [(gogoproto.moretags) = "yaml:\"next_connection_sequence\""];
}
diff --git a/proto/ibc/core/connection/v1/query.proto b/proto/ibc/core/connection/v1/query.proto
index efd29c2a..ca90e0ee 100644
--- a/proto/ibc/core/connection/v1/query.proto
+++ b/proto/ibc/core/connection/v1/query.proto
@@ -15,8 +15,7 @@ import "google/protobuf/any.proto";
service Query {
// Connection queries an IBC connection end.
rpc Connection(QueryConnectionRequest) returns (QueryConnectionResponse) {
- option (google.api.http).get =
- "/ibc/core/connection/v1/connections/{connection_id}";
+ option (google.api.http).get = "/ibc/core/connection/v1/connections/{connection_id}";
}
// Connections queries all the IBC connections of a chain.
@@ -26,27 +25,21 @@ service Query {
// ClientConnections queries the connection paths associated with a client
// state.
- rpc ClientConnections(QueryClientConnectionsRequest)
- returns (QueryClientConnectionsResponse) {
- option (google.api.http).get =
- "/ibc/core/connection/v1/client_connections/{client_id}";
+ rpc ClientConnections(QueryClientConnectionsRequest) returns (QueryClientConnectionsResponse) {
+ option (google.api.http).get = "/ibc/core/connection/v1/client_connections/{client_id}";
}
// ConnectionClientState queries the client state associated with the
// connection.
- rpc ConnectionClientState(QueryConnectionClientStateRequest)
- returns (QueryConnectionClientStateResponse) {
- option (google.api.http).get =
- "/ibc/core/connection/v1/connections/{connection_id}/client_state";
+ rpc ConnectionClientState(QueryConnectionClientStateRequest) returns (QueryConnectionClientStateResponse) {
+ option (google.api.http).get = "/ibc/core/connection/v1/connections/{connection_id}/client_state";
}
// ConnectionConsensusState queries the consensus state associated with the
// connection.
- rpc ConnectionConsensusState(QueryConnectionConsensusStateRequest)
- returns (QueryConnectionConsensusStateResponse) {
- option (google.api.http).get =
- "/ibc/core/connection/v1/connections/{connection_id}/consensus_state/"
- "revision/{revision_number}/height/{revision_height}";
+ rpc ConnectionConsensusState(QueryConnectionConsensusStateRequest) returns (QueryConnectionConsensusStateResponse) {
+ option (google.api.http).get = "/ibc/core/connection/v1/connections/{connection_id}/consensus_state/"
+ "revision/{revision_number}/height/{revision_height}";
}
}
@@ -66,7 +59,7 @@ message QueryConnectionResponse {
// merkle proof of existence
bytes proof = 2;
// height at which the proof was retrieved
- ibc.core.client.v1.Height proof_height = 3 [ (gogoproto.nullable) = false ];
+ ibc.core.client.v1.Height proof_height = 3 [(gogoproto.nullable) = false];
}
// QueryConnectionsRequest is the request type for the Query/Connections RPC
@@ -83,7 +76,7 @@ message QueryConnectionsResponse {
// pagination response
cosmos.base.query.v1beta1.PageResponse pagination = 2;
// query block height
- ibc.core.client.v1.Height height = 3 [ (gogoproto.nullable) = false ];
+ ibc.core.client.v1.Height height = 3 [(gogoproto.nullable) = false];
}
// QueryClientConnectionsRequest is the request type for the
@@ -101,14 +94,14 @@ message QueryClientConnectionsResponse {
// merkle proof of existence
bytes proof = 2;
// height at which the proof was generated
- ibc.core.client.v1.Height proof_height = 3 [ (gogoproto.nullable) = false ];
+ ibc.core.client.v1.Height proof_height = 3 [(gogoproto.nullable) = false];
}
// QueryConnectionClientStateRequest is the request type for the
// Query/ConnectionClientState RPC method
message QueryConnectionClientStateRequest {
// connection identifier
- string connection_id = 1 [ (gogoproto.moretags) = "yaml:\"connection_id\"" ];
+ string connection_id = 1 [(gogoproto.moretags) = "yaml:\"connection_id\""];
}
// QueryConnectionClientStateResponse is the response type for the
@@ -119,14 +112,14 @@ message QueryConnectionClientStateResponse {
// merkle proof of existence
bytes proof = 2;
// height at which the proof was retrieved
- ibc.core.client.v1.Height proof_height = 3 [ (gogoproto.nullable) = false ];
+ ibc.core.client.v1.Height proof_height = 3 [(gogoproto.nullable) = false];
}
// QueryConnectionConsensusStateRequest is the request type for the
// Query/ConnectionConsensusState RPC method
message QueryConnectionConsensusStateRequest {
// connection identifier
- string connection_id = 1 [ (gogoproto.moretags) = "yaml:\"connection_id\"" ];
+ string connection_id = 1 [(gogoproto.moretags) = "yaml:\"connection_id\""];
uint64 revision_number = 2;
uint64 revision_height = 3;
}
@@ -141,5 +134,5 @@ message QueryConnectionConsensusStateResponse {
// merkle proof of existence
bytes proof = 3;
// height at which the proof was retrieved
- ibc.core.client.v1.Height proof_height = 4 [ (gogoproto.nullable) = false ];
+ ibc.core.client.v1.Height proof_height = 4 [(gogoproto.nullable) = false];
}
diff --git a/proto/ibc/core/connection/v1/tx.proto b/proto/ibc/core/connection/v1/tx.proto
index 2a71469c..6318f9fd 100644
--- a/proto/ibc/core/connection/v1/tx.proto
+++ b/proto/ibc/core/connection/v1/tx.proto
@@ -12,34 +12,30 @@ import "ibc/core/connection/v1/connection.proto";
// Msg defines the ibc/connection Msg service.
service Msg {
// ConnectionOpenInit defines a rpc handler method for MsgConnectionOpenInit.
- rpc ConnectionOpenInit(MsgConnectionOpenInit)
- returns (MsgConnectionOpenInitResponse);
+ rpc ConnectionOpenInit(MsgConnectionOpenInit) returns (MsgConnectionOpenInitResponse);
// ConnectionOpenTry defines a rpc handler method for MsgConnectionOpenTry.
- rpc ConnectionOpenTry(MsgConnectionOpenTry)
- returns (MsgConnectionOpenTryResponse);
+ rpc ConnectionOpenTry(MsgConnectionOpenTry) returns (MsgConnectionOpenTryResponse);
// ConnectionOpenAck defines a rpc handler method for MsgConnectionOpenAck.
- rpc ConnectionOpenAck(MsgConnectionOpenAck)
- returns (MsgConnectionOpenAckResponse);
+ rpc ConnectionOpenAck(MsgConnectionOpenAck) returns (MsgConnectionOpenAckResponse);
// ConnectionOpenConfirm defines a rpc handler method for
// MsgConnectionOpenConfirm.
- rpc ConnectionOpenConfirm(MsgConnectionOpenConfirm)
- returns (MsgConnectionOpenConfirmResponse);
+ rpc ConnectionOpenConfirm(MsgConnectionOpenConfirm) returns (MsgConnectionOpenConfirmResponse);
}
// MsgConnectionOpenInit defines the msg sent by an account on Chain A to
// initialize a connection with Chain B.
message MsgConnectionOpenInit {
- option (gogoproto.equal) = false;
+ option (gogoproto.equal) = false;
option (gogoproto.goproto_getters) = false;
- string client_id = 1 [ (gogoproto.moretags) = "yaml:\"client_id\"" ];
- Counterparty counterparty = 2 [ (gogoproto.nullable) = false ];
- Version version = 3;
- uint64 delay_period = 4 [ (gogoproto.moretags) = "yaml:\"delay_period\"" ];
- string signer = 5;
+ string client_id = 1 [(gogoproto.moretags) = "yaml:\"client_id\""];
+ Counterparty counterparty = 2 [(gogoproto.nullable) = false];
+ Version version = 3;
+ uint64 delay_period = 4 [(gogoproto.moretags) = "yaml:\"delay_period\""];
+ string signer = 5;
}
// MsgConnectionOpenInitResponse defines the Msg/ConnectionOpenInit response
@@ -49,36 +45,28 @@ message MsgConnectionOpenInitResponse {}
// MsgConnectionOpenTry defines a msg sent by a Relayer to try to open a
// connection on Chain B.
message MsgConnectionOpenTry {
- option (gogoproto.equal) = false;
+ option (gogoproto.equal) = false;
option (gogoproto.goproto_getters) = false;
- string client_id = 1 [ (gogoproto.moretags) = "yaml:\"client_id\"" ];
+ string client_id = 1 [(gogoproto.moretags) = "yaml:\"client_id\""];
   // in the case of crossing hellos, when both chains call OpenInit, we need
// the connection identifier of the previous connection in state INIT
- string previous_connection_id = 2
- [ (gogoproto.moretags) = "yaml:\"previous_connection_id\"" ];
- google.protobuf.Any client_state = 3
- [ (gogoproto.moretags) = "yaml:\"client_state\"" ];
- Counterparty counterparty = 4 [ (gogoproto.nullable) = false ];
- uint64 delay_period = 5 [ (gogoproto.moretags) = "yaml:\"delay_period\"" ];
- repeated Version counterparty_versions = 6
- [ (gogoproto.moretags) = "yaml:\"counterparty_versions\"" ];
- ibc.core.client.v1.Height proof_height = 7 [
- (gogoproto.moretags) = "yaml:\"proof_height\"",
- (gogoproto.nullable) = false
- ];
+ string previous_connection_id = 2 [(gogoproto.moretags) = "yaml:\"previous_connection_id\""];
+ google.protobuf.Any client_state = 3 [(gogoproto.moretags) = "yaml:\"client_state\""];
+ Counterparty counterparty = 4 [(gogoproto.nullable) = false];
+ uint64 delay_period = 5 [(gogoproto.moretags) = "yaml:\"delay_period\""];
+ repeated Version counterparty_versions = 6 [(gogoproto.moretags) = "yaml:\"counterparty_versions\""];
+ ibc.core.client.v1.Height proof_height = 7
+ [(gogoproto.moretags) = "yaml:\"proof_height\"", (gogoproto.nullable) = false];
   // proof of the initialization of the connection on Chain A: `UNINITIALIZED ->
// INIT`
- bytes proof_init = 8 [ (gogoproto.moretags) = "yaml:\"proof_init\"" ];
+ bytes proof_init = 8 [(gogoproto.moretags) = "yaml:\"proof_init\""];
// proof of client state included in message
- bytes proof_client = 9 [ (gogoproto.moretags) = "yaml:\"proof_client\"" ];
+ bytes proof_client = 9 [(gogoproto.moretags) = "yaml:\"proof_client\""];
// proof of client consensus state
- bytes proof_consensus = 10
- [ (gogoproto.moretags) = "yaml:\"proof_consensus\"" ];
- ibc.core.client.v1.Height consensus_height = 11 [
- (gogoproto.moretags) = "yaml:\"consensus_height\"",
- (gogoproto.nullable) = false
- ];
+ bytes proof_consensus = 10 [(gogoproto.moretags) = "yaml:\"proof_consensus\""];
+ ibc.core.client.v1.Height consensus_height = 11
+ [(gogoproto.moretags) = "yaml:\"consensus_height\"", (gogoproto.nullable) = false];
string signer = 12;
}
@@ -88,31 +76,24 @@ message MsgConnectionOpenTryResponse {}
// MsgConnectionOpenAck defines a msg sent by a Relayer to Chain A to
// acknowledge the change of connection state to TRYOPEN on Chain B.
message MsgConnectionOpenAck {
- option (gogoproto.equal) = false;
+ option (gogoproto.equal) = false;
option (gogoproto.goproto_getters) = false;
- string connection_id = 1 [ (gogoproto.moretags) = "yaml:\"connection_id\"" ];
- string counterparty_connection_id = 2
- [ (gogoproto.moretags) = "yaml:\"counterparty_connection_id\"" ];
- Version version = 3;
- google.protobuf.Any client_state = 4
- [ (gogoproto.moretags) = "yaml:\"client_state\"" ];
- ibc.core.client.v1.Height proof_height = 5 [
- (gogoproto.moretags) = "yaml:\"proof_height\"",
- (gogoproto.nullable) = false
- ];
+ string connection_id = 1 [(gogoproto.moretags) = "yaml:\"connection_id\""];
+ string counterparty_connection_id = 2 [(gogoproto.moretags) = "yaml:\"counterparty_connection_id\""];
+ Version version = 3;
+ google.protobuf.Any client_state = 4 [(gogoproto.moretags) = "yaml:\"client_state\""];
+ ibc.core.client.v1.Height proof_height = 5
+ [(gogoproto.moretags) = "yaml:\"proof_height\"", (gogoproto.nullable) = false];
   // proof of the initialization of the connection on Chain B: `UNINITIALIZED ->
// TRYOPEN`
- bytes proof_try = 6 [ (gogoproto.moretags) = "yaml:\"proof_try\"" ];
+ bytes proof_try = 6 [(gogoproto.moretags) = "yaml:\"proof_try\""];
// proof of client state included in message
- bytes proof_client = 7 [ (gogoproto.moretags) = "yaml:\"proof_client\"" ];
+ bytes proof_client = 7 [(gogoproto.moretags) = "yaml:\"proof_client\""];
// proof of client consensus state
- bytes proof_consensus = 8
- [ (gogoproto.moretags) = "yaml:\"proof_consensus\"" ];
- ibc.core.client.v1.Height consensus_height = 9 [
- (gogoproto.moretags) = "yaml:\"consensus_height\"",
- (gogoproto.nullable) = false
- ];
+ bytes proof_consensus = 8 [(gogoproto.moretags) = "yaml:\"proof_consensus\""];
+ ibc.core.client.v1.Height consensus_height = 9
+ [(gogoproto.moretags) = "yaml:\"consensus_height\"", (gogoproto.nullable) = false];
string signer = 10;
}
@@ -122,16 +103,14 @@ message MsgConnectionOpenAckResponse {}
// MsgConnectionOpenConfirm defines a msg sent by a Relayer to Chain B to
// acknowledge the change of connection state to OPEN on Chain A.
message MsgConnectionOpenConfirm {
- option (gogoproto.equal) = false;
+ option (gogoproto.equal) = false;
option (gogoproto.goproto_getters) = false;
- string connection_id = 1 [ (gogoproto.moretags) = "yaml:\"connection_id\"" ];
+ string connection_id = 1 [(gogoproto.moretags) = "yaml:\"connection_id\""];
// proof for the change of the connection state on Chain A: `INIT -> OPEN`
- bytes proof_ack = 2 [ (gogoproto.moretags) = "yaml:\"proof_ack\"" ];
- ibc.core.client.v1.Height proof_height = 3 [
- (gogoproto.moretags) = "yaml:\"proof_height\"",
- (gogoproto.nullable) = false
- ];
+ bytes proof_ack = 2 [(gogoproto.moretags) = "yaml:\"proof_ack\""];
+ ibc.core.client.v1.Height proof_height = 3
+ [(gogoproto.moretags) = "yaml:\"proof_height\"", (gogoproto.nullable) = false];
string signer = 4;
}
diff --git a/proto/ibc/core/types/v1/genesis.proto b/proto/ibc/core/types/v1/genesis.proto
index 2b5f9cd2..2451da32 100644
--- a/proto/ibc/core/types/v1/genesis.proto
+++ b/proto/ibc/core/types/v1/genesis.proto
@@ -12,18 +12,12 @@ import "ibc/core/channel/v1/genesis.proto";
// GenesisState defines the ibc module's genesis state.
message GenesisState {
// ICS002 - Clients genesis state
- ibc.core.client.v1.GenesisState client_genesis = 1 [
- (gogoproto.nullable) = false,
- (gogoproto.moretags) = "yaml:\"client_genesis\""
- ];
+ ibc.core.client.v1.GenesisState client_genesis = 1
+ [(gogoproto.nullable) = false, (gogoproto.moretags) = "yaml:\"client_genesis\""];
// ICS003 - Connections genesis state
- ibc.core.connection.v1.GenesisState connection_genesis = 2 [
- (gogoproto.nullable) = false,
- (gogoproto.moretags) = "yaml:\"connection_genesis\""
- ];
+ ibc.core.connection.v1.GenesisState connection_genesis = 2
+ [(gogoproto.nullable) = false, (gogoproto.moretags) = "yaml:\"connection_genesis\""];
// ICS004 - Channel genesis state
- ibc.core.channel.v1.GenesisState channel_genesis = 3 [
- (gogoproto.nullable) = false,
- (gogoproto.moretags) = "yaml:\"channel_genesis\""
- ];
+ ibc.core.channel.v1.GenesisState channel_genesis = 3
+ [(gogoproto.nullable) = false, (gogoproto.moretags) = "yaml:\"channel_genesis\""];
}
diff --git a/proto/ibc/lightclients/localhost/v1/localhost.proto b/proto/ibc/lightclients/localhost/v1/localhost.proto
index e090e0de..77e17bc3 100644
--- a/proto/ibc/lightclients/localhost/v1/localhost.proto
+++ b/proto/ibc/lightclients/localhost/v1/localhost.proto
@@ -12,7 +12,7 @@ import "ibc/core/client/v1/client.proto";
message ClientState {
option (gogoproto.goproto_getters) = false;
// self chain ID
- string chain_id = 1 [ (gogoproto.moretags) = "yaml:\"chain_id\"" ];
+ string chain_id = 1 [(gogoproto.moretags) = "yaml:\"chain_id\""];
// self latest block height
- ibc.core.client.v1.Height height = 2 [ (gogoproto.nullable) = false ];
+ ibc.core.client.v1.Height height = 2 [(gogoproto.nullable) = false];
}
diff --git a/proto/ibc/lightclients/solomachine/v1/solomachine.proto b/proto/ibc/lightclients/solomachine/v1/solomachine.proto
index e7f2b022..68f0c1ea 100644
--- a/proto/ibc/lightclients/solomachine/v1/solomachine.proto
+++ b/proto/ibc/lightclients/solomachine/v1/solomachine.proto
@@ -16,14 +16,11 @@ message ClientState {
// latest sequence of the client state
uint64 sequence = 1;
// frozen sequence of the solo machine
- uint64 frozen_sequence = 2
- [ (gogoproto.moretags) = "yaml:\"frozen_sequence\"" ];
- ConsensusState consensus_state = 3
- [ (gogoproto.moretags) = "yaml:\"consensus_state\"" ];
+ uint64 frozen_sequence = 2 [(gogoproto.moretags) = "yaml:\"frozen_sequence\""];
+ ConsensusState consensus_state = 3 [(gogoproto.moretags) = "yaml:\"consensus_state\""];
// when set to true, will allow governance to update a solo machine client.
// The client will be unfrozen if it is frozen.
- bool allow_update_after_proposal = 4
- [ (gogoproto.moretags) = "yaml:\"allow_update_after_proposal\"" ];
+ bool allow_update_after_proposal = 4 [(gogoproto.moretags) = "yaml:\"allow_update_after_proposal\""];
}
// ConsensusState defines a solo machine consensus state. The sequence of a
@@ -32,67 +29,62 @@ message ClientState {
message ConsensusState {
option (gogoproto.goproto_getters) = false;
// public key of the solo machine
- google.protobuf.Any public_key = 1
- [ (gogoproto.moretags) = "yaml:\"public_key\"" ];
+ google.protobuf.Any public_key = 1 [(gogoproto.moretags) = "yaml:\"public_key\""];
// diversifier allows the same public key to be re-used across different solo
// machine clients (potentially on different chains) without being considered
// misbehaviour.
string diversifier = 2;
- uint64 timestamp = 3;
+ uint64 timestamp = 3;
}
// Header defines a solo machine consensus header
message Header {
option (gogoproto.goproto_getters) = false;
// sequence to update solo machine public key at
- uint64 sequence = 1;
- uint64 timestamp = 2;
- bytes signature = 3;
- google.protobuf.Any new_public_key = 4
- [ (gogoproto.moretags) = "yaml:\"new_public_key\"" ];
- string new_diversifier = 5
- [ (gogoproto.moretags) = "yaml:\"new_diversifier\"" ];
+ uint64 sequence = 1;
+ uint64 timestamp = 2;
+ bytes signature = 3;
+ google.protobuf.Any new_public_key = 4 [(gogoproto.moretags) = "yaml:\"new_public_key\""];
+ string new_diversifier = 5 [(gogoproto.moretags) = "yaml:\"new_diversifier\""];
}
// Misbehaviour defines misbehaviour for a solo machine which consists
// of a sequence and two signatures over different messages at that sequence.
message Misbehaviour {
option (gogoproto.goproto_getters) = false;
- string client_id = 1 [ (gogoproto.moretags) = "yaml:\"client_id\"" ];
- uint64 sequence = 2;
- SignatureAndData signature_one = 3
- [ (gogoproto.moretags) = "yaml:\"signature_one\"" ];
- SignatureAndData signature_two = 4
- [ (gogoproto.moretags) = "yaml:\"signature_two\"" ];
+ string client_id = 1 [(gogoproto.moretags) = "yaml:\"client_id\""];
+ uint64 sequence = 2;
+ SignatureAndData signature_one = 3 [(gogoproto.moretags) = "yaml:\"signature_one\""];
+ SignatureAndData signature_two = 4 [(gogoproto.moretags) = "yaml:\"signature_two\""];
}
// SignatureAndData contains a signature and the data signed over to create that
// signature.
message SignatureAndData {
option (gogoproto.goproto_getters) = false;
- bytes signature = 1;
- DataType data_type = 2 [ (gogoproto.moretags) = "yaml:\"data_type\"" ];
- bytes data = 3;
- uint64 timestamp = 4;
+ bytes signature = 1;
+ DataType data_type = 2 [(gogoproto.moretags) = "yaml:\"data_type\""];
+ bytes data = 3;
+ uint64 timestamp = 4;
}
// TimestampedSignatureData contains the signature data and the timestamp of the
// signature.
message TimestampedSignatureData {
option (gogoproto.goproto_getters) = false;
- bytes signature_data = 1 [ (gogoproto.moretags) = "yaml:\"signature_data\"" ];
- uint64 timestamp = 2;
+ bytes signature_data = 1 [(gogoproto.moretags) = "yaml:\"signature_data\""];
+ uint64 timestamp = 2;
}
// SignBytes defines the signed bytes used for signature verification.
message SignBytes {
option (gogoproto.goproto_getters) = false;
- uint64 sequence = 1;
- uint64 timestamp = 2;
+ uint64 sequence = 1;
+ uint64 timestamp = 2;
string diversifier = 3;
// type of the data used
- DataType data_type = 4 [ (gogoproto.moretags) = "yaml:\"data_type\"" ];
+ DataType data_type = 4 [(gogoproto.moretags) = "yaml:\"data_type\""];
// marshaled data
bytes data = 5;
}
@@ -103,32 +95,25 @@ enum DataType {
option (gogoproto.goproto_enum_prefix) = false;
// Default State
- DATA_TYPE_UNINITIALIZED_UNSPECIFIED = 0
- [ (gogoproto.enumvalue_customname) = "UNSPECIFIED" ];
+ DATA_TYPE_UNINITIALIZED_UNSPECIFIED = 0 [(gogoproto.enumvalue_customname) = "UNSPECIFIED"];
// Data type for client state verification
- DATA_TYPE_CLIENT_STATE = 1 [ (gogoproto.enumvalue_customname) = "CLIENT" ];
+ DATA_TYPE_CLIENT_STATE = 1 [(gogoproto.enumvalue_customname) = "CLIENT"];
// Data type for consensus state verification
- DATA_TYPE_CONSENSUS_STATE = 2
- [ (gogoproto.enumvalue_customname) = "CONSENSUS" ];
+ DATA_TYPE_CONSENSUS_STATE = 2 [(gogoproto.enumvalue_customname) = "CONSENSUS"];
// Data type for connection state verification
- DATA_TYPE_CONNECTION_STATE = 3
- [ (gogoproto.enumvalue_customname) = "CONNECTION" ];
+ DATA_TYPE_CONNECTION_STATE = 3 [(gogoproto.enumvalue_customname) = "CONNECTION"];
// Data type for channel state verification
- DATA_TYPE_CHANNEL_STATE = 4 [ (gogoproto.enumvalue_customname) = "CHANNEL" ];
+ DATA_TYPE_CHANNEL_STATE = 4 [(gogoproto.enumvalue_customname) = "CHANNEL"];
// Data type for packet commitment verification
- DATA_TYPE_PACKET_COMMITMENT = 5
- [ (gogoproto.enumvalue_customname) = "PACKETCOMMITMENT" ];
+ DATA_TYPE_PACKET_COMMITMENT = 5 [(gogoproto.enumvalue_customname) = "PACKETCOMMITMENT"];
// Data type for packet acknowledgement verification
- DATA_TYPE_PACKET_ACKNOWLEDGEMENT = 6
- [ (gogoproto.enumvalue_customname) = "PACKETACKNOWLEDGEMENT" ];
+ DATA_TYPE_PACKET_ACKNOWLEDGEMENT = 6 [(gogoproto.enumvalue_customname) = "PACKETACKNOWLEDGEMENT"];
// Data type for packet receipt absence verification
- DATA_TYPE_PACKET_RECEIPT_ABSENCE = 7
- [ (gogoproto.enumvalue_customname) = "PACKETRECEIPTABSENCE" ];
+ DATA_TYPE_PACKET_RECEIPT_ABSENCE = 7 [(gogoproto.enumvalue_customname) = "PACKETRECEIPTABSENCE"];
// Data type for next sequence recv verification
- DATA_TYPE_NEXT_SEQUENCE_RECV = 8
- [ (gogoproto.enumvalue_customname) = "NEXTSEQUENCERECV" ];
+ DATA_TYPE_NEXT_SEQUENCE_RECV = 8 [(gogoproto.enumvalue_customname) = "NEXTSEQUENCERECV"];
// Data type for header verification
- DATA_TYPE_HEADER = 9 [ (gogoproto.enumvalue_customname) = "HEADER" ];
+ DATA_TYPE_HEADER = 9 [(gogoproto.enumvalue_customname) = "HEADER"];
}
// HeaderData returns the SignBytes data for update verification.
@@ -136,20 +121,17 @@ message HeaderData {
option (gogoproto.goproto_getters) = false;
// header public key
- google.protobuf.Any new_pub_key = 1
- [ (gogoproto.moretags) = "yaml:\"new_pub_key\"" ];
+ google.protobuf.Any new_pub_key = 1 [(gogoproto.moretags) = "yaml:\"new_pub_key\""];
// header diversifier
- string new_diversifier = 2
- [ (gogoproto.moretags) = "yaml:\"new_diversifier\"" ];
+ string new_diversifier = 2 [(gogoproto.moretags) = "yaml:\"new_diversifier\""];
}
// ClientStateData returns the SignBytes data for client state verification.
message ClientStateData {
option (gogoproto.goproto_getters) = false;
- bytes path = 1;
- google.protobuf.Any client_state = 2
- [ (gogoproto.moretags) = "yaml:\"client_state\"" ];
+ bytes path = 1;
+ google.protobuf.Any client_state = 2 [(gogoproto.moretags) = "yaml:\"client_state\""];
}
// ConsensusStateData returns the SignBytes data for consensus state
@@ -157,9 +139,8 @@ message ClientStateData {
message ConsensusStateData {
option (gogoproto.goproto_getters) = false;
- bytes path = 1;
- google.protobuf.Any consensus_state = 2
- [ (gogoproto.moretags) = "yaml:\"consensus_state\"" ];
+ bytes path = 1;
+ google.protobuf.Any consensus_state = 2 [(gogoproto.moretags) = "yaml:\"consensus_state\""];
}
// ConnectionStateData returns the SignBytes data for connection state
@@ -167,7 +148,7 @@ message ConsensusStateData {
message ConnectionStateData {
option (gogoproto.goproto_getters) = false;
- bytes path = 1;
+ bytes path = 1;
ibc.core.connection.v1.ConnectionEnd connection = 2;
}
@@ -176,31 +157,33 @@ message ConnectionStateData {
message ChannelStateData {
option (gogoproto.goproto_getters) = false;
- bytes path = 1;
+ bytes path = 1;
ibc.core.channel.v1.Channel channel = 2;
}
// PacketCommitmentData returns the SignBytes data for packet commitment
// verification.
message PacketCommitmentData {
- bytes path = 1;
+ bytes path = 1;
bytes commitment = 2;
}
// PacketAcknowledgementData returns the SignBytes data for acknowledgement
// verification.
message PacketAcknowledgementData {
- bytes path = 1;
+ bytes path = 1;
bytes acknowledgement = 2;
}
// PacketReceiptAbsenceData returns the SignBytes data for
// packet receipt absence verification.
-message PacketReceiptAbsenceData { bytes path = 1; }
+message PacketReceiptAbsenceData {
+ bytes path = 1;
+}
// NextSequenceRecvData returns the SignBytes data for verification of the next
// sequence to be received.
message NextSequenceRecvData {
- bytes path = 1;
- uint64 next_seq_recv = 2 [ (gogoproto.moretags) = "yaml:\"next_seq_recv\"" ];
+ bytes path = 1;
+ uint64 next_seq_recv = 2 [(gogoproto.moretags) = "yaml:\"next_seq_recv\""];
}
diff --git a/proto/ibc/lightclients/tendermint/v1/tendermint.proto b/proto/ibc/lightclients/tendermint/v1/tendermint.proto
index de59589b..17a6cce4 100644
--- a/proto/ibc/lightclients/tendermint/v1/tendermint.proto
+++ b/proto/ibc/lightclients/tendermint/v1/tendermint.proto
@@ -18,44 +18,30 @@ import "gogoproto/gogo.proto";
message ClientState {
option (gogoproto.goproto_getters) = false;
- string chain_id = 1;
- Fraction trust_level = 2 [
- (gogoproto.nullable) = false,
- (gogoproto.moretags) = "yaml:\"trust_level\""
- ];
+ string chain_id = 1;
+ Fraction trust_level = 2 [(gogoproto.nullable) = false, (gogoproto.moretags) = "yaml:\"trust_level\""];
   // duration of the period since the LatestTimestamp during which the
// submitted headers are valid for upgrade
- google.protobuf.Duration trusting_period = 3 [
- (gogoproto.nullable) = false,
- (gogoproto.stdduration) = true,
- (gogoproto.moretags) = "yaml:\"trusting_period\""
- ];
+ google.protobuf.Duration trusting_period = 3
+ [(gogoproto.nullable) = false, (gogoproto.stdduration) = true, (gogoproto.moretags) = "yaml:\"trusting_period\""];
// duration of the staking unbonding period
google.protobuf.Duration unbonding_period = 4 [
- (gogoproto.nullable) = false,
+ (gogoproto.nullable) = false,
(gogoproto.stdduration) = true,
- (gogoproto.moretags) = "yaml:\"unbonding_period\""
+ (gogoproto.moretags) = "yaml:\"unbonding_period\""
];
// defines how much new (untrusted) header's Time can drift into the future.
- google.protobuf.Duration max_clock_drift = 5 [
- (gogoproto.nullable) = false,
- (gogoproto.stdduration) = true,
- (gogoproto.moretags) = "yaml:\"max_clock_drift\""
- ];
+ google.protobuf.Duration max_clock_drift = 5
+ [(gogoproto.nullable) = false, (gogoproto.stdduration) = true, (gogoproto.moretags) = "yaml:\"max_clock_drift\""];
// Block height when the client was frozen due to a misbehaviour
- ibc.core.client.v1.Height frozen_height = 6 [
- (gogoproto.nullable) = false,
- (gogoproto.moretags) = "yaml:\"frozen_height\""
- ];
+ ibc.core.client.v1.Height frozen_height = 6
+ [(gogoproto.nullable) = false, (gogoproto.moretags) = "yaml:\"frozen_height\""];
// Latest height the client was updated to
- ibc.core.client.v1.Height latest_height = 7 [
- (gogoproto.nullable) = false,
- (gogoproto.moretags) = "yaml:\"latest_height\""
- ];
+ ibc.core.client.v1.Height latest_height = 7
+ [(gogoproto.nullable) = false, (gogoproto.moretags) = "yaml:\"latest_height\""];
// Proof specifications used in verifying counterparty state
- repeated ics23.ProofSpec proof_specs = 8
- [ (gogoproto.moretags) = "yaml:\"proof_specs\"" ];
+ repeated ics23.ProofSpec proof_specs = 8 [(gogoproto.moretags) = "yaml:\"proof_specs\""];
// Path at which next upgraded client will be committed.
// Each element corresponds to the key for a single CommitmentProof in the
@@ -64,17 +50,14 @@ message ClientState {
// under `{upgradepath}/{upgradeHeight}/consensusState` For SDK chains using
// the default upgrade module, upgrade_path should be []string{"upgrade",
// "upgradedIBCState"}`
- repeated string upgrade_path = 9
- [ (gogoproto.moretags) = "yaml:\"upgrade_path\"" ];
+ repeated string upgrade_path = 9 [(gogoproto.moretags) = "yaml:\"upgrade_path\""];
// This flag, when set to true, will allow governance to recover a client
// which has expired
- bool allow_update_after_expiry = 10
- [ (gogoproto.moretags) = "yaml:\"allow_update_after_expiry\"" ];
+ bool allow_update_after_expiry = 10 [(gogoproto.moretags) = "yaml:\"allow_update_after_expiry\""];
// This flag, when set to true, will allow governance to unfreeze a client
// whose chain has experienced a misbehaviour event
- bool allow_update_after_misbehaviour = 11
- [ (gogoproto.moretags) = "yaml:\"allow_update_after_misbehaviour\"" ];
+ bool allow_update_after_misbehaviour = 11 [(gogoproto.moretags) = "yaml:\"allow_update_after_misbehaviour\""];
}
// ConsensusState defines the consensus state from Tendermint.
@@ -83,13 +66,11 @@ message ConsensusState {
// timestamp that corresponds to the block height in which the ConsensusState
// was stored.
- google.protobuf.Timestamp timestamp = 1
- [ (gogoproto.nullable) = false, (gogoproto.stdtime) = true ];
+ google.protobuf.Timestamp timestamp = 1 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true];
// commitment root (i.e app hash)
- ibc.core.commitment.v1.MerkleRoot root = 2 [ (gogoproto.nullable) = false ];
- bytes next_validators_hash = 3 [
- (gogoproto.casttype) =
- "github.com/tendermint/tendermint/libs/bytes.HexBytes",
+ ibc.core.commitment.v1.MerkleRoot root = 2 [(gogoproto.nullable) = false];
+ bytes next_validators_hash = 3 [
+ (gogoproto.casttype) = "github.com/tendermint/tendermint/libs/bytes.HexBytes",
(gogoproto.moretags) = "yaml:\"next_validators_hash\""
];
}
@@ -99,15 +80,9 @@ message ConsensusState {
message Misbehaviour {
option (gogoproto.goproto_getters) = false;
- string client_id = 1 [ (gogoproto.moretags) = "yaml:\"client_id\"" ];
- Header header_1 = 2 [
- (gogoproto.customname) = "Header1",
- (gogoproto.moretags) = "yaml:\"header_1\""
- ];
- Header header_2 = 3 [
- (gogoproto.customname) = "Header2",
- (gogoproto.moretags) = "yaml:\"header_2\""
- ];
+ string client_id = 1 [(gogoproto.moretags) = "yaml:\"client_id\""];
+ Header header_1 = 2 [(gogoproto.customname) = "Header1", (gogoproto.moretags) = "yaml:\"header_1\""];
+ Header header_2 = 3 [(gogoproto.customname) = "Header2", (gogoproto.moretags) = "yaml:\"header_2\""];
}
// Header defines the Tendermint client consensus Header.
@@ -123,24 +98,18 @@ message Misbehaviour {
// hash to TrustedConsensusState.NextValidatorsHash since that is the last
// trusted validator set at the TrustedHeight.
message Header {
- .tendermint.types.SignedHeader signed_header = 1 [
- (gogoproto.embed) = true,
- (gogoproto.moretags) = "yaml:\"signed_header\""
- ];
+ .tendermint.types.SignedHeader signed_header = 1
+ [(gogoproto.embed) = true, (gogoproto.moretags) = "yaml:\"signed_header\""];
- .tendermint.types.ValidatorSet validator_set = 2
- [ (gogoproto.moretags) = "yaml:\"validator_set\"" ];
- ibc.core.client.v1.Height trusted_height = 3 [
- (gogoproto.nullable) = false,
- (gogoproto.moretags) = "yaml:\"trusted_height\""
- ];
- .tendermint.types.ValidatorSet trusted_validators = 4
- [ (gogoproto.moretags) = "yaml:\"trusted_validators\"" ];
+ .tendermint.types.ValidatorSet validator_set = 2 [(gogoproto.moretags) = "yaml:\"validator_set\""];
+ ibc.core.client.v1.Height trusted_height = 3
+ [(gogoproto.nullable) = false, (gogoproto.moretags) = "yaml:\"trusted_height\""];
+ .tendermint.types.ValidatorSet trusted_validators = 4 [(gogoproto.moretags) = "yaml:\"trusted_validators\""];
}
// Fraction defines the protobuf message type for tmmath.Fraction that only
// supports positive values.
message Fraction {
- uint64 numerator = 1;
+ uint64 numerator = 1;
uint64 denominator = 2;
}
diff --git a/testing/chain.go b/testing/chain.go
index 19ee2183..92228eed 100644
--- a/testing/chain.go
+++ b/testing/chain.go
@@ -313,9 +313,17 @@ func (chain *TestChain) GetPrefix() commitmenttypes.MerklePrefix {
// ConstructUpdateTMClientHeader will construct a valid 07-tendermint Header to update the
// light client on the source chain.
func (chain *TestChain) ConstructUpdateTMClientHeader(counterparty *TestChain, clientID string) (*ibctmtypes.Header, error) {
+ return chain.ConstructUpdateTMClientHeaderWithTrustedHeight(counterparty, clientID, clienttypes.ZeroHeight())
+}
+
+// ConstructUpdateTMClientHeaderWithTrustedHeight will construct a valid 07-tendermint Header to update the
+// light client on the source chain, trusting the given height; a zero height falls back to the client's latest height.
+func (chain *TestChain) ConstructUpdateTMClientHeaderWithTrustedHeight(counterparty *TestChain, clientID string, trustedHeight clienttypes.Height) (*ibctmtypes.Header, error) {
header := counterparty.LastHeader
- // Relayer must query for LatestHeight on client to get TrustedHeight
- trustedHeight := chain.GetClientState(clientID).GetLatestHeight().(clienttypes.Height)
+ // Relayer must query for LatestHeight on client to get TrustedHeight if the trusted height is not set
+ if trustedHeight.IsZero() {
+ trustedHeight = chain.GetClientState(clientID).GetLatestHeight().(clienttypes.Height)
+ }
var (
tmTrustedVals *tmtypes.ValidatorSet
ok bool
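
The new ConstructUpdateTMClientHeaderWithTrustedHeight wrapper lets tests pin the trusted height explicitly instead of always trusting the client's latest height. A minimal sketch of a test helper built on it; the helper name, import paths and the assumption that chainA/chainB come from the usual ibctesting coordinator setup are illustrative:

    package example_test

    import (
        "testing"

        "github.com/stretchr/testify/require"

        clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
        ibctesting "github.com/cosmos/ibc-go/testing"
    )

    // assertTrustedHeightUsed builds an update header for chainA's client of chainB
    // and checks that the explicitly provided trusted height is honoured.
    func assertTrustedHeightUsed(t *testing.T, chainA, chainB *ibctesting.TestChain, clientID string, trustedHeight clienttypes.Height) {
        header, err := chainA.ConstructUpdateTMClientHeaderWithTrustedHeight(chainB, clientID, trustedHeight)
        require.NoError(t, err)
        require.Equal(t, trustedHeight, header.TrustedHeight)

        // A zero trusted height falls back to the previous behaviour of trusting
        // the client's latest height, which is what ConstructUpdateTMClientHeader does.
        _, err = chainA.ConstructUpdateTMClientHeaderWithTrustedHeight(chainB, clientID, clienttypes.ZeroHeight())
        require.NoError(t, err)
    }
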
diff --git a/testing/endpoint.go b/testing/endpoint.go
index 3a9e8dbd..e32d0cdf 100644
--- a/testing/endpoint.go
+++ b/testing/endpoint.go
@@ -433,6 +433,25 @@ func (endpoint *Endpoint) GetClientState() exported.ClientState {
return endpoint.Chain.GetClientState(endpoint.ClientID)
}
+// SetClientState sets the client state for this endpoint.
+func (endpoint *Endpoint) SetClientState(clientState exported.ClientState) {
+ endpoint.Chain.App.GetIBCKeeper().ClientKeeper.SetClientState(endpoint.Chain.GetContext(), endpoint.ClientID, clientState)
+}
+
+// GetConsensusState retrieves the Consensus State for this endpoint at the provided height.
+// The consensus state is expected to exist otherwise testing will fail.
+func (endpoint *Endpoint) GetConsensusState(height exported.Height) exported.ConsensusState {
+ consensusState, found := endpoint.Chain.GetConsensusState(endpoint.ClientID, height)
+ require.True(endpoint.Chain.t, found)
+
+ return consensusState
+}
+
+// SetConsensusState sets the consensus state for this endpoint.
+func (endpoint *Endpoint) SetConsensusState(consensusState exported.ConsensusState, height exported.Height) {
+ endpoint.Chain.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(endpoint.Chain.GetContext(), endpoint.ClientID, height, consensusState)
+}
+
// GetConnection retrieves an IBC Connection for the endpoint. The
// connection is expected to exist otherwise testing will fail.
func (endpoint *Endpoint) GetConnection() connectiontypes.ConnectionEnd {
@@ -442,6 +461,11 @@ func (endpoint *Endpoint) GetConnection() connectiontypes.ConnectionEnd {
return connection
}
+// SetConnection sets the connection for this endpoint.
+func (endpoint *Endpoint) SetConnection(connection connectiontypes.ConnectionEnd) {
+ endpoint.Chain.App.GetIBCKeeper().ConnectionKeeper.SetConnection(endpoint.Chain.GetContext(), endpoint.ConnectionID, connection)
+}
+
// GetChannel retrieves an IBC Channel for the endpoint. The channel
// is expected to exist otherwise testing will fail.
func (endpoint *Endpoint) GetChannel() channeltypes.Channel {
@@ -451,6 +475,11 @@ func (endpoint *Endpoint) GetChannel() channeltypes.Channel {
return channel
}
+// SetChannel sets the channel for this endpoint.
+func (endpoint *Endpoint) SetChannel(channel channeltypes.Channel) {
+ endpoint.Chain.App.GetIBCKeeper().ChannelKeeper.SetChannel(endpoint.Chain.GetContext(), endpoint.ChannelConfig.PortID, endpoint.ChannelID, channel)
+}
+
// QueryClientStateProof performs an abci query for a client state associated
// with this endpoint and returns the ClientState along with the proof.
func (endpoint *Endpoint) QueryClientStateProof() (exported.ClientState, []byte) {
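Together with the existing getters, these setters let a test mutate IBC state in place instead of driving it through full handshakes, for example forcing a channel end into CLOSED before exercising close or timeout logic. A hypothetical sketch under the same assumptions as above (`NewCoordinator`, `GetChainID`, `NewPath`, `Setup` and the import paths are not part of this diff):

```go
package ibctesting_test

import (
	"testing"

	channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
	ibctesting "github.com/cosmos/ibc-go/testing"
)

// Hypothetical sketch: use the new Get/Set helpers to rewrite channel and
// connection state directly on an endpoint.
func TestForceCloseChannel(t *testing.T) {
	coordinator := ibctesting.NewCoordinator(t, 2)
	chainA := coordinator.GetChain(ibctesting.GetChainID(0))
	chainB := coordinator.GetChain(ibctesting.GetChainID(1))

	path := ibctesting.NewPath(chainA, chainB)
	coordinator.Setup(path) // clients, connection and channel are created

	// read-modify-write the counterparty channel end into CLOSED
	channel := path.EndpointB.GetChannel()
	channel.State = channeltypes.CLOSED
	path.EndpointB.SetChannel(channel)

	// the connection end can be adjusted the same way
	connection := path.EndpointA.GetConnection()
	connection.DelayPeriod = 0
	path.EndpointA.SetConnection(connection)

	// ...exercise closed-channel / timeout-on-close paths here...
}
```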
diff --git a/testing/values.go b/testing/values.go
index d71356ec..5ba3ab1d 100644
--- a/testing/values.go
+++ b/testing/values.go
@@ -17,6 +17,7 @@ import (
)
const (
+ FirstClientID = "07-tendermint-0"
FirstChannelID = "channel-0"
FirstConnectionID = "connection-0"
From e012a4af5614f8774bcb595962012455667db2cf Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?colin=20axn=C3=A9r?=
<25233464+colin-axner@users.noreply.github.com>
Date: Tue, 4 May 2021 12:13:27 +0200
Subject: [PATCH 046/393] ibc-go documentation website (#146)
* initial staging for ibc-go documentation
* fix up documentation site
* fix links, protobuf docs page
* rename custom to apps, fix link
---
docs/.vuepress/config.js | 179 +
.../public/android-chrome-192x192.png | Bin 0 -> 4110 bytes
.../public/android-chrome-256x256.png | Bin 0 -> 5421 bytes
.../public/apple-touch-icon-precomposed.png | Bin 0 -> 1473 bytes
docs/.vuepress/public/apple-touch-icon.png | Bin 0 -> 3744 bytes
docs/.vuepress/public/browserconfig.xml | 9 +
docs/.vuepress/public/favicon-16x16.png | Bin 0 -> 632 bytes
docs/.vuepress/public/favicon-32x32.png | Bin 0 -> 942 bytes
docs/.vuepress/public/favicon-svg.svg | 21 +
docs/.vuepress/public/logo-bw.svg | 8 +
docs/.vuepress/public/logo.svg | 18 +
docs/.vuepress/public/mstile-150x150.png | Bin 0 -> 3039 bytes
docs/.vuepress/public/safari-pinned-tab.svg | 25 +
docs/.vuepress/public/site.webmanifest | 19 +
docs/DOCS_README.md | 124 +
docs/{spec.md => OLD_README.md} | 0
docs/README.md | 17 +-
docs/{custom.md => ibc/apps.md} | 2 +-
docs/{ => ibc}/integration.md | 0
docs/{ => ibc}/overview.md | 4 +-
docs/{ => ibc}/proposals.md | 0
docs/ibc/proto-docs.md | 11126 +++----
docs/{ => ibc}/relayer.md | 0
docs/{ => ibc}/upgrades/README.md | 0
docs/{ => ibc}/upgrades/developer-guide.md | 0
docs/{ => ibc}/upgrades/quick-guide.md | 0
docs/migrations/ibc-migration-043.md | 8 +-
docs/package-lock.json | 24219 ++++++++++++++++
docs/package.json | 18 +
docs/post.sh | 3 +
docs/protodoc-markdown.tmpl | 105 +
scripts/protocgen.sh | 10 +-
32 files changed, 28138 insertions(+), 7777 deletions(-)
create mode 100644 docs/.vuepress/config.js
create mode 100644 docs/.vuepress/public/android-chrome-192x192.png
create mode 100644 docs/.vuepress/public/android-chrome-256x256.png
create mode 100644 docs/.vuepress/public/apple-touch-icon-precomposed.png
create mode 100644 docs/.vuepress/public/apple-touch-icon.png
create mode 100644 docs/.vuepress/public/browserconfig.xml
create mode 100644 docs/.vuepress/public/favicon-16x16.png
create mode 100644 docs/.vuepress/public/favicon-32x32.png
create mode 100644 docs/.vuepress/public/favicon-svg.svg
create mode 100644 docs/.vuepress/public/logo-bw.svg
create mode 100644 docs/.vuepress/public/logo.svg
create mode 100644 docs/.vuepress/public/mstile-150x150.png
create mode 100644 docs/.vuepress/public/safari-pinned-tab.svg
create mode 100644 docs/.vuepress/public/site.webmanifest
create mode 100644 docs/DOCS_README.md
rename docs/{spec.md => OLD_README.md} (100%)
rename docs/{custom.md => ibc/apps.md} (99%)
rename docs/{ => ibc}/integration.md (100%)
rename docs/{ => ibc}/overview.md (99%)
rename docs/{ => ibc}/proposals.md (100%)
rename docs/{ => ibc}/relayer.md (100%)
rename docs/{ => ibc}/upgrades/README.md (100%)
rename docs/{ => ibc}/upgrades/developer-guide.md (100%)
rename docs/{ => ibc}/upgrades/quick-guide.md (100%)
create mode 100644 docs/package-lock.json
create mode 100644 docs/package.json
create mode 100755 docs/post.sh
diff --git a/docs/.vuepress/config.js b/docs/.vuepress/config.js
new file mode 100644
index 00000000..c8ea7715
--- /dev/null
+++ b/docs/.vuepress/config.js
@@ -0,0 +1,179 @@
+module.exports = {
+ theme: "cosmos",
+ title: "IBC-Go",
+ locales: {
+ "/": {
+ lang: "en-US"
+ },
+ },
+ base: process.env.VUEPRESS_BASE || "/",
+ themeConfig: {
+ repo: "cosmos/ibc-go",
+ docsRepo: "cosmos/ibc-go",
+ docsDir: "docs",
+ editLinks: true,
+ label: "ibc",
+ // label: "ibc-go",
+ // TODO
+ //algolia: {
+ // id: "BH4D9OD16A",
+ // key: "ac317234e6a42074175369b2f42e9754",
+ // index: "ibc-go"
+ //},
+ versions: [
+ {
+ "label": "main",
+ "key": "main"
+ }
+ ],
+ sidebar: {
+ auto: false,
+ nav: [
+ {
+ title: "Using IBC-Go",
+ children: [
+ {
+ title: "Overview",
+ directory: false,
+ path: "/ibc/overview.html"
+ },
+ {
+ title: "Integration",
+ directory: false,
+ path: "/ibc/integration.html"
+ },
+ {
+ title: "Applications",
+ directory: false,
+ path: "/ibc/apps.html"
+ },
+ {
+ title: "Upgrades",
+ directory: true,
+ path: "/ibc/upgrades"
+ },
+ {
+ title: "Governance Proposals",
+ directory: false,
+ path: "/ibc/proposals.html"
+ },
+ {
+ title: "Relayer",
+ directory: false,
+ path: "/ibc/relayer.html"
+ },
+ {
+ title: "Protobuf Documentation",
+ directory: false,
+ path: "/ibc/proto-docs.html"
+ },
+ ]
+ },
+ {
+ title: "Migrations",
+ children: [
+ {
+ title: "v0.43 SDK to IBC-Go v1.0.0",
+ directory: false,
+ path: "/migrations/ibc-migration-043.html"
+ },
+ ]
+ },
+ {
+ title: "Resources",
+ children: [
+ {
+ title: "IBC Specification",
+ path: "https://github.com/cosmos/ibc"
+ },
+ ]
+ }
+ ]
+ },
+ gutter: {
+ title: "Help & Support",
+ editLink: true,
+ chat: {
+ title: "Discord",
+ text: "Chat with IBC developers on Discord.",
+ url: "https://discordapp.com/channels/669268347736686612",
+ bg: "linear-gradient(225.11deg, #2E3148 0%, #161931 95.68%)"
+ },
+ github: {
+ title: "Found an Issue?",
+ text: "Help us improve this page by suggesting edits on GitHub."
+ }
+ },
+ footer: {
+ logo: "/logo-bw.svg",
+ textLink: {
+ text: "ibcprotocol.org",
+ url: "https://ibcprotocol.org"
+ },
+ links: [
+ {
+ title: "Documentation",
+ children: [
+ {
+ title: "Cosmos SDK",
+ url: "https://docs.cosmos.network"
+ },
+ {
+ title: "Cosmos Hub",
+ url: "https://hub.cosmos.network"
+ },
+ {
+ title: "Tendermint Core",
+ url: "https://docs.tendermint.com"
+ }
+ ]
+ },
+ {
+ title: "Community",
+ children: [
+ {
+ title: "Cosmos blog",
+ url: "https://blog.cosmos.network"
+ },
+ {
+ title: "Forum",
+ url: "https://forum.cosmos.network"
+ },
+ {
+ title: "Chat",
+ url: "https://discord.gg/W8trcGV"
+ }
+ ]
+ },
+ {
+ title: "Contributing",
+ children: [
+ {
+ title: "Contributing to the docs",
+ url:
+ "https://github.com/cosmos/ibc-go/blob/main/docs/DOCS_README.md"
+ },
+ {
+ title: "Source code on GitHub",
+ url: "https://github.com/cosmos/ibc-go/"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ plugins: [
+ [
+ "@vuepress/google-analytics",
+ {
+ ga: "UA-51029217-2"
+ }
+ ],
+ [
+ "sitemap",
+ {
+ hostname: "https://ibc.cosmos.network"
+ }
+ ]
+ ]
+};
diff --git a/docs/.vuepress/public/android-chrome-192x192.png b/docs/.vuepress/public/android-chrome-192x192.png
new file mode 100644
index 0000000000000000000000000000000000000000..6d04cf4c08573ab91036925d6df3bd4142e75c7a
GIT binary patch
literal 4110
zcmcInS6I{Av;EPeNDo~My$T^9H3S4CK#2565u}DDN(bqL-iZ)O=)DRYfk;&Z>AkA_
zJshO>4k9S%<=nUXaNq8In7wAr%=+eG_TJw&35NRGbTnKv007V-bu^7{u=`)5BELB=
zK9wim0J*ceo;m{JaTaw*bHo3IIE{001KZ0J~>?
zhY|cHL28fE)&%~<;l@7PFltX73m*Wu4f@wefZTkJn;@kxQV&77M#W4HFs6MgvH$@3
zLZqfTI^gSmLEx*${j6~x6n^C?+{x?YbypK>bJMl30y$WXYL;bs+so1<)9e7fkVGo#
zbvBkdbZHaEs34b~EOwgeb8!p}vouXSm8O#e8;6ra)&vhjY|irgFQKgmBtL`0d(Pg6
z9$x+v?EY=GYLbpRqMw_8_>75r+nj{ffdZP$+6^@YdjC%eCHhTbP|*)p3d71+rG*}i
z+|YmA61Qhz=;}4DoShm2o|>2{=A_c(E40cbB&$S841J6NdE$uM63L2l+a+B$(vJFeHA`fBa=}=>{FG
z^wECm)bqpnkclb*$43>J$|^Aa-N+T=aSwaNqSTC2g0nW}sUdf5VE!e|;d^cAsF08D
zolK@!!XAeHl-c^4QZXjRYOh70Q(uuyZp<#C^zwdN+ARFck>?|2$xaBspNx@FfuA{1
zY-y~d892*u_6r(pw3Zb^6n#|72ussf=k!~etjz&kDjRqLl|YV0;ihUmu#c9}{?AoT
zvVAGTMk^`Nr1v?SXzqh3BbMpe>@WsblCO+{znZF|1PYIbSFCOF4Kg0;01?IMhoV*K
z;yW(IX<9Q8vNOZ{d5dQy3ZV&%-o(vX0@LJtG$%66W8;A!xl2Ws%;(0ezvk72{N`rW
zP*w7L8s&Io^rIjjnTMQ@9t4R}j65UsllFUs6Q&=X4PWjlpB!)}GG?4oxLsi7_DXY1
zKjHaP(E3i~4>>;$mkYUA4nJQnj`}bm5|sFlDb`f}L?Q7q+M6MGc*7U6EFOyoogRLI
z_ndmITt6LNQWc*1``heZA?e%+XOcyayp5AJetS4@%|}|qH}J4-pU}FL5S#<^geBh>45%%don15W@E1~Vff<%zVUuwAJk?|ljGc64Q&ko5vFqQAaQ9%ckIQdHJ8z}DW+nV870C6$
z1>3ptfWmWIIm`Knc#*2C9v8_8kwiX{&xS+KDEIGg4aQjY8~%KyuR9hHerH^^semB>
zhZFS)#E@vhLH27Zwj!7B$`)WbKJ76m&U9$qLKNz*s>&9Z!3R{Kqbsqk#eQ|2CGRrqbTpU@__
z^@%{Z4YX_fdPhh$uRowP6Re}Q-jk94by}&>iFS7aZw7Of?qk>{mBJJ*lG)|r5JZ|c
z5$x4zExH@da74I)$|oJO*1pO#MBS__a{(#wI4crIcMNj!j;t!I&Dmbr!JV*%Ps`q8
zLX-iP=9+oLPnq!T;~gA~=LbU0detpdYgz=m*
zEZO{rosPdhk3+g_c^2CI}kDY$*G!5Th!7b!E+ox;5;O$4GG8{MFwzKM0YWVR1uboj~a7*A)
zK%jg-%c!PTHi&$FC_9~P(6f1W(1RKjs6Ks(G3v(r#7m9)U+@&|KvTdZsu_zDo8B#>
zq1n;}h(wDy&i&VxvASlh3dfCE&*|t$iAObp-I-%4kkO|Z_j6(rOQN6@j~24mv&LVf
zn|L_Z9}0;(l&E80JkKC*jUpP(tya3PELflwp|vUrJUSry)<(I2)h)&)i9P-y52Bzo0)g
z-=I#p?twO{MRNLxgWd`Yt0CK2HpI#ZA2+Q8KZ_<69t>PKPi^)-0a#Nj&;DU*1HR87
zhpc`o&f8F%SX3(>Ok&WHSHD5!)aIYKOeM(+bIm6q$fQMjng^!P@rYBT)uf0?5arx2
za{(AQ;`SRet}-KoC`rde@5_L|$Uq>HsxR5BWXW=LFkR@EL#oF|HKD{VMD4I}xn+
z7DU?kaaC^0(LTIRs76CSsgG&D@I%$g?oNc77K#siiLY7iyS&J@Ei7IBImfHE@>)lf
z@OCWX^tibYm+IoOL|TqoRG{b-3a+-5L9rCznuAoor^72`i*~$(_&rIC%SXli`Ir}k
zroF$`CYt>*ds@FOxIuQDIY*1E)se^)na0%I)xrCi>Fi}@MN8ux#po1QAtmU)SeP%znIO2OJ7k6
z-lt8dv5%0wl`$yC_0iLaSfBpxYuuMy31R2}-ot(6W!;L0CP|ep54VH!v9h6;Ky2st=wz{&ZZ5^<2n>Oj~jTxM0VM}
zd(b3yvK3-`-=rLcD0Or8)l~mcW`y-USkf@F+>q%hJB|1&AfAEcYs%8Cdbtqd=i6M}
zQdWv-R59QJoGsmt(-w{TEY+)A2j5)Hm%~k5UiH2F_DU;_q2JzlqRIju(i-jDL$V|a
zzrQtYBj**PI2>`=++SvotNN|mPrE*nke!gyF+N?XBG>pJ^sYo0O3U!@>$U6=_%zT)
zy8BDES7BGmKJ)v??8MH@4^}MKG5xG7kBxTPg0{eox(oMGnfcbG`o~x$gHrgS4?Ko4
zpat1hYYW;O6Mj~R2gaFS&EBNHyo`%N_=~X|#SbJ-?{8hjKBB;z?{(Zl-ilI8a#JcQ
zEKduXI?|gOvG#9{SZ8Tow5v(ELzryrpq<`+^Okx^um=w2<3C$MPb{HRt3G>2-<-tC8f-oenRv$s0-3*Y8p)Elh~s
za?AHc>KGo*JFY;|Zr)nEWr&nhJP484>r&Hf)4{>F$k_Qkj(a7RpOo1)uFdjtw7cDY
zqYLZPe4)7jnx`zl_c0z5=AFK-Jte1FQz_0DlQ5p}gseo+<5S+&t*|bZ%zSy=A(lU~
zztEi%@h(%S&v}ShJD=*A2N^$?)CV$q!U)ep{=cycMKnDAPR52l7!5F0vTstV7?C9_CJo*IZJCHEX5hFBTSRe%{V6_okprFBf0#io1~7og&z^9RJQt$
zG5ERb@x|qg5Kwo`Dd(zK@8uiyi!)*FPVuS
zo|0)24sMyKx(%k7??(S_u+o`vOWjqKaC)(oNl4vR`3Y}
z+EuYw!HeLzOY_sg!8r4&xP`%R```Vy?v`JqaVjJ`JHSf0l~0Viv&CkX;h|HjiX9DK
zU_6{%F=fii{JSA6$5Ki!IVOq9r7kKwZegq;V>28*_h|=$Rf+^|7m2K!$z9D3aAc9jaDUmRo8?u=7h
zvGa9=Ie0tX03ZXEfl5LZC8ZV7P+1rh3X_(<2bH;TWHs78{tv+m4~(l*;QuZ_@Bm>q
z0<$2rxv#O^OI}ZJ4=2~>j=a8so{qe(p1uwM5RkumLQCakDk?f+Fgk`@HwMTAZbKYy
zg9Jb*JPR3!R}(ASiy;$eM)Qu&&kgmB^>O!6j$(C+bs@kGJCpB;hFs=N3jm4G*KE+R
Gjr=dd&}xVP
literal 0
HcmV?d00001
diff --git a/docs/.vuepress/public/android-chrome-256x256.png b/docs/.vuepress/public/android-chrome-256x256.png
new file mode 100644
index 0000000000000000000000000000000000000000..1c30cc026781e0ba3288a23e0e2f37ee1e553e62
GIT binary patch
literal 5421
zcmc(jS5(u@*2ez{ozRPfqJ$obkkETVQ#zqzp-Jx{pn$Z{MS4-e7pa2iixiP6B}gw)
zEFc{N5)l!Cr~zX*oXhXxyFM4Qo@ehpzcqW!&8*p(80*XItU{~+0I-{x8ruN?=*)ru
zX8JSv73;BgCiH#=Rt5mjk_VxBGMvRQFH<`!0Jtp;0Qgh@I5|t|PXbHoViYoh{f=k2EOM`f5qs
zni8?$IS;xQaPHnCC|jWJob}2;^z|DYU&9RwGA-y0jJfz8+x}X6wZ7~8V|3RypnIpg
zIl^CXS7{Y6#};KNOFg?3mJ`#!??71#+s`*b^eyCv)0laMCQ-dhX}fSCO|
zZz>Qzuj0Jhi|2MH)k0uM8DkK=n4@rlRnOiD&Ywsc{=;bn$#+NU9_Z+Awh-AXbIXF65j^l?`*g;ggZQ3>G
z&sjt+3C749;3CaDn)kaUuaJCca)&NRPIlAYZHwU+uZ--^t$1yuh<1cnQHiC^lIpVL
zwZTn3^O5xoEvjy3M(ceEbTW>kHJ548((r3`ST0miuXLJI(B|4zwJQRTwQuhie>B=4
zckqD%Wqlws2MX?+{JB1?z@!R&3F95=4Faa7@9tR`zZX@_eDTQ-&j^{l4Fwt%j<Ss=0RomFBfM1FPA5`9psmh3T
zPL6oBsNeqOFL_|oe!2t+hcs6%smH{Hg~dtX07|SfY2{2cTXrckB<1oLg?#n
zod2;1y@llal>g*uML#@19L=DFtjHTwOc(#UIm7V^_Ba#Z*PXep4|xT>iyiY)N2!a`
zUUH%1siiN)NyT3^tl8BZ7!PAlkXfP{(HUG9Mj)7Ri@U@yBTntNX}=^jrgCsjlS~
z3_c>8_U2lz#rozbu_r27yV;rR(V%BB_m2BEXQcAn*gO<^1z%sW>cNF;jU?P^eZ;vq
zS(I3`_dWlh2{
zEwC0#08^o?YZFRwmf7!f!HS;@`z&Oh?z#RtU~4)Qj5UQ*)JV!%K~s0vy+POed}Svk
zJEnc-kH`
zZih8|s`&PRmK&Ea`C)rF?E7S;(Au`xQ_8Y(lHhf@D|;H3vfWn3j#s89->xGIpGuG9
zwb+1OU6K3HRW0g#7{=zLKi`y{x47x`hdqwqKqr)7GinOTJ%ENWhw*Rf(o#fm_YV(?
zThe1cZf^?TYRRrTa&%KZg=*^RPRHs>uB;Av%4JcW7Le(B2IL*!O8+Eg^AdbG6S)#j
zrinOK!KNzrM-M4t<9~zeoMMitH_8AX-AV0%kQbfFh9iNpIS(nIcFg0ra+_1A4B^5B
zzBt)U#3LQ}EatxQd^5(D3e3EC)2U5BfXrjuuoxdX8n@6y;a&I7*T%vqDpEq`Uzuv?
zv5S*~$*z4P5ljO9a0{&h1(raI@{m4#=9w(xN=}))I`|bxp@)+MtDF^)b;y+KpFh`c2yO>uif+L2gdCZm
zQUv$3>C>tykNiVBhXYl}I`xhXpGvrPWrn7&Ccf*WCsXPsv)I}PbdMVkM19~YI9
zxs@_NeVBF~-fU9X2Ffw;zaVjTGc$}J5F^qcU_k!({O|H`7E+zy)ZTRy(Yhj;gV?C%
z5BXVmZt&1?zH*Yd`%rXRY|{;?$wm3|iO-tWBda&8tK{{x
zqho^y!=GBB=upUL?8&xr@~!d?%c~%3>FEqGn9Zm9g`RWk%4KRt#oL)vyGys%=0AIx
zXP4{{08rrKP5}2$K0!VNX0ANQWyE}(!i0-WvDIf_JLqYf>6wJU?Q8~xCCt!v{|QL`3PT)Qyk!O%Hh~Ha
z`K$Vc2sYHex^H$hFqLtQ{IHwQT-La5z4MQMZz<6h%j<4_gUy-^f363GBvrYCjRWzP
z-KP7aVbrzlqc@c`q_CSg{%q=%j0ZH;vWo}ZpB<<}6*8)|5jsiZ6^<8!NKAC}@LzGz
zt7g_M#T)uNPW`&RDn_vP?d9tZcd58|C=5TAlRpK^H+HRQVvBhV=_E1tJZo1&1ClON
ztmBkFtfNYG^^GRGCd_}OF-kwfsNS>v_}FW7e5S2J=!S{x*5Z477JFuiRE#G<3U(w!
z;Knvz^8aGWKItOHpZM3#TA7?>92f);sxnA}7^5!I{&UZ+CW_AjS^l@~t16>95cshp
zJJ;`(sLqG{3Y=mInj2mBabLJ;#{vl~BHAovOqcc4gdJP$v}x~!`VJQo_E(qBPb*h4
zGP~lTdJG&!nHelw&s@fpcPM|KZ~SGyO#f?MhdL;lz%n$QQu8fjo7Ztg^ePo2KHK}D
z>KY{jMUBO?F|vxJPx!8#cRGx{pMDPYjaW&zVCAAE8Od-S$I#gM=OZrt^y+}zIK`{1
z2A1$qx_J#Fd?`JVmZpTx{tgyMO7Sb{e4fnoc+=+M#`jYXzS!~;u@M*@Q3DN(RaOvO
z(MTqtzp+}J&BgI{8I^fWO#Qqfp8t?M*EvZ3p?*gvyGAluKj`naWN``|2MPWgKipI|
z7xjc7xqxJC0A!WN1>hs0YpA_2A6>X$zr8^J!wCxAtyo%cc3dorX@Nl!p$hI}pK^T5
zA|}5!?qZ8yM&A^7!lz}`YYd^FR>9yPJfGkZZy?*!h>pzPYqFew?zlg0b&d%3Eh2Gq
zaFB%NxPnuG*}IU`?_l){`z8*r2NlTt{oPNc-TeL~&G;U;8aZ((8v_Mei4!g}Pk`cs
z%R8-!K!oniqhAvS^N>}r^$T!WCI4oC(!uc_RTP_j*<~sC9o9>8%YMwJSaE~aNBH)d
z!%xLC>PcAGC8mUph0Ob-k`vTWBDz4&M8dj#Bu9
zVLH)&upj#JP7f07tx^#8oHWeLsy4*An_AZuSwi5FSOV;!62KNN=Mw0W-IxPAIgwCg
zNN0oVoEzne`U#ap3)h9W*X>^zJ`K!F6#X~aYU2lo7u3&`7sHxDe`hXFzU@7zrsUt+
zL)1`BkHSk6;YQvP6F4&)53r;%ILgOmg?!j$Id`yfo}}f(lB**!>Md~_Tuy+4l%cO3
z@AgNiMx8w|Yu;@h_71tWoOs!69zYwLQdo-K6|A{P%UM}4T_{#jDVI%ubUMP4ZvfJe
zjWrb-d~}s)(_G%wBiHE|6k3<(N$L*=qhl3Gx>D<^_jO92iO~Y*ky-r^&b_{+5`93%
zs|>($^xJVQyKMVso-*gDWObzaQRFvJ?5|F3JS&0cU8zCw38R`;Be`=%M<7L-&D&x|zuK?GfCQbP_FaZ4>17sbr{KrK=}2MC={H4iD`W{9*)T$nlsn
zU>67Mb(LPW{agLg>Oy?Vz$~7Mm9XqrXnQ755|f2l>sgcLZDpZ;w0nzd96V6c$2h
zhEjg;sa2+CNM^*8S?Z*d;~PCXXLR-ld&Vq+w=Bl@TDx*i#hoV$?C?QlKy8C>=|08&
zr5C)WGdq0Cd}VLGIn6mb&kDR5Z%uLv<##cIV=Z~nK2%v{sG;a3l>+W^wfgM{^#hLO
zV@rRt-FIDg)4*BdY4|JPVlvY(dEs5T?(M?MsKc=uVON{oA`&h;Y;i{MR&!~}xBEh2
zSUFNlNmh0oMTgwV&Qn?DsIb73s8
zQ^@+?VZ0?0Xsv%GNxurDcrz4WimE=n&eMJzMQTR|m~2_F1Gax_DAc!5nw%13tb>;N
z-Fh8o?#Ej?64t4Co;Z3Zy;9D}ylEpud?7-9Fh`IwCsGB$4BtJ+m#E%>g~VIMjn31H
zK`?J{JHywfykPxX+UpkA7%l3-@0%MKhEe6qokNvK)EcubUeki&3d!tOLlK>`5zV7E
z5vkJXl~M3_uafc0x?%{WCbx7P6*^c@ko`>
zk_?0aWJ80tle6~nz1@U$`I9@Vnhz`Fw8N1L%1MnoqWbg~@fDPe7tNUzdF8^7_?x
zemW54<_QC!?g*)ce4A_U+z)Tdn1ZN7?gTx*?Edcy7$dEU18`I8BlCfqnZl!Dm(Hz1
z2YoChACxr<+kD_D15P%ZWDKM}d?SkXj4X>twVAe-uySx9MXsupuw;J{*Um3m>xj+a
zEd~;ZGbA@f{!s6yQr7(8`kuU}9x@dlT%li2nPi-fBkP;C><9d?>c?9u*)2zv{Ljaf
z+pl%lBy+nudcdLO2?J{X3Dm(*ftq5^u-{XBeGJWcgzQ&w}X~0
z^)9^EXG2+IHUvvI?&J!~i;8WMW`#p(n-9|7FV#McK&2Z=x7@$oSCN|lvdmn6
z3-5lFI($Xk^_a7r%!GFW5fyxDx0G{JT!uKWp74JJR9P{D$~z1zOiL6x!)0WnXICCY
zVEN43vt!<{o)to<2Gu5bx(45STjGs$!PKN`13cZ-cKC#3`%5xY9uCBT$mqu?w$$dJ
z*!Un=ypC+O;SCGnL`}H<+6=im2lX5iH`GLAt>OoEA)cF@km1oDl_;``0mF@714_ol
zqCclshAop!Y;Ul6b3a(37#R+R?@z6+Y}h89Q1;|2nrCsu7)RI$>>P4+L{$7?pqmow
z5OgRW9RF8Ad-m2&h&3o1pB}uW3WE30Boce07RE<
z9gja&sqKJ=<7iNrC9rUJ?Xmx;1etbVRG&LkL#59RF
ctu$8zz6x+h9vZ6MJ39g}GqE;qH1tUR4VQ6cT2+9gUg^Ar!;hhDgdK
z9?2!-wra?wIB0|t>tZ=-uIH@3;GE}qFVE-w>3x6sq_{XcC_vFr000z7j<#-M_y0kN
zj9A0wbI4+o3v={{004Q#ACv$J3RT3RM1-3I2-FT?mc@tEd7Be90MKw#e%nVH0Hl8-
z+1ik!B|a5xxh@#NujjD0G%h4KNLx$7$X(@tw>12&=1YKc4N$O_C8FnC0V(v{h&63D
zrXRJp(DC8xn)NqylP9V&va<2sq_%AEMSzMO^c&07mIEgKKj=|uH2=t`CB{fkw{cN?9bTPAng&IU?n+!K-tY+3w7>gOYiE!LJyHh%ww?*tZi-v
z@n>fTmzI`Rw;CH81E|zd9*>vDWHPq}aBvHTV@^*`C&8wH+Pb=swl~dxDSV?-Z)hv}
zHmU{~8X6W&kW^68RQCOk&mQUO>gM3mZZX*WFW=+O7~1xakCS&Z#}k~e@GOFbzt;1;
zkOUfBZ((sYLcM0FZA
za4nO;kc^0o-2XM3IKZ9SAr5q1t?PV`=DLQ>%*^D83lh&(|Dw}##>?y7ginu-R#QHB
zUc-F6S5xBg9`jIb3uEZ0sfWn!N+`^H^U|YWKNXmaP5DvW6%~J+W;%RUcmGljKAb~F
znQDTmL?(iceR5P5W!|Tdg6gJRDWoKwh7?CGX9+u&%c3yqX*(Hxu@h0uaeN%#F`fF!@~lha3p4EYO0_bfrD&+r^h6^0xSs+M%w8)R(kUi@l(F!tB%L_V&kZlt3WJ
zl_<9At*Sq;XB~5F6EQLDgAOEwh8Vh=;B)NdHYO+u{g4d}e_%h>>V?@rdhbf^TN7w6
z`G~AO7)M9(7_4pt$_fN0Ag7sj$tN>hbECrvr)n$qJ@d;URwHvIO3Y|HIGq_BQ`Iz@
za(CXh;a+0Ig9j(wl7o%JL(kh_hTh)3`eAZrhUR@eOrv$ppg~SpElR>D2`b@gR*Gao
zh~+^>6qFTogusN&e#m398`D;yoIF8g_K0A;Z~~+imXvtfM>9*yR2A{$7ZxSO#qE6A
z2paHM{@qGyT3ee%-77=6GVS;qq}=SJJv+PS}O{`KK0O{`=$f_CZ`?87ags(kC6GjL(Hd25V;E&+MbOVzFe1m&3`B9bP&GQP7HMatxE$YhPNo(;SDfE(GKa^d)V-PrQtKYr|n$hlYN
uik`X`{=UOF{L}^e->}~Bs_CHC#{(m!{bl9xeO&R!1xR+zwzc57YySd~pPIq|
literal 0
HcmV?d00001
diff --git a/docs/.vuepress/public/apple-touch-icon.png b/docs/.vuepress/public/apple-touch-icon.png
new file mode 100644
index 0000000000000000000000000000000000000000..397e21af2a526405cec535e36de67f0b6ea35f05
GIT binary patch
literal 3744
zcmb_fX*3k>+n%Bj$}-6^CR-R=m?XQEA!0l`AA^`tM_f=+WvYs
zE&zaL9sqDDsHoLkn^`#RW{5%n{yxhX?qo(BL3;Kf001Zdf9w>Xph%FJWW(wkBiWYO
z`B(uLKwn%*CF{@Y5SjKw{I{RH=0o9?`lhA%dv6Eb@s;UcPZ*?0bBs-hmU-Op=Ht0r0dZJc
zKUw$L$j+1jlnHZx<6P5HGiy08&=hHEqy6;aydQQ}oOO4vOB
z&uVA~OAK1&K&SVEqard$mXlmhw7Wx3rB4!~vE(f&A#>>lh36~aS%SY48`e9oiwsat
z3yZ4psn{xm@=DiB)va)DeWvR22bae&5ZqU4Y{s?IF2+C;c0O+P*QE)VkW*qo3(DL@
z3)hU7dBjsd>ru2jqF2-TORcC8-d}Zud1$5$gb2NP1ARbU~6#
zVt?r+jolTC4=Oq4T;6p*XX^@7Kg$>0P9Q39qo6`l1jkBuSRrwN(*YJsju**)cW-7G
zAOYDUx`X|*914e%c{WBw^>mZ&FNx9L8fmckY)|Q?rU-UplLd86x;_i9G+d`$mCTgI
z*$WT0Q&?`RK82BW?&Hwn2`?+rGMpZSq76)fLw?P{A;dytqqi4|*tOj*GJ5We*Msly
z<1$rEf0UIk34T&FmN59MkMs2ZdMS^i)U;nNUwjI00GC-8J}i!bNT^vuctI_Uy
zXpihOZX8GCWp^p~8>MCbu)7q=;&K8~@ju(19PD&T4%*u_=M
z@5}3CEOgo9`O4lFqGF{n@%~G;y^yLultgVng0!#(!cf*5i2aRNU*m2lMjgKE{#OI2
zoW+sq${PDU_Ik~Id=hNUP5vTiv9ixTGX#+jjQEW>(@jW+Wkt29UvLA3E9X}aNakP*
z^n7SH9PLMstqxznjCa}(*OCg-POTbPsk{=Az0ebhmhCoWn@
z4<9pZwYL19EY{^A1$!A8Skt`pF+D@oInqH+Vt#@*pyKW>3iD>1p7DH@{(O^A^I`~y
z^d52xxfE!!8<&sA{z8)N7dtX66epx!#r$k>mNRmSn;BaS5V;DGs#@NL+F&dPKk=&n
z$Q$*$0F`l!%$)FIb*V%>s0#YlkCR!}i+8sgvZABb^Ix8h>`Mheo6{|~<9}XR9gOki
z3gY_x2}in##7rjoNH(XNqWNnv2apWh<}rKKfKyVy*2VIuzIX=zN%0@0V
z#qye88sQeBLno%4to)_&c64&zo0D;GFsxE0yxa{YvTHbYwKdK5###&NfkFO4TkNN0
z(j;i9wqxhu6IJOb1Rgx)etMs0XDTXsU4QI*fx*87$H#Hh&H9PpV?Z)wYpTg_JxL@w
z(y{sMY-z@}X>JjLIJ45QzBkRkUqfJ5ZzK5!p8Guu8W>ZEViOTAcxWPq2O87p@E+ZpxtV1$brd4V
zXbDVP;o^T?+Pw2-X|EnGS1q5{=Y8%NDZ6G7ty<&{+P~QqRxG!hJf}-o+SB^}v%VLT
zoE;V7wyu9Ox8gQor4MgI)dE2?kD{G{ugi?;)lCih@k4HJm--1wSFu%Nkk(%MAAvjC
zb2kPdVQH#(7)9Rx{H!(l`vE1^;nJyV$#8@ArM>qhx*+Q;vN`0dl85~`sP*(&tvFH%
z;mCsloYi~Gtb=tnG07L%utdWpHp(y$n3e|@(u8K41kD5=i{<*PgR-+^eZx#P2A3pq
zcVOkFPd|cVr+@WGD&@wW-=yiwoxKs)aiz?tW7%hoS|Qp8s!zuv)#dW4uZ^2Q(l!kz
z#mC^0csSRUgdTJ!O&^09w>v7KBKQC8=D*p~I5_q-tS4AlW}`I*D5X81eJy!|9&stX
zEmi-~Q<(uD?6BM5udO)NO`jM+FBxL*#<2Obd)CR4+<|Xoo{7E`j%8WSZXwj8_Ji|P
zHM7X3&v;9uFyo$Taq**Xz-r89tEv|8B8lMg7kqGQuBLV?%Jmkh!CiAu(9xZ0pH-gY
zp)$*L+7p?izHdt{v2}*G__@+bLnzFfJBP`fe4PK0Bzz3AZFNs=F?$vs#N961)MOs&
zJSK8KVm$lCr}RueugGwv_r9}IM-f^YSlg;zO~u#_p@k8
z42^82eW0^JWoe-X9|zB^S8`ACmSA-whUmp&OWz5WPn@ZgnGM0GoR`U>%n!#t?@6h3
zIR&+XGAnk>M{{~TlQ?ZBNIMcnT-9oE*
zPv?3#5mQx@pN(=rd()ugZZjz2WQ9^z`u$3gDbQ(}kgeyT1qx`CrY
z(#}-)Whch5=lW`*a-k>J{^570w+q{^3VDsFuWxrU%#F^)BqJ5JXA8eY6dNn8k7AQt
zyKl8I0#;ZHE~cnHqjy9@YiJjgQb^tT>uok)hC+S31LEPLG^LXBJO%+YNC$>j`Elma
zl{_A&cd>APcEZdE8BlOxOX_9o8uU@LsJBN~SO4Og)T^Tds{{!FwiaIy4f5q&pKJr#
z@03nALcFp6ETzjmEzD@8p9Db7$aI=TAP0S>ymaGUw`g)>Snt)t4Q
zuLJXTM5_!83#Zc$hpwJH#<@8QQVV=I@axAvsk_*y;2-wAg9)L9%Up{py7{dUsqOnD
zw1)LBE)~)uxMNhX+&)ntY+I$)41eG-6%&W7&vbQS^K$k_>k~(9)wn#h`6-X(2gJ4X
zPd@8H*@!NRB1H4JLW``I4G8~MqtnueT+1%+Q>up1mhZAty?}QF^#aN9HV(cnm)nT=
z0s?4fEG|_dr){z!Aw7<24Q$i(u0kYcnw}r)@-2AR_uy_qDFFJggR0nyru6i2P0i4w
zv!!{W<`YTI(QZ%CU^o0j2-U*}=wxEA>kIBjC%<_WtARIej9O0;BCUQIHY2fcUN$bZ
z#)tMJIBaA$&DzhSZ
za=+0b8X-(q%P*tL7fFnpg0tj&)zm_lm5IPs#t0^5(e}ZgeCbJTmWG-4+`3xxWxaT6kgf
zr*x+dVQW^CryJK?0n)@ju{j^|ytcBU#LHxiYZ-?E{}JlHh}H$$dz42XtYF0Pi6obj
z#g;=7cDTs%G+)<<5lovSXC~B@-;Vjh`gO}B&f@q|Yk$*6(&0`AtPy&B5buuhcoK}_
zZ`WF@_L`MJt&Y_5&6N8PFpuP&WqS$J{3vWNg>S6=JB`~&7^TfVh>XWP`D4|MyPq*n
zo8|F>0B@bGhJANF02Z5aaH6f)n8E#nIKdV34?53;$R(mBQhY9xnSE%1GQFA~-)Z_1
z4^QW4ulz#1aK6`#L!8!q8kx2AHL_jPXI+0^YHRl%yqrB{w1clD_O}OfCHW?(;PO@K2;S{;^(|X3D86Sfm}+!xihP>5lPa0zg$+RasH_
zhN8+1E9GmN%F3E5YVyjeOy^8h>D~W9xEJW*>lOLG6R`WjOH6`ol$9OU!Zlnh2ovb#
z8{jF1jSTV>^9{nf0{{_4OWR!R!8Wq8gQi2n`g98bizKI#J14&+zab9D!Y_u1zt-)+
tBH3UiHuQC}zh}4y)WbFuulJ9E65z)rUhKAxS~hb8Kp$y>AnCX~`Y(6_1egE-
literal 0
HcmV?d00001
diff --git a/docs/.vuepress/public/browserconfig.xml b/docs/.vuepress/public/browserconfig.xml
new file mode 100644
index 00000000..b3930d0f
--- /dev/null
+++ b/docs/.vuepress/public/browserconfig.xml
@@ -0,0 +1,9 @@
+
+
+
+
+
+ #da532c
+
+
+
diff --git a/docs/.vuepress/public/favicon-16x16.png b/docs/.vuepress/public/favicon-16x16.png
new file mode 100644
index 0000000000000000000000000000000000000000..5f15c3b0af35e87882b492291e93fced03e0842f
GIT binary patch
literal 632
zcmeAS@N?(olHy`uVBq!ia0vp^0wB!60wlNoGJgf6SkfJR9T^xl_H+M9WCijSl0AZa
z85pY67#JE_7#My5g&JNkFq9fFFuY1&V6d9Oz#v{QXIG#NP=YDR+ueoXe|!I#{XiaP
zfk$L90|U1(2s1Lwnj--eWH0gbb!C6TA;iMKFLQm;8U_YN4o?@y5RLPt7xwEl7m6JF
z=pQX~NVa4DN~1)h4hfIT2NQ}pU0PaN3YhgAl3u)E<9Q)HvE#0a&OaUBsjUZ9RQK3B
zU)7#u_xPvX^EuDw^gKIt>-t5R?^>LFLHk>`C>j?9y_>u8X0SkH?t`*yITlmXja}Ir
zC(UE?yZK>V((NUj)po4mY?Epi&$H2PkejGbVKPm^jMX*si>089wLs3*#Rm=aj{Fi8
zbrG8~p?R8pd#U!RZk@_WP98klXBhu)*1mMC@UwYJsDR8Xrp<97y(+@@_PqM^H1OI2
z&2Fo=JMzAKw-((UBKG@(qDJ(^uy);|vZt=ow$7X;ddIRnzMtcG)8Z#9|4+TxRr)lo
zE&sb=&^0O7siMGOQY~?fC`m~yNwrEYN(E93Mg~R(x&{`yh87_PCRPRpR)(h921Y;z
z>#9ZZC>nC}Q!>*kacd~~F7+6wA-pOiq9iD>T%n*SKP@vSRiUJ^AXOo=pd^`rp<>SC
zPdprjVHz5z{7;|pd>X{Stjw*K%q^@e>^)h8Sy;iP!Q^lXv-0K;h0`~#oH%mkh|Cf8
f(+wUAy!04ei3^r|ax$F?w1UCY)z4*}Q$iB}TN~l#
literal 0
HcmV?d00001
diff --git a/docs/.vuepress/public/favicon-32x32.png b/docs/.vuepress/public/favicon-32x32.png
new file mode 100644
index 0000000000000000000000000000000000000000..9433c807fd08ae205cc8ec2c19345c0e6d57e482
GIT binary patch
literal 942
zcmeAS@N?(olHy`uVBq!ia0vp^3LwnE0wix1Z>k4UEa{HEjtmSN`?>!lvVtU&J%W50
z7^>757#dm_7=8hT8eT9klo~KFyh>nTu$sZZAYL$MSD+10f+@+{-G$+Qd;gjJKptm-
zM`SSr1Gg{;GcwGYBLNg-FY)wsWq-mU#KOQYbA8eppe8#{7sn8f<8!B+_YMgZIc`5S
zs#n)Z#PB$e*0D9Ac`h%OntG3J`DCdzibxGCDl@Hi9qnThJBJ!dtT
z@pwDe=XtKjPQExSe)xk#bB*zaqYhdN9WJd-|5iKEd)0mY9)n}m3tDa8
zEc0_0eerzb-hYO53LL3&7hJb~yFOvzn%D7s(y?=%YdulxJvM#IyI29WjIHVt0iki5
zb~43_<^2?So?D^oA97))Kuy={tR;)raI0l3Zc#WR5q{+IUI%l|dx{+Lnfz)VkCZqp
zuVvj&jIH(DA+eaF>2>ZwFWH#o%j{0;KP}wh#m(UQtCD3$O;FML?e7^6aP`J7ym{cS
z+Cj0PRjtdfF!jB2C2ciYS(bAo4o
zm@p$(_}s5+SKHKQxCs0!;98ygq?P}C_?MeLVYQOWI|ZaKRm|kc-coP+TQczZ@4SD<
z{uo)weDAEB#W(LGo1&li?kS)2oKxOjwSD+!vW9ei{LTl~;T-eQFZ}W@J~=Vz>i!Q)
zPnusi&a3wGjLqjWMh8v%s$NHDy06I&c2%~1v1IaFla^4n7pr#^{?Gk>Wky`F0vF%f
zJFXS3O1?`y25Jbe3W+EQN-S3>D9TUE%t=)!
zsVqoU$Sf#HW?-n8^Y{}FM`4(T#wq{PXFQ(m_pwD+_y17GV}vaA`0(oWiWU
tIYi;~jVmXPoH-(Mg#C1b#{w@shF9W(C7+y3rvj~D@O1TaS?83{1OSCRd>Q}%
literal 0
HcmV?d00001
diff --git a/docs/.vuepress/public/favicon-svg.svg b/docs/.vuepress/public/favicon-svg.svg
new file mode 100644
index 00000000..dbefbad9
--- /dev/null
+++ b/docs/.vuepress/public/favicon-svg.svg
@@ -0,0 +1,21 @@
+
+
+
+
+
+
+
+
+
+
+
diff --git a/docs/.vuepress/public/logo-bw.svg b/docs/.vuepress/public/logo-bw.svg
new file mode 100644
index 00000000..f2575260
--- /dev/null
+++ b/docs/.vuepress/public/logo-bw.svg
@@ -0,0 +1,8 @@
+
+
+
+
+
+
+
+
diff --git a/docs/.vuepress/public/logo.svg b/docs/.vuepress/public/logo.svg
new file mode 100644
index 00000000..95ca6d30
--- /dev/null
+++ b/docs/.vuepress/public/logo.svg
@@ -0,0 +1,18 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/docs/.vuepress/public/mstile-150x150.png b/docs/.vuepress/public/mstile-150x150.png
new file mode 100644
index 0000000000000000000000000000000000000000..b1770354a1a01675d4ac79772b5abe846cccab16
GIT binary patch
literal 3039
zcmcJRc{J4T7so#$6*KuEVM59-rWr=oY_r(cB+DdY9VCpw*hL8$%Dy#1(IN`@W=o8v
zVandv$Iy^QeY1tM{O0@T?~mUHhVYWAiXFO1zE8H2x6DsCSnYk9!(1K|&xk=~A4
zfz~J9Y@sWZwQQ^*xq(f6p^qiuN>=VRRu}DNEad_(?)bvi(ac_jnLc}
zPgPrdPk^%%37wq$d~8E@-_hz;4_BH38aY~2jW-`!er`8%heBs1Rh~MpQiOkitxwwQ
z>=Y8qyMR$)ALEfx$5!0*D+%ZkoL!Xiz*SD@M{P)de+5lS49ihQ8)0At>6zY$6v?;J
zTNS(0PwzE_-Q|bup6%Dgs4E5BAA7>qt@TTu(BCwK&@RyN%Z-^fWeXb8#~JGN_Ct{#
z-lrz=Aa3QzV4ReFj=ya7^PE4!)-`O!wbrJvOzmC8{D7V242(6|4_=A~31@t@(tlaI
zU>Gy^9h`wtBQM6TRjU(V*Pt~dc0rOd1dd2TOCIfem()&w5Ok|(Dg!gSWKxsUF{kE8oFM|GJ#3%uEV}Zz
zCMyOKpoBic2KmV&CCtm;a+>mMbQfA-xb2L26h+xzMUWR&l{>cPB_6A5ef}A?f>+4g
z!TN~rZ0~vN6`m(8np=9kdXSq{MJI=SI-Z6s2UA!cs-W2SUi=!lzETaZ65Z_&<#lgJ
zL==>+Y5ZAu3KAHO6rT907HWKqYyiL6;e4VtwV2LUTc|V@sP?sL981!|Xb
z@tfI_qTag|xp=clJc)GGH;p5rz=&JQ9voCV5@*u!RC(kZp3SMV%5&;aPw0Ma+j*Gz
zFLmz4JrOa_>H~Reze%yq8h-t&h_N-OAQN`E+Q-dr_Hm-NDW^Cs^;SP511qi?jd{`k
z+;tv@|McNY74Gxnm>cVLR`~{kpp>I-(Hg{=?<13R)#-TV$otcbhn6LI>x+!$;eGHv
z{wnUkQYKO~!7QCaP#6r2lsrcZKjq%sd5f@J9XCMwdSP&`ZZKa>D6A_grOeavQDh2+
z*Ymi4JafvL{134Mz8vz~*fq`hOVxU0;crOPH*IQL;#w<~)wAjEoIy*~Vp;|*IDheH
zPjxRstXb=eNPqenk;mne1bP93IUUpWStdT5;zlym83<)-NZy@XLj2nIuYhCPIpu9jag)
z)c9;jYm|%(VBPIXxgCpB^>P%J
z#6sJ=dRgODrQUYqk;(G!6Jt%gam{I6y7dnHJz7!;ZaN_omKi0$2x(dg5<0Ifh5CGP
z(b|AIGP5&rj+)x4iUyx`&+>+6M+aBT?QFveKbGD0)^SXAj7aN#djdG
z^0Qc~?l7`Fjfi-IxSmk6E*-6$bpb|7wOZ;;eJZ6A=wk73B5KOE(sNp~sRu5#J{!JU
zej#s^v9nD#;~fBB@_?>9*OL?-3dL^3zx?a(`rL(T8FX
zj_r7Sw7WrUc+Vx_ddN8%TyYbhguii5yD8^aYI9*5)Okbg{zT{lrspqnigF{^C`iJe
z`AAZa5MrIWJd}2vY)QS(m;GgZ%?J{LZvSWiLI3O?j!`lqUVh{gla_rC!|Ml8YB#L&
zZ);u#LR_;`vW02voOQ@^>bpIdA+l=
z&J6$hP1AJ+syuWmo!`KuV_n~H(U8HmXeM|JMI$muWunKK4Qivav>cu>9m-fZS>(8+
zQvde1Nzv6Vqa0zC1h1kscy27YtKD-OR)0ILHv-4!sWbDeDr~KQB4e_(Xz~G)SV1BY
z8%hWaU*RN3mg}vmbf%_e59qQ=-}>p3t_g!}aRU(!e92pkSFg)(U?B*~XzJJ7j|PiidDdyQ2Vx}LpRF6^U2w42tY
z?XOiARE>|I`d?N!_U6HFz@645t2NHOG2|~HQSx6S>%8P&v(g~sQny#;qF`+0!R*sN
zG}3-=RT+ftaQq4yll}gAC7nL;T7EB3OnAan{`&9$$YEfDkhStC^s)r4A78d`qNH2z
z(Oks1?9m_8ju(l@pww|Mh*NR<%_+Q#?hG;hd
z^OwComNJRHWgIavaaC}5`WZ;NGmW3iTb#@$XQVo9O9-KnNtDG3{9EwFIa?bs@M?9&
zHbc?I0rBx%er?r-PWuP`qOOlu*=W@N+vJ%)Pqyd;90=4?jOLE03PiLE(FaHL)$=C!
zavh)n(}1bNbkyNGPB4TX45kOyIs?<-9u7XKyz(DFaEMQUU-W+mh&zxaE^sNv$%W{M
zi;@i`g!lzq^_3+?hx*C}gc7|0fK;}$0XiJ!tgJj>H%PZ&I|96Nf>3WkQ8`ho++)0=
zvZy2k)rVKE(Mfi2a{O%%y+^9&&|s4JeM>0tSzMU7VXBqSEdf}dZBccmp2`0P(>;9C
literal 0
HcmV?d00001
diff --git a/docs/.vuepress/public/safari-pinned-tab.svg b/docs/.vuepress/public/safari-pinned-tab.svg
new file mode 100644
index 00000000..db52b2cd
--- /dev/null
+++ b/docs/.vuepress/public/safari-pinned-tab.svg
@@ -0,0 +1,25 @@
+
+
+
+
+Created by potrace 1.11, written by Peter Selinger 2001-2013
+
+
+
+
+
diff --git a/docs/.vuepress/public/site.webmanifest b/docs/.vuepress/public/site.webmanifest
new file mode 100644
index 00000000..5b4c0a3c
--- /dev/null
+++ b/docs/.vuepress/public/site.webmanifest
@@ -0,0 +1,19 @@
+{
+ "name": "Cosmos SDK Documentation",
+ "short_name": "Cosmos SDK",
+ "icons": [
+ {
+ "src": "/android-chrome-192x192.png",
+ "sizes": "192x192",
+ "type": "image/png"
+ },
+ {
+ "src": "/android-chrome-256x256.png",
+ "sizes": "256x256",
+ "type": "image/png"
+ }
+ ],
+ "theme_color": "#ffffff",
+ "background_color": "#ffffff",
+ "display": "standalone"
+}
diff --git a/docs/DOCS_README.md b/docs/DOCS_README.md
new file mode 100644
index 00000000..b309c2fd
--- /dev/null
+++ b/docs/DOCS_README.md
@@ -0,0 +1,124 @@
+# Updating the docs
+
+If you want to update the documentation please open a pr on ibc-go.
+
+## Translating
+
+- Docs translations live in a `docs/country-code/` folder, where `country-code` stands for the country code of the language used (`cn` for Chinese, `kr` for Korean, `fr` for French, ...).
+- Always translate content living on `main`.
+- Specify the release/tag of the translation in the README of your translation folder. Update the release/tag each time you update the translation.
+
+## Docs Build Workflow
+
+The documentation for ibc-go is hosted at https://ibc.cosmos.network/ and is
+built from the files in this (`/docs`) directory for
+[main](https://github.com/cosmos/ibc-go/tree/main/docs).
+
+### How It Works
+
+There is a CircleCI job listening for changes in the `/docs` directory, on
+the `main` branch. Any updates to files in this directory
+on that branch will automatically trigger a website deployment. Under the hood,
+the private website repository has a `make build-docs` target consumed by a CircleCI job in that repo.
+
+## README
+
+The [README.md](./README.md) is also the landing page for the documentation
+on the website. During the Jenkins build, the current commit is added to the bottom
+of the README.
+
+## Config.js
+
+The [config.js](./.vuepress/config.js) generates the sidebar and Table of Contents
+on the website docs. Note the use of relative links and the omission of
+file extensions. Additional features are available to improve the look
+of the sidebar.
+
+## Links
+
+**NOTE:** Strongly consider the existing links - both within this directory
+and to the website docs - when moving or deleting files.
+
+Relative links should be used nearly everywhere, having discovered and weighed the following:
+
+### Relative
+
+Where is the other file, relative to the current one?
+
+- works both on GitHub and for the VuePress build
+- confusing / annoying to have things like: `../../../../myfile.md`
+- requires more updates when files are re-shuffled
+
+### Absolute
+
+Where is the other file, given the root of the repo?
+
+- works on GitHub, doesn't work for the VuePress build
+- this is much nicer: `/docs/hereitis/myfile.md`
+- if you move that file around, the links inside it are preserved (but not to it, of course)
+
+### Full
+
+The full GitHub URL to a file or directory. Used occasionally when it makes sense
+to send users to the GitHub.
+
+## Building Locally
+
+Make sure you are in the `docs` directory and run the following commands:
+
+```sh
+rm -rf node_modules
+```
+
+This command removes the old version of the visual theme and the required packages. This step is optional.
+
+```sh
+npm install
+```
+
+Install the theme and all dependencies.
+
+```sh
+npm run serve
+```
+
+Run the `pre` and `post` hooks and start a hot-reloading web server. See the output of this command for the URL (it is often https://localhost:8080).
+
+To build documentation as a static website run `npm run build`. You will find the website in `.vuepress/dist` directory.
+
+## Build RPC Docs
+
+TODO: is this still true?
+
+First, run `make tools` from the root of repo, to install the swagger-ui tool.
+
+Then, edit the `swagger.yaml` manually; it is found [here](https://github.com/cosmos/cosmos-sdk/blob/master/client/lcd/swagger-ui/swagger.yaml)
+
+Finally, run `make update_gaia_lite_docs` from the root of the repo.
+
+## Search
+
+TODO: update or remove
+
+We are using [Algolia](https://www.algolia.com) to power full-text search. This uses a public API search-only key in the `config.js` as well as a [cosmos_network.json](https://github.com/algolia/docsearch-configs/blob/master/configs/cosmos_network.json) configuration file that we can update with PRs.
+
+## Consistency
+
+Because the build processes are identical (as is the information contained herein), this file should be kept in sync as
+much as possible with its [counterpart in the Cosmos SDK repo](https://github.com/cosmos/cosmos-sdk/tree/master/docs/DOCS_README.md).
+
+### Update and Build the RPC docs
+
+1. Execute the following command at the root directory to install the swagger-ui generate tool.
+ ```bash
+ make tools
+ ```
+2. Edit API docs
+ 1. Directly Edit API docs manually: `client/lcd/swagger-ui/swagger.yaml`.
+ 2. Edit API docs within the [Swagger Editor](https://editor.swagger.io/). Please refer to this [document](https://swagger.io/docs/specification/2-0/basic-structure/) for the correct structure in `.yaml`.
+3. Download `swagger.yaml` and replace the old `swagger.yaml` under the folder `client/lcd/swagger-ui`.
+4. Compile simd
+ ```bash
+ make install
+ ```
diff --git a/docs/spec.md b/docs/OLD_README.md
similarity index 100%
rename from docs/spec.md
rename to docs/OLD_README.md
diff --git a/docs/README.md b/docs/README.md
index e84a4d1b..d6c92a8a 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -1,16 +1,15 @@
# IBC
-This repository contains reference documentation for the IBC protocol integration and concepts:
+Welcome to the IBC-Go documentation!
-1. [Overview](./overview.md)
-2. [Integration](./integration.md)
-3. [Customization](./custom.md)
-4. [Relayer](./relayer.md)
-5. [Governance Proposals](./proposals.md)
+The inter-blockchain communication protocol (IBC) is an end-to-end, connection-oriented, stateful protocol for reliable, ordered, and authenticated communication between heterogeneous blockchains arranged in an unknown and dynamic topology.
+The protocol realises this by specifying a set of data structures, abstractions, and semantics that can be implemented by any distributed ledger provided they satisfy a small set of requirements.
+
+IBC can be used to build a wide range of cross-chain applications, which include token transfers, atomic swaps, multi-chain smart contracts (with or without mutually comprehensible VMs), and data & code sharding of various kinds.
diff --git a/docs/custom.md b/docs/ibc/apps.md
similarity index 99%
rename from docs/custom.md
rename to docs/ibc/apps.md
index 64d857f2..c43afbba 100644
--- a/docs/custom.md
+++ b/docs/ibc/apps.md
@@ -2,7 +2,7 @@
order: 3
-->
-# Customization
+# IBC Applications
Learn how to configure your application to use IBC and send data packets to other chains. {synopsis}
diff --git a/docs/integration.md b/docs/ibc/integration.md
similarity index 100%
rename from docs/integration.md
rename to docs/ibc/integration.md
diff --git a/docs/overview.md b/docs/ibc/overview.md
similarity index 99%
rename from docs/overview.md
rename to docs/ibc/overview.md
index a513c792..ff1f46da 100644
--- a/docs/overview.md
+++ b/docs/ibc/overview.md
@@ -1,5 +1,7 @@
# Overview
diff --git a/docs/proposals.md b/docs/ibc/proposals.md
similarity index 100%
rename from docs/proposals.md
rename to docs/ibc/proposals.md
diff --git a/docs/ibc/proto-docs.md b/docs/ibc/proto-docs.md
index 14fc96fa..aee597f6 100644
--- a/docs/ibc/proto-docs.md
+++ b/docs/ibc/proto-docs.md
@@ -1,7760 +1,3368 @@
-
-
-
-
- Protocol Documentation
-
-
-
-
-
-
-
-
-
-
- Protocol Documentation
-
- Table of Contents
-
-
-
-
-
-
-
ibc/apps/transfer/v1/transfer.proto Top
-
-
-
-
- DenomTrace
- DenomTrace contains the base denomination for ICS20 fungible tokens and the
source tracing information path.
-
-
-
-
- Field Type Label Description
-
-
-
-
- path
- string
-
- path defines the chain of port/channel identifiers used for tracing the
-source of the fungible token.
-
-
-
- base_denom
- string
-
- base denomination of the relayed fungible token.
-
-
-
-
-
-
-
-
-
- FungibleTokenPacketData
- FungibleTokenPacketData defines a struct for the packet payload
See FungibleTokenPacketData spec:
https://github.com/cosmos/ics/tree/master/spec/ics-020-fungible-token-transfer#data-structures
-
-
-
-
- Field Type Label Description
-
-
-
-
- denom
- string
-
- the token denomination to be transferred
-
-
-
- amount
- uint64
-
- the token amount to be transferred
-
-
-
- sender
- string
-
- the sender address
-
-
-
- receiver
- string
-
- the recipient address on the destination chain
-
-
-
-
-
-
-
-
-
- Params
- Params defines the set of IBC transfer parameters.
NOTE: To prevent a single token from being transferred, set the
TransfersEnabled parameter to true and then set the bank module's SendEnabled
parameter for the denomination to false.
-
-
-
-
- Field Type Label Description
-
-
-
-
- send_enabled
- bool
-
- send_enabled enables or disables all cross-chain token transfers from this
-chain.
-
-
-
- receive_enabled
- bool
-
- receive_enabled enables or disables all cross-chain token transfers to this
-chain.
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
ibc/apps/transfer/v1/genesis.proto Top
-
-
-
-
- GenesisState
- GenesisState defines the ibc-transfer genesis state
-
-
-
-
- Field Type Label Description
-
-
-
-
- port_id
- string
-
-
-
-
-
- denom_traces
- DenomTrace
- repeated
-
-
-
-
- params
- Params
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
ibc/apps/transfer/v1/query.proto Top
-
-
-
-
- QueryDenomTraceRequest
- QueryDenomTraceRequest is the request type for the Query/DenomTrace RPC
method
-
-
-
-
- Field Type Label Description
-
-
-
-
- hash
- string
-
- hash (in hex format) of the denomination trace information.
-
-
-
-
-
-
-
-
-
- QueryDenomTraceResponse
- QueryDenomTraceResponse is the response type for the Query/DenomTrace RPC
method.
-
-
-
-
- Field Type Label Description
-
-
-
-
- denom_trace
- DenomTrace
-
- denom_trace returns the requested denomination trace information.
-
-
-
-
-
-
-
-
-
- QueryDenomTracesRequest
- QueryConnectionsRequest is the request type for the Query/DenomTraces RPC
method
-
-
-
-
-
-
-
-
- QueryDenomTracesResponse
- QueryConnectionsResponse is the response type for the Query/DenomTraces RPC
method.
-
-
-
-
- Field Type Label Description
-
-
-
-
- denom_traces
- DenomTrace
- repeated
- denom_traces returns all denominations trace information.
-
-
-
- pagination
- cosmos.base.query.v1beta1.PageResponse
-
- pagination defines the pagination in the response.
-
-
-
-
-
-
-
-
-
- QueryParamsRequest
- QueryParamsRequest is the request type for the Query/Params RPC method.
-
-
-
-
-
- QueryParamsResponse
- QueryParamsResponse is the response type for the Query/Params RPC method.
-
-
-
-
- Field Type Label Description
-
-
-
-
- params
- Params
-
- params defines the parameters of the module.
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- Query
- Query provides defines the gRPC querier service.
-
-
-
-
-
- Methods with HTTP bindings
-
-
-
- Method Name
- Method
- Pattern
- Body
-
-
-
-
-
-
-
- DenomTrace
- GET
- /ibc/apps/transfer/v1/denom_traces/{hash}
-
-
-
-
-
-
-
- DenomTraces
- GET
- /ibc/apps/transfer/v1/denom_traces
-
-
-
-
-
-
-
- Params
- GET
- /ibc/apps/transfer/v1/params
-
-
-
-
-
-
-
-
-
-
-
-
ibc/core/client/v1/client.proto Top
-
-
-
-
- ClientConsensusStates
- ClientConsensusStates defines all the stored consensus states for a given
client.
-
-
-
-
- Field Type Label Description
-
-
-
-
- client_id
- string
-
- client identifier
-
-
-
- consensus_states
- ConsensusStateWithHeight
- repeated
- consensus states and their heights associated with the client
-
-
-
-
-
-
-
-
-
- ClientUpdateProposal
- ClientUpdateProposal is a governance proposal. If it passes, the substitute
client's consensus states starting from the 'initial height' are copied over
to the subjects client state. The proposal handler may fail if the subject
and the substitute do not match in client and chain parameters (with
exception to latest height, frozen height, and chain-id). The updated client
must also be valid (cannot be expired).
-
-
-
-
- Field Type Label Description
-
-
-
-
- title
- string
-
- the title of the update proposal
-
-
-
- description
- string
-
- the description of the proposal
-
-
-
- subject_client_id
- string
-
- the client identifier for the client to be updated if the proposal passes
-
-
-
- substitute_client_id
- string
-
- the substitute client identifier for the client standing in for the subject
-client
-
-
-
- initial_height
- Height
-
- the intital height to copy consensus states from the substitute to the
-subject
-
-
-
-
-
-
-
-
-
- ConsensusStateWithHeight
- ConsensusStateWithHeight defines a consensus state with an additional height
field.
-
-
-
-
- Field Type Label Description
-
-
-
-
- height
- Height
-
- consensus state height
-
-
-
- consensus_state
- google.protobuf.Any
-
- consensus state
-
-
-
-
-
-
-
-
-
- Height
- Height is a monotonically increasing data type
that can be compared against another Height for the purposes of updating and
freezing clients
Normally the RevisionHeight is incremented at each height while keeping
RevisionNumber the same. However some consensus algorithms may choose to
reset the height in certain conditions e.g. hard forks, state-machine
breaking changes In these cases, the RevisionNumber is incremented so that
height continues to be monitonically increasing even as the RevisionHeight
gets reset
-
-
-
-
- Field Type Label Description
-
-
-
-
- revision_number
- uint64
-
- the revision that the client is currently on
-
-
-
- revision_height
- uint64
-
- the height within the given revision
-
-
-
-
-
-
-
-
-
- IdentifiedClientState
- IdentifiedClientState defines a client state with an additional client
identifier field.
-
-
-
-
- Field Type Label Description
-
-
-
-
- client_id
- string
-
- client identifier
-
-
-
- client_state
- google.protobuf.Any
-
- client state
-
-
-
-
-
-
-
-
-
- Params
- Params defines the set of IBC light client parameters.
-
-
-
-
- Field Type Label Description
-
-
-
-
- allowed_clients
- string
- repeated
- allowed_clients defines the list of allowed client state types.
-
-
-
-
-
-
-
-
-
- UpgradeProposal
- UpgradeProposal is a gov Content type for initiating an IBC breaking
upgrade.
-
-
-
-
- Field Type Label Description
-
-
-
-
- title
- string
-
-
-
-
-
- description
- string
-
-
-
-
-
- plan
- cosmos.upgrade.v1beta1.Plan
-
-
-
-
-
- upgraded_client_state
- google.protobuf.Any
-
- An UpgradedClientState must be provided to perform an IBC breaking upgrade.
-This will make the chain commit to the correct upgraded (self) client state
-before the upgrade occurs, so that connecting chains can verify that the
-new upgraded client is valid by verifying a proof on the previous version
-of the chain. This will allow IBC connections to persist smoothly across
-planned chain upgrades
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
ibc/apps/transfer/v1/tx.proto Top
-
-
-
-
- MsgTransfer
- MsgTransfer defines a msg to transfer fungible tokens (i.e Coins) between
ICS20 enabled chains. See ICS Spec here:
https://github.com/cosmos/ics/tree/master/spec/ics-020-fungible-token-transfer#data-structures
-
-
-
-
- Field Type Label Description
-
-
-
-
- source_port
- string
-
- the port on which the packet will be sent
-
-
-
- source_channel
- string
-
- the channel by which the packet will be sent
-
-
-
- token
- cosmos.base.v1beta1.Coin
-
- the tokens to be transferred
-
-
-
- sender
- string
-
- the sender address
-
-
-
- receiver
- string
-
- the recipient address on the destination chain
-
-
-
- timeout_height
- ibc.core.client.v1.Height
-
- Timeout height relative to the current block height.
-The timeout is disabled when set to 0.
-
-
-
- timeout_timestamp
- uint64
-
- Timeout timestamp (in nanoseconds) relative to the current block timestamp.
-The timeout is disabled when set to 0.
-
-
-
-
-
-
-
-
-
- MsgTransferResponse
- MsgTransferResponse defines the Msg/Transfer response type.
-
-
-
-
-
-
-
-
-
-
-
- Msg
- Msg defines the ibc/transfer Msg service.
-
-
- Method Name Request Type Response Type Description
-
-
-
-
- Transfer
- MsgTransfer
- MsgTransferResponse
- Transfer defines a rpc handler method for MsgTransfer.
-
-
-
-
-
-
-
-
-
-
ibc/core/channel/v1/channel.proto Top
-
-
-
-
- Acknowledgement
- Acknowledgement is the recommended acknowledgement format to be used by
app-specific protocols.
NOTE: The field numbers 21 and 22 were explicitly chosen to avoid accidental
conflicts with other protobuf message formats used for acknowledgements.
The first byte of any message with this format will be the non-ASCII values
`0xaa` (result) or `0xb2` (error). Implemented as defined by ICS:
https://github.com/cosmos/ics/tree/master/spec/ics-004-channel-and-packet-semantics#acknowledgement-envelope
-
-
-
-
- Field Type Label Description
-
-
-
-
- result
- bytes
-
-
-
-
-
- error
- string
-
-
-
-
-
-
-
-
-
-
-
- Channel
- Channel defines pipeline for exactly-once packet delivery between specific
modules on separate blockchains, which has at least one end capable of
sending packets and one end capable of receiving packets.
-
-
-
-
- Field Type Label Description
-
-
-
-
- state
- State
-
- current state of the channel end
-
-
-
- ordering
- Order
-
- whether the channel is ordered or unordered
-
-
-
- counterparty
- Counterparty
-
- counterparty channel end
-
-
-
- connection_hops
- string
- repeated
- list of connection identifiers, in order, along which packets sent on
-this channel will travel
-
-
-
- version
- string
-
- opaque channel version, which is agreed upon during the handshake
-
-
-
-
-
-
-
-
-
- Counterparty
- Counterparty defines a channel end counterparty
-
-
-
-
- Field Type Label Description
-
-
-
-
- port_id
- string
-
- port on the counterparty chain which owns the other end of the channel.
-
-
-
- channel_id
- string
-
- channel end on the counterparty chain
-
-
-
-
-
-
-
-
-
- IdentifiedChannel
- IdentifiedChannel defines a channel with additional port and channel
identifier fields.
-
-
-
-
- Field Type Label Description
-
-
-
-
- state
- State
-
- current state of the channel end
-
-
-
- ordering
- Order
-
- whether the channel is ordered or unordered
-
-
-
- counterparty
- Counterparty
-
- counterparty channel end
-
-
-
- connection_hops
- string
- repeated
- list of connection identifiers, in order, along which packets sent on
-this channel will travel
-
-
-
- version
- string
-
- opaque channel version, which is agreed upon during the handshake
-
-
-
- port_id
- string
-
- port identifier
-
-
-
- channel_id
- string
-
- channel identifier
-
-
-
-
-
-
-
-
-
- Packet
- Packet defines a type that carries data across different chains through IBC
-
-
-
-
- Field Type Label Description
-
-
-
-
- sequence
- uint64
-
- number corresponds to the order of sends and receives, where a Packet
-with an earlier sequence number must be sent and received before a Packet
-with a later sequence number.
-
-
-
- source_port
- string
-
- identifies the port on the sending chain.
-
-
-
- source_channel
- string
-
- identifies the channel end on the sending chain.
-
-
-
- destination_port
- string
-
- identifies the port on the receiving chain.
-
-
-
- destination_channel
- string
-
- identifies the channel end on the receiving chain.
-
-
-
- data
- bytes
-
- actual opaque bytes transferred directly to the application module
-
-
-
- timeout_height
- ibc.core.client.v1.Height
-
- block height after which the packet times out
-
-
-
- timeout_timestamp
- uint64
-
- block timestamp (in nanoseconds) after which the packet times out
-
-
-
-
-
-
-
-
-
- PacketState
- PacketState defines the generic type necessary to retrieve and store
packet commitments, acknowledgements, and receipts.
Caller is responsible for knowing the context necessary to interpret this
state as a commitment, acknowledgement, or a receipt.
-
-
-
-
- Field Type Label Description
-
-
-
-
- port_id
- string
-
- channel port identifier.
-
-
-
- channel_id
- string
-
- channel unique identifier.
-
-
-
- sequence
- uint64
-
- packet sequence.
-
-
-
- data
- bytes
-
- embedded data that represents packet state.
-
-
-
-
-
-
-
-
-
-
-
- Order
- Order defines if a channel is ORDERED or UNORDERED
-
-
- Name Number Description
-
-
-
-
- ORDER_NONE_UNSPECIFIED
- 0
- zero-value for channel ordering
-
-
-
- ORDER_UNORDERED
- 1
- packets can be delivered in any order, which may differ from the order in
-which they were sent.
-
-
-
- ORDER_ORDERED
- 2
- packets are delivered exactly in the order which they were sent
-
-
-
-
-
- State
- State defines if a channel is in one of the following states:
CLOSED, INIT, TRYOPEN, OPEN or UNINITIALIZED.
-
-
- Name Number Description
-
-
-
-
- STATE_UNINITIALIZED_UNSPECIFIED
- 0
- Default State
-
-
-
- STATE_INIT
- 1
- A channel has just started the opening handshake.
-
-
-
- STATE_TRYOPEN
- 2
- A channel has acknowledged the handshake step on the counterparty chain.
-
-
-
- STATE_OPEN
- 3
- A channel has completed the handshake. Open channels are
-ready to send and receive packets.
-
-
-
- STATE_CLOSED
- 4
- A channel has been closed and can no longer be used to send or receive
-packets.
-
-
-
-
-
-
-
-
-
-
-
-
-
ibc/core/channel/v1/genesis.proto Top
-
-
-
-
- GenesisState
- GenesisState defines the ibc channel submodule's genesis state.
-
-
-
-
-
-
-
-
- PacketSequence
- PacketSequence defines the genesis type necessary to retrieve and store
next send and receive sequences.
-
-
-
-
- Field Type Label Description
-
-
-
-
- port_id
- string
-
-
-
-
-
- channel_id
- string
-
-
-
-
-
- sequence
- uint64
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
ibc/core/channel/v1/query.proto Top
-
-
-
-
- QueryChannelClientStateRequest
- QueryChannelClientStateRequest is the request type for the Query/ClientState
RPC method
-
-
-
-
- Field Type Label Description
-
-
-
-
- port_id
- string
-
- port unique identifier
-
-
-
- channel_id
- string
-
- channel unique identifier
-
-
-
-
-
-
-
-
-
- QueryChannelClientStateResponse
- QueryChannelClientStateResponse is the Response type for the
Query/QueryChannelClientState RPC method
-
-
-
-
-
-
-
-
- QueryChannelConsensusStateRequest
- QueryChannelConsensusStateRequest is the request type for the
Query/ConsensusState RPC method
-
-
-
-
- Field Type Label Description
-
-
-
-
- port_id
- string
-
- port unique identifier
-
-
-
- channel_id
- string
-
- channel unique identifier
-
-
-
- revision_number
- uint64
-
- revision number of the consensus state
-
-
-
- revision_height
- uint64
-
- revision height of the consensus state
-
-
-
-
-
-
-
-
-
- QueryChannelConsensusStateResponse
- QueryChannelClientStateResponse is the Response type for the
Query/QueryChannelClientState RPC method
-
-
-
-
- Field Type Label Description
-
-
-
-
- consensus_state
- google.protobuf.Any
-
- consensus state associated with the channel
-
-
-
- client_id
- string
-
- client ID associated with the consensus state
-
-
-
- proof
- bytes
-
- merkle proof of existence
-
-
-
- proof_height
- ibc.core.client.v1.Height
-
- height at which the proof was retrieved
-
-
-
-
-
-
-
-
-
- QueryChannelRequest
- QueryChannelRequest is the request type for the Query/Channel RPC method
-
-
-
-
- Field Type Label Description
-
-
-
-
- port_id
- string
-
- port unique identifier
-
-
-
- channel_id
- string
-
- channel unique identifier
-
-
-
-
-
-
-
-
-
- QueryChannelResponse
- QueryChannelResponse is the response type for the Query/Channel RPC method.
Besides the Channel end, it includes a proof and the height from which the
proof was retrieved.
-
-
-
-
- Field Type Label Description
-
-
-
-
- channel
- Channel
-
- channel associated with the request identifiers
-
-
-
- proof
- bytes
-
- merkle proof of existence
-
-
-
- proof_height
- ibc.core.client.v1.Height
-
- height at which the proof was retrieved
-
-
-
-
-
-
-
-
-
- QueryChannelsRequest
- QueryChannelsRequest is the request type for the Query/Channels RPC method
-
-
-
-
-
-
-
-
- QueryChannelsResponse
- QueryChannelsResponse is the response type for the Query/Channels RPC method.
-
-
-
-
-
-
-
-
- QueryConnectionChannelsRequest
- QueryConnectionChannelsRequest is the request type for the
Query/QueryConnectionChannels RPC method
-
-
-
-
-
-
-
-
- QueryConnectionChannelsResponse
- QueryConnectionChannelsResponse is the Response type for the
Query/QueryConnectionChannels RPC method
-
-
-
-
-
-
-
-
- QueryNextSequenceReceiveRequest
- QueryNextSequenceReceiveRequest is the request type for the
Query/QueryNextSequenceReceiveRequest RPC method
-
-
-
-
- Field Type Label Description
-
-
-
-
- port_id
- string
-
- port unique identifier
-
-
-
- channel_id
- string
-
- channel unique identifier
-
-
-
-
-
-
-
-
-
- QueryNextSequenceReceiveResponse
- QuerySequenceResponse is the request type for the
Query/QueryNextSequenceReceiveResponse RPC method
-
-
-
-
- Field Type Label Description
-
-
-
-
- next_sequence_receive
- uint64
-
- next sequence receive number
-
-
-
- proof
- bytes
-
- merkle proof of existence
-
-
-
- proof_height
- ibc.core.client.v1.Height
-
- height at which the proof was retrieved
-
-
-
-
-
-
-
-
-
- QueryPacketAcknowledgementRequest
- QueryPacketAcknowledgementRequest is the request type for the
Query/PacketAcknowledgement RPC method
-
-
-
-
- Field Type Label Description
-
-
-
-
- port_id
- string
-
- port unique identifier
-
-
-
- channel_id
- string
-
- channel unique identifier
-
-
-
- sequence
- uint64
-
- packet sequence
-
-
-
-
-
-
-
-
-
- QueryPacketAcknowledgementResponse
- QueryPacketAcknowledgementResponse defines the client query response for a
packet which also includes a proof and the height from which the
proof was retrieved
-
-
-
-
- Field Type Label Description
-
-
-
-
- acknowledgement
- bytes
-
- packet associated with the request fields
-
-
-
- proof
- bytes
-
- merkle proof of existence
-
-
-
- proof_height
- ibc.core.client.v1.Height
-
- height at which the proof was retrieved
-
-
-
-
-
-
-
-
-
- QueryPacketAcknowledgementsRequest
- QueryPacketAcknowledgementsRequest is the request type for the
Query/QueryPacketCommitments RPC method
-
-
-
-
-
-
-
-
- QueryPacketAcknowledgementsResponse
- QueryPacketAcknowledgemetsResponse is the request type for the
Query/QueryPacketAcknowledgements RPC method
-
-
-
-
-
-
-
-
- QueryPacketCommitmentRequest
- QueryPacketCommitmentRequest is the request type for the
Query/PacketCommitment RPC method
-
-
-
-
- Field Type Label Description
-
-
-
-
- port_id
- string
-
- port unique identifier
-
-
-
- channel_id
- string
-
- channel unique identifier
-
-
-
- sequence
- uint64
-
- packet sequence
-
-
-
-
-
-
-
-
-
- QueryPacketCommitmentResponse
- QueryPacketCommitmentResponse defines the client query response for a packet
which also includes a proof and the height from which the proof was retrieved.
-Fields: `commitment` (bytes): packet commitment associated with the request fields; `proof` (bytes): merkle proof of existence; `proof_height` (ibc.core.client.v1.Height): height at which the proof was retrieved.
-
-### QueryPacketCommitmentsRequest
-QueryPacketCommitmentsRequest is the request type for the Query/QueryPacketCommitments RPC method.
-
-### QueryPacketCommitmentsResponse
-QueryPacketCommitmentsResponse is the response type for the Query/QueryPacketCommitments RPC method.
-
-### QueryPacketReceiptRequest
-QueryPacketReceiptRequest is the request type for the Query/PacketReceipt RPC method.
-Fields: `port_id` (string): port unique identifier; `channel_id` (string): channel unique identifier; `sequence` (uint64): packet sequence.
-
-### QueryPacketReceiptResponse
-QueryPacketReceiptResponse defines the client query response for a packet receipt, which also includes a proof and the height from which the proof was retrieved.
-Fields: `received` (bool): success flag for if receipt exists; `proof` (bytes): merkle proof of existence; `proof_height` (ibc.core.client.v1.Height): height at which the proof was retrieved.
-
-### QueryUnreceivedAcksRequest
-QueryUnreceivedAcksRequest is the request type for the Query/UnreceivedAcks RPC method.
-Fields: `port_id` (string): port unique identifier; `channel_id` (string): channel unique identifier; `packet_ack_sequences` (uint64, repeated): list of acknowledgement sequences.
-
-### QueryUnreceivedAcksResponse
-QueryUnreceivedAcksResponse is the response type for the Query/UnreceivedAcks RPC method.
-Fields: `sequences` (uint64, repeated): list of unreceived acknowledgement sequences; `height` (ibc.core.client.v1.Height): query block height.
-
-### QueryUnreceivedPacketsRequest
-QueryUnreceivedPacketsRequest is the request type for the Query/UnreceivedPackets RPC method.
-Fields: `port_id` (string): port unique identifier; `channel_id` (string): channel unique identifier; `packet_commitment_sequences` (uint64, repeated): list of packet sequences.
-
-### QueryUnreceivedPacketsResponse
-QueryUnreceivedPacketsResponse is the response type for the Query/UnreceivedPackets RPC method.
-Fields: `sequences` (uint64, repeated): list of unreceived packet sequences; `height` (ibc.core.client.v1.Height): query block height.
-
-### Query
-Query defines the gRPC querier service. Methods with HTTP bindings:
-
-| Method Name | Method | Pattern |
-| ----------- | ------ | ------- |
-| `Channel` | GET | /ibc/core/channel/v1/channels/{channel_id}/ports/{port_id} |
-| `Channels` | GET | /ibc/core/channel/v1/channels |
-| `ConnectionChannels` | GET | /ibc/core/channel/v1/connections/{connection}/channels |
-| `ChannelClientState` | GET | /ibc/core/channel/v1/channels/{channel_id}/ports/{port_id}/client_state |
-| `ChannelConsensusState` | GET | /ibc/core/channel/v1/channels/{channel_id}/ports/{port_id}/consensus_state/revision/{revision_number}/height/{revision_height} |
-| `PacketCommitment` | GET | /ibc/core/channel/v1/channels/{channel_id}/ports/{port_id}/packet_commitments/{sequence} |
-| `PacketCommitments` | GET | /ibc/core/channel/v1/channels/{channel_id}/ports/{port_id}/packet_commitments |
-| `PacketReceipt` | GET | /ibc/core/channel/v1/channels/{channel_id}/ports/{port_id}/packet_receipts/{sequence} |
-| `PacketAcknowledgement` | GET | /ibc/core/channel/v1/channels/{channel_id}/ports/{port_id}/packet_acks/{sequence} |
-| `PacketAcknowledgements` | GET | /ibc/core/channel/v1/channels/{channel_id}/ports/{port_id}/packet_acknowledgements |
-| `UnreceivedPackets` | GET | /ibc/core/channel/v1/channels/{channel_id}/ports/{port_id}/packet_commitments/{packet_commitment_sequences}/unreceived_packets |
-| `UnreceivedAcks` | GET | /ibc/core/channel/v1/channels/{channel_id}/ports/{port_id}/packet_commitments/{packet_ack_sequences}/unreceived_acks |
-| `NextSequenceReceive` | GET | /ibc/core/channel/v1/channels/{channel_id}/ports/{port_id}/next_sequence |
-
-## ibc/core/channel/v1/tx.proto
-
-### MsgAcknowledgement
-MsgAcknowledgement receives an incoming IBC acknowledgement.
-
-### MsgAcknowledgementResponse
-MsgAcknowledgementResponse defines the Msg/Acknowledgement response type.
-
-### MsgChannelCloseConfirm
-MsgChannelCloseConfirm defines a msg sent by a Relayer to Chain B to acknowledge the change of channel state to CLOSED on Chain A.
-
-### MsgChannelCloseConfirmResponse
-MsgChannelCloseConfirmResponse defines the Msg/ChannelCloseConfirm response type.
-
-### MsgChannelCloseInit
-MsgChannelCloseInit defines a msg sent by a Relayer to Chain A to close a channel with Chain B.
-Fields: `port_id` (string); `channel_id` (string); `signer` (string).
-
-### MsgChannelCloseInitResponse
-MsgChannelCloseInitResponse defines the Msg/ChannelCloseInit response type.
-
-### MsgChannelOpenAck
-MsgChannelOpenAck defines a msg sent by a Relayer to Chain A to acknowledge the change of channel state to TRYOPEN on Chain B.
-
-### MsgChannelOpenAckResponse
-MsgChannelOpenAckResponse defines the Msg/ChannelOpenAck response type.
-
-### MsgChannelOpenConfirm
-MsgChannelOpenConfirm defines a msg sent by a Relayer to Chain B to acknowledge the change of channel state to OPEN on Chain A.
-
-### MsgChannelOpenConfirmResponse
-MsgChannelOpenConfirmResponse defines the Msg/ChannelOpenConfirm response type.
-
-### MsgChannelOpenInit
-MsgChannelOpenInit defines an sdk.Msg to initialize a channel handshake. It is called by a relayer on Chain A.
-Fields: `port_id` (string); `channel` (Channel); `signer` (string).
-
-### MsgChannelOpenInitResponse
-MsgChannelOpenInitResponse defines the Msg/ChannelOpenInit response type.
-
-### MsgChannelOpenTry
-MsgChannelOpenTry defines a msg sent by a Relayer to try to open a channel on Chain B.
-Fields: `port_id` (string); `previous_channel_id` (string): in the case of crossing hello's, when both chains call OpenInit, we need the channel identifier of the previous channel in state INIT; `channel` (Channel); `counterparty_version` (string); `proof_init` (bytes); `proof_height` (ibc.core.client.v1.Height); `signer` (string).
-
-### MsgChannelOpenTryResponse
-MsgChannelOpenTryResponse defines the Msg/ChannelOpenTry response type.
-
-### MsgRecvPacket
-MsgRecvPacket receives an incoming IBC packet.
-
-### MsgRecvPacketResponse
-MsgRecvPacketResponse defines the Msg/RecvPacket response type.
-
-### MsgTimeout
-MsgTimeout receives a timed-out packet.
-
-### MsgTimeoutOnClose
-MsgTimeoutOnClose times out a packet upon counterparty channel closure.
-
-### MsgTimeoutOnCloseResponse
-MsgTimeoutOnCloseResponse defines the Msg/TimeoutOnClose response type.
-
-### MsgTimeoutResponse
-MsgTimeoutResponse defines the Msg/Timeout response type.
-
-### Msg
-Msg defines the ibc/channel Msg service.
-
-## ibc/core/client/v1/genesis.proto
-
-### GenesisMetadata
-GenesisMetadata defines the genesis type for metadata that clients may return with ExportMetadata.
-Fields: `key` (bytes): store key of metadata without clientID-prefix; `value` (bytes): metadata value.
-
-### GenesisState
-GenesisState defines the ibc client submodule's genesis state.
-Fields: `clients` (IdentifiedClientState, repeated): client states with their corresponding identifiers; `clients_consensus` (ClientConsensusStates, repeated): consensus states from each client; `clients_metadata` (IdentifiedGenesisMetadata, repeated): metadata from each client; `params` (Params); `create_localhost` (bool): create localhost on initialization; `next_client_sequence` (uint64): the sequence for the next generated client identifier.
-
-### IdentifiedGenesisMetadata
-IdentifiedGenesisMetadata has the client metadata with the corresponding client id.
-Fields: `client_id` (string); `client_metadata` (GenesisMetadata, repeated).
-
-## ibc/core/client/v1/query.proto
-
-### QueryClientParamsRequest
-QueryClientParamsRequest is the request type for the Query/ClientParams RPC method.
-
-### QueryClientParamsResponse
-QueryClientParamsResponse is the response type for the Query/ClientParams RPC method.
-Fields: `params` (Params): params defines the parameters of the module.
-
-### QueryClientStateRequest
-QueryClientStateRequest is the request type for the Query/ClientState RPC method.
-Fields: `client_id` (string): client state unique identifier.
-
-### QueryClientStateResponse
-QueryClientStateResponse is the response type for the Query/ClientState RPC method. Besides the client state, it includes a proof and the height from which the proof was retrieved.
-Fields: `client_state` (google.protobuf.Any): client state associated with the request identifier; `proof` (bytes): merkle proof of existence; `proof_height` (Height): height at which the proof was retrieved.
-
-### QueryClientStatesRequest
-QueryClientStatesRequest is the request type for the Query/ClientStates RPC method.
-
-### QueryClientStatesResponse
-QueryClientStatesResponse is the response type for the Query/ClientStates RPC method.
-
-### QueryClientStatusRequest
-QueryClientStatusRequest is the request type for the Query/ClientStatus RPC method.
-Fields: `client_id` (string): client unique identifier.
-
-### QueryClientStatusResponse
-QueryClientStatusResponse is the response type for the Query/ClientStatus RPC method. It returns the current status of the IBC client.
-Fields: `status` (string).
-
-### QueryConsensusStateRequest
-QueryConsensusStateRequest is the request type for the Query/ConsensusState RPC method. Besides the consensus state, it includes a proof and the height from which the proof was retrieved.
-Fields: `client_id` (string): client identifier; `revision_number` (uint64): consensus state revision number; `revision_height` (uint64): consensus state revision height; `latest_height` (bool): latest_height overrides the height field and queries the latest stored ConsensusState.
-
-### QueryConsensusStateResponse
-QueryConsensusStateResponse is the response type for the Query/ConsensusState RPC method.
-Fields: `consensus_state` (google.protobuf.Any): consensus state associated with the client identifier at the given height; `proof` (bytes): merkle proof of existence; `proof_height` (Height): height at which the proof was retrieved.
-
-### QueryConsensusStatesRequest
-QueryConsensusStatesRequest is the request type for the Query/ConsensusStates RPC method.
-
-### QueryConsensusStatesResponse
-QueryConsensusStatesResponse is the response type for the Query/ConsensusStates RPC method.
-
-### QueryUpgradedClientStateRequest
-QueryUpgradedClientStateRequest is the request type for the Query/UpgradedClientState RPC method.
-
-### QueryUpgradedClientStateResponse
-QueryUpgradedClientStateResponse is the response type for the Query/UpgradedClientState RPC method.
-Fields: `upgraded_client_state` (google.protobuf.Any): client state associated with the request identifier.
-
-### QueryUpgradedConsensusStateRequest
-QueryUpgradedConsensusStateRequest is the request type for the Query/UpgradedConsensusState RPC method.
-
-### QueryUpgradedConsensusStateResponse
-QueryUpgradedConsensusStateResponse is the response type for the Query/UpgradedConsensusState RPC method.
-Fields: `upgraded_consensus_state` (google.protobuf.Any): consensus state associated with the request identifier.
-
-### Query
-Query defines the gRPC querier service. Methods with HTTP bindings:
-
-| Method Name | Method | Pattern |
-| ----------- | ------ | ------- |
-| `ClientState` | GET | /ibc/core/client/v1/client_states/{client_id} |
-| `ClientStates` | GET | /ibc/core/client/v1/client_states |
-| `ConsensusState` | GET | /ibc/core/client/v1/consensus_states/{client_id}/revision/{revision_number}/height/{revision_height} |
-| `ConsensusStates` | GET | /ibc/core/client/v1/consensus_states/{client_id} |
-| `ClientStatus` | GET | /ibc/core/client/v1/client_status/{client_id} |
-| `ClientParams` | GET | /ibc/client/v1/params |
-| `UpgradedClientState` | GET | /ibc/core/client/v1/upgraded_client_states |
-| `UpgradedConsensusState` | GET | /ibc/core/client/v1/upgraded_consensus_states |
-
-## ibc/core/client/v1/tx.proto
-
-### MsgCreateClient
-MsgCreateClient defines a message to create an IBC client.
-Fields: `client_state` (google.protobuf.Any): light client state; `consensus_state` (google.protobuf.Any): consensus state associated with the client that corresponds to a given height; `signer` (string): signer address.
-
-### MsgCreateClientResponse
-MsgCreateClientResponse defines the Msg/CreateClient response type.
-
-### MsgSubmitMisbehaviour
-MsgSubmitMisbehaviour defines an sdk.Msg type that submits Evidence for light client misbehaviour.
-Fields: `client_id` (string): client unique identifier; `misbehaviour` (google.protobuf.Any): misbehaviour used for freezing the light client; `signer` (string): signer address.
-
-### MsgSubmitMisbehaviourResponse
-MsgSubmitMisbehaviourResponse defines the Msg/SubmitMisbehaviour response type.
-
-### MsgUpdateClient
-MsgUpdateClient defines an sdk.Msg to update an IBC client state using the given header.
-Fields: `client_id` (string): client unique identifier; `header` (google.protobuf.Any): header to update the light client; `signer` (string): signer address.
-
-### MsgUpdateClientResponse
-MsgUpdateClientResponse defines the Msg/UpdateClient response type.
-
-### MsgUpgradeClient
-MsgUpgradeClient defines an sdk.Msg to upgrade an IBC client to a new client state.
-Fields: `client_id` (string): client unique identifier; `client_state` (google.protobuf.Any): upgraded client state; `consensus_state` (google.protobuf.Any): upgraded consensus state, only contains enough information to serve as a basis of trust in update logic; `proof_upgrade_client` (bytes): proof that old chain committed to new client; `proof_upgrade_consensus_state` (bytes): proof that old chain committed to new consensus state; `signer` (string): signer address.
-
-### MsgUpgradeClientResponse
-MsgUpgradeClientResponse defines the Msg/UpgradeClient response type.
-
-### Msg
-Msg defines the ibc/client Msg service.
-
-## ibc/core/commitment/v1/commitment.proto
-
-### MerklePath
-MerklePath is the path used to verify commitment proofs, which can be an arbitrary structured object (defined by a commitment type). MerklePath is represented from root-to-leaf.
-Fields: `key_path` (string, repeated).
-
-### MerklePrefix
-MerklePrefix is merkle path prefixed to the key. The constructed key from the Path and the key will be append(Path.KeyPath, append(Path.KeyPrefix, key...)).
-Fields: `key_prefix` (bytes).
-
-### MerkleProof
-MerkleProof is a wrapper type over a chain of CommitmentProofs. It demonstrates membership or non-membership for an element or set of elements, verifiable in conjunction with a known commitment root. Proofs should be succinct. MerkleProofs are ordered from leaf-to-root.
-
-### MerkleRoot
-MerkleRoot defines a merkle root hash. In the Cosmos SDK, the AppHash of a block header becomes the root.
-Fields: `hash` (bytes).
-
-## ibc/core/connection/v1/connection.proto
-
-### ClientPaths
-ClientPaths define all the connection paths for a client state.
-Fields: `paths` (string, repeated): list of connection paths.
-
-### ConnectionEnd
-ConnectionEnd defines a stateful object on a chain connected to another separate one. NOTE: there must only be 2 defined ConnectionEnds to establish a connection between two chains.
-Fields: `client_id` (string): client associated with this connection; `versions` (Version, repeated): IBC version which can be utilised to determine encodings or protocols for channels or packets utilising this connection; `state` (State): current state of the connection end; `counterparty` (Counterparty): counterparty chain associated with this connection; `delay_period` (uint64): delay period that must pass before a consensus state can be used for packet verification (NOTE: delay period logic is only implemented by some clients).
-
-### ConnectionPaths
-ConnectionPaths define all the connection paths for a given client state.
-Fields: `client_id` (string): client state unique identifier; `paths` (string, repeated): list of connection paths.
-
-### Counterparty
-Counterparty defines the counterparty chain associated with a connection end.
-Fields: `client_id` (string): identifies the client on the counterparty chain associated with a given connection; `connection_id` (string): identifies the connection end on the counterparty chain associated with a given connection; `prefix` (ibc.core.commitment.v1.MerklePrefix): commitment merkle prefix of the counterparty chain.
-
-### IdentifiedConnection
-IdentifiedConnection defines a connection with an additional connection identifier field.
-Fields: `id` (string): connection identifier; `client_id` (string): client associated with this connection; `versions` (Version, repeated): IBC version which can be utilised to determine encodings or protocols for channels or packets utilising this connection; `state` (State): current state of the connection end; `counterparty` (Counterparty): counterparty chain associated with this connection; `delay_period` (uint64): delay period associated with this connection.
-
-### Version
-Version defines the versioning scheme used to negotiate the IBC version in the connection handshake.
-Fields: `identifier` (string): unique version identifier; `features` (string, repeated): list of features compatible with the specified identifier.
-
-### State
-State defines if a connection is in one of the following states: INIT, TRYOPEN, OPEN or UNINITIALIZED.
-Values: `STATE_UNINITIALIZED_UNSPECIFIED` (0): Default State; `STATE_INIT` (1): A connection end has just started the opening handshake; `STATE_TRYOPEN` (2): A connection end has acknowledged the handshake step on the counterparty chain; `STATE_OPEN` (3): A connection end has completed the handshake.
-
-## ibc/core/connection/v1/genesis.proto
-
-### GenesisState
-GenesisState defines the ibc connection submodule's genesis state.
-Fields: `connections` (IdentifiedConnection, repeated); `client_connection_paths` (ConnectionPaths, repeated); `next_connection_sequence` (uint64): the sequence for the next generated connection identifier.
-
-## ibc/core/connection/v1/query.proto
-
-### QueryClientConnectionsRequest
-QueryClientConnectionsRequest is the request type for the Query/ClientConnections RPC method.
-Fields: `client_id` (string): client identifier associated with a connection.
-
-### QueryClientConnectionsResponse
-QueryClientConnectionsResponse is the response type for the Query/ClientConnections RPC method.
-Fields: `connection_paths` (string, repeated): slice of all the connection paths associated with a client; `proof` (bytes): merkle proof of existence; `proof_height` (ibc.core.client.v1.Height): height at which the proof was generated.
-
-### QueryConnectionClientStateRequest
-QueryConnectionClientStateRequest is the request type for the Query/ConnectionClientState RPC method.
-Fields: `connection_id` (string): connection identifier.
-
-### QueryConnectionClientStateResponse
-QueryConnectionClientStateResponse is the response type for the Query/ConnectionClientState RPC method.
-
-### QueryConnectionConsensusStateRequest
-QueryConnectionConsensusStateRequest is the request type for the Query/ConnectionConsensusState RPC method.
-Fields: `connection_id` (string): connection identifier; `revision_number` (uint64); `revision_height` (uint64).
-
-### QueryConnectionConsensusStateResponse
-QueryConnectionConsensusStateResponse is the response type for the Query/ConnectionConsensusState RPC method.
-Fields: `consensus_state` (google.protobuf.Any): consensus state associated with the channel; `client_id` (string): client ID associated with the consensus state; `proof` (bytes): merkle proof of existence; `proof_height` (ibc.core.client.v1.Height): height at which the proof was retrieved.
-
-### QueryConnectionRequest
-QueryConnectionRequest is the request type for the Query/Connection RPC method.
-Fields: `connection_id` (string): connection unique identifier.
-
-### QueryConnectionResponse
-QueryConnectionResponse is the response type for the Query/Connection RPC method. Besides the connection end, it includes a proof and the height from which the proof was retrieved.
-Fields: `connection` (ConnectionEnd): connection associated with the request identifier; `proof` (bytes): merkle proof of existence; `proof_height` (ibc.core.client.v1.Height): height at which the proof was retrieved.
-
-### QueryConnectionsRequest
-QueryConnectionsRequest is the request type for the Query/Connections RPC method.
-
-### QueryConnectionsResponse
-QueryConnectionsResponse is the response type for the Query/Connections RPC method.
-
-### Query
-Query defines the gRPC querier service. Methods with HTTP bindings:
-
-| Method Name | Method | Pattern |
-| ----------- | ------ | ------- |
-| `Connection` | GET | /ibc/core/connection/v1/connections/{connection_id} |
-| `Connections` | GET | /ibc/core/connection/v1/connections |
-| `ClientConnections` | GET | /ibc/core/connection/v1/client_connections/{client_id} |
-| `ConnectionClientState` | GET | /ibc/core/connection/v1/connections/{connection_id}/client_state |
-| `ConnectionConsensusState` | GET | /ibc/core/connection/v1/connections/{connection_id}/consensus_state/revision/{revision_number}/height/{revision_height} |
-
-## ibc/core/connection/v1/tx.proto
-
-### MsgConnectionOpenAck
-MsgConnectionOpenAck defines a msg sent by a Relayer to Chain A to acknowledge the change of connection state to TRYOPEN on Chain B.
-Fields: `connection_id` (string); `counterparty_connection_id` (string); `version` (Version); `client_state` (google.protobuf.Any); `proof_height` (ibc.core.client.v1.Height); `proof_try` (bytes): proof of the initialization of the connection on Chain B: `UNINITIALIZED -> TRYOPEN`; `proof_client` (bytes): proof of client state included in message; `proof_consensus` (bytes): proof of client consensus state; `consensus_height` (ibc.core.client.v1.Height); `signer` (string).
-
-### MsgConnectionOpenAckResponse
-MsgConnectionOpenAckResponse defines the Msg/ConnectionOpenAck response type.
-
-### MsgConnectionOpenConfirm
-MsgConnectionOpenConfirm defines a msg sent by a Relayer to Chain B to acknowledge the change of connection state to OPEN on Chain A.
-Fields: `connection_id` (string); `proof_ack` (bytes): proof for the change of the connection state on Chain A: `INIT -> OPEN`; `proof_height` (ibc.core.client.v1.Height); `signer` (string).
-
-### MsgConnectionOpenConfirmResponse
-MsgConnectionOpenConfirmResponse defines the Msg/ConnectionOpenConfirm response type.
-
-### MsgConnectionOpenInit
-MsgConnectionOpenInit defines the msg sent by an account on Chain A to initialize a connection with Chain B.
-Fields: `client_id` (string); `counterparty` (Counterparty); `version` (Version); `delay_period` (uint64); `signer` (string).
-
-### MsgConnectionOpenInitResponse
-MsgConnectionOpenInitResponse defines the Msg/ConnectionOpenInit response type.
-
-### MsgConnectionOpenTry
-MsgConnectionOpenTry defines a msg sent by a Relayer to try to open a connection on Chain B.
-Fields: `client_id` (string); `previous_connection_id` (string): in the case of crossing hello's, when both chains call OpenInit, we need the connection identifier of the previous connection in state INIT; `client_state` (google.protobuf.Any); `counterparty` (Counterparty); `delay_period` (uint64); `counterparty_versions` (Version, repeated); `proof_height` (ibc.core.client.v1.Height); `proof_init` (bytes): proof of the initialization of the connection on Chain A: `UNINITIALIZED -> INIT`; `proof_client` (bytes): proof of client state included in message; `proof_consensus` (bytes): proof of client consensus state; `consensus_height` (ibc.core.client.v1.Height); `signer` (string).
-
-### MsgConnectionOpenTryResponse
-MsgConnectionOpenTryResponse defines the Msg/ConnectionOpenTry response type.
-
-### Msg
-Msg defines the ibc/connection Msg service.
-
-## ibc/core/types/v1/genesis.proto
-
-### GenesisState
-GenesisState defines the ibc module's genesis state.
-
-## ibc/lightclients/localhost/v1/localhost.proto
-
-### ClientState
-ClientState defines a loopback (localhost) client. It requires (read-only) access to keys outside the client prefix.
-Fields: `chain_id` (string): self chain ID; `height` (ibc.core.client.v1.Height): self latest block height.
-
-## ibc/lightclients/solomachine/v1/solomachine.proto
-
-### ChannelStateData
-ChannelStateData returns the SignBytes data for channel state verification.
-
-### ClientState
-ClientState defines a solo machine client that tracks the current consensus state and if the client is frozen.
-Fields: `sequence` (uint64): latest sequence of the client state; `frozen_sequence` (uint64): frozen sequence of the solo machine; `consensus_state` (ConsensusState); `allow_update_after_proposal` (bool): when set to true, will allow governance to update a solo machine client. The client will be unfrozen if it is frozen.
-
-### ClientStateData
-ClientStateData returns the SignBytes data for client state verification.
-
-### ConnectionStateData
-ConnectionStateData returns the SignBytes data for connection state verification.
-
-### ConsensusState
-ConsensusState defines a solo machine consensus state. The sequence of a consensus state is contained in the "height" key used in storing the consensus state.
-Fields: `public_key` (google.protobuf.Any): public key of the solo machine; `diversifier` (string): diversifier allows the same public key to be re-used across different solo machine clients (potentially on different chains) without being considered misbehaviour; `timestamp` (uint64).
-
-### ConsensusStateData
-ConsensusStateData returns the SignBytes data for consensus state verification.
-
-### Header
-Header defines a solo machine consensus header.
-Fields: `sequence` (uint64): sequence to update solo machine public key at; `timestamp` (uint64); `signature` (bytes); `new_public_key` (google.protobuf.Any); `new_diversifier` (string).
-
-### HeaderData
-HeaderData returns the SignBytes data for update verification.
-Fields: `new_pub_key` (google.protobuf.Any): header public key; `new_diversifier` (string): header diversifier.
-
-### Misbehaviour
-Misbehaviour defines misbehaviour for a solo machine which consists of a sequence and two signatures over different messages at that sequence.
-
-### NextSequenceRecvData
-NextSequenceRecvData returns the SignBytes data for verification of the next sequence to be received.
-Fields: `path` (bytes); `next_seq_recv` (uint64).
-
-### PacketAcknowledgementData
-PacketAcknowledgementData returns the SignBytes data for acknowledgement verification.
-Fields: `path` (bytes); `acknowledgement` (bytes).
-
-### PacketCommitmentData
-PacketCommitmentData returns the SignBytes data for packet commitment verification.
-Fields: `path` (bytes); `commitment` (bytes).
-
-### PacketReceiptAbsenceData
-PacketReceiptAbsenceData returns the SignBytes data for packet receipt absence verification.
-Fields: `path` (bytes).
-
-### SignBytes
-SignBytes defines the signed bytes used for signature verification.
-Fields: `sequence` (uint64); `timestamp` (uint64); `diversifier` (string); `data_type` (DataType): type of the data used; `data` (bytes): marshaled data.
-
-### SignatureAndData
-SignatureAndData contains a signature and the data signed over to create that signature.
-Fields: `signature` (bytes); `data_type` (DataType); `data` (bytes); `timestamp` (uint64).
-
-### TimestampedSignatureData
-TimestampedSignatureData contains the signature data and the timestamp of the signature.
-Fields: `signature_data` (bytes); `timestamp` (uint64).
-
-### DataType
-DataType defines the type of solo machine proof being created. This is done to preserve uniqueness of different data sign byte encodings.
-Values: `DATA_TYPE_UNINITIALIZED_UNSPECIFIED` (0): Default State; `DATA_TYPE_CLIENT_STATE` (1): Data type for client state verification; `DATA_TYPE_CONSENSUS_STATE` (2): Data type for consensus state verification; `DATA_TYPE_CONNECTION_STATE` (3): Data type for connection state verification; `DATA_TYPE_CHANNEL_STATE` (4): Data type for channel state verification; `DATA_TYPE_PACKET_COMMITMENT` (5): Data type for packet commitment verification; `DATA_TYPE_PACKET_ACKNOWLEDGEMENT` (6): Data type for packet acknowledgement verification; `DATA_TYPE_PACKET_RECEIPT_ABSENCE` (7): Data type for packet receipt absence verification; `DATA_TYPE_NEXT_SEQUENCE_RECV` (8): Data type for next sequence recv verification; `DATA_TYPE_HEADER` (9): Data type for header verification.
-
-## ibc/lightclients/tendermint/v1/tendermint.proto
-
-### ClientState
-ClientState from Tendermint tracks the current validator set, latest height, and a possible frozen height.
-Fields: `chain_id` (string); `trust_level` (Fraction); `trusting_period` (google.protobuf.Duration): duration of the period since the LatestTimestamp during which the submitted headers are valid for upgrade; `unbonding_period` (google.protobuf.Duration): duration of the staking unbonding period; `max_clock_drift` (google.protobuf.Duration): defines how much a new (untrusted) header's Time can drift into the future; `frozen_height` (ibc.core.client.v1.Height): block height when the client was frozen due to a misbehaviour; `latest_height` (ibc.core.client.v1.Height): latest height the client was updated to; `proof_specs` (ics23.ProofSpec, repeated): proof specifications used in verifying counterparty state; `upgrade_path` (string, repeated): path at which the next upgraded client will be committed. Each element corresponds to the key for a single CommitmentProof in the chained proof. NOTE: ClientState must be stored under `{upgradePath}/{upgradeHeight}/clientState` and ConsensusState must be stored under `{upgradePath}/{upgradeHeight}/consensusState`. For SDK chains using the default upgrade module, upgrade_path should be `[]string{"upgrade", "upgradedIBCState"}`; `allow_update_after_expiry` (bool): when set to true, will allow governance to recover a client which has expired; `allow_update_after_misbehaviour` (bool): when set to true, will allow governance to unfreeze a client whose chain has experienced a misbehaviour event.
-
-### ConsensusState
-ConsensusState defines the consensus state from Tendermint.
-
-### Fraction
-Fraction defines the protobuf message type for tmmath.Fraction that only supports positive values.
-Fields: `numerator` (uint64); `denominator` (uint64).
-
-### Header
-Header defines the Tendermint client consensus Header. It encapsulates all the information necessary to update from a trusted Tendermint ConsensusState. The inclusion of TrustedHeight and TrustedValidators allows this update to process correctly, so long as the ConsensusState for the TrustedHeight exists; this removes race conditions among relayers. The SignedHeader and ValidatorSet are the new untrusted update fields for the client. The TrustedHeight is the height of a stored ConsensusState on the client that will be used to verify the new untrusted header. The trusted ConsensusState must be within the unbonding period of the current time in order to correctly verify, and the TrustedValidators must hash to TrustedConsensusState.NextValidatorsHash since that is the last trusted validator set at the TrustedHeight.
-
-### Misbehaviour
-Misbehaviour is a wrapper over two conflicting Headers that implements the Misbehaviour interface expected by ICS-02.
-Fields: `client_id` (string); `header_1` (Header); `header_2` (Header).
-
-## Scalar Value Types
-
-| .proto Type | Notes | C++ | Java | Python | Go | C# | PHP | Ruby |
-| ----------- | ----- | --- | ---- | ------ | -- | -- | --- | ---- |
-| double | | double | double | float | float64 | double | float | Float |
-| float | | float | float | float | float32 | float | float | Float |
-| int32 | Uses variable-length encoding. Inefficient for encoding negative numbers; if your field is likely to have negative values, use sint32 instead. | int32 | int | int | int32 | int | integer | Bignum or Fixnum (as required) |
-| int64 | Uses variable-length encoding. Inefficient for encoding negative numbers; if your field is likely to have negative values, use sint64 instead. | int64 | long | int/long | int64 | long | integer/string | Bignum |
-| uint32 | Uses variable-length encoding. | uint32 | int | int/long | uint32 | uint | integer | Bignum or Fixnum (as required) |
-| uint64 | Uses variable-length encoding. | uint64 | long | int/long | uint64 | ulong | integer/string | Bignum or Fixnum (as required) |
-| sint32 | Uses variable-length encoding. Signed int value. These more efficiently encode negative numbers than regular int32s. | int32 | int | int | int32 | int | integer | Bignum or Fixnum (as required) |
-| sint64 | Uses variable-length encoding. Signed int value. These more efficiently encode negative numbers than regular int64s. | int64 | long | int/long | int64 | long | integer/string | Bignum |
-| fixed32 | Always four bytes. More efficient than uint32 if values are often greater than 2^28. | uint32 | int | int | uint32 | uint | integer | Bignum or Fixnum (as required) |
-| fixed64 | Always eight bytes. More efficient than uint64 if values are often greater than 2^56. | uint64 | long | int/long | uint64 | ulong | integer/string | Bignum |
-| sfixed32 | Always four bytes. | int32 | int | int | int32 | int | integer | Bignum or Fixnum (as required) |
-| sfixed64 | Always eight bytes. | int64 | long | int/long | int64 | long | integer/string | Bignum |
-| bool | | bool | boolean | boolean | bool | bool | boolean | TrueClass/FalseClass |
-| string | A string must always contain UTF-8 encoded or 7-bit ASCII text. | string | String | str/unicode | string | string | string | String (UTF-8) |
-| bytes | May contain any arbitrary sequence of bytes. | string | ByteString | str | []byte | ByteString | string | String (ASCII-8BIT) |
-
+
+# Protobuf Documentation
+
+
+## Table of Contents
+
+- [ibc/apps/transfer/v1/transfer.proto](#ibc/apps/transfer/v1/transfer.proto)
+ - [DenomTrace](#ibc.apps.transfer.v1.DenomTrace)
+ - [FungibleTokenPacketData](#ibc.apps.transfer.v1.FungibleTokenPacketData)
+ - [Params](#ibc.apps.transfer.v1.Params)
+
+- [ibc/apps/transfer/v1/genesis.proto](#ibc/apps/transfer/v1/genesis.proto)
+ - [GenesisState](#ibc.apps.transfer.v1.GenesisState)
+
+- [ibc/apps/transfer/v1/query.proto](#ibc/apps/transfer/v1/query.proto)
+ - [QueryDenomTraceRequest](#ibc.apps.transfer.v1.QueryDenomTraceRequest)
+ - [QueryDenomTraceResponse](#ibc.apps.transfer.v1.QueryDenomTraceResponse)
+ - [QueryDenomTracesRequest](#ibc.apps.transfer.v1.QueryDenomTracesRequest)
+ - [QueryDenomTracesResponse](#ibc.apps.transfer.v1.QueryDenomTracesResponse)
+ - [QueryParamsRequest](#ibc.apps.transfer.v1.QueryParamsRequest)
+ - [QueryParamsResponse](#ibc.apps.transfer.v1.QueryParamsResponse)
+
+ - [Query](#ibc.apps.transfer.v1.Query)
+
+- [ibc/core/client/v1/client.proto](#ibc/core/client/v1/client.proto)
+ - [ClientConsensusStates](#ibc.core.client.v1.ClientConsensusStates)
+ - [ClientUpdateProposal](#ibc.core.client.v1.ClientUpdateProposal)
+ - [ConsensusStateWithHeight](#ibc.core.client.v1.ConsensusStateWithHeight)
+ - [Height](#ibc.core.client.v1.Height)
+ - [IdentifiedClientState](#ibc.core.client.v1.IdentifiedClientState)
+ - [Params](#ibc.core.client.v1.Params)
+ - [UpgradeProposal](#ibc.core.client.v1.UpgradeProposal)
+
+- [ibc/apps/transfer/v1/tx.proto](#ibc/apps/transfer/v1/tx.proto)
+ - [MsgTransfer](#ibc.apps.transfer.v1.MsgTransfer)
+ - [MsgTransferResponse](#ibc.apps.transfer.v1.MsgTransferResponse)
+
+ - [Msg](#ibc.apps.transfer.v1.Msg)
+
+- [ibc/core/channel/v1/channel.proto](#ibc/core/channel/v1/channel.proto)
+ - [Acknowledgement](#ibc.core.channel.v1.Acknowledgement)
+ - [Channel](#ibc.core.channel.v1.Channel)
+ - [Counterparty](#ibc.core.channel.v1.Counterparty)
+ - [IdentifiedChannel](#ibc.core.channel.v1.IdentifiedChannel)
+ - [Packet](#ibc.core.channel.v1.Packet)
+ - [PacketState](#ibc.core.channel.v1.PacketState)
+
+ - [Order](#ibc.core.channel.v1.Order)
+ - [State](#ibc.core.channel.v1.State)
+
+- [ibc/core/channel/v1/genesis.proto](#ibc/core/channel/v1/genesis.proto)
+ - [GenesisState](#ibc.core.channel.v1.GenesisState)
+ - [PacketSequence](#ibc.core.channel.v1.PacketSequence)
+
+- [ibc/core/channel/v1/query.proto](#ibc/core/channel/v1/query.proto)
+ - [QueryChannelClientStateRequest](#ibc.core.channel.v1.QueryChannelClientStateRequest)
+ - [QueryChannelClientStateResponse](#ibc.core.channel.v1.QueryChannelClientStateResponse)
+ - [QueryChannelConsensusStateRequest](#ibc.core.channel.v1.QueryChannelConsensusStateRequest)
+ - [QueryChannelConsensusStateResponse](#ibc.core.channel.v1.QueryChannelConsensusStateResponse)
+ - [QueryChannelRequest](#ibc.core.channel.v1.QueryChannelRequest)
+ - [QueryChannelResponse](#ibc.core.channel.v1.QueryChannelResponse)
+ - [QueryChannelsRequest](#ibc.core.channel.v1.QueryChannelsRequest)
+ - [QueryChannelsResponse](#ibc.core.channel.v1.QueryChannelsResponse)
+ - [QueryConnectionChannelsRequest](#ibc.core.channel.v1.QueryConnectionChannelsRequest)
+ - [QueryConnectionChannelsResponse](#ibc.core.channel.v1.QueryConnectionChannelsResponse)
+ - [QueryNextSequenceReceiveRequest](#ibc.core.channel.v1.QueryNextSequenceReceiveRequest)
+ - [QueryNextSequenceReceiveResponse](#ibc.core.channel.v1.QueryNextSequenceReceiveResponse)
+ - [QueryPacketAcknowledgementRequest](#ibc.core.channel.v1.QueryPacketAcknowledgementRequest)
+ - [QueryPacketAcknowledgementResponse](#ibc.core.channel.v1.QueryPacketAcknowledgementResponse)
+ - [QueryPacketAcknowledgementsRequest](#ibc.core.channel.v1.QueryPacketAcknowledgementsRequest)
+ - [QueryPacketAcknowledgementsResponse](#ibc.core.channel.v1.QueryPacketAcknowledgementsResponse)
+ - [QueryPacketCommitmentRequest](#ibc.core.channel.v1.QueryPacketCommitmentRequest)
+ - [QueryPacketCommitmentResponse](#ibc.core.channel.v1.QueryPacketCommitmentResponse)
+ - [QueryPacketCommitmentsRequest](#ibc.core.channel.v1.QueryPacketCommitmentsRequest)
+ - [QueryPacketCommitmentsResponse](#ibc.core.channel.v1.QueryPacketCommitmentsResponse)
+ - [QueryPacketReceiptRequest](#ibc.core.channel.v1.QueryPacketReceiptRequest)
+ - [QueryPacketReceiptResponse](#ibc.core.channel.v1.QueryPacketReceiptResponse)
+ - [QueryUnreceivedAcksRequest](#ibc.core.channel.v1.QueryUnreceivedAcksRequest)
+ - [QueryUnreceivedAcksResponse](#ibc.core.channel.v1.QueryUnreceivedAcksResponse)
+ - [QueryUnreceivedPacketsRequest](#ibc.core.channel.v1.QueryUnreceivedPacketsRequest)
+ - [QueryUnreceivedPacketsResponse](#ibc.core.channel.v1.QueryUnreceivedPacketsResponse)
+
+ - [Query](#ibc.core.channel.v1.Query)
+
+- [ibc/core/channel/v1/tx.proto](#ibc/core/channel/v1/tx.proto)
+ - [MsgAcknowledgement](#ibc.core.channel.v1.MsgAcknowledgement)
+ - [MsgAcknowledgementResponse](#ibc.core.channel.v1.MsgAcknowledgementResponse)
+ - [MsgChannelCloseConfirm](#ibc.core.channel.v1.MsgChannelCloseConfirm)
+ - [MsgChannelCloseConfirmResponse](#ibc.core.channel.v1.MsgChannelCloseConfirmResponse)
+ - [MsgChannelCloseInit](#ibc.core.channel.v1.MsgChannelCloseInit)
+ - [MsgChannelCloseInitResponse](#ibc.core.channel.v1.MsgChannelCloseInitResponse)
+ - [MsgChannelOpenAck](#ibc.core.channel.v1.MsgChannelOpenAck)
+ - [MsgChannelOpenAckResponse](#ibc.core.channel.v1.MsgChannelOpenAckResponse)
+ - [MsgChannelOpenConfirm](#ibc.core.channel.v1.MsgChannelOpenConfirm)
+ - [MsgChannelOpenConfirmResponse](#ibc.core.channel.v1.MsgChannelOpenConfirmResponse)
+ - [MsgChannelOpenInit](#ibc.core.channel.v1.MsgChannelOpenInit)
+ - [MsgChannelOpenInitResponse](#ibc.core.channel.v1.MsgChannelOpenInitResponse)
+ - [MsgChannelOpenTry](#ibc.core.channel.v1.MsgChannelOpenTry)
+ - [MsgChannelOpenTryResponse](#ibc.core.channel.v1.MsgChannelOpenTryResponse)
+ - [MsgRecvPacket](#ibc.core.channel.v1.MsgRecvPacket)
+ - [MsgRecvPacketResponse](#ibc.core.channel.v1.MsgRecvPacketResponse)
+ - [MsgTimeout](#ibc.core.channel.v1.MsgTimeout)
+ - [MsgTimeoutOnClose](#ibc.core.channel.v1.MsgTimeoutOnClose)
+ - [MsgTimeoutOnCloseResponse](#ibc.core.channel.v1.MsgTimeoutOnCloseResponse)
+ - [MsgTimeoutResponse](#ibc.core.channel.v1.MsgTimeoutResponse)
+
+ - [Msg](#ibc.core.channel.v1.Msg)
+
+- [ibc/core/client/v1/genesis.proto](#ibc/core/client/v1/genesis.proto)
+ - [GenesisMetadata](#ibc.core.client.v1.GenesisMetadata)
+ - [GenesisState](#ibc.core.client.v1.GenesisState)
+ - [IdentifiedGenesisMetadata](#ibc.core.client.v1.IdentifiedGenesisMetadata)
+
+- [ibc/core/client/v1/query.proto](#ibc/core/client/v1/query.proto)
+ - [QueryClientParamsRequest](#ibc.core.client.v1.QueryClientParamsRequest)
+ - [QueryClientParamsResponse](#ibc.core.client.v1.QueryClientParamsResponse)
+ - [QueryClientStateRequest](#ibc.core.client.v1.QueryClientStateRequest)
+ - [QueryClientStateResponse](#ibc.core.client.v1.QueryClientStateResponse)
+ - [QueryClientStatesRequest](#ibc.core.client.v1.QueryClientStatesRequest)
+ - [QueryClientStatesResponse](#ibc.core.client.v1.QueryClientStatesResponse)
+ - [QueryClientStatusRequest](#ibc.core.client.v1.QueryClientStatusRequest)
+ - [QueryClientStatusResponse](#ibc.core.client.v1.QueryClientStatusResponse)
+ - [QueryConsensusStateRequest](#ibc.core.client.v1.QueryConsensusStateRequest)
+ - [QueryConsensusStateResponse](#ibc.core.client.v1.QueryConsensusStateResponse)
+ - [QueryConsensusStatesRequest](#ibc.core.client.v1.QueryConsensusStatesRequest)
+ - [QueryConsensusStatesResponse](#ibc.core.client.v1.QueryConsensusStatesResponse)
+ - [QueryUpgradedClientStateRequest](#ibc.core.client.v1.QueryUpgradedClientStateRequest)
+ - [QueryUpgradedClientStateResponse](#ibc.core.client.v1.QueryUpgradedClientStateResponse)
+ - [QueryUpgradedConsensusStateRequest](#ibc.core.client.v1.QueryUpgradedConsensusStateRequest)
+ - [QueryUpgradedConsensusStateResponse](#ibc.core.client.v1.QueryUpgradedConsensusStateResponse)
+
+ - [Query](#ibc.core.client.v1.Query)
+
+- [ibc/core/client/v1/tx.proto](#ibc/core/client/v1/tx.proto)
+ - [MsgCreateClient](#ibc.core.client.v1.MsgCreateClient)
+ - [MsgCreateClientResponse](#ibc.core.client.v1.MsgCreateClientResponse)
+ - [MsgSubmitMisbehaviour](#ibc.core.client.v1.MsgSubmitMisbehaviour)
+ - [MsgSubmitMisbehaviourResponse](#ibc.core.client.v1.MsgSubmitMisbehaviourResponse)
+ - [MsgUpdateClient](#ibc.core.client.v1.MsgUpdateClient)
+ - [MsgUpdateClientResponse](#ibc.core.client.v1.MsgUpdateClientResponse)
+ - [MsgUpgradeClient](#ibc.core.client.v1.MsgUpgradeClient)
+ - [MsgUpgradeClientResponse](#ibc.core.client.v1.MsgUpgradeClientResponse)
+
+ - [Msg](#ibc.core.client.v1.Msg)
+
+- [ibc/core/commitment/v1/commitment.proto](#ibc/core/commitment/v1/commitment.proto)
+ - [MerklePath](#ibc.core.commitment.v1.MerklePath)
+ - [MerklePrefix](#ibc.core.commitment.v1.MerklePrefix)
+ - [MerkleProof](#ibc.core.commitment.v1.MerkleProof)
+ - [MerkleRoot](#ibc.core.commitment.v1.MerkleRoot)
+
+- [ibc/core/connection/v1/connection.proto](#ibc/core/connection/v1/connection.proto)
+ - [ClientPaths](#ibc.core.connection.v1.ClientPaths)
+ - [ConnectionEnd](#ibc.core.connection.v1.ConnectionEnd)
+ - [ConnectionPaths](#ibc.core.connection.v1.ConnectionPaths)
+ - [Counterparty](#ibc.core.connection.v1.Counterparty)
+ - [IdentifiedConnection](#ibc.core.connection.v1.IdentifiedConnection)
+ - [Version](#ibc.core.connection.v1.Version)
+
+ - [State](#ibc.core.connection.v1.State)
+
+- [ibc/core/connection/v1/genesis.proto](#ibc/core/connection/v1/genesis.proto)
+ - [GenesisState](#ibc.core.connection.v1.GenesisState)
+
+- [ibc/core/connection/v1/query.proto](#ibc/core/connection/v1/query.proto)
+ - [QueryClientConnectionsRequest](#ibc.core.connection.v1.QueryClientConnectionsRequest)
+ - [QueryClientConnectionsResponse](#ibc.core.connection.v1.QueryClientConnectionsResponse)
+ - [QueryConnectionClientStateRequest](#ibc.core.connection.v1.QueryConnectionClientStateRequest)
+ - [QueryConnectionClientStateResponse](#ibc.core.connection.v1.QueryConnectionClientStateResponse)
+ - [QueryConnectionConsensusStateRequest](#ibc.core.connection.v1.QueryConnectionConsensusStateRequest)
+ - [QueryConnectionConsensusStateResponse](#ibc.core.connection.v1.QueryConnectionConsensusStateResponse)
+ - [QueryConnectionRequest](#ibc.core.connection.v1.QueryConnectionRequest)
+ - [QueryConnectionResponse](#ibc.core.connection.v1.QueryConnectionResponse)
+ - [QueryConnectionsRequest](#ibc.core.connection.v1.QueryConnectionsRequest)
+ - [QueryConnectionsResponse](#ibc.core.connection.v1.QueryConnectionsResponse)
+
+ - [Query](#ibc.core.connection.v1.Query)
+
+- [ibc/core/connection/v1/tx.proto](#ibc/core/connection/v1/tx.proto)
+ - [MsgConnectionOpenAck](#ibc.core.connection.v1.MsgConnectionOpenAck)
+ - [MsgConnectionOpenAckResponse](#ibc.core.connection.v1.MsgConnectionOpenAckResponse)
+ - [MsgConnectionOpenConfirm](#ibc.core.connection.v1.MsgConnectionOpenConfirm)
+ - [MsgConnectionOpenConfirmResponse](#ibc.core.connection.v1.MsgConnectionOpenConfirmResponse)
+ - [MsgConnectionOpenInit](#ibc.core.connection.v1.MsgConnectionOpenInit)
+ - [MsgConnectionOpenInitResponse](#ibc.core.connection.v1.MsgConnectionOpenInitResponse)
+ - [MsgConnectionOpenTry](#ibc.core.connection.v1.MsgConnectionOpenTry)
+ - [MsgConnectionOpenTryResponse](#ibc.core.connection.v1.MsgConnectionOpenTryResponse)
+
+ - [Msg](#ibc.core.connection.v1.Msg)
+
+- [ibc/core/types/v1/genesis.proto](#ibc/core/types/v1/genesis.proto)
+ - [GenesisState](#ibc.core.types.v1.GenesisState)
+
+- [ibc/lightclients/localhost/v1/localhost.proto](#ibc/lightclients/localhost/v1/localhost.proto)
+ - [ClientState](#ibc.lightclients.localhost.v1.ClientState)
+
+- [ibc/lightclients/solomachine/v1/solomachine.proto](#ibc/lightclients/solomachine/v1/solomachine.proto)
+ - [ChannelStateData](#ibc.lightclients.solomachine.v1.ChannelStateData)
+ - [ClientState](#ibc.lightclients.solomachine.v1.ClientState)
+ - [ClientStateData](#ibc.lightclients.solomachine.v1.ClientStateData)
+ - [ConnectionStateData](#ibc.lightclients.solomachine.v1.ConnectionStateData)
+ - [ConsensusState](#ibc.lightclients.solomachine.v1.ConsensusState)
+ - [ConsensusStateData](#ibc.lightclients.solomachine.v1.ConsensusStateData)
+ - [Header](#ibc.lightclients.solomachine.v1.Header)
+ - [HeaderData](#ibc.lightclients.solomachine.v1.HeaderData)
+ - [Misbehaviour](#ibc.lightclients.solomachine.v1.Misbehaviour)
+ - [NextSequenceRecvData](#ibc.lightclients.solomachine.v1.NextSequenceRecvData)
+ - [PacketAcknowledgementData](#ibc.lightclients.solomachine.v1.PacketAcknowledgementData)
+ - [PacketCommitmentData](#ibc.lightclients.solomachine.v1.PacketCommitmentData)
+ - [PacketReceiptAbsenceData](#ibc.lightclients.solomachine.v1.PacketReceiptAbsenceData)
+ - [SignBytes](#ibc.lightclients.solomachine.v1.SignBytes)
+ - [SignatureAndData](#ibc.lightclients.solomachine.v1.SignatureAndData)
+ - [TimestampedSignatureData](#ibc.lightclients.solomachine.v1.TimestampedSignatureData)
+
+ - [DataType](#ibc.lightclients.solomachine.v1.DataType)
+
+- [ibc/lightclients/tendermint/v1/tendermint.proto](#ibc/lightclients/tendermint/v1/tendermint.proto)
+ - [ClientState](#ibc.lightclients.tendermint.v1.ClientState)
+ - [ConsensusState](#ibc.lightclients.tendermint.v1.ConsensusState)
+ - [Fraction](#ibc.lightclients.tendermint.v1.Fraction)
+ - [Header](#ibc.lightclients.tendermint.v1.Header)
+ - [Misbehaviour](#ibc.lightclients.tendermint.v1.Misbehaviour)
+
+- [Scalar Value Types](#scalar-value-types)
+
+
+
+
+Top
+
+## ibc/apps/transfer/v1/transfer.proto
+
+
+
+
+
+### DenomTrace
+DenomTrace contains the base denomination for ICS20 fungible tokens and the
+source tracing information path.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `path` | [string](#string) | | path defines the chain of port/channel identifiers used for tracing the source of the fungible token. |
+| `base_denom` | [string](#string) | | base denomination of the relayed fungible token. |
+
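+
+> Editor's note (illustrative, not generated from the proto files): the sketch
+> below shows how a local `ibc/...` voucher denomination can be derived from a
+> DenomTrace, assuming the ICS20 convention of hashing `{path}/{base_denom}`
+> with SHA-256. The struct is a simplified stand-in for the generated type.
+
+```go
+package main
+
+import (
+	"crypto/sha256"
+	"encoding/hex"
+	"fmt"
+	"strings"
+)
+
+// DenomTrace mirrors the two documented fields above.
+type DenomTrace struct {
+	Path      string // e.g. "transfer/channel-0"
+	BaseDenom string // e.g. "uatom"
+}
+
+// IBCDenom returns "ibc/<HEX(SHA256(path/base_denom))>" for traced tokens,
+// or the base denomination itself when the path is empty (a native token).
+func (dt DenomTrace) IBCDenom() string {
+	if dt.Path == "" {
+		return dt.BaseDenom
+	}
+	hash := sha256.Sum256([]byte(dt.Path + "/" + dt.BaseDenom))
+	return "ibc/" + strings.ToUpper(hex.EncodeToString(hash[:]))
+}
+
+func main() {
+	trace := DenomTrace{Path: "transfer/channel-0", BaseDenom: "uatom"}
+	fmt.Println(trace.IBCDenom())
+}
+```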
+
+
+
+
+
+
+
+### FungibleTokenPacketData
+FungibleTokenPacketData defines a struct for the packet payload.
+See the FungibleTokenPacketData spec:
+https://github.com/cosmos/ics/tree/master/spec/ics-020-fungible-token-transfer#data-structures
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `denom` | [string](#string) | | the token denomination to be transferred |
+| `amount` | [uint64](#uint64) | | the token amount to be transferred |
+| `sender` | [string](#string) | | the sender address |
+| `receiver` | [string](#string) | | the recipient address on the destination chain |
+
+
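+
+> Editor's note (illustrative, not generated from the proto files): a minimal
+> sketch of the JSON shape of this packet payload, assuming the standard JSON
+> encoding of the packet data; the struct is a simplified stand-in for the
+> generated type.
+
+```go
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
+// FungibleTokenPacketData mirrors the documented fields above.
+type FungibleTokenPacketData struct {
+	Denom    string `json:"denom"`
+	Amount   uint64 `json:"amount"`
+	Sender   string `json:"sender"`
+	Receiver string `json:"receiver"`
+}
+
+func main() {
+	data := FungibleTokenPacketData{
+		Denom:    "transfer/channel-0/uatom",
+		Amount:   1000,
+		Sender:   "cosmos1...", // placeholder bech32 address
+		Receiver: "cosmos1...", // placeholder bech32 address
+	}
+	bz, _ := json.Marshal(data)
+	fmt.Println(string(bz)) // {"denom":"transfer/channel-0/uatom","amount":1000,...}
+}
+```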
+
+
+
+
+
+
+### Params
+Params defines the set of IBC transfer parameters.
+NOTE: To prevent a single token from being transferred, set the
+TransfersEnabled parameter to true and then set the bank module's SendEnabled
+parameter for the denomination to false.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `send_enabled` | [bool](#bool) | | send_enabled enables or disables all cross-chain token transfers from this chain. |
+| `receive_enabled` | [bool](#bool) | | receive_enabled enables or disables all cross-chain token transfers to this chain. |
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+Top
+
+## ibc/apps/transfer/v1/genesis.proto
+
+
+
+
+
+### GenesisState
+GenesisState defines the ibc-transfer genesis state
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `port_id` | [string](#string) | | |
+| `denom_traces` | [DenomTrace](#ibc.apps.transfer.v1.DenomTrace) | repeated | |
+| `params` | [Params](#ibc.apps.transfer.v1.Params) | | |
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+Top
+
+## ibc/apps/transfer/v1/query.proto
+
+
+
+
+
+### QueryDenomTraceRequest
+QueryDenomTraceRequest is the request type for the Query/DenomTrace RPC
+method
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `hash` | [string](#string) | | hash (in hex format) of the denomination trace information. |
+
+
+
+
+
+
+
+
+### QueryDenomTraceResponse
+QueryDenomTraceResponse is the response type for the Query/DenomTrace RPC
+method.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `denom_trace` | [DenomTrace](#ibc.apps.transfer.v1.DenomTrace) | | denom_trace returns the requested denomination trace information. |
+
+
+
+
+
+
+
+
+### QueryDenomTracesRequest
+QueryDenomTracesRequest is the request type for the Query/DenomTraces RPC
+method.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | pagination defines an optional pagination for the request. |
+
+
+
+
+
+
+
+
+### QueryDenomTracesResponse
+QueryDenomTracesResponse is the response type for the Query/DenomTraces RPC
+method.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `denom_traces` | [DenomTrace](#ibc.apps.transfer.v1.DenomTrace) | repeated | denom_traces returns all denomination trace information. |
+| `pagination` | [cosmos.base.query.v1beta1.PageResponse](#cosmos.base.query.v1beta1.PageResponse) | | pagination defines the pagination in the response. |
+
+
+
+
+
+
+
+
+### QueryParamsRequest
+QueryParamsRequest is the request type for the Query/Params RPC method.
+
+
+
+
+
+
+
+
+### QueryParamsResponse
+QueryParamsResponse is the response type for the Query/Params RPC method.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `params` | [Params](#ibc.apps.transfer.v1.Params) | | params defines the parameters of the module. |
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+### Query
+Query defines the gRPC querier service.
+
+| Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint |
+| ----------- | ------------ | ------------- | ------------| ------- | -------- |
+| `DenomTrace` | [QueryDenomTraceRequest](#ibc.apps.transfer.v1.QueryDenomTraceRequest) | [QueryDenomTraceResponse](#ibc.apps.transfer.v1.QueryDenomTraceResponse) | DenomTrace queries denomination trace information. | GET|/ibc/apps/transfer/v1/denom_traces/{hash}|
+| `DenomTraces` | [QueryDenomTracesRequest](#ibc.apps.transfer.v1.QueryDenomTracesRequest) | [QueryDenomTracesResponse](#ibc.apps.transfer.v1.QueryDenomTracesResponse) | DenomTraces queries all denomination traces. | GET|/ibc/apps/transfer/v1/denom_traces|
+| `Params` | [QueryParamsRequest](#ibc.apps.transfer.v1.QueryParamsRequest) | [QueryParamsResponse](#ibc.apps.transfer.v1.QueryParamsResponse) | Params queries all parameters of the ibc-transfer module. | GET|/ibc/apps/transfer/v1/params|
+
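+
+> Editor's note (illustrative, not generated from the proto files): the HTTP
+> bindings above can be exercised through the gRPC-gateway. The sketch below
+> calls Query/Params; the localhost URL and port are assumptions about a
+> locally running node with the API server enabled.
+
+```go
+package main
+
+import (
+	"fmt"
+	"io"
+	"net/http"
+)
+
+func main() {
+	// GET binding for Query/Params, as listed in the table above.
+	resp, err := http.Get("http://localhost:1317/ibc/apps/transfer/v1/params")
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+
+	body, err := io.ReadAll(resp.Body)
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(string(body)) // e.g. {"params":{"send_enabled":true,"receive_enabled":true}}
+}
+```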
+
+
+
+
+
+Top
+
+## ibc/core/client/v1/client.proto
+
+
+
+
+
+### ClientConsensusStates
+ClientConsensusStates defines all the stored consensus states for a given
+client.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `client_id` | [string](#string) | | client identifier |
+| `consensus_states` | [ConsensusStateWithHeight](#ibc.core.client.v1.ConsensusStateWithHeight) | repeated | consensus states and their heights associated with the client |
+
+
+
+
+
+
+
+
+### ClientUpdateProposal
+ClientUpdateProposal is a governance proposal. If it passes, the substitute
+client's consensus states starting from the 'initial height' are copied over
+to the subject's client state. The proposal handler may fail if the subject
+and the substitute do not match in client and chain parameters (with the
+exception of the latest height, frozen height, and chain-id). The updated
+client must also be valid (cannot be expired).
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `title` | [string](#string) | | the title of the update proposal |
+| `description` | [string](#string) | | the description of the proposal |
+| `subject_client_id` | [string](#string) | | the client identifier for the client to be updated if the proposal passes |
+| `substitute_client_id` | [string](#string) | | the substitute client identifier for the client standing in for the subject client |
+| `initial_height` | [Height](#ibc.core.client.v1.Height) | | the initial height to copy consensus states from the substitute to the subject |
+
+
+
+
+
+
+
+
+### ConsensusStateWithHeight
+ConsensusStateWithHeight defines a consensus state with an additional height
+field.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `height` | [Height](#ibc.core.client.v1.Height) | | consensus state height |
+| `consensus_state` | [google.protobuf.Any](#google.protobuf.Any) | | consensus state |
+
+
+
+
+
+
+
+
+### Height
+Height is a monotonically increasing data type
+that can be compared against another Height for the purposes of updating and
+freezing clients
+
+Normally the RevisionHeight is incremented at each height while keeping
+RevisionNumber the same. However, some consensus algorithms may choose to
+reset the height in certain conditions, e.g. hard forks or state-machine
+breaking changes. In these cases, the RevisionNumber is incremented so that
+height continues to be monotonically increasing even as the RevisionHeight
+gets reset.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `revision_number` | [uint64](#uint64) | | the revision that the client is currently on |
+| `revision_height` | [uint64](#uint64) | | the height within the given revision |
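+
+The ordering described above is lexicographic: revision numbers are compared first, and revision heights only break ties within the same revision. A minimal, self-contained sketch of that comparison (the type and method here are illustrative, not the generated Go API):
+
+```go
+package main
+
+import "fmt"
+
+// Height mirrors the two fields documented above.
+type Height struct {
+    RevisionNumber uint64
+    RevisionHeight uint64
+}
+
+// GT reports whether h is strictly greater than other:
+// compare revision numbers first, then revision heights.
+func (h Height) GT(other Height) bool {
+    if h.RevisionNumber != other.RevisionNumber {
+        return h.RevisionNumber > other.RevisionNumber
+    }
+    return h.RevisionHeight > other.RevisionHeight
+}
+
+func main() {
+    // After a hard fork the revision number bumps and the height resets,
+    // yet the new height still sorts after every height of the old revision.
+    preFork := Height{RevisionNumber: 1, RevisionHeight: 500000}
+    postFork := Height{RevisionNumber: 2, RevisionHeight: 1}
+    fmt.Println(postFork.GT(preFork)) // true
+}
+```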
+
+
+
+
+
+
+
+
+### IdentifiedClientState
+IdentifiedClientState defines a client state with an additional client
+identifier field.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `client_id` | [string](#string) | | client identifier |
+| `client_state` | [google.protobuf.Any](#google.protobuf.Any) | | client state |
+
+
+
+
+
+
+
+
+### Params
+Params defines the set of IBC light client parameters.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `allowed_clients` | [string](#string) | repeated | allowed_clients defines the list of allowed client state types. |
+
+
+
+
+
+
+
+
+### UpgradeProposal
+UpgradeProposal is a gov Content type for initiating an IBC breaking
+upgrade.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `title` | [string](#string) | | |
+| `description` | [string](#string) | | |
+| `plan` | [cosmos.upgrade.v1beta1.Plan](#cosmos.upgrade.v1beta1.Plan) | | |
+| `upgraded_client_state` | [google.protobuf.Any](#google.protobuf.Any) | | An UpgradedClientState must be provided to perform an IBC breaking upgrade. This will make the chain commit to the correct upgraded (self) client state before the upgrade occurs, so that connecting chains can verify that the new upgraded client is valid by verifying a proof on the previous version of the chain. This will allow IBC connections to persist smoothly across planned chain upgrades |
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+Top
+
+## ibc/apps/transfer/v1/tx.proto
+
+
+
+
+
+### MsgTransfer
+MsgTransfer defines a msg to transfer fungible tokens (i.e. Coins) between
+ICS20 enabled chains. See ICS Spec here:
+https://github.com/cosmos/ics/tree/master/spec/ics-020-fungible-token-transfer#data-structures
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `source_port` | [string](#string) | | the port on which the packet will be sent |
+| `source_channel` | [string](#string) | | the channel by which the packet will be sent |
+| `token` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | | the tokens to be transferred |
+| `sender` | [string](#string) | | the sender address |
+| `receiver` | [string](#string) | | the recipient address on the destination chain |
+| `timeout_height` | [ibc.core.client.v1.Height](#ibc.core.client.v1.Height) | | Timeout height relative to the current block height. The timeout is disabled when set to 0. |
+| `timeout_timestamp` | [uint64](#uint64) | | Timeout timestamp (in nanoseconds) relative to the current block timestamp. The timeout is disabled when set to 0. |
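+
+For orientation, the sketch below fills in the fields from the table with plausible values. The struct literal, import paths, and the `"transfer"`/`"channel-0"`/`"uatom"` identifiers are assumptions for the example, not values mandated by the message.
+
+```go
+package example
+
+import (
+    sdk "github.com/cosmos/cosmos-sdk/types"
+
+    transfertypes "github.com/cosmos/ibc-go/modules/apps/transfer/types"
+    clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+)
+
+// exampleMsgTransfer builds a MsgTransfer with illustrative values.
+func exampleMsgTransfer(sender, receiver string) *transfertypes.MsgTransfer {
+    return &transfertypes.MsgTransfer{
+        SourcePort:    "transfer",
+        SourceChannel: "channel-0",
+        Token:         sdk.NewCoin("uatom", sdk.NewInt(1000)),
+        Sender:        sender,
+        Receiver:      receiver,
+        // time out after height 1000 of revision 1; a zero Height would
+        // disable the height timeout, as noted in the table above
+        TimeoutHeight:    clienttypes.NewHeight(1, 1000),
+        TimeoutTimestamp: 0, // timestamp timeout disabled
+    }
+}
+```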
+
+
+
+
+
+
+
+
+### MsgTransferResponse
+MsgTransferResponse defines the Msg/Transfer response type.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+### Msg
+Msg defines the ibc/transfer Msg service.
+
+| Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint |
+| ----------- | ------------ | ------------- | ------------| ------- | -------- |
+| `Transfer` | [MsgTransfer](#ibc.apps.transfer.v1.MsgTransfer) | [MsgTransferResponse](#ibc.apps.transfer.v1.MsgTransferResponse) | Transfer defines a rpc handler method for MsgTransfer. | |
+
+
+
+
+
+
+Top
+
+## ibc/core/channel/v1/channel.proto
+
+
+
+
+
+### Acknowledgement
+Acknowledgement is the recommended acknowledgement format to be used by
+app-specific protocols.
+NOTE: The field numbers 21 and 22 were explicitly chosen to avoid accidental
+conflicts with other protobuf message formats used for acknowledgements.
+The first byte of any message with this format will be the non-ASCII values
+`0xaa` (result) or `0xb2` (error). Implemented as defined by ICS:
+https://github.com/cosmos/ics/tree/master/spec/ics-004-channel-and-packet-semantics#acknowledgement-envelope
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `result` | [bytes](#bytes) | | |
+| `error` | [string](#string) | | |
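+
+The non-ASCII first bytes mentioned above follow directly from the protobuf wire format: the tag byte of a length-delimited field is `(field_number << 3) | 2`, so field 21 yields `0xaa` and field 22 yields `0xb2`. A small self-contained check:
+
+```go
+package main
+
+import "fmt"
+
+func main() {
+    const wireTypeLengthDelimited = 2
+    // tag byte = (field_number << 3) | wire_type
+    fmt.Printf("result (field 21): %#x\n", (21<<3)|wireTypeLengthDelimited) // 0xaa
+    fmt.Printf("error  (field 22): %#x\n", (22<<3)|wireTypeLengthDelimited) // 0xb2
+}
+```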
+
+
+
+
+
+
+
+
+### Channel
+Channel defines a pipeline for exactly-once packet delivery between specific
+modules on separate blockchains, which has at least one end capable of
+sending packets and one end capable of receiving packets.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `state` | [State](#ibc.core.channel.v1.State) | | current state of the channel end |
+| `ordering` | [Order](#ibc.core.channel.v1.Order) | | whether the channel is ordered or unordered |
+| `counterparty` | [Counterparty](#ibc.core.channel.v1.Counterparty) | | counterparty channel end |
+| `connection_hops` | [string](#string) | repeated | list of connection identifiers, in order, along which packets sent on this channel will travel |
+| `version` | [string](#string) | | opaque channel version, which is agreed upon during the handshake |
+
+
+
+
+
+
+
+
+### Counterparty
+Counterparty defines a channel end counterparty
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `port_id` | [string](#string) | | port on the counterparty chain which owns the other end of the channel. |
+| `channel_id` | [string](#string) | | channel end on the counterparty chain |
+
+
+
+
+
+
+
+
+### IdentifiedChannel
+IdentifiedChannel defines a channel with additional port and channel
+identifier fields.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `state` | [State](#ibc.core.channel.v1.State) | | current state of the channel end |
+| `ordering` | [Order](#ibc.core.channel.v1.Order) | | whether the channel is ordered or unordered |
+| `counterparty` | [Counterparty](#ibc.core.channel.v1.Counterparty) | | counterparty channel end |
+| `connection_hops` | [string](#string) | repeated | list of connection identifiers, in order, along which packets sent on this channel will travel |
+| `version` | [string](#string) | | opaque channel version, which is agreed upon during the handshake |
+| `port_id` | [string](#string) | | port identifier |
+| `channel_id` | [string](#string) | | channel identifier |
+
+
+
+
+
+
+
+
+### Packet
+Packet defines a type that carries data across different chains through IBC
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `sequence` | [uint64](#uint64) | | number corresponds to the order of sends and receives, where a Packet with an earlier sequence number must be sent and received before a Packet with a later sequence number. |
+| `source_port` | [string](#string) | | identifies the port on the sending chain. |
+| `source_channel` | [string](#string) | | identifies the channel end on the sending chain. |
+| `destination_port` | [string](#string) | | identifies the port on the receiving chain. |
+| `destination_channel` | [string](#string) | | identifies the channel end on the receiving chain. |
+| `data` | [bytes](#bytes) | | actual opaque bytes transferred directly to the application module |
+| `timeout_height` | [ibc.core.client.v1.Height](#ibc.core.client.v1.Height) | | block height after which the packet times out |
+| `timeout_timestamp` | [uint64](#uint64) | | block timestamp (in nanoseconds) after which the packet times out |
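+
+As the two timeout fields above suggest, a packet can no longer be received once the destination chain passes either limit, and a zero value disables that limit. The following is a self-contained sketch of that check under that reading, not the module's actual implementation:
+
+```go
+package main
+
+import "fmt"
+
+// Height and Packet mirror only the fields needed for the timeout check.
+type Height struct {
+    RevisionNumber uint64
+    RevisionHeight uint64
+}
+
+func (h Height) IsZero() bool { return h.RevisionNumber == 0 && h.RevisionHeight == 0 }
+
+func (h Height) GTE(other Height) bool {
+    if h.RevisionNumber != other.RevisionNumber {
+        return h.RevisionNumber > other.RevisionNumber
+    }
+    return h.RevisionHeight >= other.RevisionHeight
+}
+
+type Packet struct {
+    TimeoutHeight    Height
+    TimeoutTimestamp uint64 // nanoseconds
+}
+
+// timedOut reports whether the packet can no longer be received, given the
+// destination chain's latest height and block time (in nanoseconds).
+func timedOut(p Packet, destHeight Height, destTimeNs uint64) bool {
+    heightExpired := !p.TimeoutHeight.IsZero() && destHeight.GTE(p.TimeoutHeight)
+    timeExpired := p.TimeoutTimestamp != 0 && destTimeNs >= p.TimeoutTimestamp
+    return heightExpired || timeExpired
+}
+
+func main() {
+    p := Packet{TimeoutHeight: Height{1, 100}, TimeoutTimestamp: 0}
+    fmt.Println(timedOut(p, Height{1, 99}, 0))  // false
+    fmt.Println(timedOut(p, Height{1, 100}, 0)) // true
+}
+```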
+
+
+
+
+
+
+
+
+### PacketState
+PacketState defines the generic type necessary to retrieve and store
+packet commitments, acknowledgements, and receipts.
+Caller is responsible for knowing the context necessary to interpret this
+state as a commitment, acknowledgement, or a receipt.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `port_id` | [string](#string) | | channel port identifier. |
+| `channel_id` | [string](#string) | | channel unique identifier. |
+| `sequence` | [uint64](#uint64) | | packet sequence. |
+| `data` | [bytes](#bytes) | | embedded data that represents packet state. |
+
+
+
+
+
+
+
+
+
+
+### Order
+Order defines if a channel is ORDERED or UNORDERED
+
+| Name | Number | Description |
+| ---- | ------ | ----------- |
+| ORDER_NONE_UNSPECIFIED | 0 | zero-value for channel ordering |
+| ORDER_UNORDERED | 1 | packets can be delivered in any order, which may differ from the order in which they were sent. |
+| ORDER_ORDERED | 2 | packets are delivered exactly in the order which they were sent |
+
+
+
+
+
+### State
+State defines if a channel is in one of the following states:
+CLOSED, INIT, TRYOPEN, OPEN or UNINITIALIZED.
+
+| Name | Number | Description |
+| ---- | ------ | ----------- |
+| STATE_UNINITIALIZED_UNSPECIFIED | 0 | Default State |
+| STATE_INIT | 1 | A channel has just started the opening handshake. |
+| STATE_TRYOPEN | 2 | A channel has acknowledged the handshake step on the counterparty chain. |
+| STATE_OPEN | 3 | A channel has completed the handshake. Open channels are ready to send and receive packets. |
+| STATE_CLOSED | 4 | A channel has been closed and can no longer be used to send or receive packets. |
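+
+Read together with the channel Msg types later in this document, the enum describes the handshake and closing progression. The toy transition table below is purely illustrative under that reading; it is not the module's state-machine logic.
+
+```go
+package main
+
+import "fmt"
+
+// Channel states, mirroring the enum above.
+const (
+    Uninitialized = iota
+    Init
+    TryOpen
+    Open
+    Closed
+)
+
+// next lists, for each state, the states a handshake or close step may move
+// it to, following the descriptions in the table above.
+var next = map[int][]int{
+    Uninitialized: {Init, TryOpen}, // ChannelOpenInit on chain A, ChannelOpenTry on chain B
+    Init:          {Open},          // ChannelOpenAck on chain A
+    TryOpen:       {Open},          // ChannelOpenConfirm on chain B
+    Open:          {Closed},        // ChannelCloseInit / ChannelCloseConfirm
+}
+
+func allowed(from, to int) bool {
+    for _, s := range next[from] {
+        if s == to {
+            return true
+        }
+    }
+    return false
+}
+
+func main() {
+    fmt.Println(allowed(Init, Open))    // true
+    fmt.Println(allowed(Open, TryOpen)) // false
+}
+```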
+
+
+
+
+
+
+
+
+
+
+
+Top
+
+## ibc/core/channel/v1/genesis.proto
+
+
+
+
+
+### GenesisState
+GenesisState defines the ibc channel submodule's genesis state.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `channels` | [IdentifiedChannel](#ibc.core.channel.v1.IdentifiedChannel) | repeated | |
+| `acknowledgements` | [PacketState](#ibc.core.channel.v1.PacketState) | repeated | |
+| `commitments` | [PacketState](#ibc.core.channel.v1.PacketState) | repeated | |
+| `receipts` | [PacketState](#ibc.core.channel.v1.PacketState) | repeated | |
+| `send_sequences` | [PacketSequence](#ibc.core.channel.v1.PacketSequence) | repeated | |
+| `recv_sequences` | [PacketSequence](#ibc.core.channel.v1.PacketSequence) | repeated | |
+| `ack_sequences` | [PacketSequence](#ibc.core.channel.v1.PacketSequence) | repeated | |
+| `next_channel_sequence` | [uint64](#uint64) | | the sequence for the next generated channel identifier |
+
+
+
+
+
+
+
+
+### PacketSequence
+PacketSequence defines the genesis type necessary to retrieve and store
+next send and receive sequences.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `port_id` | [string](#string) | | |
+| `channel_id` | [string](#string) | | |
+| `sequence` | [uint64](#uint64) | | |
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+Top
+
+## ibc/core/channel/v1/query.proto
+
+
+
+
+
+### QueryChannelClientStateRequest
+QueryChannelClientStateRequest is the request type for the Query/ClientState
+RPC method
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `port_id` | [string](#string) | | port unique identifier |
+| `channel_id` | [string](#string) | | channel unique identifier |
+
+
+
+
+
+
+
+
+### QueryChannelClientStateResponse
+QueryChannelClientStateResponse is the Response type for the
+Query/QueryChannelClientState RPC method
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `identified_client_state` | [ibc.core.client.v1.IdentifiedClientState](#ibc.core.client.v1.IdentifiedClientState) | | client state associated with the channel |
+| `proof` | [bytes](#bytes) | | merkle proof of existence |
+| `proof_height` | [ibc.core.client.v1.Height](#ibc.core.client.v1.Height) | | height at which the proof was retrieved |
+
+
+
+
+
+
+
+
+### QueryChannelConsensusStateRequest
+QueryChannelConsensusStateRequest is the request type for the
+Query/ConsensusState RPC method
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `port_id` | [string](#string) | | port unique identifier |
+| `channel_id` | [string](#string) | | channel unique identifier |
+| `revision_number` | [uint64](#uint64) | | revision number of the consensus state |
+| `revision_height` | [uint64](#uint64) | | revision height of the consensus state |
+
+
+
+
+
+
+
+
+### QueryChannelConsensusStateResponse
+QueryChannelConsensusStateResponse is the Response type for the
+Query/QueryChannelConsensusState RPC method
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `consensus_state` | [google.protobuf.Any](#google.protobuf.Any) | | consensus state associated with the channel |
+| `client_id` | [string](#string) | | client ID associated with the consensus state |
+| `proof` | [bytes](#bytes) | | merkle proof of existence |
+| `proof_height` | [ibc.core.client.v1.Height](#ibc.core.client.v1.Height) | | height at which the proof was retrieved |
+
+
+
+
+
+
+
+
+### QueryChannelRequest
+QueryChannelRequest is the request type for the Query/Channel RPC method
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `port_id` | [string](#string) | | port unique identifier |
+| `channel_id` | [string](#string) | | channel unique identifier |
+
+
+
+
+
+
+
+
+### QueryChannelResponse
+QueryChannelResponse is the response type for the Query/Channel RPC method.
+Besides the Channel end, it includes a proof and the height from which the
+proof was retrieved.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `channel` | [Channel](#ibc.core.channel.v1.Channel) | | channel associated with the request identifiers |
+| `proof` | [bytes](#bytes) | | merkle proof of existence |
+| `proof_height` | [ibc.core.client.v1.Height](#ibc.core.client.v1.Height) | | height at which the proof was retrieved |
+
+
+
+
+
+
+
+
+### QueryChannelsRequest
+QueryChannelsRequest is the request type for the Query/Channels RPC method
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | pagination request |
+
+
+
+
+
+
+
+
+### QueryChannelsResponse
+QueryChannelsResponse is the response type for the Query/Channels RPC method.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `channels` | [IdentifiedChannel](#ibc.core.channel.v1.IdentifiedChannel) | repeated | list of stored channels of the chain. |
+| `pagination` | [cosmos.base.query.v1beta1.PageResponse](#cosmos.base.query.v1beta1.PageResponse) | | pagination response |
+| `height` | [ibc.core.client.v1.Height](#ibc.core.client.v1.Height) | | query block height |
+
+
+
+
+
+
+
+
+### QueryConnectionChannelsRequest
+QueryConnectionChannelsRequest is the request type for the
+Query/QueryConnectionChannels RPC method
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `connection` | [string](#string) | | connection unique identifier |
+| `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | pagination request |
+
+
+
+
+
+
+
+
+### QueryConnectionChannelsResponse
+QueryConnectionChannelsResponse is the Response type for the
+Query/QueryConnectionChannels RPC method
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `channels` | [IdentifiedChannel](#ibc.core.channel.v1.IdentifiedChannel) | repeated | list of channels associated with a connection. |
+| `pagination` | [cosmos.base.query.v1beta1.PageResponse](#cosmos.base.query.v1beta1.PageResponse) | | pagination response |
+| `height` | [ibc.core.client.v1.Height](#ibc.core.client.v1.Height) | | query block height |
+
+
+
+
+
+
+
+
+### QueryNextSequenceReceiveRequest
+QueryNextSequenceReceiveRequest is the request type for the
+Query/QueryNextSequenceReceive RPC method
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `port_id` | [string](#string) | | port unique identifier |
+| `channel_id` | [string](#string) | | channel unique identifier |
+
+
+
+
+
+
+
+
+### QueryNextSequenceReceiveResponse
+QueryNextSequenceReceiveResponse is the response type for the
+Query/QueryNextSequenceReceive RPC method
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `next_sequence_receive` | [uint64](#uint64) | | next sequence receive number |
+| `proof` | [bytes](#bytes) | | merkle proof of existence |
+| `proof_height` | [ibc.core.client.v1.Height](#ibc.core.client.v1.Height) | | height at which the proof was retrieved |
+
+
+
+
+
+
+
+
+### QueryPacketAcknowledgementRequest
+QueryPacketAcknowledgementRequest is the request type for the
+Query/PacketAcknowledgement RPC method
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `port_id` | [string](#string) | | port unique identifier |
+| `channel_id` | [string](#string) | | channel unique identifier |
+| `sequence` | [uint64](#uint64) | | packet sequence |
+
+
+
+
+
+
+
+
+### QueryPacketAcknowledgementResponse
+QueryPacketAcknowledgementResponse defines the client query response for a
+packet which also includes a proof and the height from which the
+proof was retrieved
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `acknowledgement` | [bytes](#bytes) | | packet associated with the request fields |
+| `proof` | [bytes](#bytes) | | merkle proof of existence |
+| `proof_height` | [ibc.core.client.v1.Height](#ibc.core.client.v1.Height) | | height at which the proof was retrieved |
+
+
+
+
+
+
+
+
+### QueryPacketAcknowledgementsRequest
+QueryPacketAcknowledgementsRequest is the request type for the
+Query/QueryPacketAcknowledgements RPC method
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `port_id` | [string](#string) | | port unique identifier |
+| `channel_id` | [string](#string) | | channel unique identifier |
+| `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | pagination request |
+
+
+
+
+
+
+
+
+### QueryPacketAcknowledgementsResponse
+QueryPacketAcknowledgementsResponse is the response type for the
+Query/QueryPacketAcknowledgements RPC method
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `acknowledgements` | [PacketState](#ibc.core.channel.v1.PacketState) | repeated | |
+| `pagination` | [cosmos.base.query.v1beta1.PageResponse](#cosmos.base.query.v1beta1.PageResponse) | | pagination response |
+| `height` | [ibc.core.client.v1.Height](#ibc.core.client.v1.Height) | | query block height |
+
+
+
+
+
+
+
+
+### QueryPacketCommitmentRequest
+QueryPacketCommitmentRequest is the request type for the
+Query/PacketCommitment RPC method
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `port_id` | [string](#string) | | port unique identifier |
+| `channel_id` | [string](#string) | | channel unique identifier |
+| `sequence` | [uint64](#uint64) | | packet sequence |
+
+
+
+
+
+
+
+
+### QueryPacketCommitmentResponse
+QueryPacketCommitmentResponse defines the client query response for a packet
+which also includes a proof and the height from which the proof was
+retrieved
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `commitment` | [bytes](#bytes) | | packet associated with the request fields |
+| `proof` | [bytes](#bytes) | | merkle proof of existence |
+| `proof_height` | [ibc.core.client.v1.Height](#ibc.core.client.v1.Height) | | height at which the proof was retrieved |
+
+
+
+
+
+
+
+
+### QueryPacketCommitmentsRequest
+QueryPacketCommitmentsRequest is the request type for the
+Query/QueryPacketCommitments RPC method
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `port_id` | [string](#string) | | port unique identifier |
+| `channel_id` | [string](#string) | | channel unique identifier |
+| `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | pagination request |
+
+
+
+
+
+
+
+
+### QueryPacketCommitmentsResponse
+QueryPacketCommitmentsResponse is the response type for the
+Query/QueryPacketCommitments RPC method
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `commitments` | [PacketState](#ibc.core.channel.v1.PacketState) | repeated | |
+| `pagination` | [cosmos.base.query.v1beta1.PageResponse](#cosmos.base.query.v1beta1.PageResponse) | | pagination response |
+| `height` | [ibc.core.client.v1.Height](#ibc.core.client.v1.Height) | | query block height |
+
+
+
+
+
+
+
+
+### QueryPacketReceiptRequest
+QueryPacketReceiptRequest is the request type for the
+Query/PacketReceipt RPC method
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `port_id` | [string](#string) | | port unique identifier |
+| `channel_id` | [string](#string) | | channel unique identifier |
+| `sequence` | [uint64](#uint64) | | packet sequence |
+
+
+
+
+
+
+
+
+### QueryPacketReceiptResponse
+QueryPacketReceiptResponse defines the client query response for a packet
+receipt which also includes a proof, and the height from which the proof was
+retrieved
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `received` | [bool](#bool) | | success flag for if receipt exists |
+| `proof` | [bytes](#bytes) | | merkle proof of existence |
+| `proof_height` | [ibc.core.client.v1.Height](#ibc.core.client.v1.Height) | | height at which the proof was retrieved |
+
+
+
+
+
+
+
+
+### QueryUnreceivedAcksRequest
+QueryUnreceivedAcksRequest is the request type for the
+Query/UnreceivedAcks RPC method
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `port_id` | [string](#string) | | port unique identifier |
+| `channel_id` | [string](#string) | | channel unique identifier |
+| `packet_ack_sequences` | [uint64](#uint64) | repeated | list of acknowledgement sequences |
+
+
+
+
+
+
+
+
+### QueryUnreceivedAcksResponse
+QueryUnreceivedAcksResponse is the response type for the
+Query/UnreceivedAcks RPC method
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `sequences` | [uint64](#uint64) | repeated | list of unreceived acknowledgement sequences |
+| `height` | [ibc.core.client.v1.Height](#ibc.core.client.v1.Height) | | query block height |
+
+
+
+
+
+
+
+
+### QueryUnreceivedPacketsRequest
+QueryUnreceivedPacketsRequest is the request type for the
+Query/UnreceivedPackets RPC method
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `port_id` | [string](#string) | | port unique identifier |
+| `channel_id` | [string](#string) | | channel unique identifier |
+| `packet_commitment_sequences` | [uint64](#uint64) | repeated | list of packet sequences |
+
+
+
+
+
+
+
+
+### QueryUnreceivedPacketsResponse
+QueryUnreceivedPacketsResponse is the response type for the
+Query/UnreceivedPackets RPC method
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `sequences` | [uint64](#uint64) | repeated | list of unreceived packet sequences |
+| `height` | [ibc.core.client.v1.Height](#ibc.core.client.v1.Height) | | query block height |
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+### Query
+Query defines the gRPC querier service
+
+| Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint |
+| ----------- | ------------ | ------------- | ------------| ------- | -------- |
+| `Channel` | [QueryChannelRequest](#ibc.core.channel.v1.QueryChannelRequest) | [QueryChannelResponse](#ibc.core.channel.v1.QueryChannelResponse) | Channel queries an IBC Channel. | GET|/ibc/core/channel/v1/channels/{channel_id}/ports/{port_id}|
+| `Channels` | [QueryChannelsRequest](#ibc.core.channel.v1.QueryChannelsRequest) | [QueryChannelsResponse](#ibc.core.channel.v1.QueryChannelsResponse) | Channels queries all the IBC channels of a chain. | GET|/ibc/core/channel/v1/channels|
+| `ConnectionChannels` | [QueryConnectionChannelsRequest](#ibc.core.channel.v1.QueryConnectionChannelsRequest) | [QueryConnectionChannelsResponse](#ibc.core.channel.v1.QueryConnectionChannelsResponse) | ConnectionChannels queries all the channels associated with a connection end. | GET|/ibc/core/channel/v1/connections/{connection}/channels|
+| `ChannelClientState` | [QueryChannelClientStateRequest](#ibc.core.channel.v1.QueryChannelClientStateRequest) | [QueryChannelClientStateResponse](#ibc.core.channel.v1.QueryChannelClientStateResponse) | ChannelClientState queries for the client state for the channel associated with the provided channel identifiers. | GET|/ibc/core/channel/v1/channels/{channel_id}/ports/{port_id}/client_state|
+| `ChannelConsensusState` | [QueryChannelConsensusStateRequest](#ibc.core.channel.v1.QueryChannelConsensusStateRequest) | [QueryChannelConsensusStateResponse](#ibc.core.channel.v1.QueryChannelConsensusStateResponse) | ChannelConsensusState queries for the consensus state for the channel associated with the provided channel identifiers. | GET|/ibc/core/channel/v1/channels/{channel_id}/ports/{port_id}/consensus_state/revision/{revision_number}/height/{revision_height}|
+| `PacketCommitment` | [QueryPacketCommitmentRequest](#ibc.core.channel.v1.QueryPacketCommitmentRequest) | [QueryPacketCommitmentResponse](#ibc.core.channel.v1.QueryPacketCommitmentResponse) | PacketCommitment queries a stored packet commitment hash. | GET|/ibc/core/channel/v1/channels/{channel_id}/ports/{port_id}/packet_commitments/{sequence}|
+| `PacketCommitments` | [QueryPacketCommitmentsRequest](#ibc.core.channel.v1.QueryPacketCommitmentsRequest) | [QueryPacketCommitmentsResponse](#ibc.core.channel.v1.QueryPacketCommitmentsResponse) | PacketCommitments returns all the packet commitments hashes associated with a channel. | GET|/ibc/core/channel/v1/channels/{channel_id}/ports/{port_id}/packet_commitments|
+| `PacketReceipt` | [QueryPacketReceiptRequest](#ibc.core.channel.v1.QueryPacketReceiptRequest) | [QueryPacketReceiptResponse](#ibc.core.channel.v1.QueryPacketReceiptResponse) | PacketReceipt queries if a given packet sequence has been received on the queried chain | GET|/ibc/core/channel/v1/channels/{channel_id}/ports/{port_id}/packet_receipts/{sequence}|
+| `PacketAcknowledgement` | [QueryPacketAcknowledgementRequest](#ibc.core.channel.v1.QueryPacketAcknowledgementRequest) | [QueryPacketAcknowledgementResponse](#ibc.core.channel.v1.QueryPacketAcknowledgementResponse) | PacketAcknowledgement queries a stored packet acknowledgement hash. | GET|/ibc/core/channel/v1/channels/{channel_id}/ports/{port_id}/packet_acks/{sequence}|
+| `PacketAcknowledgements` | [QueryPacketAcknowledgementsRequest](#ibc.core.channel.v1.QueryPacketAcknowledgementsRequest) | [QueryPacketAcknowledgementsResponse](#ibc.core.channel.v1.QueryPacketAcknowledgementsResponse) | PacketAcknowledgements returns all the packet acknowledgements associated with a channel. | GET|/ibc/core/channel/v1/channels/{channel_id}/ports/{port_id}/packet_acknowledgements|
+| `UnreceivedPackets` | [QueryUnreceivedPacketsRequest](#ibc.core.channel.v1.QueryUnreceivedPacketsRequest) | [QueryUnreceivedPacketsResponse](#ibc.core.channel.v1.QueryUnreceivedPacketsResponse) | UnreceivedPackets returns all the unreceived IBC packets associated with a channel and sequences. | GET|/ibc/core/channel/v1/channels/{channel_id}/ports/{port_id}/packet_commitments/{packet_commitment_sequences}/unreceived_packets|
+| `UnreceivedAcks` | [QueryUnreceivedAcksRequest](#ibc.core.channel.v1.QueryUnreceivedAcksRequest) | [QueryUnreceivedAcksResponse](#ibc.core.channel.v1.QueryUnreceivedAcksResponse) | UnreceivedAcks returns all the unreceived IBC acknowledgements associated with a channel and sequences. | GET|/ibc/core/channel/v1/channels/{channel_id}/ports/{port_id}/packet_commitments/{packet_ack_sequences}/unreceived_acks|
+| `NextSequenceReceive` | [QueryNextSequenceReceiveRequest](#ibc.core.channel.v1.QueryNextSequenceReceiveRequest) | [QueryNextSequenceReceiveResponse](#ibc.core.channel.v1.QueryNextSequenceReceiveResponse) | NextSequenceReceive returns the next receive sequence for a given channel. | GET|/ibc/core/channel/v1/channels/{channel_id}/ports/{port_id}/next_sequence|
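+
+The HTTP endpoints in the table are served by the node's API server when it is enabled. As a rough example (host, port, and identifiers are assumptions), the following fetches the channel list over REST:
+
+```go
+package main
+
+import (
+    "fmt"
+    "io"
+    "log"
+    "net/http"
+)
+
+func main() {
+    // assumes a node with the REST API enabled on the default port
+    resp, err := http.Get("http://localhost:1317/ibc/core/channel/v1/channels")
+    if err != nil {
+        log.Fatal(err)
+    }
+    defer resp.Body.Close()
+
+    body, err := io.ReadAll(resp.Body)
+    if err != nil {
+        log.Fatal(err)
+    }
+    fmt.Println(string(body)) // JSON list of IdentifiedChannel objects plus pagination
+}
+```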
+
+
+
+
+
+
+Top
+
+## ibc/core/channel/v1/tx.proto
+
+
+
+
+
+### MsgAcknowledgement
+MsgAcknowledgement receives an incoming IBC acknowledgement
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `packet` | [Packet](#ibc.core.channel.v1.Packet) | | |
+| `acknowledgement` | [bytes](#bytes) | | |
+| `proof_acked` | [bytes](#bytes) | | |
+| `proof_height` | [ibc.core.client.v1.Height](#ibc.core.client.v1.Height) | | |
+| `signer` | [string](#string) | | |
+
+
+
+
+
+
+
+
+### MsgAcknowledgementResponse
+MsgAcknowledgementResponse defines the Msg/Acknowledgement response type.
+
+
+
+
+
+
+
+
+### MsgChannelCloseConfirm
+MsgChannelCloseConfirm defines a msg sent by a Relayer to Chain B
+to acknowledge the change of channel state to CLOSED on Chain A.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `port_id` | [string](#string) | | |
+| `channel_id` | [string](#string) | | |
+| `proof_init` | [bytes](#bytes) | | |
+| `proof_height` | [ibc.core.client.v1.Height](#ibc.core.client.v1.Height) | | |
+| `signer` | [string](#string) | | |
+
+
+
+
+
+
+
+
+### MsgChannelCloseConfirmResponse
+MsgChannelCloseConfirmResponse defines the Msg/ChannelCloseConfirm response
+type.
+
+
+
+
+
+
+
+
+### MsgChannelCloseInit
+MsgChannelCloseInit defines a msg sent by a Relayer to Chain A
+to close a channel with Chain B.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `port_id` | [string](#string) | | |
+| `channel_id` | [string](#string) | | |
+| `signer` | [string](#string) | | |
+
+
+
+
+
+
+
+
+### MsgChannelCloseInitResponse
+MsgChannelCloseInitResponse defines the Msg/ChannelCloseInit response type.
+
+
+
+
+
+
+
+
+### MsgChannelOpenAck
+MsgChannelOpenAck defines a msg sent by a Relayer to Chain A to acknowledge
+the change of channel state to TRYOPEN on Chain B.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `port_id` | [string](#string) | | |
+| `channel_id` | [string](#string) | | |
+| `counterparty_channel_id` | [string](#string) | | |
+| `counterparty_version` | [string](#string) | | |
+| `proof_try` | [bytes](#bytes) | | |
+| `proof_height` | [ibc.core.client.v1.Height](#ibc.core.client.v1.Height) | | |
+| `signer` | [string](#string) | | |
+
+
+
+
+
+
+
+
+### MsgChannelOpenAckResponse
+MsgChannelOpenAckResponse defines the Msg/ChannelOpenAck response type.
+
+
+
+
+
+
+
+
+### MsgChannelOpenConfirm
+MsgChannelOpenConfirm defines a msg sent by a Relayer to Chain B to
+acknowledge the change of channel state to OPEN on Chain A.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `port_id` | [string](#string) | | |
+| `channel_id` | [string](#string) | | |
+| `proof_ack` | [bytes](#bytes) | | |
+| `proof_height` | [ibc.core.client.v1.Height](#ibc.core.client.v1.Height) | | |
+| `signer` | [string](#string) | | |
+
+
+
+
+
+
+
+
+### MsgChannelOpenConfirmResponse
+MsgChannelOpenConfirmResponse defines the Msg/ChannelOpenConfirm response
+type.
+
+
+
+
+
+
+
+
+### MsgChannelOpenInit
+MsgChannelOpenInit defines an sdk.Msg to initialize a channel handshake. It
+is called by a relayer on Chain A.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `port_id` | [string](#string) | | |
+| `channel` | [Channel](#ibc.core.channel.v1.Channel) | | |
+| `signer` | [string](#string) | | |
+
+
+
+
+
+
+
+
+### MsgChannelOpenInitResponse
+MsgChannelOpenInitResponse defines the Msg/ChannelOpenInit response type.
+
+
+
+
+
+
+
+
+### MsgChannelOpenTry
+MsgChannelOpenTry defines a msg sent by a Relayer to try to open a channel
+on Chain B.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `port_id` | [string](#string) | | |
+| `previous_channel_id` | [string](#string) | | in the case of crossing hellos, when both chains call OpenInit, we need the channel identifier of the previous channel in state INIT |
+| `channel` | [Channel](#ibc.core.channel.v1.Channel) | | |
+| `counterparty_version` | [string](#string) | | |
+| `proof_init` | [bytes](#bytes) | | |
+| `proof_height` | [ibc.core.client.v1.Height](#ibc.core.client.v1.Height) | | |
+| `signer` | [string](#string) | | |
+
+
+
+
+
+
+
+
+### MsgChannelOpenTryResponse
+MsgChannelOpenTryResponse defines the Msg/ChannelOpenTry response type.
+
+
+
+
+
+
+
+
+### MsgRecvPacket
+MsgRecvPacket receives an incoming IBC packet
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `packet` | [Packet](#ibc.core.channel.v1.Packet) | | |
+| `proof_commitment` | [bytes](#bytes) | | |
+| `proof_height` | [ibc.core.client.v1.Height](#ibc.core.client.v1.Height) | | |
+| `signer` | [string](#string) | | |
+
+
+
+
+
+
+
+
+### MsgRecvPacketResponse
+MsgRecvPacketResponse defines the Msg/RecvPacket response type.
+
+
+
+
+
+
+
+
+### MsgTimeout
+MsgTimeout receives a timed-out packet
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `packet` | [Packet](#ibc.core.channel.v1.Packet) | | |
+| `proof_unreceived` | [bytes](#bytes) | | |
+| `proof_height` | [ibc.core.client.v1.Height](#ibc.core.client.v1.Height) | | |
+| `next_sequence_recv` | [uint64](#uint64) | | |
+| `signer` | [string](#string) | | |
+
+
+
+
+
+
+
+
+### MsgTimeoutOnClose
+MsgTimeoutOnClose times out a packet upon counterparty channel closure.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `packet` | [Packet](#ibc.core.channel.v1.Packet) | | |
+| `proof_unreceived` | [bytes](#bytes) | | |
+| `proof_close` | [bytes](#bytes) | | |
+| `proof_height` | [ibc.core.client.v1.Height](#ibc.core.client.v1.Height) | | |
+| `next_sequence_recv` | [uint64](#uint64) | | |
+| `signer` | [string](#string) | | |
+
+
+
+
+
+
+
+
+### MsgTimeoutOnCloseResponse
+MsgTimeoutOnCloseResponse defines the Msg/TimeoutOnClose response type.
+
+
+
+
+
+
+
+
+### MsgTimeoutResponse
+MsgTimeoutResponse defines the Msg/Timeout response type.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+### Msg
+Msg defines the ibc/channel Msg service.
+
+| Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint |
+| ----------- | ------------ | ------------- | ------------| ------- | -------- |
+| `ChannelOpenInit` | [MsgChannelOpenInit](#ibc.core.channel.v1.MsgChannelOpenInit) | [MsgChannelOpenInitResponse](#ibc.core.channel.v1.MsgChannelOpenInitResponse) | ChannelOpenInit defines a rpc handler method for MsgChannelOpenInit. | |
+| `ChannelOpenTry` | [MsgChannelOpenTry](#ibc.core.channel.v1.MsgChannelOpenTry) | [MsgChannelOpenTryResponse](#ibc.core.channel.v1.MsgChannelOpenTryResponse) | ChannelOpenTry defines a rpc handler method for MsgChannelOpenTry. | |
+| `ChannelOpenAck` | [MsgChannelOpenAck](#ibc.core.channel.v1.MsgChannelOpenAck) | [MsgChannelOpenAckResponse](#ibc.core.channel.v1.MsgChannelOpenAckResponse) | ChannelOpenAck defines a rpc handler method for MsgChannelOpenAck. | |
+| `ChannelOpenConfirm` | [MsgChannelOpenConfirm](#ibc.core.channel.v1.MsgChannelOpenConfirm) | [MsgChannelOpenConfirmResponse](#ibc.core.channel.v1.MsgChannelOpenConfirmResponse) | ChannelOpenConfirm defines a rpc handler method for MsgChannelOpenConfirm. | |
+| `ChannelCloseInit` | [MsgChannelCloseInit](#ibc.core.channel.v1.MsgChannelCloseInit) | [MsgChannelCloseInitResponse](#ibc.core.channel.v1.MsgChannelCloseInitResponse) | ChannelCloseInit defines a rpc handler method for MsgChannelCloseInit. | |
+| `ChannelCloseConfirm` | [MsgChannelCloseConfirm](#ibc.core.channel.v1.MsgChannelCloseConfirm) | [MsgChannelCloseConfirmResponse](#ibc.core.channel.v1.MsgChannelCloseConfirmResponse) | ChannelCloseConfirm defines a rpc handler method for MsgChannelCloseConfirm. | |
+| `RecvPacket` | [MsgRecvPacket](#ibc.core.channel.v1.MsgRecvPacket) | [MsgRecvPacketResponse](#ibc.core.channel.v1.MsgRecvPacketResponse) | RecvPacket defines a rpc handler method for MsgRecvPacket. | |
+| `Timeout` | [MsgTimeout](#ibc.core.channel.v1.MsgTimeout) | [MsgTimeoutResponse](#ibc.core.channel.v1.MsgTimeoutResponse) | Timeout defines a rpc handler method for MsgTimeout. | |
+| `TimeoutOnClose` | [MsgTimeoutOnClose](#ibc.core.channel.v1.MsgTimeoutOnClose) | [MsgTimeoutOnCloseResponse](#ibc.core.channel.v1.MsgTimeoutOnCloseResponse) | TimeoutOnClose defines a rpc handler method for MsgTimeoutOnClose. | |
+| `Acknowledgement` | [MsgAcknowledgement](#ibc.core.channel.v1.MsgAcknowledgement) | [MsgAcknowledgementResponse](#ibc.core.channel.v1.MsgAcknowledgementResponse) | Acknowledgement defines a rpc handler method for MsgAcknowledgement. | |
+
+
+
+
+
+
+Top
+
+## ibc/core/client/v1/genesis.proto
+
+
+
+
+
+### GenesisMetadata
+GenesisMetadata defines the genesis type for metadata that clients may return
+with ExportMetadata
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `key` | [bytes](#bytes) | | store key of metadata without clientID-prefix |
+| `value` | [bytes](#bytes) | | metadata value |
+
+
+
+
+
+
+
+
+### GenesisState
+GenesisState defines the ibc client submodule's genesis state.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `clients` | [IdentifiedClientState](#ibc.core.client.v1.IdentifiedClientState) | repeated | client states with their corresponding identifiers |
+| `clients_consensus` | [ClientConsensusStates](#ibc.core.client.v1.ClientConsensusStates) | repeated | consensus states from each client |
+| `clients_metadata` | [IdentifiedGenesisMetadata](#ibc.core.client.v1.IdentifiedGenesisMetadata) | repeated | metadata from each client |
+| `params` | [Params](#ibc.core.client.v1.Params) | | |
+| `create_localhost` | [bool](#bool) | | create localhost on initialization |
+| `next_client_sequence` | [uint64](#uint64) | | the sequence for the next generated client identifier |
+
+
+
+
+
+
+
+
+### IdentifiedGenesisMetadata
+IdentifiedGenesisMetadata has the client metadata with the corresponding
+client id.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `client_id` | [string](#string) | | |
+| `client_metadata` | [GenesisMetadata](#ibc.core.client.v1.GenesisMetadata) | repeated | |
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+Top
+
+## ibc/core/client/v1/query.proto
+
+
+
+
+
+### QueryClientParamsRequest
+QueryClientParamsRequest is the request type for the Query/ClientParams RPC
+method.
+
+
+
+
+
+
+
+
+### QueryClientParamsResponse
+QueryClientParamsResponse is the response type for the Query/ClientParams RPC
+method.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `params` | [Params](#ibc.core.client.v1.Params) | | params defines the parameters of the module. |
+
+
+
+
+
+
+
+
+### QueryClientStateRequest
+QueryClientStateRequest is the request type for the Query/ClientState RPC
+method
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `client_id` | [string](#string) | | client state unique identifier |
+
+
+
+
+
+
+
+
+### QueryClientStateResponse
+QueryClientStateResponse is the response type for the Query/ClientState RPC
+method. Besides the client state, it includes a proof and the height from
+which the proof was retrieved.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `client_state` | [google.protobuf.Any](#google.protobuf.Any) | | client state associated with the request identifier |
+| `proof` | [bytes](#bytes) | | merkle proof of existence |
+| `proof_height` | [Height](#ibc.core.client.v1.Height) | | height at which the proof was retrieved |
+
+
+
+
+
+
+
+
+### QueryClientStatesRequest
+QueryClientStatesRequest is the request type for the Query/ClientStates RPC
+method
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | pagination request |
+
+
+
+
+
+
+
+
+### QueryClientStatesResponse
+QueryClientStatesResponse is the response type for the Query/ClientStates RPC
+method.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `client_states` | [IdentifiedClientState](#ibc.core.client.v1.IdentifiedClientState) | repeated | list of stored ClientStates of the chain. |
+| `pagination` | [cosmos.base.query.v1beta1.PageResponse](#cosmos.base.query.v1beta1.PageResponse) | | pagination response |
+
+
+
+
+
+
+
+
+### QueryClientStatusRequest
+QueryClientStatusRequest is the request type for the Query/ClientStatus RPC
+method
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `client_id` | [string](#string) | | client unique identifier |
+
+
+
+
+
+
+
+
+### QueryClientStatusResponse
+QueryClientStatusResponse is the response type for the Query/ClientStatus RPC
+method. It returns the current status of the IBC client.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `status` | [string](#string) | | |
+
+
+
+
+
+
+
+
+### QueryConsensusStateRequest
+QueryConsensusStateRequest is the request type for the Query/ConsensusState
+RPC method. The corresponding response includes a proof and the height from
+which the proof was retrieved.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `client_id` | [string](#string) | | client identifier |
+| `revision_number` | [uint64](#uint64) | | consensus state revision number |
+| `revision_height` | [uint64](#uint64) | | consensus state revision height |
+| `latest_height` | [bool](#bool) | | latest_height overrides the height field and queries the latest stored ConsensusState |
+
+
+
+
+
+
+
+
+### QueryConsensusStateResponse
+QueryConsensusStateResponse is the response type for the Query/ConsensusState
+RPC method
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `consensus_state` | [google.protobuf.Any](#google.protobuf.Any) | | consensus state associated with the client identifier at the given height |
+| `proof` | [bytes](#bytes) | | merkle proof of existence |
+| `proof_height` | [Height](#ibc.core.client.v1.Height) | | height at which the proof was retrieved |
+
+
+
+
+
+
+
+
+### QueryConsensusStatesRequest
+QueryConsensusStatesRequest is the request type for the Query/ConsensusStates
+RPC method.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `client_id` | [string](#string) | | client identifier |
+| `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | pagination request |
+
+
+
+
+
+
+
+
+### QueryConsensusStatesResponse
+QueryConsensusStatesResponse is the response type for the
+Query/ConsensusStates RPC method
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `consensus_states` | [ConsensusStateWithHeight](#ibc.core.client.v1.ConsensusStateWithHeight) | repeated | consensus states associated with the identifier |
+| `pagination` | [cosmos.base.query.v1beta1.PageResponse](#cosmos.base.query.v1beta1.PageResponse) | | pagination response |
+
+
+
+
+
+
+
+
+### QueryUpgradedClientStateRequest
+QueryUpgradedClientStateRequest is the request type for the
+Query/UpgradedClientState RPC method
+
+
+
+
+
+
+
+
+### QueryUpgradedClientStateResponse
+QueryUpgradedClientStateResponse is the response type for the
+Query/UpgradedClientState RPC method.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `upgraded_client_state` | [google.protobuf.Any](#google.protobuf.Any) | | client state associated with the request identifier |
+
+
+
+
+
+
+
+
+### QueryUpgradedConsensusStateRequest
+QueryUpgradedConsensusStateRequest is the request type for the
+Query/UpgradedConsensusState RPC method
+
+
+
+
+
+
+
+
+### QueryUpgradedConsensusStateResponse
+QueryUpgradedConsensusStateResponse is the response type for the
+Query/UpgradedConsensusState RPC method.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `upgraded_consensus_state` | [google.protobuf.Any](#google.protobuf.Any) | | Consensus state associated with the request identifier |
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+### Query
+Query defines the gRPC querier service
+
+| Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint |
+| ----------- | ------------ | ------------- | ------------| ------- | -------- |
+| `ClientState` | [QueryClientStateRequest](#ibc.core.client.v1.QueryClientStateRequest) | [QueryClientStateResponse](#ibc.core.client.v1.QueryClientStateResponse) | ClientState queries an IBC light client. | GET|/ibc/core/client/v1/client_states/{client_id}|
+| `ClientStates` | [QueryClientStatesRequest](#ibc.core.client.v1.QueryClientStatesRequest) | [QueryClientStatesResponse](#ibc.core.client.v1.QueryClientStatesResponse) | ClientStates queries all the IBC light clients of a chain. | GET|/ibc/core/client/v1/client_states|
+| `ConsensusState` | [QueryConsensusStateRequest](#ibc.core.client.v1.QueryConsensusStateRequest) | [QueryConsensusStateResponse](#ibc.core.client.v1.QueryConsensusStateResponse) | ConsensusState queries a consensus state associated with a client state at a given height. | GET|/ibc/core/client/v1/consensus_states/{client_id}/revision/{revision_number}/height/{revision_height}|
+| `ConsensusStates` | [QueryConsensusStatesRequest](#ibc.core.client.v1.QueryConsensusStatesRequest) | [QueryConsensusStatesResponse](#ibc.core.client.v1.QueryConsensusStatesResponse) | ConsensusStates queries all the consensus state associated with a given client. | GET|/ibc/core/client/v1/consensus_states/{client_id}|
+| `ClientStatus` | [QueryClientStatusRequest](#ibc.core.client.v1.QueryClientStatusRequest) | [QueryClientStatusResponse](#ibc.core.client.v1.QueryClientStatusResponse) | Status queries the status of an IBC client. | GET|/ibc/core/client/v1/client_status/{client_id}|
+| `ClientParams` | [QueryClientParamsRequest](#ibc.core.client.v1.QueryClientParamsRequest) | [QueryClientParamsResponse](#ibc.core.client.v1.QueryClientParamsResponse) | ClientParams queries all parameters of the ibc client. | GET|/ibc/client/v1/params|
+| `UpgradedClientState` | [QueryUpgradedClientStateRequest](#ibc.core.client.v1.QueryUpgradedClientStateRequest) | [QueryUpgradedClientStateResponse](#ibc.core.client.v1.QueryUpgradedClientStateResponse) | UpgradedClientState queries an Upgraded IBC light client. | GET|/ibc/core/client/v1/upgraded_client_states|
+| `UpgradedConsensusState` | [QueryUpgradedConsensusStateRequest](#ibc.core.client.v1.QueryUpgradedConsensusStateRequest) | [QueryUpgradedConsensusStateResponse](#ibc.core.client.v1.QueryUpgradedConsensusStateResponse) | UpgradedConsensusState queries an Upgraded IBC consensus state. | GET|/ibc/core/client/v1/upgraded_consensus_states|
+
+
+
+
+
+
+Top
+
+## ibc/core/client/v1/tx.proto
+
+
+
+
+
+### MsgCreateClient
+MsgCreateClient defines a message to create an IBC client
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `client_state` | [google.protobuf.Any](#google.protobuf.Any) | | light client state |
+| `consensus_state` | [google.protobuf.Any](#google.protobuf.Any) | | consensus state associated with the client that corresponds to a given height. |
+| `signer` | [string](#string) | | signer address |
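+
+The `google.protobuf.Any` fields above carry a concrete client and consensus state packed by type URL. Below is a hedged sketch of constructing the message with the Cosmos SDK's Any helper; the import paths and the Tendermint light-client types are assumptions for the example.
+
+```go
+package example
+
+import (
+    codectypes "github.com/cosmos/cosmos-sdk/codec/types"
+
+    clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+    ibctmtypes "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types"
+)
+
+// exampleMsgCreateClient packs concrete Tendermint client and consensus
+// states into the Any fields shown above.
+func exampleMsgCreateClient(
+    cs *ibctmtypes.ClientState,
+    consState *ibctmtypes.ConsensusState,
+    signer string,
+) (*clienttypes.MsgCreateClient, error) {
+    anyClient, err := codectypes.NewAnyWithValue(cs)
+    if err != nil {
+        return nil, err
+    }
+    anyConsensus, err := codectypes.NewAnyWithValue(consState)
+    if err != nil {
+        return nil, err
+    }
+    return &clienttypes.MsgCreateClient{
+        ClientState:    anyClient,
+        ConsensusState: anyConsensus,
+        Signer:         signer,
+    }, nil
+}
+```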
+
+
+
+
+
+
+
+
+### MsgCreateClientResponse
+MsgCreateClientResponse defines the Msg/CreateClient response type.
+
+
+
+
+
+
+
+
+### MsgSubmitMisbehaviour
+MsgSubmitMisbehaviour defines an sdk.Msg type that submits Evidence for
+light client misbehaviour.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `client_id` | [string](#string) | | client unique identifier |
+| `misbehaviour` | [google.protobuf.Any](#google.protobuf.Any) | | misbehaviour used for freezing the light client |
+| `signer` | [string](#string) | | signer address |
+
+
+
+
+
+
+
+
+### MsgSubmitMisbehaviourResponse
+MsgSubmitMisbehaviourResponse defines the Msg/SubmitMisbehaviour response
+type.
+
+
+
+
+
+
+
+
+### MsgUpdateClient
+MsgUpdateClient defines an sdk.Msg to update an IBC client state using
+the given header.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `client_id` | [string](#string) | | client unique identifier |
+| `header` | [google.protobuf.Any](#google.protobuf.Any) | | header to update the light client |
+| `signer` | [string](#string) | | signer address |
+
+
+
+
+
+
+
+
+### MsgUpdateClientResponse
+MsgUpdateClientResponse defines the Msg/UpdateClient response type.
+
+
+
+
+
+
+
+
+### MsgUpgradeClient
+MsgUpgradeClient defines an sdk.Msg to upgrade an IBC client to a new client
+state
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `client_id` | [string](#string) | | client unique identifier |
+| `client_state` | [google.protobuf.Any](#google.protobuf.Any) | | upgraded client state |
+| `consensus_state` | [google.protobuf.Any](#google.protobuf.Any) | | upgraded consensus state, only contains enough information to serve as a basis of trust in update logic |
+| `proof_upgrade_client` | [bytes](#bytes) | | proof that old chain committed to new client |
+| `proof_upgrade_consensus_state` | [bytes](#bytes) | | proof that old chain committed to new consensus state |
+| `signer` | [string](#string) | | signer address |
+
+
+
+
+
+
+
+
+### MsgUpgradeClientResponse
+MsgUpgradeClientResponse defines the Msg/UpgradeClient response type.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+### Msg
+Msg defines the ibc/client Msg service.
+
+| Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint |
+| ----------- | ------------ | ------------- | ------------| ------- | -------- |
+| `CreateClient` | [MsgCreateClient](#ibc.core.client.v1.MsgCreateClient) | [MsgCreateClientResponse](#ibc.core.client.v1.MsgCreateClientResponse) | CreateClient defines a rpc handler method for MsgCreateClient. | |
+| `UpdateClient` | [MsgUpdateClient](#ibc.core.client.v1.MsgUpdateClient) | [MsgUpdateClientResponse](#ibc.core.client.v1.MsgUpdateClientResponse) | UpdateClient defines a rpc handler method for MsgUpdateClient. | |
+| `UpgradeClient` | [MsgUpgradeClient](#ibc.core.client.v1.MsgUpgradeClient) | [MsgUpgradeClientResponse](#ibc.core.client.v1.MsgUpgradeClientResponse) | UpgradeClient defines a rpc handler method for MsgUpgradeClient. | |
+| `SubmitMisbehaviour` | [MsgSubmitMisbehaviour](#ibc.core.client.v1.MsgSubmitMisbehaviour) | [MsgSubmitMisbehaviourResponse](#ibc.core.client.v1.MsgSubmitMisbehaviourResponse) | SubmitMisbehaviour defines a rpc handler method for MsgSubmitMisbehaviour. | |
+
+
+
+
+
+
+Top
+
+## ibc/core/commitment/v1/commitment.proto
+
+
+
+
+
+### MerklePath
+MerklePath is the path used to verify commitment proofs, which can be an
+arbitrary structured object (defined by a commitment type).
+MerklePath is represented from root-to-leaf
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `key_path` | [string](#string) | repeated | |
+
+
+
+
+
+
+
+
+### MerklePrefix
+MerklePrefix is a merkle path prefixed to the key.
+The constructed key from the Path and the key will be append(Path.KeyPath,
+append(Path.KeyPrefix, key...))
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `key_prefix` | [bytes](#bytes) | | |
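+
+The comment above sketches the applied key in Go's append notation. In the simplest case the store-level key is just the merkle prefix bytes followed by the raw key; the tiny illustration below shows only that concatenation (the `"ibc/"` prefix and the path are assumptions, and the full construction also involves the MerklePath key path).
+
+```go
+package main
+
+import "fmt"
+
+// applyPrefix prepends the merkle prefix bytes to a raw key,
+// mirroring the inner append(Path.KeyPrefix, key...) from the comment above.
+func applyPrefix(keyPrefix, key []byte) []byte {
+    out := make([]byte, 0, len(keyPrefix)+len(key))
+    out = append(out, keyPrefix...)
+    out = append(out, key...)
+    return out
+}
+
+func main() {
+    fmt.Printf("%s\n", applyPrefix([]byte("ibc/"), []byte("clients/07-tendermint-0/clientState")))
+}
+```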
+
+
+
+
+
+
+
+
+### MerkleProof
+MerkleProof is a wrapper type over a chain of CommitmentProofs.
+It demonstrates membership or non-membership for an element or set of
+elements, verifiable in conjunction with a known commitment root. Proofs
+should be succinct.
+MerkleProofs are ordered from leaf-to-root
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `proofs` | [ics23.CommitmentProof](#ics23.CommitmentProof) | repeated | |
+
+
+
+
+
+
+
+
+### MerkleRoot
+MerkleRoot defines a merkle root hash.
+In the Cosmos SDK, the AppHash of a block header becomes the root.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `hash` | [bytes](#bytes) | | |
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+Top
+
+## ibc/core/connection/v1/connection.proto
+
+
+
+
+
+### ClientPaths
+ClientPaths defines all the connection paths for a client state.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `paths` | [string](#string) | repeated | list of connection paths |
+
+
+
+
+
+
+
+
+### ConnectionEnd
+ConnectionEnd defines a stateful object on a chain connected to another
+separate one.
+NOTE: there must only be 2 defined ConnectionEnds to establish
+a connection between two chains.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `client_id` | [string](#string) | | client associated with this connection. |
+| `versions` | [Version](#ibc.core.connection.v1.Version) | repeated | IBC version which can be utilised to determine encodings or protocols for channels or packets utilising this connection. |
+| `state` | [State](#ibc.core.connection.v1.State) | | current state of the connection end. |
+| `counterparty` | [Counterparty](#ibc.core.connection.v1.Counterparty) | | counterparty chain associated with this connection. |
+| `delay_period` | [uint64](#uint64) | | delay period that must pass before a consensus state can be used for packet-verification NOTE: delay period logic is only implemented by some clients. |
+
+
+
+
+
+
+
+
+### ConnectionPaths
+ConnectionPaths defines all the connection paths for a given client state.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `client_id` | [string](#string) | | client state unique identifier |
+| `paths` | [string](#string) | repeated | list of connection paths |
+
+
+
+
+
+
+
+
+### Counterparty
+Counterparty defines the counterparty chain associated with a connection end.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `client_id` | [string](#string) | | identifies the client on the counterparty chain associated with a given connection. |
+| `connection_id` | [string](#string) | | identifies the connection end on the counterparty chain associated with a given connection. |
+| `prefix` | [ibc.core.commitment.v1.MerklePrefix](#ibc.core.commitment.v1.MerklePrefix) | | commitment merkle prefix of the counterparty chain. |
+
+
+
+
+
+
+
+
+### IdentifiedConnection
+IdentifiedConnection defines a connection with additional connection
+identifier field.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `id` | [string](#string) | | connection identifier. |
+| `client_id` | [string](#string) | | client associated with this connection. |
+| `versions` | [Version](#ibc.core.connection.v1.Version) | repeated | IBC version which can be utilised to determine encodings or protocols for channels or packets utilising this connection |
+| `state` | [State](#ibc.core.connection.v1.State) | | current state of the connection end. |
+| `counterparty` | [Counterparty](#ibc.core.connection.v1.Counterparty) | | counterparty chain associated with this connection. |
+| `delay_period` | [uint64](#uint64) | | delay period associated with this connection. |
+
+
+
+
+
+
+
+
+### Version
+Version defines the versioning scheme used to negotiate the IBC version in
+the connection handshake.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `identifier` | [string](#string) | | unique version identifier |
+| `features` | [string](#string) | repeated | list of features compatible with the specified identifier |
+
+
+
+
+
+
+
+
+
+
+### State
+State defines if a connection is in one of the following states:
+INIT, TRYOPEN, OPEN or UNINITIALIZED.
+
+| Name | Number | Description |
+| ---- | ------ | ----------- |
+| STATE_UNINITIALIZED_UNSPECIFIED | 0 | Default State |
+| STATE_INIT | 1 | A connection end has just started the opening handshake. |
+| STATE_TRYOPEN | 2 | A connection end has acknowledged the handshake step on the counterparty chain. |
+| STATE_OPEN | 3 | A connection end has completed the handshake. |
+
+
+
+
+
+
+
+
+
+
+
+Top
+
+## ibc/core/connection/v1/genesis.proto
+
+
+
+
+
+### GenesisState
+GenesisState defines the ibc connection submodule's genesis state.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `connections` | [IdentifiedConnection](#ibc.core.connection.v1.IdentifiedConnection) | repeated | |
+| `client_connection_paths` | [ConnectionPaths](#ibc.core.connection.v1.ConnectionPaths) | repeated | |
+| `next_connection_sequence` | [uint64](#uint64) | | the sequence for the next generated connection identifier |
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+Top
+
+## ibc/core/connection/v1/query.proto
+
+
+
+
+
+### QueryClientConnectionsRequest
+QueryClientConnectionsRequest is the request type for the
+Query/ClientConnections RPC method
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `client_id` | [string](#string) | | client identifier associated with a connection |
+
+
+
+
+
+
+
+
+### QueryClientConnectionsResponse
+QueryClientConnectionsResponse is the response type for the
+Query/ClientConnections RPC method
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `connection_paths` | [string](#string) | repeated | slice of all the connection paths associated with a client. |
+| `proof` | [bytes](#bytes) | | merkle proof of existence |
+| `proof_height` | [ibc.core.client.v1.Height](#ibc.core.client.v1.Height) | | height at which the proof was generated |
+
+
+
+
+
+
+
+
+### QueryConnectionClientStateRequest
+QueryConnectionClientStateRequest is the request type for the
+Query/ConnectionClientState RPC method
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `connection_id` | [string](#string) | | connection identifier |
+
+
+
+
+
+
+
+
+### QueryConnectionClientStateResponse
+QueryConnectionClientStateResponse is the response type for the
+Query/ConnectionClientState RPC method
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `identified_client_state` | [ibc.core.client.v1.IdentifiedClientState](#ibc.core.client.v1.IdentifiedClientState) | | client state associated with the connection |
+| `proof` | [bytes](#bytes) | | merkle proof of existence |
+| `proof_height` | [ibc.core.client.v1.Height](#ibc.core.client.v1.Height) | | height at which the proof was retrieved |
+
+
+
+
+
+
+
+
+### QueryConnectionConsensusStateRequest
+QueryConnectionConsensusStateRequest is the request type for the
+Query/ConnectionConsensusState RPC method
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `connection_id` | [string](#string) | | connection identifier |
+| `revision_number` | [uint64](#uint64) | | |
+| `revision_height` | [uint64](#uint64) | | |
+
+
+
+
+
+
+
+
+### QueryConnectionConsensusStateResponse
+QueryConnectionConsensusStateResponse is the response type for the
+Query/ConnectionConsensusState RPC method
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `consensus_state` | [google.protobuf.Any](#google.protobuf.Any) | | consensus state associated with the connection |
+| `client_id` | [string](#string) | | client ID associated with the consensus state |
+| `proof` | [bytes](#bytes) | | merkle proof of existence |
+| `proof_height` | [ibc.core.client.v1.Height](#ibc.core.client.v1.Height) | | height at which the proof was retrieved |
+
+
+
+
+
+
+
+
+### QueryConnectionRequest
+QueryConnectionRequest is the request type for the Query/Connection RPC
+method
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `connection_id` | [string](#string) | | connection unique identifier |
+
+
+
+
+
+
+
+
+### QueryConnectionResponse
+QueryConnectionResponse is the response type for the Query/Connection RPC
+method. Besides the connection end, it includes a proof and the height from
+which the proof was retrieved.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `connection` | [ConnectionEnd](#ibc.core.connection.v1.ConnectionEnd) | | connection associated with the request identifier |
+| `proof` | [bytes](#bytes) | | merkle proof of existence |
+| `proof_height` | [ibc.core.client.v1.Height](#ibc.core.client.v1.Height) | | height at which the proof was retrieved |
+
+
+
+
+
+
+
+
+### QueryConnectionsRequest
+QueryConnectionsRequest is the request type for the Query/Connections RPC
+method
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | |
+
+
+
+
+
+
+
+
+### QueryConnectionsResponse
+QueryConnectionsResponse is the response type for the Query/Connections RPC
+method.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `connections` | [IdentifiedConnection](#ibc.core.connection.v1.IdentifiedConnection) | repeated | list of stored connections of the chain. |
+| `pagination` | [cosmos.base.query.v1beta1.PageResponse](#cosmos.base.query.v1beta1.PageResponse) | | pagination response |
+| `height` | [ibc.core.client.v1.Height](#ibc.core.client.v1.Height) | | query block height |
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+### Query
+Query defines the gRPC querier service
+
+| Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint |
+| ----------- | ------------ | ------------- | ------------| ------- | -------- |
+| `Connection` | [QueryConnectionRequest](#ibc.core.connection.v1.QueryConnectionRequest) | [QueryConnectionResponse](#ibc.core.connection.v1.QueryConnectionResponse) | Connection queries an IBC connection end. | GET|/ibc/core/connection/v1/connections/{connection_id}|
+| `Connections` | [QueryConnectionsRequest](#ibc.core.connection.v1.QueryConnectionsRequest) | [QueryConnectionsResponse](#ibc.core.connection.v1.QueryConnectionsResponse) | Connections queries all the IBC connections of a chain. | GET|/ibc/core/connection/v1/connections|
+| `ClientConnections` | [QueryClientConnectionsRequest](#ibc.core.connection.v1.QueryClientConnectionsRequest) | [QueryClientConnectionsResponse](#ibc.core.connection.v1.QueryClientConnectionsResponse) | ClientConnections queries the connection paths associated with a client state. | GET|/ibc/core/connection/v1/client_connections/{client_id}|
+| `ConnectionClientState` | [QueryConnectionClientStateRequest](#ibc.core.connection.v1.QueryConnectionClientStateRequest) | [QueryConnectionClientStateResponse](#ibc.core.connection.v1.QueryConnectionClientStateResponse) | ConnectionClientState queries the client state associated with the connection. | GET|/ibc/core/connection/v1/connections/{connection_id}/client_state|
+| `ConnectionConsensusState` | [QueryConnectionConsensusStateRequest](#ibc.core.connection.v1.QueryConnectionConsensusStateRequest) | [QueryConnectionConsensusStateResponse](#ibc.core.connection.v1.QueryConnectionConsensusStateResponse) | ConnectionConsensusState queries the consensus state associated with the connection. | GET|/ibc/core/connection/v1/connections/{connection_id}/consensus_state/revision/{revision_number}/height/{revision_height}|
+
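+As a usage sketch only (assuming a node serving gRPC on `localhost:9090` and the generated Go query client for this package under `github.com/cosmos/ibc-go/modules/core/03-connection/types`), a connection end can be queried roughly as follows:
+
+```go
+package main
+
+import (
+	"context"
+	"fmt"
+	"log"
+
+	"google.golang.org/grpc"
+
+	connectiontypes "github.com/cosmos/ibc-go/modules/core/03-connection/types"
+)
+
+func main() {
+	// Assumption: a chain node exposes its gRPC endpoint on localhost:9090.
+	grpcConn, err := grpc.Dial("localhost:9090", grpc.WithInsecure())
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer grpcConn.Close()
+
+	queryClient := connectiontypes.NewQueryClient(grpcConn)
+
+	// Connection queries a single IBC connection end by its identifier.
+	res, err := queryClient.Connection(context.Background(),
+		&connectiontypes.QueryConnectionRequest{ConnectionId: "connection-0"})
+	if err != nil {
+		log.Fatal(err)
+	}
+	fmt.Println(res.Connection.State, res.Connection.ClientId)
+}
+```
+
+The same data is exposed over REST at `GET /ibc/core/connection/v1/connections/{connection_id}`, as listed in the table above.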
+
+
+
+
+
+Top
+
+## ibc/core/connection/v1/tx.proto
+
+
+
+
+
+### MsgConnectionOpenAck
+MsgConnectionOpenAck defines a msg sent by a Relayer to Chain A to
+acknowledge the change of connection state to TRYOPEN on Chain B.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `connection_id` | [string](#string) | | |
+| `counterparty_connection_id` | [string](#string) | | |
+| `version` | [Version](#ibc.core.connection.v1.Version) | | |
+| `client_state` | [google.protobuf.Any](#google.protobuf.Any) | | |
+| `proof_height` | [ibc.core.client.v1.Height](#ibc.core.client.v1.Height) | | |
+| `proof_try` | [bytes](#bytes) | | proof of the initialization of the connection on Chain B: `UNINITIALIZED -> TRYOPEN` |
+| `proof_client` | [bytes](#bytes) | | proof of client state included in message |
+| `proof_consensus` | [bytes](#bytes) | | proof of client consensus state |
+| `consensus_height` | [ibc.core.client.v1.Height](#ibc.core.client.v1.Height) | | |
+| `signer` | [string](#string) | | |
+
+
+
+
+
+
+
+
+### MsgConnectionOpenAckResponse
+MsgConnectionOpenAckResponse defines the Msg/ConnectionOpenAck response type.
+
+
+
+
+
+
+
+
+### MsgConnectionOpenConfirm
+MsgConnectionOpenConfirm defines a msg sent by a Relayer to Chain B to
+acknowledge the change of connection state to OPEN on Chain A.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `connection_id` | [string](#string) | | |
+| `proof_ack` | [bytes](#bytes) | | proof for the change of the connection state on Chain A: `INIT -> OPEN` |
+| `proof_height` | [ibc.core.client.v1.Height](#ibc.core.client.v1.Height) | | |
+| `signer` | [string](#string) | | |
+
+
+
+
+
+
+
+
+### MsgConnectionOpenConfirmResponse
+MsgConnectionOpenConfirmResponse defines the Msg/ConnectionOpenConfirm
+response type.
+
+
+
+
+
+
+
+
+### MsgConnectionOpenInit
+MsgConnectionOpenInit defines the msg sent by an account on Chain A to
+initialize a connection with Chain B.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `client_id` | [string](#string) | | |
+| `counterparty` | [Counterparty](#ibc.core.connection.v1.Counterparty) | | |
+| `version` | [Version](#ibc.core.connection.v1.Version) | | |
+| `delay_period` | [uint64](#uint64) | | |
+| `signer` | [string](#string) | | |
+
+
+
+
+
+
+
+
+### MsgConnectionOpenInitResponse
+MsgConnectionOpenInitResponse defines the Msg/ConnectionOpenInit response
+type.
+
+
+
+
+
+
+
+
+### MsgConnectionOpenTry
+MsgConnectionOpenTry defines a msg sent by a Relayer to try to open a
+connection on Chain B.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `client_id` | [string](#string) | | |
+| `previous_connection_id` | [string](#string) | | in the case of crossing hellos, when both chains call OpenInit, we need the connection identifier of the previous connection in state INIT |
+| `client_state` | [google.protobuf.Any](#google.protobuf.Any) | | |
+| `counterparty` | [Counterparty](#ibc.core.connection.v1.Counterparty) | | |
+| `delay_period` | [uint64](#uint64) | | |
+| `counterparty_versions` | [Version](#ibc.core.connection.v1.Version) | repeated | |
+| `proof_height` | [ibc.core.client.v1.Height](#ibc.core.client.v1.Height) | | |
+| `proof_init` | [bytes](#bytes) | | proof of the initialization of the connection on Chain A: `UNINITIALIZED -> INIT` |
+| `proof_client` | [bytes](#bytes) | | proof of client state included in message |
+| `proof_consensus` | [bytes](#bytes) | | proof of client consensus state |
+| `consensus_height` | [ibc.core.client.v1.Height](#ibc.core.client.v1.Height) | | |
+| `signer` | [string](#string) | | |
+
+
+
+
+
+
+
+
+### MsgConnectionOpenTryResponse
+MsgConnectionOpenTryResponse defines the Msg/ConnectionOpenTry response type.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+### Msg
+Msg defines the ibc/connection Msg service.
+
+| Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint |
+| ----------- | ------------ | ------------- | ------------| ------- | -------- |
+| `ConnectionOpenInit` | [MsgConnectionOpenInit](#ibc.core.connection.v1.MsgConnectionOpenInit) | [MsgConnectionOpenInitResponse](#ibc.core.connection.v1.MsgConnectionOpenInitResponse) | ConnectionOpenInit defines a rpc handler method for MsgConnectionOpenInit. | |
+| `ConnectionOpenTry` | [MsgConnectionOpenTry](#ibc.core.connection.v1.MsgConnectionOpenTry) | [MsgConnectionOpenTryResponse](#ibc.core.connection.v1.MsgConnectionOpenTryResponse) | ConnectionOpenTry defines a rpc handler method for MsgConnectionOpenTry. | |
+| `ConnectionOpenAck` | [MsgConnectionOpenAck](#ibc.core.connection.v1.MsgConnectionOpenAck) | [MsgConnectionOpenAckResponse](#ibc.core.connection.v1.MsgConnectionOpenAckResponse) | ConnectionOpenAck defines a rpc handler method for MsgConnectionOpenAck. | |
+| `ConnectionOpenConfirm` | [MsgConnectionOpenConfirm](#ibc.core.connection.v1.MsgConnectionOpenConfirm) | [MsgConnectionOpenConfirmResponse](#ibc.core.connection.v1.MsgConnectionOpenConfirmResponse) | ConnectionOpenConfirm defines a rpc handler method for MsgConnectionOpenConfirm. | |
+
+
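+As a hedged sketch (assuming the generated Go types for this package; all identifiers and the signer address below are placeholders), a relayer could assemble a `MsgConnectionOpenInit` like this before signing and broadcasting it in a transaction:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	connectiontypes "github.com/cosmos/ibc-go/modules/core/03-connection/types"
+	commitmenttypes "github.com/cosmos/ibc-go/modules/core/23-commitment/types"
+)
+
+func main() {
+	// Counterparty: the client on chain B and its commitment store prefix.
+	counterparty := connectiontypes.Counterparty{
+		ClientId: "07-tendermint-0", // hypothetical client identifier on chain B
+		Prefix:   commitmenttypes.NewMerklePrefix([]byte("ibc")),
+	}
+
+	msg := connectiontypes.MsgConnectionOpenInit{
+		ClientId:     "07-tendermint-0", // hypothetical client on chain A tracking chain B
+		Counterparty: counterparty,
+		Version:      connectiontypes.NewVersion("1", []string{"ORDER_ORDERED", "ORDER_UNORDERED"}),
+		DelayPeriod:  0,
+		Signer:       "cosmos1...", // placeholder bech32 address of the submitting account
+	}
+	fmt.Println(msg.ClientId, msg.DelayPeriod)
+}
+```
+
+The remaining handshake messages (`MsgConnectionOpenTry`, `MsgConnectionOpenAck`, `MsgConnectionOpenConfirm`) additionally carry the proofs and proof heights listed in their field tables above.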
+
+
+
+
+Top
+
+## ibc/core/types/v1/genesis.proto
+
+
+
+
+
+### GenesisState
+GenesisState defines the ibc module's genesis state.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `client_genesis` | [ibc.core.client.v1.GenesisState](#ibc.core.client.v1.GenesisState) | | ICS002 - Clients genesis state |
+| `connection_genesis` | [ibc.core.connection.v1.GenesisState](#ibc.core.connection.v1.GenesisState) | | ICS003 - Connections genesis state |
+| `channel_genesis` | [ibc.core.channel.v1.GenesisState](#ibc.core.channel.v1.GenesisState) | | ICS004 - Channel genesis state |
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+Top
+
+## ibc/lightclients/localhost/v1/localhost.proto
+
+
+
+
+
+### ClientState
+ClientState defines a loopback (localhost) client. It requires (read-only)
+access to keys outside the client prefix.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `chain_id` | [string](#string) | | self chain ID |
+| `height` | [ibc.core.client.v1.Height](#ibc.core.client.v1.Height) | | self latest block height |
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+Top
+
+## ibc/lightclients/solomachine/v1/solomachine.proto
+
+
+
+
+
+### ChannelStateData
+ChannelStateData returns the SignBytes data for channel state
+verification.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `path` | [bytes](#bytes) | | |
+| `channel` | [ibc.core.channel.v1.Channel](#ibc.core.channel.v1.Channel) | | |
+
+
+
+
+
+
+
+
+### ClientState
+ClientState defines a solo machine client that tracks the current consensus
+state and if the client is frozen.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `sequence` | [uint64](#uint64) | | latest sequence of the client state |
+| `frozen_sequence` | [uint64](#uint64) | | frozen sequence of the solo machine |
+| `consensus_state` | [ConsensusState](#ibc.lightclients.solomachine.v1.ConsensusState) | | |
+| `allow_update_after_proposal` | [bool](#bool) | | when set to true, will allow governance to update a solo machine client. The client will be unfrozen if it is frozen. |
+
+
+
+
+
+
+
+
+### ClientStateData
+ClientStateData returns the SignBytes data for client state verification.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `path` | [bytes](#bytes) | | |
+| `client_state` | [google.protobuf.Any](#google.protobuf.Any) | | |
+
+
+
+
+
+
+
+
+### ConnectionStateData
+ConnectionStateData returns the SignBytes data for connection state
+verification.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `path` | [bytes](#bytes) | | |
+| `connection` | [ibc.core.connection.v1.ConnectionEnd](#ibc.core.connection.v1.ConnectionEnd) | | |
+
+
+
+
+
+
+
+
+### ConsensusState
+ConsensusState defines a solo machine consensus state. The sequence of a
+consensus state is contained in the "height" key used in storing the
+consensus state.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `public_key` | [google.protobuf.Any](#google.protobuf.Any) | | public key of the solo machine |
+| `diversifier` | [string](#string) | | diversifier allows the same public key to be re-used across different solo machine clients (potentially on different chains) without being considered misbehaviour. |
+| `timestamp` | [uint64](#uint64) | | |
+
+
+
+
+
+
+
+
+### ConsensusStateData
+ConsensusStateData returns the SignBytes data for consensus state
+verification.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `path` | [bytes](#bytes) | | |
+| `consensus_state` | [google.protobuf.Any](#google.protobuf.Any) | | |
+
+
+
+
+
+
+
+
+### Header
+Header defines a solo machine consensus header
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `sequence` | [uint64](#uint64) | | sequence to update solo machine public key at |
+| `timestamp` | [uint64](#uint64) | | |
+| `signature` | [bytes](#bytes) | | |
+| `new_public_key` | [google.protobuf.Any](#google.protobuf.Any) | | |
+| `new_diversifier` | [string](#string) | | |
+
+
+
+
+
+
+
+
+### HeaderData
+HeaderData returns the SignBytes data for update verification.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `new_pub_key` | [google.protobuf.Any](#google.protobuf.Any) | | header public key |
+| `new_diversifier` | [string](#string) | | header diversifier |
+
+
+
+
+
+
+
+
+### Misbehaviour
+Misbehaviour defines misbehaviour for a solo machine which consists
+of a sequence and two signatures over different messages at that sequence.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `client_id` | [string](#string) | | |
+| `sequence` | [uint64](#uint64) | | |
+| `signature_one` | [SignatureAndData](#ibc.lightclients.solomachine.v1.SignatureAndData) | | |
+| `signature_two` | [SignatureAndData](#ibc.lightclients.solomachine.v1.SignatureAndData) | | |
+
+
+
+
+
+
+
+
+### NextSequenceRecvData
+NextSequenceRecvData returns the SignBytes data for verification of the next
+sequence to be received.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `path` | [bytes](#bytes) | | |
+| `next_seq_recv` | [uint64](#uint64) | | |
+
+
+
+
+
+
+
+
+### PacketAcknowledgementData
+PacketAcknowledgementData returns the SignBytes data for acknowledgement
+verification.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `path` | [bytes](#bytes) | | |
+| `acknowledgement` | [bytes](#bytes) | | |
+
+
+
+
+
+
+
+
+### PacketCommitmentData
+PacketCommitmentData returns the SignBytes data for packet commitment
+verification.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `path` | [bytes](#bytes) | | |
+| `commitment` | [bytes](#bytes) | | |
+
+
+
+
+
+
+
+
+### PacketReceiptAbsenceData
+PacketReceiptAbsenceData returns the SignBytes data for
+packet receipt absence verification.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `path` | [bytes](#bytes) | | |
+
+
+
+
+
+
+
+
+### SignBytes
+SignBytes defines the signed bytes used for signature verification.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `sequence` | [uint64](#uint64) | | |
+| `timestamp` | [uint64](#uint64) | | |
+| `diversifier` | [string](#string) | | |
+| `data_type` | [DataType](#ibc.lightclients.solomachine.v1.DataType) | | type of the data used |
+| `data` | [bytes](#bytes) | | marshaled data |
+
+
+
+
+
+
+
+
+### SignatureAndData
+SignatureAndData contains a signature and the data signed over to create that
+signature.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `signature` | [bytes](#bytes) | | |
+| `data_type` | [DataType](#ibc.lightclients.solomachine.v1.DataType) | | |
+| `data` | [bytes](#bytes) | | |
+| `timestamp` | [uint64](#uint64) | | |
+
+
+
+
+
+
+
+
+### TimestampedSignatureData
+TimestampedSignatureData contains the signature data and the timestamp of the
+signature.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `signature_data` | [bytes](#bytes) | | |
+| `timestamp` | [uint64](#uint64) | | |
+
+
+
+
+
+
+
+
+
+
+### DataType
+DataType defines the type of solo machine proof being created. This is done
+to preserve uniqueness of different data sign byte encodings.
+
+| Name | Number | Description |
+| ---- | ------ | ----------- |
+| DATA_TYPE_UNINITIALIZED_UNSPECIFIED | 0 | Default State |
+| DATA_TYPE_CLIENT_STATE | 1 | Data type for client state verification |
+| DATA_TYPE_CONSENSUS_STATE | 2 | Data type for consensus state verification |
+| DATA_TYPE_CONNECTION_STATE | 3 | Data type for connection state verification |
+| DATA_TYPE_CHANNEL_STATE | 4 | Data type for channel state verification |
+| DATA_TYPE_PACKET_COMMITMENT | 5 | Data type for packet commitment verification |
+| DATA_TYPE_PACKET_ACKNOWLEDGEMENT | 6 | Data type for packet acknowledgement verification |
+| DATA_TYPE_PACKET_RECEIPT_ABSENCE | 7 | Data type for packet receipt absence verification |
+| DATA_TYPE_NEXT_SEQUENCE_RECV | 8 | Data type for next sequence recv verification |
+| DATA_TYPE_HEADER | 9 | Data type for header verification |
+
+
+
+
+
+
+
+
+
+
+
+Top
+
+## ibc/lightclients/tendermint/v1/tendermint.proto
+
+
+
+
+
+### ClientState
+ClientState from Tendermint tracks the current validator set, latest height,
+and a possible frozen height.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `chain_id` | [string](#string) | | |
+| `trust_level` | [Fraction](#ibc.lightclients.tendermint.v1.Fraction) | | |
+| `trusting_period` | [google.protobuf.Duration](#google.protobuf.Duration) | | duration of the period since the LatestTimestamp during which the submitted headers are valid for upgrade |
+| `unbonding_period` | [google.protobuf.Duration](#google.protobuf.Duration) | | duration of the staking unbonding period |
+| `max_clock_drift` | [google.protobuf.Duration](#google.protobuf.Duration) | | defines how much the new (untrusted) header's Time can drift into the future. |
+| `frozen_height` | [ibc.core.client.v1.Height](#ibc.core.client.v1.Height) | | Block height when the client was frozen due to a misbehaviour |
+| `latest_height` | [ibc.core.client.v1.Height](#ibc.core.client.v1.Height) | | Latest height the client was updated to |
+| `proof_specs` | [ics23.ProofSpec](#ics23.ProofSpec) | repeated | Proof specifications used in verifying counterparty state |
+| `upgrade_path` | [string](#string) | repeated | Path at which the next upgraded client will be committed. Each element corresponds to the key for a single CommitmentProof in the chained proof. NOTE: ClientState must be stored under `{upgradePath}/{upgradeHeight}/clientState` and ConsensusState must be stored under `{upgradePath}/{upgradeHeight}/consensusState`. For SDK chains using the default upgrade module, upgrade_path should be `[]string{"upgrade", "upgradedIBCState"}` |
+| `allow_update_after_expiry` | [bool](#bool) | | This flag, when set to true, will allow governance to recover a client which has expired |
+| `allow_update_after_misbehaviour` | [bool](#bool) | | This flag, when set to true, will allow governance to unfreeze a client whose chain has experienced a misbehaviour event |
+
+
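+As an illustrative sketch only (assuming the generated Go types for this package; every value below is a placeholder, not a recommended configuration), the fields above map onto a client state such as:
+
+```go
+package main
+
+import (
+	"fmt"
+	"time"
+
+	clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+	commitmenttypes "github.com/cosmos/ibc-go/modules/core/23-commitment/types"
+	ibctmtypes "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types"
+)
+
+func main() {
+	cs := ibctmtypes.ClientState{
+		ChainId:         "chain-b", // counterparty chain ID (placeholder)
+		TrustLevel:      ibctmtypes.Fraction{Numerator: 1, Denominator: 3},
+		TrustingPeriod:  14 * 24 * time.Hour,
+		UnbondingPeriod: 21 * 24 * time.Hour,
+		MaxClockDrift:   10 * time.Second,
+		LatestHeight:    clienttypes.NewHeight(1, 100), // revision 1, height 100
+		ProofSpecs:      commitmenttypes.GetSDKSpecs(), // ICS 23 proof specs used by SDK chains
+		UpgradePath:     []string{"upgrade", "upgradedIBCState"},
+
+		AllowUpdateAfterExpiry:       true,
+		AllowUpdateAfterMisbehaviour: true,
+	}
+	fmt.Println(cs.ChainId, cs.TrustLevel)
+}
+```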
+
+
+
+
+
+
+### ConsensusState
+ConsensusState defines the consensus state from Tendermint.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `timestamp` | [google.protobuf.Timestamp](#google.protobuf.Timestamp) | | timestamp that corresponds to the block height in which the ConsensusState was stored. |
+| `root` | [ibc.core.commitment.v1.MerkleRoot](#ibc.core.commitment.v1.MerkleRoot) | | commitment root (i.e. app hash) |
+| `next_validators_hash` | [bytes](#bytes) | | |
+
+
+
+
+
+
+
+
+### Fraction
+Fraction defines the protobuf message type for tmmath.Fraction that only
+supports positive values.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `numerator` | [uint64](#uint64) | | |
+| `denominator` | [uint64](#uint64) | | |
+
+
+
+
+
+
+
+
+### Header
+Header defines the Tendermint client consensus Header.
+It encapsulates all the information necessary to update from a trusted
+Tendermint ConsensusState. The inclusion of TrustedHeight and
+TrustedValidators allows this update to process correctly, so long as the
+ConsensusState for the TrustedHeight exists; this removes race conditions
+among relayers. The SignedHeader and ValidatorSet are the new untrusted update
+fields for the client. The TrustedHeight is the height of a stored
+ConsensusState on the client that will be used to verify the new untrusted
+header. The Trusted ConsensusState must be within the unbonding period of
+current time in order to correctly verify, and the TrustedValidators must
+hash to TrustedConsensusState.NextValidatorsHash since that is the last
+trusted validator set at the TrustedHeight.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `signed_header` | [tendermint.types.SignedHeader](#tendermint.types.SignedHeader) | | |
+| `validator_set` | [tendermint.types.ValidatorSet](#tendermint.types.ValidatorSet) | | |
+| `trusted_height` | [ibc.core.client.v1.Height](#ibc.core.client.v1.Height) | | |
+| `trusted_validators` | [tendermint.types.ValidatorSet](#tendermint.types.ValidatorSet) | | |
+
+
+
+
+
+
+
+
+### Misbehaviour
+Misbehaviour is a wrapper over two conflicting Headers
+that implements the Misbehaviour interface expected by ICS-02.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `client_id` | [string](#string) | | |
+| `header_1` | [Header](#ibc.lightclients.tendermint.v1.Header) | | |
+| `header_2` | [Header](#ibc.lightclients.tendermint.v1.Header) | | |
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+## Scalar Value Types
+
+| .proto Type | Notes | C++ | Java | Python | Go | C# | PHP | Ruby |
+| ----------- | ----- | --- | ---- | ------ | -- | -- | --- | ---- |
+| double | | double | double | float | float64 | double | float | Float |
+| float | | float | float | float | float32 | float | float | Float |
+| int32 | Uses variable-length encoding. Inefficient for encoding negative numbers – if your field is likely to have negative values, use sint32 instead. | int32 | int | int | int32 | int | integer | Bignum or Fixnum (as required) |
+| int64 | Uses variable-length encoding. Inefficient for encoding negative numbers – if your field is likely to have negative values, use sint64 instead. | int64 | long | int/long | int64 | long | integer/string | Bignum |
+| uint32 | Uses variable-length encoding. | uint32 | int | int/long | uint32 | uint | integer | Bignum or Fixnum (as required) |
+| uint64 | Uses variable-length encoding. | uint64 | long | int/long | uint64 | ulong | integer/string | Bignum or Fixnum (as required) |
+| sint32 | Uses variable-length encoding. Signed int value. These more efficiently encode negative numbers than regular int32s. | int32 | int | int | int32 | int | integer | Bignum or Fixnum (as required) |
+| sint64 | Uses variable-length encoding. Signed int value. These more efficiently encode negative numbers than regular int64s. | int64 | long | int/long | int64 | long | integer/string | Bignum |
+| fixed32 | Always four bytes. More efficient than uint32 if values are often greater than 2^28. | uint32 | int | int | uint32 | uint | integer | Bignum or Fixnum (as required) |
+| fixed64 | Always eight bytes. More efficient than uint64 if values are often greater than 2^56. | uint64 | long | int/long | uint64 | ulong | integer/string | Bignum |
+| sfixed32 | Always four bytes. | int32 | int | int | int32 | int | integer | Bignum or Fixnum (as required) |
+| sfixed64 | Always eight bytes. | int64 | long | int/long | int64 | long | integer/string | Bignum |
+| bool | | bool | boolean | boolean | bool | bool | boolean | TrueClass/FalseClass |
+| string | A string must always contain UTF-8 encoded or 7-bit ASCII text. | string | String | str/unicode | string | string | string | String (UTF-8) |
+| bytes | May contain any arbitrary sequence of bytes. | string | ByteString | str | []byte | ByteString | string | String (ASCII-8BIT) |
diff --git a/docs/relayer.md b/docs/ibc/relayer.md
similarity index 100%
rename from docs/relayer.md
rename to docs/ibc/relayer.md
diff --git a/docs/upgrades/README.md b/docs/ibc/upgrades/README.md
similarity index 100%
rename from docs/upgrades/README.md
rename to docs/ibc/upgrades/README.md
diff --git a/docs/upgrades/developer-guide.md b/docs/ibc/upgrades/developer-guide.md
similarity index 100%
rename from docs/upgrades/developer-guide.md
rename to docs/ibc/upgrades/developer-guide.md
diff --git a/docs/upgrades/quick-guide.md b/docs/ibc/upgrades/quick-guide.md
similarity index 100%
rename from docs/upgrades/quick-guide.md
rename to docs/ibc/upgrades/quick-guide.md
diff --git a/docs/migrations/ibc-migration-043.md b/docs/migrations/ibc-migration-043.md
index f343afc2..fbbe4323 100644
--- a/docs/migrations/ibc-migration-043.md
+++ b/docs/migrations/ibc-migration-043.md
@@ -10,9 +10,13 @@ The most obvious changes is import name changes. We need to change:
On my GNU/Linux based machine I used the following commands, executed in order:
-`grep -RiIl 'cosmos-sdk\/x\/ibc\/applications' | xargs sed -i 's/cosmos-sdk\/x\/ibc\/applications/ibc-go\/modules\/apps/g'`
+```
+grep -RiIl 'cosmos-sdk\/x\/ibc\/applications' | xargs sed -i 's/cosmos-sdk\/x\/ibc\/applications/ibc-go\/modules\/apps/g'
+```
-`grep -RiIl 'cosmos-sdk\/x\/ibc' | xargs sed -i 's/cosmos-sdk\/x\/ibc/ibc-go\/modules/g'`
+```
+grep -RiIl 'cosmos-sdk\/x\/ibc' | xargs sed -i 's/cosmos-sdk\/x\/ibc/ibc-go\/modules/g'
+```
ref: [explanation of the above commands](https://www.internalpointers.com/post/linux-find-and-replace-text-multiple-files)
diff --git a/docs/package-lock.json b/docs/package-lock.json
new file mode 100644
index 00000000..8e79e60d
--- /dev/null
+++ b/docs/package-lock.json
@@ -0,0 +1,24219 @@
+{
+ "name": "docs",
+ "version": "1.0.0",
+ "lockfileVersion": 2,
+ "requires": true,
+ "packages": {
+ "": {
+ "version": "1.0.0",
+ "license": "ISC",
+ "dependencies": {
+ "vuepress-theme-cosmos": "^1.0.182"
+ }
+ },
+ "node_modules/@algolia/cache-browser-local-storage": {
+ "version": "4.9.1",
+ "resolved": "https://registry.npmjs.org/@algolia/cache-browser-local-storage/-/cache-browser-local-storage-4.9.1.tgz",
+ "integrity": "sha512-bAUU9vKCy45uTTlzJw0LYu1IjoZsmzL6lgjaVFaW1crhX/4P+JD5ReQv3n/wpiXSFaHq1WEO3WyH2g3ymzeipQ==",
+ "dependencies": {
+ "@algolia/cache-common": "4.9.1"
+ }
+ },
+ "node_modules/@algolia/cache-common": {
+ "version": "4.9.1",
+ "resolved": "https://registry.npmjs.org/@algolia/cache-common/-/cache-common-4.9.1.tgz",
+ "integrity": "sha512-tcvw4mOfFy44V4ZxDEy9wNGr6vFROZKRpXKTEBgdw/WBn6mX51H1ar4RWtceDEcDU4H5fIv5tsY3ip2hU+fTPg=="
+ },
+ "node_modules/@algolia/cache-in-memory": {
+ "version": "4.9.1",
+ "resolved": "https://registry.npmjs.org/@algolia/cache-in-memory/-/cache-in-memory-4.9.1.tgz",
+ "integrity": "sha512-IEJrHonvdymW2CnRfJtsTVWyfAH05xPEFkGXGCw00+6JNCj8Dln3TeaRLiaaY1srlyGedkemekQm1/Xb46CGOQ==",
+ "dependencies": {
+ "@algolia/cache-common": "4.9.1"
+ }
+ },
+ "node_modules/@algolia/client-account": {
+ "version": "4.9.1",
+ "resolved": "https://registry.npmjs.org/@algolia/client-account/-/client-account-4.9.1.tgz",
+ "integrity": "sha512-Shpjeuwb7i2LR5QuWREb6UbEQLGB+Pl/J5+wPgILJDP/uWp7jpl0ase9mYNQGKj7TjztpSpQCPZ3dSHPnzZPfw==",
+ "dependencies": {
+ "@algolia/client-common": "4.9.1",
+ "@algolia/client-search": "4.9.1",
+ "@algolia/transporter": "4.9.1"
+ }
+ },
+ "node_modules/@algolia/client-analytics": {
+ "version": "4.9.1",
+ "resolved": "https://registry.npmjs.org/@algolia/client-analytics/-/client-analytics-4.9.1.tgz",
+ "integrity": "sha512-/g6OkOSIA+A0t/tjvbL6iG/zV4El4LPFgv/tcAYHTH27BmlNtnEXw+iFpGjeUlQoPily9WVB3QNLMJkaNwL3HA==",
+ "dependencies": {
+ "@algolia/client-common": "4.9.1",
+ "@algolia/client-search": "4.9.1",
+ "@algolia/requester-common": "4.9.1",
+ "@algolia/transporter": "4.9.1"
+ }
+ },
+ "node_modules/@algolia/client-common": {
+ "version": "4.9.1",
+ "resolved": "https://registry.npmjs.org/@algolia/client-common/-/client-common-4.9.1.tgz",
+ "integrity": "sha512-UziRTZ8km3qwoVPIyEre8TV6V+MX7UtbfVqPmSafZ0xu41UUZ+sL56YoKjOXkbKuybeIC9prXMGy/ID5bXkTqg==",
+ "dependencies": {
+ "@algolia/requester-common": "4.9.1",
+ "@algolia/transporter": "4.9.1"
+ }
+ },
+ "node_modules/@algolia/client-recommendation": {
+ "version": "4.9.1",
+ "resolved": "https://registry.npmjs.org/@algolia/client-recommendation/-/client-recommendation-4.9.1.tgz",
+ "integrity": "sha512-Drtvvm1PNIOpYf4HFlkPFstFQ3IsN+TRmxur2F7y6Faplb5ybISa8ithu1tmlTdyTf3A78hQUQjgJet6qD2XZw==",
+ "dependencies": {
+ "@algolia/client-common": "4.9.1",
+ "@algolia/requester-common": "4.9.1",
+ "@algolia/transporter": "4.9.1"
+ }
+ },
+ "node_modules/@algolia/client-search": {
+ "version": "4.9.1",
+ "resolved": "https://registry.npmjs.org/@algolia/client-search/-/client-search-4.9.1.tgz",
+ "integrity": "sha512-r9Cw2r8kJr45iYncFDht6EshARghU265wuY8Q8oHrpFHjAziEYdsUOdNmQKbsSH5J3gLjDPx1EI5DzVd6ivn3w==",
+ "dependencies": {
+ "@algolia/client-common": "4.9.1",
+ "@algolia/requester-common": "4.9.1",
+ "@algolia/transporter": "4.9.1"
+ }
+ },
+ "node_modules/@algolia/logger-common": {
+ "version": "4.9.1",
+ "resolved": "https://registry.npmjs.org/@algolia/logger-common/-/logger-common-4.9.1.tgz",
+ "integrity": "sha512-9mPrbFlFyPT7or/7PXTiJjyOewWB9QRkZKVXkt5zHAUiUzGxmmdpJIGpPv3YQnDur8lXrXaRI0MHXUuIDMY1ng=="
+ },
+ "node_modules/@algolia/logger-console": {
+ "version": "4.9.1",
+ "resolved": "https://registry.npmjs.org/@algolia/logger-console/-/logger-console-4.9.1.tgz",
+ "integrity": "sha512-74VUwjtFjFpjZpi3QoHIPv0kcr3vWUSHX/Vs8PJW3lPsD4CgyhFenQbG9v+ZnyH0JrJwiYTtzfmrVh7IMWZGrQ==",
+ "dependencies": {
+ "@algolia/logger-common": "4.9.1"
+ }
+ },
+ "node_modules/@algolia/requester-browser-xhr": {
+ "version": "4.9.1",
+ "resolved": "https://registry.npmjs.org/@algolia/requester-browser-xhr/-/requester-browser-xhr-4.9.1.tgz",
+ "integrity": "sha512-zc46tk5o0ikOAz3uYiRAMxC2iVKAMFKT7nNZnLB5IzT0uqAh7pz/+D/UvIxP4bKmsllpBSnPcpfQF+OI4Ag/BA==",
+ "dependencies": {
+ "@algolia/requester-common": "4.9.1"
+ }
+ },
+ "node_modules/@algolia/requester-common": {
+ "version": "4.9.1",
+ "resolved": "https://registry.npmjs.org/@algolia/requester-common/-/requester-common-4.9.1.tgz",
+ "integrity": "sha512-9hPgXnlCSbqJqF69M5x5WN3h51Dc+mk/iWNeJSVxExHGvCDfBBZd0v6S15i8q2a9cD1I2RnhMpbnX5BmGtabVA=="
+ },
+ "node_modules/@algolia/requester-node-http": {
+ "version": "4.9.1",
+ "resolved": "https://registry.npmjs.org/@algolia/requester-node-http/-/requester-node-http-4.9.1.tgz",
+ "integrity": "sha512-vYNVbSCuyrCSCjHBQJk+tLZtWCjvvDf5tSbRJjyJYMqpnXuIuP7gZm24iHil4NPYBhbBj5NU2ZDAhc/gTn75Ag==",
+ "dependencies": {
+ "@algolia/requester-common": "4.9.1"
+ }
+ },
+ "node_modules/@algolia/transporter": {
+ "version": "4.9.1",
+ "resolved": "https://registry.npmjs.org/@algolia/transporter/-/transporter-4.9.1.tgz",
+ "integrity": "sha512-AbjFfGzX+cAuj7Qyc536OxIQzjFOA5FU2ANGStx8LBH+AKXScwfkx67C05riuaRR5adSCLMSEbVvUscH0nF+6A==",
+ "dependencies": {
+ "@algolia/cache-common": "4.9.1",
+ "@algolia/logger-common": "4.9.1",
+ "@algolia/requester-common": "4.9.1"
+ }
+ },
+ "node_modules/@babel/code-frame": {
+ "version": "7.12.13",
+ "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.12.13.tgz",
+ "integrity": "sha512-HV1Cm0Q3ZrpCR93tkWOYiuYIgLxZXZFVG2VgK+MBWjUqZTundupbfx2aXarXuw5Ko5aMcjtJgbSs4vUGBS5v6g==",
+ "dependencies": {
+ "@babel/highlight": "^7.12.13"
+ }
+ },
+ "node_modules/@babel/compat-data": {
+ "version": "7.13.15",
+ "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.13.15.tgz",
+ "integrity": "sha512-ltnibHKR1VnrU4ymHyQ/CXtNXI6yZC0oJThyW78Hft8XndANwi+9H+UIklBDraIjFEJzw8wmcM427oDd9KS5wA=="
+ },
+ "node_modules/@babel/core": {
+ "version": "7.13.16",
+ "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.13.16.tgz",
+ "integrity": "sha512-sXHpixBiWWFti0AV2Zq7avpTasr6sIAu7Y396c608541qAU2ui4a193m0KSQmfPSKFZLnQ3cvlKDOm3XkuXm3Q==",
+ "dependencies": {
+ "@babel/code-frame": "^7.12.13",
+ "@babel/generator": "^7.13.16",
+ "@babel/helper-compilation-targets": "^7.13.16",
+ "@babel/helper-module-transforms": "^7.13.14",
+ "@babel/helpers": "^7.13.16",
+ "@babel/parser": "^7.13.16",
+ "@babel/template": "^7.12.13",
+ "@babel/traverse": "^7.13.15",
+ "@babel/types": "^7.13.16",
+ "convert-source-map": "^1.7.0",
+ "debug": "^4.1.0",
+ "gensync": "^1.0.0-beta.2",
+ "json5": "^2.1.2",
+ "semver": "^6.3.0",
+ "source-map": "^0.5.0"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/babel"
+ }
+ },
+ "node_modules/@babel/core/node_modules/debug": {
+ "version": "4.3.1",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz",
+ "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==",
+ "dependencies": {
+ "ms": "2.1.2"
+ },
+ "engines": {
+ "node": ">=6.0"
+ },
+ "peerDependenciesMeta": {
+ "supports-color": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@babel/core/node_modules/json5": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.0.tgz",
+ "integrity": "sha512-f+8cldu7X/y7RAJurMEJmdoKXGB/X550w2Nr3tTbezL6RwEE/iMcm+tZnXeoZtKuOq6ft8+CqzEkrIgx1fPoQA==",
+ "dependencies": {
+ "minimist": "^1.2.5"
+ },
+ "bin": {
+ "json5": "lib/cli.js"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/@babel/core/node_modules/ms": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz",
+ "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w=="
+ },
+ "node_modules/@babel/core/node_modules/source-map": {
+ "version": "0.5.7",
+ "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz",
+ "integrity": "sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w=",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/@babel/generator": {
+ "version": "7.13.16",
+ "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.13.16.tgz",
+ "integrity": "sha512-grBBR75UnKOcUWMp8WoDxNsWCFl//XCK6HWTrBQKTr5SV9f5g0pNOjdyzi/DTBv12S9GnYPInIXQBTky7OXEMg==",
+ "dependencies": {
+ "@babel/types": "^7.13.16",
+ "jsesc": "^2.5.1",
+ "source-map": "^0.5.0"
+ }
+ },
+ "node_modules/@babel/generator/node_modules/source-map": {
+ "version": "0.5.7",
+ "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz",
+ "integrity": "sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w=",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/@babel/helper-annotate-as-pure": {
+ "version": "7.12.13",
+ "resolved": "https://registry.npmjs.org/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.12.13.tgz",
+ "integrity": "sha512-7YXfX5wQ5aYM/BOlbSccHDbuXXFPxeoUmfWtz8le2yTkTZc+BxsiEnENFoi2SlmA8ewDkG2LgIMIVzzn2h8kfw==",
+ "dependencies": {
+ "@babel/types": "^7.12.13"
+ }
+ },
+ "node_modules/@babel/helper-builder-binary-assignment-operator-visitor": {
+ "version": "7.12.13",
+ "resolved": "https://registry.npmjs.org/@babel/helper-builder-binary-assignment-operator-visitor/-/helper-builder-binary-assignment-operator-visitor-7.12.13.tgz",
+ "integrity": "sha512-CZOv9tGphhDRlVjVkAgm8Nhklm9RzSmWpX2my+t7Ua/KT616pEzXsQCjinzvkRvHWJ9itO4f296efroX23XCMA==",
+ "dependencies": {
+ "@babel/helper-explode-assignable-expression": "^7.12.13",
+ "@babel/types": "^7.12.13"
+ }
+ },
+ "node_modules/@babel/helper-compilation-targets": {
+ "version": "7.13.16",
+ "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.13.16.tgz",
+ "integrity": "sha512-3gmkYIrpqsLlieFwjkGgLaSHmhnvlAYzZLlYVjlW+QwI+1zE17kGxuJGmIqDQdYp56XdmGeD+Bswx0UTyG18xA==",
+ "dependencies": {
+ "@babel/compat-data": "^7.13.15",
+ "@babel/helper-validator-option": "^7.12.17",
+ "browserslist": "^4.14.5",
+ "semver": "^6.3.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0"
+ }
+ },
+ "node_modules/@babel/helper-create-class-features-plugin": {
+ "version": "7.13.11",
+ "resolved": "https://registry.npmjs.org/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.13.11.tgz",
+ "integrity": "sha512-ays0I7XYq9xbjCSvT+EvysLgfc3tOkwCULHjrnscGT3A9qD4sk3wXnJ3of0MAWsWGjdinFvajHU2smYuqXKMrw==",
+ "dependencies": {
+ "@babel/helper-function-name": "^7.12.13",
+ "@babel/helper-member-expression-to-functions": "^7.13.0",
+ "@babel/helper-optimise-call-expression": "^7.12.13",
+ "@babel/helper-replace-supers": "^7.13.0",
+ "@babel/helper-split-export-declaration": "^7.12.13"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0"
+ }
+ },
+ "node_modules/@babel/helper-create-regexp-features-plugin": {
+ "version": "7.12.17",
+ "resolved": "https://registry.npmjs.org/@babel/helper-create-regexp-features-plugin/-/helper-create-regexp-features-plugin-7.12.17.tgz",
+ "integrity": "sha512-p2VGmBu9oefLZ2nQpgnEnG0ZlRPvL8gAGvPUMQwUdaE8k49rOMuZpOwdQoy5qJf6K8jL3bcAMhVUlHAjIgJHUg==",
+ "dependencies": {
+ "@babel/helper-annotate-as-pure": "^7.12.13",
+ "regexpu-core": "^4.7.1"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0"
+ }
+ },
+ "node_modules/@babel/helper-define-polyfill-provider": {
+ "version": "0.2.0",
+ "resolved": "https://registry.npmjs.org/@babel/helper-define-polyfill-provider/-/helper-define-polyfill-provider-0.2.0.tgz",
+ "integrity": "sha512-JT8tHuFjKBo8NnaUbblz7mIu1nnvUDiHVjXXkulZULyidvo/7P6TY7+YqpV37IfF+KUFxmlK04elKtGKXaiVgw==",
+ "dependencies": {
+ "@babel/helper-compilation-targets": "^7.13.0",
+ "@babel/helper-module-imports": "^7.12.13",
+ "@babel/helper-plugin-utils": "^7.13.0",
+ "@babel/traverse": "^7.13.0",
+ "debug": "^4.1.1",
+ "lodash.debounce": "^4.0.8",
+ "resolve": "^1.14.2",
+ "semver": "^6.1.2"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.4.0-0"
+ }
+ },
+ "node_modules/@babel/helper-define-polyfill-provider/node_modules/debug": {
+ "version": "4.3.1",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz",
+ "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==",
+ "dependencies": {
+ "ms": "2.1.2"
+ },
+ "engines": {
+ "node": ">=6.0"
+ },
+ "peerDependenciesMeta": {
+ "supports-color": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@babel/helper-define-polyfill-provider/node_modules/ms": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz",
+ "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w=="
+ },
+ "node_modules/@babel/helper-explode-assignable-expression": {
+ "version": "7.13.0",
+ "resolved": "https://registry.npmjs.org/@babel/helper-explode-assignable-expression/-/helper-explode-assignable-expression-7.13.0.tgz",
+ "integrity": "sha512-qS0peLTDP8kOisG1blKbaoBg/o9OSa1qoumMjTK5pM+KDTtpxpsiubnCGP34vK8BXGcb2M9eigwgvoJryrzwWA==",
+ "dependencies": {
+ "@babel/types": "^7.13.0"
+ }
+ },
+ "node_modules/@babel/helper-function-name": {
+ "version": "7.12.13",
+ "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.12.13.tgz",
+ "integrity": "sha512-TZvmPn0UOqmvi5G4vvw0qZTpVptGkB1GL61R6lKvrSdIxGm5Pky7Q3fpKiIkQCAtRCBUwB0PaThlx9vebCDSwA==",
+ "dependencies": {
+ "@babel/helper-get-function-arity": "^7.12.13",
+ "@babel/template": "^7.12.13",
+ "@babel/types": "^7.12.13"
+ }
+ },
+ "node_modules/@babel/helper-get-function-arity": {
+ "version": "7.12.13",
+ "resolved": "https://registry.npmjs.org/@babel/helper-get-function-arity/-/helper-get-function-arity-7.12.13.tgz",
+ "integrity": "sha512-DjEVzQNz5LICkzN0REdpD5prGoidvbdYk1BVgRUOINaWJP2t6avB27X1guXK1kXNrX0WMfsrm1A/ZBthYuIMQg==",
+ "dependencies": {
+ "@babel/types": "^7.12.13"
+ }
+ },
+ "node_modules/@babel/helper-hoist-variables": {
+ "version": "7.13.16",
+ "resolved": "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.13.16.tgz",
+ "integrity": "sha512-1eMtTrXtrwscjcAeO4BVK+vvkxaLJSPFz1w1KLawz6HLNi9bPFGBNwwDyVfiu1Tv/vRRFYfoGaKhmAQPGPn5Wg==",
+ "dependencies": {
+ "@babel/traverse": "^7.13.15",
+ "@babel/types": "^7.13.16"
+ }
+ },
+ "node_modules/@babel/helper-member-expression-to-functions": {
+ "version": "7.13.12",
+ "resolved": "https://registry.npmjs.org/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.13.12.tgz",
+ "integrity": "sha512-48ql1CLL59aKbU94Y88Xgb2VFy7a95ykGRbJJaaVv+LX5U8wFpLfiGXJJGUozsmA1oEh/o5Bp60Voq7ACyA/Sw==",
+ "dependencies": {
+ "@babel/types": "^7.13.12"
+ }
+ },
+ "node_modules/@babel/helper-module-imports": {
+ "version": "7.13.12",
+ "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.13.12.tgz",
+ "integrity": "sha512-4cVvR2/1B693IuOvSI20xqqa/+bl7lqAMR59R4iu39R9aOX8/JoYY1sFaNvUMyMBGnHdwvJgUrzNLoUZxXypxA==",
+ "dependencies": {
+ "@babel/types": "^7.13.12"
+ }
+ },
+ "node_modules/@babel/helper-module-transforms": {
+ "version": "7.13.14",
+ "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.13.14.tgz",
+ "integrity": "sha512-QuU/OJ0iAOSIatyVZmfqB0lbkVP0kDRiKj34xy+QNsnVZi/PA6BoSoreeqnxxa9EHFAIL0R9XOaAR/G9WlIy5g==",
+ "dependencies": {
+ "@babel/helper-module-imports": "^7.13.12",
+ "@babel/helper-replace-supers": "^7.13.12",
+ "@babel/helper-simple-access": "^7.13.12",
+ "@babel/helper-split-export-declaration": "^7.12.13",
+ "@babel/helper-validator-identifier": "^7.12.11",
+ "@babel/template": "^7.12.13",
+ "@babel/traverse": "^7.13.13",
+ "@babel/types": "^7.13.14"
+ }
+ },
+ "node_modules/@babel/helper-optimise-call-expression": {
+ "version": "7.12.13",
+ "resolved": "https://registry.npmjs.org/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.12.13.tgz",
+ "integrity": "sha512-BdWQhoVJkp6nVjB7nkFWcn43dkprYauqtk++Py2eaf/GRDFm5BxRqEIZCiHlZUGAVmtwKcsVL1dC68WmzeFmiA==",
+ "dependencies": {
+ "@babel/types": "^7.12.13"
+ }
+ },
+ "node_modules/@babel/helper-plugin-utils": {
+ "version": "7.13.0",
+ "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.13.0.tgz",
+ "integrity": "sha512-ZPafIPSwzUlAoWT8DKs1W2VyF2gOWthGd5NGFMsBcMMol+ZhK+EQY/e6V96poa6PA/Bh+C9plWN0hXO1uB8AfQ=="
+ },
+ "node_modules/@babel/helper-remap-async-to-generator": {
+ "version": "7.13.0",
+ "resolved": "https://registry.npmjs.org/@babel/helper-remap-async-to-generator/-/helper-remap-async-to-generator-7.13.0.tgz",
+ "integrity": "sha512-pUQpFBE9JvC9lrQbpX0TmeNIy5s7GnZjna2lhhcHC7DzgBs6fWn722Y5cfwgrtrqc7NAJwMvOa0mKhq6XaE4jg==",
+ "dependencies": {
+ "@babel/helper-annotate-as-pure": "^7.12.13",
+ "@babel/helper-wrap-function": "^7.13.0",
+ "@babel/types": "^7.13.0"
+ }
+ },
+ "node_modules/@babel/helper-replace-supers": {
+ "version": "7.13.12",
+ "resolved": "https://registry.npmjs.org/@babel/helper-replace-supers/-/helper-replace-supers-7.13.12.tgz",
+ "integrity": "sha512-Gz1eiX+4yDO8mT+heB94aLVNCL+rbuT2xy4YfyNqu8F+OI6vMvJK891qGBTqL9Uc8wxEvRW92Id6G7sDen3fFw==",
+ "dependencies": {
+ "@babel/helper-member-expression-to-functions": "^7.13.12",
+ "@babel/helper-optimise-call-expression": "^7.12.13",
+ "@babel/traverse": "^7.13.0",
+ "@babel/types": "^7.13.12"
+ }
+ },
+ "node_modules/@babel/helper-simple-access": {
+ "version": "7.13.12",
+ "resolved": "https://registry.npmjs.org/@babel/helper-simple-access/-/helper-simple-access-7.13.12.tgz",
+ "integrity": "sha512-7FEjbrx5SL9cWvXioDbnlYTppcZGuCY6ow3/D5vMggb2Ywgu4dMrpTJX0JdQAIcRRUElOIxF3yEooa9gUb9ZbA==",
+ "dependencies": {
+ "@babel/types": "^7.13.12"
+ }
+ },
+ "node_modules/@babel/helper-skip-transparent-expression-wrappers": {
+ "version": "7.12.1",
+ "resolved": "https://registry.npmjs.org/@babel/helper-skip-transparent-expression-wrappers/-/helper-skip-transparent-expression-wrappers-7.12.1.tgz",
+ "integrity": "sha512-Mf5AUuhG1/OCChOJ/HcADmvcHM42WJockombn8ATJG3OnyiSxBK/Mm5x78BQWvmtXZKHgbjdGL2kin/HOLlZGA==",
+ "dependencies": {
+ "@babel/types": "^7.12.1"
+ }
+ },
+ "node_modules/@babel/helper-split-export-declaration": {
+ "version": "7.12.13",
+ "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.12.13.tgz",
+ "integrity": "sha512-tCJDltF83htUtXx5NLcaDqRmknv652ZWCHyoTETf1CXYJdPC7nohZohjUgieXhv0hTJdRf2FjDueFehdNucpzg==",
+ "dependencies": {
+ "@babel/types": "^7.12.13"
+ }
+ },
+ "node_modules/@babel/helper-validator-identifier": {
+ "version": "7.12.11",
+ "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.12.11.tgz",
+ "integrity": "sha512-np/lG3uARFybkoHokJUmf1QfEvRVCPbmQeUQpKow5cQ3xWrV9i3rUHodKDJPQfTVX61qKi+UdYk8kik84n7XOw=="
+ },
+ "node_modules/@babel/helper-validator-option": {
+ "version": "7.12.17",
+ "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.12.17.tgz",
+ "integrity": "sha512-TopkMDmLzq8ngChwRlyjR6raKD6gMSae4JdYDB8bByKreQgG0RBTuKe9LRxW3wFtUnjxOPRKBDwEH6Mg5KeDfw=="
+ },
+ "node_modules/@babel/helper-wrap-function": {
+ "version": "7.13.0",
+ "resolved": "https://registry.npmjs.org/@babel/helper-wrap-function/-/helper-wrap-function-7.13.0.tgz",
+ "integrity": "sha512-1UX9F7K3BS42fI6qd2A4BjKzgGjToscyZTdp1DjknHLCIvpgne6918io+aL5LXFcER/8QWiwpoY902pVEqgTXA==",
+ "dependencies": {
+ "@babel/helper-function-name": "^7.12.13",
+ "@babel/template": "^7.12.13",
+ "@babel/traverse": "^7.13.0",
+ "@babel/types": "^7.13.0"
+ }
+ },
+ "node_modules/@babel/helpers": {
+ "version": "7.13.17",
+ "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.13.17.tgz",
+ "integrity": "sha512-Eal4Gce4kGijo1/TGJdqp3WuhllaMLSrW6XcL0ulyUAQOuxHcCafZE8KHg9857gcTehsm/v7RcOx2+jp0Ryjsg==",
+ "dependencies": {
+ "@babel/template": "^7.12.13",
+ "@babel/traverse": "^7.13.17",
+ "@babel/types": "^7.13.17"
+ }
+ },
+ "node_modules/@babel/highlight": {
+ "version": "7.13.10",
+ "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.13.10.tgz",
+ "integrity": "sha512-5aPpe5XQPzflQrFwL1/QoeHkP2MsA4JCntcXHRhEsdsfPVkvPi2w7Qix4iV7t5S/oC9OodGrggd8aco1g3SZFg==",
+ "dependencies": {
+ "@babel/helper-validator-identifier": "^7.12.11",
+ "chalk": "^2.0.0",
+ "js-tokens": "^4.0.0"
+ }
+ },
+ "node_modules/@babel/parser": {
+ "version": "7.13.16",
+ "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.13.16.tgz",
+ "integrity": "sha512-6bAg36mCwuqLO0hbR+z7PHuqWiCeP7Dzg73OpQwsAB1Eb8HnGEz5xYBzCfbu+YjoaJsJs+qheDxVAuqbt3ILEw==",
+ "bin": {
+ "parser": "bin/babel-parser.js"
+ },
+ "engines": {
+ "node": ">=6.0.0"
+ }
+ },
+ "node_modules/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": {
+ "version": "7.13.12",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining/-/plugin-bugfix-v8-spread-parameters-in-optional-chaining-7.13.12.tgz",
+ "integrity": "sha512-d0u3zWKcoZf379fOeJdr1a5WPDny4aOFZ6hlfKivgK0LY7ZxNfoaHL2fWwdGtHyVvra38FC+HVYkO+byfSA8AQ==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.13.0",
+ "@babel/helper-skip-transparent-expression-wrappers": "^7.12.1",
+ "@babel/plugin-proposal-optional-chaining": "^7.13.12"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.13.0"
+ }
+ },
+ "node_modules/@babel/plugin-proposal-async-generator-functions": {
+ "version": "7.13.15",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-async-generator-functions/-/plugin-proposal-async-generator-functions-7.13.15.tgz",
+ "integrity": "sha512-VapibkWzFeoa6ubXy/NgV5U2U4MVnUlvnx6wo1XhlsaTrLYWE0UFpDQsVrmn22q5CzeloqJ8gEMHSKxuee6ZdA==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.13.0",
+ "@babel/helper-remap-async-to-generator": "^7.13.0",
+ "@babel/plugin-syntax-async-generators": "^7.8.4"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-proposal-class-properties": {
+ "version": "7.13.0",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-class-properties/-/plugin-proposal-class-properties-7.13.0.tgz",
+ "integrity": "sha512-KnTDjFNC1g+45ka0myZNvSBFLhNCLN+GeGYLDEA8Oq7MZ6yMgfLoIRh86GRT0FjtJhZw8JyUskP9uvj5pHM9Zg==",
+ "dependencies": {
+ "@babel/helper-create-class-features-plugin": "^7.13.0",
+ "@babel/helper-plugin-utils": "^7.13.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-proposal-decorators": {
+ "version": "7.13.15",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-decorators/-/plugin-proposal-decorators-7.13.15.tgz",
+ "integrity": "sha512-ibAMAqUm97yzi+LPgdr5Nqb9CMkeieGHvwPg1ywSGjZrZHQEGqE01HmOio8kxRpA/+VtOHouIVy2FMpBbtltjA==",
+ "dependencies": {
+ "@babel/helper-create-class-features-plugin": "^7.13.11",
+ "@babel/helper-plugin-utils": "^7.13.0",
+ "@babel/plugin-syntax-decorators": "^7.12.13"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-proposal-dynamic-import": {
+ "version": "7.13.8",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-dynamic-import/-/plugin-proposal-dynamic-import-7.13.8.tgz",
+ "integrity": "sha512-ONWKj0H6+wIRCkZi9zSbZtE/r73uOhMVHh256ys0UzfM7I3d4n+spZNWjOnJv2gzopumP2Wxi186vI8N0Y2JyQ==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.13.0",
+ "@babel/plugin-syntax-dynamic-import": "^7.8.3"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-proposal-export-namespace-from": {
+ "version": "7.12.13",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-export-namespace-from/-/plugin-proposal-export-namespace-from-7.12.13.tgz",
+ "integrity": "sha512-INAgtFo4OnLN3Y/j0VwAgw3HDXcDtX+C/erMvWzuV9v71r7urb6iyMXu7eM9IgLr1ElLlOkaHjJ0SbCmdOQ3Iw==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.12.13",
+ "@babel/plugin-syntax-export-namespace-from": "^7.8.3"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-proposal-json-strings": {
+ "version": "7.13.8",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-json-strings/-/plugin-proposal-json-strings-7.13.8.tgz",
+ "integrity": "sha512-w4zOPKUFPX1mgvTmL/fcEqy34hrQ1CRcGxdphBc6snDnnqJ47EZDIyop6IwXzAC8G916hsIuXB2ZMBCExC5k7Q==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.13.0",
+ "@babel/plugin-syntax-json-strings": "^7.8.3"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-proposal-logical-assignment-operators": {
+ "version": "7.13.8",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-logical-assignment-operators/-/plugin-proposal-logical-assignment-operators-7.13.8.tgz",
+ "integrity": "sha512-aul6znYB4N4HGweImqKn59Su9RS8lbUIqxtXTOcAGtNIDczoEFv+l1EhmX8rUBp3G1jMjKJm8m0jXVp63ZpS4A==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.13.0",
+ "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-proposal-nullish-coalescing-operator": {
+ "version": "7.13.8",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-nullish-coalescing-operator/-/plugin-proposal-nullish-coalescing-operator-7.13.8.tgz",
+ "integrity": "sha512-iePlDPBn//UhxExyS9KyeYU7RM9WScAG+D3Hhno0PLJebAEpDZMocbDe64eqynhNAnwz/vZoL/q/QB2T1OH39A==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.13.0",
+ "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-proposal-numeric-separator": {
+ "version": "7.12.13",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-numeric-separator/-/plugin-proposal-numeric-separator-7.12.13.tgz",
+ "integrity": "sha512-O1jFia9R8BUCl3ZGB7eitaAPu62TXJRHn7rh+ojNERCFyqRwJMTmhz+tJ+k0CwI6CLjX/ee4qW74FSqlq9I35w==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.12.13",
+ "@babel/plugin-syntax-numeric-separator": "^7.10.4"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-proposal-object-rest-spread": {
+ "version": "7.13.8",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-object-rest-spread/-/plugin-proposal-object-rest-spread-7.13.8.tgz",
+ "integrity": "sha512-DhB2EuB1Ih7S3/IRX5AFVgZ16k3EzfRbq97CxAVI1KSYcW+lexV8VZb7G7L8zuPVSdQMRn0kiBpf/Yzu9ZKH0g==",
+ "dependencies": {
+ "@babel/compat-data": "^7.13.8",
+ "@babel/helper-compilation-targets": "^7.13.8",
+ "@babel/helper-plugin-utils": "^7.13.0",
+ "@babel/plugin-syntax-object-rest-spread": "^7.8.3",
+ "@babel/plugin-transform-parameters": "^7.13.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-proposal-optional-catch-binding": {
+ "version": "7.13.8",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-optional-catch-binding/-/plugin-proposal-optional-catch-binding-7.13.8.tgz",
+ "integrity": "sha512-0wS/4DUF1CuTmGo+NiaHfHcVSeSLj5S3e6RivPTg/2k3wOv3jO35tZ6/ZWsQhQMvdgI7CwphjQa/ccarLymHVA==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.13.0",
+ "@babel/plugin-syntax-optional-catch-binding": "^7.8.3"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-proposal-optional-chaining": {
+ "version": "7.13.12",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-optional-chaining/-/plugin-proposal-optional-chaining-7.13.12.tgz",
+ "integrity": "sha512-fcEdKOkIB7Tf4IxrgEVeFC4zeJSTr78no9wTdBuZZbqF64kzllU0ybo2zrzm7gUQfxGhBgq4E39oRs8Zx/RMYQ==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.13.0",
+ "@babel/helper-skip-transparent-expression-wrappers": "^7.12.1",
+ "@babel/plugin-syntax-optional-chaining": "^7.8.3"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-proposal-private-methods": {
+ "version": "7.13.0",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-private-methods/-/plugin-proposal-private-methods-7.13.0.tgz",
+ "integrity": "sha512-MXyyKQd9inhx1kDYPkFRVOBXQ20ES8Pto3T7UZ92xj2mY0EVD8oAVzeyYuVfy/mxAdTSIayOvg+aVzcHV2bn6Q==",
+ "dependencies": {
+ "@babel/helper-create-class-features-plugin": "^7.13.0",
+ "@babel/helper-plugin-utils": "^7.13.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-proposal-unicode-property-regex": {
+ "version": "7.12.13",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-unicode-property-regex/-/plugin-proposal-unicode-property-regex-7.12.13.tgz",
+ "integrity": "sha512-XyJmZidNfofEkqFV5VC/bLabGmO5QzenPO/YOfGuEbgU+2sSwMmio3YLb4WtBgcmmdwZHyVyv8on77IUjQ5Gvg==",
+ "dependencies": {
+ "@babel/helper-create-regexp-features-plugin": "^7.12.13",
+ "@babel/helper-plugin-utils": "^7.12.13"
+ },
+ "engines": {
+ "node": ">=4"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-async-generators": {
+ "version": "7.8.4",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-async-generators/-/plugin-syntax-async-generators-7.8.4.tgz",
+ "integrity": "sha512-tycmZxkGfZaxhMRbXlPXuVFpdWlXpir2W4AMhSJgRKzk/eDlIXOhb2LHWoLpDF7TEHylV5zNhykX6KAgHJmTNw==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.8.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-class-properties": {
+ "version": "7.12.13",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-properties/-/plugin-syntax-class-properties-7.12.13.tgz",
+ "integrity": "sha512-fm4idjKla0YahUNgFNLCB0qySdsoPiZP3iQE3rky0mBUtMZ23yDJ9SJdg6dXTSDnulOVqiF3Hgr9nbXvXTQZYA==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.12.13"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-decorators": {
+ "version": "7.12.13",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-decorators/-/plugin-syntax-decorators-7.12.13.tgz",
+ "integrity": "sha512-Rw6aIXGuqDLr6/LoBBYE57nKOzQpz/aDkKlMqEwH+Vp0MXbG6H/TfRjaY343LKxzAKAMXIHsQ8JzaZKuDZ9MwA==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.12.13"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-dynamic-import": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-dynamic-import/-/plugin-syntax-dynamic-import-7.8.3.tgz",
+ "integrity": "sha512-5gdGbFon+PszYzqs83S3E5mpi7/y/8M9eC90MRTZfduQOYW76ig6SOSPNe41IG5LoP3FGBn2N0RjVDSQiS94kQ==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.8.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-export-namespace-from": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-export-namespace-from/-/plugin-syntax-export-namespace-from-7.8.3.tgz",
+ "integrity": "sha512-MXf5laXo6c1IbEbegDmzGPwGNTsHZmEy6QGznu5Sh2UCWvueywb2ee+CCE4zQiZstxU9BMoQO9i6zUFSY0Kj0Q==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.8.3"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-json-strings": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-json-strings/-/plugin-syntax-json-strings-7.8.3.tgz",
+ "integrity": "sha512-lY6kdGpWHvjoe2vk4WrAapEuBR69EMxZl+RoGRhrFGNYVK8mOPAW8VfbT/ZgrFbXlDNiiaxQnAtgVCZ6jv30EA==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.8.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-jsx": {
+ "version": "7.12.13",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.12.13.tgz",
+ "integrity": "sha512-d4HM23Q1K7oq/SLNmG6mRt85l2csmQ0cHRaxRXjKW0YFdEXqlZ5kzFQKH5Uc3rDJECgu+yCRgPkG04Mm98R/1g==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.12.13"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-logical-assignment-operators": {
+ "version": "7.10.4",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-logical-assignment-operators/-/plugin-syntax-logical-assignment-operators-7.10.4.tgz",
+ "integrity": "sha512-d8waShlpFDinQ5MtvGU9xDAOzKH47+FFoney2baFIoMr952hKOLp1HR7VszoZvOsV/4+RRszNY7D17ba0te0ig==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.10.4"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-nullish-coalescing-operator": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-nullish-coalescing-operator/-/plugin-syntax-nullish-coalescing-operator-7.8.3.tgz",
+ "integrity": "sha512-aSff4zPII1u2QD7y+F8oDsz19ew4IGEJg9SVW+bqwpwtfFleiQDMdzA/R+UlWDzfnHFCxxleFT0PMIrR36XLNQ==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.8.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-numeric-separator": {
+ "version": "7.10.4",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-numeric-separator/-/plugin-syntax-numeric-separator-7.10.4.tgz",
+ "integrity": "sha512-9H6YdfkcK/uOnY/K7/aA2xpzaAgkQn37yzWUMRK7OaPOqOpGS1+n0H5hxT9AUw9EsSjPW8SVyMJwYRtWs3X3ug==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.10.4"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-object-rest-spread": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-object-rest-spread/-/plugin-syntax-object-rest-spread-7.8.3.tgz",
+ "integrity": "sha512-XoqMijGZb9y3y2XskN+P1wUGiVwWZ5JmoDRwx5+3GmEplNyVM2s2Dg8ILFQm8rWM48orGy5YpI5Bl8U1y7ydlA==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.8.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-optional-catch-binding": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-catch-binding/-/plugin-syntax-optional-catch-binding-7.8.3.tgz",
+ "integrity": "sha512-6VPD0Pc1lpTqw0aKoeRTMiB+kWhAoT24PA+ksWSBrFtl5SIRVpZlwN3NNPQjehA2E/91FV3RjLWoVTglWcSV3Q==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.8.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-optional-chaining": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-chaining/-/plugin-syntax-optional-chaining-7.8.3.tgz",
+ "integrity": "sha512-KoK9ErH1MBlCPxV0VANkXW2/dw4vlbGDrFgz8bmUsBGYkFRcbRwMh6cIJubdPrkxRwuGdtCk0v/wPTKbQgBjkg==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.8.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-top-level-await": {
+ "version": "7.12.13",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-top-level-await/-/plugin-syntax-top-level-await-7.12.13.tgz",
+ "integrity": "sha512-A81F9pDwyS7yM//KwbCSDqy3Uj4NMIurtplxphWxoYtNPov7cJsDkAFNNyVlIZ3jwGycVsurZ+LtOA8gZ376iQ==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.12.13"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-arrow-functions": {
+ "version": "7.13.0",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.13.0.tgz",
+ "integrity": "sha512-96lgJagobeVmazXFaDrbmCLQxBysKu7U6Do3mLsx27gf5Dk85ezysrs2BZUpXD703U/Su1xTBDxxar2oa4jAGg==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.13.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-async-to-generator": {
+ "version": "7.13.0",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-async-to-generator/-/plugin-transform-async-to-generator-7.13.0.tgz",
+ "integrity": "sha512-3j6E004Dx0K3eGmhxVJxwwI89CTJrce7lg3UrtFuDAVQ/2+SJ/h/aSFOeE6/n0WB1GsOffsJp6MnPQNQ8nmwhg==",
+ "dependencies": {
+ "@babel/helper-module-imports": "^7.12.13",
+ "@babel/helper-plugin-utils": "^7.13.0",
+ "@babel/helper-remap-async-to-generator": "^7.13.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-block-scoped-functions": {
+ "version": "7.12.13",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoped-functions/-/plugin-transform-block-scoped-functions-7.12.13.tgz",
+ "integrity": "sha512-zNyFqbc3kI/fVpqwfqkg6RvBgFpC4J18aKKMmv7KdQ/1GgREapSJAykLMVNwfRGO3BtHj3YQZl8kxCXPcVMVeg==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.12.13"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-block-scoping": {
+ "version": "7.13.16",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.13.16.tgz",
+ "integrity": "sha512-ad3PHUxGnfWF4Efd3qFuznEtZKoBp0spS+DgqzVzRPV7urEBvPLue3y2j80w4Jf2YLzZHj8TOv/Lmvdmh3b2xg==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.13.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-classes": {
+ "version": "7.13.0",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-classes/-/plugin-transform-classes-7.13.0.tgz",
+ "integrity": "sha512-9BtHCPUARyVH1oXGcSJD3YpsqRLROJx5ZNP6tN5vnk17N0SVf9WCtf8Nuh1CFmgByKKAIMstitKduoCmsaDK5g==",
+ "dependencies": {
+ "@babel/helper-annotate-as-pure": "^7.12.13",
+ "@babel/helper-function-name": "^7.12.13",
+ "@babel/helper-optimise-call-expression": "^7.12.13",
+ "@babel/helper-plugin-utils": "^7.13.0",
+ "@babel/helper-replace-supers": "^7.13.0",
+ "@babel/helper-split-export-declaration": "^7.12.13",
+ "globals": "^11.1.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-computed-properties": {
+ "version": "7.13.0",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-computed-properties/-/plugin-transform-computed-properties-7.13.0.tgz",
+ "integrity": "sha512-RRqTYTeZkZAz8WbieLTvKUEUxZlUTdmL5KGMyZj7FnMfLNKV4+r5549aORG/mgojRmFlQMJDUupwAMiF2Q7OUg==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.13.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-destructuring": {
+ "version": "7.13.17",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.13.17.tgz",
+ "integrity": "sha512-UAUqiLv+uRLO+xuBKKMEpC+t7YRNVRqBsWWq1yKXbBZBje/t3IXCiSinZhjn/DC3qzBfICeYd2EFGEbHsh5RLA==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.13.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-dotall-regex": {
+ "version": "7.12.13",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-dotall-regex/-/plugin-transform-dotall-regex-7.12.13.tgz",
+ "integrity": "sha512-foDrozE65ZFdUC2OfgeOCrEPTxdB3yjqxpXh8CH+ipd9CHd4s/iq81kcUpyH8ACGNEPdFqbtzfgzbT/ZGlbDeQ==",
+ "dependencies": {
+ "@babel/helper-create-regexp-features-plugin": "^7.12.13",
+ "@babel/helper-plugin-utils": "^7.12.13"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-duplicate-keys": {
+ "version": "7.12.13",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-duplicate-keys/-/plugin-transform-duplicate-keys-7.12.13.tgz",
+ "integrity": "sha512-NfADJiiHdhLBW3pulJlJI2NB0t4cci4WTZ8FtdIuNc2+8pslXdPtRRAEWqUY+m9kNOk2eRYbTAOipAxlrOcwwQ==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.12.13"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-exponentiation-operator": {
+ "version": "7.12.13",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-exponentiation-operator/-/plugin-transform-exponentiation-operator-7.12.13.tgz",
+ "integrity": "sha512-fbUelkM1apvqez/yYx1/oICVnGo2KM5s63mhGylrmXUxK/IAXSIf87QIxVfZldWf4QsOafY6vV3bX8aMHSvNrA==",
+ "dependencies": {
+ "@babel/helper-builder-binary-assignment-operator-visitor": "^7.12.13",
+ "@babel/helper-plugin-utils": "^7.12.13"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-for-of": {
+ "version": "7.13.0",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.13.0.tgz",
+ "integrity": "sha512-IHKT00mwUVYE0zzbkDgNRP6SRzvfGCYsOxIRz8KsiaaHCcT9BWIkO+H9QRJseHBLOGBZkHUdHiqj6r0POsdytg==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.13.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-function-name": {
+ "version": "7.12.13",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-function-name/-/plugin-transform-function-name-7.12.13.tgz",
+ "integrity": "sha512-6K7gZycG0cmIwwF7uMK/ZqeCikCGVBdyP2J5SKNCXO5EOHcqi+z7Jwf8AmyDNcBgxET8DrEtCt/mPKPyAzXyqQ==",
+ "dependencies": {
+ "@babel/helper-function-name": "^7.12.13",
+ "@babel/helper-plugin-utils": "^7.12.13"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-literals": {
+ "version": "7.12.13",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-literals/-/plugin-transform-literals-7.12.13.tgz",
+ "integrity": "sha512-FW+WPjSR7hiUxMcKqyNjP05tQ2kmBCdpEpZHY1ARm96tGQCCBvXKnpjILtDplUnJ/eHZ0lALLM+d2lMFSpYJrQ==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.12.13"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-member-expression-literals": {
+ "version": "7.12.13",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-member-expression-literals/-/plugin-transform-member-expression-literals-7.12.13.tgz",
+ "integrity": "sha512-kxLkOsg8yir4YeEPHLuO2tXP9R/gTjpuTOjshqSpELUN3ZAg2jfDnKUvzzJxObun38sw3wm4Uu69sX/zA7iRvg==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.12.13"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-modules-amd": {
+ "version": "7.13.0",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-amd/-/plugin-transform-modules-amd-7.13.0.tgz",
+ "integrity": "sha512-EKy/E2NHhY/6Vw5d1k3rgoobftcNUmp9fGjb9XZwQLtTctsRBOTRO7RHHxfIky1ogMN5BxN7p9uMA3SzPfotMQ==",
+ "dependencies": {
+ "@babel/helper-module-transforms": "^7.13.0",
+ "@babel/helper-plugin-utils": "^7.13.0",
+ "babel-plugin-dynamic-import-node": "^2.3.3"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-modules-commonjs": {
+ "version": "7.13.8",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.13.8.tgz",
+ "integrity": "sha512-9QiOx4MEGglfYZ4XOnU79OHr6vIWUakIj9b4mioN8eQIoEh+pf5p/zEB36JpDFWA12nNMiRf7bfoRvl9Rn79Bw==",
+ "dependencies": {
+ "@babel/helper-module-transforms": "^7.13.0",
+ "@babel/helper-plugin-utils": "^7.13.0",
+ "@babel/helper-simple-access": "^7.12.13",
+ "babel-plugin-dynamic-import-node": "^2.3.3"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-modules-systemjs": {
+ "version": "7.13.8",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-systemjs/-/plugin-transform-modules-systemjs-7.13.8.tgz",
+ "integrity": "sha512-hwqctPYjhM6cWvVIlOIe27jCIBgHCsdH2xCJVAYQm7V5yTMoilbVMi9f6wKg0rpQAOn6ZG4AOyvCqFF/hUh6+A==",
+ "dependencies": {
+ "@babel/helper-hoist-variables": "^7.13.0",
+ "@babel/helper-module-transforms": "^7.13.0",
+ "@babel/helper-plugin-utils": "^7.13.0",
+ "@babel/helper-validator-identifier": "^7.12.11",
+ "babel-plugin-dynamic-import-node": "^2.3.3"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-modules-umd": {
+ "version": "7.13.0",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-umd/-/plugin-transform-modules-umd-7.13.0.tgz",
+ "integrity": "sha512-D/ILzAh6uyvkWjKKyFE/W0FzWwasv6vPTSqPcjxFqn6QpX3u8DjRVliq4F2BamO2Wee/om06Vyy+vPkNrd4wxw==",
+ "dependencies": {
+ "@babel/helper-module-transforms": "^7.13.0",
+ "@babel/helper-plugin-utils": "^7.13.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-named-capturing-groups-regex": {
+ "version": "7.12.13",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-named-capturing-groups-regex/-/plugin-transform-named-capturing-groups-regex-7.12.13.tgz",
+ "integrity": "sha512-Xsm8P2hr5hAxyYblrfACXpQKdQbx4m2df9/ZZSQ8MAhsadw06+jW7s9zsSw6he+mJZXRlVMyEnVktJo4zjk1WA==",
+ "dependencies": {
+ "@babel/helper-create-regexp-features-plugin": "^7.12.13"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-new-target": {
+ "version": "7.12.13",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-new-target/-/plugin-transform-new-target-7.12.13.tgz",
+ "integrity": "sha512-/KY2hbLxrG5GTQ9zzZSc3xWiOy379pIETEhbtzwZcw9rvuaVV4Fqy7BYGYOWZnaoXIQYbbJ0ziXLa/sKcGCYEQ==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.12.13"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-object-super": {
+ "version": "7.12.13",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-object-super/-/plugin-transform-object-super-7.12.13.tgz",
+ "integrity": "sha512-JzYIcj3XtYspZDV8j9ulnoMPZZnF/Cj0LUxPOjR89BdBVx+zYJI9MdMIlUZjbXDX+6YVeS6I3e8op+qQ3BYBoQ==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.12.13",
+ "@babel/helper-replace-supers": "^7.12.13"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-parameters": {
+ "version": "7.13.0",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.13.0.tgz",
+ "integrity": "sha512-Jt8k/h/mIwE2JFEOb3lURoY5C85ETcYPnbuAJ96zRBzh1XHtQZfs62ChZ6EP22QlC8c7Xqr9q+e1SU5qttwwjw==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.13.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-property-literals": {
+ "version": "7.12.13",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-property-literals/-/plugin-transform-property-literals-7.12.13.tgz",
+ "integrity": "sha512-nqVigwVan+lR+g8Fj8Exl0UQX2kymtjcWfMOYM1vTYEKujeyv2SkMgazf2qNcK7l4SDiKyTA/nHCPqL4e2zo1A==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.12.13"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-regenerator": {
+ "version": "7.13.15",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.13.15.tgz",
+ "integrity": "sha512-Bk9cOLSz8DiurcMETZ8E2YtIVJbFCPGW28DJWUakmyVWtQSm6Wsf0p4B4BfEr/eL2Nkhe/CICiUiMOCi1TPhuQ==",
+ "dependencies": {
+ "regenerator-transform": "^0.14.2"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-reserved-words": {
+ "version": "7.12.13",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-reserved-words/-/plugin-transform-reserved-words-7.12.13.tgz",
+ "integrity": "sha512-xhUPzDXxZN1QfiOy/I5tyye+TRz6lA7z6xaT4CLOjPRMVg1ldRf0LHw0TDBpYL4vG78556WuHdyO9oi5UmzZBg==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.12.13"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-runtime": {
+ "version": "7.13.15",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-runtime/-/plugin-transform-runtime-7.13.15.tgz",
+ "integrity": "sha512-d+ezl76gx6Jal08XngJUkXM4lFXK/5Ikl9Mh4HKDxSfGJXmZ9xG64XT2oivBzfxb/eQ62VfvoMkaCZUKJMVrBA==",
+ "dependencies": {
+ "@babel/helper-module-imports": "^7.13.12",
+ "@babel/helper-plugin-utils": "^7.13.0",
+ "babel-plugin-polyfill-corejs2": "^0.2.0",
+ "babel-plugin-polyfill-corejs3": "^0.2.0",
+ "babel-plugin-polyfill-regenerator": "^0.2.0",
+ "semver": "^6.3.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-shorthand-properties": {
+ "version": "7.12.13",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-shorthand-properties/-/plugin-transform-shorthand-properties-7.12.13.tgz",
+ "integrity": "sha512-xpL49pqPnLtf0tVluuqvzWIgLEhuPpZzvs2yabUHSKRNlN7ScYU7aMlmavOeyXJZKgZKQRBlh8rHbKiJDraTSw==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.12.13"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-spread": {
+ "version": "7.13.0",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-spread/-/plugin-transform-spread-7.13.0.tgz",
+ "integrity": "sha512-V6vkiXijjzYeFmQTr3dBxPtZYLPcUfY34DebOU27jIl2M/Y8Egm52Hw82CSjjPqd54GTlJs5x+CR7HeNr24ckg==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.13.0",
+ "@babel/helper-skip-transparent-expression-wrappers": "^7.12.1"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-sticky-regex": {
+ "version": "7.12.13",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-sticky-regex/-/plugin-transform-sticky-regex-7.12.13.tgz",
+ "integrity": "sha512-Jc3JSaaWT8+fr7GRvQP02fKDsYk4K/lYwWq38r/UGfaxo89ajud321NH28KRQ7xy1Ybc0VUE5Pz8psjNNDUglg==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.12.13"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-template-literals": {
+ "version": "7.13.0",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-template-literals/-/plugin-transform-template-literals-7.13.0.tgz",
+ "integrity": "sha512-d67umW6nlfmr1iehCcBv69eSUSySk1EsIS8aTDX4Xo9qajAh6mYtcl4kJrBkGXuxZPEgVr7RVfAvNW6YQkd4Mw==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.13.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-typeof-symbol": {
+ "version": "7.12.13",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-typeof-symbol/-/plugin-transform-typeof-symbol-7.12.13.tgz",
+ "integrity": "sha512-eKv/LmUJpMnu4npgfvs3LiHhJua5fo/CysENxa45YCQXZwKnGCQKAg87bvoqSW1fFT+HA32l03Qxsm8ouTY3ZQ==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.12.13"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-unicode-escapes": {
+ "version": "7.12.13",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-escapes/-/plugin-transform-unicode-escapes-7.12.13.tgz",
+ "integrity": "sha512-0bHEkdwJ/sN/ikBHfSmOXPypN/beiGqjo+o4/5K+vxEFNPRPdImhviPakMKG4x96l85emoa0Z6cDflsdBusZbw==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.12.13"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-unicode-regex": {
+ "version": "7.12.13",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-regex/-/plugin-transform-unicode-regex-7.12.13.tgz",
+ "integrity": "sha512-mDRzSNY7/zopwisPZ5kM9XKCfhchqIYwAKRERtEnhYscZB79VRekuRSoYbN0+KVe3y8+q1h6A4svXtP7N+UoCA==",
+ "dependencies": {
+ "@babel/helper-create-regexp-features-plugin": "^7.12.13",
+ "@babel/helper-plugin-utils": "^7.12.13"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/preset-env": {
+ "version": "7.13.15",
+ "resolved": "https://registry.npmjs.org/@babel/preset-env/-/preset-env-7.13.15.tgz",
+ "integrity": "sha512-D4JAPMXcxk69PKe81jRJ21/fP/uYdcTZ3hJDF5QX2HSI9bBxxYw/dumdR6dGumhjxlprHPE4XWoPaqzZUVy2MA==",
+ "dependencies": {
+ "@babel/compat-data": "^7.13.15",
+ "@babel/helper-compilation-targets": "^7.13.13",
+ "@babel/helper-plugin-utils": "^7.13.0",
+ "@babel/helper-validator-option": "^7.12.17",
+ "@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": "^7.13.12",
+ "@babel/plugin-proposal-async-generator-functions": "^7.13.15",
+ "@babel/plugin-proposal-class-properties": "^7.13.0",
+ "@babel/plugin-proposal-dynamic-import": "^7.13.8",
+ "@babel/plugin-proposal-export-namespace-from": "^7.12.13",
+ "@babel/plugin-proposal-json-strings": "^7.13.8",
+ "@babel/plugin-proposal-logical-assignment-operators": "^7.13.8",
+ "@babel/plugin-proposal-nullish-coalescing-operator": "^7.13.8",
+ "@babel/plugin-proposal-numeric-separator": "^7.12.13",
+ "@babel/plugin-proposal-object-rest-spread": "^7.13.8",
+ "@babel/plugin-proposal-optional-catch-binding": "^7.13.8",
+ "@babel/plugin-proposal-optional-chaining": "^7.13.12",
+ "@babel/plugin-proposal-private-methods": "^7.13.0",
+ "@babel/plugin-proposal-unicode-property-regex": "^7.12.13",
+ "@babel/plugin-syntax-async-generators": "^7.8.4",
+ "@babel/plugin-syntax-class-properties": "^7.12.13",
+ "@babel/plugin-syntax-dynamic-import": "^7.8.3",
+ "@babel/plugin-syntax-export-namespace-from": "^7.8.3",
+ "@babel/plugin-syntax-json-strings": "^7.8.3",
+ "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4",
+ "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3",
+ "@babel/plugin-syntax-numeric-separator": "^7.10.4",
+ "@babel/plugin-syntax-object-rest-spread": "^7.8.3",
+ "@babel/plugin-syntax-optional-catch-binding": "^7.8.3",
+ "@babel/plugin-syntax-optional-chaining": "^7.8.3",
+ "@babel/plugin-syntax-top-level-await": "^7.12.13",
+ "@babel/plugin-transform-arrow-functions": "^7.13.0",
+ "@babel/plugin-transform-async-to-generator": "^7.13.0",
+ "@babel/plugin-transform-block-scoped-functions": "^7.12.13",
+ "@babel/plugin-transform-block-scoping": "^7.12.13",
+ "@babel/plugin-transform-classes": "^7.13.0",
+ "@babel/plugin-transform-computed-properties": "^7.13.0",
+ "@babel/plugin-transform-destructuring": "^7.13.0",
+ "@babel/plugin-transform-dotall-regex": "^7.12.13",
+ "@babel/plugin-transform-duplicate-keys": "^7.12.13",
+ "@babel/plugin-transform-exponentiation-operator": "^7.12.13",
+ "@babel/plugin-transform-for-of": "^7.13.0",
+ "@babel/plugin-transform-function-name": "^7.12.13",
+ "@babel/plugin-transform-literals": "^7.12.13",
+ "@babel/plugin-transform-member-expression-literals": "^7.12.13",
+ "@babel/plugin-transform-modules-amd": "^7.13.0",
+ "@babel/plugin-transform-modules-commonjs": "^7.13.8",
+ "@babel/plugin-transform-modules-systemjs": "^7.13.8",
+ "@babel/plugin-transform-modules-umd": "^7.13.0",
+ "@babel/plugin-transform-named-capturing-groups-regex": "^7.12.13",
+ "@babel/plugin-transform-new-target": "^7.12.13",
+ "@babel/plugin-transform-object-super": "^7.12.13",
+ "@babel/plugin-transform-parameters": "^7.13.0",
+ "@babel/plugin-transform-property-literals": "^7.12.13",
+ "@babel/plugin-transform-regenerator": "^7.13.15",
+ "@babel/plugin-transform-reserved-words": "^7.12.13",
+ "@babel/plugin-transform-shorthand-properties": "^7.12.13",
+ "@babel/plugin-transform-spread": "^7.13.0",
+ "@babel/plugin-transform-sticky-regex": "^7.12.13",
+ "@babel/plugin-transform-template-literals": "^7.13.0",
+ "@babel/plugin-transform-typeof-symbol": "^7.12.13",
+ "@babel/plugin-transform-unicode-escapes": "^7.12.13",
+ "@babel/plugin-transform-unicode-regex": "^7.12.13",
+ "@babel/preset-modules": "^0.1.4",
+ "@babel/types": "^7.13.14",
+ "babel-plugin-polyfill-corejs2": "^0.2.0",
+ "babel-plugin-polyfill-corejs3": "^0.2.0",
+ "babel-plugin-polyfill-regenerator": "^0.2.0",
+ "core-js-compat": "^3.9.0",
+ "semver": "^6.3.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/preset-modules": {
+ "version": "0.1.4",
+ "resolved": "https://registry.npmjs.org/@babel/preset-modules/-/preset-modules-0.1.4.tgz",
+ "integrity": "sha512-J36NhwnfdzpmH41M1DrnkkgAqhZaqr/NBdPfQ677mLzlaXo+oDiv1deyCDtgAhz8p328otdob0Du7+xgHGZbKg==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.0.0",
+ "@babel/plugin-proposal-unicode-property-regex": "^7.4.4",
+ "@babel/plugin-transform-dotall-regex": "^7.4.4",
+ "@babel/types": "^7.4.4",
+ "esutils": "^2.0.2"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/runtime": {
+ "version": "7.13.17",
+ "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.13.17.tgz",
+ "integrity": "sha512-NCdgJEelPTSh+FEFylhnP1ylq848l1z9t9N0j1Lfbcw0+KXGjsTvUmkxy+voLLXB5SOKMbLLx4jxYliGrYQseA==",
+ "dependencies": {
+ "regenerator-runtime": "^0.13.4"
+ }
+ },
+ "node_modules/@babel/template": {
+ "version": "7.12.13",
+ "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.12.13.tgz",
+ "integrity": "sha512-/7xxiGA57xMo/P2GVvdEumr8ONhFOhfgq2ihK3h1e6THqzTAkHbkXgB0xI9yeTfIUoH3+oAeHhqm/I43OTbbjA==",
+ "dependencies": {
+ "@babel/code-frame": "^7.12.13",
+ "@babel/parser": "^7.12.13",
+ "@babel/types": "^7.12.13"
+ }
+ },
+ "node_modules/@babel/traverse": {
+ "version": "7.13.17",
+ "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.13.17.tgz",
+ "integrity": "sha512-BMnZn0R+X6ayqm3C3To7o1j7Q020gWdqdyP50KEoVqaCO2c/Im7sYZSmVgvefp8TTMQ+9CtwuBp0Z1CZ8V3Pvg==",
+ "dependencies": {
+ "@babel/code-frame": "^7.12.13",
+ "@babel/generator": "^7.13.16",
+ "@babel/helper-function-name": "^7.12.13",
+ "@babel/helper-split-export-declaration": "^7.12.13",
+ "@babel/parser": "^7.13.16",
+ "@babel/types": "^7.13.17",
+ "debug": "^4.1.0",
+ "globals": "^11.1.0"
+ }
+ },
+ "node_modules/@babel/traverse/node_modules/debug": {
+ "version": "4.3.1",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz",
+ "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==",
+ "dependencies": {
+ "ms": "2.1.2"
+ },
+ "engines": {
+ "node": ">=6.0"
+ },
+ "peerDependenciesMeta": {
+ "supports-color": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@babel/traverse/node_modules/ms": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz",
+ "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w=="
+ },
+ "node_modules/@babel/types": {
+ "version": "7.13.17",
+ "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.13.17.tgz",
+ "integrity": "sha512-RawydLgxbOPDlTLJNtoIypwdmAy//uQIzlKt2+iBiJaRlVuI6QLUxVAyWGNfOzp8Yu4L4lLIacoCyTNtpb4wiA==",
+ "dependencies": {
+ "@babel/helper-validator-identifier": "^7.12.11",
+ "to-fast-properties": "^2.0.0"
+ }
+ },
+ "node_modules/@cosmos-ui/vue": {
+ "version": "0.35.0",
+ "resolved": "https://registry.npmjs.org/@cosmos-ui/vue/-/vue-0.35.0.tgz",
+ "integrity": "sha512-WTCJBWSoiDckgvXWPByKkQ7ZVSf9LSMsizIAHBnsi0Zp3GOaEqPNBpgjGt2JEhpDPr7+YwyIgmqQ0S3D+Hq5iQ==",
+ "dependencies": {
+ "algoliasearch": "^4.1.0",
+ "axios": "^0.19.2",
+ "clipboard-copy": "^3.1.0",
+ "fuse.js": "^3.4.6",
+ "hotkeys-js": "^3.7.3",
+ "js-base64": "^2.5.2",
+ "lodash": "^4.17.15",
+ "markdown-it": "^10.0.0",
+ "prismjs": "^1.19.0",
+ "querystring": "^0.2.0",
+ "tiny-cookie": "^2.3.1",
+ "vue": "^2.6.10"
+ }
+ },
+ "node_modules/@cosmos-ui/vue/node_modules/axios": {
+ "version": "0.19.2",
+ "resolved": "https://registry.npmjs.org/axios/-/axios-0.19.2.tgz",
+ "integrity": "sha512-fjgm5MvRHLhx+osE2xoekY70AhARk3a6hkN+3Io1jc00jtquGvxYlKlsFUhmUET0V5te6CcZI7lcv2Ym61mjHA==",
+ "deprecated": "Critical security vulnerability fixed in v0.21.1. For more information, see https://github.com/axios/axios/pull/3410",
+ "dependencies": {
+ "follow-redirects": "1.5.10"
+ }
+ },
+ "node_modules/@cosmos-ui/vue/node_modules/debug": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-3.1.0.tgz",
+ "integrity": "sha512-OX8XqP7/1a9cqkxYw2yXss15f26NKWBpDXQd0/uK/KPqdQhxbPa994hnzjcE2VqQpDslf55723cKPUOGSmMY3g==",
+ "dependencies": {
+ "ms": "2.0.0"
+ }
+ },
+ "node_modules/@cosmos-ui/vue/node_modules/entities": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/entities/-/entities-2.0.3.tgz",
+ "integrity": "sha512-MyoZ0jgnLvB2X3Lg5HqpFmn1kybDiIfEQmKzTb5apr51Rb+T3KdmMiqa70T+bhGnyv7bQ6WMj2QMHpGMmlrUYQ=="
+ },
+ "node_modules/@cosmos-ui/vue/node_modules/follow-redirects": {
+ "version": "1.5.10",
+ "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.5.10.tgz",
+ "integrity": "sha512-0V5l4Cizzvqt5D44aTXbFZz+FtyXV1vrDN6qrelxtfYQKW0KO0W2T/hkE8xvGa/540LkZlkaUjO4ailYTFtHVQ==",
+ "dependencies": {
+ "debug": "=3.1.0"
+ },
+ "engines": {
+ "node": ">=4.0"
+ }
+ },
+ "node_modules/@cosmos-ui/vue/node_modules/linkify-it": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/linkify-it/-/linkify-it-2.2.0.tgz",
+ "integrity": "sha512-GnAl/knGn+i1U/wjBz3akz2stz+HrHLsxMwHQGofCDfPvlf+gDKN58UtfmUquTY4/MXeE2x7k19KQmeoZi94Iw==",
+ "dependencies": {
+ "uc.micro": "^1.0.1"
+ }
+ },
+ "node_modules/@cosmos-ui/vue/node_modules/markdown-it": {
+ "version": "10.0.0",
+ "resolved": "https://registry.npmjs.org/markdown-it/-/markdown-it-10.0.0.tgz",
+ "integrity": "sha512-YWOP1j7UbDNz+TumYP1kpwnP0aEa711cJjrAQrzd0UXlbJfc5aAq0F/PZHjiioqDC1NKgvIMX+o+9Bk7yuM2dg==",
+ "dependencies": {
+ "argparse": "^1.0.7",
+ "entities": "~2.0.0",
+ "linkify-it": "^2.0.0",
+ "mdurl": "^1.0.1",
+ "uc.micro": "^1.0.5"
+ },
+ "bin": {
+ "markdown-it": "bin/markdown-it.js"
+ }
+ },
+ "node_modules/@mrmlnc/readdir-enhanced": {
+ "version": "2.2.1",
+ "resolved": "https://registry.npmjs.org/@mrmlnc/readdir-enhanced/-/readdir-enhanced-2.2.1.tgz",
+ "integrity": "sha512-bPHp6Ji8b41szTOcaP63VlnbbO5Ny6dwAATtY6JTjh5N2OLrb5Qk/Th5cRkRQhkWCt+EJsYrNB0MiL+Gpn6e3g==",
+ "dependencies": {
+ "call-me-maybe": "^1.0.1",
+ "glob-to-regexp": "^0.3.0"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/@nodelib/fs.stat": {
+ "version": "1.1.3",
+ "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-1.1.3.tgz",
+ "integrity": "sha512-shAmDyaQC4H92APFoIaVDHCx5bStIocgvbwQyxPRrbUY20V1EYTbSDchWbuwlMG3V17cprZhA6+78JfB+3DTPw==",
+ "engines": {
+ "node": ">= 6"
+ }
+ },
+ "node_modules/@sindresorhus/is": {
+ "version": "0.14.0",
+ "resolved": "https://registry.npmjs.org/@sindresorhus/is/-/is-0.14.0.tgz",
+ "integrity": "sha512-9NET910DNaIPngYnLLPeg+Ogzqsi9uM4mSboU5y6p8S5DzMTVEsJZrawi+BoDNUVBa2DhJqQYUFvMDfgU062LQ==",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/@szmarczak/http-timer": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/@szmarczak/http-timer/-/http-timer-1.1.2.tgz",
+ "integrity": "sha512-XIB2XbzHTN6ieIjfIMV9hlVcfPU26s2vafYWQcZHWXHOxiaRZYEDKEwdl129Zyg50+foYV2jCgtrqSA6qNuNSA==",
+ "dependencies": {
+ "defer-to-connect": "^1.0.1"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/@types/glob": {
+ "version": "7.1.3",
+ "resolved": "https://registry.npmjs.org/@types/glob/-/glob-7.1.3.tgz",
+ "integrity": "sha512-SEYeGAIQIQX8NN6LDKprLjbrd5dARM5EXsd8GI/A5l0apYI1fGMWgPHSe4ZKL4eozlAyI+doUE9XbYS4xCkQ1w==",
+ "dependencies": {
+ "@types/minimatch": "*",
+ "@types/node": "*"
+ }
+ },
+ "node_modules/@types/json-schema": {
+ "version": "7.0.7",
+ "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.7.tgz",
+ "integrity": "sha512-cxWFQVseBm6O9Gbw1IWb8r6OS4OhSt3hPZLkFApLjM8TEXROBuQGLAH2i2gZpcXdLBIrpXuTDhH7Vbm1iXmNGA=="
+ },
+ "node_modules/@types/minimatch": {
+ "version": "3.0.4",
+ "resolved": "https://registry.npmjs.org/@types/minimatch/-/minimatch-3.0.4.tgz",
+ "integrity": "sha512-1z8k4wzFnNjVK/tlxvrWuK5WMt6mydWWP7+zvH5eFep4oj+UkrfiJTRtjCeBXNpwaA/FYqqtb4/QS4ianFpIRA=="
+ },
+ "node_modules/@types/node": {
+ "version": "15.0.1",
+ "resolved": "https://registry.npmjs.org/@types/node/-/node-15.0.1.tgz",
+ "integrity": "sha512-TMkXt0Ck1y0KKsGr9gJtWGjttxlZnnvDtphxUOSd0bfaR6Q1jle+sPvrzNR1urqYTWMinoKvjKfXUGsumaO1PA=="
+ },
+ "node_modules/@types/q": {
+ "version": "1.5.4",
+ "resolved": "https://registry.npmjs.org/@types/q/-/q-1.5.4.tgz",
+ "integrity": "sha512-1HcDas8SEj4z1Wc696tH56G8OlRaH/sqZOynNNB+HF0WOeXPaxTtbYzJY2oEfiUxjSKjhCKr+MvR7dCHcEelug=="
+ },
+ "node_modules/@vue/babel-helper-vue-jsx-merge-props": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/@vue/babel-helper-vue-jsx-merge-props/-/babel-helper-vue-jsx-merge-props-1.2.1.tgz",
+ "integrity": "sha512-QOi5OW45e2R20VygMSNhyQHvpdUwQZqGPc748JLGCYEy+yp8fNFNdbNIGAgZmi9e+2JHPd6i6idRuqivyicIkA=="
+ },
+ "node_modules/@vue/babel-helper-vue-transform-on": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/@vue/babel-helper-vue-transform-on/-/babel-helper-vue-transform-on-1.0.2.tgz",
+ "integrity": "sha512-hz4R8tS5jMn8lDq6iD+yWL6XNB699pGIVLk7WSJnn1dbpjaazsjZQkieJoRX6gW5zpYSCFqQ7jUquPNY65tQYA=="
+ },
+ "node_modules/@vue/babel-plugin-jsx": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/@vue/babel-plugin-jsx/-/babel-plugin-jsx-1.0.5.tgz",
+ "integrity": "sha512-Jtipy7oI0am5e1q5Ahunm/cCcCh5ssf5VkMQsLR383S3un5Qh7NBfxgSK9kmWf4IXJEhDeYp9kHv8G/EnMai9A==",
+ "dependencies": {
+ "@babel/helper-module-imports": "^7.0.0",
+ "@babel/plugin-syntax-jsx": "^7.0.0",
+ "@babel/template": "^7.0.0",
+ "@babel/traverse": "^7.0.0",
+ "@babel/types": "^7.0.0",
+ "@vue/babel-helper-vue-transform-on": "^1.0.2",
+ "camelcase": "^6.0.0",
+ "html-tags": "^3.1.0",
+ "svg-tags": "^1.0.0"
+ }
+ },
+ "node_modules/@vue/babel-plugin-transform-vue-jsx": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/@vue/babel-plugin-transform-vue-jsx/-/babel-plugin-transform-vue-jsx-1.2.1.tgz",
+ "integrity": "sha512-HJuqwACYehQwh1fNT8f4kyzqlNMpBuUK4rSiSES5D4QsYncv5fxFsLyrxFPG2ksO7t5WP+Vgix6tt6yKClwPzA==",
+ "dependencies": {
+ "@babel/helper-module-imports": "^7.0.0",
+ "@babel/plugin-syntax-jsx": "^7.2.0",
+ "@vue/babel-helper-vue-jsx-merge-props": "^1.2.1",
+ "html-tags": "^2.0.0",
+ "lodash.kebabcase": "^4.1.1",
+ "svg-tags": "^1.0.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@vue/babel-plugin-transform-vue-jsx/node_modules/html-tags": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/html-tags/-/html-tags-2.0.0.tgz",
+ "integrity": "sha1-ELMKOGCF9Dzt41PMj6fLDe7qZos=",
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/@vue/babel-preset-app": {
+ "version": "4.5.12",
+ "resolved": "https://registry.npmjs.org/@vue/babel-preset-app/-/babel-preset-app-4.5.12.tgz",
+ "integrity": "sha512-8q67ORQ9O0Ms0nlqsXTVhaBefRBaLrzPxOewAZhdcO7onHwcO5/wRdWtHhZgfpCZlhY7NogkU16z3WnorSSkEA==",
+ "dependencies": {
+ "@babel/core": "^7.11.0",
+ "@babel/helper-compilation-targets": "^7.9.6",
+ "@babel/helper-module-imports": "^7.8.3",
+ "@babel/plugin-proposal-class-properties": "^7.8.3",
+ "@babel/plugin-proposal-decorators": "^7.8.3",
+ "@babel/plugin-syntax-dynamic-import": "^7.8.3",
+ "@babel/plugin-syntax-jsx": "^7.8.3",
+ "@babel/plugin-transform-runtime": "^7.11.0",
+ "@babel/preset-env": "^7.11.0",
+ "@babel/runtime": "^7.11.0",
+ "@vue/babel-plugin-jsx": "^1.0.3",
+ "@vue/babel-preset-jsx": "^1.2.4",
+ "babel-plugin-dynamic-import-node": "^2.3.3",
+ "core-js": "^3.6.5",
+ "core-js-compat": "^3.6.5",
+ "semver": "^6.1.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "*",
+ "core-js": "^3",
+ "vue": "^2 || ^3.0.0-0"
+ },
+ "peerDependenciesMeta": {
+ "core-js": {
+ "optional": true
+ },
+ "vue": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@vue/babel-preset-jsx": {
+ "version": "1.2.4",
+ "resolved": "https://registry.npmjs.org/@vue/babel-preset-jsx/-/babel-preset-jsx-1.2.4.tgz",
+ "integrity": "sha512-oRVnmN2a77bYDJzeGSt92AuHXbkIxbf/XXSE3klINnh9AXBmVS1DGa1f0d+dDYpLfsAKElMnqKTQfKn7obcL4w==",
+ "dependencies": {
+ "@vue/babel-helper-vue-jsx-merge-props": "^1.2.1",
+ "@vue/babel-plugin-transform-vue-jsx": "^1.2.1",
+ "@vue/babel-sugar-composition-api-inject-h": "^1.2.1",
+ "@vue/babel-sugar-composition-api-render-instance": "^1.2.4",
+ "@vue/babel-sugar-functional-vue": "^1.2.2",
+ "@vue/babel-sugar-inject-h": "^1.2.2",
+ "@vue/babel-sugar-v-model": "^1.2.3",
+ "@vue/babel-sugar-v-on": "^1.2.3"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@vue/babel-sugar-composition-api-inject-h": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/@vue/babel-sugar-composition-api-inject-h/-/babel-sugar-composition-api-inject-h-1.2.1.tgz",
+ "integrity": "sha512-4B3L5Z2G+7s+9Bwbf+zPIifkFNcKth7fQwekVbnOA3cr3Pq71q71goWr97sk4/yyzH8phfe5ODVzEjX7HU7ItQ==",
+ "dependencies": {
+ "@babel/plugin-syntax-jsx": "^7.2.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@vue/babel-sugar-composition-api-render-instance": {
+ "version": "1.2.4",
+ "resolved": "https://registry.npmjs.org/@vue/babel-sugar-composition-api-render-instance/-/babel-sugar-composition-api-render-instance-1.2.4.tgz",
+ "integrity": "sha512-joha4PZznQMsxQYXtR3MnTgCASC9u3zt9KfBxIeuI5g2gscpTsSKRDzWQt4aqNIpx6cv8On7/m6zmmovlNsG7Q==",
+ "dependencies": {
+ "@babel/plugin-syntax-jsx": "^7.2.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@vue/babel-sugar-functional-vue": {
+ "version": "1.2.2",
+ "resolved": "https://registry.npmjs.org/@vue/babel-sugar-functional-vue/-/babel-sugar-functional-vue-1.2.2.tgz",
+ "integrity": "sha512-JvbgGn1bjCLByIAU1VOoepHQ1vFsroSA/QkzdiSs657V79q6OwEWLCQtQnEXD/rLTA8rRit4rMOhFpbjRFm82w==",
+ "dependencies": {
+ "@babel/plugin-syntax-jsx": "^7.2.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@vue/babel-sugar-inject-h": {
+ "version": "1.2.2",
+ "resolved": "https://registry.npmjs.org/@vue/babel-sugar-inject-h/-/babel-sugar-inject-h-1.2.2.tgz",
+ "integrity": "sha512-y8vTo00oRkzQTgufeotjCLPAvlhnpSkcHFEp60+LJUwygGcd5Chrpn5480AQp/thrxVm8m2ifAk0LyFel9oCnw==",
+ "dependencies": {
+ "@babel/plugin-syntax-jsx": "^7.2.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@vue/babel-sugar-v-model": {
+ "version": "1.2.3",
+ "resolved": "https://registry.npmjs.org/@vue/babel-sugar-v-model/-/babel-sugar-v-model-1.2.3.tgz",
+ "integrity": "sha512-A2jxx87mySr/ulAsSSyYE8un6SIH0NWHiLaCWpodPCVOlQVODCaSpiR4+IMsmBr73haG+oeCuSvMOM+ttWUqRQ==",
+ "dependencies": {
+ "@babel/plugin-syntax-jsx": "^7.2.0",
+ "@vue/babel-helper-vue-jsx-merge-props": "^1.2.1",
+ "@vue/babel-plugin-transform-vue-jsx": "^1.2.1",
+ "camelcase": "^5.0.0",
+ "html-tags": "^2.0.0",
+ "svg-tags": "^1.0.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@vue/babel-sugar-v-model/node_modules/camelcase": {
+ "version": "5.3.1",
+ "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz",
+ "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/@vue/babel-sugar-v-model/node_modules/html-tags": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/html-tags/-/html-tags-2.0.0.tgz",
+ "integrity": "sha1-ELMKOGCF9Dzt41PMj6fLDe7qZos=",
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/@vue/babel-sugar-v-on": {
+ "version": "1.2.3",
+ "resolved": "https://registry.npmjs.org/@vue/babel-sugar-v-on/-/babel-sugar-v-on-1.2.3.tgz",
+ "integrity": "sha512-kt12VJdz/37D3N3eglBywV8GStKNUhNrsxChXIV+o0MwVXORYuhDTHJRKPgLJRb/EY3vM2aRFQdxJBp9CLikjw==",
+ "dependencies": {
+ "@babel/plugin-syntax-jsx": "^7.2.0",
+ "@vue/babel-plugin-transform-vue-jsx": "^1.2.1",
+ "camelcase": "^5.0.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@vue/babel-sugar-v-on/node_modules/camelcase": {
+ "version": "5.3.1",
+ "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz",
+ "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/@vue/component-compiler-utils": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/@vue/component-compiler-utils/-/component-compiler-utils-3.2.0.tgz",
+ "integrity": "sha512-lejBLa7xAMsfiZfNp7Kv51zOzifnb29FwdnMLa96z26kXErPFioSf9BMcePVIQ6/Gc6/mC0UrPpxAWIHyae0vw==",
+ "dependencies": {
+ "consolidate": "^0.15.1",
+ "hash-sum": "^1.0.2",
+ "lru-cache": "^4.1.2",
+ "merge-source-map": "^1.1.0",
+ "postcss": "^7.0.14",
+ "postcss-selector-parser": "^6.0.2",
+ "source-map": "~0.6.1",
+ "vue-template-es2015-compiler": "^1.9.0"
+ },
+ "optionalDependencies": {
+ "prettier": "^1.18.2"
+ }
+ },
+ "node_modules/@vue/component-compiler-utils/node_modules/lru-cache": {
+ "version": "4.1.5",
+ "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-4.1.5.tgz",
+ "integrity": "sha512-sWZlbEP2OsHNkXrMl5GYk/jKk70MBng6UU4YI/qGDYbgf6YbP4EvmqISbXCoJiRKs+1bSpFHVgQxvJ17F2li5g==",
+ "dependencies": {
+ "pseudomap": "^1.0.2",
+ "yallist": "^2.1.2"
+ }
+ },
+ "node_modules/@vue/component-compiler-utils/node_modules/source-map": {
+ "version": "0.6.1",
+ "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz",
+ "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/@vue/component-compiler-utils/node_modules/yallist": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/yallist/-/yallist-2.1.2.tgz",
+ "integrity": "sha1-HBH5IY8HYImkfdUS+TxmmaaoHVI="
+ },
+ "node_modules/@vuepress/core": {
+ "version": "1.8.2",
+ "resolved": "https://registry.npmjs.org/@vuepress/core/-/core-1.8.2.tgz",
+ "integrity": "sha512-lh9BLC06k9s0wxTuWtCkiNj49fkbW87enp0XSrFZHEoyDGSGndQjZmMMErcHc5Hx7nrW1nzc33sPH1NNtJl0hw==",
+ "dependencies": {
+ "@babel/core": "^7.8.4",
+ "@vue/babel-preset-app": "^4.1.2",
+ "@vuepress/markdown": "1.8.2",
+ "@vuepress/markdown-loader": "1.8.2",
+ "@vuepress/plugin-last-updated": "1.8.2",
+ "@vuepress/plugin-register-components": "1.8.2",
+ "@vuepress/shared-utils": "1.8.2",
+ "autoprefixer": "^9.5.1",
+ "babel-loader": "^8.0.4",
+ "cache-loader": "^3.0.0",
+ "chokidar": "^2.0.3",
+ "connect-history-api-fallback": "^1.5.0",
+ "copy-webpack-plugin": "^5.0.2",
+ "core-js": "^3.6.4",
+ "cross-spawn": "^6.0.5",
+ "css-loader": "^2.1.1",
+ "file-loader": "^3.0.1",
+ "js-yaml": "^3.13.1",
+ "lru-cache": "^5.1.1",
+ "mini-css-extract-plugin": "0.6.0",
+ "optimize-css-assets-webpack-plugin": "^5.0.1",
+ "portfinder": "^1.0.13",
+ "postcss-loader": "^3.0.0",
+ "postcss-safe-parser": "^4.0.1",
+ "toml": "^3.0.0",
+ "url-loader": "^1.0.1",
+ "vue": "^2.6.10",
+ "vue-loader": "^15.7.1",
+ "vue-router": "^3.4.5",
+ "vue-server-renderer": "^2.6.10",
+ "vue-template-compiler": "^2.6.10",
+ "vuepress-html-webpack-plugin": "^3.2.0",
+ "vuepress-plugin-container": "^2.0.2",
+ "webpack": "^4.8.1",
+ "webpack-chain": "^6.0.0",
+ "webpack-dev-server": "^3.5.1",
+ "webpack-merge": "^4.1.2",
+ "webpackbar": "3.2.0"
+ },
+ "engines": {
+ "node": ">=8.6"
+ }
+ },
+ "node_modules/@vuepress/markdown": {
+ "version": "1.8.2",
+ "resolved": "https://registry.npmjs.org/@vuepress/markdown/-/markdown-1.8.2.tgz",
+ "integrity": "sha512-zznBHVqW+iBkznF/BO/GY9RFu53khyl0Ey0PnGqvwCJpRLNan6y5EXgYumtjw2GSYn5nDTTALYxtyNBdz64PKg==",
+ "dependencies": {
+ "@vuepress/shared-utils": "1.8.2",
+ "markdown-it": "^8.4.1",
+ "markdown-it-anchor": "^5.0.2",
+ "markdown-it-chain": "^1.3.0",
+ "markdown-it-emoji": "^1.4.0",
+ "markdown-it-table-of-contents": "^0.4.0",
+ "prismjs": "^1.13.0"
+ }
+ },
+ "node_modules/@vuepress/markdown-loader": {
+ "version": "1.8.2",
+ "resolved": "https://registry.npmjs.org/@vuepress/markdown-loader/-/markdown-loader-1.8.2.tgz",
+ "integrity": "sha512-mWzFXikCUcAN/chpKkqZpRYKdo0312hMv8cBea2hvrJYV6y4ODB066XKvXN8JwOcxuCjxWYJkhWGr+pXq1oTtw==",
+ "dependencies": {
+ "@vuepress/markdown": "1.8.2",
+ "loader-utils": "^1.1.0",
+ "lru-cache": "^5.1.1"
+ }
+ },
+ "node_modules/@vuepress/markdown/node_modules/entities": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/entities/-/entities-1.1.2.tgz",
+ "integrity": "sha512-f2LZMYl1Fzu7YSBKg+RoROelpOaNrcGmE9AZubeDfrCEia483oW4MI4VyFd5VNHIgQ/7qm1I0wUHK1eJnn2y2w=="
+ },
+ "node_modules/@vuepress/markdown/node_modules/linkify-it": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/linkify-it/-/linkify-it-2.2.0.tgz",
+ "integrity": "sha512-GnAl/knGn+i1U/wjBz3akz2stz+HrHLsxMwHQGofCDfPvlf+gDKN58UtfmUquTY4/MXeE2x7k19KQmeoZi94Iw==",
+ "dependencies": {
+ "uc.micro": "^1.0.1"
+ }
+ },
+ "node_modules/@vuepress/markdown/node_modules/markdown-it": {
+ "version": "8.4.2",
+ "resolved": "https://registry.npmjs.org/markdown-it/-/markdown-it-8.4.2.tgz",
+ "integrity": "sha512-GcRz3AWTqSUphY3vsUqQSFMbgR38a4Lh3GWlHRh/7MRwz8mcu9n2IO7HOh+bXHrR9kOPDl5RNCaEsrneb+xhHQ==",
+ "dependencies": {
+ "argparse": "^1.0.7",
+ "entities": "~1.1.1",
+ "linkify-it": "^2.0.0",
+ "mdurl": "^1.0.1",
+ "uc.micro": "^1.0.5"
+ },
+ "bin": {
+ "markdown-it": "bin/markdown-it.js"
+ }
+ },
+ "node_modules/@vuepress/plugin-active-header-links": {
+ "version": "1.8.2",
+ "resolved": "https://registry.npmjs.org/@vuepress/plugin-active-header-links/-/plugin-active-header-links-1.8.2.tgz",
+ "integrity": "sha512-JmXAQg8D7J8mcKe2Ue3BZ9dOCzJMJXP4Cnkkc/IrqfDg0ET0l96gYWZohCqlvRIWt4f0VPiFAO4FLYrW+hko+g==",
+ "dependencies": {
+ "lodash.debounce": "^4.0.8"
+ }
+ },
+ "node_modules/@vuepress/plugin-google-analytics": {
+ "version": "1.7.1",
+ "resolved": "https://registry.npmjs.org/@vuepress/plugin-google-analytics/-/plugin-google-analytics-1.7.1.tgz",
+ "integrity": "sha512-27fQzRMsqGYpMf+ruyhsdfLv/n6z6b6LutFLE/pH66Itlh6ox9ew31x0pqYBbWIC/a4lBfXYUwFvi+DEvlb1EQ=="
+ },
+ "node_modules/@vuepress/plugin-last-updated": {
+ "version": "1.8.2",
+ "resolved": "https://registry.npmjs.org/@vuepress/plugin-last-updated/-/plugin-last-updated-1.8.2.tgz",
+ "integrity": "sha512-pYIRZi52huO9b6HY3JQNPKNERCLzMHejjBRt9ekdnJ1xhLs4MmRvt37BoXjI/qzvXkYtr7nmGgnKThNBVRTZuA==",
+ "dependencies": {
+ "cross-spawn": "^6.0.5"
+ }
+ },
+ "node_modules/@vuepress/plugin-nprogress": {
+ "version": "1.8.2",
+ "resolved": "https://registry.npmjs.org/@vuepress/plugin-nprogress/-/plugin-nprogress-1.8.2.tgz",
+ "integrity": "sha512-3TOBee2NM3WLr1tdjDTGfrAMggjN+OlEPyKyv8FqThsVkDYhw48O3HwqlThp9KX7UbL3ExxIFBwWRFLC+kYrdw==",
+ "dependencies": {
+ "nprogress": "^0.2.0"
+ }
+ },
+ "node_modules/@vuepress/plugin-register-components": {
+ "version": "1.8.2",
+ "resolved": "https://registry.npmjs.org/@vuepress/plugin-register-components/-/plugin-register-components-1.8.2.tgz",
+ "integrity": "sha512-6SUq3nHFMEh9qKFnjA8QnrNxj0kLs7+Gspq1OBU8vtu0NQmSvLFZVaMV7pzT/9zN2nO5Pld5qhsUJv1g71MrEA==",
+ "dependencies": {
+ "@vuepress/shared-utils": "1.8.2"
+ }
+ },
+ "node_modules/@vuepress/plugin-search": {
+ "version": "1.8.2",
+ "resolved": "https://registry.npmjs.org/@vuepress/plugin-search/-/plugin-search-1.8.2.tgz",
+ "integrity": "sha512-JrSJr9o0Kar14lVtZ4wfw39pplxvvMh8vDBD9oW09a+6Zi/4bySPGdcdaqdqGW+OHSiZNvG+6uyfKSBBBqF6PA=="
+ },
+ "node_modules/@vuepress/shared-utils": {
+ "version": "1.8.2",
+ "resolved": "https://registry.npmjs.org/@vuepress/shared-utils/-/shared-utils-1.8.2.tgz",
+ "integrity": "sha512-6kGubc7iBDWruEBUU7yR+sQ++SOhMuvKWvWeTZJKRZedthycdzYz7QVpua0FaZSAJm5/dIt8ymU4WQvxTtZgTQ==",
+ "dependencies": {
+ "chalk": "^2.3.2",
+ "escape-html": "^1.0.3",
+ "fs-extra": "^7.0.1",
+ "globby": "^9.2.0",
+ "gray-matter": "^4.0.1",
+ "hash-sum": "^1.0.2",
+ "semver": "^6.0.0",
+ "toml": "^3.0.0",
+ "upath": "^1.1.0"
+ }
+ },
+ "node_modules/@vuepress/theme-default": {
+ "version": "1.8.2",
+ "resolved": "https://registry.npmjs.org/@vuepress/theme-default/-/theme-default-1.8.2.tgz",
+ "integrity": "sha512-rE7M1rs3n2xp4a/GrweO8EGwqFn3EA5gnFWdVmVIHyr7C1nix+EqjpPQF1SVWNnIrDdQuCw38PqS+oND1K2vYw==",
+ "dependencies": {
+ "@vuepress/plugin-active-header-links": "1.8.2",
+ "@vuepress/plugin-nprogress": "1.8.2",
+ "@vuepress/plugin-search": "1.8.2",
+ "docsearch.js": "^2.5.2",
+ "lodash": "^4.17.15",
+ "stylus": "^0.54.8",
+ "stylus-loader": "^3.0.2",
+ "vuepress-plugin-container": "^2.0.2",
+ "vuepress-plugin-smooth-scroll": "^0.0.3"
+ }
+ },
+ "node_modules/@webassemblyjs/ast": {
+ "version": "1.9.0",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/ast/-/ast-1.9.0.tgz",
+ "integrity": "sha512-C6wW5L+b7ogSDVqymbkkvuW9kruN//YisMED04xzeBBqjHa2FYnmvOlS6Xj68xWQRgWvI9cIglsjFowH/RJyEA==",
+ "dependencies": {
+ "@webassemblyjs/helper-module-context": "1.9.0",
+ "@webassemblyjs/helper-wasm-bytecode": "1.9.0",
+ "@webassemblyjs/wast-parser": "1.9.0"
+ }
+ },
+ "node_modules/@webassemblyjs/floating-point-hex-parser": {
+ "version": "1.9.0",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/floating-point-hex-parser/-/floating-point-hex-parser-1.9.0.tgz",
+ "integrity": "sha512-TG5qcFsS8QB4g4MhrxK5TqfdNe7Ey/7YL/xN+36rRjl/BlGE/NcBvJcqsRgCP6Z92mRE+7N50pRIi8SmKUbcQA=="
+ },
+ "node_modules/@webassemblyjs/helper-api-error": {
+ "version": "1.9.0",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-api-error/-/helper-api-error-1.9.0.tgz",
+ "integrity": "sha512-NcMLjoFMXpsASZFxJ5h2HZRcEhDkvnNFOAKneP5RbKRzaWJN36NC4jqQHKwStIhGXu5mUWlUUk7ygdtrO8lbmw=="
+ },
+ "node_modules/@webassemblyjs/helper-buffer": {
+ "version": "1.9.0",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-buffer/-/helper-buffer-1.9.0.tgz",
+ "integrity": "sha512-qZol43oqhq6yBPx7YM3m9Bv7WMV9Eevj6kMi6InKOuZxhw+q9hOkvq5e/PpKSiLfyetpaBnogSbNCfBwyB00CA=="
+ },
+ "node_modules/@webassemblyjs/helper-code-frame": {
+ "version": "1.9.0",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-code-frame/-/helper-code-frame-1.9.0.tgz",
+ "integrity": "sha512-ERCYdJBkD9Vu4vtjUYe8LZruWuNIToYq/ME22igL+2vj2dQ2OOujIZr3MEFvfEaqKoVqpsFKAGsRdBSBjrIvZA==",
+ "dependencies": {
+ "@webassemblyjs/wast-printer": "1.9.0"
+ }
+ },
+ "node_modules/@webassemblyjs/helper-fsm": {
+ "version": "1.9.0",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-fsm/-/helper-fsm-1.9.0.tgz",
+ "integrity": "sha512-OPRowhGbshCb5PxJ8LocpdX9Kl0uB4XsAjl6jH/dWKlk/mzsANvhwbiULsaiqT5GZGT9qinTICdj6PLuM5gslw=="
+ },
+ "node_modules/@webassemblyjs/helper-module-context": {
+ "version": "1.9.0",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-module-context/-/helper-module-context-1.9.0.tgz",
+ "integrity": "sha512-MJCW8iGC08tMk2enck1aPW+BE5Cw8/7ph/VGZxwyvGbJwjktKkDK7vy7gAmMDx88D7mhDTCNKAW5tED+gZ0W8g==",
+ "dependencies": {
+ "@webassemblyjs/ast": "1.9.0"
+ }
+ },
+ "node_modules/@webassemblyjs/helper-wasm-bytecode": {
+ "version": "1.9.0",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-bytecode/-/helper-wasm-bytecode-1.9.0.tgz",
+ "integrity": "sha512-R7FStIzyNcd7xKxCZH5lE0Bqy+hGTwS3LJjuv1ZVxd9O7eHCedSdrId/hMOd20I+v8wDXEn+bjfKDLzTepoaUw=="
+ },
+ "node_modules/@webassemblyjs/helper-wasm-section": {
+ "version": "1.9.0",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-section/-/helper-wasm-section-1.9.0.tgz",
+ "integrity": "sha512-XnMB8l3ek4tvrKUUku+IVaXNHz2YsJyOOmz+MMkZvh8h1uSJpSen6vYnw3IoQ7WwEuAhL8Efjms1ZWjqh2agvw==",
+ "dependencies": {
+ "@webassemblyjs/ast": "1.9.0",
+ "@webassemblyjs/helper-buffer": "1.9.0",
+ "@webassemblyjs/helper-wasm-bytecode": "1.9.0",
+ "@webassemblyjs/wasm-gen": "1.9.0"
+ }
+ },
+ "node_modules/@webassemblyjs/ieee754": {
+ "version": "1.9.0",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/ieee754/-/ieee754-1.9.0.tgz",
+ "integrity": "sha512-dcX8JuYU/gvymzIHc9DgxTzUUTLexWwt8uCTWP3otys596io0L5aW02Gb1RjYpx2+0Jus1h4ZFqjla7umFniTg==",
+ "dependencies": {
+ "@xtuc/ieee754": "^1.2.0"
+ }
+ },
+ "node_modules/@webassemblyjs/leb128": {
+ "version": "1.9.0",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/leb128/-/leb128-1.9.0.tgz",
+ "integrity": "sha512-ENVzM5VwV1ojs9jam6vPys97B/S65YQtv/aanqnU7D8aSoHFX8GyhGg0CMfyKNIHBuAVjy3tlzd5QMMINa7wpw==",
+ "dependencies": {
+ "@xtuc/long": "4.2.2"
+ }
+ },
+ "node_modules/@webassemblyjs/utf8": {
+ "version": "1.9.0",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/utf8/-/utf8-1.9.0.tgz",
+ "integrity": "sha512-GZbQlWtopBTP0u7cHrEx+73yZKrQoBMpwkGEIqlacljhXCkVM1kMQge/Mf+csMJAjEdSwhOyLAS0AoR3AG5P8w=="
+ },
+ "node_modules/@webassemblyjs/wasm-edit": {
+ "version": "1.9.0",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-edit/-/wasm-edit-1.9.0.tgz",
+ "integrity": "sha512-FgHzBm80uwz5M8WKnMTn6j/sVbqilPdQXTWraSjBwFXSYGirpkSWE2R9Qvz9tNiTKQvoKILpCuTjBKzOIm0nxw==",
+ "dependencies": {
+ "@webassemblyjs/ast": "1.9.0",
+ "@webassemblyjs/helper-buffer": "1.9.0",
+ "@webassemblyjs/helper-wasm-bytecode": "1.9.0",
+ "@webassemblyjs/helper-wasm-section": "1.9.0",
+ "@webassemblyjs/wasm-gen": "1.9.0",
+ "@webassemblyjs/wasm-opt": "1.9.0",
+ "@webassemblyjs/wasm-parser": "1.9.0",
+ "@webassemblyjs/wast-printer": "1.9.0"
+ }
+ },
+ "node_modules/@webassemblyjs/wasm-gen": {
+ "version": "1.9.0",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-gen/-/wasm-gen-1.9.0.tgz",
+ "integrity": "sha512-cPE3o44YzOOHvlsb4+E9qSqjc9Qf9Na1OO/BHFy4OI91XDE14MjFN4lTMezzaIWdPqHnsTodGGNP+iRSYfGkjA==",
+ "dependencies": {
+ "@webassemblyjs/ast": "1.9.0",
+ "@webassemblyjs/helper-wasm-bytecode": "1.9.0",
+ "@webassemblyjs/ieee754": "1.9.0",
+ "@webassemblyjs/leb128": "1.9.0",
+ "@webassemblyjs/utf8": "1.9.0"
+ }
+ },
+ "node_modules/@webassemblyjs/wasm-opt": {
+ "version": "1.9.0",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-opt/-/wasm-opt-1.9.0.tgz",
+ "integrity": "sha512-Qkjgm6Anhm+OMbIL0iokO7meajkzQD71ioelnfPEj6r4eOFuqm4YC3VBPqXjFyyNwowzbMD+hizmprP/Fwkl2A==",
+ "dependencies": {
+ "@webassemblyjs/ast": "1.9.0",
+ "@webassemblyjs/helper-buffer": "1.9.0",
+ "@webassemblyjs/wasm-gen": "1.9.0",
+ "@webassemblyjs/wasm-parser": "1.9.0"
+ }
+ },
+ "node_modules/@webassemblyjs/wasm-parser": {
+ "version": "1.9.0",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-parser/-/wasm-parser-1.9.0.tgz",
+ "integrity": "sha512-9+wkMowR2AmdSWQzsPEjFU7njh8HTO5MqO8vjwEHuM+AMHioNqSBONRdr0NQQ3dVQrzp0s8lTcYqzUdb7YgELA==",
+ "dependencies": {
+ "@webassemblyjs/ast": "1.9.0",
+ "@webassemblyjs/helper-api-error": "1.9.0",
+ "@webassemblyjs/helper-wasm-bytecode": "1.9.0",
+ "@webassemblyjs/ieee754": "1.9.0",
+ "@webassemblyjs/leb128": "1.9.0",
+ "@webassemblyjs/utf8": "1.9.0"
+ }
+ },
+ "node_modules/@webassemblyjs/wast-parser": {
+ "version": "1.9.0",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/wast-parser/-/wast-parser-1.9.0.tgz",
+ "integrity": "sha512-qsqSAP3QQ3LyZjNC/0jBJ/ToSxfYJ8kYyuiGvtn/8MK89VrNEfwj7BPQzJVHi0jGTRK2dGdJ5PRqhtjzoww+bw==",
+ "dependencies": {
+ "@webassemblyjs/ast": "1.9.0",
+ "@webassemblyjs/floating-point-hex-parser": "1.9.0",
+ "@webassemblyjs/helper-api-error": "1.9.0",
+ "@webassemblyjs/helper-code-frame": "1.9.0",
+ "@webassemblyjs/helper-fsm": "1.9.0",
+ "@xtuc/long": "4.2.2"
+ }
+ },
+ "node_modules/@webassemblyjs/wast-printer": {
+ "version": "1.9.0",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/wast-printer/-/wast-printer-1.9.0.tgz",
+ "integrity": "sha512-2J0nE95rHXHyQ24cWjMKJ1tqB/ds8z/cyeOZxJhcb+rW+SQASVjuznUSmdz5GpVJTzU8JkhYut0D3siFDD6wsA==",
+ "dependencies": {
+ "@webassemblyjs/ast": "1.9.0",
+ "@webassemblyjs/wast-parser": "1.9.0",
+ "@xtuc/long": "4.2.2"
+ }
+ },
+ "node_modules/@xtuc/ieee754": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/@xtuc/ieee754/-/ieee754-1.2.0.tgz",
+ "integrity": "sha512-DX8nKgqcGwsc0eJSqYt5lwP4DH5FlHnmuWWBRy7X0NcaGR0ZtuyeESgMwTYVEtxmsNGY+qit4QYT/MIYTOTPeA=="
+ },
+ "node_modules/@xtuc/long": {
+ "version": "4.2.2",
+ "resolved": "https://registry.npmjs.org/@xtuc/long/-/long-4.2.2.tgz",
+ "integrity": "sha512-NuHqBY1PB/D8xU6s/thBgOAiAP7HOYDQ32+BFZILJ8ivkUkAHQnWfn6WhL79Owj1qmUnoN/YPhktdIoucipkAQ=="
+ },
+ "node_modules/abbrev": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/abbrev/-/abbrev-1.1.1.tgz",
+ "integrity": "sha512-nne9/IiQ/hzIhY6pdDnbBtz7DjPTKrY00P/zvPSm5pOFkl6xuGrGnXn/VtTNNfNtAfZ9/1RtehkszU9qcTii0Q=="
+ },
+ "node_modules/accepts": {
+ "version": "1.3.7",
+ "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.7.tgz",
+ "integrity": "sha512-Il80Qs2WjYlJIBNzNkK6KYqlVMTbZLXgHx2oT0pU/fjRHyEp+PEfEPY0R3WCwAGVOtauxh1hOxNgIf5bv7dQpA==",
+ "dependencies": {
+ "mime-types": "~2.1.24",
+ "negotiator": "0.6.2"
+ },
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/acorn": {
+ "version": "7.4.1",
+ "resolved": "https://registry.npmjs.org/acorn/-/acorn-7.4.1.tgz",
+ "integrity": "sha512-nQyp0o1/mNdbTO1PO6kHkwSrmgZ0MT/jCCpNiwbUjGoRN4dlBhqJtoQuCnEOKzgTVwg0ZWiCoQy6SxMebQVh8A==",
+ "bin": {
+ "acorn": "bin/acorn"
+ },
+ "engines": {
+ "node": ">=0.4.0"
+ }
+ },
+ "node_modules/agentkeepalive": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/agentkeepalive/-/agentkeepalive-2.2.0.tgz",
+ "integrity": "sha1-xdG9SxKQCPEWPyNvhuX66iAm4u8=",
+ "engines": {
+ "node": ">= 0.10.0"
+ }
+ },
+ "node_modules/ajv": {
+ "version": "6.12.6",
+ "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz",
+ "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==",
+ "dependencies": {
+ "fast-deep-equal": "^3.1.1",
+ "fast-json-stable-stringify": "^2.0.0",
+ "json-schema-traverse": "^0.4.1",
+ "uri-js": "^4.2.2"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/epoberezkin"
+ }
+ },
+ "node_modules/ajv-errors": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/ajv-errors/-/ajv-errors-1.0.1.tgz",
+ "integrity": "sha512-DCRfO/4nQ+89p/RK43i8Ezd41EqdGIU4ld7nGF8OQ14oc/we5rEntLCUa7+jrn3nn83BosfwZA0wb4pon2o8iQ==",
+ "peerDependencies": {
+ "ajv": ">=5.0.0"
+ }
+ },
+ "node_modules/ajv-keywords": {
+ "version": "3.5.2",
+ "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz",
+ "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==",
+ "peerDependencies": {
+ "ajv": "^6.9.1"
+ }
+ },
+ "node_modules/algoliasearch": {
+ "version": "4.9.1",
+ "resolved": "https://registry.npmjs.org/algoliasearch/-/algoliasearch-4.9.1.tgz",
+ "integrity": "sha512-EeJUYXzBEhZSsL6tXc3hseLBCtlNLa1MZ4mlMK6EeX38yRjY5vgnFcNNml6uUhlOjvheKxgkKRpPWkxgL8Cqkg==",
+ "dependencies": {
+ "@algolia/cache-browser-local-storage": "4.9.1",
+ "@algolia/cache-common": "4.9.1",
+ "@algolia/cache-in-memory": "4.9.1",
+ "@algolia/client-account": "4.9.1",
+ "@algolia/client-analytics": "4.9.1",
+ "@algolia/client-common": "4.9.1",
+ "@algolia/client-recommendation": "4.9.1",
+ "@algolia/client-search": "4.9.1",
+ "@algolia/logger-common": "4.9.1",
+ "@algolia/logger-console": "4.9.1",
+ "@algolia/requester-browser-xhr": "4.9.1",
+ "@algolia/requester-common": "4.9.1",
+ "@algolia/requester-node-http": "4.9.1",
+ "@algolia/transporter": "4.9.1"
+ }
+ },
+ "node_modules/alphanum-sort": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/alphanum-sort/-/alphanum-sort-1.0.2.tgz",
+ "integrity": "sha1-l6ERlkmyEa0zaR2fn0hqjsn74KM="
+ },
+ "node_modules/ansi-align": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/ansi-align/-/ansi-align-3.0.0.tgz",
+ "integrity": "sha512-ZpClVKqXN3RGBmKibdfWzqCY4lnjEuoNzU5T0oEFpfd/z5qJHVarukridD4juLO2FXMiwUQxr9WqQtaYa8XRYw==",
+ "dependencies": {
+ "string-width": "^3.0.0"
+ }
+ },
+ "node_modules/ansi-align/node_modules/ansi-regex": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.0.tgz",
+ "integrity": "sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg==",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/ansi-align/node_modules/emoji-regex": {
+ "version": "7.0.3",
+ "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-7.0.3.tgz",
+ "integrity": "sha512-CwBLREIQ7LvYFB0WyRvwhq5N5qPhc6PMjD6bYggFlI5YyDgl+0vxq5VHbMOFqLg7hfWzmu8T5Z1QofhmTIhItA=="
+ },
+ "node_modules/ansi-align/node_modules/is-fullwidth-code-point": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz",
+ "integrity": "sha1-o7MKXE8ZkYMWeqq5O+764937ZU8=",
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/ansi-align/node_modules/string-width": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/string-width/-/string-width-3.1.0.tgz",
+ "integrity": "sha512-vafcv6KjVZKSgz06oM/H6GDBrAtz8vdhQakGjFIvNrHA6y3HCF1CInLy+QLq8dTJPQ1b+KDUqDFctkdRW44e1w==",
+ "dependencies": {
+ "emoji-regex": "^7.0.1",
+ "is-fullwidth-code-point": "^2.0.0",
+ "strip-ansi": "^5.1.0"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/ansi-align/node_modules/strip-ansi": {
+ "version": "5.2.0",
+ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-5.2.0.tgz",
+ "integrity": "sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA==",
+ "dependencies": {
+ "ansi-regex": "^4.1.0"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/ansi-colors": {
+ "version": "3.2.4",
+ "resolved": "https://registry.npmjs.org/ansi-colors/-/ansi-colors-3.2.4.tgz",
+ "integrity": "sha512-hHUXGagefjN2iRrID63xckIvotOXOojhQKWIPUZ4mNUZ9nLZW+7FMNoE1lOkEhNWYsx/7ysGIuJYCiMAA9FnrA==",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/ansi-escapes": {
+ "version": "4.3.2",
+ "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-4.3.2.tgz",
+ "integrity": "sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ==",
+ "dependencies": {
+ "type-fest": "^0.21.3"
+ },
+ "engines": {
+ "node": ">=8"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/ansi-escapes/node_modules/type-fest": {
+ "version": "0.21.3",
+ "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.21.3.tgz",
+ "integrity": "sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w==",
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/ansi-html": {
+ "version": "0.0.7",
+ "resolved": "https://registry.npmjs.org/ansi-html/-/ansi-html-0.0.7.tgz",
+ "integrity": "sha1-gTWEAhliqenm/QOflA0S9WynhZ4=",
+ "engines": [
+ "node >= 0.8.0"
+ ],
+ "bin": {
+ "ansi-html": "bin/ansi-html"
+ }
+ },
+ "node_modules/ansi-regex": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.0.tgz",
+ "integrity": "sha512-bY6fj56OUQ0hU1KjFNDQuJFezqKdrAyFdIevADiqrWHwSlbmBNMHp5ak2f40Pm8JTFyM2mqxkG6ngkHO11f/lg==",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/ansi-styles": {
+ "version": "3.2.1",
+ "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz",
+ "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==",
+ "dependencies": {
+ "color-convert": "^1.9.0"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/anymatch": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-2.0.0.tgz",
+ "integrity": "sha512-5teOsQWABXHHBFP9y3skS5P3d/WfWXpv3FUpy+LorMrNYaT9pI4oLMQX7jzQ2KklNpGpWHzdCXTDT2Y3XGlZBw==",
+ "dependencies": {
+ "micromatch": "^3.1.4",
+ "normalize-path": "^2.1.1"
+ }
+ },
+ "node_modules/anymatch/node_modules/normalize-path": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-2.1.1.tgz",
+ "integrity": "sha1-GrKLVW4Zg2Oowab35vogE3/mrtk=",
+ "dependencies": {
+ "remove-trailing-separator": "^1.0.1"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/aproba": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/aproba/-/aproba-1.2.0.tgz",
+ "integrity": "sha512-Y9J6ZjXtoYh8RnXVCMOU/ttDmk1aBjunq9vO0ta5x85WDQiQfUF9sIPBITdbiiIVcBo03Hi3jMxigBtsddlXRw=="
+ },
+ "node_modules/argparse": {
+ "version": "1.0.10",
+ "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz",
+ "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==",
+ "dependencies": {
+ "sprintf-js": "~1.0.2"
+ }
+ },
+ "node_modules/arr-diff": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/arr-diff/-/arr-diff-4.0.0.tgz",
+ "integrity": "sha1-1kYQdP6/7HHn4VI1dhoyml3HxSA=",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/arr-flatten": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/arr-flatten/-/arr-flatten-1.1.0.tgz",
+ "integrity": "sha512-L3hKV5R/p5o81R7O02IGnwpDmkp6E982XhtbuwSe3O4qOtMMMtodicASA1Cny2U+aCXcNpml+m4dPsvsJ3jatg==",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/arr-union": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/arr-union/-/arr-union-3.1.0.tgz",
+ "integrity": "sha1-45sJrqne+Gao8gbiiK9jkZuuOcQ=",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/array-flatten": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-2.1.2.tgz",
+ "integrity": "sha512-hNfzcOV8W4NdualtqBFPyVO+54DSJuZGY9qT4pRroB6S9e3iiido2ISIC5h9R2sPJ8H3FHCIiEnsv1lPXO3KtQ=="
+ },
+ "node_modules/array-union": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/array-union/-/array-union-1.0.2.tgz",
+ "integrity": "sha1-mjRBDk9OPaI96jdb5b5w8kd47Dk=",
+ "dependencies": {
+ "array-uniq": "^1.0.1"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/array-uniq": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/array-uniq/-/array-uniq-1.0.3.tgz",
+ "integrity": "sha1-r2rId6Jcx/dOBYiUdThY39sk/bY=",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/array-unique": {
+ "version": "0.3.2",
+ "resolved": "https://registry.npmjs.org/array-unique/-/array-unique-0.3.2.tgz",
+ "integrity": "sha1-qJS3XUvE9s1nnvMkSp/Y9Gri1Cg=",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/asap": {
+ "version": "2.0.6",
+ "resolved": "https://registry.npmjs.org/asap/-/asap-2.0.6.tgz",
+ "integrity": "sha1-5QNHYR1+aQlDIIu9r+vLwvuGbUY="
+ },
+ "node_modules/asn1": {
+ "version": "0.2.4",
+ "resolved": "https://registry.npmjs.org/asn1/-/asn1-0.2.4.tgz",
+ "integrity": "sha512-jxwzQpLQjSmWXgwaCZE9Nz+glAG01yF1QnWgbhGwHI5A6FRIEY6IVqtHhIepHqI7/kyEyQEagBC5mBEFlIYvdg==",
+ "dependencies": {
+ "safer-buffer": "~2.1.0"
+ }
+ },
+ "node_modules/asn1.js": {
+ "version": "5.4.1",
+ "resolved": "https://registry.npmjs.org/asn1.js/-/asn1.js-5.4.1.tgz",
+ "integrity": "sha512-+I//4cYPccV8LdmBLiX8CYvf9Sp3vQsrqu2QNXRcrbiWvcx/UdlFiqUJJzxRQxgsZmvhXhn4cSKeSmoFjVdupA==",
+ "dependencies": {
+ "bn.js": "^4.0.0",
+ "inherits": "^2.0.1",
+ "minimalistic-assert": "^1.0.0",
+ "safer-buffer": "^2.1.0"
+ }
+ },
+ "node_modules/asn1.js/node_modules/bn.js": {
+ "version": "4.12.0",
+ "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.12.0.tgz",
+ "integrity": "sha512-c98Bf3tPniI+scsdk237ku1Dc3ujXQTSgyiPUDEOe7tRkhrqridvh8klBv0HCEso1OLOYcHuCv/cS6DNxKH+ZA=="
+ },
+ "node_modules/assert": {
+ "version": "1.5.0",
+ "resolved": "https://registry.npmjs.org/assert/-/assert-1.5.0.tgz",
+ "integrity": "sha512-EDsgawzwoun2CZkCgtxJbv392v4nbk9XDD06zI+kQYoBM/3RBWLlEyJARDOmhAAosBjWACEkKL6S+lIZtcAubA==",
+ "dependencies": {
+ "object-assign": "^4.1.1",
+ "util": "0.10.3"
+ }
+ },
+ "node_modules/assert-never": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/assert-never/-/assert-never-1.2.1.tgz",
+ "integrity": "sha512-TaTivMB6pYI1kXwrFlEhLeGfOqoDNdTxjCdwRfFFkEA30Eu+k48W34nlok2EYWJfFFzqaEmichdNM7th6M5HNw=="
+ },
+ "node_modules/assert-plus": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/assert-plus/-/assert-plus-1.0.0.tgz",
+ "integrity": "sha1-8S4PPF13sLHN2RRpQuTpbB5N1SU=",
+ "engines": {
+ "node": ">=0.8"
+ }
+ },
+ "node_modules/assert/node_modules/inherits": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.1.tgz",
+ "integrity": "sha1-sX0I0ya0Qj5Wjv9xn5GwscvfafE="
+ },
+ "node_modules/assert/node_modules/util": {
+ "version": "0.10.3",
+ "resolved": "https://registry.npmjs.org/util/-/util-0.10.3.tgz",
+ "integrity": "sha1-evsa/lCAUkZInj23/g7TeTNqwPk=",
+ "dependencies": {
+ "inherits": "2.0.1"
+ }
+ },
+ "node_modules/assign-symbols": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/assign-symbols/-/assign-symbols-1.0.0.tgz",
+ "integrity": "sha1-WWZ/QfrdTyDMvCu5a41Pf3jsA2c=",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/async": {
+ "version": "2.6.3",
+ "resolved": "https://registry.npmjs.org/async/-/async-2.6.3.tgz",
+ "integrity": "sha512-zflvls11DCy+dQWzTW2dzuilv8Z5X/pjfmZOWba6TNIVDm+2UDaJmXSOXlasHKfNBs8oo3M0aT50fDEWfKZjXg==",
+ "dependencies": {
+ "lodash": "^4.17.14"
+ }
+ },
+ "node_modules/async-each": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/async-each/-/async-each-1.0.3.tgz",
+ "integrity": "sha512-z/WhQ5FPySLdvREByI2vZiTWwCnF0moMJ1hK9YQwDTHKh6I7/uSckMetoRGb5UBZPC1z0jlw+n/XCgjeH7y1AQ=="
+ },
+ "node_modules/async-limiter": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/async-limiter/-/async-limiter-1.0.1.tgz",
+ "integrity": "sha512-csOlWGAcRFJaI6m+F2WKdnMKr4HhdhFVBk0H/QbJFMCr+uO2kwohwXQPxw/9OCxp05r5ghVBFSyioixx3gfkNQ=="
+ },
+ "node_modules/asynckit": {
+ "version": "0.4.0",
+ "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz",
+ "integrity": "sha1-x57Zf380y48robyXkLzDZkdLS3k="
+ },
+ "node_modules/atob": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/atob/-/atob-2.1.2.tgz",
+ "integrity": "sha512-Wm6ukoaOGJi/73p/cl2GvLjTI5JM1k/O14isD73YML8StrH/7/lRFgmg8nICZgD3bZZvjwCGxtMOD3wWNAu8cg==",
+ "bin": {
+ "atob": "bin/atob.js"
+ },
+ "engines": {
+ "node": ">= 4.5.0"
+ }
+ },
+ "node_modules/autocomplete.js": {
+ "version": "0.36.0",
+ "resolved": "https://registry.npmjs.org/autocomplete.js/-/autocomplete.js-0.36.0.tgz",
+ "integrity": "sha512-jEwUXnVMeCHHutUt10i/8ZiRaCb0Wo+ZyKxeGsYwBDtw6EJHqEeDrq4UwZRD8YBSvp3g6klP678il2eeiVXN2Q==",
+ "dependencies": {
+ "immediate": "^3.2.3"
+ }
+ },
+ "node_modules/autoprefixer": {
+ "version": "9.8.6",
+ "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-9.8.6.tgz",
+ "integrity": "sha512-XrvP4VVHdRBCdX1S3WXVD8+RyG9qeb1D5Sn1DeLiG2xfSpzellk5k54xbUERJ3M5DggQxes39UGOTP8CFrEGbg==",
+ "dependencies": {
+ "browserslist": "^4.12.0",
+ "caniuse-lite": "^1.0.30001109",
+ "colorette": "^1.2.1",
+ "normalize-range": "^0.1.2",
+ "num2fraction": "^1.2.2",
+ "postcss": "^7.0.32",
+ "postcss-value-parser": "^4.1.0"
+ },
+ "bin": {
+ "autoprefixer": "bin/autoprefixer"
+ },
+ "funding": {
+ "type": "tidelift",
+ "url": "https://tidelift.com/funding/github/npm/autoprefixer"
+ }
+ },
+ "node_modules/aws-sign2": {
+ "version": "0.7.0",
+ "resolved": "https://registry.npmjs.org/aws-sign2/-/aws-sign2-0.7.0.tgz",
+ "integrity": "sha1-tG6JCTSpWR8tL2+G1+ap8bP+dqg=",
+ "engines": {
+ "node": "*"
+ }
+ },
+ "node_modules/aws4": {
+ "version": "1.11.0",
+ "resolved": "https://registry.npmjs.org/aws4/-/aws4-1.11.0.tgz",
+ "integrity": "sha512-xh1Rl34h6Fi1DC2WWKfxUTVqRsNnr6LsKz2+hfwDxQJWmrx8+c7ylaqBMcHfl1U1r2dsifOvKX3LQuLNZ+XSvA=="
+ },
+ "node_modules/axios": {
+ "version": "0.21.1",
+ "resolved": "https://registry.npmjs.org/axios/-/axios-0.21.1.tgz",
+ "integrity": "sha512-dKQiRHxGD9PPRIUNIWvZhPTPpl1rf/OxTYKsqKUDjBwYylTvV7SjSHJb9ratfyzM6wCdLCOYLzs73qpg5c4iGA==",
+ "dependencies": {
+ "follow-redirects": "^1.10.0"
+ }
+ },
+ "node_modules/babel-loader": {
+ "version": "8.2.2",
+ "resolved": "https://registry.npmjs.org/babel-loader/-/babel-loader-8.2.2.tgz",
+ "integrity": "sha512-JvTd0/D889PQBtUXJ2PXaKU/pjZDMtHA9V2ecm+eNRmmBCMR09a+fmpGTNwnJtFmFl5Ei7Vy47LjBb+L0wQ99g==",
+ "dependencies": {
+ "find-cache-dir": "^3.3.1",
+ "loader-utils": "^1.4.0",
+ "make-dir": "^3.1.0",
+ "schema-utils": "^2.6.5"
+ },
+ "engines": {
+ "node": ">= 8.9"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0",
+ "webpack": ">=2"
+ }
+ },
+ "node_modules/babel-plugin-dynamic-import-node": {
+ "version": "2.3.3",
+ "resolved": "https://registry.npmjs.org/babel-plugin-dynamic-import-node/-/babel-plugin-dynamic-import-node-2.3.3.tgz",
+ "integrity": "sha512-jZVI+s9Zg3IqA/kdi0i6UDCybUI3aSBLnglhYbSSjKlV7yF1F/5LWv8MakQmvYpnbJDS6fcBL2KzHSxNCMtWSQ==",
+ "dependencies": {
+ "object.assign": "^4.1.0"
+ }
+ },
+ "node_modules/babel-plugin-polyfill-corejs2": {
+ "version": "0.2.0",
+ "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs2/-/babel-plugin-polyfill-corejs2-0.2.0.tgz",
+ "integrity": "sha512-9bNwiR0dS881c5SHnzCmmGlMkJLl0OUZvxrxHo9w/iNoRuqaPjqlvBf4HrovXtQs/au5yKkpcdgfT1cC5PAZwg==",
+ "dependencies": {
+ "@babel/compat-data": "^7.13.11",
+ "@babel/helper-define-polyfill-provider": "^0.2.0",
+ "semver": "^6.1.1"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/babel-plugin-polyfill-corejs3": {
+ "version": "0.2.0",
+ "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs3/-/babel-plugin-polyfill-corejs3-0.2.0.tgz",
+ "integrity": "sha512-zZyi7p3BCUyzNxLx8KV61zTINkkV65zVkDAFNZmrTCRVhjo1jAS+YLvDJ9Jgd/w2tsAviCwFHReYfxO3Iql8Yg==",
+ "dependencies": {
+ "@babel/helper-define-polyfill-provider": "^0.2.0",
+ "core-js-compat": "^3.9.1"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/babel-plugin-polyfill-regenerator": {
+ "version": "0.2.0",
+ "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-regenerator/-/babel-plugin-polyfill-regenerator-0.2.0.tgz",
+ "integrity": "sha512-J7vKbCuD2Xi/eEHxquHN14bXAW9CXtecwuLrOIDJtcZzTaPzV1VdEfoUf9AzcRBMolKUQKM9/GVojeh0hFiqMg==",
+ "dependencies": {
+ "@babel/helper-define-polyfill-provider": "^0.2.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/babel-walk": {
+ "version": "3.0.0-canary-5",
+ "resolved": "https://registry.npmjs.org/babel-walk/-/babel-walk-3.0.0-canary-5.tgz",
+ "integrity": "sha512-GAwkz0AihzY5bkwIY5QDR+LvsRQgB/B+1foMPvi0FZPMl5fjD7ICiznUiBdLYMH1QYe6vqu4gWYytZOccLouFw==",
+ "dependencies": {
+ "@babel/types": "^7.9.6"
+ },
+ "engines": {
+ "node": ">= 10.0.0"
+ }
+ },
+ "node_modules/balanced-match": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz",
+ "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw=="
+ },
+ "node_modules/base": {
+ "version": "0.11.2",
+ "resolved": "https://registry.npmjs.org/base/-/base-0.11.2.tgz",
+ "integrity": "sha512-5T6P4xPgpp0YDFvSWwEZ4NoE3aM4QBQXDzmVbraCkFj8zHM+mba8SyqB5DbZWyR7mYHo6Y7BdQo3MoA4m0TeQg==",
+ "dependencies": {
+ "cache-base": "^1.0.1",
+ "class-utils": "^0.3.5",
+ "component-emitter": "^1.2.1",
+ "define-property": "^1.0.0",
+ "isobject": "^3.0.1",
+ "mixin-deep": "^1.2.0",
+ "pascalcase": "^0.1.1"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/base/node_modules/define-property": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/define-property/-/define-property-1.0.0.tgz",
+ "integrity": "sha1-dp66rz9KY6rTr56NMEybvnm/sOY=",
+ "dependencies": {
+ "is-descriptor": "^1.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/base64-js": {
+ "version": "1.5.1",
+ "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz",
+ "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==",
+ "funding": [
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/feross"
+ },
+ {
+ "type": "patreon",
+ "url": "https://www.patreon.com/feross"
+ },
+ {
+ "type": "consulting",
+ "url": "https://feross.org/support"
+ }
+ ]
+ },
+ "node_modules/batch": {
+ "version": "0.6.1",
+ "resolved": "https://registry.npmjs.org/batch/-/batch-0.6.1.tgz",
+ "integrity": "sha1-3DQxT05nkxgJP8dgJyUl+UvyXBY="
+ },
+ "node_modules/bcrypt-pbkdf": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.2.tgz",
+ "integrity": "sha1-pDAdOJtqQ/m2f/PKEaP2Y342Dp4=",
+ "dependencies": {
+ "tweetnacl": "^0.14.3"
+ }
+ },
+ "node_modules/big.js": {
+ "version": "5.2.2",
+ "resolved": "https://registry.npmjs.org/big.js/-/big.js-5.2.2.tgz",
+ "integrity": "sha512-vyL2OymJxmarO8gxMr0mhChsO9QGwhynfuu4+MHTAW6czfq9humCB7rKpUjDd9YUiDPU4mzpyupFSvOClAwbmQ==",
+ "engines": {
+ "node": "*"
+ }
+ },
+ "node_modules/binary-extensions": {
+ "version": "1.13.1",
+ "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-1.13.1.tgz",
+ "integrity": "sha512-Un7MIEDdUC5gNpcGDV97op1Ywk748MpHcFTHoYs6qnj1Z3j7I53VG3nwZhKzoBZmbdRNnb6WRdFlwl7tSDuZGw==",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/bindings": {
+ "version": "1.5.0",
+ "resolved": "https://registry.npmjs.org/bindings/-/bindings-1.5.0.tgz",
+ "integrity": "sha512-p2q/t/mhvuOj/UeLlV6566GD/guowlr0hHxClI0W9m7MWYkL1F0hLo+0Aexs9HSPCtR1SXQ0TD3MMKrXZajbiQ==",
+ "optional": true,
+ "dependencies": {
+ "file-uri-to-path": "1.0.0"
+ }
+ },
+ "node_modules/bluebird": {
+ "version": "3.7.2",
+ "resolved": "https://registry.npmjs.org/bluebird/-/bluebird-3.7.2.tgz",
+ "integrity": "sha512-XpNj6GDQzdfW+r2Wnn7xiSAd7TM3jzkxGXBGTtWKuSXv1xUV+azxAm8jdWZN06QTQk+2N2XB9jRDkvbmQmcRtg=="
+ },
+ "node_modules/bn.js": {
+ "version": "5.2.0",
+ "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-5.2.0.tgz",
+ "integrity": "sha512-D7iWRBvnZE8ecXiLj/9wbxH7Tk79fAh8IHaTNq1RWRixsS02W+5qS+iE9yq6RYl0asXx5tw0bLhmT5pIfbSquw=="
+ },
+ "node_modules/body-parser": {
+ "version": "1.19.0",
+ "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.19.0.tgz",
+ "integrity": "sha512-dhEPs72UPbDnAQJ9ZKMNTP6ptJaionhP5cBb541nXPlW60Jepo9RV/a4fX4XWW9CuFNK22krhrj1+rgzifNCsw==",
+ "dependencies": {
+ "bytes": "3.1.0",
+ "content-type": "~1.0.4",
+ "debug": "2.6.9",
+ "depd": "~1.1.2",
+ "http-errors": "1.7.2",
+ "iconv-lite": "0.4.24",
+ "on-finished": "~2.3.0",
+ "qs": "6.7.0",
+ "raw-body": "2.4.0",
+ "type-is": "~1.6.17"
+ },
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/body-parser/node_modules/bytes": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.0.tgz",
+ "integrity": "sha512-zauLjrfCG+xvoyaqLoV8bLVXXNGC4JqlxFCutSDWA6fJrTo2ZuvLYTqZ7aHBLZSMOopbzwv8f+wZcVzfVTI2Dg==",
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/body-parser/node_modules/qs": {
+ "version": "6.7.0",
+ "resolved": "https://registry.npmjs.org/qs/-/qs-6.7.0.tgz",
+ "integrity": "sha512-VCdBRNFTX1fyE7Nb6FYoURo/SPe62QCaAyzJvUjwRaIsc+NePBEniHlvxFmmX56+HZphIGtV0XeCirBtpDrTyQ==",
+ "engines": {
+ "node": ">=0.6"
+ }
+ },
+ "node_modules/bonjour": {
+ "version": "3.5.0",
+ "resolved": "https://registry.npmjs.org/bonjour/-/bonjour-3.5.0.tgz",
+ "integrity": "sha1-jokKGD2O6aI5OzhExpGkK897yfU=",
+ "dependencies": {
+ "array-flatten": "^2.1.0",
+ "deep-equal": "^1.0.1",
+ "dns-equal": "^1.0.0",
+ "dns-txt": "^2.0.2",
+ "multicast-dns": "^6.0.1",
+ "multicast-dns-service-types": "^1.1.0"
+ }
+ },
+ "node_modules/boolbase": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/boolbase/-/boolbase-1.0.0.tgz",
+ "integrity": "sha1-aN/1++YMUes3cl6p4+0xDcwed24="
+ },
+ "node_modules/boxen": {
+ "version": "4.2.0",
+ "resolved": "https://registry.npmjs.org/boxen/-/boxen-4.2.0.tgz",
+ "integrity": "sha512-eB4uT9RGzg2odpER62bBwSLvUeGC+WbRjjyyFhGsKnc8wp/m0+hQsMUvUe3H2V0D5vw0nBdO1hCJoZo5mKeuIQ==",
+ "dependencies": {
+ "ansi-align": "^3.0.0",
+ "camelcase": "^5.3.1",
+ "chalk": "^3.0.0",
+ "cli-boxes": "^2.2.0",
+ "string-width": "^4.1.0",
+ "term-size": "^2.1.0",
+ "type-fest": "^0.8.1",
+ "widest-line": "^3.1.0"
+ },
+ "engines": {
+ "node": ">=8"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/boxen/node_modules/ansi-styles": {
+ "version": "4.3.0",
+ "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz",
+ "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==",
+ "dependencies": {
+ "color-convert": "^2.0.1"
+ },
+ "engines": {
+ "node": ">=8"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/ansi-styles?sponsor=1"
+ }
+ },
+ "node_modules/boxen/node_modules/camelcase": {
+ "version": "5.3.1",
+ "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz",
+ "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/boxen/node_modules/chalk": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/chalk/-/chalk-3.0.0.tgz",
+ "integrity": "sha512-4D3B6Wf41KOYRFdszmDqMCGq5VV/uMAB273JILmO+3jAlh8X4qDtdtgCR3fxtbLEMzSx22QdhnDcJvu2u1fVwg==",
+ "dependencies": {
+ "ansi-styles": "^4.1.0",
+ "supports-color": "^7.1.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/boxen/node_modules/color-convert": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz",
+ "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==",
+ "dependencies": {
+ "color-name": "~1.1.4"
+ },
+ "engines": {
+ "node": ">=7.0.0"
+ }
+ },
+ "node_modules/boxen/node_modules/color-name": {
+ "version": "1.1.4",
+ "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz",
+ "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA=="
+ },
+ "node_modules/boxen/node_modules/has-flag": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz",
+ "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/boxen/node_modules/supports-color": {
+ "version": "7.2.0",
+ "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz",
+ "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==",
+ "dependencies": {
+ "has-flag": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/brace-expansion": {
+ "version": "1.1.11",
+ "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz",
+ "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==",
+ "dependencies": {
+ "balanced-match": "^1.0.0",
+ "concat-map": "0.0.1"
+ }
+ },
+ "node_modules/braces": {
+ "version": "2.3.2",
+ "resolved": "https://registry.npmjs.org/braces/-/braces-2.3.2.tgz",
+ "integrity": "sha512-aNdbnj9P8PjdXU4ybaWLK2IF3jc/EoDYbC7AazW6to3TRsfXxscC9UXOB5iDiEQrkyIbWp2SLQda4+QAa7nc3w==",
+ "dependencies": {
+ "arr-flatten": "^1.1.0",
+ "array-unique": "^0.3.2",
+ "extend-shallow": "^2.0.1",
+ "fill-range": "^4.0.0",
+ "isobject": "^3.0.1",
+ "repeat-element": "^1.1.2",
+ "snapdragon": "^0.8.1",
+ "snapdragon-node": "^2.0.1",
+ "split-string": "^3.0.2",
+ "to-regex": "^3.0.1"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/brorand": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/brorand/-/brorand-1.1.0.tgz",
+ "integrity": "sha1-EsJe/kCkXjwyPrhnWgoM5XsiNx8="
+ },
+ "node_modules/browserify-aes": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/browserify-aes/-/browserify-aes-1.2.0.tgz",
+ "integrity": "sha512-+7CHXqGuspUn/Sl5aO7Ea0xWGAtETPXNSAjHo48JfLdPWcMng33Xe4znFvQweqc/uzk5zSOI3H52CYnjCfb5hA==",
+ "dependencies": {
+ "buffer-xor": "^1.0.3",
+ "cipher-base": "^1.0.0",
+ "create-hash": "^1.1.0",
+ "evp_bytestokey": "^1.0.3",
+ "inherits": "^2.0.1",
+ "safe-buffer": "^5.0.1"
+ }
+ },
+ "node_modules/browserify-cipher": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/browserify-cipher/-/browserify-cipher-1.0.1.tgz",
+ "integrity": "sha512-sPhkz0ARKbf4rRQt2hTpAHqn47X3llLkUGn+xEJzLjwY8LRs2p0v7ljvI5EyoRO/mexrNunNECisZs+gw2zz1w==",
+ "dependencies": {
+ "browserify-aes": "^1.0.4",
+ "browserify-des": "^1.0.0",
+ "evp_bytestokey": "^1.0.0"
+ }
+ },
+ "node_modules/browserify-des": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/browserify-des/-/browserify-des-1.0.2.tgz",
+ "integrity": "sha512-BioO1xf3hFwz4kc6iBhI3ieDFompMhrMlnDFC4/0/vd5MokpuAc3R+LYbwTA9A5Yc9pq9UYPqffKpW2ObuwX5A==",
+ "dependencies": {
+ "cipher-base": "^1.0.1",
+ "des.js": "^1.0.0",
+ "inherits": "^2.0.1",
+ "safe-buffer": "^5.1.2"
+ }
+ },
+ "node_modules/browserify-rsa": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/browserify-rsa/-/browserify-rsa-4.1.0.tgz",
+ "integrity": "sha512-AdEER0Hkspgno2aR97SAf6vi0y0k8NuOpGnVH3O99rcA5Q6sh8QxcngtHuJ6uXwnfAXNM4Gn1Gb7/MV1+Ymbog==",
+ "dependencies": {
+ "bn.js": "^5.0.0",
+ "randombytes": "^2.0.1"
+ }
+ },
+ "node_modules/browserify-sign": {
+ "version": "4.2.1",
+ "resolved": "https://registry.npmjs.org/browserify-sign/-/browserify-sign-4.2.1.tgz",
+ "integrity": "sha512-/vrA5fguVAKKAVTNJjgSm1tRQDHUU6DbwO9IROu/0WAzC8PKhucDSh18J0RMvVeHAn5puMd+QHC2erPRNf8lmg==",
+ "dependencies": {
+ "bn.js": "^5.1.1",
+ "browserify-rsa": "^4.0.1",
+ "create-hash": "^1.2.0",
+ "create-hmac": "^1.1.7",
+ "elliptic": "^6.5.3",
+ "inherits": "^2.0.4",
+ "parse-asn1": "^5.1.5",
+ "readable-stream": "^3.6.0",
+ "safe-buffer": "^5.2.0"
+ }
+ },
+ "node_modules/browserify-sign/node_modules/readable-stream": {
+ "version": "3.6.0",
+ "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.0.tgz",
+ "integrity": "sha512-BViHy7LKeTz4oNnkcLJ+lVSL6vpiFeX6/d3oSH8zCW7UxP2onchk+vTGB143xuFjHS3deTgkKoXXymXqymiIdA==",
+ "dependencies": {
+ "inherits": "^2.0.3",
+ "string_decoder": "^1.1.1",
+ "util-deprecate": "^1.0.1"
+ },
+ "engines": {
+ "node": ">= 6"
+ }
+ },
+ "node_modules/browserify-sign/node_modules/safe-buffer": {
+ "version": "5.2.1",
+ "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz",
+ "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==",
+ "funding": [
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/feross"
+ },
+ {
+ "type": "patreon",
+ "url": "https://www.patreon.com/feross"
+ },
+ {
+ "type": "consulting",
+ "url": "https://feross.org/support"
+ }
+ ]
+ },
+ "node_modules/browserify-zlib": {
+ "version": "0.2.0",
+ "resolved": "https://registry.npmjs.org/browserify-zlib/-/browserify-zlib-0.2.0.tgz",
+ "integrity": "sha512-Z942RysHXmJrhqk88FmKBVq/v5tqmSkDz7p54G/MGyjMnCFFnC79XWNbg+Vta8W6Wb2qtSZTSxIGkJrRpCFEiA==",
+ "dependencies": {
+ "pako": "~1.0.5"
+ }
+ },
+ "node_modules/browserslist": {
+ "version": "4.16.5",
+ "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.16.5.tgz",
+ "integrity": "sha512-C2HAjrM1AI/djrpAUU/tr4pml1DqLIzJKSLDBXBrNErl9ZCCTXdhwxdJjYc16953+mBWf7Lw+uUJgpgb8cN71A==",
+ "dependencies": {
+ "caniuse-lite": "^1.0.30001214",
+ "colorette": "^1.2.2",
+ "electron-to-chromium": "^1.3.719",
+ "escalade": "^3.1.1",
+ "node-releases": "^1.1.71"
+ },
+ "bin": {
+ "browserslist": "cli.js"
+ },
+ "engines": {
+ "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/browserslist"
+ }
+ },
+ "node_modules/buffer": {
+ "version": "4.9.2",
+ "resolved": "https://registry.npmjs.org/buffer/-/buffer-4.9.2.tgz",
+ "integrity": "sha512-xq+q3SRMOxGivLhBNaUdC64hDTQwejJ+H0T/NB1XMtTVEwNTrfFF3gAxiyW0Bu/xWEGhjVKgUcMhCrUy2+uCWg==",
+ "dependencies": {
+ "base64-js": "^1.0.2",
+ "ieee754": "^1.1.4",
+ "isarray": "^1.0.0"
+ }
+ },
+ "node_modules/buffer-from": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.1.tgz",
+ "integrity": "sha512-MQcXEUbCKtEo7bhqEs6560Hyd4XaovZlO/k9V3hjVUF/zwW7KBVdSK4gIt/bzwS9MbR5qob+F5jusZsb0YQK2A=="
+ },
+ "node_modules/buffer-indexof": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/buffer-indexof/-/buffer-indexof-1.1.1.tgz",
+ "integrity": "sha512-4/rOEg86jivtPTeOUUT61jJO1Ya1TrR/OkqCSZDyq84WJh3LuuiphBYJN+fm5xufIk4XAFcEwte/8WzC8If/1g=="
+ },
+ "node_modules/buffer-json": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/buffer-json/-/buffer-json-2.0.0.tgz",
+ "integrity": "sha512-+jjPFVqyfF1esi9fvfUs3NqM0pH1ziZ36VP4hmA/y/Ssfo/5w5xHKfTw9BwQjoJ1w/oVtpLomqwUHKdefGyuHw=="
+ },
+ "node_modules/buffer-xor": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/buffer-xor/-/buffer-xor-1.0.3.tgz",
+ "integrity": "sha1-JuYe0UIvtw3ULm42cp7VHYVf6Nk="
+ },
+ "node_modules/builtin-status-codes": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/builtin-status-codes/-/builtin-status-codes-3.0.0.tgz",
+ "integrity": "sha1-hZgoeOIbmOHGZCXgPQF0eI9Wnug="
+ },
+ "node_modules/bytes": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.0.0.tgz",
+ "integrity": "sha1-0ygVQE1olpn4Wk6k+odV3ROpYEg=",
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/cac": {
+ "version": "6.7.3",
+ "resolved": "https://registry.npmjs.org/cac/-/cac-6.7.3.tgz",
+ "integrity": "sha512-ECVqVZh74qgSuZG9YOt2OJPI3wGcf+EwwuF/XIOYqZBD0KZYLtgPWqFPxmDPQ6joxI1nOlvVgRV6VT53Ooyocg==",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/cacache": {
+ "version": "12.0.4",
+ "resolved": "https://registry.npmjs.org/cacache/-/cacache-12.0.4.tgz",
+ "integrity": "sha512-a0tMB40oefvuInr4Cwb3GerbL9xTj1D5yg0T5xrjGCGyfvbxseIXX7BAO/u/hIXdafzOI5JC3wDwHyf24buOAQ==",
+ "dependencies": {
+ "bluebird": "^3.5.5",
+ "chownr": "^1.1.1",
+ "figgy-pudding": "^3.5.1",
+ "glob": "^7.1.4",
+ "graceful-fs": "^4.1.15",
+ "infer-owner": "^1.0.3",
+ "lru-cache": "^5.1.1",
+ "mississippi": "^3.0.0",
+ "mkdirp": "^0.5.1",
+ "move-concurrently": "^1.0.1",
+ "promise-inflight": "^1.0.1",
+ "rimraf": "^2.6.3",
+ "ssri": "^6.0.1",
+ "unique-filename": "^1.1.1",
+ "y18n": "^4.0.0"
+ }
+ },
+ "node_modules/cacache/node_modules/mkdirp": {
+ "version": "0.5.5",
+ "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz",
+ "integrity": "sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==",
+ "dependencies": {
+ "minimist": "^1.2.5"
+ },
+ "bin": {
+ "mkdirp": "bin/cmd.js"
+ }
+ },
+ "node_modules/cache-base": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/cache-base/-/cache-base-1.0.1.tgz",
+ "integrity": "sha512-AKcdTnFSWATd5/GCPRxr2ChwIJ85CeyrEyjRHlKxQ56d4XJMGym0uAiKn0xbLOGOl3+yRpOTi484dVCEc5AUzQ==",
+ "dependencies": {
+ "collection-visit": "^1.0.0",
+ "component-emitter": "^1.2.1",
+ "get-value": "^2.0.6",
+ "has-value": "^1.0.0",
+ "isobject": "^3.0.1",
+ "set-value": "^2.0.0",
+ "to-object-path": "^0.3.0",
+ "union-value": "^1.0.0",
+ "unset-value": "^1.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/cache-loader": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/cache-loader/-/cache-loader-3.0.1.tgz",
+ "integrity": "sha512-HzJIvGiGqYsFUrMjAJNDbVZoG7qQA+vy9AIoKs7s9DscNfki0I589mf2w6/tW+kkFH3zyiknoWV5Jdynu6b/zw==",
+ "dependencies": {
+ "buffer-json": "^2.0.0",
+ "find-cache-dir": "^2.1.0",
+ "loader-utils": "^1.2.3",
+ "mkdirp": "^0.5.1",
+ "neo-async": "^2.6.1",
+ "schema-utils": "^1.0.0"
+ },
+ "engines": {
+ "node": ">= 6.9.0"
+ },
+ "peerDependencies": {
+ "webpack": "^4.0.0"
+ }
+ },
+ "node_modules/cache-loader/node_modules/find-cache-dir": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/find-cache-dir/-/find-cache-dir-2.1.0.tgz",
+ "integrity": "sha512-Tq6PixE0w/VMFfCgbONnkiQIVol/JJL7nRMi20fqzA4NRs9AfeqMGeRdPi3wIhYkxjeBaWh2rxwapn5Tu3IqOQ==",
+ "dependencies": {
+ "commondir": "^1.0.1",
+ "make-dir": "^2.0.0",
+ "pkg-dir": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/cache-loader/node_modules/find-up": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz",
+ "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==",
+ "dependencies": {
+ "locate-path": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/cache-loader/node_modules/locate-path": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz",
+ "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==",
+ "dependencies": {
+ "p-locate": "^3.0.0",
+ "path-exists": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/cache-loader/node_modules/make-dir": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-2.1.0.tgz",
+ "integrity": "sha512-LS9X+dc8KLxXCb8dni79fLIIUA5VyZoyjSMCwTluaXA0o27cCK0bhXkpgw+sTXVpPy/lSO57ilRixqk0vDmtRA==",
+ "dependencies": {
+ "pify": "^4.0.1",
+ "semver": "^5.6.0"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/cache-loader/node_modules/mkdirp": {
+ "version": "0.5.5",
+ "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz",
+ "integrity": "sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==",
+ "dependencies": {
+ "minimist": "^1.2.5"
+ },
+ "bin": {
+ "mkdirp": "bin/cmd.js"
+ }
+ },
+ "node_modules/cache-loader/node_modules/p-locate": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz",
+ "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==",
+ "dependencies": {
+ "p-limit": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/cache-loader/node_modules/path-exists": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz",
+ "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=",
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/cache-loader/node_modules/pkg-dir": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-3.0.0.tgz",
+ "integrity": "sha512-/E57AYkoeQ25qkxMj5PBOVgF8Kiu/h7cYS30Z5+R7WaiCCBfLq58ZI/dSeaEKb9WVJV5n/03QwrN3IeWIFllvw==",
+ "dependencies": {
+ "find-up": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/cache-loader/node_modules/schema-utils": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-1.0.0.tgz",
+ "integrity": "sha512-i27Mic4KovM/lnGsy8whRCHhc7VicJajAjTrYg11K9zfZXnYIt4k5F+kZkwjnrhKzLic/HLU4j11mjsz2G/75g==",
+ "dependencies": {
+ "ajv": "^6.1.0",
+ "ajv-errors": "^1.0.0",
+ "ajv-keywords": "^3.1.0"
+ },
+ "engines": {
+ "node": ">= 4"
+ }
+ },
+ "node_modules/cache-loader/node_modules/semver": {
+ "version": "5.7.1",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz",
+ "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==",
+ "bin": {
+ "semver": "bin/semver"
+ }
+ },
+ "node_modules/cacheable-request": {
+ "version": "6.1.0",
+ "resolved": "https://registry.npmjs.org/cacheable-request/-/cacheable-request-6.1.0.tgz",
+ "integrity": "sha512-Oj3cAGPCqOZX7Rz64Uny2GYAZNliQSqfbePrgAQ1wKAihYmCUnraBtJtKcGR4xz7wF+LoJC+ssFZvv5BgF9Igg==",
+ "dependencies": {
+ "clone-response": "^1.0.2",
+ "get-stream": "^5.1.0",
+ "http-cache-semantics": "^4.0.0",
+ "keyv": "^3.0.0",
+ "lowercase-keys": "^2.0.0",
+ "normalize-url": "^4.1.0",
+ "responselike": "^1.0.2"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/cacheable-request/node_modules/get-stream": {
+ "version": "5.2.0",
+ "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-5.2.0.tgz",
+ "integrity": "sha512-nBF+F1rAZVCu/p7rjzgA+Yb4lfYXrpl7a6VmJrU8wF9I1CKvP/QwPNZHnOlwbTkY6dvtFIzFMSyQXbLoTQPRpA==",
+ "dependencies": {
+ "pump": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/cacheable-request/node_modules/lowercase-keys": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-2.0.0.tgz",
+ "integrity": "sha512-tqNXrS78oMOE73NMxK4EMLQsQowWf8jKooH9g7xPavRT706R6bkQJ6DY2Te7QukaZsulxa30wQ7bk0pm4XiHmA==",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/cacheable-request/node_modules/normalize-url": {
+ "version": "4.5.0",
+ "resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-4.5.0.tgz",
+ "integrity": "sha512-2s47yzUxdexf1OhyRi4Em83iQk0aPvwTddtFz4hnSSw9dCEsLEGf6SwIO8ss/19S9iBb5sJaOuTvTGDeZI00BQ==",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/call-bind": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.2.tgz",
+ "integrity": "sha512-7O+FbCihrB5WGbFYesctwmTKae6rOiIzmz1icreWJ+0aA7LJfuqhEso2T9ncpcFtzMQtzXf2QGGueWJGTYsqrA==",
+ "dependencies": {
+ "function-bind": "^1.1.1",
+ "get-intrinsic": "^1.0.2"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/call-me-maybe": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/call-me-maybe/-/call-me-maybe-1.0.1.tgz",
+ "integrity": "sha1-JtII6onje1y95gJQoV8DHBak1ms="
+ },
+ "node_modules/caller-callsite": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/caller-callsite/-/caller-callsite-2.0.0.tgz",
+ "integrity": "sha1-hH4PzgoiN1CpoCfFSzNzGtMVQTQ=",
+ "dependencies": {
+ "callsites": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/caller-path": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/caller-path/-/caller-path-2.0.0.tgz",
+ "integrity": "sha1-Ro+DBE42mrIBD6xfBs7uFbsssfQ=",
+ "dependencies": {
+ "caller-callsite": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/callsites": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/callsites/-/callsites-2.0.0.tgz",
+ "integrity": "sha1-BuuE8A7qQT2oav/vrL/7Ngk7PFA=",
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/camel-case": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/camel-case/-/camel-case-3.0.0.tgz",
+ "integrity": "sha1-yjw2iKTpzzpM2nd9xNy8cTJJz3M=",
+ "dependencies": {
+ "no-case": "^2.2.0",
+ "upper-case": "^1.1.1"
+ }
+ },
+ "node_modules/camelcase": {
+ "version": "6.2.0",
+ "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.2.0.tgz",
+ "integrity": "sha512-c7wVvbw3f37nuobQNtgsgG9POC9qMbNuMQmTCqZv23b6MIz0fcYpBiOlv9gEN/hdLdnZTDQhg6e9Dq5M1vKvfg==",
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/caniuse-api": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/caniuse-api/-/caniuse-api-3.0.0.tgz",
+ "integrity": "sha512-bsTwuIg/BZZK/vreVTYYbSWoe2F+71P7K5QGEX+pT250DZbfU1MQ5prOKpPR+LL6uWKK3KMwMCAS74QB3Um1uw==",
+ "dependencies": {
+ "browserslist": "^4.0.0",
+ "caniuse-lite": "^1.0.0",
+ "lodash.memoize": "^4.1.2",
+ "lodash.uniq": "^4.5.0"
+ }
+ },
+ "node_modules/caniuse-lite": {
+ "version": "1.0.30001219",
+ "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001219.tgz",
+ "integrity": "sha512-c0yixVG4v9KBc/tQ2rlbB3A/bgBFRvl8h8M4IeUbqCca4gsiCfvtaheUssbnux/Mb66Vjz7x8yYjDgYcNQOhyQ=="
+ },
+ "node_modules/caseless": {
+ "version": "0.12.0",
+ "resolved": "https://registry.npmjs.org/caseless/-/caseless-0.12.0.tgz",
+ "integrity": "sha1-G2gcIf+EAzyCZUMJBolCDRhxUdw="
+ },
+ "node_modules/chalk": {
+ "version": "2.4.2",
+ "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz",
+ "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==",
+ "dependencies": {
+ "ansi-styles": "^3.2.1",
+ "escape-string-regexp": "^1.0.5",
+ "supports-color": "^5.3.0"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/character-parser": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/character-parser/-/character-parser-2.2.0.tgz",
+ "integrity": "sha1-x84o821LzZdE5f/CxfzeHHMmH8A=",
+ "dependencies": {
+ "is-regex": "^1.0.3"
+ }
+ },
+ "node_modules/cheerio": {
+ "version": "1.0.0-rc.6",
+ "resolved": "https://registry.npmjs.org/cheerio/-/cheerio-1.0.0-rc.6.tgz",
+ "integrity": "sha512-hjx1XE1M/D5pAtMgvWwE21QClmAEeGHOIDfycgmndisdNgI6PE1cGRQkMGBcsbUbmEQyWu5PJLUcAOjtQS8DWw==",
+ "dependencies": {
+ "cheerio-select": "^1.3.0",
+ "dom-serializer": "^1.3.1",
+ "domhandler": "^4.1.0",
+ "htmlparser2": "^6.1.0",
+ "parse5": "^6.0.1",
+ "parse5-htmlparser2-tree-adapter": "^6.0.1"
+ },
+ "engines": {
+ "node": ">= 0.12"
+ }
+ },
+ "node_modules/cheerio-select": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/cheerio-select/-/cheerio-select-1.4.0.tgz",
+ "integrity": "sha512-sobR3Yqz27L553Qa7cK6rtJlMDbiKPdNywtR95Sj/YgfpLfy0u6CGJuaBKe5YE/vTc23SCRKxWSdlon/w6I/Ew==",
+ "dependencies": {
+ "css-select": "^4.1.2",
+ "css-what": "^5.0.0",
+ "domelementtype": "^2.2.0",
+ "domhandler": "^4.2.0",
+ "domutils": "^2.6.0"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/fb55"
+ }
+ },
+ "node_modules/chokidar": {
+ "version": "2.1.8",
+ "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-2.1.8.tgz",
+ "integrity": "sha512-ZmZUazfOzf0Nve7duiCKD23PFSCs4JPoYyccjUFF3aQkQadqBhfzhjkwBH2mNOG9cTBwhamM37EIsIkZw3nRgg==",
+ "deprecated": "Chokidar 2 will break on node v14+. Upgrade to chokidar 3 with 15x less dependencies.",
+ "dependencies": {
+ "anymatch": "^2.0.0",
+ "async-each": "^1.0.1",
+ "braces": "^2.3.2",
+ "glob-parent": "^3.1.0",
+ "inherits": "^2.0.3",
+ "is-binary-path": "^1.0.0",
+ "is-glob": "^4.0.0",
+ "normalize-path": "^3.0.0",
+ "path-is-absolute": "^1.0.0",
+ "readdirp": "^2.2.1",
+ "upath": "^1.1.1"
+ },
+ "optionalDependencies": {
+ "fsevents": "^1.2.7"
+ }
+ },
+ "node_modules/chownr": {
+ "version": "1.1.4",
+ "resolved": "https://registry.npmjs.org/chownr/-/chownr-1.1.4.tgz",
+ "integrity": "sha512-jJ0bqzaylmJtVnNgzTeSOs8DPavpbYgEr/b0YL8/2GO3xJEhInFmhKMUnEJQjZumK7KXGFhUy89PrsJWlakBVg=="
+ },
+ "node_modules/chrome-trace-event": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/chrome-trace-event/-/chrome-trace-event-1.0.3.tgz",
+ "integrity": "sha512-p3KULyQg4S7NIHixdwbGX+nFHkoBiA4YQmyWtjb8XngSKV124nJmRysgAeujbUVb15vh+RvFUfCPqU7rXk+hZg==",
+ "engines": {
+ "node": ">=6.0"
+ }
+ },
+ "node_modules/ci-info": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-2.0.0.tgz",
+ "integrity": "sha512-5tK7EtrZ0N+OLFMthtqOj4fI2Jeb88C4CAZPu25LDVUgXJ0A3Js4PMGqrn0JU1W0Mh1/Z8wZzYPxqUrXeBboCQ=="
+ },
+ "node_modules/cipher-base": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/cipher-base/-/cipher-base-1.0.4.tgz",
+ "integrity": "sha512-Kkht5ye6ZGmwv40uUDZztayT2ThLQGfnj/T71N/XzeZeo3nf8foyW7zGTsPYkEya3m5f3cAypH+qe7YOrM1U2Q==",
+ "dependencies": {
+ "inherits": "^2.0.1",
+ "safe-buffer": "^5.0.1"
+ }
+ },
+ "node_modules/class-utils": {
+ "version": "0.3.6",
+ "resolved": "https://registry.npmjs.org/class-utils/-/class-utils-0.3.6.tgz",
+ "integrity": "sha512-qOhPa/Fj7s6TY8H8esGu5QNpMMQxz79h+urzrNYN6mn+9BnxlDGf5QZ+XeCDsxSjPqsSR56XOZOJmpeurnLMeg==",
+ "dependencies": {
+ "arr-union": "^3.1.0",
+ "define-property": "^0.2.5",
+ "isobject": "^3.0.0",
+ "static-extend": "^0.1.1"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/class-utils/node_modules/define-property": {
+ "version": "0.2.5",
+ "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz",
+ "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=",
+ "dependencies": {
+ "is-descriptor": "^0.1.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/class-utils/node_modules/is-accessor-descriptor": {
+ "version": "0.1.6",
+ "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-0.1.6.tgz",
+ "integrity": "sha1-qeEss66Nh2cn7u84Q/igiXtcmNY=",
+ "dependencies": {
+ "kind-of": "^3.0.2"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/class-utils/node_modules/is-accessor-descriptor/node_modules/kind-of": {
+ "version": "3.2.2",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz",
+ "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=",
+ "dependencies": {
+ "is-buffer": "^1.1.5"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/class-utils/node_modules/is-data-descriptor": {
+ "version": "0.1.4",
+ "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-0.1.4.tgz",
+ "integrity": "sha1-C17mSDiOLIYCgueT8YVv7D8wG1Y=",
+ "dependencies": {
+ "kind-of": "^3.0.2"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/class-utils/node_modules/is-data-descriptor/node_modules/kind-of": {
+ "version": "3.2.2",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz",
+ "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=",
+ "dependencies": {
+ "is-buffer": "^1.1.5"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/class-utils/node_modules/is-descriptor": {
+ "version": "0.1.6",
+ "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-0.1.6.tgz",
+ "integrity": "sha512-avDYr0SB3DwO9zsMov0gKCESFYqCnE4hq/4z3TdUlukEy5t9C0YRq7HLrsN52NAcqXKaepeCD0n+B0arnVG3Hg==",
+ "dependencies": {
+ "is-accessor-descriptor": "^0.1.6",
+ "is-data-descriptor": "^0.1.4",
+ "kind-of": "^5.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/class-utils/node_modules/kind-of": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-5.1.0.tgz",
+ "integrity": "sha512-NGEErnH6F2vUuXDh+OlbcKW7/wOcfdRHaZ7VWtqCztfHri/++YKmP51OdWeGPuqCOba6kk2OTe5d02VmTB80Pw==",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/clean-css": {
+ "version": "4.2.3",
+ "resolved": "https://registry.npmjs.org/clean-css/-/clean-css-4.2.3.tgz",
+ "integrity": "sha512-VcMWDN54ZN/DS+g58HYL5/n4Zrqe8vHJpGA8KdgUXFU4fuP/aHNw8eld9SyEIyabIMJX/0RaY/fplOo5hYLSFA==",
+ "dependencies": {
+ "source-map": "~0.6.0"
+ },
+ "engines": {
+ "node": ">= 4.0"
+ }
+ },
+ "node_modules/clean-css/node_modules/source-map": {
+ "version": "0.6.1",
+ "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz",
+ "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/cli-boxes": {
+ "version": "2.2.1",
+ "resolved": "https://registry.npmjs.org/cli-boxes/-/cli-boxes-2.2.1.tgz",
+ "integrity": "sha512-y4coMcylgSCdVinjiDBuR8PCC2bLjyGTwEmPb9NHR/QaNU6EUOXcTY/s6VjGMD6ENSEaeQYHCY0GNGS5jfMwPw==",
+ "engines": {
+ "node": ">=6"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/clipboard": {
+ "version": "2.0.8",
+ "resolved": "https://registry.npmjs.org/clipboard/-/clipboard-2.0.8.tgz",
+ "integrity": "sha512-Y6WO0unAIQp5bLmk1zdThRhgJt/x3ks6f30s3oE3H1mgIEU33XyQjEf8gsf6DxC7NPX8Y1SsNWjUjL/ywLnnbQ==",
+ "optional": true,
+ "dependencies": {
+ "good-listener": "^1.2.2",
+ "select": "^1.1.2",
+ "tiny-emitter": "^2.0.0"
+ }
+ },
+ "node_modules/clipboard-copy": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/clipboard-copy/-/clipboard-copy-3.2.0.tgz",
+ "integrity": "sha512-vooFaGFL6ulEP1liiaWFBmmfuPm3cY3y7T9eB83ZTnYc/oFeAKsq3NcDrOkBC8XaauEE8zHQwI7k0+JSYiVQSQ==",
+ "funding": [
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/feross"
+ },
+ {
+ "type": "patreon",
+ "url": "https://www.patreon.com/feross"
+ },
+ {
+ "type": "consulting",
+ "url": "https://feross.org/support"
+ }
+ ]
+ },
+ "node_modules/cliui": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/cliui/-/cliui-5.0.0.tgz",
+ "integrity": "sha512-PYeGSEmmHM6zvoef2w8TPzlrnNpXIjTipYK780YswmIP9vjxmd6Y2a3CB2Ks6/AU8NHjZugXvo8w3oWM2qnwXA==",
+ "dependencies": {
+ "string-width": "^3.1.0",
+ "strip-ansi": "^5.2.0",
+ "wrap-ansi": "^5.1.0"
+ }
+ },
+ "node_modules/cliui/node_modules/ansi-regex": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.0.tgz",
+ "integrity": "sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg==",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/cliui/node_modules/emoji-regex": {
+ "version": "7.0.3",
+ "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-7.0.3.tgz",
+ "integrity": "sha512-CwBLREIQ7LvYFB0WyRvwhq5N5qPhc6PMjD6bYggFlI5YyDgl+0vxq5VHbMOFqLg7hfWzmu8T5Z1QofhmTIhItA=="
+ },
+ "node_modules/cliui/node_modules/is-fullwidth-code-point": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz",
+ "integrity": "sha1-o7MKXE8ZkYMWeqq5O+764937ZU8=",
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/cliui/node_modules/string-width": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/string-width/-/string-width-3.1.0.tgz",
+ "integrity": "sha512-vafcv6KjVZKSgz06oM/H6GDBrAtz8vdhQakGjFIvNrHA6y3HCF1CInLy+QLq8dTJPQ1b+KDUqDFctkdRW44e1w==",
+ "dependencies": {
+ "emoji-regex": "^7.0.1",
+ "is-fullwidth-code-point": "^2.0.0",
+ "strip-ansi": "^5.1.0"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/cliui/node_modules/strip-ansi": {
+ "version": "5.2.0",
+ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-5.2.0.tgz",
+ "integrity": "sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA==",
+ "dependencies": {
+ "ansi-regex": "^4.1.0"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/clone-response": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/clone-response/-/clone-response-1.0.2.tgz",
+ "integrity": "sha1-0dyXOSAxTfZ/vrlCI7TuNQI56Ws=",
+ "dependencies": {
+ "mimic-response": "^1.0.0"
+ }
+ },
+ "node_modules/coa": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/coa/-/coa-2.0.2.tgz",
+ "integrity": "sha512-q5/jG+YQnSy4nRTV4F7lPepBJZ8qBNJJDBuJdoejDyLXgmL7IEo+Le2JDZudFTFt7mrCqIRaSjws4ygRCTCAXA==",
+ "dependencies": {
+ "@types/q": "^1.5.1",
+ "chalk": "^2.4.1",
+ "q": "^1.1.2"
+ },
+ "engines": {
+ "node": ">= 4.0"
+ }
+ },
+ "node_modules/collection-visit": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/collection-visit/-/collection-visit-1.0.0.tgz",
+ "integrity": "sha1-S8A3PBZLwykbTTaMgpzxqApZ3KA=",
+ "dependencies": {
+ "map-visit": "^1.0.0",
+ "object-visit": "^1.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/color": {
+ "version": "3.1.3",
+ "resolved": "https://registry.npmjs.org/color/-/color-3.1.3.tgz",
+ "integrity": "sha512-xgXAcTHa2HeFCGLE9Xs/R82hujGtu9Jd9x4NW3T34+OMs7VoPsjwzRczKHvTAHeJwWFwX5j15+MgAppE8ztObQ==",
+ "dependencies": {
+ "color-convert": "^1.9.1",
+ "color-string": "^1.5.4"
+ }
+ },
+ "node_modules/color-convert": {
+ "version": "1.9.3",
+ "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz",
+ "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==",
+ "dependencies": {
+ "color-name": "1.1.3"
+ }
+ },
+ "node_modules/color-name": {
+ "version": "1.1.3",
+ "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz",
+ "integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU="
+ },
+ "node_modules/color-string": {
+ "version": "1.5.5",
+ "resolved": "https://registry.npmjs.org/color-string/-/color-string-1.5.5.tgz",
+ "integrity": "sha512-jgIoum0OfQfq9Whcfc2z/VhCNcmQjWbey6qBX0vqt7YICflUmBCh9E9CiQD5GSJ+Uehixm3NUwHVhqUAWRivZg==",
+ "dependencies": {
+ "color-name": "^1.0.0",
+ "simple-swizzle": "^0.2.2"
+ }
+ },
+ "node_modules/colorette": {
+ "version": "1.2.2",
+ "resolved": "https://registry.npmjs.org/colorette/-/colorette-1.2.2.tgz",
+ "integrity": "sha512-MKGMzyfeuutC/ZJ1cba9NqcNpfeqMUcYmyF1ZFY6/Cn7CNSAKx6a+s48sqLqyAiZuaP2TcqMhoo+dlwFnVxT9w=="
+ },
+ "node_modules/combined-stream": {
+ "version": "1.0.8",
+ "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz",
+ "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==",
+ "dependencies": {
+ "delayed-stream": "~1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/commander": {
+ "version": "2.20.3",
+ "resolved": "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz",
+ "integrity": "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ=="
+ },
+ "node_modules/commondir": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/commondir/-/commondir-1.0.1.tgz",
+ "integrity": "sha1-3dgA2gxmEnOTzKWVDqloo6rxJTs="
+ },
+ "node_modules/component-emitter": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/component-emitter/-/component-emitter-1.3.0.tgz",
+ "integrity": "sha512-Rd3se6QB+sO1TwqZjscQrurpEPIfO0/yYnSin6Q/rD3mOutHvUrCAhJub3r90uNb+SESBuE0QYoB90YdfatsRg=="
+ },
+ "node_modules/compressible": {
+ "version": "2.0.18",
+ "resolved": "https://registry.npmjs.org/compressible/-/compressible-2.0.18.tgz",
+ "integrity": "sha512-AF3r7P5dWxL8MxyITRMlORQNaOA2IkAFaTr4k7BUumjPtRpGDTZpl0Pb1XCO6JeDCBdp126Cgs9sMxqSjgYyRg==",
+ "dependencies": {
+ "mime-db": ">= 1.43.0 < 2"
+ },
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/compression": {
+ "version": "1.7.4",
+ "resolved": "https://registry.npmjs.org/compression/-/compression-1.7.4.tgz",
+ "integrity": "sha512-jaSIDzP9pZVS4ZfQ+TzvtiWhdpFhE2RDHz8QJkpX9SIpLq88VueF5jJw6t+6CUQcAoA6t+x89MLrWAqpfDE8iQ==",
+ "dependencies": {
+ "accepts": "~1.3.5",
+ "bytes": "3.0.0",
+ "compressible": "~2.0.16",
+ "debug": "2.6.9",
+ "on-headers": "~1.0.2",
+ "safe-buffer": "5.1.2",
+ "vary": "~1.1.2"
+ },
+ "engines": {
+ "node": ">= 0.8.0"
+ }
+ },
+ "node_modules/concat-map": {
+ "version": "0.0.1",
+ "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz",
+ "integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s="
+ },
+ "node_modules/concat-stream": {
+ "version": "1.6.2",
+ "resolved": "https://registry.npmjs.org/concat-stream/-/concat-stream-1.6.2.tgz",
+ "integrity": "sha512-27HBghJxjiZtIk3Ycvn/4kbJk/1uZuJFfuPEns6LaEvpvG1f0hTea8lilrouyo9mVc2GWdcEZ8OLoGmSADlrCw==",
+ "engines": [
+ "node >= 0.8"
+ ],
+ "dependencies": {
+ "buffer-from": "^1.0.0",
+ "inherits": "^2.0.3",
+ "readable-stream": "^2.2.2",
+ "typedarray": "^0.0.6"
+ }
+ },
+ "node_modules/configstore": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/configstore/-/configstore-5.0.1.tgz",
+ "integrity": "sha512-aMKprgk5YhBNyH25hj8wGt2+D52Sw1DRRIzqBwLp2Ya9mFmY8KPvvtvmna8SxVR9JMZ4kzMD68N22vlaRpkeFA==",
+ "dependencies": {
+ "dot-prop": "^5.2.0",
+ "graceful-fs": "^4.1.2",
+ "make-dir": "^3.0.0",
+ "unique-string": "^2.0.0",
+ "write-file-atomic": "^3.0.0",
+ "xdg-basedir": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/connect-history-api-fallback": {
+ "version": "1.6.0",
+ "resolved": "https://registry.npmjs.org/connect-history-api-fallback/-/connect-history-api-fallback-1.6.0.tgz",
+ "integrity": "sha512-e54B99q/OUoH64zYYRf3HBP5z24G38h5D3qXu23JGRoigpX5Ss4r9ZnDk3g0Z8uQC2x2lPaJ+UlWBc1ZWBWdLg==",
+ "engines": {
+ "node": ">=0.8"
+ }
+ },
+ "node_modules/consola": {
+ "version": "2.15.3",
+ "resolved": "https://registry.npmjs.org/consola/-/consola-2.15.3.tgz",
+ "integrity": "sha512-9vAdYbHj6x2fLKC4+oPH0kFzY/orMZyG2Aj+kNylHxKGJ/Ed4dpNyAQYwJOdqO4zdM7XpVHmyejQDcQHrnuXbw=="
+ },
+ "node_modules/console-browserify": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/console-browserify/-/console-browserify-1.2.0.tgz",
+ "integrity": "sha512-ZMkYO/LkF17QvCPqM0gxw8yUzigAOZOSWSHg91FH6orS7vcEj5dVZTidN2fQ14yBSdg97RqhSNwLUXInd52OTA=="
+ },
+ "node_modules/consolidate": {
+ "version": "0.15.1",
+ "resolved": "https://registry.npmjs.org/consolidate/-/consolidate-0.15.1.tgz",
+ "integrity": "sha512-DW46nrsMJgy9kqAbPt5rKaCr7uFtpo4mSUvLHIUbJEjm0vo+aY5QLwBUq3FK4tRnJr/X0Psc0C4jf/h+HtXSMw==",
+ "dependencies": {
+ "bluebird": "^3.1.1"
+ },
+ "engines": {
+ "node": ">= 0.10.0"
+ }
+ },
+ "node_modules/constantinople": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/constantinople/-/constantinople-4.0.1.tgz",
+ "integrity": "sha512-vCrqcSIq4//Gx74TXXCGnHpulY1dskqLTFGDmhrGxzeXL8lF8kvXv6mpNWlJj1uD4DW23D4ljAqbY4RRaaUZIw==",
+ "dependencies": {
+ "@babel/parser": "^7.6.0",
+ "@babel/types": "^7.6.1"
+ }
+ },
+ "node_modules/constants-browserify": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/constants-browserify/-/constants-browserify-1.0.0.tgz",
+ "integrity": "sha1-wguW2MYXdIqvHBYCF2DNJ/y4y3U="
+ },
+ "node_modules/content-disposition": {
+ "version": "0.5.3",
+ "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.3.tgz",
+ "integrity": "sha512-ExO0774ikEObIAEV9kDo50o+79VCUdEB6n6lzKgGwupcVeRlhrj3qGAfwq8G6uBJjkqLrhT0qEYFcWng8z1z0g==",
+ "dependencies": {
+ "safe-buffer": "5.1.2"
+ },
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/content-type": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.4.tgz",
+ "integrity": "sha512-hIP3EEPs8tB9AT1L+NUqtwOAps4mk2Zob89MWXMHjHWg9milF/j4osnnQLXBCBFBk/tvIG/tUc9mOUJiPBhPXA==",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/convert-source-map": {
+ "version": "1.7.0",
+ "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-1.7.0.tgz",
+ "integrity": "sha512-4FJkXzKXEDB1snCFZlLP4gpC3JILicCpGbzG9f9G7tGqGCzETQ2hWPrcinA9oU4wtf2biUaEH5065UnMeR33oA==",
+ "dependencies": {
+ "safe-buffer": "~5.1.1"
+ }
+ },
+ "node_modules/cookie": {
+ "version": "0.4.0",
+ "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.4.0.tgz",
+ "integrity": "sha512-+Hp8fLp57wnUSt0tY0tHEXh4voZRDnoIrZPqlo3DPiI4y9lwg/jqx+1Om94/W6ZaPDOUbnjOt/99w66zk+l1Xg==",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/cookie-signature": {
+ "version": "1.0.6",
+ "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.6.tgz",
+ "integrity": "sha1-4wOogrNCzD7oylE6eZmXNNqzriw="
+ },
+ "node_modules/copy-concurrently": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/copy-concurrently/-/copy-concurrently-1.0.5.tgz",
+ "integrity": "sha512-f2domd9fsVDFtaFcbaRZuYXwtdmnzqbADSwhSWYxYB/Q8zsdUUFMXVRwXGDMWmbEzAn1kdRrtI1T/KTFOL4X2A==",
+ "dependencies": {
+ "aproba": "^1.1.1",
+ "fs-write-stream-atomic": "^1.0.8",
+ "iferr": "^0.1.5",
+ "mkdirp": "^0.5.1",
+ "rimraf": "^2.5.4",
+ "run-queue": "^1.0.0"
+ }
+ },
+ "node_modules/copy-concurrently/node_modules/mkdirp": {
+ "version": "0.5.5",
+ "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz",
+ "integrity": "sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==",
+ "dependencies": {
+ "minimist": "^1.2.5"
+ },
+ "bin": {
+ "mkdirp": "bin/cmd.js"
+ }
+ },
+ "node_modules/copy-descriptor": {
+ "version": "0.1.1",
+ "resolved": "https://registry.npmjs.org/copy-descriptor/-/copy-descriptor-0.1.1.tgz",
+ "integrity": "sha1-Z29us8OZl8LuGsOpJP1hJHSPV40=",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/copy-webpack-plugin": {
+ "version": "5.1.2",
+ "resolved": "https://registry.npmjs.org/copy-webpack-plugin/-/copy-webpack-plugin-5.1.2.tgz",
+ "integrity": "sha512-Uh7crJAco3AjBvgAy9Z75CjK8IG+gxaErro71THQ+vv/bl4HaQcpkexAY8KVW/T6D2W2IRr+couF/knIRkZMIQ==",
+ "dependencies": {
+ "cacache": "^12.0.3",
+ "find-cache-dir": "^2.1.0",
+ "glob-parent": "^3.1.0",
+ "globby": "^7.1.1",
+ "is-glob": "^4.0.1",
+ "loader-utils": "^1.2.3",
+ "minimatch": "^3.0.4",
+ "normalize-path": "^3.0.0",
+ "p-limit": "^2.2.1",
+ "schema-utils": "^1.0.0",
+ "serialize-javascript": "^4.0.0",
+ "webpack-log": "^2.0.0"
+ },
+ "engines": {
+ "node": ">= 6.9.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/webpack"
+ },
+ "peerDependencies": {
+ "webpack": "^4.0.0 || ^5.0.0"
+ }
+ },
+ "node_modules/copy-webpack-plugin/node_modules/find-cache-dir": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/find-cache-dir/-/find-cache-dir-2.1.0.tgz",
+ "integrity": "sha512-Tq6PixE0w/VMFfCgbONnkiQIVol/JJL7nRMi20fqzA4NRs9AfeqMGeRdPi3wIhYkxjeBaWh2rxwapn5Tu3IqOQ==",
+ "dependencies": {
+ "commondir": "^1.0.1",
+ "make-dir": "^2.0.0",
+ "pkg-dir": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/copy-webpack-plugin/node_modules/find-up": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz",
+ "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==",
+ "dependencies": {
+ "locate-path": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/copy-webpack-plugin/node_modules/globby": {
+ "version": "7.1.1",
+ "resolved": "https://registry.npmjs.org/globby/-/globby-7.1.1.tgz",
+ "integrity": "sha1-+yzP+UAfhgCUXfral0QMypcrhoA=",
+ "dependencies": {
+ "array-union": "^1.0.1",
+ "dir-glob": "^2.0.0",
+ "glob": "^7.1.2",
+ "ignore": "^3.3.5",
+ "pify": "^3.0.0",
+ "slash": "^1.0.0"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/copy-webpack-plugin/node_modules/globby/node_modules/pify": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/pify/-/pify-3.0.0.tgz",
+ "integrity": "sha1-5aSs0sEB/fPZpNB/DbxNtJ3SgXY=",
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/copy-webpack-plugin/node_modules/ignore": {
+ "version": "3.3.10",
+ "resolved": "https://registry.npmjs.org/ignore/-/ignore-3.3.10.tgz",
+ "integrity": "sha512-Pgs951kaMm5GXP7MOvxERINe3gsaVjUWFm+UZPSq9xYriQAksyhg0csnS0KXSNRD5NmNdapXEpjxG49+AKh/ug=="
+ },
+ "node_modules/copy-webpack-plugin/node_modules/locate-path": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz",
+ "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==",
+ "dependencies": {
+ "p-locate": "^3.0.0",
+ "path-exists": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/copy-webpack-plugin/node_modules/make-dir": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-2.1.0.tgz",
+ "integrity": "sha512-LS9X+dc8KLxXCb8dni79fLIIUA5VyZoyjSMCwTluaXA0o27cCK0bhXkpgw+sTXVpPy/lSO57ilRixqk0vDmtRA==",
+ "dependencies": {
+ "pify": "^4.0.1",
+ "semver": "^5.6.0"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/copy-webpack-plugin/node_modules/p-locate": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz",
+ "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==",
+ "dependencies": {
+ "p-limit": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/copy-webpack-plugin/node_modules/path-exists": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz",
+ "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=",
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/copy-webpack-plugin/node_modules/pkg-dir": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-3.0.0.tgz",
+ "integrity": "sha512-/E57AYkoeQ25qkxMj5PBOVgF8Kiu/h7cYS30Z5+R7WaiCCBfLq58ZI/dSeaEKb9WVJV5n/03QwrN3IeWIFllvw==",
+ "dependencies": {
+ "find-up": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/copy-webpack-plugin/node_modules/schema-utils": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-1.0.0.tgz",
+ "integrity": "sha512-i27Mic4KovM/lnGsy8whRCHhc7VicJajAjTrYg11K9zfZXnYIt4k5F+kZkwjnrhKzLic/HLU4j11mjsz2G/75g==",
+ "dependencies": {
+ "ajv": "^6.1.0",
+ "ajv-errors": "^1.0.0",
+ "ajv-keywords": "^3.1.0"
+ },
+ "engines": {
+ "node": ">= 4"
+ }
+ },
+ "node_modules/copy-webpack-plugin/node_modules/semver": {
+ "version": "5.7.1",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz",
+ "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==",
+ "bin": {
+ "semver": "bin/semver"
+ }
+ },
+ "node_modules/copy-webpack-plugin/node_modules/slash": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/slash/-/slash-1.0.0.tgz",
+ "integrity": "sha1-xB8vbDn8FtHNF61LXYlhFK5HDVU=",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/core-js": {
+ "version": "3.11.1",
+ "resolved": "https://registry.npmjs.org/core-js/-/core-js-3.11.1.tgz",
+ "integrity": "sha512-k93Isqg7e4txZWMGNYwevZL9MiogLk8pd1PtwrmFmi8IBq4GXqUaVW/a33Llt6amSI36uSjd0GWwc9pTT9ALlQ==",
+ "hasInstallScript": true,
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/core-js"
+ }
+ },
+ "node_modules/core-js-compat": {
+ "version": "3.11.1",
+ "resolved": "https://registry.npmjs.org/core-js-compat/-/core-js-compat-3.11.1.tgz",
+ "integrity": "sha512-aZ0e4tmlG/aOBHj92/TuOuZwp6jFvn1WNabU5VOVixzhu5t5Ao+JZkQOPlgNXu6ynwLrwJxklT4Gw1G1VGEh+g==",
+ "dependencies": {
+ "browserslist": "^4.16.5",
+ "semver": "7.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/core-js"
+ }
+ },
+ "node_modules/core-js-compat/node_modules/semver": {
+ "version": "7.0.0",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-7.0.0.tgz",
+ "integrity": "sha512-+GB6zVA9LWh6zovYQLALHwv5rb2PHGlJi3lfiqIHxR0uuwCgefcOJc59v9fv1w8GbStwxuuqqAjI9NMAOOgq1A==",
+ "bin": {
+ "semver": "bin/semver.js"
+ }
+ },
+ "node_modules/core-util-is": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz",
+ "integrity": "sha1-tf1UIgqivFq1eqtxQMlAdUUDwac="
+ },
+ "node_modules/cosmiconfig": {
+ "version": "5.2.1",
+ "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-5.2.1.tgz",
+ "integrity": "sha512-H65gsXo1SKjf8zmrJ67eJk8aIRKV5ff2D4uKZIBZShbhGSpEmsQOPW/SKMKYhSTrqR7ufy6RP69rPogdaPh/kA==",
+ "dependencies": {
+ "import-fresh": "^2.0.0",
+ "is-directory": "^0.3.1",
+ "js-yaml": "^3.13.1",
+ "parse-json": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/create-ecdh": {
+ "version": "4.0.4",
+ "resolved": "https://registry.npmjs.org/create-ecdh/-/create-ecdh-4.0.4.tgz",
+ "integrity": "sha512-mf+TCx8wWc9VpuxfP2ht0iSISLZnt0JgWlrOKZiNqyUZWnjIaCIVNQArMHnCZKfEYRg6IM7A+NeJoN8gf/Ws0A==",
+ "dependencies": {
+ "bn.js": "^4.1.0",
+ "elliptic": "^6.5.3"
+ }
+ },
+ "node_modules/create-ecdh/node_modules/bn.js": {
+ "version": "4.12.0",
+ "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.12.0.tgz",
+ "integrity": "sha512-c98Bf3tPniI+scsdk237ku1Dc3ujXQTSgyiPUDEOe7tRkhrqridvh8klBv0HCEso1OLOYcHuCv/cS6DNxKH+ZA=="
+ },
+ "node_modules/create-hash": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/create-hash/-/create-hash-1.2.0.tgz",
+ "integrity": "sha512-z00bCGNHDG8mHAkP7CtT1qVu+bFQUPjYq/4Iv3C3kWjTFV10zIjfSoeqXo9Asws8gwSHDGj/hl2u4OGIjapeCg==",
+ "dependencies": {
+ "cipher-base": "^1.0.1",
+ "inherits": "^2.0.1",
+ "md5.js": "^1.3.4",
+ "ripemd160": "^2.0.1",
+ "sha.js": "^2.4.0"
+ }
+ },
+ "node_modules/create-hmac": {
+ "version": "1.1.7",
+ "resolved": "https://registry.npmjs.org/create-hmac/-/create-hmac-1.1.7.tgz",
+ "integrity": "sha512-MJG9liiZ+ogc4TzUwuvbER1JRdgvUFSB5+VR/g5h82fGaIRWMWddtKBHi7/sVhfjQZ6SehlyhvQYrcYkaUIpLg==",
+ "dependencies": {
+ "cipher-base": "^1.0.3",
+ "create-hash": "^1.1.0",
+ "inherits": "^2.0.1",
+ "ripemd160": "^2.0.0",
+ "safe-buffer": "^5.0.1",
+ "sha.js": "^2.4.8"
+ }
+ },
+ "node_modules/cross-spawn": {
+ "version": "6.0.5",
+ "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-6.0.5.tgz",
+ "integrity": "sha512-eTVLrBSt7fjbDygz805pMnstIs2VTBNkRm0qxZd+M7A5XDdxVRWO5MxGBXZhjY4cqLYLdtrGqRf8mBPmzwSpWQ==",
+ "dependencies": {
+ "nice-try": "^1.0.4",
+ "path-key": "^2.0.1",
+ "semver": "^5.5.0",
+ "shebang-command": "^1.2.0",
+ "which": "^1.2.9"
+ },
+ "engines": {
+ "node": ">=4.8"
+ }
+ },
+ "node_modules/cross-spawn/node_modules/semver": {
+ "version": "5.7.1",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz",
+ "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==",
+ "bin": {
+ "semver": "bin/semver"
+ }
+ },
+ "node_modules/crypto-browserify": {
+ "version": "3.12.0",
+ "resolved": "https://registry.npmjs.org/crypto-browserify/-/crypto-browserify-3.12.0.tgz",
+ "integrity": "sha512-fz4spIh+znjO2VjL+IdhEpRJ3YN6sMzITSBijk6FK2UvTqruSQW+/cCZTSNsMiZNvUeq0CqurF+dAbyiGOY6Wg==",
+ "dependencies": {
+ "browserify-cipher": "^1.0.0",
+ "browserify-sign": "^4.0.0",
+ "create-ecdh": "^4.0.0",
+ "create-hash": "^1.1.0",
+ "create-hmac": "^1.1.0",
+ "diffie-hellman": "^5.0.0",
+ "inherits": "^2.0.1",
+ "pbkdf2": "^3.0.3",
+ "public-encrypt": "^4.0.0",
+ "randombytes": "^2.0.0",
+ "randomfill": "^1.0.3"
+ },
+ "engines": {
+ "node": "*"
+ }
+ },
+ "node_modules/crypto-random-string": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/crypto-random-string/-/crypto-random-string-2.0.0.tgz",
+ "integrity": "sha512-v1plID3y9r/lPhviJ1wrXpLeyUIGAZ2SHNYTEapm7/8A9nLPoyvVp3RK/EPFqn5kEznyWgYZNsRtYYIWbuG8KA==",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/css": {
+ "version": "2.2.4",
+ "resolved": "https://registry.npmjs.org/css/-/css-2.2.4.tgz",
+ "integrity": "sha512-oUnjmWpy0niI3x/mPL8dVEI1l7MnG3+HHyRPHf+YFSbK+svOhXpmSOcDURUh2aOCgl2grzrOPt1nHLuCVFULLw==",
+ "dependencies": {
+ "inherits": "^2.0.3",
+ "source-map": "^0.6.1",
+ "source-map-resolve": "^0.5.2",
+ "urix": "^0.1.0"
+ }
+ },
+ "node_modules/css-color-names": {
+ "version": "0.0.4",
+ "resolved": "https://registry.npmjs.org/css-color-names/-/css-color-names-0.0.4.tgz",
+ "integrity": "sha1-gIrcLnnPhHOAabZGyyDsJ762KeA=",
+ "engines": {
+ "node": "*"
+ }
+ },
+ "node_modules/css-declaration-sorter": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/css-declaration-sorter/-/css-declaration-sorter-4.0.1.tgz",
+ "integrity": "sha512-BcxQSKTSEEQUftYpBVnsH4SF05NTuBokb19/sBt6asXGKZ/6VP7PLG1CBCkFDYOnhXhPh0jMhO6xZ71oYHXHBA==",
+ "dependencies": {
+ "postcss": "^7.0.1",
+ "timsort": "^0.3.0"
+ },
+ "engines": {
+ "node": ">4"
+ }
+ },
+ "node_modules/css-loader": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/css-loader/-/css-loader-2.1.1.tgz",
+ "integrity": "sha512-OcKJU/lt232vl1P9EEDamhoO9iKY3tIjY5GU+XDLblAykTdgs6Ux9P1hTHve8nFKy5KPpOXOsVI/hIwi3841+w==",
+ "dependencies": {
+ "camelcase": "^5.2.0",
+ "icss-utils": "^4.1.0",
+ "loader-utils": "^1.2.3",
+ "normalize-path": "^3.0.0",
+ "postcss": "^7.0.14",
+ "postcss-modules-extract-imports": "^2.0.0",
+ "postcss-modules-local-by-default": "^2.0.6",
+ "postcss-modules-scope": "^2.1.0",
+ "postcss-modules-values": "^2.0.0",
+ "postcss-value-parser": "^3.3.0",
+ "schema-utils": "^1.0.0"
+ },
+ "engines": {
+ "node": ">= 6.9.0"
+ },
+ "peerDependencies": {
+ "webpack": "^4.0.0"
+ }
+ },
+ "node_modules/css-loader/node_modules/camelcase": {
+ "version": "5.3.1",
+ "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz",
+ "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/css-loader/node_modules/postcss-value-parser": {
+ "version": "3.3.1",
+ "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz",
+ "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ=="
+ },
+ "node_modules/css-loader/node_modules/schema-utils": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-1.0.0.tgz",
+ "integrity": "sha512-i27Mic4KovM/lnGsy8whRCHhc7VicJajAjTrYg11K9zfZXnYIt4k5F+kZkwjnrhKzLic/HLU4j11mjsz2G/75g==",
+ "dependencies": {
+ "ajv": "^6.1.0",
+ "ajv-errors": "^1.0.0",
+ "ajv-keywords": "^3.1.0"
+ },
+ "engines": {
+ "node": ">= 4"
+ }
+ },
+ "node_modules/css-parse": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/css-parse/-/css-parse-2.0.0.tgz",
+ "integrity": "sha1-pGjuZnwW2BzPBcWMONKpfHgNv9Q=",
+ "dependencies": {
+ "css": "^2.0.0"
+ }
+ },
+ "node_modules/css-select": {
+ "version": "4.1.2",
+ "resolved": "https://registry.npmjs.org/css-select/-/css-select-4.1.2.tgz",
+ "integrity": "sha512-nu5ye2Hg/4ISq4XqdLY2bEatAcLIdt3OYGFc9Tm9n7VSlFBcfRv0gBNksHRgSdUDQGtN3XrZ94ztW+NfzkFSUw==",
+ "dependencies": {
+ "boolbase": "^1.0.0",
+ "css-what": "^5.0.0",
+ "domhandler": "^4.2.0",
+ "domutils": "^2.6.0",
+ "nth-check": "^2.0.0"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/fb55"
+ }
+ },
+ "node_modules/css-select-base-adapter": {
+ "version": "0.1.1",
+ "resolved": "https://registry.npmjs.org/css-select-base-adapter/-/css-select-base-adapter-0.1.1.tgz",
+ "integrity": "sha512-jQVeeRG70QI08vSTwf1jHxp74JoZsr2XSgETae8/xC8ovSnL2WF87GTLO86Sbwdt2lK4Umg4HnnwMO4YF3Ce7w=="
+ },
+ "node_modules/css-tree": {
+ "version": "1.0.0-alpha.37",
+ "resolved": "https://registry.npmjs.org/css-tree/-/css-tree-1.0.0-alpha.37.tgz",
+ "integrity": "sha512-DMxWJg0rnz7UgxKT0Q1HU/L9BeJI0M6ksor0OgqOnF+aRCDWg/N2641HmVyU9KVIu0OVVWOb2IpC9A+BJRnejg==",
+ "dependencies": {
+ "mdn-data": "2.0.4",
+ "source-map": "^0.6.1"
+ },
+ "engines": {
+ "node": ">=8.0.0"
+ }
+ },
+ "node_modules/css-tree/node_modules/source-map": {
+ "version": "0.6.1",
+ "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz",
+ "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/css-what": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/css-what/-/css-what-5.0.0.tgz",
+ "integrity": "sha512-qxyKHQvgKwzwDWC/rGbT821eJalfupxYW2qbSJSAtdSTimsr/MlaGONoNLllaUPZWf8QnbcKM/kPVYUQuEKAFA==",
+ "engines": {
+ "node": ">= 6"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/fb55"
+ }
+ },
+ "node_modules/css/node_modules/source-map": {
+ "version": "0.6.1",
+ "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz",
+ "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/cssesc": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/cssesc/-/cssesc-3.0.0.tgz",
+ "integrity": "sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==",
+ "bin": {
+ "cssesc": "bin/cssesc"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/cssnano": {
+ "version": "4.1.11",
+ "resolved": "https://registry.npmjs.org/cssnano/-/cssnano-4.1.11.tgz",
+ "integrity": "sha512-6gZm2htn7xIPJOHY824ERgj8cNPgPxyCSnkXc4v7YvNW+TdVfzgngHcEhy/8D11kUWRUMbke+tC+AUcUsnMz2g==",
+ "dependencies": {
+ "cosmiconfig": "^5.0.0",
+ "cssnano-preset-default": "^4.0.8",
+ "is-resolvable": "^1.0.0",
+ "postcss": "^7.0.0"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/cssnano-preset-default": {
+ "version": "4.0.8",
+ "resolved": "https://registry.npmjs.org/cssnano-preset-default/-/cssnano-preset-default-4.0.8.tgz",
+ "integrity": "sha512-LdAyHuq+VRyeVREFmuxUZR1TXjQm8QQU/ktoo/x7bz+SdOge1YKc5eMN6pRW7YWBmyq59CqYba1dJ5cUukEjLQ==",
+ "dependencies": {
+ "css-declaration-sorter": "^4.0.1",
+ "cssnano-util-raw-cache": "^4.0.1",
+ "postcss": "^7.0.0",
+ "postcss-calc": "^7.0.1",
+ "postcss-colormin": "^4.0.3",
+ "postcss-convert-values": "^4.0.1",
+ "postcss-discard-comments": "^4.0.2",
+ "postcss-discard-duplicates": "^4.0.2",
+ "postcss-discard-empty": "^4.0.1",
+ "postcss-discard-overridden": "^4.0.1",
+ "postcss-merge-longhand": "^4.0.11",
+ "postcss-merge-rules": "^4.0.3",
+ "postcss-minify-font-values": "^4.0.2",
+ "postcss-minify-gradients": "^4.0.2",
+ "postcss-minify-params": "^4.0.2",
+ "postcss-minify-selectors": "^4.0.2",
+ "postcss-normalize-charset": "^4.0.1",
+ "postcss-normalize-display-values": "^4.0.2",
+ "postcss-normalize-positions": "^4.0.2",
+ "postcss-normalize-repeat-style": "^4.0.2",
+ "postcss-normalize-string": "^4.0.2",
+ "postcss-normalize-timing-functions": "^4.0.2",
+ "postcss-normalize-unicode": "^4.0.1",
+ "postcss-normalize-url": "^4.0.1",
+ "postcss-normalize-whitespace": "^4.0.2",
+ "postcss-ordered-values": "^4.1.2",
+ "postcss-reduce-initial": "^4.0.3",
+ "postcss-reduce-transforms": "^4.0.2",
+ "postcss-svgo": "^4.0.3",
+ "postcss-unique-selectors": "^4.0.1"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/cssnano-util-get-arguments": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/cssnano-util-get-arguments/-/cssnano-util-get-arguments-4.0.0.tgz",
+ "integrity": "sha1-7ToIKZ8h11dBsg87gfGU7UnMFQ8=",
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/cssnano-util-get-match": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/cssnano-util-get-match/-/cssnano-util-get-match-4.0.0.tgz",
+ "integrity": "sha1-wOTKB/U4a7F+xeUiULT1lhNlFW0=",
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/cssnano-util-raw-cache": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/cssnano-util-raw-cache/-/cssnano-util-raw-cache-4.0.1.tgz",
+ "integrity": "sha512-qLuYtWK2b2Dy55I8ZX3ky1Z16WYsx544Q0UWViebptpwn/xDBmog2TLg4f+DBMg1rJ6JDWtn96WHbOKDWt1WQA==",
+ "dependencies": {
+ "postcss": "^7.0.0"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/cssnano-util-same-parent": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/cssnano-util-same-parent/-/cssnano-util-same-parent-4.0.1.tgz",
+ "integrity": "sha512-WcKx5OY+KoSIAxBW6UBBRay1U6vkYheCdjyVNDm85zt5K9mHoGOfsOsqIszfAqrQQFIIKgjh2+FDgIj/zsl21Q==",
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/csso": {
+ "version": "4.2.0",
+ "resolved": "https://registry.npmjs.org/csso/-/csso-4.2.0.tgz",
+ "integrity": "sha512-wvlcdIbf6pwKEk7vHj8/Bkc0B4ylXZruLvOgs9doS5eOsOpuodOV2zJChSpkp+pRpYQLQMeF04nr3Z68Sta9jA==",
+ "dependencies": {
+ "css-tree": "^1.1.2"
+ },
+ "engines": {
+ "node": ">=8.0.0"
+ }
+ },
+ "node_modules/csso/node_modules/css-tree": {
+ "version": "1.1.3",
+ "resolved": "https://registry.npmjs.org/css-tree/-/css-tree-1.1.3.tgz",
+ "integrity": "sha512-tRpdppF7TRazZrjJ6v3stzv93qxRcSsFmW6cX0Zm2NVKpxE1WV1HblnghVv9TreireHkqI/VDEsfolRF1p6y7Q==",
+ "dependencies": {
+ "mdn-data": "2.0.14",
+ "source-map": "^0.6.1"
+ },
+ "engines": {
+ "node": ">=8.0.0"
+ }
+ },
+ "node_modules/csso/node_modules/mdn-data": {
+ "version": "2.0.14",
+ "resolved": "https://registry.npmjs.org/mdn-data/-/mdn-data-2.0.14.tgz",
+ "integrity": "sha512-dn6wd0uw5GsdswPFfsgMp5NSB0/aDe6fK94YJV/AJDYXL6HVLWBsxeq7js7Ad+mU2K9LAlwpk6kN2D5mwCPVow=="
+ },
+ "node_modules/csso/node_modules/source-map": {
+ "version": "0.6.1",
+ "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz",
+ "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/cyclist": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/cyclist/-/cyclist-1.0.1.tgz",
+ "integrity": "sha1-WW6WmP0MgOEgOMK4LW6xs1tiJNk="
+ },
+ "node_modules/dashdash": {
+ "version": "1.14.1",
+ "resolved": "https://registry.npmjs.org/dashdash/-/dashdash-1.14.1.tgz",
+ "integrity": "sha1-hTz6D3y+L+1d4gMmuN1YEDX24vA=",
+ "dependencies": {
+ "assert-plus": "^1.0.0"
+ },
+ "engines": {
+ "node": ">=0.10"
+ }
+ },
+ "node_modules/de-indent": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/de-indent/-/de-indent-1.0.2.tgz",
+ "integrity": "sha1-sgOOhG3DO6pXlhKNCAS0VbjB4h0="
+ },
+ "node_modules/debug": {
+ "version": "2.6.9",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
+ "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
+ "dependencies": {
+ "ms": "2.0.0"
+ }
+ },
+ "node_modules/decamelize": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/decamelize/-/decamelize-1.2.0.tgz",
+ "integrity": "sha1-9lNNFRSCabIDUue+4m9QH5oZEpA=",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/decode-uri-component": {
+ "version": "0.2.0",
+ "resolved": "https://registry.npmjs.org/decode-uri-component/-/decode-uri-component-0.2.0.tgz",
+ "integrity": "sha1-6zkTMzRYd1y4TNGh+uBiEGu4dUU=",
+ "engines": {
+ "node": ">=0.10"
+ }
+ },
+ "node_modules/decompress-response": {
+ "version": "3.3.0",
+ "resolved": "https://registry.npmjs.org/decompress-response/-/decompress-response-3.3.0.tgz",
+ "integrity": "sha1-gKTdMjdIOEv6JICDYirt7Jgq3/M=",
+ "dependencies": {
+ "mimic-response": "^1.0.0"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/deep-equal": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/deep-equal/-/deep-equal-1.1.1.tgz",
+ "integrity": "sha512-yd9c5AdiqVcR+JjcwUQb9DkhJc8ngNr0MahEBGvDiJw8puWab2yZlh+nkasOnZP+EGTAP6rRp2JzJhJZzvNF8g==",
+ "dependencies": {
+ "is-arguments": "^1.0.4",
+ "is-date-object": "^1.0.1",
+ "is-regex": "^1.0.4",
+ "object-is": "^1.0.1",
+ "object-keys": "^1.1.1",
+ "regexp.prototype.flags": "^1.2.0"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/deep-extend": {
+ "version": "0.6.0",
+ "resolved": "https://registry.npmjs.org/deep-extend/-/deep-extend-0.6.0.tgz",
+ "integrity": "sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==",
+ "engines": {
+ "node": ">=4.0.0"
+ }
+ },
+ "node_modules/deepmerge": {
+ "version": "1.5.2",
+ "resolved": "https://registry.npmjs.org/deepmerge/-/deepmerge-1.5.2.tgz",
+ "integrity": "sha512-95k0GDqvBjZavkuvzx/YqVLv/6YYa17fz6ILMSf7neqQITCPbnfEnQvEgMPNjH4kgobe7+WIL0yJEHku+H3qtQ==",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/default-gateway": {
+ "version": "4.2.0",
+ "resolved": "https://registry.npmjs.org/default-gateway/-/default-gateway-4.2.0.tgz",
+ "integrity": "sha512-h6sMrVB1VMWVrW13mSc6ia/DwYYw5MN6+exNu1OaJeFac5aSAvwM7lZ0NVfTABuSkQelr4h5oebg3KB1XPdjgA==",
+ "dependencies": {
+ "execa": "^1.0.0",
+ "ip-regex": "^2.1.0"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/defer-to-connect": {
+ "version": "1.1.3",
+ "resolved": "https://registry.npmjs.org/defer-to-connect/-/defer-to-connect-1.1.3.tgz",
+ "integrity": "sha512-0ISdNousHvZT2EiFlZeZAHBUvSxmKswVCEf8hW7KWgG4a8MVEu/3Vb6uWYozkjylyCxe0JBIiRB1jV45S70WVQ=="
+ },
+ "node_modules/define-properties": {
+ "version": "1.1.3",
+ "resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.1.3.tgz",
+ "integrity": "sha512-3MqfYKj2lLzdMSf8ZIZE/V+Zuy+BgD6f164e8K2w7dgnpKArBDerGYpM46IYYcjnkdPNMjPk9A6VFB8+3SKlXQ==",
+ "dependencies": {
+ "object-keys": "^1.0.12"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/define-property": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/define-property/-/define-property-2.0.2.tgz",
+ "integrity": "sha512-jwK2UV4cnPpbcG7+VRARKTZPUWowwXA8bzH5NP6ud0oeAxyYPuGZUAC7hMugpCdz4BeSZl2Dl9k66CHJ/46ZYQ==",
+ "dependencies": {
+ "is-descriptor": "^1.0.2",
+ "isobject": "^3.0.1"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/del": {
+ "version": "4.1.1",
+ "resolved": "https://registry.npmjs.org/del/-/del-4.1.1.tgz",
+ "integrity": "sha512-QwGuEUouP2kVwQenAsOof5Fv8K9t3D8Ca8NxcXKrIpEHjTXK5J2nXLdP+ALI1cgv8wj7KuwBhTwBkOZSJKM5XQ==",
+ "dependencies": {
+ "@types/glob": "^7.1.1",
+ "globby": "^6.1.0",
+ "is-path-cwd": "^2.0.0",
+ "is-path-in-cwd": "^2.0.0",
+ "p-map": "^2.0.0",
+ "pify": "^4.0.1",
+ "rimraf": "^2.6.3"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/del/node_modules/globby": {
+ "version": "6.1.0",
+ "resolved": "https://registry.npmjs.org/globby/-/globby-6.1.0.tgz",
+ "integrity": "sha1-9abXDoOV4hyFj7BInWTfAkJNUGw=",
+ "dependencies": {
+ "array-union": "^1.0.1",
+ "glob": "^7.0.3",
+ "object-assign": "^4.0.1",
+ "pify": "^2.0.0",
+ "pinkie-promise": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/del/node_modules/globby/node_modules/pify": {
+ "version": "2.3.0",
+ "resolved": "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz",
+ "integrity": "sha1-7RQaasBDqEnqWISY59yosVMw6Qw=",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/delayed-stream": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz",
+ "integrity": "sha1-3zrhmayt+31ECqrgsp4icrJOxhk=",
+ "engines": {
+ "node": ">=0.4.0"
+ }
+ },
+ "node_modules/delegate": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/delegate/-/delegate-3.2.0.tgz",
+ "integrity": "sha512-IofjkYBZaZivn0V8nnsMJGBr4jVLxHDheKSW88PyxS5QC4Vo9ZbZVvhzlSxY87fVq3STR6r+4cGepyHkcWOQSw==",
+ "optional": true
+ },
+ "node_modules/depd": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/depd/-/depd-1.1.2.tgz",
+ "integrity": "sha1-m81S4UwJd2PnSbJ0xDRu0uVgtak=",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/des.js": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/des.js/-/des.js-1.0.1.tgz",
+ "integrity": "sha512-Q0I4pfFrv2VPd34/vfLrFOoRmlYj3OV50i7fskps1jZWK1kApMWWT9G6RRUeYedLcBDIhnSDaUvJMb3AhUlaEA==",
+ "dependencies": {
+ "inherits": "^2.0.1",
+ "minimalistic-assert": "^1.0.0"
+ }
+ },
+ "node_modules/destroy": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/destroy/-/destroy-1.0.4.tgz",
+ "integrity": "sha1-l4hXRCxEdJ5CBmE+N5RiBYJqvYA="
+ },
+ "node_modules/detect-node": {
+ "version": "2.0.5",
+ "resolved": "https://registry.npmjs.org/detect-node/-/detect-node-2.0.5.tgz",
+ "integrity": "sha512-qi86tE6hRcFHy8jI1m2VG+LaPUR1LhqDa5G8tVjuUXmOrpuAgqsA1pN0+ldgr3aKUH+QLI9hCY/OcRYisERejw=="
+ },
+ "node_modules/diffie-hellman": {
+ "version": "5.0.3",
+ "resolved": "https://registry.npmjs.org/diffie-hellman/-/diffie-hellman-5.0.3.tgz",
+ "integrity": "sha512-kqag/Nl+f3GwyK25fhUMYj81BUOrZ9IuJsjIcDE5icNM9FJHAVm3VcUDxdLPoQtTuUylWm6ZIknYJwwaPxsUzg==",
+ "dependencies": {
+ "bn.js": "^4.1.0",
+ "miller-rabin": "^4.0.0",
+ "randombytes": "^2.0.0"
+ }
+ },
+ "node_modules/diffie-hellman/node_modules/bn.js": {
+ "version": "4.12.0",
+ "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.12.0.tgz",
+ "integrity": "sha512-c98Bf3tPniI+scsdk237ku1Dc3ujXQTSgyiPUDEOe7tRkhrqridvh8klBv0HCEso1OLOYcHuCv/cS6DNxKH+ZA=="
+ },
+ "node_modules/dir-glob": {
+ "version": "2.2.2",
+ "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-2.2.2.tgz",
+ "integrity": "sha512-f9LBi5QWzIW3I6e//uxZoLBlUt9kcp66qo0sSCxL6YZKc75R1c4MFCoe/LaZiBGmgujvQdxc5Bn3QhfyvK5Hsw==",
+ "dependencies": {
+ "path-type": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/dns-equal": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/dns-equal/-/dns-equal-1.0.0.tgz",
+ "integrity": "sha1-s55/HabrCnW6nBcySzR1PEfgZU0="
+ },
+ "node_modules/dns-packet": {
+ "version": "1.3.1",
+ "resolved": "https://registry.npmjs.org/dns-packet/-/dns-packet-1.3.1.tgz",
+ "integrity": "sha512-0UxfQkMhYAUaZI+xrNZOz/as5KgDU0M/fQ9b6SpkyLbk3GEswDi6PADJVaYJradtRVsRIlF1zLyOodbcTCDzUg==",
+ "dependencies": {
+ "ip": "^1.1.0",
+ "safe-buffer": "^5.0.1"
+ }
+ },
+ "node_modules/dns-txt": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/dns-txt/-/dns-txt-2.0.2.tgz",
+ "integrity": "sha1-uR2Ab10nGI5Ks+fRB9iBocxGQrY=",
+ "dependencies": {
+ "buffer-indexof": "^1.0.0"
+ }
+ },
+ "node_modules/docsearch.js": {
+ "version": "2.6.3",
+ "resolved": "https://registry.npmjs.org/docsearch.js/-/docsearch.js-2.6.3.tgz",
+ "integrity": "sha512-GN+MBozuyz664ycpZY0ecdQE0ND/LSgJKhTLA0/v3arIS3S1Rpf2OJz6A35ReMsm91V5apcmzr5/kM84cvUg+A==",
+ "dependencies": {
+ "algoliasearch": "^3.24.5",
+ "autocomplete.js": "0.36.0",
+ "hogan.js": "^3.0.2",
+ "request": "^2.87.0",
+ "stack-utils": "^1.0.1",
+ "to-factory": "^1.0.0",
+ "zepto": "^1.2.0"
+ }
+ },
+ "node_modules/docsearch.js/node_modules/algoliasearch": {
+ "version": "3.35.1",
+ "resolved": "https://registry.npmjs.org/algoliasearch/-/algoliasearch-3.35.1.tgz",
+ "integrity": "sha512-K4yKVhaHkXfJ/xcUnil04xiSrB8B8yHZoFEhWNpXg23eiCnqvTZw1tn/SqvdsANlYHLJlKl0qi3I/Q2Sqo7LwQ==",
+ "dependencies": {
+ "agentkeepalive": "^2.2.0",
+ "debug": "^2.6.9",
+ "envify": "^4.0.0",
+ "es6-promise": "^4.1.0",
+ "events": "^1.1.0",
+ "foreach": "^2.0.5",
+ "global": "^4.3.2",
+ "inherits": "^2.0.1",
+ "isarray": "^2.0.1",
+ "load-script": "^1.0.0",
+ "object-keys": "^1.0.11",
+ "querystring-es3": "^0.2.1",
+ "reduce": "^1.0.1",
+ "semver": "^5.1.0",
+ "tunnel-agent": "^0.6.0"
+ },
+ "engines": {
+ "node": ">=0.8"
+ }
+ },
+ "node_modules/docsearch.js/node_modules/events": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/events/-/events-1.1.1.tgz",
+ "integrity": "sha1-nr23Y1rQmccNzEwqH1AEKI6L2SQ=",
+ "engines": {
+ "node": ">=0.4.x"
+ }
+ },
+ "node_modules/docsearch.js/node_modules/isarray": {
+ "version": "2.0.5",
+ "resolved": "https://registry.npmjs.org/isarray/-/isarray-2.0.5.tgz",
+ "integrity": "sha512-xHjhDr3cNBK0BzdUJSPXZntQUx/mwMS5Rw4A7lPJ90XGAO6ISP/ePDNuo0vhqOZU+UD5JoodwCAAoZQd3FeAKw=="
+ },
+ "node_modules/docsearch.js/node_modules/semver": {
+ "version": "5.7.1",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz",
+ "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==",
+ "bin": {
+ "semver": "bin/semver"
+ }
+ },
+ "node_modules/doctypes": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/doctypes/-/doctypes-1.1.0.tgz",
+ "integrity": "sha1-6oCxBqh1OHdOijpKWv4pPeSJ4Kk="
+ },
+ "node_modules/dom-converter": {
+ "version": "0.2.0",
+ "resolved": "https://registry.npmjs.org/dom-converter/-/dom-converter-0.2.0.tgz",
+ "integrity": "sha512-gd3ypIPfOMr9h5jIKq8E3sHOTCjeirnl0WK5ZdS1AW0Odt0b1PaWaHdJ4Qk4klv+YB9aJBS7mESXjFoDQPu6DA==",
+ "dependencies": {
+ "utila": "~0.4"
+ }
+ },
+ "node_modules/dom-serializer": {
+ "version": "1.3.1",
+ "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-1.3.1.tgz",
+ "integrity": "sha512-Pv2ZluG5ife96udGgEDovOOOA5UELkltfJpnIExPrAk1LTvecolUGn6lIaoLh86d83GiB86CjzciMd9BuRB71Q==",
+ "dependencies": {
+ "domelementtype": "^2.0.1",
+ "domhandler": "^4.0.0",
+ "entities": "^2.0.0"
+ },
+ "funding": {
+ "url": "https://github.com/cheeriojs/dom-serializer?sponsor=1"
+ }
+ },
+ "node_modules/dom-walk": {
+ "version": "0.1.2",
+ "resolved": "https://registry.npmjs.org/dom-walk/-/dom-walk-0.1.2.tgz",
+ "integrity": "sha512-6QvTW9mrGeIegrFXdtQi9pk7O/nSK6lSdXW2eqUspN5LWD7UTji2Fqw5V2YLjBpHEoU9Xl/eUWNpDeZvoyOv2w=="
+ },
+ "node_modules/domain-browser": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/domain-browser/-/domain-browser-1.2.0.tgz",
+ "integrity": "sha512-jnjyiM6eRyZl2H+W8Q/zLMA481hzi0eszAaBUzIVnmYVDBbnLxVNnfu1HgEBvCbL+71FrxMl3E6lpKH7Ge3OXA==",
+ "engines": {
+ "node": ">=0.4",
+ "npm": ">=1.2"
+ }
+ },
+ "node_modules/domelementtype": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-2.2.0.tgz",
+ "integrity": "sha512-DtBMo82pv1dFtUmHyr48beiuq792Sxohr+8Hm9zoxklYPfa6n0Z3Byjj2IV7bmr2IyqClnqEQhfgHJJ5QF0R5A==",
+ "funding": [
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/fb55"
+ }
+ ]
+ },
+ "node_modules/domhandler": {
+ "version": "4.2.0",
+ "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-4.2.0.tgz",
+ "integrity": "sha512-zk7sgt970kzPks2Bf+dwT/PLzghLnsivb9CcxkvR8Mzr66Olr0Ofd8neSbglHJHaHa2MadfoSdNlKYAaafmWfA==",
+ "dependencies": {
+ "domelementtype": "^2.2.0"
+ },
+ "engines": {
+ "node": ">= 4"
+ },
+ "funding": {
+ "url": "https://github.com/fb55/domhandler?sponsor=1"
+ }
+ },
+ "node_modules/domutils": {
+ "version": "2.6.0",
+ "resolved": "https://registry.npmjs.org/domutils/-/domutils-2.6.0.tgz",
+ "integrity": "sha512-y0BezHuy4MDYxh6OvolXYsH+1EMGmFbwv5FKW7ovwMG6zTPWqNPq3WF9ayZssFq+UlKdffGLbOEaghNdaOm1WA==",
+ "dependencies": {
+ "dom-serializer": "^1.0.1",
+ "domelementtype": "^2.2.0",
+ "domhandler": "^4.2.0"
+ },
+ "funding": {
+ "url": "https://github.com/fb55/domutils?sponsor=1"
+ }
+ },
+ "node_modules/dot-prop": {
+ "version": "5.3.0",
+ "resolved": "https://registry.npmjs.org/dot-prop/-/dot-prop-5.3.0.tgz",
+ "integrity": "sha512-QM8q3zDe58hqUqjraQOmzZ1LIH9SWQJTlEKCH4kJ2oQvLZk7RbQXvtDM2XEq3fwkV9CCvvH4LA0AV+ogFsBM2Q==",
+ "dependencies": {
+ "is-obj": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/duplexer3": {
+ "version": "0.1.4",
+ "resolved": "https://registry.npmjs.org/duplexer3/-/duplexer3-0.1.4.tgz",
+ "integrity": "sha1-7gHdHKwO08vH/b6jfcCo8c4ALOI="
+ },
+ "node_modules/duplexify": {
+ "version": "3.7.1",
+ "resolved": "https://registry.npmjs.org/duplexify/-/duplexify-3.7.1.tgz",
+ "integrity": "sha512-07z8uv2wMyS51kKhD1KsdXJg5WQ6t93RneqRxUHnskXVtlYYkLqM0gqStQZ3pj073g687jPCHrqNfCzawLYh5g==",
+ "dependencies": {
+ "end-of-stream": "^1.0.0",
+ "inherits": "^2.0.1",
+ "readable-stream": "^2.0.0",
+ "stream-shift": "^1.0.0"
+ }
+ },
+ "node_modules/ecc-jsbn": {
+ "version": "0.1.2",
+ "resolved": "https://registry.npmjs.org/ecc-jsbn/-/ecc-jsbn-0.1.2.tgz",
+ "integrity": "sha1-OoOpBOVDUyh4dMVkt1SThoSamMk=",
+ "dependencies": {
+ "jsbn": "~0.1.0",
+ "safer-buffer": "^2.1.0"
+ }
+ },
+ "node_modules/ee-first": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz",
+ "integrity": "sha1-WQxhFWsK4vTwJVcyoViyZrxWsh0="
+ },
+ "node_modules/electron-to-chromium": {
+ "version": "1.3.723",
+ "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.3.723.tgz",
+ "integrity": "sha512-L+WXyXI7c7+G1V8ANzRsPI5giiimLAUDC6Zs1ojHHPhYXb3k/iTABFmWjivEtsWrRQymjnO66/rO2ZTABGdmWg=="
+ },
+ "node_modules/elliptic": {
+ "version": "6.5.4",
+ "resolved": "https://registry.npmjs.org/elliptic/-/elliptic-6.5.4.tgz",
+ "integrity": "sha512-iLhC6ULemrljPZb+QutR5TQGB+pdW6KGD5RSegS+8sorOZT+rdQFbsQFJgvN3eRqNALqJer4oQ16YvJHlU8hzQ==",
+ "dependencies": {
+ "bn.js": "^4.11.9",
+ "brorand": "^1.1.0",
+ "hash.js": "^1.0.0",
+ "hmac-drbg": "^1.0.1",
+ "inherits": "^2.0.4",
+ "minimalistic-assert": "^1.0.1",
+ "minimalistic-crypto-utils": "^1.0.1"
+ }
+ },
+ "node_modules/elliptic/node_modules/bn.js": {
+ "version": "4.12.0",
+ "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.12.0.tgz",
+ "integrity": "sha512-c98Bf3tPniI+scsdk237ku1Dc3ujXQTSgyiPUDEOe7tRkhrqridvh8klBv0HCEso1OLOYcHuCv/cS6DNxKH+ZA=="
+ },
+ "node_modules/emoji-regex": {
+ "version": "8.0.0",
+ "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz",
+ "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A=="
+ },
+ "node_modules/emojis-list": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/emojis-list/-/emojis-list-3.0.0.tgz",
+ "integrity": "sha512-/kyM18EfinwXZbno9FyUGeFh87KC8HRQBQGildHZbEuRyWFOmv1U10o9BBp8XVZDVNNuQKyIGIu5ZYAAXJ0V2Q==",
+ "engines": {
+ "node": ">= 4"
+ }
+ },
+ "node_modules/encodeurl": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz",
+ "integrity": "sha1-rT/0yG7C0CkyL1oCw6mmBslbP1k=",
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/end-of-stream": {
+ "version": "1.4.4",
+ "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.4.tgz",
+ "integrity": "sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==",
+ "dependencies": {
+ "once": "^1.4.0"
+ }
+ },
+ "node_modules/enhanced-resolve": {
+ "version": "4.5.0",
+ "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-4.5.0.tgz",
+ "integrity": "sha512-Nv9m36S/vxpsI+Hc4/ZGRs0n9mXqSWGGq49zxb/cJfPAQMbUtttJAlNPS4AQzaBdw/pKskw5bMbekT/Y7W/Wlg==",
+ "dependencies": {
+ "graceful-fs": "^4.1.2",
+ "memory-fs": "^0.5.0",
+ "tapable": "^1.0.0"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/enhanced-resolve/node_modules/memory-fs": {
+ "version": "0.5.0",
+ "resolved": "https://registry.npmjs.org/memory-fs/-/memory-fs-0.5.0.tgz",
+ "integrity": "sha512-jA0rdU5KoQMC0e6ppoNRtpp6vjFq6+NY7r8hywnC7V+1Xj/MtHwGIbB1QaK/dunyjWteJzmkpd7ooeWg10T7GA==",
+ "dependencies": {
+ "errno": "^0.1.3",
+ "readable-stream": "^2.0.1"
+ },
+ "engines": {
+ "node": ">=4.3.0 <5.0.0 || >=5.10"
+ }
+ },
+ "node_modules/entities": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/entities/-/entities-2.1.0.tgz",
+ "integrity": "sha512-hCx1oky9PFrJ611mf0ifBLBRW8lUUVRlFolb5gWRfIELabBlbp9xZvrqZLZAs+NxFnbfQoeGd8wDkygjg7U85w==",
+ "funding": {
+ "url": "https://github.com/fb55/entities?sponsor=1"
+ }
+ },
+ "node_modules/envify": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/envify/-/envify-4.1.0.tgz",
+ "integrity": "sha512-IKRVVoAYr4pIx4yIWNsz9mOsboxlNXiu7TNBnem/K/uTHdkyzXWDzHCK7UTolqBbgaBz0tQHsD3YNls0uIIjiw==",
+ "dependencies": {
+ "esprima": "^4.0.0",
+ "through": "~2.3.4"
+ },
+ "bin": {
+ "envify": "bin/envify"
+ }
+ },
+ "node_modules/envinfo": {
+ "version": "7.8.1",
+ "resolved": "https://registry.npmjs.org/envinfo/-/envinfo-7.8.1.tgz",
+ "integrity": "sha512-/o+BXHmB7ocbHEAs6F2EnG0ogybVVUdkRunTT2glZU9XAaGmhqskrvKwqXuDfNjEO0LZKWdejEEpnq8aM0tOaw==",
+ "bin": {
+ "envinfo": "dist/cli.js"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/errno": {
+ "version": "0.1.8",
+ "resolved": "https://registry.npmjs.org/errno/-/errno-0.1.8.tgz",
+ "integrity": "sha512-dJ6oBr5SQ1VSd9qkk7ByRgb/1SH4JZjCHSW/mr63/QcXO9zLVxvJ6Oy13nio03rxpSnVDDjFor75SjVeZWPW/A==",
+ "dependencies": {
+ "prr": "~1.0.1"
+ },
+ "bin": {
+ "errno": "cli.js"
+ }
+ },
+ "node_modules/error-ex": {
+ "version": "1.3.2",
+ "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz",
+ "integrity": "sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==",
+ "dependencies": {
+ "is-arrayish": "^0.2.1"
+ }
+ },
+ "node_modules/es-abstract": {
+ "version": "1.18.0",
+ "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.18.0.tgz",
+ "integrity": "sha512-LJzK7MrQa8TS0ja2w3YNLzUgJCGPdPOV1yVvezjNnS89D+VR08+Szt2mz3YB2Dck/+w5tfIq/RoUAFqJJGM2yw==",
+ "dependencies": {
+ "call-bind": "^1.0.2",
+ "es-to-primitive": "^1.2.1",
+ "function-bind": "^1.1.1",
+ "get-intrinsic": "^1.1.1",
+ "has": "^1.0.3",
+ "has-symbols": "^1.0.2",
+ "is-callable": "^1.2.3",
+ "is-negative-zero": "^2.0.1",
+ "is-regex": "^1.1.2",
+ "is-string": "^1.0.5",
+ "object-inspect": "^1.9.0",
+ "object-keys": "^1.1.1",
+ "object.assign": "^4.1.2",
+ "string.prototype.trimend": "^1.0.4",
+ "string.prototype.trimstart": "^1.0.4",
+ "unbox-primitive": "^1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/es-to-primitive": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/es-to-primitive/-/es-to-primitive-1.2.1.tgz",
+ "integrity": "sha512-QCOllgZJtaUo9miYBcLChTUaHNjJF3PYs1VidD7AwiEj1kYxKeQTctLAezAOH5ZKRH0g2IgPn6KwB4IT8iRpvA==",
+ "dependencies": {
+ "is-callable": "^1.1.4",
+ "is-date-object": "^1.0.1",
+ "is-symbol": "^1.0.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/es6-promise": {
+ "version": "4.2.8",
+ "resolved": "https://registry.npmjs.org/es6-promise/-/es6-promise-4.2.8.tgz",
+ "integrity": "sha512-HJDGx5daxeIvxdBxvG2cb9g4tEvwIk3i8+nhX0yGrYmZUzbkdg8QbDevheDB8gd0//uPj4c1EQua8Q+MViT0/w=="
+ },
+ "node_modules/escalade": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz",
+ "integrity": "sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/escape-goat": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/escape-goat/-/escape-goat-2.1.1.tgz",
+ "integrity": "sha512-8/uIhbG12Csjy2JEW7D9pHbreaVaS/OpN3ycnyvElTdwM5n6GY6W6e2IPemfvGZeUMqZ9A/3GqIZMgKnBhAw/Q==",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/escape-html": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz",
+ "integrity": "sha1-Aljq5NPQwJdN4cFpGI7wBR0dGYg="
+ },
+ "node_modules/escape-string-regexp": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz",
+ "integrity": "sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ=",
+ "engines": {
+ "node": ">=0.8.0"
+ }
+ },
+ "node_modules/eslint-scope": {
+ "version": "4.0.3",
+ "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-4.0.3.tgz",
+ "integrity": "sha512-p7VutNr1O/QrxysMo3E45FjYDTeXBy0iTltPFNSqKAIfjDSXC+4dj+qfyuD8bfAXrW/y6lW3O76VaYNPKfpKrg==",
+ "dependencies": {
+ "esrecurse": "^4.1.0",
+ "estraverse": "^4.1.1"
+ },
+ "engines": {
+ "node": ">=4.0.0"
+ }
+ },
+ "node_modules/esm": {
+ "version": "3.2.25",
+ "resolved": "https://registry.npmjs.org/esm/-/esm-3.2.25.tgz",
+ "integrity": "sha512-U1suiZ2oDVWv4zPO56S0NcR5QriEahGtdN2OR6FiOG4WJvcjBVFB0qI4+eKoWFH483PKGuLuu6V8Z4T5g63UVA==",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/esprima": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz",
+ "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==",
+ "bin": {
+ "esparse": "bin/esparse.js",
+ "esvalidate": "bin/esvalidate.js"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/esrecurse": {
+ "version": "4.3.0",
+ "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz",
+ "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==",
+ "dependencies": {
+ "estraverse": "^5.2.0"
+ },
+ "engines": {
+ "node": ">=4.0"
+ }
+ },
+ "node_modules/esrecurse/node_modules/estraverse": {
+ "version": "5.2.0",
+ "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.2.0.tgz",
+ "integrity": "sha512-BxbNGGNm0RyRYvUdHpIwv9IWzeM9XClbOxwoATuFdOE7ZE6wHL+HQ5T8hoPM+zHvmKzzsEqhgy0GrQ5X13afiQ==",
+ "engines": {
+ "node": ">=4.0"
+ }
+ },
+ "node_modules/estraverse": {
+ "version": "4.3.0",
+ "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-4.3.0.tgz",
+ "integrity": "sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==",
+ "engines": {
+ "node": ">=4.0"
+ }
+ },
+ "node_modules/esutils": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz",
+ "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/etag": {
+ "version": "1.8.1",
+ "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz",
+ "integrity": "sha1-Qa4u62XvpiJorr/qg6x9eSmbCIc=",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/eventemitter3": {
+ "version": "4.0.7",
+ "resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-4.0.7.tgz",
+ "integrity": "sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw=="
+ },
+ "node_modules/events": {
+ "version": "3.3.0",
+ "resolved": "https://registry.npmjs.org/events/-/events-3.3.0.tgz",
+ "integrity": "sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q==",
+ "engines": {
+ "node": ">=0.8.x"
+ }
+ },
+ "node_modules/eventsource": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/eventsource/-/eventsource-1.1.0.tgz",
+ "integrity": "sha512-VSJjT5oCNrFvCS6igjzPAt5hBzQ2qPBFIbJ03zLI9SE0mxwZpMw6BfJrbFHm1a141AavMEB8JHmBhWAd66PfCg==",
+ "dependencies": {
+ "original": "^1.0.0"
+ },
+ "engines": {
+ "node": ">=0.12.0"
+ }
+ },
+ "node_modules/evp_bytestokey": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/evp_bytestokey/-/evp_bytestokey-1.0.3.tgz",
+ "integrity": "sha512-/f2Go4TognH/KvCISP7OUsHn85hT9nUkxxA9BEWxFn+Oj9o8ZNLm/40hdlgSLyuOimsrTKLUMEorQexp/aPQeA==",
+ "dependencies": {
+ "md5.js": "^1.3.4",
+ "safe-buffer": "^5.1.1"
+ }
+ },
+ "node_modules/execa": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/execa/-/execa-1.0.0.tgz",
+ "integrity": "sha512-adbxcyWV46qiHyvSp50TKt05tB4tK3HcmF7/nxfAdhnox83seTDbwnaqKO4sXRy7roHAIFqJP/Rw/AuEbX61LA==",
+ "dependencies": {
+ "cross-spawn": "^6.0.0",
+ "get-stream": "^4.0.0",
+ "is-stream": "^1.1.0",
+ "npm-run-path": "^2.0.0",
+ "p-finally": "^1.0.0",
+ "signal-exit": "^3.0.0",
+ "strip-eof": "^1.0.0"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/expand-brackets": {
+ "version": "2.1.4",
+ "resolved": "https://registry.npmjs.org/expand-brackets/-/expand-brackets-2.1.4.tgz",
+ "integrity": "sha1-t3c14xXOMPa27/D4OwQVGiJEliI=",
+ "dependencies": {
+ "debug": "^2.3.3",
+ "define-property": "^0.2.5",
+ "extend-shallow": "^2.0.1",
+ "posix-character-classes": "^0.1.0",
+ "regex-not": "^1.0.0",
+ "snapdragon": "^0.8.1",
+ "to-regex": "^3.0.1"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/expand-brackets/node_modules/define-property": {
+ "version": "0.2.5",
+ "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz",
+ "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=",
+ "dependencies": {
+ "is-descriptor": "^0.1.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/expand-brackets/node_modules/is-accessor-descriptor": {
+ "version": "0.1.6",
+ "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-0.1.6.tgz",
+ "integrity": "sha1-qeEss66Nh2cn7u84Q/igiXtcmNY=",
+ "dependencies": {
+ "kind-of": "^3.0.2"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/expand-brackets/node_modules/is-accessor-descriptor/node_modules/kind-of": {
+ "version": "3.2.2",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz",
+ "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=",
+ "dependencies": {
+ "is-buffer": "^1.1.5"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/expand-brackets/node_modules/is-data-descriptor": {
+ "version": "0.1.4",
+ "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-0.1.4.tgz",
+ "integrity": "sha1-C17mSDiOLIYCgueT8YVv7D8wG1Y=",
+ "dependencies": {
+ "kind-of": "^3.0.2"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/expand-brackets/node_modules/is-data-descriptor/node_modules/kind-of": {
+ "version": "3.2.2",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz",
+ "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=",
+ "dependencies": {
+ "is-buffer": "^1.1.5"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/expand-brackets/node_modules/is-descriptor": {
+ "version": "0.1.6",
+ "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-0.1.6.tgz",
+ "integrity": "sha512-avDYr0SB3DwO9zsMov0gKCESFYqCnE4hq/4z3TdUlukEy5t9C0YRq7HLrsN52NAcqXKaepeCD0n+B0arnVG3Hg==",
+ "dependencies": {
+ "is-accessor-descriptor": "^0.1.6",
+ "is-data-descriptor": "^0.1.4",
+ "kind-of": "^5.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/expand-brackets/node_modules/kind-of": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-5.1.0.tgz",
+ "integrity": "sha512-NGEErnH6F2vUuXDh+OlbcKW7/wOcfdRHaZ7VWtqCztfHri/++YKmP51OdWeGPuqCOba6kk2OTe5d02VmTB80Pw==",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/express": {
+ "version": "4.17.1",
+ "resolved": "https://registry.npmjs.org/express/-/express-4.17.1.tgz",
+ "integrity": "sha512-mHJ9O79RqluphRrcw2X/GTh3k9tVv8YcoyY4Kkh4WDMUYKRZUq0h1o0w2rrrxBqM7VoeUVqgb27xlEMXTnYt4g==",
+ "dependencies": {
+ "accepts": "~1.3.7",
+ "array-flatten": "1.1.1",
+ "body-parser": "1.19.0",
+ "content-disposition": "0.5.3",
+ "content-type": "~1.0.4",
+ "cookie": "0.4.0",
+ "cookie-signature": "1.0.6",
+ "debug": "2.6.9",
+ "depd": "~1.1.2",
+ "encodeurl": "~1.0.2",
+ "escape-html": "~1.0.3",
+ "etag": "~1.8.1",
+ "finalhandler": "~1.1.2",
+ "fresh": "0.5.2",
+ "merge-descriptors": "1.0.1",
+ "methods": "~1.1.2",
+ "on-finished": "~2.3.0",
+ "parseurl": "~1.3.3",
+ "path-to-regexp": "0.1.7",
+ "proxy-addr": "~2.0.5",
+ "qs": "6.7.0",
+ "range-parser": "~1.2.1",
+ "safe-buffer": "5.1.2",
+ "send": "0.17.1",
+ "serve-static": "1.14.1",
+ "setprototypeof": "1.1.1",
+ "statuses": "~1.5.0",
+ "type-is": "~1.6.18",
+ "utils-merge": "1.0.1",
+ "vary": "~1.1.2"
+ },
+ "engines": {
+ "node": ">= 0.10.0"
+ }
+ },
+ "node_modules/express/node_modules/array-flatten": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz",
+ "integrity": "sha1-ml9pkFGx5wczKPKgCJaLZOopVdI="
+ },
+ "node_modules/express/node_modules/qs": {
+ "version": "6.7.0",
+ "resolved": "https://registry.npmjs.org/qs/-/qs-6.7.0.tgz",
+ "integrity": "sha512-VCdBRNFTX1fyE7Nb6FYoURo/SPe62QCaAyzJvUjwRaIsc+NePBEniHlvxFmmX56+HZphIGtV0XeCirBtpDrTyQ==",
+ "engines": {
+ "node": ">=0.6"
+ }
+ },
+ "node_modules/extend": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz",
+ "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g=="
+ },
+ "node_modules/extend-shallow": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz",
+ "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=",
+ "dependencies": {
+ "is-extendable": "^0.1.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/extglob": {
+ "version": "2.0.4",
+ "resolved": "https://registry.npmjs.org/extglob/-/extglob-2.0.4.tgz",
+ "integrity": "sha512-Nmb6QXkELsuBr24CJSkilo6UHHgbekK5UiZgfE6UHD3Eb27YC6oD+bhcT+tJ6cl8dmsgdQxnWlcry8ksBIBLpw==",
+ "dependencies": {
+ "array-unique": "^0.3.2",
+ "define-property": "^1.0.0",
+ "expand-brackets": "^2.1.4",
+ "extend-shallow": "^2.0.1",
+ "fragment-cache": "^0.2.1",
+ "regex-not": "^1.0.0",
+ "snapdragon": "^0.8.1",
+ "to-regex": "^3.0.1"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/extglob/node_modules/define-property": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/define-property/-/define-property-1.0.0.tgz",
+ "integrity": "sha1-dp66rz9KY6rTr56NMEybvnm/sOY=",
+ "dependencies": {
+ "is-descriptor": "^1.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/extsprintf": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/extsprintf/-/extsprintf-1.3.0.tgz",
+ "integrity": "sha1-lpGEQOMEGnpBT4xS48V06zw+HgU=",
+ "engines": [
+ "node >=0.6.0"
+ ]
+ },
+ "node_modules/fast-deep-equal": {
+ "version": "3.1.3",
+ "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz",
+ "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q=="
+ },
+ "node_modules/fast-glob": {
+ "version": "2.2.7",
+ "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-2.2.7.tgz",
+ "integrity": "sha512-g1KuQwHOZAmOZMuBtHdxDtju+T2RT8jgCC9aANsbpdiDDTSnjgfuVsIBNKbUeJI3oKMRExcfNDtJl4OhbffMsw==",
+ "dependencies": {
+ "@mrmlnc/readdir-enhanced": "^2.2.1",
+ "@nodelib/fs.stat": "^1.1.2",
+ "glob-parent": "^3.1.0",
+ "is-glob": "^4.0.0",
+ "merge2": "^1.2.3",
+ "micromatch": "^3.1.10"
+ },
+ "engines": {
+ "node": ">=4.0.0"
+ }
+ },
+ "node_modules/fast-json-stable-stringify": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz",
+ "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw=="
+ },
+ "node_modules/faye-websocket": {
+ "version": "0.11.3",
+ "resolved": "https://registry.npmjs.org/faye-websocket/-/faye-websocket-0.11.3.tgz",
+ "integrity": "sha512-D2y4bovYpzziGgbHYtGCMjlJM36vAl/y+xUyn1C+FVx8szd1E+86KwVw6XvYSzOP8iMpm1X0I4xJD+QtUb36OA==",
+ "dependencies": {
+ "websocket-driver": ">=0.5.1"
+ },
+ "engines": {
+ "node": ">=0.8.0"
+ }
+ },
+ "node_modules/figgy-pudding": {
+ "version": "3.5.2",
+ "resolved": "https://registry.npmjs.org/figgy-pudding/-/figgy-pudding-3.5.2.tgz",
+ "integrity": "sha512-0btnI/H8f2pavGMN8w40mlSKOfTK2SVJmBfBeVIj3kNw0swwgzyRq0d5TJVOwodFmtvpPeWPN/MCcfuWF0Ezbw=="
+ },
+ "node_modules/figures": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/figures/-/figures-3.2.0.tgz",
+ "integrity": "sha512-yaduQFRKLXYOGgEn6AZau90j3ggSOyiqXU0F9JZfeXYhNa+Jk4X+s45A2zg5jns87GAFa34BBm2kXw4XpNcbdg==",
+ "dependencies": {
+ "escape-string-regexp": "^1.0.5"
+ },
+ "engines": {
+ "node": ">=8"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/file-loader": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/file-loader/-/file-loader-3.0.1.tgz",
+ "integrity": "sha512-4sNIOXgtH/9WZq4NvlfU3Opn5ynUsqBwSLyM+I7UOwdGigTBYfVVQEwe/msZNX/j4pCJTIM14Fsw66Svo1oVrw==",
+ "dependencies": {
+ "loader-utils": "^1.0.2",
+ "schema-utils": "^1.0.0"
+ },
+ "engines": {
+ "node": ">= 6.9.0"
+ },
+ "peerDependencies": {
+ "webpack": "^4.0.0"
+ }
+ },
+ "node_modules/file-loader/node_modules/schema-utils": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-1.0.0.tgz",
+ "integrity": "sha512-i27Mic4KovM/lnGsy8whRCHhc7VicJajAjTrYg11K9zfZXnYIt4k5F+kZkwjnrhKzLic/HLU4j11mjsz2G/75g==",
+ "dependencies": {
+ "ajv": "^6.1.0",
+ "ajv-errors": "^1.0.0",
+ "ajv-keywords": "^3.1.0"
+ },
+ "engines": {
+ "node": ">= 4"
+ }
+ },
+ "node_modules/file-uri-to-path": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/file-uri-to-path/-/file-uri-to-path-1.0.0.tgz",
+ "integrity": "sha512-0Zt+s3L7Vf1biwWZ29aARiVYLx7iMGnEUl9x33fbB/j3jR81u/O2LbqK+Bm1CDSNDKVtJ/YjwY7TUd5SkeLQLw==",
+ "optional": true
+ },
+ "node_modules/fill-range": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-4.0.0.tgz",
+ "integrity": "sha1-1USBHUKPmOsGpj3EAtJAPDKMOPc=",
+ "dependencies": {
+ "extend-shallow": "^2.0.1",
+ "is-number": "^3.0.0",
+ "repeat-string": "^1.6.1",
+ "to-regex-range": "^2.1.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/finalhandler": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.1.2.tgz",
+ "integrity": "sha512-aAWcW57uxVNrQZqFXjITpW3sIUQmHGG3qSb9mUah9MgMC4NeWhNOlNjXEYq3HjRAvL6arUviZGGJsBg6z0zsWA==",
+ "dependencies": {
+ "debug": "2.6.9",
+ "encodeurl": "~1.0.2",
+ "escape-html": "~1.0.3",
+ "on-finished": "~2.3.0",
+ "parseurl": "~1.3.3",
+ "statuses": "~1.5.0",
+ "unpipe": "~1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/find-cache-dir": {
+ "version": "3.3.1",
+ "resolved": "https://registry.npmjs.org/find-cache-dir/-/find-cache-dir-3.3.1.tgz",
+ "integrity": "sha512-t2GDMt3oGC/v+BMwzmllWDuJF/xcDtE5j/fCGbqDD7OLuJkj0cfh1YSA5VKPvwMeLFLNDBkwOKZ2X85jGLVftQ==",
+ "dependencies": {
+ "commondir": "^1.0.1",
+ "make-dir": "^3.0.2",
+ "pkg-dir": "^4.1.0"
+ },
+ "engines": {
+ "node": ">=8"
+ },
+ "funding": {
+ "url": "https://github.com/avajs/find-cache-dir?sponsor=1"
+ }
+ },
+ "node_modules/find-up": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz",
+ "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==",
+ "dependencies": {
+ "locate-path": "^5.0.0",
+ "path-exists": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/flush-write-stream": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/flush-write-stream/-/flush-write-stream-1.1.1.tgz",
+ "integrity": "sha512-3Z4XhFZ3992uIq0XOqb9AreonueSYphE6oYbpt5+3u06JWklbsPkNv3ZKkP9Bz/r+1MWCaMoSQ28P85+1Yc77w==",
+ "dependencies": {
+ "inherits": "^2.0.3",
+ "readable-stream": "^2.3.6"
+ }
+ },
+ "node_modules/follow-redirects": {
+ "version": "1.14.0",
+ "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.14.0.tgz",
+ "integrity": "sha512-0vRwd7RKQBTt+mgu87mtYeofLFZpTas2S9zY+jIeuLJMNvudIgF52nr19q40HOwH5RrhWIPuj9puybzSJiRrVg==",
+ "funding": [
+ {
+ "type": "individual",
+ "url": "https://github.com/sponsors/RubenVerborgh"
+ }
+ ],
+ "engines": {
+ "node": ">=4.0"
+ },
+ "peerDependenciesMeta": {
+ "debug": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/for-in": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/for-in/-/for-in-1.0.2.tgz",
+ "integrity": "sha1-gQaNKVqBQuwKxybG4iAMMPttXoA=",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/foreach": {
+ "version": "2.0.5",
+ "resolved": "https://registry.npmjs.org/foreach/-/foreach-2.0.5.tgz",
+ "integrity": "sha1-C+4AUBiusmDQo6865ljdATbsG5k="
+ },
+ "node_modules/forever-agent": {
+ "version": "0.6.1",
+ "resolved": "https://registry.npmjs.org/forever-agent/-/forever-agent-0.6.1.tgz",
+ "integrity": "sha1-+8cfDEGt6zf5bFd60e1C2P2sypE=",
+ "engines": {
+ "node": "*"
+ }
+ },
+ "node_modules/form-data": {
+ "version": "2.3.3",
+ "resolved": "https://registry.npmjs.org/form-data/-/form-data-2.3.3.tgz",
+ "integrity": "sha512-1lLKB2Mu3aGP1Q/2eCOx0fNbRMe7XdwktwOruhfqqd0rIJWwN4Dh+E3hrPSlDCXnSR7UtZ1N38rVXm+6+MEhJQ==",
+ "dependencies": {
+ "asynckit": "^0.4.0",
+ "combined-stream": "^1.0.6",
+ "mime-types": "^2.1.12"
+ },
+ "engines": {
+ "node": ">= 0.12"
+ }
+ },
+ "node_modules/forwarded": {
+ "version": "0.1.2",
+ "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.1.2.tgz",
+ "integrity": "sha1-mMI9qxF1ZXuMBXPozszZGw/xjIQ=",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/fragment-cache": {
+ "version": "0.2.1",
+ "resolved": "https://registry.npmjs.org/fragment-cache/-/fragment-cache-0.2.1.tgz",
+ "integrity": "sha1-QpD60n8T6Jvn8zeZxrxaCr//DRk=",
+ "dependencies": {
+ "map-cache": "^0.2.2"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/fresh": {
+ "version": "0.5.2",
+ "resolved": "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz",
+ "integrity": "sha1-PYyt2Q2XZWn6g1qx+OSyOhBWBac=",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/from2": {
+ "version": "2.3.0",
+ "resolved": "https://registry.npmjs.org/from2/-/from2-2.3.0.tgz",
+ "integrity": "sha1-i/tVAr3kpNNs/e6gB/zKIdfjgq8=",
+ "dependencies": {
+ "inherits": "^2.0.1",
+ "readable-stream": "^2.0.0"
+ }
+ },
+ "node_modules/fs-extra": {
+ "version": "7.0.1",
+ "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-7.0.1.tgz",
+ "integrity": "sha512-YJDaCJZEnBmcbw13fvdAM9AwNOJwOzrE4pqMqBq5nFiEqXUqHwlK4B+3pUw6JNvfSPtX05xFHtYy/1ni01eGCw==",
+ "dependencies": {
+ "graceful-fs": "^4.1.2",
+ "jsonfile": "^4.0.0",
+ "universalify": "^0.1.0"
+ },
+ "engines": {
+ "node": ">=6 <7 || >=8"
+ }
+ },
+ "node_modules/fs-write-stream-atomic": {
+ "version": "1.0.10",
+ "resolved": "https://registry.npmjs.org/fs-write-stream-atomic/-/fs-write-stream-atomic-1.0.10.tgz",
+ "integrity": "sha1-tH31NJPvkR33VzHnCp3tAYnbQMk=",
+ "dependencies": {
+ "graceful-fs": "^4.1.2",
+ "iferr": "^0.1.5",
+ "imurmurhash": "^0.1.4",
+ "readable-stream": "1 || 2"
+ }
+ },
+ "node_modules/fs.realpath": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz",
+ "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8="
+ },
+ "node_modules/fsevents": {
+ "version": "1.2.13",
+ "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-1.2.13.tgz",
+ "integrity": "sha512-oWb1Z6mkHIskLzEJ/XWX0srkpkTQ7vaopMQkyaEIoq0fmtFVxOthb8cCxeT+p3ynTdkk/RZwbgG4brR5BeWECw==",
+ "deprecated": "fsevents 1 will break on node v14+ and could be using insecure binaries. Upgrade to fsevents 2.",
+ "hasInstallScript": true,
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "dependencies": {
+ "bindings": "^1.5.0",
+ "nan": "^2.12.1"
+ },
+ "engines": {
+ "node": ">= 4.0"
+ }
+ },
+ "node_modules/function-bind": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz",
+ "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A=="
+ },
+ "node_modules/fuse.js": {
+ "version": "3.6.1",
+ "resolved": "https://registry.npmjs.org/fuse.js/-/fuse.js-3.6.1.tgz",
+ "integrity": "sha512-hT9yh/tiinkmirKrlv4KWOjztdoZo1mx9Qh4KvWqC7isoXwdUY3PNWUxceF4/qO9R6riA2C29jdTOeQOIROjgw==",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/gensync": {
+ "version": "1.0.0-beta.2",
+ "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz",
+ "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==",
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/get-caller-file": {
+ "version": "2.0.5",
+ "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz",
+ "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==",
+ "engines": {
+ "node": "6.* || 8.* || >= 10.*"
+ }
+ },
+ "node_modules/get-intrinsic": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.1.1.tgz",
+ "integrity": "sha512-kWZrnVM42QCiEA2Ig1bG8zjoIMOgxWwYCEeNdwY6Tv/cOSeGpcoX4pXHfKUxNKVoArnrEr2e9srnAxxGIraS9Q==",
+ "dependencies": {
+ "function-bind": "^1.1.1",
+ "has": "^1.0.3",
+ "has-symbols": "^1.0.1"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/get-stream": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-4.1.0.tgz",
+ "integrity": "sha512-GMat4EJ5161kIy2HevLlr4luNjBgvmj413KaQA7jt4V8B4RDsfpHk7WQ9GVqfYyyx8OS/L66Kox+rJRNklLK7w==",
+ "dependencies": {
+ "pump": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/get-value": {
+ "version": "2.0.6",
+ "resolved": "https://registry.npmjs.org/get-value/-/get-value-2.0.6.tgz",
+ "integrity": "sha1-3BXKHGcjh8p2vTesCjlbogQqLCg=",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/getpass": {
+ "version": "0.1.7",
+ "resolved": "https://registry.npmjs.org/getpass/-/getpass-0.1.7.tgz",
+ "integrity": "sha1-Xv+OPmhNVprkyysSgmBOi6YhSfo=",
+ "dependencies": {
+ "assert-plus": "^1.0.0"
+ }
+ },
+ "node_modules/glob": {
+ "version": "7.1.6",
+ "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.6.tgz",
+ "integrity": "sha512-LwaxwyZ72Lk7vZINtNNrywX0ZuLyStrdDtabefZKAY5ZGJhVtgdznluResxNmPitE0SAO+O26sWTHeKSI2wMBA==",
+ "dependencies": {
+ "fs.realpath": "^1.0.0",
+ "inflight": "^1.0.4",
+ "inherits": "2",
+ "minimatch": "^3.0.4",
+ "once": "^1.3.0",
+ "path-is-absolute": "^1.0.0"
+ },
+ "engines": {
+ "node": "*"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/isaacs"
+ }
+ },
+ "node_modules/glob-parent": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-3.1.0.tgz",
+ "integrity": "sha1-nmr2KZ2NO9K9QEMIMr0RPfkGxa4=",
+ "dependencies": {
+ "is-glob": "^3.1.0",
+ "path-dirname": "^1.0.0"
+ }
+ },
+ "node_modules/glob-parent/node_modules/is-glob": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-3.1.0.tgz",
+ "integrity": "sha1-e6WuJCF4BKxwcHuWkiVnSGzD6Eo=",
+ "dependencies": {
+ "is-extglob": "^2.1.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/glob-to-regexp": {
+ "version": "0.3.0",
+ "resolved": "https://registry.npmjs.org/glob-to-regexp/-/glob-to-regexp-0.3.0.tgz",
+ "integrity": "sha1-jFoUlNIGbFcMw7/kSWF1rMTVAqs="
+ },
+ "node_modules/global": {
+ "version": "4.4.0",
+ "resolved": "https://registry.npmjs.org/global/-/global-4.4.0.tgz",
+ "integrity": "sha512-wv/LAoHdRE3BeTGz53FAamhGlPLhlssK45usmGFThIi4XqnBmjKQ16u+RNbP7WvigRZDxUsM0J3gcQ5yicaL0w==",
+ "dependencies": {
+ "min-document": "^2.19.0",
+ "process": "^0.11.10"
+ }
+ },
+ "node_modules/global-dirs": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/global-dirs/-/global-dirs-2.1.0.tgz",
+ "integrity": "sha512-MG6kdOUh/xBnyo9cJFeIKkLEc1AyFq42QTU4XiX51i2NEdxLxLWXIjEjmqKeSuKR7pAZjTqUVoT2b2huxVLgYQ==",
+ "dependencies": {
+ "ini": "1.3.7"
+ },
+ "engines": {
+ "node": ">=8"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/globals": {
+ "version": "11.12.0",
+ "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz",
+ "integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==",
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/globby": {
+ "version": "9.2.0",
+ "resolved": "https://registry.npmjs.org/globby/-/globby-9.2.0.tgz",
+ "integrity": "sha512-ollPHROa5mcxDEkwg6bPt3QbEf4pDQSNtd6JPL1YvOvAo/7/0VAm9TccUeoTmarjPw4pfUthSCqcyfNB1I3ZSg==",
+ "dependencies": {
+ "@types/glob": "^7.1.1",
+ "array-union": "^1.0.2",
+ "dir-glob": "^2.2.2",
+ "fast-glob": "^2.2.6",
+ "glob": "^7.1.3",
+ "ignore": "^4.0.3",
+ "pify": "^4.0.1",
+ "slash": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/good-listener": {
+ "version": "1.2.2",
+ "resolved": "https://registry.npmjs.org/good-listener/-/good-listener-1.2.2.tgz",
+ "integrity": "sha1-1TswzfkxPf+33JoNR3CWqm0UXFA=",
+ "optional": true,
+ "dependencies": {
+ "delegate": "^3.1.2"
+ }
+ },
+ "node_modules/got": {
+ "version": "9.6.0",
+ "resolved": "https://registry.npmjs.org/got/-/got-9.6.0.tgz",
+ "integrity": "sha512-R7eWptXuGYxwijs0eV+v3o6+XH1IqVK8dJOEecQfTmkncw9AV4dcw/Dhxi8MdlqPthxxpZyizMzyg8RTmEsG+Q==",
+ "dependencies": {
+ "@sindresorhus/is": "^0.14.0",
+ "@szmarczak/http-timer": "^1.1.2",
+ "cacheable-request": "^6.0.0",
+ "decompress-response": "^3.3.0",
+ "duplexer3": "^0.1.4",
+ "get-stream": "^4.1.0",
+ "lowercase-keys": "^1.0.1",
+ "mimic-response": "^1.0.1",
+ "p-cancelable": "^1.0.0",
+ "to-readable-stream": "^1.0.0",
+ "url-parse-lax": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=8.6"
+ }
+ },
+ "node_modules/graceful-fs": {
+ "version": "4.2.6",
+ "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.6.tgz",
+ "integrity": "sha512-nTnJ528pbqxYanhpDYsi4Rd8MAeaBA67+RZ10CM1m3bTAVFEDcd5AuA4a6W5YkGZ1iNXHzZz8T6TBKLeBuNriQ=="
+ },
+ "node_modules/gray-matter": {
+ "version": "4.0.3",
+ "resolved": "https://registry.npmjs.org/gray-matter/-/gray-matter-4.0.3.tgz",
+ "integrity": "sha512-5v6yZd4JK3eMI3FqqCouswVqwugaA9r4dNZB1wwcmrD02QkV5H0y7XBQW8QwQqEaZY1pM9aqORSORhJRdNK44Q==",
+ "dependencies": {
+ "js-yaml": "^3.13.1",
+ "kind-of": "^6.0.2",
+ "section-matter": "^1.0.0",
+ "strip-bom-string": "^1.0.0"
+ },
+ "engines": {
+ "node": ">=6.0"
+ }
+ },
+ "node_modules/handle-thing": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/handle-thing/-/handle-thing-2.0.1.tgz",
+ "integrity": "sha512-9Qn4yBxelxoh2Ow62nP+Ka/kMnOXRi8BXnRaUwezLNhqelnN49xKz4F/dPP8OYLxLxq6JDtZb2i9XznUQbNPTg=="
+ },
+ "node_modules/har-schema": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/har-schema/-/har-schema-2.0.0.tgz",
+ "integrity": "sha1-qUwiJOvKwEeCoNkDVSHyRzW37JI=",
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/har-validator": {
+ "version": "5.1.5",
+ "resolved": "https://registry.npmjs.org/har-validator/-/har-validator-5.1.5.tgz",
+ "integrity": "sha512-nmT2T0lljbxdQZfspsno9hgrG3Uir6Ks5afism62poxqBM6sDnMEuPmzTq8XN0OEwqKLLdh1jQI3qyE66Nzb3w==",
+ "deprecated": "this library is no longer supported",
+ "dependencies": {
+ "ajv": "^6.12.3",
+ "har-schema": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/has": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/has/-/has-1.0.3.tgz",
+ "integrity": "sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==",
+ "dependencies": {
+ "function-bind": "^1.1.1"
+ },
+ "engines": {
+ "node": ">= 0.4.0"
+ }
+ },
+ "node_modules/has-ansi": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/has-ansi/-/has-ansi-2.0.0.tgz",
+ "integrity": "sha1-NPUEnOHs3ysGSa8+8k5F7TVBbZE=",
+ "dependencies": {
+ "ansi-regex": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/has-ansi/node_modules/ansi-regex": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-2.1.1.tgz",
+ "integrity": "sha1-w7M6te42DYbg5ijwRorn7yfWVN8=",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/has-bigints": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/has-bigints/-/has-bigints-1.0.1.tgz",
+ "integrity": "sha512-LSBS2LjbNBTf6287JEbEzvJgftkF5qFkmCo9hDRpAzKhUOlJ+hx8dd4USs00SgsUNwc4617J9ki5YtEClM2ffA==",
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/has-flag": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz",
+ "integrity": "sha1-tdRU3CGZriJWmfNGfloH87lVuv0=",
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/has-symbols": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.2.tgz",
+ "integrity": "sha512-chXa79rL/UC2KlX17jo3vRGz0azaWEx5tGqZg5pO3NUyEJVB17dMruQlzCCOfUvElghKcm5194+BCRvi2Rv/Gw==",
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/has-value": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/has-value/-/has-value-1.0.0.tgz",
+ "integrity": "sha1-GLKB2lhbHFxR3vJMkw7SmgvmsXc=",
+ "dependencies": {
+ "get-value": "^2.0.6",
+ "has-values": "^1.0.0",
+ "isobject": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/has-values": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/has-values/-/has-values-1.0.0.tgz",
+ "integrity": "sha1-lbC2P+whRmGab+V/51Yo1aOe/k8=",
+ "dependencies": {
+ "is-number": "^3.0.0",
+ "kind-of": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/has-values/node_modules/kind-of": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-4.0.0.tgz",
+ "integrity": "sha1-IIE989cSkosgc3hpGkUGb65y3Vc=",
+ "dependencies": {
+ "is-buffer": "^1.1.5"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/has-yarn": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/has-yarn/-/has-yarn-2.1.0.tgz",
+ "integrity": "sha512-UqBRqi4ju7T+TqGNdqAO0PaSVGsDGJUBQvk9eUWNGRY1CFGDzYhLWoM7JQEemnlvVcv/YEmc2wNW8BC24EnUsw==",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/hash-base": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/hash-base/-/hash-base-3.1.0.tgz",
+ "integrity": "sha512-1nmYp/rhMDiE7AYkDw+lLwlAzz0AntGIe51F3RfFfEqyQ3feY2eI/NcwC6umIQVOASPMsWJLJScWKSSvzL9IVA==",
+ "dependencies": {
+ "inherits": "^2.0.4",
+ "readable-stream": "^3.6.0",
+ "safe-buffer": "^5.2.0"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/hash-base/node_modules/readable-stream": {
+ "version": "3.6.0",
+ "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.0.tgz",
+ "integrity": "sha512-BViHy7LKeTz4oNnkcLJ+lVSL6vpiFeX6/d3oSH8zCW7UxP2onchk+vTGB143xuFjHS3deTgkKoXXymXqymiIdA==",
+ "dependencies": {
+ "inherits": "^2.0.3",
+ "string_decoder": "^1.1.1",
+ "util-deprecate": "^1.0.1"
+ },
+ "engines": {
+ "node": ">= 6"
+ }
+ },
+ "node_modules/hash-base/node_modules/safe-buffer": {
+ "version": "5.2.1",
+ "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz",
+ "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==",
+ "funding": [
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/feross"
+ },
+ {
+ "type": "patreon",
+ "url": "https://www.patreon.com/feross"
+ },
+ {
+ "type": "consulting",
+ "url": "https://feross.org/support"
+ }
+ ]
+ },
+ "node_modules/hash-sum": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/hash-sum/-/hash-sum-1.0.2.tgz",
+ "integrity": "sha1-M7QHd3VMZDJXPBIMw4CLvRDUfwQ="
+ },
+ "node_modules/hash.js": {
+ "version": "1.1.7",
+ "resolved": "https://registry.npmjs.org/hash.js/-/hash.js-1.1.7.tgz",
+ "integrity": "sha512-taOaskGt4z4SOANNseOviYDvjEJinIkRgmp7LbKP2YTTmVxWBl87s/uzK9r+44BclBSp2X7K1hqeNfz9JbBeXA==",
+ "dependencies": {
+ "inherits": "^2.0.3",
+ "minimalistic-assert": "^1.0.1"
+ }
+ },
+ "node_modules/he": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/he/-/he-1.2.0.tgz",
+ "integrity": "sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw==",
+ "bin": {
+ "he": "bin/he"
+ }
+ },
+ "node_modules/hex-color-regex": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/hex-color-regex/-/hex-color-regex-1.1.0.tgz",
+ "integrity": "sha512-l9sfDFsuqtOqKDsQdqrMRk0U85RZc0RtOR9yPI7mRVOa4FsR/BVnZ0shmQRM96Ji99kYZP/7hn1cedc1+ApsTQ=="
+ },
+ "node_modules/hmac-drbg": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/hmac-drbg/-/hmac-drbg-1.0.1.tgz",
+ "integrity": "sha1-0nRXAQJabHdabFRXk+1QL8DGSaE=",
+ "dependencies": {
+ "hash.js": "^1.0.3",
+ "minimalistic-assert": "^1.0.0",
+ "minimalistic-crypto-utils": "^1.0.1"
+ }
+ },
+ "node_modules/hogan.js": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/hogan.js/-/hogan.js-3.0.2.tgz",
+ "integrity": "sha1-TNnhq9QpQUbnZ55B14mHMrAse/0=",
+ "dependencies": {
+ "mkdirp": "0.3.0",
+ "nopt": "1.0.10"
+ },
+ "bin": {
+ "hulk": "bin/hulk"
+ }
+ },
+ "node_modules/hogan.js/node_modules/mkdirp": {
+ "version": "0.3.0",
+ "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.3.0.tgz",
+ "integrity": "sha1-G79asbqCevI1dRQ0kEJkVfSB/h4=",
+ "deprecated": "Legacy versions of mkdirp are no longer supported. Please update to mkdirp 1.x. (Note that the API surface has changed to use Promises in 1.x.)",
+ "engines": {
+ "node": "*"
+ }
+ },
+ "node_modules/hotkeys-js": {
+ "version": "3.8.1",
+ "resolved": "https://registry.npmjs.org/hotkeys-js/-/hotkeys-js-3.8.1.tgz",
+ "integrity": "sha512-YlhVQtyG9f1b7GhtzdhR0Pl+cImD1ZrKI6zYUa7QLd0zuThiL7RzZ+ANJyy7z+kmcCpNYBf5PjBa3CjiQ5PFpw=="
+ },
+ "node_modules/hpack.js": {
+ "version": "2.1.6",
+ "resolved": "https://registry.npmjs.org/hpack.js/-/hpack.js-2.1.6.tgz",
+ "integrity": "sha1-h3dMCUnlE/QuhFdbPEVoH63ioLI=",
+ "dependencies": {
+ "inherits": "^2.0.1",
+ "obuf": "^1.0.0",
+ "readable-stream": "^2.0.1",
+ "wbuf": "^1.1.0"
+ }
+ },
+ "node_modules/hsl-regex": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/hsl-regex/-/hsl-regex-1.0.0.tgz",
+ "integrity": "sha1-1JMwx4ntgZ4nakwNJy3/owsY/m4="
+ },
+ "node_modules/hsla-regex": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/hsla-regex/-/hsla-regex-1.0.0.tgz",
+ "integrity": "sha1-wc56MWjIxmFAM6S194d/OyJfnDg="
+ },
+ "node_modules/html-entities": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/html-entities/-/html-entities-1.4.0.tgz",
+ "integrity": "sha512-8nxjcBcd8wovbeKx7h3wTji4e6+rhaVuPNpMqwWgnHh+N9ToqsCs6XztWRBPQ+UtzsoMAdKZtUENoVzU/EMtZA=="
+ },
+ "node_modules/html-minifier": {
+ "version": "3.5.21",
+ "resolved": "https://registry.npmjs.org/html-minifier/-/html-minifier-3.5.21.tgz",
+ "integrity": "sha512-LKUKwuJDhxNa3uf/LPR/KVjm/l3rBqtYeCOAekvG8F1vItxMUpueGd94i/asDDr8/1u7InxzFA5EeGjhhG5mMA==",
+ "dependencies": {
+ "camel-case": "3.0.x",
+ "clean-css": "4.2.x",
+ "commander": "2.17.x",
+ "he": "1.2.x",
+ "param-case": "2.1.x",
+ "relateurl": "0.2.x",
+ "uglify-js": "3.4.x"
+ },
+ "bin": {
+ "html-minifier": "cli.js"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/html-minifier/node_modules/commander": {
+ "version": "2.17.1",
+ "resolved": "https://registry.npmjs.org/commander/-/commander-2.17.1.tgz",
+ "integrity": "sha512-wPMUt6FnH2yzG95SA6mzjQOEKUU3aLaDEmzs1ti+1E9h+CsrZghRlqEM/EJ4KscsQVG8uNN4uVreUeT8+drlgg=="
+ },
+ "node_modules/html-tags": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/html-tags/-/html-tags-3.1.0.tgz",
+ "integrity": "sha512-1qYz89hW3lFDEazhjW0yVAV87lw8lVkrJocr72XmBkMKsoSVJCQx3W8BXsC7hO2qAt8BoVjYjtAcZ9perqGnNg==",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/htmlparser2": {
+ "version": "6.1.0",
+ "resolved": "https://registry.npmjs.org/htmlparser2/-/htmlparser2-6.1.0.tgz",
+ "integrity": "sha512-gyyPk6rgonLFEDGoeRgQNaEUvdJ4ktTmmUh/h2t7s+M8oPpIPxgNACWa+6ESR57kXstwqPiCut0V8NRpcwgU7A==",
+ "funding": [
+ "https://github.com/fb55/htmlparser2?sponsor=1",
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/fb55"
+ }
+ ],
+ "dependencies": {
+ "domelementtype": "^2.0.1",
+ "domhandler": "^4.0.0",
+ "domutils": "^2.5.2",
+ "entities": "^2.0.0"
+ }
+ },
+ "node_modules/http-cache-semantics": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/http-cache-semantics/-/http-cache-semantics-4.1.0.tgz",
+ "integrity": "sha512-carPklcUh7ROWRK7Cv27RPtdhYhUsela/ue5/jKzjegVvXDqM2ILE9Q2BGn9JZJh1g87cp56su/FgQSzcWS8cQ=="
+ },
+ "node_modules/http-deceiver": {
+ "version": "1.2.7",
+ "resolved": "https://registry.npmjs.org/http-deceiver/-/http-deceiver-1.2.7.tgz",
+ "integrity": "sha1-+nFolEq5pRnTN8sL7HKE3D5yPYc="
+ },
+ "node_modules/http-errors": {
+ "version": "1.7.2",
+ "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-1.7.2.tgz",
+ "integrity": "sha512-uUQBt3H/cSIVfch6i1EuPNy/YsRSOUBXTVfZ+yR7Zjez3qjBz6i9+i4zjNaoqcoFVI4lQJ5plg63TvGfRSDCRg==",
+ "dependencies": {
+ "depd": "~1.1.2",
+ "inherits": "2.0.3",
+ "setprototypeof": "1.1.1",
+ "statuses": ">= 1.5.0 < 2",
+ "toidentifier": "1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/http-errors/node_modules/inherits": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz",
+ "integrity": "sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4="
+ },
+ "node_modules/http-parser-js": {
+ "version": "0.5.3",
+ "resolved": "https://registry.npmjs.org/http-parser-js/-/http-parser-js-0.5.3.tgz",
+ "integrity": "sha512-t7hjvef/5HEK7RWTdUzVUhl8zkEu+LlaE0IYzdMuvbSDipxBRpOn4Uhw8ZyECEa808iVT8XCjzo6xmYt4CiLZg=="
+ },
+ "node_modules/http-proxy": {
+ "version": "1.18.1",
+ "resolved": "https://registry.npmjs.org/http-proxy/-/http-proxy-1.18.1.tgz",
+ "integrity": "sha512-7mz/721AbnJwIVbnaSv1Cz3Am0ZLT/UBwkC92VlxhXv/k/BBQfM2fXElQNC27BVGr0uwUpplYPQM9LnaBMR5NQ==",
+ "dependencies": {
+ "eventemitter3": "^4.0.0",
+ "follow-redirects": "^1.0.0",
+ "requires-port": "^1.0.0"
+ },
+ "engines": {
+ "node": ">=8.0.0"
+ }
+ },
+ "node_modules/http-proxy-middleware": {
+ "version": "0.19.1",
+ "resolved": "https://registry.npmjs.org/http-proxy-middleware/-/http-proxy-middleware-0.19.1.tgz",
+ "integrity": "sha512-yHYTgWMQO8VvwNS22eLLloAkvungsKdKTLO8AJlftYIKNfJr3GK3zK0ZCfzDDGUBttdGc8xFy1mCitvNKQtC3Q==",
+ "dependencies": {
+ "http-proxy": "^1.17.0",
+ "is-glob": "^4.0.0",
+ "lodash": "^4.17.11",
+ "micromatch": "^3.1.10"
+ },
+ "engines": {
+ "node": ">=4.0.0"
+ }
+ },
+ "node_modules/http-signature": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/http-signature/-/http-signature-1.2.0.tgz",
+ "integrity": "sha1-muzZJRFHcvPZW2WmCruPfBj7rOE=",
+ "dependencies": {
+ "assert-plus": "^1.0.0",
+ "jsprim": "^1.2.2",
+ "sshpk": "^1.7.0"
+ },
+ "engines": {
+ "node": ">=0.8",
+ "npm": ">=1.3.7"
+ }
+ },
+ "node_modules/https-browserify": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/https-browserify/-/https-browserify-1.0.0.tgz",
+ "integrity": "sha1-7AbBDgo0wPL68Zn3/X/Hj//QPHM="
+ },
+ "node_modules/iconv-lite": {
+ "version": "0.4.24",
+ "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz",
+ "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==",
+ "dependencies": {
+ "safer-buffer": ">= 2.1.2 < 3"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/icss-replace-symbols": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/icss-replace-symbols/-/icss-replace-symbols-1.1.0.tgz",
+ "integrity": "sha1-Bupvg2ead0njhs/h/oEq5dsiPe0="
+ },
+ "node_modules/icss-utils": {
+ "version": "4.1.1",
+ "resolved": "https://registry.npmjs.org/icss-utils/-/icss-utils-4.1.1.tgz",
+ "integrity": "sha512-4aFq7wvWyMHKgxsH8QQtGpvbASCf+eM3wPRLI6R+MgAnTCZ6STYsRvttLvRWK0Nfif5piF394St3HeJDaljGPA==",
+ "dependencies": {
+ "postcss": "^7.0.14"
+ },
+ "engines": {
+ "node": ">= 6"
+ }
+ },
+ "node_modules/ieee754": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.2.1.tgz",
+ "integrity": "sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==",
+ "funding": [
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/feross"
+ },
+ {
+ "type": "patreon",
+ "url": "https://www.patreon.com/feross"
+ },
+ {
+ "type": "consulting",
+ "url": "https://feross.org/support"
+ }
+ ]
+ },
+ "node_modules/iferr": {
+ "version": "0.1.5",
+ "resolved": "https://registry.npmjs.org/iferr/-/iferr-0.1.5.tgz",
+ "integrity": "sha1-xg7taebY/bazEEofy8ocGS3FtQE="
+ },
+ "node_modules/ignore": {
+ "version": "4.0.6",
+ "resolved": "https://registry.npmjs.org/ignore/-/ignore-4.0.6.tgz",
+ "integrity": "sha512-cyFDKrqc/YdcWFniJhzI42+AzS+gNwmUzOSFcRCQYwySuBBBy/KjuxWLZ/FHEH6Moq1NizMOBWyTcv8O4OZIMg==",
+ "engines": {
+ "node": ">= 4"
+ }
+ },
+ "node_modules/immediate": {
+ "version": "3.3.0",
+ "resolved": "https://registry.npmjs.org/immediate/-/immediate-3.3.0.tgz",
+ "integrity": "sha512-HR7EVodfFUdQCTIeySw+WDRFJlPcLOJbXfwwZ7Oom6tjsvZ3bOkCDJHehQC3nxJrv7+f9XecwazynjU8e4Vw3Q=="
+ },
+ "node_modules/import-cwd": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/import-cwd/-/import-cwd-2.1.0.tgz",
+ "integrity": "sha1-qmzzbnInYShcs3HsZRn1PiQ1sKk=",
+ "dependencies": {
+ "import-from": "^2.1.0"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/import-fresh": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-2.0.0.tgz",
+ "integrity": "sha1-2BNVwVYS04bGH53dOSLUMEgipUY=",
+ "dependencies": {
+ "caller-path": "^2.0.0",
+ "resolve-from": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/import-from": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/import-from/-/import-from-2.1.0.tgz",
+ "integrity": "sha1-M1238qev/VOqpHHUuAId7ja387E=",
+ "dependencies": {
+ "resolve-from": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/import-lazy": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/import-lazy/-/import-lazy-2.1.0.tgz",
+ "integrity": "sha1-BWmOPUXIjo1+nZLLBYTnfwlvPkM=",
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/import-local": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/import-local/-/import-local-2.0.0.tgz",
+ "integrity": "sha512-b6s04m3O+s3CGSbqDIyP4R6aAwAeYlVq9+WUWep6iHa8ETRf9yei1U48C5MmfJmV9AiLYYBKPMq/W+/WRpQmCQ==",
+ "dependencies": {
+ "pkg-dir": "^3.0.0",
+ "resolve-cwd": "^2.0.0"
+ },
+ "bin": {
+ "import-local-fixture": "fixtures/cli.js"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/import-local/node_modules/find-up": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz",
+ "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==",
+ "dependencies": {
+ "locate-path": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/import-local/node_modules/locate-path": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz",
+ "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==",
+ "dependencies": {
+ "p-locate": "^3.0.0",
+ "path-exists": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/import-local/node_modules/p-locate": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz",
+ "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==",
+ "dependencies": {
+ "p-limit": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/import-local/node_modules/path-exists": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz",
+ "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=",
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/import-local/node_modules/pkg-dir": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-3.0.0.tgz",
+ "integrity": "sha512-/E57AYkoeQ25qkxMj5PBOVgF8Kiu/h7cYS30Z5+R7WaiCCBfLq58ZI/dSeaEKb9WVJV5n/03QwrN3IeWIFllvw==",
+ "dependencies": {
+ "find-up": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/imurmurhash": {
+ "version": "0.1.4",
+ "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz",
+ "integrity": "sha1-khi5srkoojixPcT7a21XbyMUU+o=",
+ "engines": {
+ "node": ">=0.8.19"
+ }
+ },
+ "node_modules/indexes-of": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/indexes-of/-/indexes-of-1.0.1.tgz",
+ "integrity": "sha1-8w9xbI4r00bHtn0985FVZqfAVgc="
+ },
+ "node_modules/infer-owner": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/infer-owner/-/infer-owner-1.0.4.tgz",
+ "integrity": "sha512-IClj+Xz94+d7irH5qRyfJonOdfTzuDaifE6ZPWfx0N0+/ATZCbuTPq2prFl526urkQd90WyUKIh1DfBQ2hMz9A=="
+ },
+ "node_modules/inflight": {
+ "version": "1.0.6",
+ "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz",
+ "integrity": "sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk=",
+ "dependencies": {
+ "once": "^1.3.0",
+ "wrappy": "1"
+ }
+ },
+ "node_modules/inherits": {
+ "version": "2.0.4",
+ "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz",
+ "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ=="
+ },
+ "node_modules/ini": {
+ "version": "1.3.7",
+ "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.7.tgz",
+ "integrity": "sha512-iKpRpXP+CrP2jyrxvg1kMUpXDyRUFDWurxbnVT1vQPx+Wz9uCYsMIqYuSBLV+PAaZG/d7kRLKRFc9oDMsH+mFQ=="
+ },
+ "node_modules/internal-ip": {
+ "version": "4.3.0",
+ "resolved": "https://registry.npmjs.org/internal-ip/-/internal-ip-4.3.0.tgz",
+ "integrity": "sha512-S1zBo1D6zcsyuC6PMmY5+55YMILQ9av8lotMx447Bq6SAgo/sDK6y6uUKmuYhW7eacnIhFfsPmCNYdDzsnnDCg==",
+ "dependencies": {
+ "default-gateway": "^4.2.0",
+ "ipaddr.js": "^1.9.0"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/ip": {
+ "version": "1.1.5",
+ "resolved": "https://registry.npmjs.org/ip/-/ip-1.1.5.tgz",
+ "integrity": "sha1-vd7XARQpCCjAoDnnLvJfWq7ENUo="
+ },
+ "node_modules/ip-regex": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/ip-regex/-/ip-regex-2.1.0.tgz",
+ "integrity": "sha1-+ni/XS5pE8kRzp+BnuUUa7bYROk=",
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/ipaddr.js": {
+ "version": "1.9.1",
+ "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.1.tgz",
+ "integrity": "sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==",
+ "engines": {
+ "node": ">= 0.10"
+ }
+ },
+ "node_modules/is-absolute-url": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/is-absolute-url/-/is-absolute-url-2.1.0.tgz",
+ "integrity": "sha1-UFMN+4T8yap9vnhS6Do3uTufKqY=",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/is-accessor-descriptor": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz",
+ "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==",
+ "dependencies": {
+ "kind-of": "^6.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/is-arguments": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/is-arguments/-/is-arguments-1.1.0.tgz",
+ "integrity": "sha512-1Ij4lOMPl/xB5kBDn7I+b2ttPMKa8szhEIrXDuXQD/oe3HJLTLhqhgGspwgyGd6MOywBUqVvYicF72lkgDnIHg==",
+ "dependencies": {
+ "call-bind": "^1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/is-arrayish": {
+ "version": "0.2.1",
+ "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz",
+ "integrity": "sha1-d8mYQFJ6qOyxqLppe4BkWnqSap0="
+ },
+ "node_modules/is-bigint": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/is-bigint/-/is-bigint-1.0.1.tgz",
+ "integrity": "sha512-J0ELF4yHFxHy0cmSxZuheDOz2luOdVvqjwmEcj8H/L1JHeuEDSDbeRP+Dk9kFVk5RTFzbucJ2Kb9F7ixY2QaCg==",
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/is-binary-path": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-1.0.1.tgz",
+ "integrity": "sha1-dfFmQrSA8YenEcgUFh/TpKdlWJg=",
+ "dependencies": {
+ "binary-extensions": "^1.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/is-boolean-object": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/is-boolean-object/-/is-boolean-object-1.1.0.tgz",
+ "integrity": "sha512-a7Uprx8UtD+HWdyYwnD1+ExtTgqQtD2k/1yJgtXP6wnMm8byhkoTZRl+95LLThpzNZJ5aEvi46cdH+ayMFRwmA==",
+ "dependencies": {
+ "call-bind": "^1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/is-buffer": {
+ "version": "1.1.6",
+ "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-1.1.6.tgz",
+ "integrity": "sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w=="
+ },
+ "node_modules/is-callable": {
+ "version": "1.2.3",
+ "resolved": "https://registry.npmjs.org/is-callable/-/is-callable-1.2.3.tgz",
+ "integrity": "sha512-J1DcMe8UYTBSrKezuIUTUwjXsho29693unXM2YhJUTR2txK/eG47bvNa/wipPFmZFgr/N6f1GA66dv0mEyTIyQ==",
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/is-ci": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/is-ci/-/is-ci-2.0.0.tgz",
+ "integrity": "sha512-YfJT7rkpQB0updsdHLGWrvhBJfcfzNNawYDNIyQXJz0IViGf75O8EBPKSdvw2rF+LGCsX4FZ8tcr3b19LcZq4w==",
+ "dependencies": {
+ "ci-info": "^2.0.0"
+ },
+ "bin": {
+ "is-ci": "bin.js"
+ }
+ },
+ "node_modules/is-color-stop": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/is-color-stop/-/is-color-stop-1.1.0.tgz",
+ "integrity": "sha1-z/9HGu5N1cnhWFmPvhKWe1za00U=",
+ "dependencies": {
+ "css-color-names": "^0.0.4",
+ "hex-color-regex": "^1.1.0",
+ "hsl-regex": "^1.0.0",
+ "hsla-regex": "^1.0.0",
+ "rgb-regex": "^1.0.1",
+ "rgba-regex": "^1.0.0"
+ }
+ },
+ "node_modules/is-core-module": {
+ "version": "2.3.0",
+ "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.3.0.tgz",
+ "integrity": "sha512-xSphU2KG9867tsYdLD4RWQ1VqdFl4HTO9Thf3I/3dLEfr0dbPTWKsuCKrgqMljg4nPE+Gq0VCnzT3gr0CyBmsw==",
+ "dependencies": {
+ "has": "^1.0.3"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/is-data-descriptor": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz",
+ "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==",
+ "dependencies": {
+ "kind-of": "^6.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/is-date-object": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/is-date-object/-/is-date-object-1.0.2.tgz",
+ "integrity": "sha512-USlDT524woQ08aoZFzh3/Z6ch9Y/EWXEHQ/AaRN0SkKq4t2Jw2R2339tSXmwuVoY7LLlBCbOIlx2myP/L5zk0g==",
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/is-descriptor": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz",
+ "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==",
+ "dependencies": {
+ "is-accessor-descriptor": "^1.0.0",
+ "is-data-descriptor": "^1.0.0",
+ "kind-of": "^6.0.2"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/is-directory": {
+ "version": "0.3.1",
+ "resolved": "https://registry.npmjs.org/is-directory/-/is-directory-0.3.1.tgz",
+ "integrity": "sha1-YTObbyR1/Hcv2cnYP1yFddwVSuE=",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/is-expression": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/is-expression/-/is-expression-4.0.0.tgz",
+ "integrity": "sha512-zMIXX63sxzG3XrkHkrAPvm/OVZVSCPNkwMHU8oTX7/U3AL78I0QXCEICXUM13BIa8TYGZ68PiTKfQz3yaTNr4A==",
+ "dependencies": {
+ "acorn": "^7.1.1",
+ "object-assign": "^4.1.1"
+ }
+ },
+ "node_modules/is-extendable": {
+ "version": "0.1.1",
+ "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-0.1.1.tgz",
+ "integrity": "sha1-YrEQ4omkcUGOPsNqYX1HLjAd/Ik=",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/is-extglob": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz",
+ "integrity": "sha1-qIwCU1eR8C7TfHahueqXc8gz+MI=",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/is-fullwidth-code-point": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz",
+ "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/is-glob": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.1.tgz",
+ "integrity": "sha512-5G0tKtBTFImOqDnLB2hG6Bp2qcKEFduo4tZu9MT/H6NQv/ghhy30o55ufafxJ/LdH79LLs2Kfrn85TLKyA7BUg==",
+ "dependencies": {
+ "is-extglob": "^2.1.1"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/is-installed-globally": {
+ "version": "0.3.2",
+ "resolved": "https://registry.npmjs.org/is-installed-globally/-/is-installed-globally-0.3.2.tgz",
+ "integrity": "sha512-wZ8x1js7Ia0kecP/CHM/3ABkAmujX7WPvQk6uu3Fly/Mk44pySulQpnHG46OMjHGXApINnV4QhY3SWnECO2z5g==",
+ "dependencies": {
+ "global-dirs": "^2.0.1",
+ "is-path-inside": "^3.0.1"
+ },
+ "engines": {
+ "node": ">=8"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/is-negative-zero": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/is-negative-zero/-/is-negative-zero-2.0.1.tgz",
+ "integrity": "sha512-2z6JzQvZRa9A2Y7xC6dQQm4FSTSTNWjKIYYTt4246eMTJmIo0Q+ZyOsU66X8lxK1AbB92dFeglPLrhwpeRKO6w==",
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/is-npm": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/is-npm/-/is-npm-4.0.0.tgz",
+ "integrity": "sha512-96ECIfh9xtDDlPylNPXhzjsykHsMJZ18ASpaWzQyBr4YRTcVjUvzaHayDAES2oU/3KpljhHUjtSRNiDwi0F0ig==",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/is-number": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/is-number/-/is-number-3.0.0.tgz",
+ "integrity": "sha1-JP1iAaR4LPUFYcgQJ2r8fRLXEZU=",
+ "dependencies": {
+ "kind-of": "^3.0.2"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/is-number-object": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/is-number-object/-/is-number-object-1.0.4.tgz",
+ "integrity": "sha512-zohwelOAur+5uXtk8O3GPQ1eAcu4ZX3UwxQhUlfFFMNpUd83gXgjbhJh6HmB6LUNV/ieOLQuDwJO3dWJosUeMw==",
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/is-number/node_modules/kind-of": {
+ "version": "3.2.2",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz",
+ "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=",
+ "dependencies": {
+ "is-buffer": "^1.1.5"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/is-obj": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/is-obj/-/is-obj-2.0.0.tgz",
+ "integrity": "sha512-drqDG3cbczxxEJRoOXcOjtdp1J/lyp1mNn0xaznRs8+muBhgQcrnbspox5X5fOw0HnMnbfDzvnEMEtqDEJEo8w==",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/is-path-cwd": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/is-path-cwd/-/is-path-cwd-2.2.0.tgz",
+ "integrity": "sha512-w942bTcih8fdJPJmQHFzkS76NEP8Kzzvmw92cXsazb8intwLqPibPPdXf4ANdKV3rYMuuQYGIWtvz9JilB3NFQ==",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/is-path-in-cwd": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/is-path-in-cwd/-/is-path-in-cwd-2.1.0.tgz",
+ "integrity": "sha512-rNocXHgipO+rvnP6dk3zI20RpOtrAM/kzbB258Uw5BWr3TpXi861yzjo16Dn4hUox07iw5AyeMLHWsujkjzvRQ==",
+ "dependencies": {
+ "is-path-inside": "^2.1.0"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/is-path-in-cwd/node_modules/is-path-inside": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-2.1.0.tgz",
+ "integrity": "sha512-wiyhTzfDWsvwAW53OBWF5zuvaOGlZ6PwYxAbPVDhpm+gM09xKQGjBq/8uYN12aDvMxnAnq3dxTyoSoRNmg5YFg==",
+ "dependencies": {
+ "path-is-inside": "^1.0.2"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/is-path-inside": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-3.0.3.tgz",
+ "integrity": "sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/is-plain-obj": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-1.1.0.tgz",
+ "integrity": "sha1-caUMhCnfync8kqOQpKA7OfzVHT4=",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/is-plain-object": {
+ "version": "2.0.4",
+ "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-2.0.4.tgz",
+ "integrity": "sha512-h5PpgXkWitc38BBMYawTYMWJHFZJVnBquFE57xFpjB8pJFiF6gZ+bU+WyI/yqXiFR5mdLsgYNaPe8uao6Uv9Og==",
+ "dependencies": {
+ "isobject": "^3.0.1"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/is-promise": {
+ "version": "2.2.2",
+ "resolved": "https://registry.npmjs.org/is-promise/-/is-promise-2.2.2.tgz",
+ "integrity": "sha512-+lP4/6lKUBfQjZ2pdxThZvLUAafmZb8OAxFb8XXtiQmS35INgr85hdOGoEs124ez1FCnZJt6jau/T+alh58QFQ=="
+ },
+ "node_modules/is-regex": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/is-regex/-/is-regex-1.1.2.tgz",
+ "integrity": "sha512-axvdhb5pdhEVThqJzYXwMlVuZwC+FF2DpcOhTS+y/8jVq4trxyPgfcwIxIKiyeuLlSQYKkmUaPQJ8ZE4yNKXDg==",
+ "dependencies": {
+ "call-bind": "^1.0.2",
+ "has-symbols": "^1.0.1"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/is-resolvable": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/is-resolvable/-/is-resolvable-1.1.0.tgz",
+ "integrity": "sha512-qgDYXFSR5WvEfuS5dMj6oTMEbrrSaM0CrFk2Yiq/gXnBvD9pMa2jGXxyhGLfvhZpuMZe18CJpFxAt3CRs42NMg=="
+ },
+ "node_modules/is-stream": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-1.1.0.tgz",
+ "integrity": "sha1-EtSj3U5o4Lec6428hBc66A2RykQ=",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/is-string": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/is-string/-/is-string-1.0.5.tgz",
+ "integrity": "sha512-buY6VNRjhQMiF1qWDouloZlQbRhDPCebwxSjxMjxgemYT46YMd2NR0/H+fBhEfWX4A/w9TBJ+ol+okqJKFE6vQ==",
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/is-symbol": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/is-symbol/-/is-symbol-1.0.3.tgz",
+ "integrity": "sha512-OwijhaRSgqvhm/0ZdAcXNZt9lYdKFpcRDT5ULUuYXPoT794UNOdU+gpT6Rzo7b4V2HUl/op6GqY894AZwv9faQ==",
+ "dependencies": {
+ "has-symbols": "^1.0.1"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/is-typedarray": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/is-typedarray/-/is-typedarray-1.0.0.tgz",
+ "integrity": "sha1-5HnICFjfDBsR3dppQPlgEfzaSpo="
+ },
+ "node_modules/is-windows": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/is-windows/-/is-windows-1.0.2.tgz",
+ "integrity": "sha512-eXK1UInq2bPmjyX6e3VHIzMLobc4J94i4AWn+Hpq3OU5KkrRC96OAcR3PRJ/pGu6m8TRnBHP9dkXQVsT/COVIA==",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/is-wsl": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/is-wsl/-/is-wsl-1.1.0.tgz",
+ "integrity": "sha1-HxbkqiKwTRM2tmGIpmrzxgDDpm0=",
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/is-yarn-global": {
+ "version": "0.3.0",
+ "resolved": "https://registry.npmjs.org/is-yarn-global/-/is-yarn-global-0.3.0.tgz",
+ "integrity": "sha512-VjSeb/lHmkoyd8ryPVIKvOCn4D1koMqY+vqyjjUfc3xyKtP4dYOxM44sZrnqQSzSds3xyOrUTLTC9LVCVgLngw=="
+ },
+ "node_modules/isarray": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz",
+ "integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE="
+ },
+ "node_modules/isexe": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz",
+ "integrity": "sha1-6PvzdNxVb/iUehDcsFctYz8s+hA="
+ },
+ "node_modules/isobject": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz",
+ "integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8=",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/isstream": {
+ "version": "0.1.2",
+ "resolved": "https://registry.npmjs.org/isstream/-/isstream-0.1.2.tgz",
+ "integrity": "sha1-R+Y/evVa+m+S4VAOaQ64uFKcCZo="
+ },
+ "node_modules/javascript-stringify": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/javascript-stringify/-/javascript-stringify-2.1.0.tgz",
+ "integrity": "sha512-JVAfqNPTvNq3sB/VHQJAFxN/sPgKnsKrCwyRt15zwNCdrMMJDdcEOdubuy+DuJYYdm0ox1J4uzEuYKkN+9yhVg=="
+ },
+ "node_modules/js-base64": {
+ "version": "2.6.4",
+ "resolved": "https://registry.npmjs.org/js-base64/-/js-base64-2.6.4.tgz",
+ "integrity": "sha512-pZe//GGmwJndub7ZghVHz7vjb2LgC1m8B07Au3eYqeqv9emhESByMXxaEgkUkEqJe87oBbSniGYoQNIBklc7IQ=="
+ },
+ "node_modules/js-stringify": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/js-stringify/-/js-stringify-1.0.2.tgz",
+ "integrity": "sha1-Fzb939lyTyijaCrcYjCufk6Weds="
+ },
+ "node_modules/js-tokens": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz",
+ "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ=="
+ },
+ "node_modules/js-yaml": {
+ "version": "3.14.1",
+ "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz",
+ "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==",
+ "dependencies": {
+ "argparse": "^1.0.7",
+ "esprima": "^4.0.0"
+ },
+ "bin": {
+ "js-yaml": "bin/js-yaml.js"
+ }
+ },
+ "node_modules/jsbn": {
+ "version": "0.1.1",
+ "resolved": "https://registry.npmjs.org/jsbn/-/jsbn-0.1.1.tgz",
+ "integrity": "sha1-peZUwuWi3rXyAdls77yoDA7y9RM="
+ },
+ "node_modules/jsesc": {
+ "version": "2.5.2",
+ "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-2.5.2.tgz",
+ "integrity": "sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA==",
+ "bin": {
+ "jsesc": "bin/jsesc"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/json-buffer": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.0.tgz",
+ "integrity": "sha1-Wx85evx11ne96Lz8Dkfh+aPZqJg="
+ },
+ "node_modules/json-parse-better-errors": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/json-parse-better-errors/-/json-parse-better-errors-1.0.2.tgz",
+ "integrity": "sha512-mrqyZKfX5EhL7hvqcV6WG1yYjnjeuYDzDhhcAAUrq8Po85NBQBJP+ZDUT75qZQ98IkUoBqdkExkukOU7Ts2wrw=="
+ },
+ "node_modules/json-schema": {
+ "version": "0.2.3",
+ "resolved": "https://registry.npmjs.org/json-schema/-/json-schema-0.2.3.tgz",
+ "integrity": "sha1-tIDIkuWaLwWVTOcnvT8qTogvnhM="
+ },
+ "node_modules/json-schema-traverse": {
+ "version": "0.4.1",
+ "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz",
+ "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg=="
+ },
+ "node_modules/json-stringify-safe": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz",
+ "integrity": "sha1-Epai1Y/UXxmg9s4B1lcB4sc1tus="
+ },
+ "node_modules/json3": {
+ "version": "3.3.3",
+ "resolved": "https://registry.npmjs.org/json3/-/json3-3.3.3.tgz",
+ "integrity": "sha512-c7/8mbUsKigAbLkD5B010BK4D9LZm7A1pNItkEwiUZRpIN66exu/e7YQWysGun+TRKaJp8MhemM+VkfWv42aCA=="
+ },
+ "node_modules/json5": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/json5/-/json5-1.0.1.tgz",
+ "integrity": "sha512-aKS4WQjPenRxiQsC93MNfjx+nbF4PAdYzmd/1JIj8HYzqfbu86beTuNgXDzPknWk0n0uARlyewZo4s++ES36Ow==",
+ "dependencies": {
+ "minimist": "^1.2.0"
+ },
+ "bin": {
+ "json5": "lib/cli.js"
+ }
+ },
+ "node_modules/jsonfile": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-4.0.0.tgz",
+ "integrity": "sha1-h3Gq4HmbZAdrdmQPygWPnBDjPss=",
+ "optionalDependencies": {
+ "graceful-fs": "^4.1.6"
+ }
+ },
+ "node_modules/jsonp": {
+ "version": "0.2.1",
+ "resolved": "https://registry.npmjs.org/jsonp/-/jsonp-0.2.1.tgz",
+ "integrity": "sha1-pltPoPEL2nGaBUQep7lMVfPhW64=",
+ "dependencies": {
+ "debug": "^2.1.3"
+ }
+ },
+ "node_modules/jsprim": {
+ "version": "1.4.1",
+ "resolved": "https://registry.npmjs.org/jsprim/-/jsprim-1.4.1.tgz",
+ "integrity": "sha1-MT5mvB5cwG5Di8G3SZwuXFastqI=",
+ "engines": [
+ "node >=0.6.0"
+ ],
+ "dependencies": {
+ "assert-plus": "1.0.0",
+ "extsprintf": "1.3.0",
+ "json-schema": "0.2.3",
+ "verror": "1.10.0"
+ }
+ },
+ "node_modules/jstransformer": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/jstransformer/-/jstransformer-1.0.0.tgz",
+ "integrity": "sha1-7Yvwkh4vPx7U1cGkT2hwntJHIsM=",
+ "dependencies": {
+ "is-promise": "^2.0.0",
+ "promise": "^7.0.1"
+ }
+ },
+ "node_modules/keyv": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/keyv/-/keyv-3.1.0.tgz",
+ "integrity": "sha512-9ykJ/46SN/9KPM/sichzQ7OvXyGDYKGTaDlKMGCAlg2UK8KRy4jb0d8sFc+0Tt0YYnThq8X2RZgCg74RPxgcVA==",
+ "dependencies": {
+ "json-buffer": "3.0.0"
+ }
+ },
+ "node_modules/killable": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/killable/-/killable-1.0.1.tgz",
+ "integrity": "sha512-LzqtLKlUwirEUyl/nicirVmNiPvYs7l5n8wOPP7fyJVpUPkvCnW/vuiXGpylGUlnPDnB7311rARzAt3Mhswpjg=="
+ },
+ "node_modules/kind-of": {
+ "version": "6.0.3",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz",
+ "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/last-call-webpack-plugin": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/last-call-webpack-plugin/-/last-call-webpack-plugin-3.0.0.tgz",
+ "integrity": "sha512-7KI2l2GIZa9p2spzPIVZBYyNKkN+e/SQPpnjlTiPhdbDW3F86tdKKELxKpzJ5sgU19wQWsACULZmpTPYHeWO5w==",
+ "dependencies": {
+ "lodash": "^4.17.5",
+ "webpack-sources": "^1.1.0"
+ }
+ },
+ "node_modules/latest-version": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/latest-version/-/latest-version-5.1.0.tgz",
+ "integrity": "sha512-weT+r0kTkRQdCdYCNtkMwWXQTMEswKrFBkm4ckQOMVhhqhIMI1UT2hMj+1iigIhgSZm5gTmrRXBNoGUgaTY1xA==",
+ "dependencies": {
+ "package-json": "^6.3.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/linkify-it": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/linkify-it/-/linkify-it-3.0.2.tgz",
+ "integrity": "sha512-gDBO4aHNZS6coiZCKVhSNh43F9ioIL4JwRjLZPkoLIY4yZFwg264Y5lu2x6rb1Js42Gh6Yqm2f6L2AJcnkzinQ==",
+ "dependencies": {
+ "uc.micro": "^1.0.1"
+ }
+ },
+ "node_modules/load-script": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/load-script/-/load-script-1.0.0.tgz",
+ "integrity": "sha1-BJGTngvuVkPuSUp+PaPSuscMbKQ="
+ },
+ "node_modules/loader-runner": {
+ "version": "2.4.0",
+ "resolved": "https://registry.npmjs.org/loader-runner/-/loader-runner-2.4.0.tgz",
+ "integrity": "sha512-Jsmr89RcXGIwivFY21FcRrisYZfvLMTWx5kOLc+JTxtpBOG6xML0vzbc6SEQG2FO9/4Fc3wW4LVcB5DmGflaRw==",
+ "engines": {
+ "node": ">=4.3.0 <5.0.0 || >=5.10"
+ }
+ },
+ "node_modules/loader-utils": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/loader-utils/-/loader-utils-1.4.0.tgz",
+ "integrity": "sha512-qH0WSMBtn/oHuwjy/NucEgbx5dbxxnxup9s4PVXJUDHZBQY+s0NWA9rJf53RBnQZxfch7euUui7hpoAPvALZdA==",
+ "dependencies": {
+ "big.js": "^5.2.2",
+ "emojis-list": "^3.0.0",
+ "json5": "^1.0.1"
+ },
+ "engines": {
+ "node": ">=4.0.0"
+ }
+ },
+ "node_modules/locate-path": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz",
+ "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==",
+ "dependencies": {
+ "p-locate": "^4.1.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/lodash": {
+ "version": "4.17.21",
+ "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz",
+ "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg=="
+ },
+ "node_modules/lodash._reinterpolate": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/lodash._reinterpolate/-/lodash._reinterpolate-3.0.0.tgz",
+ "integrity": "sha1-DM8tiRZq8Ds2Y8eWU4t1rG4RTZ0="
+ },
+ "node_modules/lodash.chunk": {
+ "version": "4.2.0",
+ "resolved": "https://registry.npmjs.org/lodash.chunk/-/lodash.chunk-4.2.0.tgz",
+ "integrity": "sha1-ZuXOH3btJ7QwPYxlEujRIW6BBrw="
+ },
+ "node_modules/lodash.clonedeep": {
+ "version": "4.5.0",
+ "resolved": "https://registry.npmjs.org/lodash.clonedeep/-/lodash.clonedeep-4.5.0.tgz",
+ "integrity": "sha1-4j8/nE+Pvd6HJSnBBxhXoIblzO8="
+ },
+ "node_modules/lodash.debounce": {
+ "version": "4.0.8",
+ "resolved": "https://registry.npmjs.org/lodash.debounce/-/lodash.debounce-4.0.8.tgz",
+ "integrity": "sha1-gteb/zCmfEAF/9XiUVMArZyk168="
+ },
+ "node_modules/lodash.kebabcase": {
+ "version": "4.1.1",
+ "resolved": "https://registry.npmjs.org/lodash.kebabcase/-/lodash.kebabcase-4.1.1.tgz",
+ "integrity": "sha1-hImxyw0p/4gZXM7KRI/21swpXDY="
+ },
+ "node_modules/lodash.memoize": {
+ "version": "4.1.2",
+ "resolved": "https://registry.npmjs.org/lodash.memoize/-/lodash.memoize-4.1.2.tgz",
+ "integrity": "sha1-vMbEmkKihA7Zl/Mj6tpezRguC/4="
+ },
+ "node_modules/lodash.padstart": {
+ "version": "4.6.1",
+ "resolved": "https://registry.npmjs.org/lodash.padstart/-/lodash.padstart-4.6.1.tgz",
+ "integrity": "sha1-0uPuv/DZ05rVD1y9G1KnvOa7YRs="
+ },
+ "node_modules/lodash.sortby": {
+ "version": "4.7.0",
+ "resolved": "https://registry.npmjs.org/lodash.sortby/-/lodash.sortby-4.7.0.tgz",
+ "integrity": "sha1-7dFMgk4sycHgsKG0K7UhBRakJDg="
+ },
+ "node_modules/lodash.template": {
+ "version": "4.5.0",
+ "resolved": "https://registry.npmjs.org/lodash.template/-/lodash.template-4.5.0.tgz",
+ "integrity": "sha512-84vYFxIkmidUiFxidA/KjjH9pAycqW+h980j7Fuz5qxRtO9pgB7MDFTdys1N7A5mcucRiDyEq4fusljItR1T/A==",
+ "dependencies": {
+ "lodash._reinterpolate": "^3.0.0",
+ "lodash.templatesettings": "^4.0.0"
+ }
+ },
+ "node_modules/lodash.templatesettings": {
+ "version": "4.2.0",
+ "resolved": "https://registry.npmjs.org/lodash.templatesettings/-/lodash.templatesettings-4.2.0.tgz",
+ "integrity": "sha512-stgLz+i3Aa9mZgnjr/O+v9ruKZsPsndy7qPZOchbqk2cnTU1ZaldKK+v7m54WoKIyxiuMZTKT2H81F8BeAc3ZQ==",
+ "dependencies": {
+ "lodash._reinterpolate": "^3.0.0"
+ }
+ },
+ "node_modules/lodash.uniq": {
+ "version": "4.5.0",
+ "resolved": "https://registry.npmjs.org/lodash.uniq/-/lodash.uniq-4.5.0.tgz",
+ "integrity": "sha1-0CJTc662Uq3BvILklFM5qEJ1R3M="
+ },
+ "node_modules/loglevel": {
+ "version": "1.7.1",
+ "resolved": "https://registry.npmjs.org/loglevel/-/loglevel-1.7.1.tgz",
+ "integrity": "sha512-Hesni4s5UkWkwCGJMQGAh71PaLUmKFM60dHvq0zi/vDhhrzuk+4GgNbTXJ12YYQJn6ZKBDNIjYcuQGKudvqrIw==",
+ "engines": {
+ "node": ">= 0.6.0"
+ },
+ "funding": {
+ "type": "tidelift",
+ "url": "https://tidelift.com/funding/github/npm/loglevel"
+ }
+ },
+ "node_modules/lower-case": {
+ "version": "1.1.4",
+ "resolved": "https://registry.npmjs.org/lower-case/-/lower-case-1.1.4.tgz",
+ "integrity": "sha1-miyr0bno4K6ZOkv31YdcOcQujqw="
+ },
+ "node_modules/lowercase-keys": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-1.0.1.tgz",
+ "integrity": "sha512-G2Lj61tXDnVFFOi8VZds+SoQjtQC3dgokKdDG2mTm1tx4m50NUHBOZSBwQQHyy0V12A0JTG4icfZQH+xPyh8VA==",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/lru-cache": {
+ "version": "5.1.1",
+ "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz",
+ "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==",
+ "dependencies": {
+ "yallist": "^3.0.2"
+ }
+ },
+ "node_modules/make-dir": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-3.1.0.tgz",
+ "integrity": "sha512-g3FeP20LNwhALb/6Cz6Dd4F2ngze0jz7tbzrD2wAV+o9FeNHe4rL+yK2md0J/fiSf1sa1ADhXqi5+oVwOM/eGw==",
+ "dependencies": {
+ "semver": "^6.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/map-cache": {
+ "version": "0.2.2",
+ "resolved": "https://registry.npmjs.org/map-cache/-/map-cache-0.2.2.tgz",
+ "integrity": "sha1-wyq9C9ZSXZsFFkW7TyasXcmKDb8=",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/map-visit": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/map-visit/-/map-visit-1.0.0.tgz",
+ "integrity": "sha1-7Nyo8TFE5mDxtb1B8S80edmN+48=",
+ "dependencies": {
+ "object-visit": "^1.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/markdown-it": {
+ "version": "12.0.6",
+ "resolved": "https://registry.npmjs.org/markdown-it/-/markdown-it-12.0.6.tgz",
+ "integrity": "sha512-qv3sVLl4lMT96LLtR7xeRJX11OUFjsaD5oVat2/SNBIb21bJXwal2+SklcRbTwGwqWpWH/HRtYavOoJE+seL8w==",
+ "dependencies": {
+ "argparse": "^2.0.1",
+ "entities": "~2.1.0",
+ "linkify-it": "^3.0.1",
+ "mdurl": "^1.0.1",
+ "uc.micro": "^1.0.5"
+ },
+ "bin": {
+ "markdown-it": "bin/markdown-it.js"
+ }
+ },
+ "node_modules/markdown-it-anchor": {
+ "version": "5.3.0",
+ "resolved": "https://registry.npmjs.org/markdown-it-anchor/-/markdown-it-anchor-5.3.0.tgz",
+ "integrity": "sha512-/V1MnLL/rgJ3jkMWo84UR+K+jF1cxNG1a+KwqeXqTIJ+jtA8aWSHuigx8lTzauiIjBDbwF3NcWQMotd0Dm39jA==",
+ "peerDependencies": {
+ "markdown-it": "*"
+ }
+ },
+ "node_modules/markdown-it-attrs": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/markdown-it-attrs/-/markdown-it-attrs-3.0.3.tgz",
+ "integrity": "sha512-cLnICU2t61skNCr4Wih/sdza+UbQcqJGZwvqAypnbWA284nzDm+Gpc90iaRk/JjsIy4emag5v3s0rXFhFBWhCA==",
+ "engines": {
+ "node": ">=6"
+ },
+ "peerDependencies": {
+ "markdown-it": ">= 9.0.0 < 12.0.0"
+ }
+ },
+ "node_modules/markdown-it-chain": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/markdown-it-chain/-/markdown-it-chain-1.3.0.tgz",
+ "integrity": "sha512-XClV8I1TKy8L2qsT9iX3qiV+50ZtcInGXI80CA+DP62sMs7hXlyV/RM3hfwy5O3Ad0sJm9xIwQELgANfESo8mQ==",
+ "dependencies": {
+ "webpack-chain": "^4.9.0"
+ },
+ "engines": {
+ "node": ">=6.9"
+ },
+ "peerDependencies": {
+ "markdown-it": ">=5.0.0"
+ }
+ },
+ "node_modules/markdown-it-chain/node_modules/javascript-stringify": {
+ "version": "1.6.0",
+ "resolved": "https://registry.npmjs.org/javascript-stringify/-/javascript-stringify-1.6.0.tgz",
+ "integrity": "sha1-FC0RHzpuPa6PSpr9d9RYVbWpzOM="
+ },
+ "node_modules/markdown-it-chain/node_modules/webpack-chain": {
+ "version": "4.12.1",
+ "resolved": "https://registry.npmjs.org/webpack-chain/-/webpack-chain-4.12.1.tgz",
+ "integrity": "sha512-BCfKo2YkDe2ByqkEWe1Rw+zko4LsyS75LVr29C6xIrxAg9JHJ4pl8kaIZ396SUSNp6b4815dRZPSTAS8LlURRQ==",
+ "dependencies": {
+ "deepmerge": "^1.5.2",
+ "javascript-stringify": "^1.6.0"
+ }
+ },
+ "node_modules/markdown-it-container": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/markdown-it-container/-/markdown-it-container-2.0.0.tgz",
+ "integrity": "sha1-ABm0P9Au7+zi8ZYKKJX7qBpARpU="
+ },
+ "node_modules/markdown-it-emoji": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/markdown-it-emoji/-/markdown-it-emoji-1.4.0.tgz",
+ "integrity": "sha1-m+4OmpkKljupbfaYDE/dsF37Tcw="
+ },
+ "node_modules/markdown-it-table-of-contents": {
+ "version": "0.4.4",
+ "resolved": "https://registry.npmjs.org/markdown-it-table-of-contents/-/markdown-it-table-of-contents-0.4.4.tgz",
+ "integrity": "sha512-TAIHTHPwa9+ltKvKPWulm/beozQU41Ab+FIefRaQV1NRnpzwcV9QOe6wXQS5WLivm5Q/nlo0rl6laGkMDZE7Gw==",
+ "engines": {
+ "node": ">6.4.0"
+ }
+ },
+ "node_modules/markdown-it/node_modules/argparse": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz",
+ "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q=="
+ },
+ "node_modules/md5.js": {
+ "version": "1.3.5",
+ "resolved": "https://registry.npmjs.org/md5.js/-/md5.js-1.3.5.tgz",
+ "integrity": "sha512-xitP+WxNPcTTOgnTJcrhM0xvdPepipPSf3I8EIpGKeFLjt3PlJLIDG3u8EX53ZIubkb+5U2+3rELYpEhHhzdkg==",
+ "dependencies": {
+ "hash-base": "^3.0.0",
+ "inherits": "^2.0.1",
+ "safe-buffer": "^5.1.2"
+ }
+ },
+ "node_modules/mdn-data": {
+ "version": "2.0.4",
+ "resolved": "https://registry.npmjs.org/mdn-data/-/mdn-data-2.0.4.tgz",
+ "integrity": "sha512-iV3XNKw06j5Q7mi6h+9vbx23Tv7JkjEVgKHW4pimwyDGWm0OIQntJJ+u1C6mg6mK1EaTv42XQ7w76yuzH7M2cA=="
+ },
+ "node_modules/mdurl": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/mdurl/-/mdurl-1.0.1.tgz",
+ "integrity": "sha1-/oWy7HWlkDfyrf7BAP1sYBdhFS4="
+ },
+ "node_modules/media-typer": {
+ "version": "0.3.0",
+ "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz",
+ "integrity": "sha1-hxDXrwqmJvj/+hzgAWhUUmMlV0g=",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/memory-fs": {
+ "version": "0.4.1",
+ "resolved": "https://registry.npmjs.org/memory-fs/-/memory-fs-0.4.1.tgz",
+ "integrity": "sha1-OpoguEYlI+RHz7x+i7gO1me/xVI=",
+ "dependencies": {
+ "errno": "^0.1.3",
+ "readable-stream": "^2.0.1"
+ }
+ },
+ "node_modules/merge-descriptors": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.1.tgz",
+ "integrity": "sha1-sAqqVW3YtEVoFQ7J0blT8/kMu2E="
+ },
+ "node_modules/merge-source-map": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/merge-source-map/-/merge-source-map-1.1.0.tgz",
+ "integrity": "sha512-Qkcp7P2ygktpMPh2mCQZaf3jhN6D3Z/qVZHSdWvQ+2Ef5HgRAPBO57A77+ENm0CPx2+1Ce/MYKi3ymqdfuqibw==",
+ "dependencies": {
+ "source-map": "^0.6.1"
+ }
+ },
+ "node_modules/merge-source-map/node_modules/source-map": {
+ "version": "0.6.1",
+ "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz",
+ "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/merge2": {
+ "version": "1.4.1",
+ "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz",
+ "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==",
+ "engines": {
+ "node": ">= 8"
+ }
+ },
+ "node_modules/methods": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz",
+ "integrity": "sha1-VSmk1nZUE07cxSZmVoNbD4Ua/O4=",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/micromatch": {
+ "version": "3.1.10",
+ "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-3.1.10.tgz",
+ "integrity": "sha512-MWikgl9n9M3w+bpsY3He8L+w9eF9338xRl8IAO5viDizwSzziFEyUzo2xrrloB64ADbTf8uA8vRqqttDTOmccg==",
+ "dependencies": {
+ "arr-diff": "^4.0.0",
+ "array-unique": "^0.3.2",
+ "braces": "^2.3.1",
+ "define-property": "^2.0.2",
+ "extend-shallow": "^3.0.2",
+ "extglob": "^2.0.4",
+ "fragment-cache": "^0.2.1",
+ "kind-of": "^6.0.2",
+ "nanomatch": "^1.2.9",
+ "object.pick": "^1.3.0",
+ "regex-not": "^1.0.0",
+ "snapdragon": "^0.8.1",
+ "to-regex": "^3.0.2"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/micromatch/node_modules/extend-shallow": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-3.0.2.tgz",
+ "integrity": "sha1-Jqcarwc7OfshJxcnRhMcJwQCjbg=",
+ "dependencies": {
+ "assign-symbols": "^1.0.0",
+ "is-extendable": "^1.0.1"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/micromatch/node_modules/is-extendable": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz",
+ "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==",
+ "dependencies": {
+ "is-plain-object": "^2.0.4"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/miller-rabin": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/miller-rabin/-/miller-rabin-4.0.1.tgz",
+ "integrity": "sha512-115fLhvZVqWwHPbClyntxEVfVDfl9DLLTuJvq3g2O/Oxi8AiNouAHvDSzHS0viUJc+V5vm3eq91Xwqn9dp4jRA==",
+ "dependencies": {
+ "bn.js": "^4.0.0",
+ "brorand": "^1.0.1"
+ },
+ "bin": {
+ "miller-rabin": "bin/miller-rabin"
+ }
+ },
+ "node_modules/miller-rabin/node_modules/bn.js": {
+ "version": "4.12.0",
+ "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.12.0.tgz",
+ "integrity": "sha512-c98Bf3tPniI+scsdk237ku1Dc3ujXQTSgyiPUDEOe7tRkhrqridvh8klBv0HCEso1OLOYcHuCv/cS6DNxKH+ZA=="
+ },
+ "node_modules/mime": {
+ "version": "2.5.2",
+ "resolved": "https://registry.npmjs.org/mime/-/mime-2.5.2.tgz",
+ "integrity": "sha512-tqkh47FzKeCPD2PUiPB6pkbMzsCasjxAfC62/Wap5qrUWcb+sFasXUC5I3gYM5iBM8v/Qpn4UK0x+j0iHyFPDg==",
+ "bin": {
+ "mime": "cli.js"
+ },
+ "engines": {
+ "node": ">=4.0.0"
+ }
+ },
+ "node_modules/mime-db": {
+ "version": "1.47.0",
+ "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.47.0.tgz",
+ "integrity": "sha512-QBmA/G2y+IfeS4oktet3qRZ+P5kPhCKRXxXnQEudYqUaEioAU1/Lq2us3D/t1Jfo4hE9REQPrbB7K5sOczJVIw==",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/mime-types": {
+ "version": "2.1.30",
+ "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.30.tgz",
+ "integrity": "sha512-crmjA4bLtR8m9qLpHvgxSChT+XoSlZi8J4n/aIdn3z92e/U47Z0V/yl+Wh9W046GgFVAmoNR/fmdbZYcSSIUeg==",
+ "dependencies": {
+ "mime-db": "1.47.0"
+ },
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/mimic-response": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-1.0.1.tgz",
+ "integrity": "sha512-j5EctnkH7amfV/q5Hgmoal1g2QHFJRraOtmx0JpIqkxhBhI/lJSl1nMpQ45hVarwNETOoWEimndZ4QK0RHxuxQ==",
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/min-document": {
+ "version": "2.19.0",
+ "resolved": "https://registry.npmjs.org/min-document/-/min-document-2.19.0.tgz",
+ "integrity": "sha1-e9KC4/WELtKVu3SM3Z8f+iyCRoU=",
+ "dependencies": {
+ "dom-walk": "^0.1.0"
+ }
+ },
+ "node_modules/mini-css-extract-plugin": {
+ "version": "0.6.0",
+ "resolved": "https://registry.npmjs.org/mini-css-extract-plugin/-/mini-css-extract-plugin-0.6.0.tgz",
+ "integrity": "sha512-79q5P7YGI6rdnVyIAV4NXpBQJFWdkzJxCim3Kog4078fM0piAaFlwocqbejdWtLW1cEzCexPrh6EdyFsPgVdAw==",
+ "dependencies": {
+ "loader-utils": "^1.1.0",
+ "normalize-url": "^2.0.1",
+ "schema-utils": "^1.0.0",
+ "webpack-sources": "^1.1.0"
+ },
+ "engines": {
+ "node": ">= 6.9.0"
+ },
+ "peerDependencies": {
+ "webpack": "^4.4.0"
+ }
+ },
+ "node_modules/mini-css-extract-plugin/node_modules/schema-utils": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-1.0.0.tgz",
+ "integrity": "sha512-i27Mic4KovM/lnGsy8whRCHhc7VicJajAjTrYg11K9zfZXnYIt4k5F+kZkwjnrhKzLic/HLU4j11mjsz2G/75g==",
+ "dependencies": {
+ "ajv": "^6.1.0",
+ "ajv-errors": "^1.0.0",
+ "ajv-keywords": "^3.1.0"
+ },
+ "engines": {
+ "node": ">= 4"
+ }
+ },
+ "node_modules/minimalistic-assert": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/minimalistic-assert/-/minimalistic-assert-1.0.1.tgz",
+ "integrity": "sha512-UtJcAD4yEaGtjPezWuO9wC4nwUnVH/8/Im3yEHQP4b67cXlD/Qr9hdITCU1xDbSEXg2XKNaP8jsReV7vQd00/A=="
+ },
+ "node_modules/minimalistic-crypto-utils": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/minimalistic-crypto-utils/-/minimalistic-crypto-utils-1.0.1.tgz",
+ "integrity": "sha1-9sAMHAsIIkblxNmd+4x8CDsrWCo="
+ },
+ "node_modules/minimatch": {
+ "version": "3.0.4",
+ "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz",
+ "integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==",
+ "dependencies": {
+ "brace-expansion": "^1.1.7"
+ },
+ "engines": {
+ "node": "*"
+ }
+ },
+ "node_modules/minimist": {
+ "version": "1.2.5",
+ "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.5.tgz",
+ "integrity": "sha512-FM9nNUYrRBAELZQT3xeZQ7fmMOBg6nWNmJKTcgsJeaLstP/UODVpGsr5OhXhhXg6f+qtJ8uiZ+PUxkDWcgIXLw=="
+ },
+ "node_modules/mississippi": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/mississippi/-/mississippi-3.0.0.tgz",
+ "integrity": "sha512-x471SsVjUtBRtcvd4BzKE9kFC+/2TeWgKCgw0bZcw1b9l2X3QX5vCWgF+KaZaYm87Ss//rHnWryupDrgLvmSkA==",
+ "dependencies": {
+ "concat-stream": "^1.5.0",
+ "duplexify": "^3.4.2",
+ "end-of-stream": "^1.1.0",
+ "flush-write-stream": "^1.0.0",
+ "from2": "^2.1.0",
+ "parallel-transform": "^1.1.0",
+ "pump": "^3.0.0",
+ "pumpify": "^1.3.3",
+ "stream-each": "^1.1.0",
+ "through2": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=4.0.0"
+ }
+ },
+ "node_modules/mixin-deep": {
+ "version": "1.3.2",
+ "resolved": "https://registry.npmjs.org/mixin-deep/-/mixin-deep-1.3.2.tgz",
+ "integrity": "sha512-WRoDn//mXBiJ1H40rqa3vH0toePwSsGb45iInWlTySa+Uu4k3tYUSxa2v1KqAiLtvlrSzaExqS1gtk96A9zvEA==",
+ "dependencies": {
+ "for-in": "^1.0.2",
+ "is-extendable": "^1.0.1"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/mixin-deep/node_modules/is-extendable": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz",
+ "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==",
+ "dependencies": {
+ "is-plain-object": "^2.0.4"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/mkdirp": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-1.0.4.tgz",
+ "integrity": "sha512-vVqVZQyf3WLx2Shd0qJ9xuvqgAyKPLAiqITEtqW0oIUjzo3PePDd6fW9iFz30ef7Ysp/oiWqbhszeGWW2T6Gzw==",
+ "bin": {
+ "mkdirp": "bin/cmd.js"
+ },
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/move-concurrently": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/move-concurrently/-/move-concurrently-1.0.1.tgz",
+ "integrity": "sha1-viwAX9oy4LKa8fBdfEszIUxwH5I=",
+ "dependencies": {
+ "aproba": "^1.1.1",
+ "copy-concurrently": "^1.0.0",
+ "fs-write-stream-atomic": "^1.0.8",
+ "mkdirp": "^0.5.1",
+ "rimraf": "^2.5.4",
+ "run-queue": "^1.0.3"
+ }
+ },
+ "node_modules/move-concurrently/node_modules/mkdirp": {
+ "version": "0.5.5",
+ "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz",
+ "integrity": "sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==",
+ "dependencies": {
+ "minimist": "^1.2.5"
+ },
+ "bin": {
+ "mkdirp": "bin/cmd.js"
+ }
+ },
+ "node_modules/ms": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
+ "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g="
+ },
+ "node_modules/multicast-dns": {
+ "version": "6.2.3",
+ "resolved": "https://registry.npmjs.org/multicast-dns/-/multicast-dns-6.2.3.tgz",
+ "integrity": "sha512-ji6J5enbMyGRHIAkAOu3WdV8nggqviKCEKtXcOqfphZZtQrmHKycfynJ2V7eVPUA4NhJ6V7Wf4TmGbTwKE9B6g==",
+ "dependencies": {
+ "dns-packet": "^1.3.1",
+ "thunky": "^1.0.2"
+ },
+ "bin": {
+ "multicast-dns": "cli.js"
+ }
+ },
+ "node_modules/multicast-dns-service-types": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/multicast-dns-service-types/-/multicast-dns-service-types-1.1.0.tgz",
+ "integrity": "sha1-iZ8R2WhuXgXLkbNdXw5jt3PPyQE="
+ },
+ "node_modules/nan": {
+ "version": "2.14.2",
+ "resolved": "https://registry.npmjs.org/nan/-/nan-2.14.2.tgz",
+ "integrity": "sha512-M2ufzIiINKCuDfBSAUr1vWQ+vuVcA9kqx8JJUsbQi6yf1uGRyb7HfpdfUr5qLXf3B/t8dPvcjhKMmlfnP47EzQ==",
+ "optional": true
+ },
+ "node_modules/nanomatch": {
+ "version": "1.2.13",
+ "resolved": "https://registry.npmjs.org/nanomatch/-/nanomatch-1.2.13.tgz",
+ "integrity": "sha512-fpoe2T0RbHwBTBUOftAfBPaDEi06ufaUai0mE6Yn1kacc3SnTErfb/h+X94VXzI64rKFHYImXSvdwGGCmwOqCA==",
+ "dependencies": {
+ "arr-diff": "^4.0.0",
+ "array-unique": "^0.3.2",
+ "define-property": "^2.0.2",
+ "extend-shallow": "^3.0.2",
+ "fragment-cache": "^0.2.1",
+ "is-windows": "^1.0.2",
+ "kind-of": "^6.0.2",
+ "object.pick": "^1.3.0",
+ "regex-not": "^1.0.0",
+ "snapdragon": "^0.8.1",
+ "to-regex": "^3.0.1"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/nanomatch/node_modules/extend-shallow": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-3.0.2.tgz",
+ "integrity": "sha1-Jqcarwc7OfshJxcnRhMcJwQCjbg=",
+ "dependencies": {
+ "assign-symbols": "^1.0.0",
+ "is-extendable": "^1.0.1"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/nanomatch/node_modules/is-extendable": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz",
+ "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==",
+ "dependencies": {
+ "is-plain-object": "^2.0.4"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/negotiator": {
+ "version": "0.6.2",
+ "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.2.tgz",
+ "integrity": "sha512-hZXc7K2e+PgeI1eDBe/10Ard4ekbfrrqG8Ep+8Jmf4JID2bNg7NvCPOZN+kfF574pFQI7mum2AUqDidoKqcTOw==",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/neo-async": {
+ "version": "2.6.2",
+ "resolved": "https://registry.npmjs.org/neo-async/-/neo-async-2.6.2.tgz",
+ "integrity": "sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw=="
+ },
+ "node_modules/nice-try": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/nice-try/-/nice-try-1.0.5.tgz",
+ "integrity": "sha512-1nh45deeb5olNY7eX82BkPO7SSxR5SSYJiPTrTdFUVYwAl8CKMA5N9PjTYkHiRjisVcxcQ1HXdLhx2qxxJzLNQ=="
+ },
+ "node_modules/no-case": {
+ "version": "2.3.2",
+ "resolved": "https://registry.npmjs.org/no-case/-/no-case-2.3.2.tgz",
+ "integrity": "sha512-rmTZ9kz+f3rCvK2TD1Ue/oZlns7OGoIWP4fc3llxxRXlOkHKoWPPWJOfFYpITabSow43QJbRIoHQXtt10VldyQ==",
+ "dependencies": {
+ "lower-case": "^1.1.1"
+ }
+ },
+ "node_modules/node-forge": {
+ "version": "0.10.0",
+ "resolved": "https://registry.npmjs.org/node-forge/-/node-forge-0.10.0.tgz",
+ "integrity": "sha512-PPmu8eEeG9saEUvI97fm4OYxXVB6bFvyNTyiUOBichBpFG8A1Ljw3bY62+5oOjDEMHRnd0Y7HQ+x7uzxOzC6JA==",
+ "engines": {
+ "node": ">= 6.0.0"
+ }
+ },
+ "node_modules/node-libs-browser": {
+ "version": "2.2.1",
+ "resolved": "https://registry.npmjs.org/node-libs-browser/-/node-libs-browser-2.2.1.tgz",
+ "integrity": "sha512-h/zcD8H9kaDZ9ALUWwlBUDo6TKF8a7qBSCSEGfjTVIYeqsioSKaAX+BN7NgiMGp6iSIXZ3PxgCu8KS3b71YK5Q==",
+ "dependencies": {
+ "assert": "^1.1.1",
+ "browserify-zlib": "^0.2.0",
+ "buffer": "^4.3.0",
+ "console-browserify": "^1.1.0",
+ "constants-browserify": "^1.0.0",
+ "crypto-browserify": "^3.11.0",
+ "domain-browser": "^1.1.1",
+ "events": "^3.0.0",
+ "https-browserify": "^1.0.0",
+ "os-browserify": "^0.3.0",
+ "path-browserify": "0.0.1",
+ "process": "^0.11.10",
+ "punycode": "^1.2.4",
+ "querystring-es3": "^0.2.0",
+ "readable-stream": "^2.3.3",
+ "stream-browserify": "^2.0.1",
+ "stream-http": "^2.7.2",
+ "string_decoder": "^1.0.0",
+ "timers-browserify": "^2.0.4",
+ "tty-browserify": "0.0.0",
+ "url": "^0.11.0",
+ "util": "^0.11.0",
+ "vm-browserify": "^1.0.1"
+ }
+ },
+ "node_modules/node-libs-browser/node_modules/punycode": {
+ "version": "1.4.1",
+ "resolved": "https://registry.npmjs.org/punycode/-/punycode-1.4.1.tgz",
+ "integrity": "sha1-wNWmOycYgArY4esPpSachN1BhF4="
+ },
+ "node_modules/node-releases": {
+ "version": "1.1.71",
+ "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-1.1.71.tgz",
+ "integrity": "sha512-zR6HoT6LrLCRBwukmrVbHv0EpEQjksO6GmFcZQQuCAy139BEsoVKPYnf3jongYW83fAa1torLGYwxxky/p28sg=="
+ },
+ "node_modules/nopt": {
+ "version": "1.0.10",
+ "resolved": "https://registry.npmjs.org/nopt/-/nopt-1.0.10.tgz",
+ "integrity": "sha1-bd0hvSoxQXuScn3Vhfim83YI6+4=",
+ "dependencies": {
+ "abbrev": "1"
+ },
+ "bin": {
+ "nopt": "bin/nopt.js"
+ },
+ "engines": {
+ "node": "*"
+ }
+ },
+ "node_modules/normalize-path": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz",
+ "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/normalize-range": {
+ "version": "0.1.2",
+ "resolved": "https://registry.npmjs.org/normalize-range/-/normalize-range-0.1.2.tgz",
+ "integrity": "sha1-LRDAa9/TEuqXd2laTShDlFa3WUI=",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/normalize-url": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-2.0.1.tgz",
+ "integrity": "sha512-D6MUW4K/VzoJ4rJ01JFKxDrtY1v9wrgzCX5f2qj/lzH1m/lW6MhUZFKerVsnyjOhOsYzI9Kqqak+10l4LvLpMw==",
+ "dependencies": {
+ "prepend-http": "^2.0.0",
+ "query-string": "^5.0.1",
+ "sort-keys": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/npm-run-path": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-2.0.2.tgz",
+ "integrity": "sha1-NakjLfo11wZ7TLLd8jV7GHFTbF8=",
+ "dependencies": {
+ "path-key": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/nprogress": {
+ "version": "0.2.0",
+ "resolved": "https://registry.npmjs.org/nprogress/-/nprogress-0.2.0.tgz",
+ "integrity": "sha1-y480xTIT2JVyP8urkH6UIq28r7E="
+ },
+ "node_modules/nth-check": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/nth-check/-/nth-check-2.0.0.tgz",
+ "integrity": "sha512-i4sc/Kj8htBrAiH1viZ0TgU8Y5XqCaV/FziYK6TBczxmeKm3AEFWqqF3195yKudrarqy7Zu80Ra5dobFjn9X/Q==",
+ "dependencies": {
+ "boolbase": "^1.0.0"
+ },
+ "funding": {
+ "url": "https://github.com/fb55/nth-check?sponsor=1"
+ }
+ },
+ "node_modules/num2fraction": {
+ "version": "1.2.2",
+ "resolved": "https://registry.npmjs.org/num2fraction/-/num2fraction-1.2.2.tgz",
+ "integrity": "sha1-b2gragJ6Tp3fpFZM0lidHU5mnt4="
+ },
+ "node_modules/oauth-sign": {
+ "version": "0.9.0",
+ "resolved": "https://registry.npmjs.org/oauth-sign/-/oauth-sign-0.9.0.tgz",
+ "integrity": "sha512-fexhUFFPTGV8ybAtSIGbV6gOkSv8UtRbDBnAyLQw4QPKkgNlsH2ByPGtMUqdWkos6YCRmAqViwgZrJc/mRDzZQ==",
+ "engines": {
+ "node": "*"
+ }
+ },
+ "node_modules/object-assign": {
+ "version": "4.1.1",
+ "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz",
+ "integrity": "sha1-IQmtx5ZYh8/AXLvUQsrIv7s2CGM=",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/object-copy": {
+ "version": "0.1.0",
+ "resolved": "https://registry.npmjs.org/object-copy/-/object-copy-0.1.0.tgz",
+ "integrity": "sha1-fn2Fi3gb18mRpBupde04EnVOmYw=",
+ "dependencies": {
+ "copy-descriptor": "^0.1.0",
+ "define-property": "^0.2.5",
+ "kind-of": "^3.0.3"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/object-copy/node_modules/define-property": {
+ "version": "0.2.5",
+ "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz",
+ "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=",
+ "dependencies": {
+ "is-descriptor": "^0.1.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/object-copy/node_modules/is-accessor-descriptor": {
+ "version": "0.1.6",
+ "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-0.1.6.tgz",
+ "integrity": "sha1-qeEss66Nh2cn7u84Q/igiXtcmNY=",
+ "dependencies": {
+ "kind-of": "^3.0.2"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/object-copy/node_modules/is-data-descriptor": {
+ "version": "0.1.4",
+ "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-0.1.4.tgz",
+ "integrity": "sha1-C17mSDiOLIYCgueT8YVv7D8wG1Y=",
+ "dependencies": {
+ "kind-of": "^3.0.2"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/object-copy/node_modules/is-descriptor": {
+ "version": "0.1.6",
+ "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-0.1.6.tgz",
+ "integrity": "sha512-avDYr0SB3DwO9zsMov0gKCESFYqCnE4hq/4z3TdUlukEy5t9C0YRq7HLrsN52NAcqXKaepeCD0n+B0arnVG3Hg==",
+ "dependencies": {
+ "is-accessor-descriptor": "^0.1.6",
+ "is-data-descriptor": "^0.1.4",
+ "kind-of": "^5.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/object-copy/node_modules/is-descriptor/node_modules/kind-of": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-5.1.0.tgz",
+ "integrity": "sha512-NGEErnH6F2vUuXDh+OlbcKW7/wOcfdRHaZ7VWtqCztfHri/++YKmP51OdWeGPuqCOba6kk2OTe5d02VmTB80Pw==",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/object-copy/node_modules/kind-of": {
+ "version": "3.2.2",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz",
+ "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=",
+ "dependencies": {
+ "is-buffer": "^1.1.5"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/object-inspect": {
+ "version": "1.10.2",
+ "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.10.2.tgz",
+ "integrity": "sha512-gz58rdPpadwztRrPjZE9DZLOABUpTGdcANUgOwBFO1C+HZZhePoP83M65WGDmbpwFYJSWqavbl4SgDn4k8RYTA==",
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/object-is": {
+ "version": "1.1.5",
+ "resolved": "https://registry.npmjs.org/object-is/-/object-is-1.1.5.tgz",
+ "integrity": "sha512-3cyDsyHgtmi7I7DfSSI2LDp6SK2lwvtbg0p0R1e0RvTqF5ceGx+K2dfSjm1bKDMVCFEDAQvy+o8c6a7VujOddw==",
+ "dependencies": {
+ "call-bind": "^1.0.2",
+ "define-properties": "^1.1.3"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/object-keys": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.1.1.tgz",
+ "integrity": "sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==",
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/object-visit": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/object-visit/-/object-visit-1.0.1.tgz",
+ "integrity": "sha1-95xEk68MU3e1n+OdOV5BBC3QRbs=",
+ "dependencies": {
+ "isobject": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/object.assign": {
+ "version": "4.1.2",
+ "resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.2.tgz",
+ "integrity": "sha512-ixT2L5THXsApyiUPYKmW+2EHpXXe5Ii3M+f4e+aJFAHao5amFRW6J0OO6c/LU8Be47utCx2GL89hxGB6XSmKuQ==",
+ "dependencies": {
+ "call-bind": "^1.0.0",
+ "define-properties": "^1.1.3",
+ "has-symbols": "^1.0.1",
+ "object-keys": "^1.1.1"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/object.getownpropertydescriptors": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/object.getownpropertydescriptors/-/object.getownpropertydescriptors-2.1.2.tgz",
+ "integrity": "sha512-WtxeKSzfBjlzL+F9b7M7hewDzMwy+C8NRssHd1YrNlzHzIDrXcXiNOMrezdAEM4UXixgV+vvnyBeN7Rygl2ttQ==",
+ "dependencies": {
+ "call-bind": "^1.0.2",
+ "define-properties": "^1.1.3",
+ "es-abstract": "^1.18.0-next.2"
+ },
+ "engines": {
+ "node": ">= 0.8"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/object.pick": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/object.pick/-/object.pick-1.3.0.tgz",
+ "integrity": "sha1-h6EKxMFpS9Lhy/U1kaZhQftd10c=",
+ "dependencies": {
+ "isobject": "^3.0.1"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/object.values": {
+ "version": "1.1.3",
+ "resolved": "https://registry.npmjs.org/object.values/-/object.values-1.1.3.tgz",
+ "integrity": "sha512-nkF6PfDB9alkOUxpf1HNm/QlkeW3SReqL5WXeBLpEJJnlPSvRaDQpW3gQTksTN3fgJX4hL42RzKyOin6ff3tyw==",
+ "dependencies": {
+ "call-bind": "^1.0.2",
+ "define-properties": "^1.1.3",
+ "es-abstract": "^1.18.0-next.2",
+ "has": "^1.0.3"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/obuf": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/obuf/-/obuf-1.1.2.tgz",
+ "integrity": "sha512-PX1wu0AmAdPqOL1mWhqmlOd8kOIZQwGZw6rh7uby9fTc5lhaOWFLX3I6R1hrF9k3zUY40e6igsLGkDXK92LJNg=="
+ },
+ "node_modules/on-finished": {
+ "version": "2.3.0",
+ "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.3.0.tgz",
+ "integrity": "sha1-IPEzZIGwg811M3mSoWlxqi2QaUc=",
+ "dependencies": {
+ "ee-first": "1.1.1"
+ },
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/on-headers": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/on-headers/-/on-headers-1.0.2.tgz",
+ "integrity": "sha512-pZAE+FJLoyITytdqK0U5s+FIpjN0JP3OzFi/u8Rx+EV5/W+JTWGXG8xFzevE7AjBfDqHv/8vL8qQsIhHnqRkrA==",
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/once": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz",
+ "integrity": "sha1-WDsap3WWHUsROsF9nFC6753Xa9E=",
+ "dependencies": {
+ "wrappy": "1"
+ }
+ },
+ "node_modules/opencollective-postinstall": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/opencollective-postinstall/-/opencollective-postinstall-2.0.3.tgz",
+ "integrity": "sha512-8AV/sCtuzUeTo8gQK5qDZzARrulB3egtLzFgteqB2tcT4Mw7B8Kt7JcDHmltjz6FOAHsvTevk70gZEbhM4ZS9Q==",
+ "bin": {
+ "opencollective-postinstall": "index.js"
+ }
+ },
+ "node_modules/opn": {
+ "version": "5.5.0",
+ "resolved": "https://registry.npmjs.org/opn/-/opn-5.5.0.tgz",
+ "integrity": "sha512-PqHpggC9bLV0VeWcdKhkpxY+3JTzetLSqTCWL/z/tFIbI6G8JCjondXklT1JinczLz2Xib62sSp0T/gKT4KksA==",
+ "dependencies": {
+ "is-wsl": "^1.1.0"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/optimize-css-assets-webpack-plugin": {
+ "version": "5.0.4",
+ "resolved": "https://registry.npmjs.org/optimize-css-assets-webpack-plugin/-/optimize-css-assets-webpack-plugin-5.0.4.tgz",
+ "integrity": "sha512-wqd6FdI2a5/FdoiCNNkEvLeA//lHHfG24Ln2Xm2qqdIk4aOlsR18jwpyOihqQ8849W3qu2DX8fOYxpvTMj+93A==",
+ "dependencies": {
+ "cssnano": "^4.1.10",
+ "last-call-webpack-plugin": "^3.0.0"
+ },
+ "peerDependencies": {
+ "webpack": "^4.0.0"
+ }
+ },
+ "node_modules/original": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/original/-/original-1.0.2.tgz",
+ "integrity": "sha512-hyBVl6iqqUOJ8FqRe+l/gS8H+kKYjrEndd5Pm1MfBtsEKA038HkkdbAl/72EAXGyonD/PFsvmVG+EvcIpliMBg==",
+ "dependencies": {
+ "url-parse": "^1.4.3"
+ }
+ },
+ "node_modules/os-browserify": {
+ "version": "0.3.0",
+ "resolved": "https://registry.npmjs.org/os-browserify/-/os-browserify-0.3.0.tgz",
+ "integrity": "sha1-hUNzx/XCMVkU/Jv8a9gjj92h7Cc="
+ },
+ "node_modules/p-cancelable": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/p-cancelable/-/p-cancelable-1.1.0.tgz",
+ "integrity": "sha512-s73XxOZ4zpt1edZYZzvhqFa6uvQc1vwUa0K0BdtIZgQMAJj9IbebH+JkgKZc9h+B05PKHLOTl4ajG1BmNrVZlw==",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/p-finally": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/p-finally/-/p-finally-1.0.0.tgz",
+ "integrity": "sha1-P7z7FbiZpEEjs0ttzBi3JDNqLK4=",
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/p-limit": {
+ "version": "2.3.0",
+ "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz",
+ "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==",
+ "dependencies": {
+ "p-try": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=6"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/p-locate": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz",
+ "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==",
+ "dependencies": {
+ "p-limit": "^2.2.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/p-map": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/p-map/-/p-map-2.1.0.tgz",
+ "integrity": "sha512-y3b8Kpd8OAN444hxfBbFfj1FY/RjtTd8tzYwhUqNYXx0fXx2iX4maP4Qr6qhIKbQXI02wTLAda4fYUbDagTUFw==",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/p-retry": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/p-retry/-/p-retry-3.0.1.tgz",
+ "integrity": "sha512-XE6G4+YTTkT2a0UWb2kjZe8xNwf8bIbnqpc/IS/idOBVhyves0mK5OJgeocjx7q5pvX/6m23xuzVPYT1uGM73w==",
+ "dependencies": {
+ "retry": "^0.12.0"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/p-try": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz",
+ "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/package-json": {
+ "version": "6.5.0",
+ "resolved": "https://registry.npmjs.org/package-json/-/package-json-6.5.0.tgz",
+ "integrity": "sha512-k3bdm2n25tkyxcjSKzB5x8kfVxlMdgsbPr0GkZcwHsLpba6cBjqCt1KlcChKEvxHIcTB1FVMuwoijZ26xex5MQ==",
+ "dependencies": {
+ "got": "^9.6.0",
+ "registry-auth-token": "^4.0.0",
+ "registry-url": "^5.0.0",
+ "semver": "^6.2.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/pako": {
+ "version": "1.0.11",
+ "resolved": "https://registry.npmjs.org/pako/-/pako-1.0.11.tgz",
+ "integrity": "sha512-4hLB8Py4zZce5s4yd9XzopqwVv/yGNhV1Bl8NTmCq1763HeK2+EwVTv+leGeL13Dnh2wfbqowVPXCIO0z4taYw=="
+ },
+ "node_modules/parallel-transform": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/parallel-transform/-/parallel-transform-1.2.0.tgz",
+ "integrity": "sha512-P2vSmIu38uIlvdcU7fDkyrxj33gTUy/ABO5ZUbGowxNCopBq/OoD42bP4UmMrJoPyk4Uqf0mu3mtWBhHCZD8yg==",
+ "dependencies": {
+ "cyclist": "^1.0.1",
+ "inherits": "^2.0.3",
+ "readable-stream": "^2.1.5"
+ }
+ },
+ "node_modules/param-case": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/param-case/-/param-case-2.1.1.tgz",
+ "integrity": "sha1-35T9jPZTHs915r75oIWPvHK+Ikc=",
+ "dependencies": {
+ "no-case": "^2.2.0"
+ }
+ },
+ "node_modules/parse-asn1": {
+ "version": "5.1.6",
+ "resolved": "https://registry.npmjs.org/parse-asn1/-/parse-asn1-5.1.6.tgz",
+ "integrity": "sha512-RnZRo1EPU6JBnra2vGHj0yhp6ebyjBZpmUCLHWiFhxlzvBCCpAuZ7elsBp1PVAbQN0/04VD/19rfzlBSwLstMw==",
+ "dependencies": {
+ "asn1.js": "^5.2.0",
+ "browserify-aes": "^1.0.0",
+ "evp_bytestokey": "^1.0.0",
+ "pbkdf2": "^3.0.3",
+ "safe-buffer": "^5.1.1"
+ }
+ },
+ "node_modules/parse-json": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-4.0.0.tgz",
+ "integrity": "sha1-vjX1Qlvh9/bHRxhPmKeIy5lHfuA=",
+ "dependencies": {
+ "error-ex": "^1.3.1",
+ "json-parse-better-errors": "^1.0.1"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/parse5": {
+ "version": "6.0.1",
+ "resolved": "https://registry.npmjs.org/parse5/-/parse5-6.0.1.tgz",
+ "integrity": "sha512-Ofn/CTFzRGTTxwpNEs9PP93gXShHcTq255nzRYSKe8AkVpZY7e1fpmTfOyoIvjP5HG7Z2ZM7VS9PPhQGW2pOpw=="
+ },
+ "node_modules/parse5-htmlparser2-tree-adapter": {
+ "version": "6.0.1",
+ "resolved": "https://registry.npmjs.org/parse5-htmlparser2-tree-adapter/-/parse5-htmlparser2-tree-adapter-6.0.1.tgz",
+ "integrity": "sha512-qPuWvbLgvDGilKc5BoicRovlT4MtYT6JfJyBOMDsKoiT+GiuP5qyrPCnR9HcPECIJJmZh5jRndyNThnhhb/vlA==",
+ "dependencies": {
+ "parse5": "^6.0.1"
+ }
+ },
+ "node_modules/parseurl": {
+ "version": "1.3.3",
+ "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz",
+ "integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==",
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/pascalcase": {
+ "version": "0.1.1",
+ "resolved": "https://registry.npmjs.org/pascalcase/-/pascalcase-0.1.1.tgz",
+ "integrity": "sha1-s2PlXoAGym/iF4TS2yK9FdeRfxQ=",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/path-browserify": {
+ "version": "0.0.1",
+ "resolved": "https://registry.npmjs.org/path-browserify/-/path-browserify-0.0.1.tgz",
+ "integrity": "sha512-BapA40NHICOS+USX9SN4tyhq+A2RrN/Ws5F0Z5aMHDp98Fl86lX8Oti8B7uN93L4Ifv4fHOEA+pQw87gmMO/lQ=="
+ },
+ "node_modules/path-dirname": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/path-dirname/-/path-dirname-1.0.2.tgz",
+ "integrity": "sha1-zDPSTVJeCZpTiMAzbG4yuRYGCeA="
+ },
+ "node_modules/path-exists": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz",
+ "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/path-is-absolute": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz",
+ "integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18=",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/path-is-inside": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/path-is-inside/-/path-is-inside-1.0.2.tgz",
+ "integrity": "sha1-NlQX3t5EQw0cEa9hAn+s8HS9/FM="
+ },
+ "node_modules/path-key": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/path-key/-/path-key-2.0.1.tgz",
+ "integrity": "sha1-QRyttXTFoUDTpLGRDUDYDMn0C0A=",
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/path-parse": {
+ "version": "1.0.6",
+ "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.6.tgz",
+ "integrity": "sha512-GSmOT2EbHrINBf9SR7CDELwlJ8AENk3Qn7OikK4nFYAu3Ote2+JYNVvkpAEQm3/TLNEJFD/xZJjzyxg3KBWOzw=="
+ },
+ "node_modules/path-to-regexp": {
+ "version": "0.1.7",
+ "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.7.tgz",
+ "integrity": "sha1-32BBeABfUi8V60SQ5yR6G/qmf4w="
+ },
+ "node_modules/path-type": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/path-type/-/path-type-3.0.0.tgz",
+ "integrity": "sha512-T2ZUsdZFHgA3u4e5PfPbjd7HDDpxPnQb5jN0SrDsjNSuVXHJqtwTnWqG0B1jZrgmJ/7lj1EmVIByWt1gxGkWvg==",
+ "dependencies": {
+ "pify": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/path-type/node_modules/pify": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/pify/-/pify-3.0.0.tgz",
+ "integrity": "sha1-5aSs0sEB/fPZpNB/DbxNtJ3SgXY=",
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/pbkdf2": {
+ "version": "3.1.2",
+ "resolved": "https://registry.npmjs.org/pbkdf2/-/pbkdf2-3.1.2.tgz",
+ "integrity": "sha512-iuh7L6jA7JEGu2WxDwtQP1ddOpaJNC4KlDEFfdQajSGgGPNi4OyDc2R7QnbY2bR9QjBVGwgvTdNJZoE7RaxUMA==",
+ "dependencies": {
+ "create-hash": "^1.1.2",
+ "create-hmac": "^1.1.4",
+ "ripemd160": "^2.0.1",
+ "safe-buffer": "^5.0.1",
+ "sha.js": "^2.4.8"
+ },
+ "engines": {
+ "node": ">=0.12"
+ }
+ },
+ "node_modules/performance-now": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/performance-now/-/performance-now-2.1.0.tgz",
+ "integrity": "sha1-Ywn04OX6kT7BxpMHrjZLSzd8nns="
+ },
+ "node_modules/picomatch": {
+ "version": "2.2.3",
+ "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.2.3.tgz",
+ "integrity": "sha512-KpELjfwcCDUb9PeigTs2mBJzXUPzAuP2oPcA989He8Rte0+YUAjw1JVedDhuTKPkHjSYzMN3npC9luThGYEKdg==",
+ "optional": true,
+ "engines": {
+ "node": ">=8.6"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/jonschlinkert"
+ }
+ },
+ "node_modules/pify": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/pify/-/pify-4.0.1.tgz",
+ "integrity": "sha512-uB80kBFb/tfd68bVleG9T5GGsGPjJrLAUpR5PZIrhBnIaRTQRjqdJSsIKkOP6OAIFbj7GOrcudc5pNjZ+geV2g==",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/pinkie": {
+ "version": "2.0.4",
+ "resolved": "https://registry.npmjs.org/pinkie/-/pinkie-2.0.4.tgz",
+ "integrity": "sha1-clVrgM+g1IqXToDnckjoDtT3+HA=",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/pinkie-promise": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/pinkie-promise/-/pinkie-promise-2.0.1.tgz",
+ "integrity": "sha1-ITXW36ejWMBprJsXh3YogihFD/o=",
+ "dependencies": {
+ "pinkie": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/pkg-dir": {
+ "version": "4.2.0",
+ "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-4.2.0.tgz",
+ "integrity": "sha512-HRDzbaKjC+AOWVXxAU/x54COGeIv9eb+6CkDSQoNTt4XyWoIJvuPsXizxu/Fr23EiekbtZwmh1IcIG/l/a10GQ==",
+ "dependencies": {
+ "find-up": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/portfinder": {
+ "version": "1.0.28",
+ "resolved": "https://registry.npmjs.org/portfinder/-/portfinder-1.0.28.tgz",
+ "integrity": "sha512-Se+2isanIcEqf2XMHjyUKskczxbPH7dQnlMjXX6+dybayyHvAf/TCgyMRlzf/B6QDhAEFOGes0pzRo3by4AbMA==",
+ "dependencies": {
+ "async": "^2.6.2",
+ "debug": "^3.1.1",
+ "mkdirp": "^0.5.5"
+ },
+ "engines": {
+ "node": ">= 0.12.0"
+ }
+ },
+ "node_modules/portfinder/node_modules/debug": {
+ "version": "3.2.7",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz",
+ "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==",
+ "dependencies": {
+ "ms": "^2.1.1"
+ }
+ },
+ "node_modules/portfinder/node_modules/mkdirp": {
+ "version": "0.5.5",
+ "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz",
+ "integrity": "sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==",
+ "dependencies": {
+ "minimist": "^1.2.5"
+ },
+ "bin": {
+ "mkdirp": "bin/cmd.js"
+ }
+ },
+ "node_modules/portfinder/node_modules/ms": {
+ "version": "2.1.3",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
+ "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA=="
+ },
+ "node_modules/posix-character-classes": {
+ "version": "0.1.1",
+ "resolved": "https://registry.npmjs.org/posix-character-classes/-/posix-character-classes-0.1.1.tgz",
+ "integrity": "sha1-AerA/jta9xoqbAL+q7jB/vfgDqs=",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/postcss": {
+ "version": "7.0.35",
+ "resolved": "https://registry.npmjs.org/postcss/-/postcss-7.0.35.tgz",
+ "integrity": "sha512-3QT8bBJeX/S5zKTTjTCIjRF3If4avAT6kqxcASlTWEtAFCb9NH0OUxNDfgZSWdP5fJnBYCMEWkIFfWeugjzYMg==",
+ "dependencies": {
+ "chalk": "^2.4.2",
+ "source-map": "^0.6.1",
+ "supports-color": "^6.1.0"
+ },
+ "engines": {
+ "node": ">=6.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/postcss/"
+ }
+ },
+ "node_modules/postcss-calc": {
+ "version": "7.0.5",
+ "resolved": "https://registry.npmjs.org/postcss-calc/-/postcss-calc-7.0.5.tgz",
+ "integrity": "sha512-1tKHutbGtLtEZF6PT4JSihCHfIVldU72mZ8SdZHIYriIZ9fh9k9aWSppaT8rHsyI3dX+KSR+W+Ix9BMY3AODrg==",
+ "dependencies": {
+ "postcss": "^7.0.27",
+ "postcss-selector-parser": "^6.0.2",
+ "postcss-value-parser": "^4.0.2"
+ }
+ },
+ "node_modules/postcss-colormin": {
+ "version": "4.0.3",
+ "resolved": "https://registry.npmjs.org/postcss-colormin/-/postcss-colormin-4.0.3.tgz",
+ "integrity": "sha512-WyQFAdDZpExQh32j0U0feWisZ0dmOtPl44qYmJKkq9xFWY3p+4qnRzCHeNrkeRhwPHz9bQ3mo0/yVkaply0MNw==",
+ "dependencies": {
+ "browserslist": "^4.0.0",
+ "color": "^3.0.0",
+ "has": "^1.0.0",
+ "postcss": "^7.0.0",
+ "postcss-value-parser": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/postcss-colormin/node_modules/postcss-value-parser": {
+ "version": "3.3.1",
+ "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz",
+ "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ=="
+ },
+ "node_modules/postcss-convert-values": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/postcss-convert-values/-/postcss-convert-values-4.0.1.tgz",
+ "integrity": "sha512-Kisdo1y77KUC0Jmn0OXU/COOJbzM8cImvw1ZFsBgBgMgb1iL23Zs/LXRe3r+EZqM3vGYKdQ2YJVQ5VkJI+zEJQ==",
+ "dependencies": {
+ "postcss": "^7.0.0",
+ "postcss-value-parser": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/postcss-convert-values/node_modules/postcss-value-parser": {
+ "version": "3.3.1",
+ "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz",
+ "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ=="
+ },
+ "node_modules/postcss-discard-comments": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/postcss-discard-comments/-/postcss-discard-comments-4.0.2.tgz",
+ "integrity": "sha512-RJutN259iuRf3IW7GZyLM5Sw4GLTOH8FmsXBnv8Ab/Tc2k4SR4qbV4DNbyyY4+Sjo362SyDmW2DQ7lBSChrpkg==",
+ "dependencies": {
+ "postcss": "^7.0.0"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/postcss-discard-duplicates": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/postcss-discard-duplicates/-/postcss-discard-duplicates-4.0.2.tgz",
+ "integrity": "sha512-ZNQfR1gPNAiXZhgENFfEglF93pciw0WxMkJeVmw8eF+JZBbMD7jp6C67GqJAXVZP2BWbOztKfbsdmMp/k8c6oQ==",
+ "dependencies": {
+ "postcss": "^7.0.0"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/postcss-discard-empty": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/postcss-discard-empty/-/postcss-discard-empty-4.0.1.tgz",
+ "integrity": "sha512-B9miTzbznhDjTfjvipfHoqbWKwd0Mj+/fL5s1QOz06wufguil+Xheo4XpOnc4NqKYBCNqqEzgPv2aPBIJLox0w==",
+ "dependencies": {
+ "postcss": "^7.0.0"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/postcss-discard-overridden": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/postcss-discard-overridden/-/postcss-discard-overridden-4.0.1.tgz",
+ "integrity": "sha512-IYY2bEDD7g1XM1IDEsUT4//iEYCxAmP5oDSFMVU/JVvT7gh+l4fmjciLqGgwjdWpQIdb0Che2VX00QObS5+cTg==",
+ "dependencies": {
+ "postcss": "^7.0.0"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/postcss-load-config": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/postcss-load-config/-/postcss-load-config-2.1.2.tgz",
+ "integrity": "sha512-/rDeGV6vMUo3mwJZmeHfEDvwnTKKqQ0S7OHUi/kJvvtx3aWtyWG2/0ZWnzCt2keEclwN6Tf0DST2v9kITdOKYw==",
+ "dependencies": {
+ "cosmiconfig": "^5.0.0",
+ "import-cwd": "^2.0.0"
+ },
+ "engines": {
+ "node": ">= 4"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/postcss/"
+ }
+ },
+ "node_modules/postcss-loader": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/postcss-loader/-/postcss-loader-3.0.0.tgz",
+ "integrity": "sha512-cLWoDEY5OwHcAjDnkyRQzAXfs2jrKjXpO/HQFcc5b5u/r7aa471wdmChmwfnv7x2u840iat/wi0lQ5nbRgSkUA==",
+ "dependencies": {
+ "loader-utils": "^1.1.0",
+ "postcss": "^7.0.0",
+ "postcss-load-config": "^2.0.0",
+ "schema-utils": "^1.0.0"
+ },
+ "engines": {
+ "node": ">= 6"
+ }
+ },
+ "node_modules/postcss-loader/node_modules/schema-utils": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-1.0.0.tgz",
+ "integrity": "sha512-i27Mic4KovM/lnGsy8whRCHhc7VicJajAjTrYg11K9zfZXnYIt4k5F+kZkwjnrhKzLic/HLU4j11mjsz2G/75g==",
+ "dependencies": {
+ "ajv": "^6.1.0",
+ "ajv-errors": "^1.0.0",
+ "ajv-keywords": "^3.1.0"
+ },
+ "engines": {
+ "node": ">= 4"
+ }
+ },
+ "node_modules/postcss-merge-longhand": {
+ "version": "4.0.11",
+ "resolved": "https://registry.npmjs.org/postcss-merge-longhand/-/postcss-merge-longhand-4.0.11.tgz",
+ "integrity": "sha512-alx/zmoeXvJjp7L4mxEMjh8lxVlDFX1gqWHzaaQewwMZiVhLo42TEClKaeHbRf6J7j82ZOdTJ808RtN0ZOZwvw==",
+ "dependencies": {
+ "css-color-names": "0.0.4",
+ "postcss": "^7.0.0",
+ "postcss-value-parser": "^3.0.0",
+ "stylehacks": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/postcss-merge-longhand/node_modules/postcss-value-parser": {
+ "version": "3.3.1",
+ "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz",
+ "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ=="
+ },
+ "node_modules/postcss-merge-rules": {
+ "version": "4.0.3",
+ "resolved": "https://registry.npmjs.org/postcss-merge-rules/-/postcss-merge-rules-4.0.3.tgz",
+ "integrity": "sha512-U7e3r1SbvYzO0Jr3UT/zKBVgYYyhAz0aitvGIYOYK5CPmkNih+WDSsS5tvPrJ8YMQYlEMvsZIiqmn7HdFUaeEQ==",
+ "dependencies": {
+ "browserslist": "^4.0.0",
+ "caniuse-api": "^3.0.0",
+ "cssnano-util-same-parent": "^4.0.0",
+ "postcss": "^7.0.0",
+ "postcss-selector-parser": "^3.0.0",
+ "vendors": "^1.0.0"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/postcss-merge-rules/node_modules/postcss-selector-parser": {
+ "version": "3.1.2",
+ "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-3.1.2.tgz",
+ "integrity": "sha512-h7fJ/5uWuRVyOtkO45pnt1Ih40CEleeyCHzipqAZO2e5H20g25Y48uYnFUiShvY4rZWNJ/Bib/KVPmanaCtOhA==",
+ "dependencies": {
+ "dot-prop": "^5.2.0",
+ "indexes-of": "^1.0.1",
+ "uniq": "^1.0.1"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/postcss-minify-font-values": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/postcss-minify-font-values/-/postcss-minify-font-values-4.0.2.tgz",
+ "integrity": "sha512-j85oO6OnRU9zPf04+PZv1LYIYOprWm6IA6zkXkrJXyRveDEuQggG6tvoy8ir8ZwjLxLuGfNkCZEQG7zan+Hbtg==",
+ "dependencies": {
+ "postcss": "^7.0.0",
+ "postcss-value-parser": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/postcss-minify-font-values/node_modules/postcss-value-parser": {
+ "version": "3.3.1",
+ "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz",
+ "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ=="
+ },
+ "node_modules/postcss-minify-gradients": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/postcss-minify-gradients/-/postcss-minify-gradients-4.0.2.tgz",
+ "integrity": "sha512-qKPfwlONdcf/AndP1U8SJ/uzIJtowHlMaSioKzebAXSG4iJthlWC9iSWznQcX4f66gIWX44RSA841HTHj3wK+Q==",
+ "dependencies": {
+ "cssnano-util-get-arguments": "^4.0.0",
+ "is-color-stop": "^1.0.0",
+ "postcss": "^7.0.0",
+ "postcss-value-parser": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/postcss-minify-gradients/node_modules/postcss-value-parser": {
+ "version": "3.3.1",
+ "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz",
+ "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ=="
+ },
+ "node_modules/postcss-minify-params": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/postcss-minify-params/-/postcss-minify-params-4.0.2.tgz",
+ "integrity": "sha512-G7eWyzEx0xL4/wiBBJxJOz48zAKV2WG3iZOqVhPet/9geefm/Px5uo1fzlHu+DOjT+m0Mmiz3jkQzVHe6wxAWg==",
+ "dependencies": {
+ "alphanum-sort": "^1.0.0",
+ "browserslist": "^4.0.0",
+ "cssnano-util-get-arguments": "^4.0.0",
+ "postcss": "^7.0.0",
+ "postcss-value-parser": "^3.0.0",
+ "uniqs": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/postcss-minify-params/node_modules/postcss-value-parser": {
+ "version": "3.3.1",
+ "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz",
+ "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ=="
+ },
+ "node_modules/postcss-minify-selectors": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/postcss-minify-selectors/-/postcss-minify-selectors-4.0.2.tgz",
+ "integrity": "sha512-D5S1iViljXBj9kflQo4YutWnJmwm8VvIsU1GeXJGiG9j8CIg9zs4voPMdQDUmIxetUOh60VilsNzCiAFTOqu3g==",
+ "dependencies": {
+ "alphanum-sort": "^1.0.0",
+ "has": "^1.0.0",
+ "postcss": "^7.0.0",
+ "postcss-selector-parser": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/postcss-minify-selectors/node_modules/postcss-selector-parser": {
+ "version": "3.1.2",
+ "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-3.1.2.tgz",
+ "integrity": "sha512-h7fJ/5uWuRVyOtkO45pnt1Ih40CEleeyCHzipqAZO2e5H20g25Y48uYnFUiShvY4rZWNJ/Bib/KVPmanaCtOhA==",
+ "dependencies": {
+ "dot-prop": "^5.2.0",
+ "indexes-of": "^1.0.1",
+ "uniq": "^1.0.1"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/postcss-modules-extract-imports": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/postcss-modules-extract-imports/-/postcss-modules-extract-imports-2.0.0.tgz",
+ "integrity": "sha512-LaYLDNS4SG8Q5WAWqIJgdHPJrDDr/Lv775rMBFUbgjTz6j34lUznACHcdRWroPvXANP2Vj7yNK57vp9eFqzLWQ==",
+ "dependencies": {
+ "postcss": "^7.0.5"
+ },
+ "engines": {
+ "node": ">= 6"
+ }
+ },
+ "node_modules/postcss-modules-local-by-default": {
+ "version": "2.0.6",
+ "resolved": "https://registry.npmjs.org/postcss-modules-local-by-default/-/postcss-modules-local-by-default-2.0.6.tgz",
+ "integrity": "sha512-oLUV5YNkeIBa0yQl7EYnxMgy4N6noxmiwZStaEJUSe2xPMcdNc8WmBQuQCx18H5psYbVxz8zoHk0RAAYZXP9gA==",
+ "dependencies": {
+ "postcss": "^7.0.6",
+ "postcss-selector-parser": "^6.0.0",
+ "postcss-value-parser": "^3.3.1"
+ },
+ "engines": {
+ "node": ">= 6"
+ }
+ },
+ "node_modules/postcss-modules-local-by-default/node_modules/postcss-value-parser": {
+ "version": "3.3.1",
+ "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz",
+ "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ=="
+ },
+ "node_modules/postcss-modules-scope": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/postcss-modules-scope/-/postcss-modules-scope-2.2.0.tgz",
+ "integrity": "sha512-YyEgsTMRpNd+HmyC7H/mh3y+MeFWevy7V1evVhJWewmMbjDHIbZbOXICC2y+m1xI1UVfIT1HMW/O04Hxyu9oXQ==",
+ "dependencies": {
+ "postcss": "^7.0.6",
+ "postcss-selector-parser": "^6.0.0"
+ },
+ "engines": {
+ "node": ">= 6"
+ }
+ },
+ "node_modules/postcss-modules-values": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/postcss-modules-values/-/postcss-modules-values-2.0.0.tgz",
+ "integrity": "sha512-Ki7JZa7ff1N3EIMlPnGTZfUMe69FFwiQPnVSXC9mnn3jozCRBYIxiZd44yJOV2AmabOo4qFf8s0dC/+lweG7+w==",
+ "dependencies": {
+ "icss-replace-symbols": "^1.1.0",
+ "postcss": "^7.0.6"
+ }
+ },
+ "node_modules/postcss-normalize-charset": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/postcss-normalize-charset/-/postcss-normalize-charset-4.0.1.tgz",
+ "integrity": "sha512-gMXCrrlWh6G27U0hF3vNvR3w8I1s2wOBILvA87iNXaPvSNo5uZAMYsZG7XjCUf1eVxuPfyL4TJ7++SGZLc9A3g==",
+ "dependencies": {
+ "postcss": "^7.0.0"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/postcss-normalize-display-values": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/postcss-normalize-display-values/-/postcss-normalize-display-values-4.0.2.tgz",
+ "integrity": "sha512-3F2jcsaMW7+VtRMAqf/3m4cPFhPD3EFRgNs18u+k3lTJJlVe7d0YPO+bnwqo2xg8YiRpDXJI2u8A0wqJxMsQuQ==",
+ "dependencies": {
+ "cssnano-util-get-match": "^4.0.0",
+ "postcss": "^7.0.0",
+ "postcss-value-parser": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/postcss-normalize-display-values/node_modules/postcss-value-parser": {
+ "version": "3.3.1",
+ "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz",
+ "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ=="
+ },
+ "node_modules/postcss-normalize-positions": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/postcss-normalize-positions/-/postcss-normalize-positions-4.0.2.tgz",
+ "integrity": "sha512-Dlf3/9AxpxE+NF1fJxYDeggi5WwV35MXGFnnoccP/9qDtFrTArZ0D0R+iKcg5WsUd8nUYMIl8yXDCtcrT8JrdA==",
+ "dependencies": {
+ "cssnano-util-get-arguments": "^4.0.0",
+ "has": "^1.0.0",
+ "postcss": "^7.0.0",
+ "postcss-value-parser": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/postcss-normalize-positions/node_modules/postcss-value-parser": {
+ "version": "3.3.1",
+ "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz",
+ "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ=="
+ },
+ "node_modules/postcss-normalize-repeat-style": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/postcss-normalize-repeat-style/-/postcss-normalize-repeat-style-4.0.2.tgz",
+ "integrity": "sha512-qvigdYYMpSuoFs3Is/f5nHdRLJN/ITA7huIoCyqqENJe9PvPmLhNLMu7QTjPdtnVf6OcYYO5SHonx4+fbJE1+Q==",
+ "dependencies": {
+ "cssnano-util-get-arguments": "^4.0.0",
+ "cssnano-util-get-match": "^4.0.0",
+ "postcss": "^7.0.0",
+ "postcss-value-parser": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/postcss-normalize-repeat-style/node_modules/postcss-value-parser": {
+ "version": "3.3.1",
+ "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz",
+ "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ=="
+ },
+ "node_modules/postcss-normalize-string": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/postcss-normalize-string/-/postcss-normalize-string-4.0.2.tgz",
+ "integrity": "sha512-RrERod97Dnwqq49WNz8qo66ps0swYZDSb6rM57kN2J+aoyEAJfZ6bMx0sx/F9TIEX0xthPGCmeyiam/jXif0eA==",
+ "dependencies": {
+ "has": "^1.0.0",
+ "postcss": "^7.0.0",
+ "postcss-value-parser": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/postcss-normalize-string/node_modules/postcss-value-parser": {
+ "version": "3.3.1",
+ "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz",
+ "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ=="
+ },
+ "node_modules/postcss-normalize-timing-functions": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/postcss-normalize-timing-functions/-/postcss-normalize-timing-functions-4.0.2.tgz",
+ "integrity": "sha512-acwJY95edP762e++00Ehq9L4sZCEcOPyaHwoaFOhIwWCDfik6YvqsYNxckee65JHLKzuNSSmAdxwD2Cud1Z54A==",
+ "dependencies": {
+ "cssnano-util-get-match": "^4.0.0",
+ "postcss": "^7.0.0",
+ "postcss-value-parser": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/postcss-normalize-timing-functions/node_modules/postcss-value-parser": {
+ "version": "3.3.1",
+ "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz",
+ "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ=="
+ },
+ "node_modules/postcss-normalize-unicode": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/postcss-normalize-unicode/-/postcss-normalize-unicode-4.0.1.tgz",
+ "integrity": "sha512-od18Uq2wCYn+vZ/qCOeutvHjB5jm57ToxRaMeNuf0nWVHaP9Hua56QyMF6fs/4FSUnVIw0CBPsU0K4LnBPwYwg==",
+ "dependencies": {
+ "browserslist": "^4.0.0",
+ "postcss": "^7.0.0",
+ "postcss-value-parser": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/postcss-normalize-unicode/node_modules/postcss-value-parser": {
+ "version": "3.3.1",
+ "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz",
+ "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ=="
+ },
+ "node_modules/postcss-normalize-url": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/postcss-normalize-url/-/postcss-normalize-url-4.0.1.tgz",
+ "integrity": "sha512-p5oVaF4+IHwu7VpMan/SSpmpYxcJMtkGppYf0VbdH5B6hN8YNmVyJLuY9FmLQTzY3fag5ESUUHDqM+heid0UVA==",
+ "dependencies": {
+ "is-absolute-url": "^2.0.0",
+ "normalize-url": "^3.0.0",
+ "postcss": "^7.0.0",
+ "postcss-value-parser": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/postcss-normalize-url/node_modules/normalize-url": {
+ "version": "3.3.0",
+ "resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-3.3.0.tgz",
+ "integrity": "sha512-U+JJi7duF1o+u2pynbp2zXDW2/PADgC30f0GsHZtRh+HOcXHnw137TrNlyxxRvWW5fjKd3bcLHPxofWuCjaeZg==",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/postcss-normalize-url/node_modules/postcss-value-parser": {
+ "version": "3.3.1",
+ "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz",
+ "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ=="
+ },
+ "node_modules/postcss-normalize-whitespace": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/postcss-normalize-whitespace/-/postcss-normalize-whitespace-4.0.2.tgz",
+ "integrity": "sha512-tO8QIgrsI3p95r8fyqKV+ufKlSHh9hMJqACqbv2XknufqEDhDvbguXGBBqxw9nsQoXWf0qOqppziKJKHMD4GtA==",
+ "dependencies": {
+ "postcss": "^7.0.0",
+ "postcss-value-parser": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/postcss-normalize-whitespace/node_modules/postcss-value-parser": {
+ "version": "3.3.1",
+ "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz",
+ "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ=="
+ },
+ "node_modules/postcss-ordered-values": {
+ "version": "4.1.2",
+ "resolved": "https://registry.npmjs.org/postcss-ordered-values/-/postcss-ordered-values-4.1.2.tgz",
+ "integrity": "sha512-2fCObh5UanxvSxeXrtLtlwVThBvHn6MQcu4ksNT2tsaV2Fg76R2CV98W7wNSlX+5/pFwEyaDwKLLoEV7uRybAw==",
+ "dependencies": {
+ "cssnano-util-get-arguments": "^4.0.0",
+ "postcss": "^7.0.0",
+ "postcss-value-parser": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/postcss-ordered-values/node_modules/postcss-value-parser": {
+ "version": "3.3.1",
+ "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz",
+ "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ=="
+ },
+ "node_modules/postcss-reduce-initial": {
+ "version": "4.0.3",
+ "resolved": "https://registry.npmjs.org/postcss-reduce-initial/-/postcss-reduce-initial-4.0.3.tgz",
+ "integrity": "sha512-gKWmR5aUulSjbzOfD9AlJiHCGH6AEVLaM0AV+aSioxUDd16qXP1PCh8d1/BGVvpdWn8k/HiK7n6TjeoXN1F7DA==",
+ "dependencies": {
+ "browserslist": "^4.0.0",
+ "caniuse-api": "^3.0.0",
+ "has": "^1.0.0",
+ "postcss": "^7.0.0"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/postcss-reduce-transforms": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/postcss-reduce-transforms/-/postcss-reduce-transforms-4.0.2.tgz",
+ "integrity": "sha512-EEVig1Q2QJ4ELpJXMZR8Vt5DQx8/mo+dGWSR7vWXqcob2gQLyQGsionYcGKATXvQzMPn6DSN1vTN7yFximdIAg==",
+ "dependencies": {
+ "cssnano-util-get-match": "^4.0.0",
+ "has": "^1.0.0",
+ "postcss": "^7.0.0",
+ "postcss-value-parser": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/postcss-reduce-transforms/node_modules/postcss-value-parser": {
+ "version": "3.3.1",
+ "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz",
+ "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ=="
+ },
+ "node_modules/postcss-safe-parser": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/postcss-safe-parser/-/postcss-safe-parser-4.0.2.tgz",
+ "integrity": "sha512-Uw6ekxSWNLCPesSv/cmqf2bY/77z11O7jZGPax3ycZMFU/oi2DMH9i89AdHc1tRwFg/arFoEwX0IS3LCUxJh1g==",
+ "dependencies": {
+ "postcss": "^7.0.26"
+ },
+ "engines": {
+ "node": ">=6.0.0"
+ }
+ },
+ "node_modules/postcss-selector-parser": {
+ "version": "6.0.5",
+ "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.0.5.tgz",
+ "integrity": "sha512-aFYPoYmXbZ1V6HZaSvat08M97A8HqO6Pjz+PiNpw/DhuRrC72XWAdp3hL6wusDCN31sSmcZyMGa2hZEuX+Xfhg==",
+ "dependencies": {
+ "cssesc": "^3.0.0",
+ "util-deprecate": "^1.0.2"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/postcss-svgo": {
+ "version": "4.0.3",
+ "resolved": "https://registry.npmjs.org/postcss-svgo/-/postcss-svgo-4.0.3.tgz",
+ "integrity": "sha512-NoRbrcMWTtUghzuKSoIm6XV+sJdvZ7GZSc3wdBN0W19FTtp2ko8NqLsgoh/m9CzNhU3KLPvQmjIwtaNFkaFTvw==",
+ "dependencies": {
+ "postcss": "^7.0.0",
+ "postcss-value-parser": "^3.0.0",
+ "svgo": "^1.0.0"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/postcss-svgo/node_modules/postcss-value-parser": {
+ "version": "3.3.1",
+ "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz",
+ "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ=="
+ },
+ "node_modules/postcss-unique-selectors": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/postcss-unique-selectors/-/postcss-unique-selectors-4.0.1.tgz",
+ "integrity": "sha512-+JanVaryLo9QwZjKrmJgkI4Fn8SBgRO6WXQBJi7KiAVPlmxikB5Jzc4EvXMT2H0/m0RjrVVm9rGNhZddm/8Spg==",
+ "dependencies": {
+ "alphanum-sort": "^1.0.0",
+ "postcss": "^7.0.0",
+ "uniqs": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/postcss-value-parser": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-4.1.0.tgz",
+ "integrity": "sha512-97DXOFbQJhk71ne5/Mt6cOu6yxsSfM0QGQyl0L25Gca4yGWEGJaig7l7gbCX623VqTBNGLRLaVUCnNkcedlRSQ=="
+ },
+ "node_modules/postcss/node_modules/source-map": {
+ "version": "0.6.1",
+ "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz",
+ "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/postcss/node_modules/supports-color": {
+ "version": "6.1.0",
+ "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-6.1.0.tgz",
+ "integrity": "sha512-qe1jfm1Mg7Nq/NSh6XE24gPXROEVsWHxC1LIx//XNlD9iw7YZQGjZNjYN7xGaEG6iKdA8EtNFW6R0gjnVXp+wQ==",
+ "dependencies": {
+ "has-flag": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/prepend-http": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/prepend-http/-/prepend-http-2.0.0.tgz",
+ "integrity": "sha1-6SQ0v6XqjBn0HN/UAddBo8gZ2Jc=",
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/prettier": {
+ "version": "1.19.1",
+ "resolved": "https://registry.npmjs.org/prettier/-/prettier-1.19.1.tgz",
+ "integrity": "sha512-s7PoyDv/II1ObgQunCbB9PdLmUcBZcnWOcxDh7O0N/UwDEsHyqkW+Qh28jW+mVuCdx7gLB0BotYI1Y6uI9iyew==",
+ "optional": true,
+ "bin": {
+ "prettier": "bin-prettier.js"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/pretty-error": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/pretty-error/-/pretty-error-2.1.2.tgz",
+ "integrity": "sha512-EY5oDzmsX5wvuynAByrmY0P0hcp+QpnAKbJng2A2MPjVKXCxrDSUkzghVJ4ZGPIv+JC4gX8fPUWscC0RtjsWGw==",
+ "dependencies": {
+ "lodash": "^4.17.20",
+ "renderkid": "^2.0.4"
+ }
+ },
+ "node_modules/pretty-time": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/pretty-time/-/pretty-time-1.1.0.tgz",
+ "integrity": "sha512-28iF6xPQrP8Oa6uxE6a1biz+lWeTOAPKggvjB8HAs6nVMKZwf5bG++632Dx614hIWgUPkgivRfG+a8uAXGTIbA==",
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/prismjs": {
+ "version": "1.23.0",
+ "resolved": "https://registry.npmjs.org/prismjs/-/prismjs-1.23.0.tgz",
+ "integrity": "sha512-c29LVsqOaLbBHuIbsTxaKENh1N2EQBOHaWv7gkHN4dgRbxSREqDnDbtFJYdpPauS4YCplMSNCABQ6Eeor69bAA==",
+ "optionalDependencies": {
+ "clipboard": "^2.0.0"
+ }
+ },
+ "node_modules/process": {
+ "version": "0.11.10",
+ "resolved": "https://registry.npmjs.org/process/-/process-0.11.10.tgz",
+ "integrity": "sha1-czIwDoQBYb2j5podHZGn1LwW8YI=",
+ "engines": {
+ "node": ">= 0.6.0"
+ }
+ },
+ "node_modules/process-nextick-args": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz",
+ "integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag=="
+ },
+ "node_modules/promise": {
+ "version": "7.3.1",
+ "resolved": "https://registry.npmjs.org/promise/-/promise-7.3.1.tgz",
+ "integrity": "sha512-nolQXZ/4L+bP/UGlkfaIujX9BKxGwmQ9OT4mOt5yvy8iK1h3wqTEJCijzGANTCCl9nWjY41juyAn2K3Q1hLLTg==",
+ "dependencies": {
+ "asap": "~2.0.3"
+ }
+ },
+ "node_modules/promise-inflight": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/promise-inflight/-/promise-inflight-1.0.1.tgz",
+ "integrity": "sha1-mEcocL8igTL8vdhoEputEsPAKeM="
+ },
+ "node_modules/proxy-addr": {
+ "version": "2.0.6",
+ "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.6.tgz",
+ "integrity": "sha512-dh/frvCBVmSsDYzw6n926jv974gddhkFPfiN8hPOi30Wax25QZyZEGveluCgliBnqmuM+UJmBErbAUFIoDbjOw==",
+ "dependencies": {
+ "forwarded": "~0.1.2",
+ "ipaddr.js": "1.9.1"
+ },
+ "engines": {
+ "node": ">= 0.10"
+ }
+ },
+ "node_modules/prr": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/prr/-/prr-1.0.1.tgz",
+ "integrity": "sha1-0/wRS6BplaRexok/SEzrHXj19HY="
+ },
+ "node_modules/pseudomap": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/pseudomap/-/pseudomap-1.0.2.tgz",
+ "integrity": "sha1-8FKijacOYYkX7wqKw0wa5aaChrM="
+ },
+ "node_modules/psl": {
+ "version": "1.8.0",
+ "resolved": "https://registry.npmjs.org/psl/-/psl-1.8.0.tgz",
+ "integrity": "sha512-RIdOzyoavK+hA18OGGWDqUTsCLhtA7IcZ/6NCs4fFJaHBDab+pDDmDIByWFRQJq2Cd7r1OoQxBGKOaztq+hjIQ=="
+ },
+ "node_modules/public-encrypt": {
+ "version": "4.0.3",
+ "resolved": "https://registry.npmjs.org/public-encrypt/-/public-encrypt-4.0.3.tgz",
+ "integrity": "sha512-zVpa8oKZSz5bTMTFClc1fQOnyyEzpl5ozpi1B5YcvBrdohMjH2rfsBtyXcuNuwjsDIXmBYlF2N5FlJYhR29t8Q==",
+ "dependencies": {
+ "bn.js": "^4.1.0",
+ "browserify-rsa": "^4.0.0",
+ "create-hash": "^1.1.0",
+ "parse-asn1": "^5.0.0",
+ "randombytes": "^2.0.1",
+ "safe-buffer": "^5.1.2"
+ }
+ },
+ "node_modules/public-encrypt/node_modules/bn.js": {
+ "version": "4.12.0",
+ "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.12.0.tgz",
+ "integrity": "sha512-c98Bf3tPniI+scsdk237ku1Dc3ujXQTSgyiPUDEOe7tRkhrqridvh8klBv0HCEso1OLOYcHuCv/cS6DNxKH+ZA=="
+ },
+ "node_modules/pug": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/pug/-/pug-3.0.2.tgz",
+ "integrity": "sha512-bp0I/hiK1D1vChHh6EfDxtndHji55XP/ZJKwsRqrz6lRia6ZC2OZbdAymlxdVFwd1L70ebrVJw4/eZ79skrIaw==",
+ "dependencies": {
+ "pug-code-gen": "^3.0.2",
+ "pug-filters": "^4.0.0",
+ "pug-lexer": "^5.0.1",
+ "pug-linker": "^4.0.0",
+ "pug-load": "^3.0.0",
+ "pug-parser": "^6.0.0",
+ "pug-runtime": "^3.0.1",
+ "pug-strip-comments": "^2.0.0"
+ }
+ },
+ "node_modules/pug-attrs": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/pug-attrs/-/pug-attrs-3.0.0.tgz",
+ "integrity": "sha512-azINV9dUtzPMFQktvTXciNAfAuVh/L/JCl0vtPCwvOA21uZrC08K/UnmrL+SXGEVc1FwzjW62+xw5S/uaLj6cA==",
+ "dependencies": {
+ "constantinople": "^4.0.1",
+ "js-stringify": "^1.0.2",
+ "pug-runtime": "^3.0.0"
+ }
+ },
+ "node_modules/pug-code-gen": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/pug-code-gen/-/pug-code-gen-3.0.2.tgz",
+ "integrity": "sha512-nJMhW16MbiGRiyR4miDTQMRWDgKplnHyeLvioEJYbk1RsPI3FuA3saEP8uwnTb2nTJEKBU90NFVWJBk4OU5qyg==",
+ "dependencies": {
+ "constantinople": "^4.0.1",
+ "doctypes": "^1.1.0",
+ "js-stringify": "^1.0.2",
+ "pug-attrs": "^3.0.0",
+ "pug-error": "^2.0.0",
+ "pug-runtime": "^3.0.0",
+ "void-elements": "^3.1.0",
+ "with": "^7.0.0"
+ }
+ },
+ "node_modules/pug-error": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/pug-error/-/pug-error-2.0.0.tgz",
+ "integrity": "sha512-sjiUsi9M4RAGHktC1drQfCr5C5eriu24Lfbt4s+7SykztEOwVZtbFk1RRq0tzLxcMxMYTBR+zMQaG07J/btayQ=="
+ },
+ "node_modules/pug-filters": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/pug-filters/-/pug-filters-4.0.0.tgz",
+ "integrity": "sha512-yeNFtq5Yxmfz0f9z2rMXGw/8/4i1cCFecw/Q7+D0V2DdtII5UvqE12VaZ2AY7ri6o5RNXiweGH79OCq+2RQU4A==",
+ "dependencies": {
+ "constantinople": "^4.0.1",
+ "jstransformer": "1.0.0",
+ "pug-error": "^2.0.0",
+ "pug-walk": "^2.0.0",
+ "resolve": "^1.15.1"
+ }
+ },
+ "node_modules/pug-lexer": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/pug-lexer/-/pug-lexer-5.0.1.tgz",
+ "integrity": "sha512-0I6C62+keXlZPZkOJeVam9aBLVP2EnbeDw3An+k0/QlqdwH6rv8284nko14Na7c0TtqtogfWXcRoFE4O4Ff20w==",
+ "dependencies": {
+ "character-parser": "^2.2.0",
+ "is-expression": "^4.0.0",
+ "pug-error": "^2.0.0"
+ }
+ },
+ "node_modules/pug-linker": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/pug-linker/-/pug-linker-4.0.0.tgz",
+ "integrity": "sha512-gjD1yzp0yxbQqnzBAdlhbgoJL5qIFJw78juN1NpTLt/mfPJ5VgC4BvkoD3G23qKzJtIIXBbcCt6FioLSFLOHdw==",
+ "dependencies": {
+ "pug-error": "^2.0.0",
+ "pug-walk": "^2.0.0"
+ }
+ },
+ "node_modules/pug-load": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/pug-load/-/pug-load-3.0.0.tgz",
+ "integrity": "sha512-OCjTEnhLWZBvS4zni/WUMjH2YSUosnsmjGBB1An7CsKQarYSWQ0GCVyd4eQPMFJqZ8w9xgs01QdiZXKVjk92EQ==",
+ "dependencies": {
+ "object-assign": "^4.1.1",
+ "pug-walk": "^2.0.0"
+ }
+ },
+ "node_modules/pug-parser": {
+ "version": "6.0.0",
+ "resolved": "https://registry.npmjs.org/pug-parser/-/pug-parser-6.0.0.tgz",
+ "integrity": "sha512-ukiYM/9cH6Cml+AOl5kETtM9NR3WulyVP2y4HOU45DyMim1IeP/OOiyEWRr6qk5I5klpsBnbuHpwKmTx6WURnw==",
+ "dependencies": {
+ "pug-error": "^2.0.0",
+ "token-stream": "1.0.0"
+ }
+ },
+ "node_modules/pug-plain-loader": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/pug-plain-loader/-/pug-plain-loader-1.1.0.tgz",
+ "integrity": "sha512-1nYgIJLaahRuHJHhzSPODV44aZfb00bO7kiJiMkke6Hj4SVZftuvx6shZ4BOokk50dJc2RSFqNUBOlus0dniFQ==",
+ "dependencies": {
+ "loader-utils": "^1.1.0"
+ },
+ "peerDependencies": {
+ "pug": "^2.0.0 || ^3.0.0"
+ }
+ },
+ "node_modules/pug-runtime": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/pug-runtime/-/pug-runtime-3.0.1.tgz",
+ "integrity": "sha512-L50zbvrQ35TkpHwv0G6aLSuueDRwc/97XdY8kL3tOT0FmhgG7UypU3VztfV/LATAvmUfYi4wNxSajhSAeNN+Kg=="
+ },
+ "node_modules/pug-strip-comments": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/pug-strip-comments/-/pug-strip-comments-2.0.0.tgz",
+ "integrity": "sha512-zo8DsDpH7eTkPHCXFeAk1xZXJbyoTfdPlNR0bK7rpOMuhBYb0f5qUVCO1xlsitYd3w5FQTK7zpNVKb3rZoUrrQ==",
+ "dependencies": {
+ "pug-error": "^2.0.0"
+ }
+ },
+ "node_modules/pug-walk": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/pug-walk/-/pug-walk-2.0.0.tgz",
+ "integrity": "sha512-yYELe9Q5q9IQhuvqsZNwA5hfPkMJ8u92bQLIMcsMxf/VADjNtEYptU+inlufAFYcWdHlwNfZOEnOOQrZrcyJCQ=="
+ },
+ "node_modules/pump": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/pump/-/pump-3.0.0.tgz",
+ "integrity": "sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww==",
+ "dependencies": {
+ "end-of-stream": "^1.1.0",
+ "once": "^1.3.1"
+ }
+ },
+ "node_modules/pumpify": {
+ "version": "1.5.1",
+ "resolved": "https://registry.npmjs.org/pumpify/-/pumpify-1.5.1.tgz",
+ "integrity": "sha512-oClZI37HvuUJJxSKKrC17bZ9Cu0ZYhEAGPsPUy9KlMUmv9dKX2o77RUmq7f3XjIxbwyGwYzbzQ1L2Ks8sIradQ==",
+ "dependencies": {
+ "duplexify": "^3.6.0",
+ "inherits": "^2.0.3",
+ "pump": "^2.0.0"
+ }
+ },
+ "node_modules/pumpify/node_modules/pump": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/pump/-/pump-2.0.1.tgz",
+ "integrity": "sha512-ruPMNRkN3MHP1cWJc9OWr+T/xDP0jhXYCLfJcBuX54hhfIBnaQmAUMfDcG4DM5UMWByBbJY69QSphm3jtDKIkA==",
+ "dependencies": {
+ "end-of-stream": "^1.1.0",
+ "once": "^1.3.1"
+ }
+ },
+ "node_modules/punycode": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.1.1.tgz",
+ "integrity": "sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A==",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/pupa": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/pupa/-/pupa-2.1.1.tgz",
+ "integrity": "sha512-l1jNAspIBSFqbT+y+5FosojNpVpF94nlI+wDUpqP9enwOTfHx9f0gh5nB96vl+6yTpsJsypeNrwfzPrKuHB41A==",
+ "dependencies": {
+ "escape-goat": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/q": {
+ "version": "1.5.1",
+ "resolved": "https://registry.npmjs.org/q/-/q-1.5.1.tgz",
+ "integrity": "sha1-fjL3W0E4EpHQRhHxvxQQmsAGUdc=",
+ "engines": {
+ "node": ">=0.6.0",
+ "teleport": ">=0.2.0"
+ }
+ },
+ "node_modules/qs": {
+ "version": "6.5.2",
+ "resolved": "https://registry.npmjs.org/qs/-/qs-6.5.2.tgz",
+ "integrity": "sha512-N5ZAX4/LxJmF+7wN74pUD6qAh9/wnvdQcjq9TZjevvXzSUo7bfmw91saqMjzGS2xq91/odN2dW/WOl7qQHNDGA==",
+ "engines": {
+ "node": ">=0.6"
+ }
+ },
+ "node_modules/query-string": {
+ "version": "5.1.1",
+ "resolved": "https://registry.npmjs.org/query-string/-/query-string-5.1.1.tgz",
+ "integrity": "sha512-gjWOsm2SoGlgLEdAGt7a6slVOk9mGiXmPFMqrEhLQ68rhQuBnpfs3+EmlvqKyxnCo9/PPlF+9MtY02S1aFg+Jw==",
+ "dependencies": {
+ "decode-uri-component": "^0.2.0",
+ "object-assign": "^4.1.0",
+ "strict-uri-encode": "^1.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/querystring": {
+ "version": "0.2.1",
+ "resolved": "https://registry.npmjs.org/querystring/-/querystring-0.2.1.tgz",
+ "integrity": "sha512-wkvS7mL/JMugcup3/rMitHmd9ecIGd2lhFhK9N3UUQ450h66d1r3Y9nvXzQAW1Lq+wyx61k/1pfKS5KuKiyEbg==",
+ "engines": {
+ "node": ">=0.4.x"
+ }
+ },
+ "node_modules/querystring-es3": {
+ "version": "0.2.1",
+ "resolved": "https://registry.npmjs.org/querystring-es3/-/querystring-es3-0.2.1.tgz",
+ "integrity": "sha1-nsYfeQSYdXB9aUFFlv2Qek1xHnM=",
+ "engines": {
+ "node": ">=0.4.x"
+ }
+ },
+ "node_modules/querystringify": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/querystringify/-/querystringify-2.2.0.tgz",
+ "integrity": "sha512-FIqgj2EUvTa7R50u0rGsyTftzjYmv/a3hO345bZNrqabNqjtgiDMgmo4mkUjd+nzU5oF3dClKqFIPUKybUyqoQ=="
+ },
+ "node_modules/randombytes": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz",
+ "integrity": "sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==",
+ "dependencies": {
+ "safe-buffer": "^5.1.0"
+ }
+ },
+ "node_modules/randomfill": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/randomfill/-/randomfill-1.0.4.tgz",
+ "integrity": "sha512-87lcbR8+MhcWcUiQ+9e+Rwx8MyR2P7qnt15ynUlbm3TU/fjbgz4GsvfSUDTemtCCtVCqb4ZcEFlyPNTh9bBTLw==",
+ "dependencies": {
+ "randombytes": "^2.0.5",
+ "safe-buffer": "^5.1.0"
+ }
+ },
+ "node_modules/range-parser": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz",
+ "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/raw-body": {
+ "version": "2.4.0",
+ "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.4.0.tgz",
+ "integrity": "sha512-4Oz8DUIwdvoa5qMJelxipzi/iJIi40O5cGV1wNYp5hvZP8ZN0T+jiNkL0QepXs+EsQ9XJ8ipEDoiH70ySUJP3Q==",
+ "dependencies": {
+ "bytes": "3.1.0",
+ "http-errors": "1.7.2",
+ "iconv-lite": "0.4.24",
+ "unpipe": "1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/raw-body/node_modules/bytes": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.0.tgz",
+ "integrity": "sha512-zauLjrfCG+xvoyaqLoV8bLVXXNGC4JqlxFCutSDWA6fJrTo2ZuvLYTqZ7aHBLZSMOopbzwv8f+wZcVzfVTI2Dg==",
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/rc": {
+ "version": "1.2.8",
+ "resolved": "https://registry.npmjs.org/rc/-/rc-1.2.8.tgz",
+ "integrity": "sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw==",
+ "dependencies": {
+ "deep-extend": "^0.6.0",
+ "ini": "~1.3.0",
+ "minimist": "^1.2.0",
+ "strip-json-comments": "~2.0.1"
+ },
+ "bin": {
+ "rc": "cli.js"
+ }
+ },
+ "node_modules/readable-stream": {
+ "version": "2.3.7",
+ "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.7.tgz",
+ "integrity": "sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw==",
+ "dependencies": {
+ "core-util-is": "~1.0.0",
+ "inherits": "~2.0.3",
+ "isarray": "~1.0.0",
+ "process-nextick-args": "~2.0.0",
+ "safe-buffer": "~5.1.1",
+ "string_decoder": "~1.1.1",
+ "util-deprecate": "~1.0.1"
+ }
+ },
+ "node_modules/readdirp": {
+ "version": "2.2.1",
+ "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-2.2.1.tgz",
+ "integrity": "sha512-1JU/8q+VgFZyxwrJ+SVIOsh+KywWGpds3NTqikiKpDMZWScmAYyKIgqkO+ARvNWJfXeXR1zxz7aHF4u4CyH6vQ==",
+ "dependencies": {
+ "graceful-fs": "^4.1.11",
+ "micromatch": "^3.1.10",
+ "readable-stream": "^2.0.2"
+ },
+ "engines": {
+ "node": ">=0.10"
+ }
+ },
+ "node_modules/reduce": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/reduce/-/reduce-1.0.2.tgz",
+ "integrity": "sha512-xX7Fxke/oHO5IfZSk77lvPa/7bjMh9BuCk4OOoX5XTXrM7s0Z+MkPfSDfz0q7r91BhhGSs8gii/VEN/7zhCPpQ==",
+ "dependencies": {
+ "object-keys": "^1.1.0"
+ }
+ },
+ "node_modules/regenerate": {
+ "version": "1.4.2",
+ "resolved": "https://registry.npmjs.org/regenerate/-/regenerate-1.4.2.tgz",
+ "integrity": "sha512-zrceR/XhGYU/d/opr2EKO7aRHUeiBI8qjtfHqADTwZd6Szfy16la6kqD0MIUs5z5hx6AaKa+PixpPrR289+I0A=="
+ },
+ "node_modules/regenerate-unicode-properties": {
+ "version": "8.2.0",
+ "resolved": "https://registry.npmjs.org/regenerate-unicode-properties/-/regenerate-unicode-properties-8.2.0.tgz",
+ "integrity": "sha512-F9DjY1vKLo/tPePDycuH3dn9H1OTPIkVD9Kz4LODu+F2C75mgjAJ7x/gwy6ZcSNRAAkhNlJSOHRe8k3p+K9WhA==",
+ "dependencies": {
+ "regenerate": "^1.4.0"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/regenerator-runtime": {
+ "version": "0.13.7",
+ "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.13.7.tgz",
+ "integrity": "sha512-a54FxoJDIr27pgf7IgeQGxmqUNYrcV338lf/6gH456HZ/PhX+5BcwHXG9ajESmwe6WRO0tAzRUrRmNONWgkrew=="
+ },
+ "node_modules/regenerator-transform": {
+ "version": "0.14.5",
+ "resolved": "https://registry.npmjs.org/regenerator-transform/-/regenerator-transform-0.14.5.tgz",
+ "integrity": "sha512-eOf6vka5IO151Jfsw2NO9WpGX58W6wWmefK3I1zEGr0lOD0u8rwPaNqQL1aRxUaxLeKO3ArNh3VYg1KbaD+FFw==",
+ "dependencies": {
+ "@babel/runtime": "^7.8.4"
+ }
+ },
+ "node_modules/regex-not": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/regex-not/-/regex-not-1.0.2.tgz",
+ "integrity": "sha512-J6SDjUgDxQj5NusnOtdFxDwN/+HWykR8GELwctJ7mdqhcyy1xEc4SRFHUXvxTp661YaVKAjfRLZ9cCqS6tn32A==",
+ "dependencies": {
+ "extend-shallow": "^3.0.2",
+ "safe-regex": "^1.1.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/regex-not/node_modules/extend-shallow": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-3.0.2.tgz",
+ "integrity": "sha1-Jqcarwc7OfshJxcnRhMcJwQCjbg=",
+ "dependencies": {
+ "assign-symbols": "^1.0.0",
+ "is-extendable": "^1.0.1"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/regex-not/node_modules/is-extendable": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz",
+ "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==",
+ "dependencies": {
+ "is-plain-object": "^2.0.4"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/regexp.prototype.flags": {
+ "version": "1.3.1",
+ "resolved": "https://registry.npmjs.org/regexp.prototype.flags/-/regexp.prototype.flags-1.3.1.tgz",
+ "integrity": "sha512-JiBdRBq91WlY7uRJ0ds7R+dU02i6LKi8r3BuQhNXn+kmeLN+EfHhfjqMRis1zJxnlu88hq/4dx0P2OP3APRTOA==",
+ "dependencies": {
+ "call-bind": "^1.0.2",
+ "define-properties": "^1.1.3"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+       "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/regexpu-core": {
+ "version": "4.7.1",
+ "resolved": "https://registry.npmjs.org/regexpu-core/-/regexpu-core-4.7.1.tgz",
+ "integrity": "sha512-ywH2VUraA44DZQuRKzARmw6S66mr48pQVva4LBeRhcOltJ6hExvWly5ZjFLYo67xbIxb6W1q4bAGtgfEl20zfQ==",
+ "dependencies": {
+ "regenerate": "^1.4.0",
+ "regenerate-unicode-properties": "^8.2.0",
+ "regjsgen": "^0.5.1",
+ "regjsparser": "^0.6.4",
+ "unicode-match-property-ecmascript": "^1.0.4",
+ "unicode-match-property-value-ecmascript": "^1.2.0"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/registry-auth-token": {
+ "version": "4.2.1",
+ "resolved": "https://registry.npmjs.org/registry-auth-token/-/registry-auth-token-4.2.1.tgz",
+ "integrity": "sha512-6gkSb4U6aWJB4SF2ZvLb76yCBjcvufXBqvvEx1HbmKPkutswjW1xNVRY0+daljIYRbogN7O0etYSlbiaEQyMyw==",
+ "dependencies": {
+ "rc": "^1.2.8"
+ },
+ "engines": {
+ "node": ">=6.0.0"
+ }
+ },
+ "node_modules/registry-url": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/registry-url/-/registry-url-5.1.0.tgz",
+ "integrity": "sha512-8acYXXTI0AkQv6RAOjE3vOaIXZkT9wo4LOFbBKYQEEnnMNBpKqdUrI6S4NT0KPIo/WVvJ5tE/X5LF/TQUf0ekw==",
+ "dependencies": {
+ "rc": "^1.2.8"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/regjsgen": {
+ "version": "0.5.2",
+ "resolved": "https://registry.npmjs.org/regjsgen/-/regjsgen-0.5.2.tgz",
+ "integrity": "sha512-OFFT3MfrH90xIW8OOSyUrk6QHD5E9JOTeGodiJeBS3J6IwlgzJMNE/1bZklWz5oTg+9dCMyEetclvCVXOPoN3A=="
+ },
+ "node_modules/regjsparser": {
+ "version": "0.6.9",
+ "resolved": "https://registry.npmjs.org/regjsparser/-/regjsparser-0.6.9.tgz",
+ "integrity": "sha512-ZqbNRz1SNjLAiYuwY0zoXW8Ne675IX5q+YHioAGbCw4X96Mjl2+dcX9B2ciaeyYjViDAfvIjFpQjJgLttTEERQ==",
+ "dependencies": {
+ "jsesc": "~0.5.0"
+ },
+ "bin": {
+ "regjsparser": "bin/parser"
+ }
+ },
+ "node_modules/regjsparser/node_modules/jsesc": {
+ "version": "0.5.0",
+ "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-0.5.0.tgz",
+ "integrity": "sha1-597mbjXW/Bb3EP6R1c9p9w8IkR0=",
+ "bin": {
+ "jsesc": "bin/jsesc"
+ }
+ },
+ "node_modules/relateurl": {
+ "version": "0.2.7",
+ "resolved": "https://registry.npmjs.org/relateurl/-/relateurl-0.2.7.tgz",
+ "integrity": "sha1-VNvzd+UUQKypCkzSdGANP/LYiKk=",
+ "engines": {
+ "node": ">= 0.10"
+ }
+ },
+ "node_modules/remove-trailing-separator": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/remove-trailing-separator/-/remove-trailing-separator-1.1.0.tgz",
+ "integrity": "sha1-wkvOKig62tW8P1jg1IJJuSN52O8="
+ },
+ "node_modules/renderkid": {
+ "version": "2.0.5",
+ "resolved": "https://registry.npmjs.org/renderkid/-/renderkid-2.0.5.tgz",
+ "integrity": "sha512-ccqoLg+HLOHq1vdfYNm4TBeaCDIi1FLt3wGojTDSvdewUv65oTmI3cnT2E4hRjl1gzKZIPK+KZrXzlUYKnR+vQ==",
+ "dependencies": {
+ "css-select": "^2.0.2",
+ "dom-converter": "^0.2",
+ "htmlparser2": "^3.10.1",
+ "lodash": "^4.17.20",
+ "strip-ansi": "^3.0.0"
+ }
+ },
+ "node_modules/renderkid/node_modules/ansi-regex": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-2.1.1.tgz",
+ "integrity": "sha1-w7M6te42DYbg5ijwRorn7yfWVN8=",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/renderkid/node_modules/css-select": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/css-select/-/css-select-2.1.0.tgz",
+ "integrity": "sha512-Dqk7LQKpwLoH3VovzZnkzegqNSuAziQyNZUcrdDM401iY+R5NkGBXGmtO05/yaXQziALuPogeG0b7UAgjnTJTQ==",
+ "dependencies": {
+ "boolbase": "^1.0.0",
+ "css-what": "^3.2.1",
+ "domutils": "^1.7.0",
+ "nth-check": "^1.0.2"
+ }
+ },
+ "node_modules/renderkid/node_modules/css-what": {
+ "version": "3.4.2",
+ "resolved": "https://registry.npmjs.org/css-what/-/css-what-3.4.2.tgz",
+ "integrity": "sha512-ACUm3L0/jiZTqfzRM3Hi9Q8eZqd6IK37mMWPLz9PJxkLWllYeRf+EHUSHYEtFop2Eqytaq1FizFVh7XfBnXCDQ==",
+ "engines": {
+ "node": ">= 6"
+ },
+ "funding": {
+       "url": "https://github.com/sponsors/fb55"
+ }
+ },
+ "node_modules/renderkid/node_modules/dom-serializer": {
+ "version": "0.2.2",
+ "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-0.2.2.tgz",
+ "integrity": "sha512-2/xPb3ORsQ42nHYiSunXkDjPLBaEj/xTwUO4B7XCZQTRk7EBtTOPaygh10YAAh2OI1Qrp6NWfpAhzswj0ydt9g==",
+ "dependencies": {
+ "domelementtype": "^2.0.1",
+ "entities": "^2.0.0"
+ }
+ },
+ "node_modules/renderkid/node_modules/domhandler": {
+ "version": "2.4.2",
+ "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-2.4.2.tgz",
+ "integrity": "sha512-JiK04h0Ht5u/80fdLMCEmV4zkNh2BcoMFBmZ/91WtYZ8qVXSKjiw7fXMgFPnHcSZgOo3XdinHvmnDUeMf5R4wA==",
+ "dependencies": {
+ "domelementtype": "1"
+ }
+ },
+ "node_modules/renderkid/node_modules/domhandler/node_modules/domelementtype": {
+ "version": "1.3.1",
+ "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-1.3.1.tgz",
+ "integrity": "sha512-BSKB+TSpMpFI/HOxCNr1O8aMOTZ8hT3pM3GQ0w/mWRmkhEDSFJkkyzz4XQsBV44BChwGkrDfMyjVD0eA2aFV3w=="
+ },
+ "node_modules/renderkid/node_modules/domutils": {
+ "version": "1.7.0",
+ "resolved": "https://registry.npmjs.org/domutils/-/domutils-1.7.0.tgz",
+ "integrity": "sha512-Lgd2XcJ/NjEw+7tFvfKxOzCYKZsdct5lczQ2ZaQY8Djz7pfAD3Gbp8ySJWtreII/vDlMVmxwa6pHmdxIYgttDg==",
+ "dependencies": {
+ "dom-serializer": "0",
+ "domelementtype": "1"
+ }
+ },
+ "node_modules/renderkid/node_modules/domutils/node_modules/domelementtype": {
+ "version": "1.3.1",
+ "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-1.3.1.tgz",
+ "integrity": "sha512-BSKB+TSpMpFI/HOxCNr1O8aMOTZ8hT3pM3GQ0w/mWRmkhEDSFJkkyzz4XQsBV44BChwGkrDfMyjVD0eA2aFV3w=="
+ },
+ "node_modules/renderkid/node_modules/htmlparser2": {
+ "version": "3.10.1",
+ "resolved": "https://registry.npmjs.org/htmlparser2/-/htmlparser2-3.10.1.tgz",
+ "integrity": "sha512-IgieNijUMbkDovyoKObU1DUhm1iwNYE/fuifEoEHfd1oZKZDaONBSkal7Y01shxsM49R4XaMdGez3WnF9UfiCQ==",
+ "dependencies": {
+ "domelementtype": "^1.3.1",
+ "domhandler": "^2.3.0",
+ "domutils": "^1.5.1",
+ "entities": "^1.1.1",
+ "inherits": "^2.0.1",
+ "readable-stream": "^3.1.1"
+ }
+ },
+ "node_modules/renderkid/node_modules/htmlparser2/node_modules/domelementtype": {
+ "version": "1.3.1",
+ "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-1.3.1.tgz",
+ "integrity": "sha512-BSKB+TSpMpFI/HOxCNr1O8aMOTZ8hT3pM3GQ0w/mWRmkhEDSFJkkyzz4XQsBV44BChwGkrDfMyjVD0eA2aFV3w=="
+ },
+ "node_modules/renderkid/node_modules/htmlparser2/node_modules/entities": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/entities/-/entities-1.1.2.tgz",
+ "integrity": "sha512-f2LZMYl1Fzu7YSBKg+RoROelpOaNrcGmE9AZubeDfrCEia483oW4MI4VyFd5VNHIgQ/7qm1I0wUHK1eJnn2y2w=="
+ },
+ "node_modules/renderkid/node_modules/nth-check": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/nth-check/-/nth-check-1.0.2.tgz",
+ "integrity": "sha512-WeBOdju8SnzPN5vTUJYxYUxLeXpCaVP5i5e0LF8fg7WORF2Wd7wFX/pk0tYZk7s8T+J7VLy0Da6J1+wCT0AtHg==",
+ "dependencies": {
+ "boolbase": "~1.0.0"
+ }
+ },
+ "node_modules/renderkid/node_modules/readable-stream": {
+ "version": "3.6.0",
+ "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.0.tgz",
+ "integrity": "sha512-BViHy7LKeTz4oNnkcLJ+lVSL6vpiFeX6/d3oSH8zCW7UxP2onchk+vTGB143xuFjHS3deTgkKoXXymXqymiIdA==",
+ "dependencies": {
+ "inherits": "^2.0.3",
+ "string_decoder": "^1.1.1",
+ "util-deprecate": "^1.0.1"
+ },
+ "engines": {
+ "node": ">= 6"
+ }
+ },
+ "node_modules/renderkid/node_modules/strip-ansi": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-3.0.1.tgz",
+ "integrity": "sha1-ajhfuIU9lS1f8F0Oiq+UJ43GPc8=",
+ "dependencies": {
+ "ansi-regex": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/repeat-element": {
+ "version": "1.1.4",
+ "resolved": "https://registry.npmjs.org/repeat-element/-/repeat-element-1.1.4.tgz",
+ "integrity": "sha512-LFiNfRcSu7KK3evMyYOuCzv3L10TW7yC1G2/+StMjK8Y6Vqd2MG7r/Qjw4ghtuCOjFvlnms/iMmLqpvW/ES/WQ==",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/repeat-string": {
+ "version": "1.6.1",
+ "resolved": "https://registry.npmjs.org/repeat-string/-/repeat-string-1.6.1.tgz",
+ "integrity": "sha1-jcrkcOHIirwtYA//Sndihtp15jc=",
+ "engines": {
+ "node": ">=0.10"
+ }
+ },
+ "node_modules/request": {
+ "version": "2.88.2",
+ "resolved": "https://registry.npmjs.org/request/-/request-2.88.2.tgz",
+ "integrity": "sha512-MsvtOrfG9ZcrOwAW+Qi+F6HbD0CWXEh9ou77uOb7FM2WPhwT7smM833PzanhJLsgXjN89Ir6V2PczXNnMpwKhw==",
+     "deprecated": "request has been deprecated, see https://github.com/request/request/issues/3142",
+ "dependencies": {
+ "aws-sign2": "~0.7.0",
+ "aws4": "^1.8.0",
+ "caseless": "~0.12.0",
+ "combined-stream": "~1.0.6",
+ "extend": "~3.0.2",
+ "forever-agent": "~0.6.1",
+ "form-data": "~2.3.2",
+ "har-validator": "~5.1.3",
+ "http-signature": "~1.2.0",
+ "is-typedarray": "~1.0.0",
+ "isstream": "~0.1.2",
+ "json-stringify-safe": "~5.0.1",
+ "mime-types": "~2.1.19",
+ "oauth-sign": "~0.9.0",
+ "performance-now": "^2.1.0",
+ "qs": "~6.5.2",
+ "safe-buffer": "^5.1.2",
+ "tough-cookie": "~2.5.0",
+ "tunnel-agent": "^0.6.0",
+ "uuid": "^3.3.2"
+ },
+ "engines": {
+ "node": ">= 6"
+ }
+ },
+ "node_modules/require-directory": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz",
+ "integrity": "sha1-jGStX9MNqxyXbiNE/+f3kqam30I=",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/require-main-filename": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/require-main-filename/-/require-main-filename-2.0.0.tgz",
+ "integrity": "sha512-NKN5kMDylKuldxYLSUfrbo5Tuzh4hd+2E8NPPX02mZtn1VuREQToYe/ZdlJy+J3uCpfaiGF05e7B8W0iXbQHmg=="
+ },
+ "node_modules/requires-port": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/requires-port/-/requires-port-1.0.0.tgz",
+ "integrity": "sha1-kl0mAdOaxIXgkc8NpcbmlNw9yv8="
+ },
+ "node_modules/resolve": {
+ "version": "1.20.0",
+ "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.20.0.tgz",
+ "integrity": "sha512-wENBPt4ySzg4ybFQW2TT1zMQucPK95HSh/nq2CFTZVOGut2+pQvSsgtda4d26YrYcr067wjbmzOG8byDPBX63A==",
+ "dependencies": {
+ "is-core-module": "^2.2.0",
+ "path-parse": "^1.0.6"
+ },
+ "funding": {
+       "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/resolve-cwd": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/resolve-cwd/-/resolve-cwd-2.0.0.tgz",
+ "integrity": "sha1-AKn3OHVW4nA46uIyyqNypqWbZlo=",
+ "dependencies": {
+ "resolve-from": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/resolve-from": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-3.0.0.tgz",
+ "integrity": "sha1-six699nWiBvItuZTM17rywoYh0g=",
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/resolve-url": {
+ "version": "0.2.1",
+ "resolved": "https://registry.npmjs.org/resolve-url/-/resolve-url-0.2.1.tgz",
+ "integrity": "sha1-LGN/53yJOv0qZj/iGqkIAGjiBSo=",
+     "deprecated": "https://github.com/lydell/resolve-url#deprecated"
+ },
+ "node_modules/responselike": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/responselike/-/responselike-1.0.2.tgz",
+ "integrity": "sha1-kYcg7ztjHFZCvgaPFa3lpG9Loec=",
+ "dependencies": {
+ "lowercase-keys": "^1.0.0"
+ }
+ },
+ "node_modules/ret": {
+ "version": "0.1.15",
+ "resolved": "https://registry.npmjs.org/ret/-/ret-0.1.15.tgz",
+ "integrity": "sha512-TTlYpa+OL+vMMNG24xSlQGEJ3B/RzEfUlLct7b5G/ytav+wPrplCpVMFuwzXbkecJrb6IYo1iFb0S9v37754mg==",
+ "engines": {
+ "node": ">=0.12"
+ }
+ },
+ "node_modules/retry": {
+ "version": "0.12.0",
+ "resolved": "https://registry.npmjs.org/retry/-/retry-0.12.0.tgz",
+ "integrity": "sha1-G0KmJmoh8HQh0bC1S33BZ7AcATs=",
+ "engines": {
+ "node": ">= 4"
+ }
+ },
+ "node_modules/rgb-regex": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/rgb-regex/-/rgb-regex-1.0.1.tgz",
+ "integrity": "sha1-wODWiC3w4jviVKR16O3UGRX+rrE="
+ },
+ "node_modules/rgba-regex": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/rgba-regex/-/rgba-regex-1.0.0.tgz",
+ "integrity": "sha1-QzdOLiyglosO8VI0YLfXMP8i7rM="
+ },
+ "node_modules/rimraf": {
+ "version": "2.7.1",
+ "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-2.7.1.tgz",
+ "integrity": "sha512-uWjbaKIK3T1OSVptzX7Nl6PvQ3qAGtKEtVRjRuazjfL3Bx5eI409VZSqgND+4UNnmzLVdPj9FqFJNPqBZFve4w==",
+ "dependencies": {
+ "glob": "^7.1.3"
+ },
+ "bin": {
+ "rimraf": "bin.js"
+ }
+ },
+ "node_modules/ripemd160": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/ripemd160/-/ripemd160-2.0.2.tgz",
+ "integrity": "sha512-ii4iagi25WusVoiC4B4lq7pbXfAp3D9v5CwfkY33vffw2+pkDjY1D8GaN7spsxvCSx8dkPqOZCEZyfxcmJG2IA==",
+ "dependencies": {
+ "hash-base": "^3.0.0",
+ "inherits": "^2.0.1"
+ }
+ },
+ "node_modules/run-queue": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/run-queue/-/run-queue-1.0.3.tgz",
+ "integrity": "sha1-6Eg5bwV9Ij8kOGkkYY4laUFh7Ec=",
+ "dependencies": {
+ "aproba": "^1.1.1"
+ }
+ },
+ "node_modules/safe-buffer": {
+ "version": "5.1.2",
+ "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz",
+ "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g=="
+ },
+ "node_modules/safe-regex": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/safe-regex/-/safe-regex-1.1.0.tgz",
+ "integrity": "sha1-QKNmnzsHfR6UPURinhV91IAjvy4=",
+ "dependencies": {
+ "ret": "~0.1.10"
+ }
+ },
+ "node_modules/safer-buffer": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz",
+ "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg=="
+ },
+ "node_modules/sax": {
+ "version": "1.2.4",
+ "resolved": "https://registry.npmjs.org/sax/-/sax-1.2.4.tgz",
+ "integrity": "sha512-NqVDv9TpANUjFm0N8uM5GxL36UgKi9/atZw+x7YFnQ8ckwFGKrl4xX4yWtrey3UJm5nP1kUbnYgLopqWNSRhWw=="
+ },
+ "node_modules/schema-utils": {
+ "version": "2.7.1",
+ "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-2.7.1.tgz",
+ "integrity": "sha512-SHiNtMOUGWBQJwzISiVYKu82GiV4QYGePp3odlY1tuKO7gPtphAT5R/py0fA6xtbgLL/RvtJZnU9b8s0F1q0Xg==",
+ "dependencies": {
+ "@types/json-schema": "^7.0.5",
+ "ajv": "^6.12.4",
+ "ajv-keywords": "^3.5.2"
+ },
+ "engines": {
+ "node": ">= 8.9.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/webpack"
+ }
+ },
+ "node_modules/section-matter": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/section-matter/-/section-matter-1.0.0.tgz",
+ "integrity": "sha512-vfD3pmTzGpufjScBh50YHKzEu2lxBWhVEHsNGoEXmCmn2hKGfeNLYMzCJpe8cD7gqX7TJluOVpBkAequ6dgMmA==",
+ "dependencies": {
+ "extend-shallow": "^2.0.1",
+ "kind-of": "^6.0.0"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/select": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/select/-/select-1.1.2.tgz",
+ "integrity": "sha1-DnNQrN7ICxEIUoeG7B1EGNEbOW0=",
+ "optional": true
+ },
+ "node_modules/select-hose": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/select-hose/-/select-hose-2.0.0.tgz",
+ "integrity": "sha1-Yl2GWPhlr0Psliv8N2o3NZpJlMo="
+ },
+ "node_modules/selfsigned": {
+ "version": "1.10.8",
+ "resolved": "https://registry.npmjs.org/selfsigned/-/selfsigned-1.10.8.tgz",
+ "integrity": "sha512-2P4PtieJeEwVgTU9QEcwIRDQ/mXJLX8/+I3ur+Pg16nS8oNbrGxEso9NyYWy8NAmXiNl4dlAp5MwoNeCWzON4w==",
+ "dependencies": {
+ "node-forge": "^0.10.0"
+ }
+ },
+ "node_modules/semver": {
+ "version": "6.3.0",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz",
+ "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==",
+ "bin": {
+ "semver": "bin/semver.js"
+ }
+ },
+ "node_modules/semver-diff": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/semver-diff/-/semver-diff-3.1.1.tgz",
+ "integrity": "sha512-GX0Ix/CJcHyB8c4ykpHGIAvLyOwOobtM/8d+TQkAd81/bEjgPHrfba41Vpesr7jX/t8Uh+R3EX9eAS5be+jQYg==",
+ "dependencies": {
+ "semver": "^6.3.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/send": {
+ "version": "0.17.1",
+ "resolved": "https://registry.npmjs.org/send/-/send-0.17.1.tgz",
+ "integrity": "sha512-BsVKsiGcQMFwT8UxypobUKyv7irCNRHk1T0G680vk88yf6LBByGcZJOTJCrTP2xVN6yI+XjPJcNuE3V4fT9sAg==",
+ "dependencies": {
+ "debug": "2.6.9",
+ "depd": "~1.1.2",
+ "destroy": "~1.0.4",
+ "encodeurl": "~1.0.2",
+ "escape-html": "~1.0.3",
+ "etag": "~1.8.1",
+ "fresh": "0.5.2",
+ "http-errors": "~1.7.2",
+ "mime": "1.6.0",
+ "ms": "2.1.1",
+ "on-finished": "~2.3.0",
+ "range-parser": "~1.2.1",
+ "statuses": "~1.5.0"
+ },
+ "engines": {
+ "node": ">= 0.8.0"
+ }
+ },
+ "node_modules/send/node_modules/mime": {
+ "version": "1.6.0",
+ "resolved": "https://registry.npmjs.org/mime/-/mime-1.6.0.tgz",
+ "integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==",
+ "bin": {
+ "mime": "cli.js"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/send/node_modules/ms": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.1.tgz",
+ "integrity": "sha512-tgp+dl5cGk28utYktBsrFqA7HKgrhgPsg6Z/EfhWI4gl1Hwq8B/GmY/0oXZ6nF8hDVesS/FpnYaD/kOWhYQvyg=="
+ },
+ "node_modules/serialize-javascript": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-4.0.0.tgz",
+ "integrity": "sha512-GaNA54380uFefWghODBWEGisLZFj00nS5ACs6yHa9nLqlLpVLO8ChDGeKRjZnV4Nh4n0Qi7nhYZD/9fCPzEqkw==",
+ "dependencies": {
+ "randombytes": "^2.1.0"
+ }
+ },
+ "node_modules/serve-index": {
+ "version": "1.9.1",
+ "resolved": "https://registry.npmjs.org/serve-index/-/serve-index-1.9.1.tgz",
+ "integrity": "sha1-03aNabHn2C5c4FD/9bRTvqEqkjk=",
+ "dependencies": {
+ "accepts": "~1.3.4",
+ "batch": "0.6.1",
+ "debug": "2.6.9",
+ "escape-html": "~1.0.3",
+ "http-errors": "~1.6.2",
+ "mime-types": "~2.1.17",
+ "parseurl": "~1.3.2"
+ },
+ "engines": {
+ "node": ">= 0.8.0"
+ }
+ },
+ "node_modules/serve-index/node_modules/http-errors": {
+ "version": "1.6.3",
+ "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-1.6.3.tgz",
+ "integrity": "sha1-i1VoC7S+KDoLW/TqLjhYC+HZMg0=",
+ "dependencies": {
+ "depd": "~1.1.2",
+ "inherits": "2.0.3",
+ "setprototypeof": "1.1.0",
+ "statuses": ">= 1.4.0 < 2"
+ },
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/serve-index/node_modules/inherits": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz",
+ "integrity": "sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4="
+ },
+ "node_modules/serve-index/node_modules/setprototypeof": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.1.0.tgz",
+ "integrity": "sha512-BvE/TwpZX4FXExxOxZyRGQQv651MSwmWKZGqvmPcRIjDqWub67kTKuIMx43cZZrS/cBBzwBcNDWoFxt2XEFIpQ=="
+ },
+ "node_modules/serve-static": {
+ "version": "1.14.1",
+ "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.14.1.tgz",
+ "integrity": "sha512-JMrvUwE54emCYWlTI+hGrGv5I8dEwmco/00EvkzIIsR7MqrHonbD9pO2MOfFnpFntl7ecpZs+3mW+XbQZu9QCg==",
+ "dependencies": {
+ "encodeurl": "~1.0.2",
+ "escape-html": "~1.0.3",
+ "parseurl": "~1.3.3",
+ "send": "0.17.1"
+ },
+ "engines": {
+ "node": ">= 0.8.0"
+ }
+ },
+ "node_modules/set-blocking": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/set-blocking/-/set-blocking-2.0.0.tgz",
+ "integrity": "sha1-BF+XgtARrppoA93TgrJDkrPYkPc="
+ },
+ "node_modules/set-value": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/set-value/-/set-value-2.0.1.tgz",
+ "integrity": "sha512-JxHc1weCN68wRY0fhCoXpyK55m/XPHafOmK4UWD7m2CI14GMcFypt4w/0+NV5f/ZMby2F6S2wwA7fgynh9gWSw==",
+ "dependencies": {
+ "extend-shallow": "^2.0.1",
+ "is-extendable": "^0.1.1",
+ "is-plain-object": "^2.0.3",
+ "split-string": "^3.0.1"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/setimmediate": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/setimmediate/-/setimmediate-1.0.5.tgz",
+ "integrity": "sha1-KQy7Iy4waULX1+qbg3Mqt4VvgoU="
+ },
+ "node_modules/setprototypeof": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.1.1.tgz",
+ "integrity": "sha512-JvdAWfbXeIGaZ9cILp38HntZSFSo3mWg6xGcJJsd+d4aRMOqauag1C63dJfDw7OaMYwEbHMOxEZ1lqVRYP2OAw=="
+ },
+ "node_modules/sha.js": {
+ "version": "2.4.11",
+ "resolved": "https://registry.npmjs.org/sha.js/-/sha.js-2.4.11.tgz",
+ "integrity": "sha512-QMEp5B7cftE7APOjk5Y6xgrbWu+WkLVQwk8JNjZ8nKRciZaByEW6MubieAiToS7+dwvrjGhH8jRXz3MVd0AYqQ==",
+ "dependencies": {
+ "inherits": "^2.0.1",
+ "safe-buffer": "^5.0.1"
+ },
+ "bin": {
+ "sha.js": "bin.js"
+ }
+ },
+ "node_modules/shebang-command": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-1.2.0.tgz",
+ "integrity": "sha1-RKrGW2lbAzmJaMOfNj/uXer98eo=",
+ "dependencies": {
+ "shebang-regex": "^1.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/shebang-regex": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-1.0.0.tgz",
+ "integrity": "sha1-2kL0l0DAtC2yypcoVxyxkMmO/qM=",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/signal-exit": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.3.tgz",
+ "integrity": "sha512-VUJ49FC8U1OxwZLxIbTTrDvLnf/6TDgxZcK8wxR8zs13xpx7xbG60ndBlhNrFi2EMuFRoeDoJO7wthSLq42EjA=="
+ },
+ "node_modules/simple-swizzle": {
+ "version": "0.2.2",
+ "resolved": "https://registry.npmjs.org/simple-swizzle/-/simple-swizzle-0.2.2.tgz",
+ "integrity": "sha1-pNprY1/8zMoz9w0Xy5JZLeleVXo=",
+ "dependencies": {
+ "is-arrayish": "^0.3.1"
+ }
+ },
+ "node_modules/simple-swizzle/node_modules/is-arrayish": {
+ "version": "0.3.2",
+ "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.3.2.tgz",
+ "integrity": "sha512-eVRqCvVlZbuw3GrM63ovNSNAeA1K16kaR/LRY/92w0zxQ5/1YzwblUX652i4Xs9RwAGjW9d9y6X88t8OaAJfWQ=="
+ },
+ "node_modules/sitemap": {
+ "version": "3.2.2",
+ "resolved": "https://registry.npmjs.org/sitemap/-/sitemap-3.2.2.tgz",
+ "integrity": "sha512-TModL/WU4m2q/mQcrDgNANn0P4LwprM9MMvG4hu5zP4c6IIKs2YLTu6nXXnNr8ODW/WFtxKggiJ1EGn2W0GNmg==",
+ "dependencies": {
+ "lodash.chunk": "^4.2.0",
+ "lodash.padstart": "^4.6.1",
+ "whatwg-url": "^7.0.0",
+ "xmlbuilder": "^13.0.0"
+ },
+ "engines": {
+ "node": ">=6.0.0",
+ "npm": ">=4.0.0"
+ }
+ },
+ "node_modules/slash": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/slash/-/slash-2.0.0.tgz",
+ "integrity": "sha512-ZYKh3Wh2z1PpEXWr0MpSBZ0V6mZHAQfYevttO11c51CaWjGTaadiKZ+wVt1PbMlDV5qhMFslpZCemhwOK7C89A==",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/smoothscroll-polyfill": {
+ "version": "0.4.4",
+ "resolved": "https://registry.npmjs.org/smoothscroll-polyfill/-/smoothscroll-polyfill-0.4.4.tgz",
+ "integrity": "sha512-TK5ZA9U5RqCwMpfoMq/l1mrH0JAR7y7KRvOBx0n2869aLxch+gT9GhN3yUfjiw+d/DiF1mKo14+hd62JyMmoBg=="
+ },
+ "node_modules/snapdragon": {
+ "version": "0.8.2",
+ "resolved": "https://registry.npmjs.org/snapdragon/-/snapdragon-0.8.2.tgz",
+ "integrity": "sha512-FtyOnWN/wCHTVXOMwvSv26d+ko5vWlIDD6zoUJ7LW8vh+ZBC8QdljveRP+crNrtBwioEUWy/4dMtbBjA4ioNlg==",
+ "dependencies": {
+ "base": "^0.11.1",
+ "debug": "^2.2.0",
+ "define-property": "^0.2.5",
+ "extend-shallow": "^2.0.1",
+ "map-cache": "^0.2.2",
+ "source-map": "^0.5.6",
+ "source-map-resolve": "^0.5.0",
+ "use": "^3.1.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/snapdragon-node": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/snapdragon-node/-/snapdragon-node-2.1.1.tgz",
+ "integrity": "sha512-O27l4xaMYt/RSQ5TR3vpWCAB5Kb/czIcqUFOM/C4fYcLnbZUc1PkjTAMjof2pBWaSTwOUd6qUHcFGVGj7aIwnw==",
+ "dependencies": {
+ "define-property": "^1.0.0",
+ "isobject": "^3.0.0",
+ "snapdragon-util": "^3.0.1"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/snapdragon-node/node_modules/define-property": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/define-property/-/define-property-1.0.0.tgz",
+ "integrity": "sha1-dp66rz9KY6rTr56NMEybvnm/sOY=",
+ "dependencies": {
+ "is-descriptor": "^1.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/snapdragon-util": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/snapdragon-util/-/snapdragon-util-3.0.1.tgz",
+ "integrity": "sha512-mbKkMdQKsjX4BAL4bRYTj21edOf8cN7XHdYUJEe+Zn99hVEYcMvKPct1IqNe7+AZPirn8BCDOQBHQZknqmKlZQ==",
+ "dependencies": {
+ "kind-of": "^3.2.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/snapdragon-util/node_modules/kind-of": {
+ "version": "3.2.2",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz",
+ "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=",
+ "dependencies": {
+ "is-buffer": "^1.1.5"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/snapdragon/node_modules/define-property": {
+ "version": "0.2.5",
+ "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz",
+ "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=",
+ "dependencies": {
+ "is-descriptor": "^0.1.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/snapdragon/node_modules/is-accessor-descriptor": {
+ "version": "0.1.6",
+ "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-0.1.6.tgz",
+ "integrity": "sha1-qeEss66Nh2cn7u84Q/igiXtcmNY=",
+ "dependencies": {
+ "kind-of": "^3.0.2"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/snapdragon/node_modules/is-accessor-descriptor/node_modules/kind-of": {
+ "version": "3.2.2",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz",
+ "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=",
+ "dependencies": {
+ "is-buffer": "^1.1.5"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/snapdragon/node_modules/is-data-descriptor": {
+ "version": "0.1.4",
+ "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-0.1.4.tgz",
+ "integrity": "sha1-C17mSDiOLIYCgueT8YVv7D8wG1Y=",
+ "dependencies": {
+ "kind-of": "^3.0.2"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/snapdragon/node_modules/is-data-descriptor/node_modules/kind-of": {
+ "version": "3.2.2",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz",
+ "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=",
+ "dependencies": {
+ "is-buffer": "^1.1.5"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/snapdragon/node_modules/is-descriptor": {
+ "version": "0.1.6",
+ "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-0.1.6.tgz",
+ "integrity": "sha512-avDYr0SB3DwO9zsMov0gKCESFYqCnE4hq/4z3TdUlukEy5t9C0YRq7HLrsN52NAcqXKaepeCD0n+B0arnVG3Hg==",
+ "dependencies": {
+ "is-accessor-descriptor": "^0.1.6",
+ "is-data-descriptor": "^0.1.4",
+ "kind-of": "^5.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/snapdragon/node_modules/kind-of": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-5.1.0.tgz",
+ "integrity": "sha512-NGEErnH6F2vUuXDh+OlbcKW7/wOcfdRHaZ7VWtqCztfHri/++YKmP51OdWeGPuqCOba6kk2OTe5d02VmTB80Pw==",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/snapdragon/node_modules/source-map": {
+ "version": "0.5.7",
+ "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz",
+ "integrity": "sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w=",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/sockjs": {
+ "version": "0.3.21",
+ "resolved": "https://registry.npmjs.org/sockjs/-/sockjs-0.3.21.tgz",
+ "integrity": "sha512-DhbPFGpxjc6Z3I+uX07Id5ZO2XwYsWOrYjaSeieES78cq+JaJvVe5q/m1uvjIQhXinhIeCFRH6JgXe+mvVMyXw==",
+ "dependencies": {
+ "faye-websocket": "^0.11.3",
+ "uuid": "^3.4.0",
+ "websocket-driver": "^0.7.4"
+ }
+ },
+ "node_modules/sockjs-client": {
+ "version": "1.5.1",
+ "resolved": "https://registry.npmjs.org/sockjs-client/-/sockjs-client-1.5.1.tgz",
+ "integrity": "sha512-VnVAb663fosipI/m6pqRXakEOw7nvd7TUgdr3PlR/8V2I95QIdwT8L4nMxhyU8SmDBHYXU1TOElaKOmKLfYzeQ==",
+ "dependencies": {
+ "debug": "^3.2.6",
+ "eventsource": "^1.0.7",
+ "faye-websocket": "^0.11.3",
+ "inherits": "^2.0.4",
+ "json3": "^3.3.3",
+ "url-parse": "^1.5.1"
+ }
+ },
+ "node_modules/sockjs-client/node_modules/debug": {
+ "version": "3.2.7",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz",
+ "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==",
+ "dependencies": {
+ "ms": "^2.1.1"
+ }
+ },
+ "node_modules/sockjs-client/node_modules/ms": {
+ "version": "2.1.3",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
+ "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA=="
+ },
+ "node_modules/sort-keys": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/sort-keys/-/sort-keys-2.0.0.tgz",
+ "integrity": "sha1-ZYU1WEhh7JfXMNbPQYIuH1ZoQSg=",
+ "dependencies": {
+ "is-plain-obj": "^1.0.0"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/source-list-map": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/source-list-map/-/source-list-map-2.0.1.tgz",
+ "integrity": "sha512-qnQ7gVMxGNxsiL4lEuJwe/To8UnK7fAnmbGEEH8RpLouuKbeEm0lhbQVFIrNSuB+G7tVrAlVsZgETT5nljf+Iw=="
+ },
+ "node_modules/source-map": {
+ "version": "0.7.3",
+ "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.7.3.tgz",
+ "integrity": "sha512-CkCj6giN3S+n9qrYiBTX5gystlENnRW5jZeNLHpe6aue+SrHcG5VYwujhW9s4dY31mEGsxBDrHR6oI69fTXsaQ==",
+ "engines": {
+ "node": ">= 8"
+ }
+ },
+ "node_modules/source-map-resolve": {
+ "version": "0.5.3",
+ "resolved": "https://registry.npmjs.org/source-map-resolve/-/source-map-resolve-0.5.3.tgz",
+ "integrity": "sha512-Htz+RnsXWk5+P2slx5Jh3Q66vhQj1Cllm0zvnaY98+NFx+Dv2CF/f5O/t8x+KaNdrdIAsruNzoh/KpialbqAnw==",
+ "dependencies": {
+ "atob": "^2.1.2",
+ "decode-uri-component": "^0.2.0",
+ "resolve-url": "^0.2.1",
+ "source-map-url": "^0.4.0",
+ "urix": "^0.1.0"
+ }
+ },
+ "node_modules/source-map-support": {
+ "version": "0.5.19",
+ "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.19.tgz",
+ "integrity": "sha512-Wonm7zOCIJzBGQdB+thsPar0kYuCIzYvxZwlBa87yi/Mdjv7Tip2cyVbLj5o0cFPN4EVkuTwb3GDDyUx2DGnGw==",
+ "dependencies": {
+ "buffer-from": "^1.0.0",
+ "source-map": "^0.6.0"
+ }
+ },
+ "node_modules/source-map-support/node_modules/source-map": {
+ "version": "0.6.1",
+ "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz",
+ "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/source-map-url": {
+ "version": "0.4.1",
+ "resolved": "https://registry.npmjs.org/source-map-url/-/source-map-url-0.4.1.tgz",
+ "integrity": "sha512-cPiFOTLUKvJFIg4SKVScy4ilPPW6rFgMgfuZJPNoDuMs3nC1HbMUycBoJw77xFIp6z1UJQJOfx6C9GMH80DiTw=="
+ },
+ "node_modules/spdy": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/spdy/-/spdy-4.0.2.tgz",
+ "integrity": "sha512-r46gZQZQV+Kl9oItvl1JZZqJKGr+oEkB08A6BzkiR7593/7IbtuncXHd2YoYeTsG4157ZssMu9KYvUHLcjcDoA==",
+ "dependencies": {
+ "debug": "^4.1.0",
+ "handle-thing": "^2.0.0",
+ "http-deceiver": "^1.2.7",
+ "select-hose": "^2.0.0",
+ "spdy-transport": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=6.0.0"
+ }
+ },
+ "node_modules/spdy-transport": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/spdy-transport/-/spdy-transport-3.0.0.tgz",
+ "integrity": "sha512-hsLVFE5SjA6TCisWeJXFKniGGOpBgMLmerfO2aCyCU5s7nJ/rpAepqmFifv/GCbSbueEeAJJnmSQ2rKC/g8Fcw==",
+ "dependencies": {
+ "debug": "^4.1.0",
+ "detect-node": "^2.0.4",
+ "hpack.js": "^2.1.6",
+ "obuf": "^1.1.2",
+ "readable-stream": "^3.0.6",
+ "wbuf": "^1.7.3"
+ }
+ },
+ "node_modules/spdy-transport/node_modules/debug": {
+ "version": "4.3.1",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz",
+ "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==",
+ "dependencies": {
+ "ms": "2.1.2"
+ },
+ "engines": {
+ "node": ">=6.0"
+ },
+ "peerDependenciesMeta": {
+ "supports-color": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/spdy-transport/node_modules/ms": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz",
+ "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w=="
+ },
+ "node_modules/spdy-transport/node_modules/readable-stream": {
+ "version": "3.6.0",
+ "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.0.tgz",
+ "integrity": "sha512-BViHy7LKeTz4oNnkcLJ+lVSL6vpiFeX6/d3oSH8zCW7UxP2onchk+vTGB143xuFjHS3deTgkKoXXymXqymiIdA==",
+ "dependencies": {
+ "inherits": "^2.0.3",
+ "string_decoder": "^1.1.1",
+ "util-deprecate": "^1.0.1"
+ },
+ "engines": {
+ "node": ">= 6"
+ }
+ },
+ "node_modules/spdy/node_modules/debug": {
+ "version": "4.3.1",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz",
+ "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==",
+ "dependencies": {
+ "ms": "2.1.2"
+ },
+ "engines": {
+ "node": ">=6.0"
+ },
+ "peerDependenciesMeta": {
+ "supports-color": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/spdy/node_modules/ms": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz",
+ "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w=="
+ },
+ "node_modules/split-string": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/split-string/-/split-string-3.1.0.tgz",
+ "integrity": "sha512-NzNVhJDYpwceVVii8/Hu6DKfD2G+NrQHlS/V/qgv763EYudVwEcMQNxd2lh+0VrUByXN/oJkl5grOhYWvQUYiw==",
+ "dependencies": {
+ "extend-shallow": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/split-string/node_modules/extend-shallow": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-3.0.2.tgz",
+ "integrity": "sha1-Jqcarwc7OfshJxcnRhMcJwQCjbg=",
+ "dependencies": {
+ "assign-symbols": "^1.0.0",
+ "is-extendable": "^1.0.1"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/split-string/node_modules/is-extendable": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz",
+ "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==",
+ "dependencies": {
+ "is-plain-object": "^2.0.4"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/sprintf-js": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz",
+ "integrity": "sha1-BOaSb2YolTVPPdAVIDYzuFcpfiw="
+ },
+ "node_modules/sshpk": {
+ "version": "1.16.1",
+ "resolved": "https://registry.npmjs.org/sshpk/-/sshpk-1.16.1.tgz",
+ "integrity": "sha512-HXXqVUq7+pcKeLqqZj6mHFUMvXtOJt1uoUx09pFW6011inTMxqI8BA8PM95myrIyyKwdnzjdFjLiE6KBPVtJIg==",
+ "dependencies": {
+ "asn1": "~0.2.3",
+ "assert-plus": "^1.0.0",
+ "bcrypt-pbkdf": "^1.0.0",
+ "dashdash": "^1.12.0",
+ "ecc-jsbn": "~0.1.1",
+ "getpass": "^0.1.1",
+ "jsbn": "~0.1.0",
+ "safer-buffer": "^2.0.2",
+ "tweetnacl": "~0.14.0"
+ },
+ "bin": {
+ "sshpk-conv": "bin/sshpk-conv",
+ "sshpk-sign": "bin/sshpk-sign",
+ "sshpk-verify": "bin/sshpk-verify"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/ssri": {
+ "version": "6.0.2",
+ "resolved": "https://registry.npmjs.org/ssri/-/ssri-6.0.2.tgz",
+ "integrity": "sha512-cepbSq/neFK7xB6A50KHN0xHDotYzq58wWCa5LeWqnPrHG8GzfEjO/4O8kpmcGW+oaxkvhEJCWgbgNk4/ZV93Q==",
+ "dependencies": {
+ "figgy-pudding": "^3.5.1"
+ }
+ },
+ "node_modules/stable": {
+ "version": "0.1.8",
+ "resolved": "https://registry.npmjs.org/stable/-/stable-0.1.8.tgz",
+ "integrity": "sha512-ji9qxRnOVfcuLDySj9qzhGSEFVobyt1kIOSkj1qZzYLzq7Tos/oUUWvotUPQLlrsidqsK6tBH89Bc9kL5zHA6w=="
+ },
+ "node_modules/stack-utils": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/stack-utils/-/stack-utils-1.0.5.tgz",
+ "integrity": "sha512-KZiTzuV3CnSnSvgMRrARVCj+Ht7rMbauGDK0LdVFRGyenwdylpajAp4Q0i6SX8rEmbTpMMf6ryq2gb8pPq2WgQ==",
+ "dependencies": {
+ "escape-string-regexp": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/stack-utils/node_modules/escape-string-regexp": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-2.0.0.tgz",
+ "integrity": "sha512-UpzcLCXolUWcNu5HtVMHYdXJjArjsF9C0aNnquZYY4uW/Vu0miy5YoWvbV345HauVvcAUnpRuhMMcqTcGOY2+w==",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/static-extend": {
+ "version": "0.1.2",
+ "resolved": "https://registry.npmjs.org/static-extend/-/static-extend-0.1.2.tgz",
+ "integrity": "sha1-YICcOcv/VTNyJv1eC1IPNB8ftcY=",
+ "dependencies": {
+ "define-property": "^0.2.5",
+ "object-copy": "^0.1.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/static-extend/node_modules/define-property": {
+ "version": "0.2.5",
+ "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz",
+ "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=",
+ "dependencies": {
+ "is-descriptor": "^0.1.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/static-extend/node_modules/is-accessor-descriptor": {
+ "version": "0.1.6",
+ "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-0.1.6.tgz",
+ "integrity": "sha1-qeEss66Nh2cn7u84Q/igiXtcmNY=",
+ "dependencies": {
+ "kind-of": "^3.0.2"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/static-extend/node_modules/is-accessor-descriptor/node_modules/kind-of": {
+ "version": "3.2.2",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz",
+ "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=",
+ "dependencies": {
+ "is-buffer": "^1.1.5"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/static-extend/node_modules/is-data-descriptor": {
+ "version": "0.1.4",
+ "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-0.1.4.tgz",
+ "integrity": "sha1-C17mSDiOLIYCgueT8YVv7D8wG1Y=",
+ "dependencies": {
+ "kind-of": "^3.0.2"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/static-extend/node_modules/is-data-descriptor/node_modules/kind-of": {
+ "version": "3.2.2",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz",
+ "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=",
+ "dependencies": {
+ "is-buffer": "^1.1.5"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/static-extend/node_modules/is-descriptor": {
+ "version": "0.1.6",
+ "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-0.1.6.tgz",
+ "integrity": "sha512-avDYr0SB3DwO9zsMov0gKCESFYqCnE4hq/4z3TdUlukEy5t9C0YRq7HLrsN52NAcqXKaepeCD0n+B0arnVG3Hg==",
+ "dependencies": {
+ "is-accessor-descriptor": "^0.1.6",
+ "is-data-descriptor": "^0.1.4",
+ "kind-of": "^5.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/static-extend/node_modules/kind-of": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-5.1.0.tgz",
+ "integrity": "sha512-NGEErnH6F2vUuXDh+OlbcKW7/wOcfdRHaZ7VWtqCztfHri/++YKmP51OdWeGPuqCOba6kk2OTe5d02VmTB80Pw==",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/statuses": {
+ "version": "1.5.0",
+ "resolved": "https://registry.npmjs.org/statuses/-/statuses-1.5.0.tgz",
+ "integrity": "sha1-Fhx9rBd2Wf2YEfQ3cfqZOBR4Yow=",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/std-env": {
+ "version": "2.3.0",
+ "resolved": "https://registry.npmjs.org/std-env/-/std-env-2.3.0.tgz",
+ "integrity": "sha512-4qT5B45+Kjef2Z6pE0BkskzsH0GO7GrND0wGlTM1ioUe3v0dGYx9ZJH0Aro/YyA8fqQ5EyIKDRjZojJYMFTflw==",
+ "dependencies": {
+ "ci-info": "^3.0.0"
+ }
+ },
+ "node_modules/std-env/node_modules/ci-info": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-3.1.1.tgz",
+ "integrity": "sha512-kdRWLBIJwdsYJWYJFtAFFYxybguqeF91qpZaggjG5Nf8QKdizFG2hjqvaTXbxFIcYbSaD74KpAXv6BSm17DHEQ=="
+ },
+ "node_modules/stream-browserify": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/stream-browserify/-/stream-browserify-2.0.2.tgz",
+ "integrity": "sha512-nX6hmklHs/gr2FuxYDltq8fJA1GDlxKQCz8O/IM4atRqBH8OORmBNgfvW5gG10GT/qQ9u0CzIvr2X5Pkt6ntqg==",
+ "dependencies": {
+ "inherits": "~2.0.1",
+ "readable-stream": "^2.0.2"
+ }
+ },
+ "node_modules/stream-each": {
+ "version": "1.2.3",
+ "resolved": "https://registry.npmjs.org/stream-each/-/stream-each-1.2.3.tgz",
+ "integrity": "sha512-vlMC2f8I2u/bZGqkdfLQW/13Zihpej/7PmSiMQsbYddxuTsJp8vRe2x2FvVExZg7FaOds43ROAuFJwPR4MTZLw==",
+ "dependencies": {
+ "end-of-stream": "^1.1.0",
+ "stream-shift": "^1.0.0"
+ }
+ },
+ "node_modules/stream-http": {
+ "version": "2.8.3",
+ "resolved": "https://registry.npmjs.org/stream-http/-/stream-http-2.8.3.tgz",
+ "integrity": "sha512-+TSkfINHDo4J+ZobQLWiMouQYB+UVYFttRA94FpEzzJ7ZdqcL4uUUQ7WkdkI4DSozGmgBUE/a47L+38PenXhUw==",
+ "dependencies": {
+ "builtin-status-codes": "^3.0.0",
+ "inherits": "^2.0.1",
+ "readable-stream": "^2.3.6",
+ "to-arraybuffer": "^1.0.0",
+ "xtend": "^4.0.0"
+ }
+ },
+ "node_modules/stream-shift": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/stream-shift/-/stream-shift-1.0.1.tgz",
+ "integrity": "sha512-AiisoFqQ0vbGcZgQPY1cdP2I76glaVA/RauYR4G4thNFgkTqr90yXTo4LYX60Jl+sIlPNHHdGSwo01AvbKUSVQ=="
+ },
+ "node_modules/strict-uri-encode": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/strict-uri-encode/-/strict-uri-encode-1.1.0.tgz",
+ "integrity": "sha1-J5siXfHVgrH1TmWt3UNS4Y+qBxM=",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/string_decoder": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz",
+ "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==",
+ "dependencies": {
+ "safe-buffer": "~5.1.0"
+ }
+ },
+ "node_modules/string-width": {
+ "version": "4.2.2",
+ "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.2.tgz",
+ "integrity": "sha512-XBJbT3N4JhVumXE0eoLU9DCjcaF92KLNqTmFCnG1pf8duUxFGwtP6AD6nkjw9a3IdiRtL3E2w3JDiE/xi3vOeA==",
+ "dependencies": {
+ "emoji-regex": "^8.0.0",
+ "is-fullwidth-code-point": "^3.0.0",
+ "strip-ansi": "^6.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/string.prototype.trimend": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/string.prototype.trimend/-/string.prototype.trimend-1.0.4.tgz",
+ "integrity": "sha512-y9xCjw1P23Awk8EvTpcyL2NIr1j7wJ39f+k6lvRnSMz+mz9CGz9NYPelDk42kOz6+ql8xjfK8oYzy3jAP5QU5A==",
+ "dependencies": {
+ "call-bind": "^1.0.2",
+ "define-properties": "^1.1.3"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/string.prototype.trimstart": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/string.prototype.trimstart/-/string.prototype.trimstart-1.0.4.tgz",
+ "integrity": "sha512-jh6e984OBfvxS50tdY2nRZnoC5/mLFKOREQfw8t5yytkoUsJRNxvI/E39qu1sD0OtWI3OC0XgKSmcWwziwYuZw==",
+ "dependencies": {
+ "call-bind": "^1.0.2",
+ "define-properties": "^1.1.3"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/strip-ansi": {
+ "version": "6.0.0",
+ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.0.tgz",
+ "integrity": "sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w==",
+ "dependencies": {
+ "ansi-regex": "^5.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/strip-bom-string": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/strip-bom-string/-/strip-bom-string-1.0.0.tgz",
+ "integrity": "sha1-5SEekiQ2n7uB1jOi8ABE3IztrZI=",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/strip-eof": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/strip-eof/-/strip-eof-1.0.0.tgz",
+ "integrity": "sha1-u0P/VZim6wXYm1n80SnJgzE2Br8=",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/strip-json-comments": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-2.0.1.tgz",
+ "integrity": "sha1-PFMZQukIwml8DsNEhYwobHygpgo=",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/stylehacks": {
+ "version": "4.0.3",
+ "resolved": "https://registry.npmjs.org/stylehacks/-/stylehacks-4.0.3.tgz",
+ "integrity": "sha512-7GlLk9JwlElY4Y6a/rmbH2MhVlTyVmiJd1PfTCqFaIBEGMYNsrO/v3SeGTdhBThLg4Z+NbOk/qFMwCa+J+3p/g==",
+ "dependencies": {
+ "browserslist": "^4.0.0",
+ "postcss": "^7.0.0",
+ "postcss-selector-parser": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/stylehacks/node_modules/postcss-selector-parser": {
+ "version": "3.1.2",
+ "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-3.1.2.tgz",
+ "integrity": "sha512-h7fJ/5uWuRVyOtkO45pnt1Ih40CEleeyCHzipqAZO2e5H20g25Y48uYnFUiShvY4rZWNJ/Bib/KVPmanaCtOhA==",
+ "dependencies": {
+ "dot-prop": "^5.2.0",
+ "indexes-of": "^1.0.1",
+ "uniq": "^1.0.1"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/stylus": {
+ "version": "0.54.8",
+ "resolved": "https://registry.npmjs.org/stylus/-/stylus-0.54.8.tgz",
+ "integrity": "sha512-vr54Or4BZ7pJafo2mpf0ZcwA74rpuYCZbxrHBsH8kbcXOwSfvBFwsRfpGO5OD5fhG5HDCFW737PKaawI7OqEAg==",
+ "dependencies": {
+ "css-parse": "~2.0.0",
+ "debug": "~3.1.0",
+ "glob": "^7.1.6",
+ "mkdirp": "~1.0.4",
+ "safer-buffer": "^2.1.2",
+ "sax": "~1.2.4",
+ "semver": "^6.3.0",
+ "source-map": "^0.7.3"
+ },
+ "bin": {
+ "stylus": "bin/stylus"
+ },
+ "engines": {
+ "node": "*"
+ }
+ },
+ "node_modules/stylus-loader": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/stylus-loader/-/stylus-loader-3.0.2.tgz",
+ "integrity": "sha512-+VomPdZ6a0razP+zinir61yZgpw2NfljeSsdUF5kJuEzlo3khXhY19Fn6l8QQz1GRJGtMCo8nG5C04ePyV7SUA==",
+ "dependencies": {
+ "loader-utils": "^1.0.2",
+ "lodash.clonedeep": "^4.5.0",
+ "when": "~3.6.x"
+ },
+ "peerDependencies": {
+ "stylus": ">=0.52.4"
+ }
+ },
+ "node_modules/stylus/node_modules/debug": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-3.1.0.tgz",
+ "integrity": "sha512-OX8XqP7/1a9cqkxYw2yXss15f26NKWBpDXQd0/uK/KPqdQhxbPa994hnzjcE2VqQpDslf55723cKPUOGSmMY3g==",
+ "dependencies": {
+ "ms": "2.0.0"
+ }
+ },
+ "node_modules/supports-color": {
+ "version": "5.5.0",
+ "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz",
+ "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==",
+ "dependencies": {
+ "has-flag": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/svg-tags": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/svg-tags/-/svg-tags-1.0.0.tgz",
+ "integrity": "sha1-WPcc7jvVGbWdSyqEO2x95krAR2Q="
+ },
+ "node_modules/svgo": {
+ "version": "1.3.2",
+ "resolved": "https://registry.npmjs.org/svgo/-/svgo-1.3.2.tgz",
+ "integrity": "sha512-yhy/sQYxR5BkC98CY7o31VGsg014AKLEPxdfhora76l36hD9Rdy5NZA/Ocn6yayNPgSamYdtX2rFJdcv07AYVw==",
+ "dependencies": {
+ "chalk": "^2.4.1",
+ "coa": "^2.0.2",
+ "css-select": "^2.0.0",
+ "css-select-base-adapter": "^0.1.1",
+ "css-tree": "1.0.0-alpha.37",
+ "csso": "^4.0.2",
+ "js-yaml": "^3.13.1",
+ "mkdirp": "~0.5.1",
+ "object.values": "^1.1.0",
+ "sax": "~1.2.4",
+ "stable": "^0.1.8",
+ "unquote": "~1.1.1",
+ "util.promisify": "~1.0.0"
+ },
+ "bin": {
+ "svgo": "bin/svgo"
+ },
+ "engines": {
+ "node": ">=4.0.0"
+ }
+ },
+ "node_modules/svgo/node_modules/css-select": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/css-select/-/css-select-2.1.0.tgz",
+ "integrity": "sha512-Dqk7LQKpwLoH3VovzZnkzegqNSuAziQyNZUcrdDM401iY+R5NkGBXGmtO05/yaXQziALuPogeG0b7UAgjnTJTQ==",
+ "dependencies": {
+ "boolbase": "^1.0.0",
+ "css-what": "^3.2.1",
+ "domutils": "^1.7.0",
+ "nth-check": "^1.0.2"
+ }
+ },
+ "node_modules/svgo/node_modules/css-what": {
+ "version": "3.4.2",
+ "resolved": "https://registry.npmjs.org/css-what/-/css-what-3.4.2.tgz",
+ "integrity": "sha512-ACUm3L0/jiZTqfzRM3Hi9Q8eZqd6IK37mMWPLz9PJxkLWllYeRf+EHUSHYEtFop2Eqytaq1FizFVh7XfBnXCDQ==",
+ "engines": {
+ "node": ">= 6"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/fb55"
+ }
+ },
+ "node_modules/svgo/node_modules/dom-serializer": {
+ "version": "0.2.2",
+ "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-0.2.2.tgz",
+ "integrity": "sha512-2/xPb3ORsQ42nHYiSunXkDjPLBaEj/xTwUO4B7XCZQTRk7EBtTOPaygh10YAAh2OI1Qrp6NWfpAhzswj0ydt9g==",
+ "dependencies": {
+ "domelementtype": "^2.0.1",
+ "entities": "^2.0.0"
+ }
+ },
+ "node_modules/svgo/node_modules/domutils": {
+ "version": "1.7.0",
+ "resolved": "https://registry.npmjs.org/domutils/-/domutils-1.7.0.tgz",
+ "integrity": "sha512-Lgd2XcJ/NjEw+7tFvfKxOzCYKZsdct5lczQ2ZaQY8Djz7pfAD3Gbp8ySJWtreII/vDlMVmxwa6pHmdxIYgttDg==",
+ "dependencies": {
+ "dom-serializer": "0",
+ "domelementtype": "1"
+ }
+ },
+ "node_modules/svgo/node_modules/domutils/node_modules/domelementtype": {
+ "version": "1.3.1",
+ "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-1.3.1.tgz",
+ "integrity": "sha512-BSKB+TSpMpFI/HOxCNr1O8aMOTZ8hT3pM3GQ0w/mWRmkhEDSFJkkyzz4XQsBV44BChwGkrDfMyjVD0eA2aFV3w=="
+ },
+ "node_modules/svgo/node_modules/mkdirp": {
+ "version": "0.5.5",
+ "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz",
+ "integrity": "sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==",
+ "dependencies": {
+ "minimist": "^1.2.5"
+ },
+ "bin": {
+ "mkdirp": "bin/cmd.js"
+ }
+ },
+ "node_modules/svgo/node_modules/nth-check": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/nth-check/-/nth-check-1.0.2.tgz",
+ "integrity": "sha512-WeBOdju8SnzPN5vTUJYxYUxLeXpCaVP5i5e0LF8fg7WORF2Wd7wFX/pk0tYZk7s8T+J7VLy0Da6J1+wCT0AtHg==",
+ "dependencies": {
+ "boolbase": "~1.0.0"
+ }
+ },
+ "node_modules/tapable": {
+ "version": "1.1.3",
+ "resolved": "https://registry.npmjs.org/tapable/-/tapable-1.1.3.tgz",
+ "integrity": "sha512-4WK/bYZmj8xLr+HUCODHGF1ZFzsYffasLUgEiMBY4fgtltdO6B4WJtlSbPaDTLpYTcGVwM2qLnFTICEcNxs3kA==",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/term-size": {
+ "version": "2.2.1",
+ "resolved": "https://registry.npmjs.org/term-size/-/term-size-2.2.1.tgz",
+ "integrity": "sha512-wK0Ri4fOGjv/XPy8SBHZChl8CM7uMc5VML7SqiQ0zG7+J5Vr+RMQDoHa2CNT6KHUnTGIXH34UDMkPzAUyapBZg==",
+ "engines": {
+ "node": ">=8"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/terser": {
+ "version": "4.8.0",
+ "resolved": "https://registry.npmjs.org/terser/-/terser-4.8.0.tgz",
+ "integrity": "sha512-EAPipTNeWsb/3wLPeup1tVPaXfIaU68xMnVdPafIL1TV05OhASArYyIfFvnvJCNrR2NIOvDVNNTFRa+Re2MWyw==",
+ "dependencies": {
+ "commander": "^2.20.0",
+ "source-map": "~0.6.1",
+ "source-map-support": "~0.5.12"
+ },
+ "bin": {
+ "terser": "bin/terser"
+ },
+ "engines": {
+ "node": ">=6.0.0"
+ }
+ },
+ "node_modules/terser-webpack-plugin": {
+ "version": "1.4.5",
+ "resolved": "https://registry.npmjs.org/terser-webpack-plugin/-/terser-webpack-plugin-1.4.5.tgz",
+ "integrity": "sha512-04Rfe496lN8EYruwi6oPQkG0vo8C+HT49X687FZnpPF0qMAIHONI6HEXYPKDOE8e5HjXTyKfqRd/agHtH0kOtw==",
+ "dependencies": {
+ "cacache": "^12.0.2",
+ "find-cache-dir": "^2.1.0",
+ "is-wsl": "^1.1.0",
+ "schema-utils": "^1.0.0",
+ "serialize-javascript": "^4.0.0",
+ "source-map": "^0.6.1",
+ "terser": "^4.1.2",
+ "webpack-sources": "^1.4.0",
+ "worker-farm": "^1.7.0"
+ },
+ "engines": {
+ "node": ">= 6.9.0"
+ },
+ "peerDependencies": {
+ "webpack": "^4.0.0"
+ }
+ },
+ "node_modules/terser-webpack-plugin/node_modules/find-cache-dir": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/find-cache-dir/-/find-cache-dir-2.1.0.tgz",
+ "integrity": "sha512-Tq6PixE0w/VMFfCgbONnkiQIVol/JJL7nRMi20fqzA4NRs9AfeqMGeRdPi3wIhYkxjeBaWh2rxwapn5Tu3IqOQ==",
+ "dependencies": {
+ "commondir": "^1.0.1",
+ "make-dir": "^2.0.0",
+ "pkg-dir": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/terser-webpack-plugin/node_modules/find-up": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz",
+ "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==",
+ "dependencies": {
+ "locate-path": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/terser-webpack-plugin/node_modules/locate-path": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz",
+ "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==",
+ "dependencies": {
+ "p-locate": "^3.0.0",
+ "path-exists": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/terser-webpack-plugin/node_modules/make-dir": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-2.1.0.tgz",
+ "integrity": "sha512-LS9X+dc8KLxXCb8dni79fLIIUA5VyZoyjSMCwTluaXA0o27cCK0bhXkpgw+sTXVpPy/lSO57ilRixqk0vDmtRA==",
+ "dependencies": {
+ "pify": "^4.0.1",
+ "semver": "^5.6.0"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/terser-webpack-plugin/node_modules/p-locate": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz",
+ "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==",
+ "dependencies": {
+ "p-limit": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/terser-webpack-plugin/node_modules/path-exists": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz",
+ "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=",
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/terser-webpack-plugin/node_modules/pkg-dir": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-3.0.0.tgz",
+ "integrity": "sha512-/E57AYkoeQ25qkxMj5PBOVgF8Kiu/h7cYS30Z5+R7WaiCCBfLq58ZI/dSeaEKb9WVJV5n/03QwrN3IeWIFllvw==",
+ "dependencies": {
+ "find-up": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/terser-webpack-plugin/node_modules/schema-utils": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-1.0.0.tgz",
+ "integrity": "sha512-i27Mic4KovM/lnGsy8whRCHhc7VicJajAjTrYg11K9zfZXnYIt4k5F+kZkwjnrhKzLic/HLU4j11mjsz2G/75g==",
+ "dependencies": {
+ "ajv": "^6.1.0",
+ "ajv-errors": "^1.0.0",
+ "ajv-keywords": "^3.1.0"
+ },
+ "engines": {
+ "node": ">= 4"
+ }
+ },
+ "node_modules/terser-webpack-plugin/node_modules/semver": {
+ "version": "5.7.1",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz",
+ "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==",
+ "bin": {
+ "semver": "bin/semver"
+ }
+ },
+ "node_modules/terser-webpack-plugin/node_modules/source-map": {
+ "version": "0.6.1",
+ "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz",
+ "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/terser/node_modules/source-map": {
+ "version": "0.6.1",
+ "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz",
+ "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/text-table": {
+ "version": "0.2.0",
+ "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz",
+ "integrity": "sha1-f17oI66AUgfACvLfSoTsP8+lcLQ="
+ },
+ "node_modules/through": {
+ "version": "2.3.8",
+ "resolved": "https://registry.npmjs.org/through/-/through-2.3.8.tgz",
+ "integrity": "sha1-DdTJ/6q8NXlgsbckEV1+Doai4fU="
+ },
+ "node_modules/through2": {
+ "version": "2.0.5",
+ "resolved": "https://registry.npmjs.org/through2/-/through2-2.0.5.tgz",
+ "integrity": "sha512-/mrRod8xqpA+IHSLyGCQ2s8SPHiCDEeQJSep1jqLYeEUClOFG2Qsh+4FU6G9VeqpZnGW/Su8LQGc4YKni5rYSQ==",
+ "dependencies": {
+ "readable-stream": "~2.3.6",
+ "xtend": "~4.0.1"
+ }
+ },
+ "node_modules/thunky": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/thunky/-/thunky-1.1.0.tgz",
+ "integrity": "sha512-eHY7nBftgThBqOyHGVN+l8gF0BucP09fMo0oO/Lb0w1OF80dJv+lDVpXG60WMQvkcxAkNybKsrEIE3ZtKGmPrA=="
+ },
+ "node_modules/timers-browserify": {
+ "version": "2.0.12",
+ "resolved": "https://registry.npmjs.org/timers-browserify/-/timers-browserify-2.0.12.tgz",
+ "integrity": "sha512-9phl76Cqm6FhSX9Xe1ZUAMLtm1BLkKj2Qd5ApyWkXzsMRaA7dgr81kf4wJmQf/hAvg8EEyJxDo3du/0KlhPiKQ==",
+ "dependencies": {
+ "setimmediate": "^1.0.4"
+ },
+ "engines": {
+ "node": ">=0.6.0"
+ }
+ },
+ "node_modules/timsort": {
+ "version": "0.3.0",
+ "resolved": "https://registry.npmjs.org/timsort/-/timsort-0.3.0.tgz",
+ "integrity": "sha1-QFQRqOfmM5/mTbmiNN4R3DHgK9Q="
+ },
+ "node_modules/tiny-cookie": {
+ "version": "2.3.2",
+ "resolved": "https://registry.npmjs.org/tiny-cookie/-/tiny-cookie-2.3.2.tgz",
+ "integrity": "sha512-qbymkVh+6+Gc/c9sqnvbG+dOHH6bschjphK3SHgIfT6h/t+63GBL37JXNoXEc6u/+BcwU6XmaWUuf19ouLVtPg=="
+ },
+ "node_modules/tiny-emitter": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/tiny-emitter/-/tiny-emitter-2.1.0.tgz",
+ "integrity": "sha512-NB6Dk1A9xgQPMoGqC5CVXn123gWyte215ONT5Pp5a0yt4nlEoO1ZWeCwpncaekPHXO60i47ihFnZPiRPjRMq4Q==",
+ "optional": true
+ },
+ "node_modules/to-arraybuffer": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/to-arraybuffer/-/to-arraybuffer-1.0.1.tgz",
+ "integrity": "sha1-fSKbH8xjfkZsoIEYCDanqr/4P0M="
+ },
+ "node_modules/to-factory": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/to-factory/-/to-factory-1.0.0.tgz",
+ "integrity": "sha1-hzivi9lxIK0dQEeXKtpVY7+UebE="
+ },
+ "node_modules/to-fast-properties": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-2.0.0.tgz",
+ "integrity": "sha1-3F5pjL0HkmW8c+A3doGk5Og/YW4=",
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/to-object-path": {
+ "version": "0.3.0",
+ "resolved": "https://registry.npmjs.org/to-object-path/-/to-object-path-0.3.0.tgz",
+ "integrity": "sha1-KXWIt7Dn4KwI4E5nL4XB9JmeF68=",
+ "dependencies": {
+ "kind-of": "^3.0.2"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/to-object-path/node_modules/kind-of": {
+ "version": "3.2.2",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz",
+ "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=",
+ "dependencies": {
+ "is-buffer": "^1.1.5"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/to-readable-stream": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/to-readable-stream/-/to-readable-stream-1.0.0.tgz",
+ "integrity": "sha512-Iq25XBt6zD5npPhlLVXGFN3/gyR2/qODcKNNyTMd4vbm39HUaOiAM4PMq0eMVC/Tkxz+Zjdsc55g9yyz+Yq00Q==",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/to-regex": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/to-regex/-/to-regex-3.0.2.tgz",
+ "integrity": "sha512-FWtleNAtZ/Ki2qtqej2CXTOayOH9bHDQF+Q48VpWyDXjbYxA4Yz8iDB31zXOBUlOHHKidDbqGVrTUvQMPmBGBw==",
+ "dependencies": {
+ "define-property": "^2.0.2",
+ "extend-shallow": "^3.0.2",
+ "regex-not": "^1.0.2",
+ "safe-regex": "^1.1.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/to-regex-range": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-2.1.1.tgz",
+ "integrity": "sha1-fIDBe53+vlmeJzZ+DU3VWQFB2zg=",
+ "dependencies": {
+ "is-number": "^3.0.0",
+ "repeat-string": "^1.6.1"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/to-regex/node_modules/extend-shallow": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-3.0.2.tgz",
+ "integrity": "sha1-Jqcarwc7OfshJxcnRhMcJwQCjbg=",
+ "dependencies": {
+ "assign-symbols": "^1.0.0",
+ "is-extendable": "^1.0.1"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/to-regex/node_modules/is-extendable": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz",
+ "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==",
+ "dependencies": {
+ "is-plain-object": "^2.0.4"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/toidentifier": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.0.tgz",
+ "integrity": "sha512-yaOH/Pk/VEhBWWTlhI+qXxDFXlejDGcQipMlyxda9nthulaxLZUNcUqFxokp0vcYnvteJln5FNQDRrxj3YcbVw==",
+ "engines": {
+ "node": ">=0.6"
+ }
+ },
+ "node_modules/token-stream": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/token-stream/-/token-stream-1.0.0.tgz",
+ "integrity": "sha1-zCAOqyYT9BZtJ/+a/HylbUnfbrQ="
+ },
+ "node_modules/toml": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/toml/-/toml-3.0.0.tgz",
+ "integrity": "sha512-y/mWCZinnvxjTKYhJ+pYxwD0mRLVvOtdS2Awbgxln6iEnt4rk0yBxeSBHkGJcPucRiG0e55mwWp+g/05rsrd6w=="
+ },
+ "node_modules/toposort": {
+ "version": "1.0.7",
+ "resolved": "https://registry.npmjs.org/toposort/-/toposort-1.0.7.tgz",
+ "integrity": "sha1-LmhELZ9k7HILjMieZEOsbKqVACk="
+ },
+ "node_modules/tough-cookie": {
+ "version": "2.5.0",
+ "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-2.5.0.tgz",
+ "integrity": "sha512-nlLsUzgm1kfLXSXfRZMc1KLAugd4hqJHDTvc2hDIwS3mZAfMEuMbc03SujMF+GEcpaX/qboeycw6iO8JwVv2+g==",
+ "dependencies": {
+ "psl": "^1.1.28",
+ "punycode": "^2.1.1"
+ },
+ "engines": {
+ "node": ">=0.8"
+ }
+ },
+ "node_modules/tr46": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/tr46/-/tr46-1.0.1.tgz",
+ "integrity": "sha1-qLE/1r/SSJUZZ0zN5VujaTtwbQk=",
+ "dependencies": {
+ "punycode": "^2.1.0"
+ }
+ },
+ "node_modules/tty-browserify": {
+ "version": "0.0.0",
+ "resolved": "https://registry.npmjs.org/tty-browserify/-/tty-browserify-0.0.0.tgz",
+ "integrity": "sha1-oVe6QC2iTpv5V/mqadUk7tQpAaY="
+ },
+ "node_modules/tunnel-agent": {
+ "version": "0.6.0",
+ "resolved": "https://registry.npmjs.org/tunnel-agent/-/tunnel-agent-0.6.0.tgz",
+ "integrity": "sha1-J6XeoGs2sEoKmWZ3SykIaPD8QP0=",
+ "dependencies": {
+ "safe-buffer": "^5.0.1"
+ },
+ "engines": {
+ "node": "*"
+ }
+ },
+ "node_modules/tweetnacl": {
+ "version": "0.14.5",
+ "resolved": "https://registry.npmjs.org/tweetnacl/-/tweetnacl-0.14.5.tgz",
+ "integrity": "sha1-WuaBd/GS1EViadEIr6k/+HQ/T2Q="
+ },
+ "node_modules/type-fest": {
+ "version": "0.8.1",
+ "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.8.1.tgz",
+ "integrity": "sha512-4dbzIzqvjtgiM5rw1k5rEHtBANKmdudhGyBEajN01fEyhaAIhsoKNy6y7+IN93IfpFtwY9iqi7kD+xwKhQsNJA==",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/type-is": {
+ "version": "1.6.18",
+ "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz",
+ "integrity": "sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==",
+ "dependencies": {
+ "media-typer": "0.3.0",
+ "mime-types": "~2.1.24"
+ },
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/typedarray": {
+ "version": "0.0.6",
+ "resolved": "https://registry.npmjs.org/typedarray/-/typedarray-0.0.6.tgz",
+ "integrity": "sha1-hnrHTjhkGHsdPUfZlqeOxciDB3c="
+ },
+ "node_modules/typedarray-to-buffer": {
+ "version": "3.1.5",
+ "resolved": "https://registry.npmjs.org/typedarray-to-buffer/-/typedarray-to-buffer-3.1.5.tgz",
+ "integrity": "sha512-zdu8XMNEDepKKR+XYOXAVPtWui0ly0NtohUscw+UmaHiAWT8hrV1rr//H6V+0DvJ3OQ19S979M0laLfX8rm82Q==",
+ "dependencies": {
+ "is-typedarray": "^1.0.0"
+ }
+ },
+ "node_modules/uc.micro": {
+ "version": "1.0.6",
+ "resolved": "https://registry.npmjs.org/uc.micro/-/uc.micro-1.0.6.tgz",
+ "integrity": "sha512-8Y75pvTYkLJW2hWQHXxoqRgV7qb9B+9vFEtidML+7koHUFapnVJAZ6cKs+Qjz5Aw3aZWHMC6u0wJE3At+nSGwA=="
+ },
+ "node_modules/uglify-js": {
+ "version": "3.4.10",
+ "resolved": "https://registry.npmjs.org/uglify-js/-/uglify-js-3.4.10.tgz",
+ "integrity": "sha512-Y2VsbPVs0FIshJztycsO2SfPk7/KAF/T72qzv9u5EpQ4kB2hQoHlhNQTsNyy6ul7lQtqJN/AoWeS23OzEiEFxw==",
+ "dependencies": {
+ "commander": "~2.19.0",
+ "source-map": "~0.6.1"
+ },
+ "bin": {
+ "uglifyjs": "bin/uglifyjs"
+ },
+ "engines": {
+ "node": ">=0.8.0"
+ }
+ },
+ "node_modules/uglify-js/node_modules/commander": {
+ "version": "2.19.0",
+ "resolved": "https://registry.npmjs.org/commander/-/commander-2.19.0.tgz",
+ "integrity": "sha512-6tvAOO+D6OENvRAh524Dh9jcfKTYDQAqvqezbCW82xj5X0pSrcpxtvRKHLG0yBY6SD7PSDrJaj+0AiOcKVd1Xg=="
+ },
+ "node_modules/uglify-js/node_modules/source-map": {
+ "version": "0.6.1",
+ "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz",
+ "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/unbox-primitive": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/unbox-primitive/-/unbox-primitive-1.0.1.tgz",
+ "integrity": "sha512-tZU/3NqK3dA5gpE1KtyiJUrEB0lxnGkMFHptJ7q6ewdZ8s12QrODwNbhIJStmJkd1QDXa1NRA8aF2A1zk/Ypyw==",
+ "dependencies": {
+ "function-bind": "^1.1.1",
+ "has-bigints": "^1.0.1",
+ "has-symbols": "^1.0.2",
+ "which-boxed-primitive": "^1.0.2"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/unicode-canonical-property-names-ecmascript": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/unicode-canonical-property-names-ecmascript/-/unicode-canonical-property-names-ecmascript-1.0.4.tgz",
+ "integrity": "sha512-jDrNnXWHd4oHiTZnx/ZG7gtUTVp+gCcTTKr8L0HjlwphROEW3+Him+IpvC+xcJEFegapiMZyZe02CyuOnRmbnQ==",
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/unicode-match-property-ecmascript": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/unicode-match-property-ecmascript/-/unicode-match-property-ecmascript-1.0.4.tgz",
+ "integrity": "sha512-L4Qoh15vTfntsn4P1zqnHulG0LdXgjSO035fEpdtp6YxXhMT51Q6vgM5lYdG/5X3MjS+k/Y9Xw4SFCY9IkR0rg==",
+ "dependencies": {
+ "unicode-canonical-property-names-ecmascript": "^1.0.4",
+ "unicode-property-aliases-ecmascript": "^1.0.4"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/unicode-match-property-value-ecmascript": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/unicode-match-property-value-ecmascript/-/unicode-match-property-value-ecmascript-1.2.0.tgz",
+ "integrity": "sha512-wjuQHGQVofmSJv1uVISKLE5zO2rNGzM/KCYZch/QQvez7C1hUhBIuZ701fYXExuufJFMPhv2SyL8CyoIfMLbIQ==",
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/unicode-property-aliases-ecmascript": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/unicode-property-aliases-ecmascript/-/unicode-property-aliases-ecmascript-1.1.0.tgz",
+ "integrity": "sha512-PqSoPh/pWetQ2phoj5RLiaqIk4kCNwoV3CI+LfGmWLKI3rE3kl1h59XpX2BjgDrmbxD9ARtQobPGU1SguCYuQg==",
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/union-value": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/union-value/-/union-value-1.0.1.tgz",
+ "integrity": "sha512-tJfXmxMeWYnczCVs7XAEvIV7ieppALdyepWMkHkwciRpZraG/xwT+s2JN8+pr1+8jCRf80FFzvr+MpQeeoF4Xg==",
+ "dependencies": {
+ "arr-union": "^3.1.0",
+ "get-value": "^2.0.6",
+ "is-extendable": "^0.1.1",
+ "set-value": "^2.0.1"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/uniq": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/uniq/-/uniq-1.0.1.tgz",
+ "integrity": "sha1-sxxa6CVIRKOoKBVBzisEuGWnNP8="
+ },
+ "node_modules/uniqs": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/uniqs/-/uniqs-2.0.0.tgz",
+ "integrity": "sha1-/+3ks2slKQaW5uFl1KWe25mOawI="
+ },
+ "node_modules/unique-filename": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/unique-filename/-/unique-filename-1.1.1.tgz",
+ "integrity": "sha512-Vmp0jIp2ln35UTXuryvjzkjGdRyf9b2lTXuSYUiPmzRcl3FDtYqAwOnTJkAngD9SWhnoJzDbTKwaOrZ+STtxNQ==",
+ "dependencies": {
+ "unique-slug": "^2.0.0"
+ }
+ },
+ "node_modules/unique-slug": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/unique-slug/-/unique-slug-2.0.2.tgz",
+ "integrity": "sha512-zoWr9ObaxALD3DOPfjPSqxt4fnZiWblxHIgeWqW8x7UqDzEtHEQLzji2cuJYQFCU6KmoJikOYAZlrTHHebjx2w==",
+ "dependencies": {
+ "imurmurhash": "^0.1.4"
+ }
+ },
+ "node_modules/unique-string": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/unique-string/-/unique-string-2.0.0.tgz",
+ "integrity": "sha512-uNaeirEPvpZWSgzwsPGtU2zVSTrn/8L5q/IexZmH0eH6SA73CmAA5U4GwORTxQAZs95TAXLNqeLoPPNO5gZfWg==",
+ "dependencies": {
+ "crypto-random-string": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/universalify": {
+ "version": "0.1.2",
+ "resolved": "https://registry.npmjs.org/universalify/-/universalify-0.1.2.tgz",
+ "integrity": "sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg==",
+ "engines": {
+ "node": ">= 4.0.0"
+ }
+ },
+ "node_modules/unpipe": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz",
+ "integrity": "sha1-sr9O6FFKrmFltIF4KdIbLvSZBOw=",
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/unquote": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/unquote/-/unquote-1.1.1.tgz",
+ "integrity": "sha1-j97XMk7G6IoP+LkF58CYzcCG1UQ="
+ },
+ "node_modules/unset-value": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/unset-value/-/unset-value-1.0.0.tgz",
+ "integrity": "sha1-g3aHP30jNRef+x5vw6jtDfyKtVk=",
+ "dependencies": {
+ "has-value": "^0.3.1",
+ "isobject": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/unset-value/node_modules/has-value": {
+ "version": "0.3.1",
+ "resolved": "https://registry.npmjs.org/has-value/-/has-value-0.3.1.tgz",
+ "integrity": "sha1-ex9YutpiyoJ+wKIHgCVlSEWZXh8=",
+ "dependencies": {
+ "get-value": "^2.0.3",
+ "has-values": "^0.1.4",
+ "isobject": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/unset-value/node_modules/has-value/node_modules/isobject": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/isobject/-/isobject-2.1.0.tgz",
+ "integrity": "sha1-8GVWEJaj8dou9GJy+BXIQNh+DIk=",
+ "dependencies": {
+ "isarray": "1.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/unset-value/node_modules/has-values": {
+ "version": "0.1.4",
+ "resolved": "https://registry.npmjs.org/has-values/-/has-values-0.1.4.tgz",
+ "integrity": "sha1-bWHeldkd/Km5oCCJrThL/49it3E=",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/upath": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/upath/-/upath-1.2.0.tgz",
+ "integrity": "sha512-aZwGpamFO61g3OlfT7OQCHqhGnW43ieH9WZeP7QxN/G/jS4jfqUkZxoryvJgVPEcrl5NL/ggHsSmLMHuH64Lhg==",
+ "engines": {
+ "node": ">=4",
+ "yarn": "*"
+ }
+ },
+ "node_modules/update-notifier": {
+ "version": "4.1.3",
+ "resolved": "https://registry.npmjs.org/update-notifier/-/update-notifier-4.1.3.tgz",
+ "integrity": "sha512-Yld6Z0RyCYGB6ckIjffGOSOmHXj1gMeE7aROz4MG+XMkmixBX4jUngrGXNYz7wPKBmtoD4MnBa2Anu7RSKht/A==",
+ "dependencies": {
+ "boxen": "^4.2.0",
+ "chalk": "^3.0.0",
+ "configstore": "^5.0.1",
+ "has-yarn": "^2.1.0",
+ "import-lazy": "^2.1.0",
+ "is-ci": "^2.0.0",
+ "is-installed-globally": "^0.3.1",
+ "is-npm": "^4.0.0",
+ "is-yarn-global": "^0.3.0",
+ "latest-version": "^5.0.0",
+ "pupa": "^2.0.1",
+ "semver-diff": "^3.1.1",
+ "xdg-basedir": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ },
+ "funding": {
+ "url": "https://github.com/yeoman/update-notifier?sponsor=1"
+ }
+ },
+ "node_modules/update-notifier/node_modules/ansi-styles": {
+ "version": "4.3.0",
+ "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz",
+ "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==",
+ "dependencies": {
+ "color-convert": "^2.0.1"
+ },
+ "engines": {
+ "node": ">=8"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/ansi-styles?sponsor=1"
+ }
+ },
+ "node_modules/update-notifier/node_modules/chalk": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/chalk/-/chalk-3.0.0.tgz",
+ "integrity": "sha512-4D3B6Wf41KOYRFdszmDqMCGq5VV/uMAB273JILmO+3jAlh8X4qDtdtgCR3fxtbLEMzSx22QdhnDcJvu2u1fVwg==",
+ "dependencies": {
+ "ansi-styles": "^4.1.0",
+ "supports-color": "^7.1.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/update-notifier/node_modules/color-convert": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz",
+ "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==",
+ "dependencies": {
+ "color-name": "~1.1.4"
+ },
+ "engines": {
+ "node": ">=7.0.0"
+ }
+ },
+ "node_modules/update-notifier/node_modules/color-name": {
+ "version": "1.1.4",
+ "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz",
+ "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA=="
+ },
+ "node_modules/update-notifier/node_modules/has-flag": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz",
+ "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/update-notifier/node_modules/supports-color": {
+ "version": "7.2.0",
+ "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz",
+ "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==",
+ "dependencies": {
+ "has-flag": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/upper-case": {
+ "version": "1.1.3",
+ "resolved": "https://registry.npmjs.org/upper-case/-/upper-case-1.1.3.tgz",
+ "integrity": "sha1-9rRQHC7EzdJrp4vnIilh3ndiFZg="
+ },
+ "node_modules/uri-js": {
+ "version": "4.4.1",
+ "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz",
+ "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==",
+ "dependencies": {
+ "punycode": "^2.1.0"
+ }
+ },
+ "node_modules/urix": {
+ "version": "0.1.0",
+ "resolved": "https://registry.npmjs.org/urix/-/urix-0.1.0.tgz",
+ "integrity": "sha1-2pN/emLiH+wf0Y1Js1wpNQZ6bHI=",
+ "deprecated": "Please see https://github.com/lydell/urix#deprecated"
+ },
+ "node_modules/url": {
+ "version": "0.11.0",
+ "resolved": "https://registry.npmjs.org/url/-/url-0.11.0.tgz",
+ "integrity": "sha1-ODjpfPxgUh63PFJajlW/3Z4uKPE=",
+ "dependencies": {
+ "punycode": "1.3.2",
+ "querystring": "0.2.0"
+ }
+ },
+ "node_modules/url-loader": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/url-loader/-/url-loader-1.1.2.tgz",
+ "integrity": "sha512-dXHkKmw8FhPqu8asTc1puBfe3TehOCo2+RmOOev5suNCIYBcT626kxiWg1NBVkwc4rO8BGa7gP70W7VXuqHrjg==",
+ "dependencies": {
+ "loader-utils": "^1.1.0",
+ "mime": "^2.0.3",
+ "schema-utils": "^1.0.0"
+ },
+ "engines": {
+ "node": ">= 6.9.0"
+ },
+ "peerDependencies": {
+ "webpack": "^3.0.0 || ^4.0.0"
+ }
+ },
+ "node_modules/url-loader/node_modules/schema-utils": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-1.0.0.tgz",
+ "integrity": "sha512-i27Mic4KovM/lnGsy8whRCHhc7VicJajAjTrYg11K9zfZXnYIt4k5F+kZkwjnrhKzLic/HLU4j11mjsz2G/75g==",
+ "dependencies": {
+ "ajv": "^6.1.0",
+ "ajv-errors": "^1.0.0",
+ "ajv-keywords": "^3.1.0"
+ },
+ "engines": {
+ "node": ">= 4"
+ }
+ },
+ "node_modules/url-parse": {
+ "version": "1.5.1",
+ "resolved": "https://registry.npmjs.org/url-parse/-/url-parse-1.5.1.tgz",
+ "integrity": "sha512-HOfCOUJt7iSYzEx/UqgtwKRMC6EU91NFhsCHMv9oM03VJcVo2Qrp8T8kI9D7amFf1cu+/3CEhgb3rF9zL7k85Q==",
+ "dependencies": {
+ "querystringify": "^2.1.1",
+ "requires-port": "^1.0.0"
+ }
+ },
+ "node_modules/url-parse-lax": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/url-parse-lax/-/url-parse-lax-3.0.0.tgz",
+ "integrity": "sha1-FrXK/Afb42dsGxmZF3gj1lA6yww=",
+ "dependencies": {
+ "prepend-http": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/url/node_modules/punycode": {
+ "version": "1.3.2",
+ "resolved": "https://registry.npmjs.org/punycode/-/punycode-1.3.2.tgz",
+ "integrity": "sha1-llOgNvt8HuQjQvIyXM7v6jkmxI0="
+ },
+ "node_modules/url/node_modules/querystring": {
+ "version": "0.2.0",
+ "resolved": "https://registry.npmjs.org/querystring/-/querystring-0.2.0.tgz",
+ "integrity": "sha1-sgmEkgO7Jd+CDadW50cAWHhSFiA=",
+ "engines": {
+ "node": ">=0.4.x"
+ }
+ },
+ "node_modules/use": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/use/-/use-3.1.1.tgz",
+ "integrity": "sha512-cwESVXlO3url9YWlFW/TA9cshCEhtu7IKJ/p5soJ/gGpj7vbvFrAY/eIioQ6Dw23KjZhYgiIo8HOs1nQ2vr/oQ==",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/util": {
+ "version": "0.11.1",
+ "resolved": "https://registry.npmjs.org/util/-/util-0.11.1.tgz",
+ "integrity": "sha512-HShAsny+zS2TZfaXxD9tYj4HQGlBezXZMZuM/S5PKLLoZkShZiGk9o5CzukI1LVHZvjdvZ2Sj1aW/Ndn2NB/HQ==",
+ "dependencies": {
+ "inherits": "2.0.3"
+ }
+ },
+ "node_modules/util-deprecate": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz",
+ "integrity": "sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8="
+ },
+ "node_modules/util.promisify": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/util.promisify/-/util.promisify-1.0.1.tgz",
+ "integrity": "sha512-g9JpC/3He3bm38zsLupWryXHoEcS22YHthuPQSJdMy6KNrzIRzWqcsHzD/WUnqe45whVou4VIsPew37DoXWNrA==",
+ "dependencies": {
+ "define-properties": "^1.1.3",
+ "es-abstract": "^1.17.2",
+ "has-symbols": "^1.0.1",
+ "object.getownpropertydescriptors": "^2.1.0"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/util/node_modules/inherits": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz",
+ "integrity": "sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4="
+ },
+ "node_modules/utila": {
+ "version": "0.4.0",
+ "resolved": "https://registry.npmjs.org/utila/-/utila-0.4.0.tgz",
+ "integrity": "sha1-ihagXURWV6Oupe7MWxKk+lN5dyw="
+ },
+ "node_modules/utils-merge": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz",
+ "integrity": "sha1-n5VxD1CiZ5R7LMwSR0HBAoQn5xM=",
+ "engines": {
+ "node": ">= 0.4.0"
+ }
+ },
+ "node_modules/uuid": {
+ "version": "3.4.0",
+ "resolved": "https://registry.npmjs.org/uuid/-/uuid-3.4.0.tgz",
+ "integrity": "sha512-HjSDRw6gZE5JMggctHBcjVak08+KEVhSIiDzFnT9S9aegmp85S/bReBVTb4QTFaRNptJ9kuYaNhnbNEOkbKb/A==",
+ "bin": {
+ "uuid": "bin/uuid"
+ }
+ },
+ "node_modules/v-runtime-template": {
+ "version": "1.10.0",
+ "resolved": "https://registry.npmjs.org/v-runtime-template/-/v-runtime-template-1.10.0.tgz",
+ "integrity": "sha512-WLlq9jUepSfUrMEenw3mn7FDXX6hhbl11JjC1OKhwLzifHzVrY5a696TUHDPyj9jke3GGnR7b+2T3od/RL5cww=="
+ },
+ "node_modules/vary": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz",
+ "integrity": "sha1-IpnwLG3tMNSllhsLn3RSShj2NPw=",
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/vendors": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/vendors/-/vendors-1.0.4.tgz",
+ "integrity": "sha512-/juG65kTL4Cy2su4P8HjtkTxk6VmJDiOPBufWniqQ6wknac6jNiXS9vU+hO3wgusiyqWlzTbVHi0dyJqRONg3w==",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/verror": {
+ "version": "1.10.0",
+ "resolved": "https://registry.npmjs.org/verror/-/verror-1.10.0.tgz",
+ "integrity": "sha1-OhBcoXBTr1XW4nDB+CiGguGNpAA=",
+ "engines": [
+ "node >=0.6.0"
+ ],
+ "dependencies": {
+ "assert-plus": "^1.0.0",
+ "core-util-is": "1.0.2",
+ "extsprintf": "^1.2.0"
+ }
+ },
+ "node_modules/vm-browserify": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/vm-browserify/-/vm-browserify-1.1.2.tgz",
+ "integrity": "sha512-2ham8XPWTONajOR0ohOKOHXkm3+gaBmGut3SRuu75xLd/RRaY6vqgh8NBYYk7+RW3u5AtzPQZG8F10LHkl0lAQ=="
+ },
+ "node_modules/void-elements": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/void-elements/-/void-elements-3.1.0.tgz",
+ "integrity": "sha1-YU9/v42AHwu18GYfWy9XhXUOTwk=",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/vue": {
+ "version": "2.6.12",
+ "resolved": "https://registry.npmjs.org/vue/-/vue-2.6.12.tgz",
+ "integrity": "sha512-uhmLFETqPPNyuLLbsKz6ioJ4q7AZHzD8ZVFNATNyICSZouqP2Sz0rotWQC8UNBF6VGSCs5abnKJoStA6JbCbfg=="
+ },
+ "node_modules/vue-hot-reload-api": {
+ "version": "2.3.4",
+ "resolved": "https://registry.npmjs.org/vue-hot-reload-api/-/vue-hot-reload-api-2.3.4.tgz",
+ "integrity": "sha512-BXq3jwIagosjgNVae6tkHzzIk6a8MHFtzAdwhnV5VlvPTFxDCvIttgSiHWjdGoTJvXtmRu5HacExfdarRcFhog=="
+ },
+ "node_modules/vue-loader": {
+ "version": "15.9.6",
+ "resolved": "https://registry.npmjs.org/vue-loader/-/vue-loader-15.9.6.tgz",
+ "integrity": "sha512-j0cqiLzwbeImIC6nVIby2o/ABAWhlppyL/m5oJ67R5MloP0hj/DtFgb0Zmq3J9CG7AJ+AXIvHVnJAPBvrLyuDg==",
+ "dependencies": {
+ "@vue/component-compiler-utils": "^3.1.0",
+ "hash-sum": "^1.0.2",
+ "loader-utils": "^1.1.0",
+ "vue-hot-reload-api": "^2.3.0",
+ "vue-style-loader": "^4.1.0"
+ },
+ "peerDependencies": {
+ "css-loader": "*",
+ "webpack": "^3.0.0 || ^4.1.0 || ^5.0.0-0"
+ },
+ "peerDependenciesMeta": {
+ "cache-loader": {
+ "optional": true
+ },
+ "vue-template-compiler": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/vue-router": {
+ "version": "3.5.1",
+ "resolved": "https://registry.npmjs.org/vue-router/-/vue-router-3.5.1.tgz",
+ "integrity": "sha512-RRQNLT8Mzr8z7eL4p7BtKvRaTSGdCbTy2+Mm5HTJvLGYSSeG9gDzNasJPP/yOYKLy+/cLG/ftrqq5fvkFwBJEw=="
+ },
+ "node_modules/vue-server-renderer": {
+ "version": "2.6.12",
+ "resolved": "https://registry.npmjs.org/vue-server-renderer/-/vue-server-renderer-2.6.12.tgz",
+ "integrity": "sha512-3LODaOsnQx7iMFTBLjki8xSyOxhCtbZ+nQie0wWY4iOVeEtTg1a3YQAjd82WvKxrWHHTshjvLb7OXMc2/dYuxw==",
+ "dependencies": {
+ "chalk": "^1.1.3",
+ "hash-sum": "^1.0.2",
+ "he": "^1.1.0",
+ "lodash.template": "^4.5.0",
+ "lodash.uniq": "^4.5.0",
+ "resolve": "^1.2.0",
+ "serialize-javascript": "^3.1.0",
+ "source-map": "0.5.6"
+ }
+ },
+ "node_modules/vue-server-renderer/node_modules/ansi-regex": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-2.1.1.tgz",
+ "integrity": "sha1-w7M6te42DYbg5ijwRorn7yfWVN8=",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/vue-server-renderer/node_modules/ansi-styles": {
+ "version": "2.2.1",
+ "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-2.2.1.tgz",
+ "integrity": "sha1-tDLdM1i2NM914eRmQ2gkBTPB3b4=",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/vue-server-renderer/node_modules/chalk": {
+ "version": "1.1.3",
+ "resolved": "https://registry.npmjs.org/chalk/-/chalk-1.1.3.tgz",
+ "integrity": "sha1-qBFcVeSnAv5NFQq9OHKCKn4J/Jg=",
+ "dependencies": {
+ "ansi-styles": "^2.2.1",
+ "escape-string-regexp": "^1.0.2",
+ "has-ansi": "^2.0.0",
+ "strip-ansi": "^3.0.0",
+ "supports-color": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/vue-server-renderer/node_modules/serialize-javascript": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-3.1.0.tgz",
+ "integrity": "sha512-JIJT1DGiWmIKhzRsG91aS6Ze4sFUrYbltlkg2onR5OrnNM02Kl/hnY/T4FN2omvyeBbQmMJv+K4cPOpGzOTFBg==",
+ "dependencies": {
+ "randombytes": "^2.1.0"
+ }
+ },
+ "node_modules/vue-server-renderer/node_modules/source-map": {
+ "version": "0.5.6",
+ "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.6.tgz",
+ "integrity": "sha1-dc449SvwczxafwwRjYEzSiu19BI=",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/vue-server-renderer/node_modules/strip-ansi": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-3.0.1.tgz",
+ "integrity": "sha1-ajhfuIU9lS1f8F0Oiq+UJ43GPc8=",
+ "dependencies": {
+ "ansi-regex": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/vue-server-renderer/node_modules/supports-color": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-2.0.0.tgz",
+ "integrity": "sha1-U10EXOa2Nj+kARcIRimZXp3zJMc=",
+ "engines": {
+ "node": ">=0.8.0"
+ }
+ },
+ "node_modules/vue-style-loader": {
+ "version": "4.1.3",
+ "resolved": "https://registry.npmjs.org/vue-style-loader/-/vue-style-loader-4.1.3.tgz",
+ "integrity": "sha512-sFuh0xfbtpRlKfm39ss/ikqs9AbKCoXZBpHeVZ8Tx650o0k0q/YCM7FRvigtxpACezfq6af+a7JeqVTWvncqDg==",
+ "dependencies": {
+ "hash-sum": "^1.0.2",
+ "loader-utils": "^1.0.2"
+ }
+ },
+ "node_modules/vue-template-compiler": {
+ "version": "2.6.12",
+ "resolved": "https://registry.npmjs.org/vue-template-compiler/-/vue-template-compiler-2.6.12.tgz",
+ "integrity": "sha512-OzzZ52zS41YUbkCBfdXShQTe69j1gQDZ9HIX8miuC9C3rBCk9wIRjLiZZLrmX9V+Ftq/YEyv1JaVr5Y/hNtByg==",
+ "dependencies": {
+ "de-indent": "^1.0.2",
+ "he": "^1.1.0"
+ }
+ },
+ "node_modules/vue-template-es2015-compiler": {
+ "version": "1.9.1",
+ "resolved": "https://registry.npmjs.org/vue-template-es2015-compiler/-/vue-template-es2015-compiler-1.9.1.tgz",
+ "integrity": "sha512-4gDntzrifFnCEvyoO8PqyJDmguXgVPxKiIxrBKjIowvL9l+N66196+72XVYR8BBf1Uv1Fgt3bGevJ+sEmxfZzw=="
+ },
+ "node_modules/vuepress": {
+ "version": "1.8.2",
+ "resolved": "https://registry.npmjs.org/vuepress/-/vuepress-1.8.2.tgz",
+ "integrity": "sha512-BU1lUDwsA3ghf7a9ga4dsf0iTc++Z/l7BR1kUagHWVBHw7HNRgRDfAZBDDQXhllMILVToIxaTifpne9mSi94OA==",
+ "hasInstallScript": true,
+ "dependencies": {
+ "@vuepress/core": "1.8.2",
+ "@vuepress/theme-default": "1.8.2",
+ "cac": "^6.5.6",
+ "envinfo": "^7.2.0",
+ "opencollective-postinstall": "^2.0.2",
+ "update-notifier": "^4.0.0"
+ },
+ "bin": {
+ "vuepress": "cli.js"
+ },
+ "engines": {
+ "node": ">=8.6"
+ }
+ },
+ "node_modules/vuepress-html-webpack-plugin": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/vuepress-html-webpack-plugin/-/vuepress-html-webpack-plugin-3.2.0.tgz",
+ "integrity": "sha512-BebAEl1BmWlro3+VyDhIOCY6Gef2MCBllEVAP3NUAtMguiyOwo/dClbwJ167WYmcxHJKLl7b0Chr9H7fpn1d0A==",
+ "dependencies": {
+ "html-minifier": "^3.2.3",
+ "loader-utils": "^0.2.16",
+ "lodash": "^4.17.3",
+ "pretty-error": "^2.0.2",
+ "tapable": "^1.0.0",
+ "toposort": "^1.0.0",
+ "util.promisify": "1.0.0"
+ },
+ "engines": {
+ "node": ">=6.9"
+ },
+ "peerDependencies": {
+ "webpack": "^1.0.0 || ^2.0.0 || ^3.0.0 || ^4.0.0"
+ }
+ },
+ "node_modules/vuepress-html-webpack-plugin/node_modules/big.js": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/big.js/-/big.js-3.2.0.tgz",
+ "integrity": "sha512-+hN/Zh2D08Mx65pZ/4g5bsmNiZUuChDiQfTUQ7qJr4/kuopCr88xZsAXv6mBoZEsUI4OuGHlX59qE94K2mMW8Q==",
+ "engines": {
+ "node": "*"
+ }
+ },
+ "node_modules/vuepress-html-webpack-plugin/node_modules/emojis-list": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/emojis-list/-/emojis-list-2.1.0.tgz",
+ "integrity": "sha1-TapNnbAPmBmIDHn6RXrlsJof04k=",
+ "engines": {
+ "node": ">= 0.10"
+ }
+ },
+ "node_modules/vuepress-html-webpack-plugin/node_modules/json5": {
+ "version": "0.5.1",
+ "resolved": "https://registry.npmjs.org/json5/-/json5-0.5.1.tgz",
+ "integrity": "sha1-Hq3nrMASA0rYTiOWdn6tn6VJWCE=",
+ "bin": {
+ "json5": "lib/cli.js"
+ }
+ },
+ "node_modules/vuepress-html-webpack-plugin/node_modules/loader-utils": {
+ "version": "0.2.17",
+ "resolved": "https://registry.npmjs.org/loader-utils/-/loader-utils-0.2.17.tgz",
+ "integrity": "sha1-+G5jdNQyBabmxg6RlvF8Apm/s0g=",
+ "dependencies": {
+ "big.js": "^3.1.3",
+ "emojis-list": "^2.0.0",
+ "json5": "^0.5.0",
+ "object-assign": "^4.0.1"
+ }
+ },
+ "node_modules/vuepress-html-webpack-plugin/node_modules/util.promisify": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/util.promisify/-/util.promisify-1.0.0.tgz",
+ "integrity": "sha512-i+6qA2MPhvoKLuxnJNpXAGhg7HphQOSUq2LKMZD0m15EiskXUkMvKdF4Uui0WYeCUGea+o2cw/ZuwehtfsrNkA==",
+ "dependencies": {
+ "define-properties": "^1.1.2",
+ "object.getownpropertydescriptors": "^2.0.3"
+ }
+ },
+ "node_modules/vuepress-plugin-container": {
+ "version": "2.1.5",
+ "resolved": "https://registry.npmjs.org/vuepress-plugin-container/-/vuepress-plugin-container-2.1.5.tgz",
+ "integrity": "sha512-TQrDX/v+WHOihj3jpilVnjXu9RcTm6m8tzljNJwYhxnJUW0WWQ0hFLcDTqTBwgKIFdEiSxVOmYE+bJX/sq46MA==",
+ "dependencies": {
+ "@vuepress/shared-utils": "^1.2.0",
+ "markdown-it-container": "^2.0.0"
+ }
+ },
+ "node_modules/vuepress-plugin-google-tag-manager": {
+ "version": "0.0.5",
+ "resolved": "https://registry.npmjs.org/vuepress-plugin-google-tag-manager/-/vuepress-plugin-google-tag-manager-0.0.5.tgz",
+ "integrity": "sha512-Hm1GNDdNmc4Vs9c3OMfTtHicB/oZWNCmzMFPdlOObVN1OjizIjImdm+LZIwiVKVndT2TQ4BPhMx7HQkovmD2Lg=="
+ },
+ "node_modules/vuepress-plugin-sitemap": {
+ "version": "2.3.1",
+ "resolved": "https://registry.npmjs.org/vuepress-plugin-sitemap/-/vuepress-plugin-sitemap-2.3.1.tgz",
+ "integrity": "sha512-n+8lbukhrKrsI9H/EX0EBgkE1pn85LAQFvQ5dIvrZP4Kz6JxPOPPNTQmZMhahQV1tXbLZQCEN7A1WZH4x+arJQ==",
+ "dependencies": {
+ "sitemap": "^3.0.0"
+ },
+ "bin": {
+ "vuepress-sitemap": "cli.js"
+ },
+ "peerDependencies": {
+ "chalk": "^2.0.0",
+ "commander": "^2.0.0",
+ "esm": "^3.0.0"
+ }
+ },
+ "node_modules/vuepress-plugin-smooth-scroll": {
+ "version": "0.0.3",
+ "resolved": "https://registry.npmjs.org/vuepress-plugin-smooth-scroll/-/vuepress-plugin-smooth-scroll-0.0.3.tgz",
+ "integrity": "sha512-qsQkDftLVFLe8BiviIHaLV0Ea38YLZKKonDGsNQy1IE0wllFpFIEldWD8frWZtDFdx6b/O3KDMgVQ0qp5NjJCg==",
+ "dependencies": {
+ "smoothscroll-polyfill": "^0.4.3"
+ }
+ },
+ "node_modules/vuepress-theme-cosmos": {
+ "version": "1.0.182",
+ "resolved": "https://registry.npmjs.org/vuepress-theme-cosmos/-/vuepress-theme-cosmos-1.0.182.tgz",
+ "integrity": "sha512-Mc1ZOsSqLGgbB9xEXsx5QkHUBkKXOoDgkjrp5iX+fwmM4TCmR4MWbTlKpEzfzsxZ1DuixtwVkv0MT+eNvD2Lfw==",
+ "dependencies": {
+ "@cosmos-ui/vue": "^0.35.0",
+ "@vuepress/plugin-google-analytics": "1.7.1",
+ "algoliasearch": "^4.2.0",
+ "axios": "^0.21.0",
+ "cheerio": "^1.0.0-rc.3",
+ "clipboard-copy": "^3.1.0",
+ "entities": "2.1.0",
+ "esm": "^3.2.25",
+ "gray-matter": "^4.0.2",
+ "hotkeys-js": "3.8.1",
+ "jsonp": "^0.2.1",
+ "markdown-it": "^12.0.0",
+ "markdown-it-attrs": "^3.0.3",
+ "prismjs": "^1.22.0",
+ "pug": "^3.0.1",
+ "pug-plain-loader": "^1.0.0",
+ "stylus": "^0.54.8",
+ "stylus-loader": "^3.0.2",
+ "tiny-cookie": "^2.3.2",
+ "v-runtime-template": "^1.10.0",
+ "vuepress": "^1.5.4",
+ "vuepress-plugin-google-tag-manager": "0.0.5",
+ "vuepress-plugin-sitemap": "^2.3.1"
+ }
+ },
+ "node_modules/watchpack": {
+ "version": "1.7.5",
+ "resolved": "https://registry.npmjs.org/watchpack/-/watchpack-1.7.5.tgz",
+ "integrity": "sha512-9P3MWk6SrKjHsGkLT2KHXdQ/9SNkyoJbabxnKOoJepsvJjJG8uYTR3yTPxPQvNDI3w4Nz1xnE0TLHK4RIVe/MQ==",
+ "dependencies": {
+ "graceful-fs": "^4.1.2",
+ "neo-async": "^2.5.0"
+ },
+ "optionalDependencies": {
+ "chokidar": "^3.4.1",
+ "watchpack-chokidar2": "^2.0.1"
+ }
+ },
+ "node_modules/watchpack-chokidar2": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/watchpack-chokidar2/-/watchpack-chokidar2-2.0.1.tgz",
+ "integrity": "sha512-nCFfBIPKr5Sh61s4LPpy1Wtfi0HE8isJ3d2Yb5/Ppw2P2B/3eVSEBjKfN0fmHJSK14+31KwMKmcrzs2GM4P0Ww==",
+ "optional": true,
+ "dependencies": {
+ "chokidar": "^2.1.8"
+ }
+ },
+ "node_modules/watchpack/node_modules/anymatch": {
+ "version": "3.1.2",
+ "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.2.tgz",
+ "integrity": "sha512-P43ePfOAIupkguHUycrc4qJ9kz8ZiuOUijaETwX7THt0Y/GNK7v0aa8rY816xWjZ7rJdA5XdMcpVFTKMq+RvWg==",
+ "optional": true,
+ "dependencies": {
+ "normalize-path": "^3.0.0",
+ "picomatch": "^2.0.4"
+ },
+ "engines": {
+ "node": ">= 8"
+ }
+ },
+ "node_modules/watchpack/node_modules/binary-extensions": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.2.0.tgz",
+ "integrity": "sha512-jDctJ/IVQbZoJykoeHbhXpOlNBqGNcwXJKJog42E5HDPUwQTSdjCHdihjj0DlnheQ7blbT6dHOafNAiS8ooQKA==",
+ "optional": true,
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/watchpack/node_modules/braces": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz",
+ "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==",
+ "optional": true,
+ "dependencies": {
+ "fill-range": "^7.0.1"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/watchpack/node_modules/chokidar": {
+ "version": "3.5.1",
+ "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.5.1.tgz",
+ "integrity": "sha512-9+s+Od+W0VJJzawDma/gvBNQqkTiqYTWLuZoyAsivsI4AaWTCzHG06/TMjsf1cYe9Cb97UCEhjz7HvnPk2p/tw==",
+ "optional": true,
+ "dependencies": {
+ "anymatch": "~3.1.1",
+ "braces": "~3.0.2",
+ "glob-parent": "~5.1.0",
+ "is-binary-path": "~2.1.0",
+ "is-glob": "~4.0.1",
+ "normalize-path": "~3.0.0",
+ "readdirp": "~3.5.0"
+ },
+ "engines": {
+ "node": ">= 8.10.0"
+ },
+ "optionalDependencies": {
+ "fsevents": "~2.3.1"
+ }
+ },
+ "node_modules/watchpack/node_modules/fill-range": {
+ "version": "7.0.1",
+ "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz",
+ "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==",
+ "optional": true,
+ "dependencies": {
+ "to-regex-range": "^5.0.1"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/watchpack/node_modules/fsevents": {
+ "version": "2.3.2",
+ "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.2.tgz",
+ "integrity": "sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==",
+ "hasInstallScript": true,
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "engines": {
+ "node": "^8.16.0 || ^10.6.0 || >=11.0.0"
+ }
+ },
+ "node_modules/watchpack/node_modules/glob-parent": {
+ "version": "5.1.2",
+ "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz",
+ "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==",
+ "optional": true,
+ "dependencies": {
+ "is-glob": "^4.0.1"
+ },
+ "engines": {
+ "node": ">= 6"
+ }
+ },
+ "node_modules/watchpack/node_modules/is-binary-path": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz",
+ "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==",
+ "optional": true,
+ "dependencies": {
+ "binary-extensions": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/watchpack/node_modules/is-number": {
+ "version": "7.0.0",
+ "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz",
+ "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==",
+ "optional": true,
+ "engines": {
+ "node": ">=0.12.0"
+ }
+ },
+ "node_modules/watchpack/node_modules/readdirp": {
+ "version": "3.5.0",
+ "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.5.0.tgz",
+ "integrity": "sha512-cMhu7c/8rdhkHXWsY+osBhfSy0JikwpHK/5+imo+LpeasTF8ouErHrlYkwT0++njiyuDvc7OFY5T3ukvZ8qmFQ==",
+ "optional": true,
+ "dependencies": {
+ "picomatch": "^2.2.1"
+ },
+ "engines": {
+ "node": ">=8.10.0"
+ }
+ },
+ "node_modules/watchpack/node_modules/to-regex-range": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz",
+ "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==",
+ "optional": true,
+ "dependencies": {
+ "is-number": "^7.0.0"
+ },
+ "engines": {
+ "node": ">=8.0"
+ }
+ },
+ "node_modules/wbuf": {
+ "version": "1.7.3",
+ "resolved": "https://registry.npmjs.org/wbuf/-/wbuf-1.7.3.tgz",
+ "integrity": "sha512-O84QOnr0icsbFGLS0O3bI5FswxzRr8/gHwWkDlQFskhSPryQXvrTMxjxGP4+iWYoauLoBvfDpkrOauZ+0iZpDA==",
+ "dependencies": {
+ "minimalistic-assert": "^1.0.0"
+ }
+ },
+ "node_modules/webidl-conversions": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-4.0.2.tgz",
+ "integrity": "sha512-YQ+BmxuTgd6UXZW3+ICGfyqRyHXVlD5GtQr5+qjiNW7bF0cqrzX500HVXPBOvgXb5YnzDd+h0zqyv61KUD7+Sg=="
+ },
+ "node_modules/webpack": {
+ "version": "4.46.0",
+ "resolved": "https://registry.npmjs.org/webpack/-/webpack-4.46.0.tgz",
+ "integrity": "sha512-6jJuJjg8znb/xRItk7bkT0+Q7AHCYjjFnvKIWQPkNIOyRqoCGvkOs0ipeQzrqz4l5FtN5ZI/ukEHroeX/o1/5Q==",
+ "dependencies": {
+ "@webassemblyjs/ast": "1.9.0",
+ "@webassemblyjs/helper-module-context": "1.9.0",
+ "@webassemblyjs/wasm-edit": "1.9.0",
+ "@webassemblyjs/wasm-parser": "1.9.0",
+ "acorn": "^6.4.1",
+ "ajv": "^6.10.2",
+ "ajv-keywords": "^3.4.1",
+ "chrome-trace-event": "^1.0.2",
+ "enhanced-resolve": "^4.5.0",
+ "eslint-scope": "^4.0.3",
+ "json-parse-better-errors": "^1.0.2",
+ "loader-runner": "^2.4.0",
+ "loader-utils": "^1.2.3",
+ "memory-fs": "^0.4.1",
+ "micromatch": "^3.1.10",
+ "mkdirp": "^0.5.3",
+ "neo-async": "^2.6.1",
+ "node-libs-browser": "^2.2.1",
+ "schema-utils": "^1.0.0",
+ "tapable": "^1.1.3",
+ "terser-webpack-plugin": "^1.4.3",
+ "watchpack": "^1.7.4",
+ "webpack-sources": "^1.4.1"
+ },
+ "bin": {
+ "webpack": "bin/webpack.js"
+ },
+ "engines": {
+ "node": ">=6.11.5"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/webpack"
+ },
+ "peerDependenciesMeta": {
+ "webpack-cli": {
+ "optional": true
+ },
+ "webpack-command": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/webpack-chain": {
+ "version": "6.5.1",
+ "resolved": "https://registry.npmjs.org/webpack-chain/-/webpack-chain-6.5.1.tgz",
+ "integrity": "sha512-7doO/SRtLu8q5WM0s7vPKPWX580qhi0/yBHkOxNkv50f6qB76Zy9o2wRTrrPULqYTvQlVHuvbA8v+G5ayuUDsA==",
+ "dependencies": {
+ "deepmerge": "^1.5.2",
+ "javascript-stringify": "^2.0.1"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/webpack-dev-middleware": {
+ "version": "3.7.3",
+ "resolved": "https://registry.npmjs.org/webpack-dev-middleware/-/webpack-dev-middleware-3.7.3.tgz",
+ "integrity": "sha512-djelc/zGiz9nZj/U7PTBi2ViorGJXEWo/3ltkPbDyxCXhhEXkW0ce99falaok4TPj+AsxLiXJR0EBOb0zh9fKQ==",
+ "dependencies": {
+ "memory-fs": "^0.4.1",
+ "mime": "^2.4.4",
+ "mkdirp": "^0.5.1",
+ "range-parser": "^1.2.1",
+ "webpack-log": "^2.0.0"
+ },
+ "engines": {
+ "node": ">= 6"
+ },
+ "peerDependencies": {
+ "webpack": "^4.0.0 || ^5.0.0"
+ }
+ },
+ "node_modules/webpack-dev-middleware/node_modules/mkdirp": {
+ "version": "0.5.5",
+ "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz",
+ "integrity": "sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==",
+ "dependencies": {
+ "minimist": "^1.2.5"
+ },
+ "bin": {
+ "mkdirp": "bin/cmd.js"
+ }
+ },
+ "node_modules/webpack-dev-server": {
+ "version": "3.11.2",
+ "resolved": "https://registry.npmjs.org/webpack-dev-server/-/webpack-dev-server-3.11.2.tgz",
+ "integrity": "sha512-A80BkuHRQfCiNtGBS1EMf2ChTUs0x+B3wGDFmOeT4rmJOHhHTCH2naNxIHhmkr0/UillP4U3yeIyv1pNp+QDLQ==",
+ "dependencies": {
+ "ansi-html": "0.0.7",
+ "bonjour": "^3.5.0",
+ "chokidar": "^2.1.8",
+ "compression": "^1.7.4",
+ "connect-history-api-fallback": "^1.6.0",
+ "debug": "^4.1.1",
+ "del": "^4.1.1",
+ "express": "^4.17.1",
+ "html-entities": "^1.3.1",
+ "http-proxy-middleware": "0.19.1",
+ "import-local": "^2.0.0",
+ "internal-ip": "^4.3.0",
+ "ip": "^1.1.5",
+ "is-absolute-url": "^3.0.3",
+ "killable": "^1.0.1",
+ "loglevel": "^1.6.8",
+ "opn": "^5.5.0",
+ "p-retry": "^3.0.1",
+ "portfinder": "^1.0.26",
+ "schema-utils": "^1.0.0",
+ "selfsigned": "^1.10.8",
+ "semver": "^6.3.0",
+ "serve-index": "^1.9.1",
+ "sockjs": "^0.3.21",
+ "sockjs-client": "^1.5.0",
+ "spdy": "^4.0.2",
+ "strip-ansi": "^3.0.1",
+ "supports-color": "^6.1.0",
+ "url": "^0.11.0",
+ "webpack-dev-middleware": "^3.7.2",
+ "webpack-log": "^2.0.0",
+ "ws": "^6.2.1",
+ "yargs": "^13.3.2"
+ },
+ "bin": {
+ "webpack-dev-server": "bin/webpack-dev-server.js"
+ },
+ "engines": {
+ "node": ">= 6.11.5"
+ },
+ "peerDependencies": {
+ "webpack": "^4.0.0 || ^5.0.0"
+ },
+ "peerDependenciesMeta": {
+ "webpack-cli": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/webpack-dev-server/node_modules/ansi-regex": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-2.1.1.tgz",
+ "integrity": "sha1-w7M6te42DYbg5ijwRorn7yfWVN8=",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/webpack-dev-server/node_modules/debug": {
+ "version": "4.3.1",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz",
+ "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==",
+ "dependencies": {
+ "ms": "2.1.2"
+ },
+ "engines": {
+ "node": ">=6.0"
+ },
+ "peerDependenciesMeta": {
+ "supports-color": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/webpack-dev-server/node_modules/is-absolute-url": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/is-absolute-url/-/is-absolute-url-3.0.3.tgz",
+ "integrity": "sha512-opmNIX7uFnS96NtPmhWQgQx6/NYFgsUXYMllcfzwWKUMwfo8kku1TvE6hkNcH+Q1ts5cMVrsY7j0bxXQDciu9Q==",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/webpack-dev-server/node_modules/ms": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz",
+ "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w=="
+ },
+ "node_modules/webpack-dev-server/node_modules/schema-utils": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-1.0.0.tgz",
+ "integrity": "sha512-i27Mic4KovM/lnGsy8whRCHhc7VicJajAjTrYg11K9zfZXnYIt4k5F+kZkwjnrhKzLic/HLU4j11mjsz2G/75g==",
+ "dependencies": {
+ "ajv": "^6.1.0",
+ "ajv-errors": "^1.0.0",
+ "ajv-keywords": "^3.1.0"
+ },
+ "engines": {
+ "node": ">= 4"
+ }
+ },
+ "node_modules/webpack-dev-server/node_modules/strip-ansi": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-3.0.1.tgz",
+ "integrity": "sha1-ajhfuIU9lS1f8F0Oiq+UJ43GPc8=",
+ "dependencies": {
+ "ansi-regex": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/webpack-dev-server/node_modules/supports-color": {
+ "version": "6.1.0",
+ "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-6.1.0.tgz",
+ "integrity": "sha512-qe1jfm1Mg7Nq/NSh6XE24gPXROEVsWHxC1LIx//XNlD9iw7YZQGjZNjYN7xGaEG6iKdA8EtNFW6R0gjnVXp+wQ==",
+ "dependencies": {
+ "has-flag": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/webpack-log": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/webpack-log/-/webpack-log-2.0.0.tgz",
+ "integrity": "sha512-cX8G2vR/85UYG59FgkoMamwHUIkSSlV3bBMRsbxVXVUk2j6NleCKjQ/WE9eYg9WY4w25O9w8wKP4rzNZFmUcUg==",
+ "dependencies": {
+ "ansi-colors": "^3.0.0",
+ "uuid": "^3.3.2"
+ },
+ "engines": {
+ "node": ">= 6"
+ }
+ },
+ "node_modules/webpack-merge": {
+ "version": "4.2.2",
+ "resolved": "https://registry.npmjs.org/webpack-merge/-/webpack-merge-4.2.2.tgz",
+ "integrity": "sha512-TUE1UGoTX2Cd42j3krGYqObZbOD+xF7u28WB7tfUordytSjbWTIjK/8V0amkBfTYN4/pB/GIDlJZZ657BGG19g==",
+ "dependencies": {
+ "lodash": "^4.17.15"
+ }
+ },
+ "node_modules/webpack-sources": {
+ "version": "1.4.3",
+ "resolved": "https://registry.npmjs.org/webpack-sources/-/webpack-sources-1.4.3.tgz",
+ "integrity": "sha512-lgTS3Xhv1lCOKo7SA5TjKXMjpSM4sBjNV5+q2bqesbSPs5FjGmU6jjtBSkX9b4qW87vDIsCIlUPOEhbZrMdjeQ==",
+ "dependencies": {
+ "source-list-map": "^2.0.0",
+ "source-map": "~0.6.1"
+ }
+ },
+ "node_modules/webpack-sources/node_modules/source-map": {
+ "version": "0.6.1",
+ "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz",
+ "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/webpack/node_modules/acorn": {
+ "version": "6.4.2",
+ "resolved": "https://registry.npmjs.org/acorn/-/acorn-6.4.2.tgz",
+ "integrity": "sha512-XtGIhXwF8YM8bJhGxG5kXgjkEuNGLTkoYqVE+KMR+aspr4KGYmKYg7yUe3KghyQ9yheNwLnjmzh/7+gfDBmHCQ==",
+ "bin": {
+ "acorn": "bin/acorn"
+ },
+ "engines": {
+ "node": ">=0.4.0"
+ }
+ },
+ "node_modules/webpack/node_modules/mkdirp": {
+ "version": "0.5.5",
+ "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz",
+ "integrity": "sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==",
+ "dependencies": {
+ "minimist": "^1.2.5"
+ },
+ "bin": {
+ "mkdirp": "bin/cmd.js"
+ }
+ },
+ "node_modules/webpack/node_modules/schema-utils": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-1.0.0.tgz",
+ "integrity": "sha512-i27Mic4KovM/lnGsy8whRCHhc7VicJajAjTrYg11K9zfZXnYIt4k5F+kZkwjnrhKzLic/HLU4j11mjsz2G/75g==",
+ "dependencies": {
+ "ajv": "^6.1.0",
+ "ajv-errors": "^1.0.0",
+ "ajv-keywords": "^3.1.0"
+ },
+ "engines": {
+ "node": ">= 4"
+ }
+ },
+ "node_modules/webpackbar": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/webpackbar/-/webpackbar-3.2.0.tgz",
+ "integrity": "sha512-PC4o+1c8gWWileUfwabe0gqptlXUDJd5E0zbpr2xHP1VSOVlZVPBZ8j6NCR8zM5zbKdxPhctHXahgpNK1qFDPw==",
+ "dependencies": {
+ "ansi-escapes": "^4.1.0",
+ "chalk": "^2.4.1",
+ "consola": "^2.6.0",
+ "figures": "^3.0.0",
+ "pretty-time": "^1.1.0",
+ "std-env": "^2.2.1",
+ "text-table": "^0.2.0",
+ "wrap-ansi": "^5.1.0"
+ },
+ "engines": {
+ "node": ">= 6.9.0"
+ },
+ "peerDependencies": {
+ "webpack": "^3.0.0 || ^4.0.0"
+ }
+ },
+ "node_modules/websocket-driver": {
+ "version": "0.7.4",
+ "resolved": "https://registry.npmjs.org/websocket-driver/-/websocket-driver-0.7.4.tgz",
+ "integrity": "sha512-b17KeDIQVjvb0ssuSDF2cYXSg2iztliJ4B9WdsuB6J952qCPKmnVq4DyW5motImXHDC1cBT/1UezrJVsKw5zjg==",
+ "dependencies": {
+ "http-parser-js": ">=0.5.1",
+ "safe-buffer": ">=5.1.0",
+ "websocket-extensions": ">=0.1.1"
+ },
+ "engines": {
+ "node": ">=0.8.0"
+ }
+ },
+ "node_modules/websocket-extensions": {
+ "version": "0.1.4",
+ "resolved": "https://registry.npmjs.org/websocket-extensions/-/websocket-extensions-0.1.4.tgz",
+ "integrity": "sha512-OqedPIGOfsDlo31UNwYbCFMSaO9m9G/0faIHj5/dZFDMFqPTcx6UwqyOy3COEaEOg/9VsGIpdqn62W5KhoKSpg==",
+ "engines": {
+ "node": ">=0.8.0"
+ }
+ },
+ "node_modules/whatwg-url": {
+ "version": "7.1.0",
+ "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-7.1.0.tgz",
+ "integrity": "sha512-WUu7Rg1DroM7oQvGWfOiAK21n74Gg+T4elXEQYkOhtyLeWiJFoOGLXPKI/9gzIie9CtwVLm8wtw6YJdKyxSjeg==",
+ "dependencies": {
+ "lodash.sortby": "^4.7.0",
+ "tr46": "^1.0.1",
+ "webidl-conversions": "^4.0.2"
+ }
+ },
+ "node_modules/when": {
+ "version": "3.6.4",
+ "resolved": "https://registry.npmjs.org/when/-/when-3.6.4.tgz",
+ "integrity": "sha1-RztRfsFZ4rhQBUl6E5g/CVQS404="
+ },
+ "node_modules/which": {
+ "version": "1.3.1",
+ "resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz",
+ "integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==",
+ "dependencies": {
+ "isexe": "^2.0.0"
+ },
+ "bin": {
+ "which": "bin/which"
+ }
+ },
+ "node_modules/which-boxed-primitive": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/which-boxed-primitive/-/which-boxed-primitive-1.0.2.tgz",
+ "integrity": "sha512-bwZdv0AKLpplFY2KZRX6TvyuN7ojjr7lwkg6ml0roIy9YeuSr7JS372qlNW18UQYzgYK9ziGcerWqZOmEn9VNg==",
+ "dependencies": {
+ "is-bigint": "^1.0.1",
+ "is-boolean-object": "^1.1.0",
+ "is-number-object": "^1.0.4",
+ "is-string": "^1.0.5",
+ "is-symbol": "^1.0.3"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/which-module": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/which-module/-/which-module-2.0.0.tgz",
+ "integrity": "sha1-2e8H3Od7mQK4o6j6SzHD4/fm6Ho="
+ },
+ "node_modules/widest-line": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/widest-line/-/widest-line-3.1.0.tgz",
+ "integrity": "sha512-NsmoXalsWVDMGupxZ5R08ka9flZjjiLvHVAWYOKtiKM8ujtZWr9cRffak+uSE48+Ob8ObalXpwyeUiyDD6QFgg==",
+ "dependencies": {
+ "string-width": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/with": {
+ "version": "7.0.2",
+ "resolved": "https://registry.npmjs.org/with/-/with-7.0.2.tgz",
+ "integrity": "sha512-RNGKj82nUPg3g5ygxkQl0R937xLyho1J24ItRCBTr/m1YnZkzJy1hUiHUJrc/VlsDQzsCnInEGSg3bci0Lmd4w==",
+ "dependencies": {
+ "@babel/parser": "^7.9.6",
+ "@babel/types": "^7.9.6",
+ "assert-never": "^1.2.1",
+ "babel-walk": "3.0.0-canary-5"
+ },
+ "engines": {
+ "node": ">= 10.0.0"
+ }
+ },
+ "node_modules/worker-farm": {
+ "version": "1.7.0",
+ "resolved": "https://registry.npmjs.org/worker-farm/-/worker-farm-1.7.0.tgz",
+ "integrity": "sha512-rvw3QTZc8lAxyVrqcSGVm5yP/IJ2UcB3U0graE3LCFoZ0Yn2x4EoVSqJKdB/T5M+FLcRPjz4TDacRf3OCfNUzw==",
+ "dependencies": {
+ "errno": "~0.1.7"
+ }
+ },
+ "node_modules/wrap-ansi": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-5.1.0.tgz",
+ "integrity": "sha512-QC1/iN/2/RPVJ5jYK8BGttj5z83LmSKmvbvrXPNCLZSEb32KKVDJDl/MOt2N01qU2H/FkzEa9PKto1BqDjtd7Q==",
+ "dependencies": {
+ "ansi-styles": "^3.2.0",
+ "string-width": "^3.0.0",
+ "strip-ansi": "^5.0.0"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/wrap-ansi/node_modules/ansi-regex": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.0.tgz",
+ "integrity": "sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg==",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/wrap-ansi/node_modules/emoji-regex": {
+ "version": "7.0.3",
+ "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-7.0.3.tgz",
+ "integrity": "sha512-CwBLREIQ7LvYFB0WyRvwhq5N5qPhc6PMjD6bYggFlI5YyDgl+0vxq5VHbMOFqLg7hfWzmu8T5Z1QofhmTIhItA=="
+ },
+ "node_modules/wrap-ansi/node_modules/is-fullwidth-code-point": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz",
+ "integrity": "sha1-o7MKXE8ZkYMWeqq5O+764937ZU8=",
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/wrap-ansi/node_modules/string-width": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/string-width/-/string-width-3.1.0.tgz",
+ "integrity": "sha512-vafcv6KjVZKSgz06oM/H6GDBrAtz8vdhQakGjFIvNrHA6y3HCF1CInLy+QLq8dTJPQ1b+KDUqDFctkdRW44e1w==",
+ "dependencies": {
+ "emoji-regex": "^7.0.1",
+ "is-fullwidth-code-point": "^2.0.0",
+ "strip-ansi": "^5.1.0"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/wrap-ansi/node_modules/strip-ansi": {
+ "version": "5.2.0",
+ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-5.2.0.tgz",
+ "integrity": "sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA==",
+ "dependencies": {
+ "ansi-regex": "^4.1.0"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/wrappy": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz",
+ "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8="
+ },
+ "node_modules/write-file-atomic": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-3.0.3.tgz",
+ "integrity": "sha512-AvHcyZ5JnSfq3ioSyjrBkH9yW4m7Ayk8/9My/DD9onKeu/94fwrMocemO2QAJFAlnnDN+ZDS+ZjAR5ua1/PV/Q==",
+ "dependencies": {
+ "imurmurhash": "^0.1.4",
+ "is-typedarray": "^1.0.0",
+ "signal-exit": "^3.0.2",
+ "typedarray-to-buffer": "^3.1.5"
+ }
+ },
+ "node_modules/ws": {
+ "version": "6.2.1",
+ "resolved": "https://registry.npmjs.org/ws/-/ws-6.2.1.tgz",
+ "integrity": "sha512-GIyAXC2cB7LjvpgMt9EKS2ldqr0MTrORaleiOno6TweZ6r3TKtoFQWay/2PceJ3RuBasOHzXNn5Lrw1X0bEjqA==",
+ "dependencies": {
+ "async-limiter": "~1.0.0"
+ }
+ },
+ "node_modules/xdg-basedir": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/xdg-basedir/-/xdg-basedir-4.0.0.tgz",
+ "integrity": "sha512-PSNhEJDejZYV7h50BohL09Er9VaIefr2LMAf3OEmpCkjOi34eYyQYAXUTjEQtZJTKcF0E2UKTh+osDLsgNim9Q==",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/xmlbuilder": {
+ "version": "13.0.2",
+ "resolved": "https://registry.npmjs.org/xmlbuilder/-/xmlbuilder-13.0.2.tgz",
+ "integrity": "sha512-Eux0i2QdDYKbdbA6AM6xE4m6ZTZr4G4xF9kahI2ukSEMCzwce2eX9WlTI5J3s+NU7hpasFsr8hWIONae7LluAQ==",
+ "engines": {
+ "node": ">=6.0"
+ }
+ },
+ "node_modules/xtend": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz",
+ "integrity": "sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==",
+ "engines": {
+ "node": ">=0.4"
+ }
+ },
+ "node_modules/y18n": {
+ "version": "4.0.3",
+ "resolved": "https://registry.npmjs.org/y18n/-/y18n-4.0.3.tgz",
+ "integrity": "sha512-JKhqTOwSrqNA1NY5lSztJ1GrBiUodLMmIZuLiDaMRJ+itFd+ABVE8XBjOvIWL+rSqNDC74LCSFmlb/U4UZ4hJQ=="
+ },
+ "node_modules/yallist": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz",
+ "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g=="
+ },
+ "node_modules/yargs": {
+ "version": "13.3.2",
+ "resolved": "https://registry.npmjs.org/yargs/-/yargs-13.3.2.tgz",
+ "integrity": "sha512-AX3Zw5iPruN5ie6xGRIDgqkT+ZhnRlZMLMHAs8tg7nRruy2Nb+i5o9bwghAogtM08q1dpr2LVoS8KSTMYpWXUw==",
+ "dependencies": {
+ "cliui": "^5.0.0",
+ "find-up": "^3.0.0",
+ "get-caller-file": "^2.0.1",
+ "require-directory": "^2.1.1",
+ "require-main-filename": "^2.0.0",
+ "set-blocking": "^2.0.0",
+ "string-width": "^3.0.0",
+ "which-module": "^2.0.0",
+ "y18n": "^4.0.0",
+ "yargs-parser": "^13.1.2"
+ }
+ },
+ "node_modules/yargs-parser": {
+ "version": "13.1.2",
+ "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-13.1.2.tgz",
+ "integrity": "sha512-3lbsNRf/j+A4QuSZfDRA7HRSfWrzO0YjqTJd5kjAq37Zep1CEgaYmrH9Q3GwPiB9cHyd1Y1UwggGhJGoxipbzg==",
+ "dependencies": {
+ "camelcase": "^5.0.0",
+ "decamelize": "^1.2.0"
+ }
+ },
+ "node_modules/yargs-parser/node_modules/camelcase": {
+ "version": "5.3.1",
+ "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz",
+ "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/yargs/node_modules/ansi-regex": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.0.tgz",
+ "integrity": "sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg==",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/yargs/node_modules/emoji-regex": {
+ "version": "7.0.3",
+ "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-7.0.3.tgz",
+ "integrity": "sha512-CwBLREIQ7LvYFB0WyRvwhq5N5qPhc6PMjD6bYggFlI5YyDgl+0vxq5VHbMOFqLg7hfWzmu8T5Z1QofhmTIhItA=="
+ },
+ "node_modules/yargs/node_modules/find-up": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz",
+ "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==",
+ "dependencies": {
+ "locate-path": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/yargs/node_modules/is-fullwidth-code-point": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz",
+ "integrity": "sha1-o7MKXE8ZkYMWeqq5O+764937ZU8=",
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/yargs/node_modules/locate-path": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz",
+ "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==",
+ "dependencies": {
+ "p-locate": "^3.0.0",
+ "path-exists": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/yargs/node_modules/p-locate": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz",
+ "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==",
+ "dependencies": {
+ "p-limit": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/yargs/node_modules/path-exists": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz",
+ "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=",
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/yargs/node_modules/string-width": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/string-width/-/string-width-3.1.0.tgz",
+ "integrity": "sha512-vafcv6KjVZKSgz06oM/H6GDBrAtz8vdhQakGjFIvNrHA6y3HCF1CInLy+QLq8dTJPQ1b+KDUqDFctkdRW44e1w==",
+ "dependencies": {
+ "emoji-regex": "^7.0.1",
+ "is-fullwidth-code-point": "^2.0.0",
+ "strip-ansi": "^5.1.0"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/yargs/node_modules/strip-ansi": {
+ "version": "5.2.0",
+ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-5.2.0.tgz",
+ "integrity": "sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA==",
+ "dependencies": {
+ "ansi-regex": "^4.1.0"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/zepto": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/zepto/-/zepto-1.2.0.tgz",
+ "integrity": "sha1-4Se9nmb9hGvl6rSME5SIL3wOT5g="
+ }
+ },
+ "dependencies": {
+ "@algolia/cache-browser-local-storage": {
+ "version": "4.9.1",
+ "resolved": "https://registry.npmjs.org/@algolia/cache-browser-local-storage/-/cache-browser-local-storage-4.9.1.tgz",
+ "integrity": "sha512-bAUU9vKCy45uTTlzJw0LYu1IjoZsmzL6lgjaVFaW1crhX/4P+JD5ReQv3n/wpiXSFaHq1WEO3WyH2g3ymzeipQ==",
+ "requires": {
+ "@algolia/cache-common": "4.9.1"
+ }
+ },
+ "@algolia/cache-common": {
+ "version": "4.9.1",
+ "resolved": "https://registry.npmjs.org/@algolia/cache-common/-/cache-common-4.9.1.tgz",
+ "integrity": "sha512-tcvw4mOfFy44V4ZxDEy9wNGr6vFROZKRpXKTEBgdw/WBn6mX51H1ar4RWtceDEcDU4H5fIv5tsY3ip2hU+fTPg=="
+ },
+ "@algolia/cache-in-memory": {
+ "version": "4.9.1",
+ "resolved": "https://registry.npmjs.org/@algolia/cache-in-memory/-/cache-in-memory-4.9.1.tgz",
+ "integrity": "sha512-IEJrHonvdymW2CnRfJtsTVWyfAH05xPEFkGXGCw00+6JNCj8Dln3TeaRLiaaY1srlyGedkemekQm1/Xb46CGOQ==",
+ "requires": {
+ "@algolia/cache-common": "4.9.1"
+ }
+ },
+ "@algolia/client-account": {
+ "version": "4.9.1",
+ "resolved": "https://registry.npmjs.org/@algolia/client-account/-/client-account-4.9.1.tgz",
+ "integrity": "sha512-Shpjeuwb7i2LR5QuWREb6UbEQLGB+Pl/J5+wPgILJDP/uWp7jpl0ase9mYNQGKj7TjztpSpQCPZ3dSHPnzZPfw==",
+ "requires": {
+ "@algolia/client-common": "4.9.1",
+ "@algolia/client-search": "4.9.1",
+ "@algolia/transporter": "4.9.1"
+ }
+ },
+ "@algolia/client-analytics": {
+ "version": "4.9.1",
+ "resolved": "https://registry.npmjs.org/@algolia/client-analytics/-/client-analytics-4.9.1.tgz",
+ "integrity": "sha512-/g6OkOSIA+A0t/tjvbL6iG/zV4El4LPFgv/tcAYHTH27BmlNtnEXw+iFpGjeUlQoPily9WVB3QNLMJkaNwL3HA==",
+ "requires": {
+ "@algolia/client-common": "4.9.1",
+ "@algolia/client-search": "4.9.1",
+ "@algolia/requester-common": "4.9.1",
+ "@algolia/transporter": "4.9.1"
+ }
+ },
+ "@algolia/client-common": {
+ "version": "4.9.1",
+ "resolved": "https://registry.npmjs.org/@algolia/client-common/-/client-common-4.9.1.tgz",
+ "integrity": "sha512-UziRTZ8km3qwoVPIyEre8TV6V+MX7UtbfVqPmSafZ0xu41UUZ+sL56YoKjOXkbKuybeIC9prXMGy/ID5bXkTqg==",
+ "requires": {
+ "@algolia/requester-common": "4.9.1",
+ "@algolia/transporter": "4.9.1"
+ }
+ },
+ "@algolia/client-recommendation": {
+ "version": "4.9.1",
+ "resolved": "https://registry.npmjs.org/@algolia/client-recommendation/-/client-recommendation-4.9.1.tgz",
+ "integrity": "sha512-Drtvvm1PNIOpYf4HFlkPFstFQ3IsN+TRmxur2F7y6Faplb5ybISa8ithu1tmlTdyTf3A78hQUQjgJet6qD2XZw==",
+ "requires": {
+ "@algolia/client-common": "4.9.1",
+ "@algolia/requester-common": "4.9.1",
+ "@algolia/transporter": "4.9.1"
+ }
+ },
+ "@algolia/client-search": {
+ "version": "4.9.1",
+ "resolved": "https://registry.npmjs.org/@algolia/client-search/-/client-search-4.9.1.tgz",
+ "integrity": "sha512-r9Cw2r8kJr45iYncFDht6EshARghU265wuY8Q8oHrpFHjAziEYdsUOdNmQKbsSH5J3gLjDPx1EI5DzVd6ivn3w==",
+ "requires": {
+ "@algolia/client-common": "4.9.1",
+ "@algolia/requester-common": "4.9.1",
+ "@algolia/transporter": "4.9.1"
+ }
+ },
+ "@algolia/logger-common": {
+ "version": "4.9.1",
+ "resolved": "https://registry.npmjs.org/@algolia/logger-common/-/logger-common-4.9.1.tgz",
+ "integrity": "sha512-9mPrbFlFyPT7or/7PXTiJjyOewWB9QRkZKVXkt5zHAUiUzGxmmdpJIGpPv3YQnDur8lXrXaRI0MHXUuIDMY1ng=="
+ },
+ "@algolia/logger-console": {
+ "version": "4.9.1",
+ "resolved": "https://registry.npmjs.org/@algolia/logger-console/-/logger-console-4.9.1.tgz",
+ "integrity": "sha512-74VUwjtFjFpjZpi3QoHIPv0kcr3vWUSHX/Vs8PJW3lPsD4CgyhFenQbG9v+ZnyH0JrJwiYTtzfmrVh7IMWZGrQ==",
+ "requires": {
+ "@algolia/logger-common": "4.9.1"
+ }
+ },
+ "@algolia/requester-browser-xhr": {
+ "version": "4.9.1",
+ "resolved": "https://registry.npmjs.org/@algolia/requester-browser-xhr/-/requester-browser-xhr-4.9.1.tgz",
+ "integrity": "sha512-zc46tk5o0ikOAz3uYiRAMxC2iVKAMFKT7nNZnLB5IzT0uqAh7pz/+D/UvIxP4bKmsllpBSnPcpfQF+OI4Ag/BA==",
+ "requires": {
+ "@algolia/requester-common": "4.9.1"
+ }
+ },
+ "@algolia/requester-common": {
+ "version": "4.9.1",
+ "resolved": "https://registry.npmjs.org/@algolia/requester-common/-/requester-common-4.9.1.tgz",
+ "integrity": "sha512-9hPgXnlCSbqJqF69M5x5WN3h51Dc+mk/iWNeJSVxExHGvCDfBBZd0v6S15i8q2a9cD1I2RnhMpbnX5BmGtabVA=="
+ },
+ "@algolia/requester-node-http": {
+ "version": "4.9.1",
+ "resolved": "https://registry.npmjs.org/@algolia/requester-node-http/-/requester-node-http-4.9.1.tgz",
+ "integrity": "sha512-vYNVbSCuyrCSCjHBQJk+tLZtWCjvvDf5tSbRJjyJYMqpnXuIuP7gZm24iHil4NPYBhbBj5NU2ZDAhc/gTn75Ag==",
+ "requires": {
+ "@algolia/requester-common": "4.9.1"
+ }
+ },
+ "@algolia/transporter": {
+ "version": "4.9.1",
+ "resolved": "https://registry.npmjs.org/@algolia/transporter/-/transporter-4.9.1.tgz",
+ "integrity": "sha512-AbjFfGzX+cAuj7Qyc536OxIQzjFOA5FU2ANGStx8LBH+AKXScwfkx67C05riuaRR5adSCLMSEbVvUscH0nF+6A==",
+ "requires": {
+ "@algolia/cache-common": "4.9.1",
+ "@algolia/logger-common": "4.9.1",
+ "@algolia/requester-common": "4.9.1"
+ }
+ },
+ "@babel/code-frame": {
+ "version": "7.12.13",
+ "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.12.13.tgz",
+ "integrity": "sha512-HV1Cm0Q3ZrpCR93tkWOYiuYIgLxZXZFVG2VgK+MBWjUqZTundupbfx2aXarXuw5Ko5aMcjtJgbSs4vUGBS5v6g==",
+ "requires": {
+ "@babel/highlight": "^7.12.13"
+ }
+ },
+ "@babel/compat-data": {
+ "version": "7.13.15",
+ "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.13.15.tgz",
+ "integrity": "sha512-ltnibHKR1VnrU4ymHyQ/CXtNXI6yZC0oJThyW78Hft8XndANwi+9H+UIklBDraIjFEJzw8wmcM427oDd9KS5wA=="
+ },
+ "@babel/core": {
+ "version": "7.13.16",
+ "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.13.16.tgz",
+ "integrity": "sha512-sXHpixBiWWFti0AV2Zq7avpTasr6sIAu7Y396c608541qAU2ui4a193m0KSQmfPSKFZLnQ3cvlKDOm3XkuXm3Q==",
+ "requires": {
+ "@babel/code-frame": "^7.12.13",
+ "@babel/generator": "^7.13.16",
+ "@babel/helper-compilation-targets": "^7.13.16",
+ "@babel/helper-module-transforms": "^7.13.14",
+ "@babel/helpers": "^7.13.16",
+ "@babel/parser": "^7.13.16",
+ "@babel/template": "^7.12.13",
+ "@babel/traverse": "^7.13.15",
+ "@babel/types": "^7.13.16",
+ "convert-source-map": "^1.7.0",
+ "debug": "^4.1.0",
+ "gensync": "^1.0.0-beta.2",
+ "json5": "^2.1.2",
+ "semver": "^6.3.0",
+ "source-map": "^0.5.0"
+ },
+ "dependencies": {
+ "debug": {
+ "version": "4.3.1",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz",
+ "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==",
+ "requires": {
+ "ms": "2.1.2"
+ }
+ },
+ "json5": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.0.tgz",
+ "integrity": "sha512-f+8cldu7X/y7RAJurMEJmdoKXGB/X550w2Nr3tTbezL6RwEE/iMcm+tZnXeoZtKuOq6ft8+CqzEkrIgx1fPoQA==",
+ "requires": {
+ "minimist": "^1.2.5"
+ }
+ },
+ "ms": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz",
+ "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w=="
+ },
+ "source-map": {
+ "version": "0.5.7",
+ "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz",
+ "integrity": "sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w="
+ }
+ }
+ },
+ "@babel/generator": {
+ "version": "7.13.16",
+ "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.13.16.tgz",
+ "integrity": "sha512-grBBR75UnKOcUWMp8WoDxNsWCFl//XCK6HWTrBQKTr5SV9f5g0pNOjdyzi/DTBv12S9GnYPInIXQBTky7OXEMg==",
+ "requires": {
+ "@babel/types": "^7.13.16",
+ "jsesc": "^2.5.1",
+ "source-map": "^0.5.0"
+ },
+ "dependencies": {
+ "source-map": {
+ "version": "0.5.7",
+ "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz",
+ "integrity": "sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w="
+ }
+ }
+ },
+ "@babel/helper-annotate-as-pure": {
+ "version": "7.12.13",
+ "resolved": "https://registry.npmjs.org/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.12.13.tgz",
+ "integrity": "sha512-7YXfX5wQ5aYM/BOlbSccHDbuXXFPxeoUmfWtz8le2yTkTZc+BxsiEnENFoi2SlmA8ewDkG2LgIMIVzzn2h8kfw==",
+ "requires": {
+ "@babel/types": "^7.12.13"
+ }
+ },
+ "@babel/helper-builder-binary-assignment-operator-visitor": {
+ "version": "7.12.13",
+ "resolved": "https://registry.npmjs.org/@babel/helper-builder-binary-assignment-operator-visitor/-/helper-builder-binary-assignment-operator-visitor-7.12.13.tgz",
+ "integrity": "sha512-CZOv9tGphhDRlVjVkAgm8Nhklm9RzSmWpX2my+t7Ua/KT616pEzXsQCjinzvkRvHWJ9itO4f296efroX23XCMA==",
+ "requires": {
+ "@babel/helper-explode-assignable-expression": "^7.12.13",
+ "@babel/types": "^7.12.13"
+ }
+ },
+ "@babel/helper-compilation-targets": {
+ "version": "7.13.16",
+ "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.13.16.tgz",
+ "integrity": "sha512-3gmkYIrpqsLlieFwjkGgLaSHmhnvlAYzZLlYVjlW+QwI+1zE17kGxuJGmIqDQdYp56XdmGeD+Bswx0UTyG18xA==",
+ "requires": {
+ "@babel/compat-data": "^7.13.15",
+ "@babel/helper-validator-option": "^7.12.17",
+ "browserslist": "^4.14.5",
+ "semver": "^6.3.0"
+ }
+ },
+ "@babel/helper-create-class-features-plugin": {
+ "version": "7.13.11",
+ "resolved": "https://registry.npmjs.org/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.13.11.tgz",
+ "integrity": "sha512-ays0I7XYq9xbjCSvT+EvysLgfc3tOkwCULHjrnscGT3A9qD4sk3wXnJ3of0MAWsWGjdinFvajHU2smYuqXKMrw==",
+ "requires": {
+ "@babel/helper-function-name": "^7.12.13",
+ "@babel/helper-member-expression-to-functions": "^7.13.0",
+ "@babel/helper-optimise-call-expression": "^7.12.13",
+ "@babel/helper-replace-supers": "^7.13.0",
+ "@babel/helper-split-export-declaration": "^7.12.13"
+ }
+ },
+ "@babel/helper-create-regexp-features-plugin": {
+ "version": "7.12.17",
+ "resolved": "https://registry.npmjs.org/@babel/helper-create-regexp-features-plugin/-/helper-create-regexp-features-plugin-7.12.17.tgz",
+ "integrity": "sha512-p2VGmBu9oefLZ2nQpgnEnG0ZlRPvL8gAGvPUMQwUdaE8k49rOMuZpOwdQoy5qJf6K8jL3bcAMhVUlHAjIgJHUg==",
+ "requires": {
+ "@babel/helper-annotate-as-pure": "^7.12.13",
+ "regexpu-core": "^4.7.1"
+ }
+ },
+ "@babel/helper-define-polyfill-provider": {
+ "version": "0.2.0",
+ "resolved": "https://registry.npmjs.org/@babel/helper-define-polyfill-provider/-/helper-define-polyfill-provider-0.2.0.tgz",
+ "integrity": "sha512-JT8tHuFjKBo8NnaUbblz7mIu1nnvUDiHVjXXkulZULyidvo/7P6TY7+YqpV37IfF+KUFxmlK04elKtGKXaiVgw==",
+ "requires": {
+ "@babel/helper-compilation-targets": "^7.13.0",
+ "@babel/helper-module-imports": "^7.12.13",
+ "@babel/helper-plugin-utils": "^7.13.0",
+ "@babel/traverse": "^7.13.0",
+ "debug": "^4.1.1",
+ "lodash.debounce": "^4.0.8",
+ "resolve": "^1.14.2",
+ "semver": "^6.1.2"
+ },
+ "dependencies": {
+ "debug": {
+ "version": "4.3.1",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz",
+ "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==",
+ "requires": {
+ "ms": "2.1.2"
+ }
+ },
+ "ms": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz",
+ "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w=="
+ }
+ }
+ },
+ "@babel/helper-explode-assignable-expression": {
+ "version": "7.13.0",
+ "resolved": "https://registry.npmjs.org/@babel/helper-explode-assignable-expression/-/helper-explode-assignable-expression-7.13.0.tgz",
+ "integrity": "sha512-qS0peLTDP8kOisG1blKbaoBg/o9OSa1qoumMjTK5pM+KDTtpxpsiubnCGP34vK8BXGcb2M9eigwgvoJryrzwWA==",
+ "requires": {
+ "@babel/types": "^7.13.0"
+ }
+ },
+ "@babel/helper-function-name": {
+ "version": "7.12.13",
+ "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.12.13.tgz",
+ "integrity": "sha512-TZvmPn0UOqmvi5G4vvw0qZTpVptGkB1GL61R6lKvrSdIxGm5Pky7Q3fpKiIkQCAtRCBUwB0PaThlx9vebCDSwA==",
+ "requires": {
+ "@babel/helper-get-function-arity": "^7.12.13",
+ "@babel/template": "^7.12.13",
+ "@babel/types": "^7.12.13"
+ }
+ },
+ "@babel/helper-get-function-arity": {
+ "version": "7.12.13",
+ "resolved": "https://registry.npmjs.org/@babel/helper-get-function-arity/-/helper-get-function-arity-7.12.13.tgz",
+ "integrity": "sha512-DjEVzQNz5LICkzN0REdpD5prGoidvbdYk1BVgRUOINaWJP2t6avB27X1guXK1kXNrX0WMfsrm1A/ZBthYuIMQg==",
+ "requires": {
+ "@babel/types": "^7.12.13"
+ }
+ },
+ "@babel/helper-hoist-variables": {
+ "version": "7.13.16",
+ "resolved": "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.13.16.tgz",
+ "integrity": "sha512-1eMtTrXtrwscjcAeO4BVK+vvkxaLJSPFz1w1KLawz6HLNi9bPFGBNwwDyVfiu1Tv/vRRFYfoGaKhmAQPGPn5Wg==",
+ "requires": {
+ "@babel/traverse": "^7.13.15",
+ "@babel/types": "^7.13.16"
+ }
+ },
+ "@babel/helper-member-expression-to-functions": {
+ "version": "7.13.12",
+ "resolved": "https://registry.npmjs.org/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.13.12.tgz",
+ "integrity": "sha512-48ql1CLL59aKbU94Y88Xgb2VFy7a95ykGRbJJaaVv+LX5U8wFpLfiGXJJGUozsmA1oEh/o5Bp60Voq7ACyA/Sw==",
+ "requires": {
+ "@babel/types": "^7.13.12"
+ }
+ },
+ "@babel/helper-module-imports": {
+ "version": "7.13.12",
+ "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.13.12.tgz",
+ "integrity": "sha512-4cVvR2/1B693IuOvSI20xqqa/+bl7lqAMR59R4iu39R9aOX8/JoYY1sFaNvUMyMBGnHdwvJgUrzNLoUZxXypxA==",
+ "requires": {
+ "@babel/types": "^7.13.12"
+ }
+ },
+ "@babel/helper-module-transforms": {
+ "version": "7.13.14",
+ "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.13.14.tgz",
+ "integrity": "sha512-QuU/OJ0iAOSIatyVZmfqB0lbkVP0kDRiKj34xy+QNsnVZi/PA6BoSoreeqnxxa9EHFAIL0R9XOaAR/G9WlIy5g==",
+ "requires": {
+ "@babel/helper-module-imports": "^7.13.12",
+ "@babel/helper-replace-supers": "^7.13.12",
+ "@babel/helper-simple-access": "^7.13.12",
+ "@babel/helper-split-export-declaration": "^7.12.13",
+ "@babel/helper-validator-identifier": "^7.12.11",
+ "@babel/template": "^7.12.13",
+ "@babel/traverse": "^7.13.13",
+ "@babel/types": "^7.13.14"
+ }
+ },
+ "@babel/helper-optimise-call-expression": {
+ "version": "7.12.13",
+ "resolved": "https://registry.npmjs.org/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.12.13.tgz",
+ "integrity": "sha512-BdWQhoVJkp6nVjB7nkFWcn43dkprYauqtk++Py2eaf/GRDFm5BxRqEIZCiHlZUGAVmtwKcsVL1dC68WmzeFmiA==",
+ "requires": {
+ "@babel/types": "^7.12.13"
+ }
+ },
+ "@babel/helper-plugin-utils": {
+ "version": "7.13.0",
+ "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.13.0.tgz",
+ "integrity": "sha512-ZPafIPSwzUlAoWT8DKs1W2VyF2gOWthGd5NGFMsBcMMol+ZhK+EQY/e6V96poa6PA/Bh+C9plWN0hXO1uB8AfQ=="
+ },
+ "@babel/helper-remap-async-to-generator": {
+ "version": "7.13.0",
+ "resolved": "https://registry.npmjs.org/@babel/helper-remap-async-to-generator/-/helper-remap-async-to-generator-7.13.0.tgz",
+ "integrity": "sha512-pUQpFBE9JvC9lrQbpX0TmeNIy5s7GnZjna2lhhcHC7DzgBs6fWn722Y5cfwgrtrqc7NAJwMvOa0mKhq6XaE4jg==",
+ "requires": {
+ "@babel/helper-annotate-as-pure": "^7.12.13",
+ "@babel/helper-wrap-function": "^7.13.0",
+ "@babel/types": "^7.13.0"
+ }
+ },
+ "@babel/helper-replace-supers": {
+ "version": "7.13.12",
+ "resolved": "https://registry.npmjs.org/@babel/helper-replace-supers/-/helper-replace-supers-7.13.12.tgz",
+ "integrity": "sha512-Gz1eiX+4yDO8mT+heB94aLVNCL+rbuT2xy4YfyNqu8F+OI6vMvJK891qGBTqL9Uc8wxEvRW92Id6G7sDen3fFw==",
+ "requires": {
+ "@babel/helper-member-expression-to-functions": "^7.13.12",
+ "@babel/helper-optimise-call-expression": "^7.12.13",
+ "@babel/traverse": "^7.13.0",
+ "@babel/types": "^7.13.12"
+ }
+ },
+ "@babel/helper-simple-access": {
+ "version": "7.13.12",
+ "resolved": "https://registry.npmjs.org/@babel/helper-simple-access/-/helper-simple-access-7.13.12.tgz",
+ "integrity": "sha512-7FEjbrx5SL9cWvXioDbnlYTppcZGuCY6ow3/D5vMggb2Ywgu4dMrpTJX0JdQAIcRRUElOIxF3yEooa9gUb9ZbA==",
+ "requires": {
+ "@babel/types": "^7.13.12"
+ }
+ },
+ "@babel/helper-skip-transparent-expression-wrappers": {
+ "version": "7.12.1",
+ "resolved": "https://registry.npmjs.org/@babel/helper-skip-transparent-expression-wrappers/-/helper-skip-transparent-expression-wrappers-7.12.1.tgz",
+ "integrity": "sha512-Mf5AUuhG1/OCChOJ/HcADmvcHM42WJockombn8ATJG3OnyiSxBK/Mm5x78BQWvmtXZKHgbjdGL2kin/HOLlZGA==",
+ "requires": {
+ "@babel/types": "^7.12.1"
+ }
+ },
+ "@babel/helper-split-export-declaration": {
+ "version": "7.12.13",
+ "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.12.13.tgz",
+ "integrity": "sha512-tCJDltF83htUtXx5NLcaDqRmknv652ZWCHyoTETf1CXYJdPC7nohZohjUgieXhv0hTJdRf2FjDueFehdNucpzg==",
+ "requires": {
+ "@babel/types": "^7.12.13"
+ }
+ },
+ "@babel/helper-validator-identifier": {
+ "version": "7.12.11",
+ "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.12.11.tgz",
+ "integrity": "sha512-np/lG3uARFybkoHokJUmf1QfEvRVCPbmQeUQpKow5cQ3xWrV9i3rUHodKDJPQfTVX61qKi+UdYk8kik84n7XOw=="
+ },
+ "@babel/helper-validator-option": {
+ "version": "7.12.17",
+ "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.12.17.tgz",
+ "integrity": "sha512-TopkMDmLzq8ngChwRlyjR6raKD6gMSae4JdYDB8bByKreQgG0RBTuKe9LRxW3wFtUnjxOPRKBDwEH6Mg5KeDfw=="
+ },
+ "@babel/helper-wrap-function": {
+ "version": "7.13.0",
+ "resolved": "https://registry.npmjs.org/@babel/helper-wrap-function/-/helper-wrap-function-7.13.0.tgz",
+ "integrity": "sha512-1UX9F7K3BS42fI6qd2A4BjKzgGjToscyZTdp1DjknHLCIvpgne6918io+aL5LXFcER/8QWiwpoY902pVEqgTXA==",
+ "requires": {
+ "@babel/helper-function-name": "^7.12.13",
+ "@babel/template": "^7.12.13",
+ "@babel/traverse": "^7.13.0",
+ "@babel/types": "^7.13.0"
+ }
+ },
+ "@babel/helpers": {
+ "version": "7.13.17",
+ "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.13.17.tgz",
+ "integrity": "sha512-Eal4Gce4kGijo1/TGJdqp3WuhllaMLSrW6XcL0ulyUAQOuxHcCafZE8KHg9857gcTehsm/v7RcOx2+jp0Ryjsg==",
+ "requires": {
+ "@babel/template": "^7.12.13",
+ "@babel/traverse": "^7.13.17",
+ "@babel/types": "^7.13.17"
+ }
+ },
+ "@babel/highlight": {
+ "version": "7.13.10",
+ "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.13.10.tgz",
+ "integrity": "sha512-5aPpe5XQPzflQrFwL1/QoeHkP2MsA4JCntcXHRhEsdsfPVkvPi2w7Qix4iV7t5S/oC9OodGrggd8aco1g3SZFg==",
+ "requires": {
+ "@babel/helper-validator-identifier": "^7.12.11",
+ "chalk": "^2.0.0",
+ "js-tokens": "^4.0.0"
+ }
+ },
+ "@babel/parser": {
+ "version": "7.13.16",
+ "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.13.16.tgz",
+ "integrity": "sha512-6bAg36mCwuqLO0hbR+z7PHuqWiCeP7Dzg73OpQwsAB1Eb8HnGEz5xYBzCfbu+YjoaJsJs+qheDxVAuqbt3ILEw=="
+ },
+ "@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": {
+ "version": "7.13.12",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining/-/plugin-bugfix-v8-spread-parameters-in-optional-chaining-7.13.12.tgz",
+ "integrity": "sha512-d0u3zWKcoZf379fOeJdr1a5WPDny4aOFZ6hlfKivgK0LY7ZxNfoaHL2fWwdGtHyVvra38FC+HVYkO+byfSA8AQ==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.13.0",
+ "@babel/helper-skip-transparent-expression-wrappers": "^7.12.1",
+ "@babel/plugin-proposal-optional-chaining": "^7.13.12"
+ }
+ },
+ "@babel/plugin-proposal-async-generator-functions": {
+ "version": "7.13.15",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-async-generator-functions/-/plugin-proposal-async-generator-functions-7.13.15.tgz",
+ "integrity": "sha512-VapibkWzFeoa6ubXy/NgV5U2U4MVnUlvnx6wo1XhlsaTrLYWE0UFpDQsVrmn22q5CzeloqJ8gEMHSKxuee6ZdA==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.13.0",
+ "@babel/helper-remap-async-to-generator": "^7.13.0",
+ "@babel/plugin-syntax-async-generators": "^7.8.4"
+ }
+ },
+ "@babel/plugin-proposal-class-properties": {
+ "version": "7.13.0",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-class-properties/-/plugin-proposal-class-properties-7.13.0.tgz",
+ "integrity": "sha512-KnTDjFNC1g+45ka0myZNvSBFLhNCLN+GeGYLDEA8Oq7MZ6yMgfLoIRh86GRT0FjtJhZw8JyUskP9uvj5pHM9Zg==",
+ "requires": {
+ "@babel/helper-create-class-features-plugin": "^7.13.0",
+ "@babel/helper-plugin-utils": "^7.13.0"
+ }
+ },
+ "@babel/plugin-proposal-decorators": {
+ "version": "7.13.15",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-decorators/-/plugin-proposal-decorators-7.13.15.tgz",
+ "integrity": "sha512-ibAMAqUm97yzi+LPgdr5Nqb9CMkeieGHvwPg1ywSGjZrZHQEGqE01HmOio8kxRpA/+VtOHouIVy2FMpBbtltjA==",
+ "requires": {
+ "@babel/helper-create-class-features-plugin": "^7.13.11",
+ "@babel/helper-plugin-utils": "^7.13.0",
+ "@babel/plugin-syntax-decorators": "^7.12.13"
+ }
+ },
+ "@babel/plugin-proposal-dynamic-import": {
+ "version": "7.13.8",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-dynamic-import/-/plugin-proposal-dynamic-import-7.13.8.tgz",
+ "integrity": "sha512-ONWKj0H6+wIRCkZi9zSbZtE/r73uOhMVHh256ys0UzfM7I3d4n+spZNWjOnJv2gzopumP2Wxi186vI8N0Y2JyQ==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.13.0",
+ "@babel/plugin-syntax-dynamic-import": "^7.8.3"
+ }
+ },
+ "@babel/plugin-proposal-export-namespace-from": {
+ "version": "7.12.13",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-export-namespace-from/-/plugin-proposal-export-namespace-from-7.12.13.tgz",
+ "integrity": "sha512-INAgtFo4OnLN3Y/j0VwAgw3HDXcDtX+C/erMvWzuV9v71r7urb6iyMXu7eM9IgLr1ElLlOkaHjJ0SbCmdOQ3Iw==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.12.13",
+ "@babel/plugin-syntax-export-namespace-from": "^7.8.3"
+ }
+ },
+ "@babel/plugin-proposal-json-strings": {
+ "version": "7.13.8",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-json-strings/-/plugin-proposal-json-strings-7.13.8.tgz",
+ "integrity": "sha512-w4zOPKUFPX1mgvTmL/fcEqy34hrQ1CRcGxdphBc6snDnnqJ47EZDIyop6IwXzAC8G916hsIuXB2ZMBCExC5k7Q==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.13.0",
+ "@babel/plugin-syntax-json-strings": "^7.8.3"
+ }
+ },
+ "@babel/plugin-proposal-logical-assignment-operators": {
+ "version": "7.13.8",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-logical-assignment-operators/-/plugin-proposal-logical-assignment-operators-7.13.8.tgz",
+ "integrity": "sha512-aul6znYB4N4HGweImqKn59Su9RS8lbUIqxtXTOcAGtNIDczoEFv+l1EhmX8rUBp3G1jMjKJm8m0jXVp63ZpS4A==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.13.0",
+ "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4"
+ }
+ },
+ "@babel/plugin-proposal-nullish-coalescing-operator": {
+ "version": "7.13.8",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-nullish-coalescing-operator/-/plugin-proposal-nullish-coalescing-operator-7.13.8.tgz",
+ "integrity": "sha512-iePlDPBn//UhxExyS9KyeYU7RM9WScAG+D3Hhno0PLJebAEpDZMocbDe64eqynhNAnwz/vZoL/q/QB2T1OH39A==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.13.0",
+ "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3"
+ }
+ },
+ "@babel/plugin-proposal-numeric-separator": {
+ "version": "7.12.13",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-numeric-separator/-/plugin-proposal-numeric-separator-7.12.13.tgz",
+ "integrity": "sha512-O1jFia9R8BUCl3ZGB7eitaAPu62TXJRHn7rh+ojNERCFyqRwJMTmhz+tJ+k0CwI6CLjX/ee4qW74FSqlq9I35w==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.12.13",
+ "@babel/plugin-syntax-numeric-separator": "^7.10.4"
+ }
+ },
+ "@babel/plugin-proposal-object-rest-spread": {
+ "version": "7.13.8",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-object-rest-spread/-/plugin-proposal-object-rest-spread-7.13.8.tgz",
+ "integrity": "sha512-DhB2EuB1Ih7S3/IRX5AFVgZ16k3EzfRbq97CxAVI1KSYcW+lexV8VZb7G7L8zuPVSdQMRn0kiBpf/Yzu9ZKH0g==",
+ "requires": {
+ "@babel/compat-data": "^7.13.8",
+ "@babel/helper-compilation-targets": "^7.13.8",
+ "@babel/helper-plugin-utils": "^7.13.0",
+ "@babel/plugin-syntax-object-rest-spread": "^7.8.3",
+ "@babel/plugin-transform-parameters": "^7.13.0"
+ }
+ },
+ "@babel/plugin-proposal-optional-catch-binding": {
+ "version": "7.13.8",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-optional-catch-binding/-/plugin-proposal-optional-catch-binding-7.13.8.tgz",
+ "integrity": "sha512-0wS/4DUF1CuTmGo+NiaHfHcVSeSLj5S3e6RivPTg/2k3wOv3jO35tZ6/ZWsQhQMvdgI7CwphjQa/ccarLymHVA==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.13.0",
+ "@babel/plugin-syntax-optional-catch-binding": "^7.8.3"
+ }
+ },
+ "@babel/plugin-proposal-optional-chaining": {
+ "version": "7.13.12",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-optional-chaining/-/plugin-proposal-optional-chaining-7.13.12.tgz",
+ "integrity": "sha512-fcEdKOkIB7Tf4IxrgEVeFC4zeJSTr78no9wTdBuZZbqF64kzllU0ybo2zrzm7gUQfxGhBgq4E39oRs8Zx/RMYQ==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.13.0",
+ "@babel/helper-skip-transparent-expression-wrappers": "^7.12.1",
+ "@babel/plugin-syntax-optional-chaining": "^7.8.3"
+ }
+ },
+ "@babel/plugin-proposal-private-methods": {
+ "version": "7.13.0",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-private-methods/-/plugin-proposal-private-methods-7.13.0.tgz",
+ "integrity": "sha512-MXyyKQd9inhx1kDYPkFRVOBXQ20ES8Pto3T7UZ92xj2mY0EVD8oAVzeyYuVfy/mxAdTSIayOvg+aVzcHV2bn6Q==",
+ "requires": {
+ "@babel/helper-create-class-features-plugin": "^7.13.0",
+ "@babel/helper-plugin-utils": "^7.13.0"
+ }
+ },
+ "@babel/plugin-proposal-unicode-property-regex": {
+ "version": "7.12.13",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-unicode-property-regex/-/plugin-proposal-unicode-property-regex-7.12.13.tgz",
+ "integrity": "sha512-XyJmZidNfofEkqFV5VC/bLabGmO5QzenPO/YOfGuEbgU+2sSwMmio3YLb4WtBgcmmdwZHyVyv8on77IUjQ5Gvg==",
+ "requires": {
+ "@babel/helper-create-regexp-features-plugin": "^7.12.13",
+ "@babel/helper-plugin-utils": "^7.12.13"
+ }
+ },
+ "@babel/plugin-syntax-async-generators": {
+ "version": "7.8.4",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-async-generators/-/plugin-syntax-async-generators-7.8.4.tgz",
+ "integrity": "sha512-tycmZxkGfZaxhMRbXlPXuVFpdWlXpir2W4AMhSJgRKzk/eDlIXOhb2LHWoLpDF7TEHylV5zNhykX6KAgHJmTNw==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.8.0"
+ }
+ },
+ "@babel/plugin-syntax-class-properties": {
+ "version": "7.12.13",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-properties/-/plugin-syntax-class-properties-7.12.13.tgz",
+ "integrity": "sha512-fm4idjKla0YahUNgFNLCB0qySdsoPiZP3iQE3rky0mBUtMZ23yDJ9SJdg6dXTSDnulOVqiF3Hgr9nbXvXTQZYA==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.12.13"
+ }
+ },
+ "@babel/plugin-syntax-decorators": {
+ "version": "7.12.13",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-decorators/-/plugin-syntax-decorators-7.12.13.tgz",
+ "integrity": "sha512-Rw6aIXGuqDLr6/LoBBYE57nKOzQpz/aDkKlMqEwH+Vp0MXbG6H/TfRjaY343LKxzAKAMXIHsQ8JzaZKuDZ9MwA==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.12.13"
+ }
+ },
+ "@babel/plugin-syntax-dynamic-import": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-dynamic-import/-/plugin-syntax-dynamic-import-7.8.3.tgz",
+ "integrity": "sha512-5gdGbFon+PszYzqs83S3E5mpi7/y/8M9eC90MRTZfduQOYW76ig6SOSPNe41IG5LoP3FGBn2N0RjVDSQiS94kQ==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.8.0"
+ }
+ },
+ "@babel/plugin-syntax-export-namespace-from": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-export-namespace-from/-/plugin-syntax-export-namespace-from-7.8.3.tgz",
+ "integrity": "sha512-MXf5laXo6c1IbEbegDmzGPwGNTsHZmEy6QGznu5Sh2UCWvueywb2ee+CCE4zQiZstxU9BMoQO9i6zUFSY0Kj0Q==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.8.3"
+ }
+ },
+ "@babel/plugin-syntax-json-strings": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-json-strings/-/plugin-syntax-json-strings-7.8.3.tgz",
+ "integrity": "sha512-lY6kdGpWHvjoe2vk4WrAapEuBR69EMxZl+RoGRhrFGNYVK8mOPAW8VfbT/ZgrFbXlDNiiaxQnAtgVCZ6jv30EA==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.8.0"
+ }
+ },
+ "@babel/plugin-syntax-jsx": {
+ "version": "7.12.13",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.12.13.tgz",
+ "integrity": "sha512-d4HM23Q1K7oq/SLNmG6mRt85l2csmQ0cHRaxRXjKW0YFdEXqlZ5kzFQKH5Uc3rDJECgu+yCRgPkG04Mm98R/1g==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.12.13"
+ }
+ },
+ "@babel/plugin-syntax-logical-assignment-operators": {
+ "version": "7.10.4",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-logical-assignment-operators/-/plugin-syntax-logical-assignment-operators-7.10.4.tgz",
+ "integrity": "sha512-d8waShlpFDinQ5MtvGU9xDAOzKH47+FFoney2baFIoMr952hKOLp1HR7VszoZvOsV/4+RRszNY7D17ba0te0ig==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.10.4"
+ }
+ },
+ "@babel/plugin-syntax-nullish-coalescing-operator": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-nullish-coalescing-operator/-/plugin-syntax-nullish-coalescing-operator-7.8.3.tgz",
+ "integrity": "sha512-aSff4zPII1u2QD7y+F8oDsz19ew4IGEJg9SVW+bqwpwtfFleiQDMdzA/R+UlWDzfnHFCxxleFT0PMIrR36XLNQ==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.8.0"
+ }
+ },
+ "@babel/plugin-syntax-numeric-separator": {
+ "version": "7.10.4",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-numeric-separator/-/plugin-syntax-numeric-separator-7.10.4.tgz",
+ "integrity": "sha512-9H6YdfkcK/uOnY/K7/aA2xpzaAgkQn37yzWUMRK7OaPOqOpGS1+n0H5hxT9AUw9EsSjPW8SVyMJwYRtWs3X3ug==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.10.4"
+ }
+ },
+ "@babel/plugin-syntax-object-rest-spread": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-object-rest-spread/-/plugin-syntax-object-rest-spread-7.8.3.tgz",
+ "integrity": "sha512-XoqMijGZb9y3y2XskN+P1wUGiVwWZ5JmoDRwx5+3GmEplNyVM2s2Dg8ILFQm8rWM48orGy5YpI5Bl8U1y7ydlA==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.8.0"
+ }
+ },
+ "@babel/plugin-syntax-optional-catch-binding": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-catch-binding/-/plugin-syntax-optional-catch-binding-7.8.3.tgz",
+ "integrity": "sha512-6VPD0Pc1lpTqw0aKoeRTMiB+kWhAoT24PA+ksWSBrFtl5SIRVpZlwN3NNPQjehA2E/91FV3RjLWoVTglWcSV3Q==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.8.0"
+ }
+ },
+ "@babel/plugin-syntax-optional-chaining": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-chaining/-/plugin-syntax-optional-chaining-7.8.3.tgz",
+ "integrity": "sha512-KoK9ErH1MBlCPxV0VANkXW2/dw4vlbGDrFgz8bmUsBGYkFRcbRwMh6cIJubdPrkxRwuGdtCk0v/wPTKbQgBjkg==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.8.0"
+ }
+ },
+ "@babel/plugin-syntax-top-level-await": {
+ "version": "7.12.13",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-top-level-await/-/plugin-syntax-top-level-await-7.12.13.tgz",
+ "integrity": "sha512-A81F9pDwyS7yM//KwbCSDqy3Uj4NMIurtplxphWxoYtNPov7cJsDkAFNNyVlIZ3jwGycVsurZ+LtOA8gZ376iQ==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.12.13"
+ }
+ },
+ "@babel/plugin-transform-arrow-functions": {
+ "version": "7.13.0",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.13.0.tgz",
+ "integrity": "sha512-96lgJagobeVmazXFaDrbmCLQxBysKu7U6Do3mLsx27gf5Dk85ezysrs2BZUpXD703U/Su1xTBDxxar2oa4jAGg==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.13.0"
+ }
+ },
+ "@babel/plugin-transform-async-to-generator": {
+ "version": "7.13.0",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-async-to-generator/-/plugin-transform-async-to-generator-7.13.0.tgz",
+ "integrity": "sha512-3j6E004Dx0K3eGmhxVJxwwI89CTJrce7lg3UrtFuDAVQ/2+SJ/h/aSFOeE6/n0WB1GsOffsJp6MnPQNQ8nmwhg==",
+ "requires": {
+ "@babel/helper-module-imports": "^7.12.13",
+ "@babel/helper-plugin-utils": "^7.13.0",
+ "@babel/helper-remap-async-to-generator": "^7.13.0"
+ }
+ },
+ "@babel/plugin-transform-block-scoped-functions": {
+ "version": "7.12.13",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoped-functions/-/plugin-transform-block-scoped-functions-7.12.13.tgz",
+ "integrity": "sha512-zNyFqbc3kI/fVpqwfqkg6RvBgFpC4J18aKKMmv7KdQ/1GgREapSJAykLMVNwfRGO3BtHj3YQZl8kxCXPcVMVeg==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.12.13"
+ }
+ },
+ "@babel/plugin-transform-block-scoping": {
+ "version": "7.13.16",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.13.16.tgz",
+ "integrity": "sha512-ad3PHUxGnfWF4Efd3qFuznEtZKoBp0spS+DgqzVzRPV7urEBvPLue3y2j80w4Jf2YLzZHj8TOv/Lmvdmh3b2xg==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.13.0"
+ }
+ },
+ "@babel/plugin-transform-classes": {
+ "version": "7.13.0",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-classes/-/plugin-transform-classes-7.13.0.tgz",
+ "integrity": "sha512-9BtHCPUARyVH1oXGcSJD3YpsqRLROJx5ZNP6tN5vnk17N0SVf9WCtf8Nuh1CFmgByKKAIMstitKduoCmsaDK5g==",
+ "requires": {
+ "@babel/helper-annotate-as-pure": "^7.12.13",
+ "@babel/helper-function-name": "^7.12.13",
+ "@babel/helper-optimise-call-expression": "^7.12.13",
+ "@babel/helper-plugin-utils": "^7.13.0",
+ "@babel/helper-replace-supers": "^7.13.0",
+ "@babel/helper-split-export-declaration": "^7.12.13",
+ "globals": "^11.1.0"
+ }
+ },
+ "@babel/plugin-transform-computed-properties": {
+ "version": "7.13.0",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-computed-properties/-/plugin-transform-computed-properties-7.13.0.tgz",
+ "integrity": "sha512-RRqTYTeZkZAz8WbieLTvKUEUxZlUTdmL5KGMyZj7FnMfLNKV4+r5549aORG/mgojRmFlQMJDUupwAMiF2Q7OUg==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.13.0"
+ }
+ },
+ "@babel/plugin-transform-destructuring": {
+ "version": "7.13.17",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.13.17.tgz",
+ "integrity": "sha512-UAUqiLv+uRLO+xuBKKMEpC+t7YRNVRqBsWWq1yKXbBZBje/t3IXCiSinZhjn/DC3qzBfICeYd2EFGEbHsh5RLA==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.13.0"
+ }
+ },
+ "@babel/plugin-transform-dotall-regex": {
+ "version": "7.12.13",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-dotall-regex/-/plugin-transform-dotall-regex-7.12.13.tgz",
+ "integrity": "sha512-foDrozE65ZFdUC2OfgeOCrEPTxdB3yjqxpXh8CH+ipd9CHd4s/iq81kcUpyH8ACGNEPdFqbtzfgzbT/ZGlbDeQ==",
+ "requires": {
+ "@babel/helper-create-regexp-features-plugin": "^7.12.13",
+ "@babel/helper-plugin-utils": "^7.12.13"
+ }
+ },
+ "@babel/plugin-transform-duplicate-keys": {
+ "version": "7.12.13",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-duplicate-keys/-/plugin-transform-duplicate-keys-7.12.13.tgz",
+ "integrity": "sha512-NfADJiiHdhLBW3pulJlJI2NB0t4cci4WTZ8FtdIuNc2+8pslXdPtRRAEWqUY+m9kNOk2eRYbTAOipAxlrOcwwQ==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.12.13"
+ }
+ },
+ "@babel/plugin-transform-exponentiation-operator": {
+ "version": "7.12.13",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-exponentiation-operator/-/plugin-transform-exponentiation-operator-7.12.13.tgz",
+ "integrity": "sha512-fbUelkM1apvqez/yYx1/oICVnGo2KM5s63mhGylrmXUxK/IAXSIf87QIxVfZldWf4QsOafY6vV3bX8aMHSvNrA==",
+ "requires": {
+ "@babel/helper-builder-binary-assignment-operator-visitor": "^7.12.13",
+ "@babel/helper-plugin-utils": "^7.12.13"
+ }
+ },
+ "@babel/plugin-transform-for-of": {
+ "version": "7.13.0",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.13.0.tgz",
+ "integrity": "sha512-IHKT00mwUVYE0zzbkDgNRP6SRzvfGCYsOxIRz8KsiaaHCcT9BWIkO+H9QRJseHBLOGBZkHUdHiqj6r0POsdytg==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.13.0"
+ }
+ },
+ "@babel/plugin-transform-function-name": {
+ "version": "7.12.13",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-function-name/-/plugin-transform-function-name-7.12.13.tgz",
+ "integrity": "sha512-6K7gZycG0cmIwwF7uMK/ZqeCikCGVBdyP2J5SKNCXO5EOHcqi+z7Jwf8AmyDNcBgxET8DrEtCt/mPKPyAzXyqQ==",
+ "requires": {
+ "@babel/helper-function-name": "^7.12.13",
+ "@babel/helper-plugin-utils": "^7.12.13"
+ }
+ },
+ "@babel/plugin-transform-literals": {
+ "version": "7.12.13",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-literals/-/plugin-transform-literals-7.12.13.tgz",
+ "integrity": "sha512-FW+WPjSR7hiUxMcKqyNjP05tQ2kmBCdpEpZHY1ARm96tGQCCBvXKnpjILtDplUnJ/eHZ0lALLM+d2lMFSpYJrQ==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.12.13"
+ }
+ },
+ "@babel/plugin-transform-member-expression-literals": {
+ "version": "7.12.13",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-member-expression-literals/-/plugin-transform-member-expression-literals-7.12.13.tgz",
+ "integrity": "sha512-kxLkOsg8yir4YeEPHLuO2tXP9R/gTjpuTOjshqSpELUN3ZAg2jfDnKUvzzJxObun38sw3wm4Uu69sX/zA7iRvg==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.12.13"
+ }
+ },
+ "@babel/plugin-transform-modules-amd": {
+ "version": "7.13.0",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-amd/-/plugin-transform-modules-amd-7.13.0.tgz",
+ "integrity": "sha512-EKy/E2NHhY/6Vw5d1k3rgoobftcNUmp9fGjb9XZwQLtTctsRBOTRO7RHHxfIky1ogMN5BxN7p9uMA3SzPfotMQ==",
+ "requires": {
+ "@babel/helper-module-transforms": "^7.13.0",
+ "@babel/helper-plugin-utils": "^7.13.0",
+ "babel-plugin-dynamic-import-node": "^2.3.3"
+ }
+ },
+ "@babel/plugin-transform-modules-commonjs": {
+ "version": "7.13.8",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.13.8.tgz",
+ "integrity": "sha512-9QiOx4MEGglfYZ4XOnU79OHr6vIWUakIj9b4mioN8eQIoEh+pf5p/zEB36JpDFWA12nNMiRf7bfoRvl9Rn79Bw==",
+ "requires": {
+ "@babel/helper-module-transforms": "^7.13.0",
+ "@babel/helper-plugin-utils": "^7.13.0",
+ "@babel/helper-simple-access": "^7.12.13",
+ "babel-plugin-dynamic-import-node": "^2.3.3"
+ }
+ },
+ "@babel/plugin-transform-modules-systemjs": {
+ "version": "7.13.8",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-systemjs/-/plugin-transform-modules-systemjs-7.13.8.tgz",
+ "integrity": "sha512-hwqctPYjhM6cWvVIlOIe27jCIBgHCsdH2xCJVAYQm7V5yTMoilbVMi9f6wKg0rpQAOn6ZG4AOyvCqFF/hUh6+A==",
+ "requires": {
+ "@babel/helper-hoist-variables": "^7.13.0",
+ "@babel/helper-module-transforms": "^7.13.0",
+ "@babel/helper-plugin-utils": "^7.13.0",
+ "@babel/helper-validator-identifier": "^7.12.11",
+ "babel-plugin-dynamic-import-node": "^2.3.3"
+ }
+ },
+ "@babel/plugin-transform-modules-umd": {
+ "version": "7.13.0",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-umd/-/plugin-transform-modules-umd-7.13.0.tgz",
+ "integrity": "sha512-D/ILzAh6uyvkWjKKyFE/W0FzWwasv6vPTSqPcjxFqn6QpX3u8DjRVliq4F2BamO2Wee/om06Vyy+vPkNrd4wxw==",
+ "requires": {
+ "@babel/helper-module-transforms": "^7.13.0",
+ "@babel/helper-plugin-utils": "^7.13.0"
+ }
+ },
+ "@babel/plugin-transform-named-capturing-groups-regex": {
+ "version": "7.12.13",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-named-capturing-groups-regex/-/plugin-transform-named-capturing-groups-regex-7.12.13.tgz",
+ "integrity": "sha512-Xsm8P2hr5hAxyYblrfACXpQKdQbx4m2df9/ZZSQ8MAhsadw06+jW7s9zsSw6he+mJZXRlVMyEnVktJo4zjk1WA==",
+ "requires": {
+ "@babel/helper-create-regexp-features-plugin": "^7.12.13"
+ }
+ },
+ "@babel/plugin-transform-new-target": {
+ "version": "7.12.13",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-new-target/-/plugin-transform-new-target-7.12.13.tgz",
+ "integrity": "sha512-/KY2hbLxrG5GTQ9zzZSc3xWiOy379pIETEhbtzwZcw9rvuaVV4Fqy7BYGYOWZnaoXIQYbbJ0ziXLa/sKcGCYEQ==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.12.13"
+ }
+ },
+ "@babel/plugin-transform-object-super": {
+ "version": "7.12.13",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-object-super/-/plugin-transform-object-super-7.12.13.tgz",
+ "integrity": "sha512-JzYIcj3XtYspZDV8j9ulnoMPZZnF/Cj0LUxPOjR89BdBVx+zYJI9MdMIlUZjbXDX+6YVeS6I3e8op+qQ3BYBoQ==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.12.13",
+ "@babel/helper-replace-supers": "^7.12.13"
+ }
+ },
+ "@babel/plugin-transform-parameters": {
+ "version": "7.13.0",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.13.0.tgz",
+ "integrity": "sha512-Jt8k/h/mIwE2JFEOb3lURoY5C85ETcYPnbuAJ96zRBzh1XHtQZfs62ChZ6EP22QlC8c7Xqr9q+e1SU5qttwwjw==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.13.0"
+ }
+ },
+ "@babel/plugin-transform-property-literals": {
+ "version": "7.12.13",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-property-literals/-/plugin-transform-property-literals-7.12.13.tgz",
+ "integrity": "sha512-nqVigwVan+lR+g8Fj8Exl0UQX2kymtjcWfMOYM1vTYEKujeyv2SkMgazf2qNcK7l4SDiKyTA/nHCPqL4e2zo1A==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.12.13"
+ }
+ },
+ "@babel/plugin-transform-regenerator": {
+ "version": "7.13.15",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.13.15.tgz",
+ "integrity": "sha512-Bk9cOLSz8DiurcMETZ8E2YtIVJbFCPGW28DJWUakmyVWtQSm6Wsf0p4B4BfEr/eL2Nkhe/CICiUiMOCi1TPhuQ==",
+ "requires": {
+ "regenerator-transform": "^0.14.2"
+ }
+ },
+ "@babel/plugin-transform-reserved-words": {
+ "version": "7.12.13",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-reserved-words/-/plugin-transform-reserved-words-7.12.13.tgz",
+ "integrity": "sha512-xhUPzDXxZN1QfiOy/I5tyye+TRz6lA7z6xaT4CLOjPRMVg1ldRf0LHw0TDBpYL4vG78556WuHdyO9oi5UmzZBg==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.12.13"
+ }
+ },
+ "@babel/plugin-transform-runtime": {
+ "version": "7.13.15",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-runtime/-/plugin-transform-runtime-7.13.15.tgz",
+ "integrity": "sha512-d+ezl76gx6Jal08XngJUkXM4lFXK/5Ikl9Mh4HKDxSfGJXmZ9xG64XT2oivBzfxb/eQ62VfvoMkaCZUKJMVrBA==",
+ "requires": {
+ "@babel/helper-module-imports": "^7.13.12",
+ "@babel/helper-plugin-utils": "^7.13.0",
+ "babel-plugin-polyfill-corejs2": "^0.2.0",
+ "babel-plugin-polyfill-corejs3": "^0.2.0",
+ "babel-plugin-polyfill-regenerator": "^0.2.0",
+ "semver": "^6.3.0"
+ }
+ },
+ "@babel/plugin-transform-shorthand-properties": {
+ "version": "7.12.13",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-shorthand-properties/-/plugin-transform-shorthand-properties-7.12.13.tgz",
+ "integrity": "sha512-xpL49pqPnLtf0tVluuqvzWIgLEhuPpZzvs2yabUHSKRNlN7ScYU7aMlmavOeyXJZKgZKQRBlh8rHbKiJDraTSw==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.12.13"
+ }
+ },
+ "@babel/plugin-transform-spread": {
+ "version": "7.13.0",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-spread/-/plugin-transform-spread-7.13.0.tgz",
+ "integrity": "sha512-V6vkiXijjzYeFmQTr3dBxPtZYLPcUfY34DebOU27jIl2M/Y8Egm52Hw82CSjjPqd54GTlJs5x+CR7HeNr24ckg==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.13.0",
+ "@babel/helper-skip-transparent-expression-wrappers": "^7.12.1"
+ }
+ },
+ "@babel/plugin-transform-sticky-regex": {
+ "version": "7.12.13",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-sticky-regex/-/plugin-transform-sticky-regex-7.12.13.tgz",
+ "integrity": "sha512-Jc3JSaaWT8+fr7GRvQP02fKDsYk4K/lYwWq38r/UGfaxo89ajud321NH28KRQ7xy1Ybc0VUE5Pz8psjNNDUglg==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.12.13"
+ }
+ },
+ "@babel/plugin-transform-template-literals": {
+ "version": "7.13.0",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-template-literals/-/plugin-transform-template-literals-7.13.0.tgz",
+ "integrity": "sha512-d67umW6nlfmr1iehCcBv69eSUSySk1EsIS8aTDX4Xo9qajAh6mYtcl4kJrBkGXuxZPEgVr7RVfAvNW6YQkd4Mw==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.13.0"
+ }
+ },
+ "@babel/plugin-transform-typeof-symbol": {
+ "version": "7.12.13",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-typeof-symbol/-/plugin-transform-typeof-symbol-7.12.13.tgz",
+ "integrity": "sha512-eKv/LmUJpMnu4npgfvs3LiHhJua5fo/CysENxa45YCQXZwKnGCQKAg87bvoqSW1fFT+HA32l03Qxsm8ouTY3ZQ==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.12.13"
+ }
+ },
+ "@babel/plugin-transform-unicode-escapes": {
+ "version": "7.12.13",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-escapes/-/plugin-transform-unicode-escapes-7.12.13.tgz",
+ "integrity": "sha512-0bHEkdwJ/sN/ikBHfSmOXPypN/beiGqjo+o4/5K+vxEFNPRPdImhviPakMKG4x96l85emoa0Z6cDflsdBusZbw==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.12.13"
+ }
+ },
+ "@babel/plugin-transform-unicode-regex": {
+ "version": "7.12.13",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-regex/-/plugin-transform-unicode-regex-7.12.13.tgz",
+ "integrity": "sha512-mDRzSNY7/zopwisPZ5kM9XKCfhchqIYwAKRERtEnhYscZB79VRekuRSoYbN0+KVe3y8+q1h6A4svXtP7N+UoCA==",
+ "requires": {
+ "@babel/helper-create-regexp-features-plugin": "^7.12.13",
+ "@babel/helper-plugin-utils": "^7.12.13"
+ }
+ },
+ "@babel/preset-env": {
+ "version": "7.13.15",
+ "resolved": "https://registry.npmjs.org/@babel/preset-env/-/preset-env-7.13.15.tgz",
+ "integrity": "sha512-D4JAPMXcxk69PKe81jRJ21/fP/uYdcTZ3hJDF5QX2HSI9bBxxYw/dumdR6dGumhjxlprHPE4XWoPaqzZUVy2MA==",
+ "requires": {
+ "@babel/compat-data": "^7.13.15",
+ "@babel/helper-compilation-targets": "^7.13.13",
+ "@babel/helper-plugin-utils": "^7.13.0",
+ "@babel/helper-validator-option": "^7.12.17",
+ "@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": "^7.13.12",
+ "@babel/plugin-proposal-async-generator-functions": "^7.13.15",
+ "@babel/plugin-proposal-class-properties": "^7.13.0",
+ "@babel/plugin-proposal-dynamic-import": "^7.13.8",
+ "@babel/plugin-proposal-export-namespace-from": "^7.12.13",
+ "@babel/plugin-proposal-json-strings": "^7.13.8",
+ "@babel/plugin-proposal-logical-assignment-operators": "^7.13.8",
+ "@babel/plugin-proposal-nullish-coalescing-operator": "^7.13.8",
+ "@babel/plugin-proposal-numeric-separator": "^7.12.13",
+ "@babel/plugin-proposal-object-rest-spread": "^7.13.8",
+ "@babel/plugin-proposal-optional-catch-binding": "^7.13.8",
+ "@babel/plugin-proposal-optional-chaining": "^7.13.12",
+ "@babel/plugin-proposal-private-methods": "^7.13.0",
+ "@babel/plugin-proposal-unicode-property-regex": "^7.12.13",
+ "@babel/plugin-syntax-async-generators": "^7.8.4",
+ "@babel/plugin-syntax-class-properties": "^7.12.13",
+ "@babel/plugin-syntax-dynamic-import": "^7.8.3",
+ "@babel/plugin-syntax-export-namespace-from": "^7.8.3",
+ "@babel/plugin-syntax-json-strings": "^7.8.3",
+ "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4",
+ "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3",
+ "@babel/plugin-syntax-numeric-separator": "^7.10.4",
+ "@babel/plugin-syntax-object-rest-spread": "^7.8.3",
+ "@babel/plugin-syntax-optional-catch-binding": "^7.8.3",
+ "@babel/plugin-syntax-optional-chaining": "^7.8.3",
+ "@babel/plugin-syntax-top-level-await": "^7.12.13",
+ "@babel/plugin-transform-arrow-functions": "^7.13.0",
+ "@babel/plugin-transform-async-to-generator": "^7.13.0",
+ "@babel/plugin-transform-block-scoped-functions": "^7.12.13",
+ "@babel/plugin-transform-block-scoping": "^7.12.13",
+ "@babel/plugin-transform-classes": "^7.13.0",
+ "@babel/plugin-transform-computed-properties": "^7.13.0",
+ "@babel/plugin-transform-destructuring": "^7.13.0",
+ "@babel/plugin-transform-dotall-regex": "^7.12.13",
+ "@babel/plugin-transform-duplicate-keys": "^7.12.13",
+ "@babel/plugin-transform-exponentiation-operator": "^7.12.13",
+ "@babel/plugin-transform-for-of": "^7.13.0",
+ "@babel/plugin-transform-function-name": "^7.12.13",
+ "@babel/plugin-transform-literals": "^7.12.13",
+ "@babel/plugin-transform-member-expression-literals": "^7.12.13",
+ "@babel/plugin-transform-modules-amd": "^7.13.0",
+ "@babel/plugin-transform-modules-commonjs": "^7.13.8",
+ "@babel/plugin-transform-modules-systemjs": "^7.13.8",
+ "@babel/plugin-transform-modules-umd": "^7.13.0",
+ "@babel/plugin-transform-named-capturing-groups-regex": "^7.12.13",
+ "@babel/plugin-transform-new-target": "^7.12.13",
+ "@babel/plugin-transform-object-super": "^7.12.13",
+ "@babel/plugin-transform-parameters": "^7.13.0",
+ "@babel/plugin-transform-property-literals": "^7.12.13",
+ "@babel/plugin-transform-regenerator": "^7.13.15",
+ "@babel/plugin-transform-reserved-words": "^7.12.13",
+ "@babel/plugin-transform-shorthand-properties": "^7.12.13",
+ "@babel/plugin-transform-spread": "^7.13.0",
+ "@babel/plugin-transform-sticky-regex": "^7.12.13",
+ "@babel/plugin-transform-template-literals": "^7.13.0",
+ "@babel/plugin-transform-typeof-symbol": "^7.12.13",
+ "@babel/plugin-transform-unicode-escapes": "^7.12.13",
+ "@babel/plugin-transform-unicode-regex": "^7.12.13",
+ "@babel/preset-modules": "^0.1.4",
+ "@babel/types": "^7.13.14",
+ "babel-plugin-polyfill-corejs2": "^0.2.0",
+ "babel-plugin-polyfill-corejs3": "^0.2.0",
+ "babel-plugin-polyfill-regenerator": "^0.2.0",
+ "core-js-compat": "^3.9.0",
+ "semver": "^6.3.0"
+ }
+ },
+ "@babel/preset-modules": {
+ "version": "0.1.4",
+ "resolved": "https://registry.npmjs.org/@babel/preset-modules/-/preset-modules-0.1.4.tgz",
+ "integrity": "sha512-J36NhwnfdzpmH41M1DrnkkgAqhZaqr/NBdPfQ677mLzlaXo+oDiv1deyCDtgAhz8p328otdob0Du7+xgHGZbKg==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.0.0",
+ "@babel/plugin-proposal-unicode-property-regex": "^7.4.4",
+ "@babel/plugin-transform-dotall-regex": "^7.4.4",
+ "@babel/types": "^7.4.4",
+ "esutils": "^2.0.2"
+ }
+ },
+ "@babel/runtime": {
+ "version": "7.13.17",
+ "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.13.17.tgz",
+ "integrity": "sha512-NCdgJEelPTSh+FEFylhnP1ylq848l1z9t9N0j1Lfbcw0+KXGjsTvUmkxy+voLLXB5SOKMbLLx4jxYliGrYQseA==",
+ "requires": {
+ "regenerator-runtime": "^0.13.4"
+ }
+ },
+ "@babel/template": {
+ "version": "7.12.13",
+ "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.12.13.tgz",
+ "integrity": "sha512-/7xxiGA57xMo/P2GVvdEumr8ONhFOhfgq2ihK3h1e6THqzTAkHbkXgB0xI9yeTfIUoH3+oAeHhqm/I43OTbbjA==",
+ "requires": {
+ "@babel/code-frame": "^7.12.13",
+ "@babel/parser": "^7.12.13",
+ "@babel/types": "^7.12.13"
+ }
+ },
+ "@babel/traverse": {
+ "version": "7.13.17",
+ "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.13.17.tgz",
+ "integrity": "sha512-BMnZn0R+X6ayqm3C3To7o1j7Q020gWdqdyP50KEoVqaCO2c/Im7sYZSmVgvefp8TTMQ+9CtwuBp0Z1CZ8V3Pvg==",
+ "requires": {
+ "@babel/code-frame": "^7.12.13",
+ "@babel/generator": "^7.13.16",
+ "@babel/helper-function-name": "^7.12.13",
+ "@babel/helper-split-export-declaration": "^7.12.13",
+ "@babel/parser": "^7.13.16",
+ "@babel/types": "^7.13.17",
+ "debug": "^4.1.0",
+ "globals": "^11.1.0"
+ },
+ "dependencies": {
+ "debug": {
+ "version": "4.3.1",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz",
+ "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==",
+ "requires": {
+ "ms": "2.1.2"
+ }
+ },
+ "ms": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz",
+ "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w=="
+ }
+ }
+ },
+ "@babel/types": {
+ "version": "7.13.17",
+ "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.13.17.tgz",
+ "integrity": "sha512-RawydLgxbOPDlTLJNtoIypwdmAy//uQIzlKt2+iBiJaRlVuI6QLUxVAyWGNfOzp8Yu4L4lLIacoCyTNtpb4wiA==",
+ "requires": {
+ "@babel/helper-validator-identifier": "^7.12.11",
+ "to-fast-properties": "^2.0.0"
+ }
+ },
+ "@cosmos-ui/vue": {
+ "version": "0.35.0",
+ "resolved": "https://registry.npmjs.org/@cosmos-ui/vue/-/vue-0.35.0.tgz",
+ "integrity": "sha512-WTCJBWSoiDckgvXWPByKkQ7ZVSf9LSMsizIAHBnsi0Zp3GOaEqPNBpgjGt2JEhpDPr7+YwyIgmqQ0S3D+Hq5iQ==",
+ "requires": {
+ "algoliasearch": "^4.1.0",
+ "axios": "^0.19.2",
+ "clipboard-copy": "^3.1.0",
+ "fuse.js": "^3.4.6",
+ "hotkeys-js": "^3.7.3",
+ "js-base64": "^2.5.2",
+ "lodash": "^4.17.15",
+ "markdown-it": "^10.0.0",
+ "prismjs": "^1.19.0",
+ "querystring": "^0.2.0",
+ "tiny-cookie": "^2.3.1",
+ "vue": "^2.6.10"
+ },
+ "dependencies": {
+ "axios": {
+ "version": "0.19.2",
+ "resolved": "https://registry.npmjs.org/axios/-/axios-0.19.2.tgz",
+ "integrity": "sha512-fjgm5MvRHLhx+osE2xoekY70AhARk3a6hkN+3Io1jc00jtquGvxYlKlsFUhmUET0V5te6CcZI7lcv2Ym61mjHA==",
+ "requires": {
+ "follow-redirects": "1.5.10"
+ }
+ },
+ "debug": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-3.1.0.tgz",
+ "integrity": "sha512-OX8XqP7/1a9cqkxYw2yXss15f26NKWBpDXQd0/uK/KPqdQhxbPa994hnzjcE2VqQpDslf55723cKPUOGSmMY3g==",
+ "requires": {
+ "ms": "2.0.0"
+ }
+ },
+ "entities": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/entities/-/entities-2.0.3.tgz",
+ "integrity": "sha512-MyoZ0jgnLvB2X3Lg5HqpFmn1kybDiIfEQmKzTb5apr51Rb+T3KdmMiqa70T+bhGnyv7bQ6WMj2QMHpGMmlrUYQ=="
+ },
+ "follow-redirects": {
+ "version": "1.5.10",
+ "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.5.10.tgz",
+ "integrity": "sha512-0V5l4Cizzvqt5D44aTXbFZz+FtyXV1vrDN6qrelxtfYQKW0KO0W2T/hkE8xvGa/540LkZlkaUjO4ailYTFtHVQ==",
+ "requires": {
+ "debug": "=3.1.0"
+ }
+ },
+ "linkify-it": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/linkify-it/-/linkify-it-2.2.0.tgz",
+ "integrity": "sha512-GnAl/knGn+i1U/wjBz3akz2stz+HrHLsxMwHQGofCDfPvlf+gDKN58UtfmUquTY4/MXeE2x7k19KQmeoZi94Iw==",
+ "requires": {
+ "uc.micro": "^1.0.1"
+ }
+ },
+ "markdown-it": {
+ "version": "10.0.0",
+ "resolved": "https://registry.npmjs.org/markdown-it/-/markdown-it-10.0.0.tgz",
+ "integrity": "sha512-YWOP1j7UbDNz+TumYP1kpwnP0aEa711cJjrAQrzd0UXlbJfc5aAq0F/PZHjiioqDC1NKgvIMX+o+9Bk7yuM2dg==",
+ "requires": {
+ "argparse": "^1.0.7",
+ "entities": "~2.0.0",
+ "linkify-it": "^2.0.0",
+ "mdurl": "^1.0.1",
+ "uc.micro": "^1.0.5"
+ }
+ }
+ }
+ },
+ "@mrmlnc/readdir-enhanced": {
+ "version": "2.2.1",
+ "resolved": "https://registry.npmjs.org/@mrmlnc/readdir-enhanced/-/readdir-enhanced-2.2.1.tgz",
+ "integrity": "sha512-bPHp6Ji8b41szTOcaP63VlnbbO5Ny6dwAATtY6JTjh5N2OLrb5Qk/Th5cRkRQhkWCt+EJsYrNB0MiL+Gpn6e3g==",
+ "requires": {
+ "call-me-maybe": "^1.0.1",
+ "glob-to-regexp": "^0.3.0"
+ }
+ },
+ "@nodelib/fs.stat": {
+ "version": "1.1.3",
+ "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-1.1.3.tgz",
+ "integrity": "sha512-shAmDyaQC4H92APFoIaVDHCx5bStIocgvbwQyxPRrbUY20V1EYTbSDchWbuwlMG3V17cprZhA6+78JfB+3DTPw=="
+ },
+ "@sindresorhus/is": {
+ "version": "0.14.0",
+ "resolved": "https://registry.npmjs.org/@sindresorhus/is/-/is-0.14.0.tgz",
+ "integrity": "sha512-9NET910DNaIPngYnLLPeg+Ogzqsi9uM4mSboU5y6p8S5DzMTVEsJZrawi+BoDNUVBa2DhJqQYUFvMDfgU062LQ=="
+ },
+ "@szmarczak/http-timer": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/@szmarczak/http-timer/-/http-timer-1.1.2.tgz",
+ "integrity": "sha512-XIB2XbzHTN6ieIjfIMV9hlVcfPU26s2vafYWQcZHWXHOxiaRZYEDKEwdl129Zyg50+foYV2jCgtrqSA6qNuNSA==",
+ "requires": {
+ "defer-to-connect": "^1.0.1"
+ }
+ },
+ "@types/glob": {
+ "version": "7.1.3",
+ "resolved": "https://registry.npmjs.org/@types/glob/-/glob-7.1.3.tgz",
+ "integrity": "sha512-SEYeGAIQIQX8NN6LDKprLjbrd5dARM5EXsd8GI/A5l0apYI1fGMWgPHSe4ZKL4eozlAyI+doUE9XbYS4xCkQ1w==",
+ "requires": {
+ "@types/minimatch": "*",
+ "@types/node": "*"
+ }
+ },
+ "@types/json-schema": {
+ "version": "7.0.7",
+ "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.7.tgz",
+ "integrity": "sha512-cxWFQVseBm6O9Gbw1IWb8r6OS4OhSt3hPZLkFApLjM8TEXROBuQGLAH2i2gZpcXdLBIrpXuTDhH7Vbm1iXmNGA=="
+ },
+ "@types/minimatch": {
+ "version": "3.0.4",
+ "resolved": "https://registry.npmjs.org/@types/minimatch/-/minimatch-3.0.4.tgz",
+ "integrity": "sha512-1z8k4wzFnNjVK/tlxvrWuK5WMt6mydWWP7+zvH5eFep4oj+UkrfiJTRtjCeBXNpwaA/FYqqtb4/QS4ianFpIRA=="
+ },
+ "@types/node": {
+ "version": "15.0.1",
+ "resolved": "https://registry.npmjs.org/@types/node/-/node-15.0.1.tgz",
+ "integrity": "sha512-TMkXt0Ck1y0KKsGr9gJtWGjttxlZnnvDtphxUOSd0bfaR6Q1jle+sPvrzNR1urqYTWMinoKvjKfXUGsumaO1PA=="
+ },
+ "@types/q": {
+ "version": "1.5.4",
+ "resolved": "https://registry.npmjs.org/@types/q/-/q-1.5.4.tgz",
+ "integrity": "sha512-1HcDas8SEj4z1Wc696tH56G8OlRaH/sqZOynNNB+HF0WOeXPaxTtbYzJY2oEfiUxjSKjhCKr+MvR7dCHcEelug=="
+ },
+ "@vue/babel-helper-vue-jsx-merge-props": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/@vue/babel-helper-vue-jsx-merge-props/-/babel-helper-vue-jsx-merge-props-1.2.1.tgz",
+ "integrity": "sha512-QOi5OW45e2R20VygMSNhyQHvpdUwQZqGPc748JLGCYEy+yp8fNFNdbNIGAgZmi9e+2JHPd6i6idRuqivyicIkA=="
+ },
+ "@vue/babel-helper-vue-transform-on": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/@vue/babel-helper-vue-transform-on/-/babel-helper-vue-transform-on-1.0.2.tgz",
+ "integrity": "sha512-hz4R8tS5jMn8lDq6iD+yWL6XNB699pGIVLk7WSJnn1dbpjaazsjZQkieJoRX6gW5zpYSCFqQ7jUquPNY65tQYA=="
+ },
+ "@vue/babel-plugin-jsx": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/@vue/babel-plugin-jsx/-/babel-plugin-jsx-1.0.5.tgz",
+ "integrity": "sha512-Jtipy7oI0am5e1q5Ahunm/cCcCh5ssf5VkMQsLR383S3un5Qh7NBfxgSK9kmWf4IXJEhDeYp9kHv8G/EnMai9A==",
+ "requires": {
+ "@babel/helper-module-imports": "^7.0.0",
+ "@babel/plugin-syntax-jsx": "^7.0.0",
+ "@babel/template": "^7.0.0",
+ "@babel/traverse": "^7.0.0",
+ "@babel/types": "^7.0.0",
+ "@vue/babel-helper-vue-transform-on": "^1.0.2",
+ "camelcase": "^6.0.0",
+ "html-tags": "^3.1.0",
+ "svg-tags": "^1.0.0"
+ }
+ },
+ "@vue/babel-plugin-transform-vue-jsx": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/@vue/babel-plugin-transform-vue-jsx/-/babel-plugin-transform-vue-jsx-1.2.1.tgz",
+ "integrity": "sha512-HJuqwACYehQwh1fNT8f4kyzqlNMpBuUK4rSiSES5D4QsYncv5fxFsLyrxFPG2ksO7t5WP+Vgix6tt6yKClwPzA==",
+ "requires": {
+ "@babel/helper-module-imports": "^7.0.0",
+ "@babel/plugin-syntax-jsx": "^7.2.0",
+ "@vue/babel-helper-vue-jsx-merge-props": "^1.2.1",
+ "html-tags": "^2.0.0",
+ "lodash.kebabcase": "^4.1.1",
+ "svg-tags": "^1.0.0"
+ },
+ "dependencies": {
+ "html-tags": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/html-tags/-/html-tags-2.0.0.tgz",
+ "integrity": "sha1-ELMKOGCF9Dzt41PMj6fLDe7qZos="
+ }
+ }
+ },
+ "@vue/babel-preset-app": {
+ "version": "4.5.12",
+ "resolved": "https://registry.npmjs.org/@vue/babel-preset-app/-/babel-preset-app-4.5.12.tgz",
+ "integrity": "sha512-8q67ORQ9O0Ms0nlqsXTVhaBefRBaLrzPxOewAZhdcO7onHwcO5/wRdWtHhZgfpCZlhY7NogkU16z3WnorSSkEA==",
+ "requires": {
+ "@babel/helper-compilation-targets": "^7.9.6",
+ "@babel/helper-module-imports": "^7.8.3",
+ "@babel/plugin-proposal-class-properties": "^7.8.3",
+ "@babel/plugin-proposal-decorators": "^7.8.3",
+ "@babel/plugin-syntax-dynamic-import": "^7.8.3",
+ "@babel/plugin-syntax-jsx": "^7.8.3",
+ "@babel/plugin-transform-runtime": "^7.11.0",
+ "@babel/preset-env": "^7.11.0",
+ "@babel/runtime": "^7.11.0",
+ "@vue/babel-plugin-jsx": "^1.0.3",
+ "@vue/babel-preset-jsx": "^1.2.4",
+ "babel-plugin-dynamic-import-node": "^2.3.3",
+ "core-js-compat": "^3.6.5",
+ "semver": "^6.1.0"
+ }
+ },
+ "@vue/babel-preset-jsx": {
+ "version": "1.2.4",
+ "resolved": "https://registry.npmjs.org/@vue/babel-preset-jsx/-/babel-preset-jsx-1.2.4.tgz",
+ "integrity": "sha512-oRVnmN2a77bYDJzeGSt92AuHXbkIxbf/XXSE3klINnh9AXBmVS1DGa1f0d+dDYpLfsAKElMnqKTQfKn7obcL4w==",
+ "requires": {
+ "@vue/babel-helper-vue-jsx-merge-props": "^1.2.1",
+ "@vue/babel-plugin-transform-vue-jsx": "^1.2.1",
+ "@vue/babel-sugar-composition-api-inject-h": "^1.2.1",
+ "@vue/babel-sugar-composition-api-render-instance": "^1.2.4",
+ "@vue/babel-sugar-functional-vue": "^1.2.2",
+ "@vue/babel-sugar-inject-h": "^1.2.2",
+ "@vue/babel-sugar-v-model": "^1.2.3",
+ "@vue/babel-sugar-v-on": "^1.2.3"
+ }
+ },
+ "@vue/babel-sugar-composition-api-inject-h": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/@vue/babel-sugar-composition-api-inject-h/-/babel-sugar-composition-api-inject-h-1.2.1.tgz",
+ "integrity": "sha512-4B3L5Z2G+7s+9Bwbf+zPIifkFNcKth7fQwekVbnOA3cr3Pq71q71goWr97sk4/yyzH8phfe5ODVzEjX7HU7ItQ==",
+ "requires": {
+ "@babel/plugin-syntax-jsx": "^7.2.0"
+ }
+ },
+ "@vue/babel-sugar-composition-api-render-instance": {
+ "version": "1.2.4",
+ "resolved": "https://registry.npmjs.org/@vue/babel-sugar-composition-api-render-instance/-/babel-sugar-composition-api-render-instance-1.2.4.tgz",
+ "integrity": "sha512-joha4PZznQMsxQYXtR3MnTgCASC9u3zt9KfBxIeuI5g2gscpTsSKRDzWQt4aqNIpx6cv8On7/m6zmmovlNsG7Q==",
+ "requires": {
+ "@babel/plugin-syntax-jsx": "^7.2.0"
+ }
+ },
+ "@vue/babel-sugar-functional-vue": {
+ "version": "1.2.2",
+ "resolved": "https://registry.npmjs.org/@vue/babel-sugar-functional-vue/-/babel-sugar-functional-vue-1.2.2.tgz",
+ "integrity": "sha512-JvbgGn1bjCLByIAU1VOoepHQ1vFsroSA/QkzdiSs657V79q6OwEWLCQtQnEXD/rLTA8rRit4rMOhFpbjRFm82w==",
+ "requires": {
+ "@babel/plugin-syntax-jsx": "^7.2.0"
+ }
+ },
+ "@vue/babel-sugar-inject-h": {
+ "version": "1.2.2",
+ "resolved": "https://registry.npmjs.org/@vue/babel-sugar-inject-h/-/babel-sugar-inject-h-1.2.2.tgz",
+ "integrity": "sha512-y8vTo00oRkzQTgufeotjCLPAvlhnpSkcHFEp60+LJUwygGcd5Chrpn5480AQp/thrxVm8m2ifAk0LyFel9oCnw==",
+ "requires": {
+ "@babel/plugin-syntax-jsx": "^7.2.0"
+ }
+ },
+ "@vue/babel-sugar-v-model": {
+ "version": "1.2.3",
+ "resolved": "https://registry.npmjs.org/@vue/babel-sugar-v-model/-/babel-sugar-v-model-1.2.3.tgz",
+ "integrity": "sha512-A2jxx87mySr/ulAsSSyYE8un6SIH0NWHiLaCWpodPCVOlQVODCaSpiR4+IMsmBr73haG+oeCuSvMOM+ttWUqRQ==",
+ "requires": {
+ "@babel/plugin-syntax-jsx": "^7.2.0",
+ "@vue/babel-helper-vue-jsx-merge-props": "^1.2.1",
+ "@vue/babel-plugin-transform-vue-jsx": "^1.2.1",
+ "camelcase": "^5.0.0",
+ "html-tags": "^2.0.0",
+ "svg-tags": "^1.0.0"
+ },
+ "dependencies": {
+ "camelcase": {
+ "version": "5.3.1",
+ "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz",
+ "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg=="
+ },
+ "html-tags": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/html-tags/-/html-tags-2.0.0.tgz",
+ "integrity": "sha1-ELMKOGCF9Dzt41PMj6fLDe7qZos="
+ }
+ }
+ },
+ "@vue/babel-sugar-v-on": {
+ "version": "1.2.3",
+ "resolved": "https://registry.npmjs.org/@vue/babel-sugar-v-on/-/babel-sugar-v-on-1.2.3.tgz",
+ "integrity": "sha512-kt12VJdz/37D3N3eglBywV8GStKNUhNrsxChXIV+o0MwVXORYuhDTHJRKPgLJRb/EY3vM2aRFQdxJBp9CLikjw==",
+ "requires": {
+ "@babel/plugin-syntax-jsx": "^7.2.0",
+ "@vue/babel-plugin-transform-vue-jsx": "^1.2.1",
+ "camelcase": "^5.0.0"
+ },
+ "dependencies": {
+ "camelcase": {
+ "version": "5.3.1",
+ "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz",
+ "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg=="
+ }
+ }
+ },
+ "@vue/component-compiler-utils": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/@vue/component-compiler-utils/-/component-compiler-utils-3.2.0.tgz",
+ "integrity": "sha512-lejBLa7xAMsfiZfNp7Kv51zOzifnb29FwdnMLa96z26kXErPFioSf9BMcePVIQ6/Gc6/mC0UrPpxAWIHyae0vw==",
+ "requires": {
+ "consolidate": "^0.15.1",
+ "hash-sum": "^1.0.2",
+ "lru-cache": "^4.1.2",
+ "merge-source-map": "^1.1.0",
+ "postcss": "^7.0.14",
+ "postcss-selector-parser": "^6.0.2",
+ "prettier": "^1.18.2",
+ "source-map": "~0.6.1",
+ "vue-template-es2015-compiler": "^1.9.0"
+ },
+ "dependencies": {
+ "lru-cache": {
+ "version": "4.1.5",
+ "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-4.1.5.tgz",
+ "integrity": "sha512-sWZlbEP2OsHNkXrMl5GYk/jKk70MBng6UU4YI/qGDYbgf6YbP4EvmqISbXCoJiRKs+1bSpFHVgQxvJ17F2li5g==",
+ "requires": {
+ "pseudomap": "^1.0.2",
+ "yallist": "^2.1.2"
+ }
+ },
+ "source-map": {
+ "version": "0.6.1",
+ "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz",
+ "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g=="
+ },
+ "yallist": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/yallist/-/yallist-2.1.2.tgz",
+ "integrity": "sha1-HBH5IY8HYImkfdUS+TxmmaaoHVI="
+ }
+ }
+ },
+ "@vuepress/core": {
+ "version": "1.8.2",
+ "resolved": "https://registry.npmjs.org/@vuepress/core/-/core-1.8.2.tgz",
+ "integrity": "sha512-lh9BLC06k9s0wxTuWtCkiNj49fkbW87enp0XSrFZHEoyDGSGndQjZmMMErcHc5Hx7nrW1nzc33sPH1NNtJl0hw==",
+ "requires": {
+ "@babel/core": "^7.8.4",
+ "@vue/babel-preset-app": "^4.1.2",
+ "@vuepress/markdown": "1.8.2",
+ "@vuepress/markdown-loader": "1.8.2",
+ "@vuepress/plugin-last-updated": "1.8.2",
+ "@vuepress/plugin-register-components": "1.8.2",
+ "@vuepress/shared-utils": "1.8.2",
+ "autoprefixer": "^9.5.1",
+ "babel-loader": "^8.0.4",
+ "cache-loader": "^3.0.0",
+ "chokidar": "^2.0.3",
+ "connect-history-api-fallback": "^1.5.0",
+ "copy-webpack-plugin": "^5.0.2",
+ "core-js": "^3.6.4",
+ "cross-spawn": "^6.0.5",
+ "css-loader": "^2.1.1",
+ "file-loader": "^3.0.1",
+ "js-yaml": "^3.13.1",
+ "lru-cache": "^5.1.1",
+ "mini-css-extract-plugin": "0.6.0",
+ "optimize-css-assets-webpack-plugin": "^5.0.1",
+ "portfinder": "^1.0.13",
+ "postcss-loader": "^3.0.0",
+ "postcss-safe-parser": "^4.0.1",
+ "toml": "^3.0.0",
+ "url-loader": "^1.0.1",
+ "vue": "^2.6.10",
+ "vue-loader": "^15.7.1",
+ "vue-router": "^3.4.5",
+ "vue-server-renderer": "^2.6.10",
+ "vue-template-compiler": "^2.6.10",
+ "vuepress-html-webpack-plugin": "^3.2.0",
+ "vuepress-plugin-container": "^2.0.2",
+ "webpack": "^4.8.1",
+ "webpack-chain": "^6.0.0",
+ "webpack-dev-server": "^3.5.1",
+ "webpack-merge": "^4.1.2",
+ "webpackbar": "3.2.0"
+ }
+ },
+ "@vuepress/markdown": {
+ "version": "1.8.2",
+ "resolved": "https://registry.npmjs.org/@vuepress/markdown/-/markdown-1.8.2.tgz",
+ "integrity": "sha512-zznBHVqW+iBkznF/BO/GY9RFu53khyl0Ey0PnGqvwCJpRLNan6y5EXgYumtjw2GSYn5nDTTALYxtyNBdz64PKg==",
+ "requires": {
+ "@vuepress/shared-utils": "1.8.2",
+ "markdown-it": "^8.4.1",
+ "markdown-it-anchor": "^5.0.2",
+ "markdown-it-chain": "^1.3.0",
+ "markdown-it-emoji": "^1.4.0",
+ "markdown-it-table-of-contents": "^0.4.0",
+ "prismjs": "^1.13.0"
+ },
+ "dependencies": {
+ "entities": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/entities/-/entities-1.1.2.tgz",
+ "integrity": "sha512-f2LZMYl1Fzu7YSBKg+RoROelpOaNrcGmE9AZubeDfrCEia483oW4MI4VyFd5VNHIgQ/7qm1I0wUHK1eJnn2y2w=="
+ },
+ "linkify-it": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/linkify-it/-/linkify-it-2.2.0.tgz",
+ "integrity": "sha512-GnAl/knGn+i1U/wjBz3akz2stz+HrHLsxMwHQGofCDfPvlf+gDKN58UtfmUquTY4/MXeE2x7k19KQmeoZi94Iw==",
+ "requires": {
+ "uc.micro": "^1.0.1"
+ }
+ },
+ "markdown-it": {
+ "version": "8.4.2",
+ "resolved": "https://registry.npmjs.org/markdown-it/-/markdown-it-8.4.2.tgz",
+ "integrity": "sha512-GcRz3AWTqSUphY3vsUqQSFMbgR38a4Lh3GWlHRh/7MRwz8mcu9n2IO7HOh+bXHrR9kOPDl5RNCaEsrneb+xhHQ==",
+ "requires": {
+ "argparse": "^1.0.7",
+ "entities": "~1.1.1",
+ "linkify-it": "^2.0.0",
+ "mdurl": "^1.0.1",
+ "uc.micro": "^1.0.5"
+ }
+ }
+ }
+ },
+ "@vuepress/markdown-loader": {
+ "version": "1.8.2",
+ "resolved": "https://registry.npmjs.org/@vuepress/markdown-loader/-/markdown-loader-1.8.2.tgz",
+ "integrity": "sha512-mWzFXikCUcAN/chpKkqZpRYKdo0312hMv8cBea2hvrJYV6y4ODB066XKvXN8JwOcxuCjxWYJkhWGr+pXq1oTtw==",
+ "requires": {
+ "@vuepress/markdown": "1.8.2",
+ "loader-utils": "^1.1.0",
+ "lru-cache": "^5.1.1"
+ }
+ },
+ "@vuepress/plugin-active-header-links": {
+ "version": "1.8.2",
+ "resolved": "https://registry.npmjs.org/@vuepress/plugin-active-header-links/-/plugin-active-header-links-1.8.2.tgz",
+ "integrity": "sha512-JmXAQg8D7J8mcKe2Ue3BZ9dOCzJMJXP4Cnkkc/IrqfDg0ET0l96gYWZohCqlvRIWt4f0VPiFAO4FLYrW+hko+g==",
+ "requires": {
+ "lodash.debounce": "^4.0.8"
+ }
+ },
+ "@vuepress/plugin-google-analytics": {
+ "version": "1.7.1",
+ "resolved": "https://registry.npmjs.org/@vuepress/plugin-google-analytics/-/plugin-google-analytics-1.7.1.tgz",
+ "integrity": "sha512-27fQzRMsqGYpMf+ruyhsdfLv/n6z6b6LutFLE/pH66Itlh6ox9ew31x0pqYBbWIC/a4lBfXYUwFvi+DEvlb1EQ=="
+ },
+ "@vuepress/plugin-last-updated": {
+ "version": "1.8.2",
+ "resolved": "https://registry.npmjs.org/@vuepress/plugin-last-updated/-/plugin-last-updated-1.8.2.tgz",
+ "integrity": "sha512-pYIRZi52huO9b6HY3JQNPKNERCLzMHejjBRt9ekdnJ1xhLs4MmRvt37BoXjI/qzvXkYtr7nmGgnKThNBVRTZuA==",
+ "requires": {
+ "cross-spawn": "^6.0.5"
+ }
+ },
+ "@vuepress/plugin-nprogress": {
+ "version": "1.8.2",
+ "resolved": "https://registry.npmjs.org/@vuepress/plugin-nprogress/-/plugin-nprogress-1.8.2.tgz",
+ "integrity": "sha512-3TOBee2NM3WLr1tdjDTGfrAMggjN+OlEPyKyv8FqThsVkDYhw48O3HwqlThp9KX7UbL3ExxIFBwWRFLC+kYrdw==",
+ "requires": {
+ "nprogress": "^0.2.0"
+ }
+ },
+ "@vuepress/plugin-register-components": {
+ "version": "1.8.2",
+ "resolved": "https://registry.npmjs.org/@vuepress/plugin-register-components/-/plugin-register-components-1.8.2.tgz",
+ "integrity": "sha512-6SUq3nHFMEh9qKFnjA8QnrNxj0kLs7+Gspq1OBU8vtu0NQmSvLFZVaMV7pzT/9zN2nO5Pld5qhsUJv1g71MrEA==",
+ "requires": {
+ "@vuepress/shared-utils": "1.8.2"
+ }
+ },
+ "@vuepress/plugin-search": {
+ "version": "1.8.2",
+ "resolved": "https://registry.npmjs.org/@vuepress/plugin-search/-/plugin-search-1.8.2.tgz",
+ "integrity": "sha512-JrSJr9o0Kar14lVtZ4wfw39pplxvvMh8vDBD9oW09a+6Zi/4bySPGdcdaqdqGW+OHSiZNvG+6uyfKSBBBqF6PA=="
+ },
+ "@vuepress/shared-utils": {
+ "version": "1.8.2",
+ "resolved": "https://registry.npmjs.org/@vuepress/shared-utils/-/shared-utils-1.8.2.tgz",
+ "integrity": "sha512-6kGubc7iBDWruEBUU7yR+sQ++SOhMuvKWvWeTZJKRZedthycdzYz7QVpua0FaZSAJm5/dIt8ymU4WQvxTtZgTQ==",
+ "requires": {
+ "chalk": "^2.3.2",
+ "escape-html": "^1.0.3",
+ "fs-extra": "^7.0.1",
+ "globby": "^9.2.0",
+ "gray-matter": "^4.0.1",
+ "hash-sum": "^1.0.2",
+ "semver": "^6.0.0",
+ "toml": "^3.0.0",
+ "upath": "^1.1.0"
+ }
+ },
+ "@vuepress/theme-default": {
+ "version": "1.8.2",
+ "resolved": "https://registry.npmjs.org/@vuepress/theme-default/-/theme-default-1.8.2.tgz",
+ "integrity": "sha512-rE7M1rs3n2xp4a/GrweO8EGwqFn3EA5gnFWdVmVIHyr7C1nix+EqjpPQF1SVWNnIrDdQuCw38PqS+oND1K2vYw==",
+ "requires": {
+ "@vuepress/plugin-active-header-links": "1.8.2",
+ "@vuepress/plugin-nprogress": "1.8.2",
+ "@vuepress/plugin-search": "1.8.2",
+ "docsearch.js": "^2.5.2",
+ "lodash": "^4.17.15",
+ "stylus": "^0.54.8",
+ "stylus-loader": "^3.0.2",
+ "vuepress-plugin-container": "^2.0.2",
+ "vuepress-plugin-smooth-scroll": "^0.0.3"
+ }
+ },
+ "@webassemblyjs/ast": {
+ "version": "1.9.0",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/ast/-/ast-1.9.0.tgz",
+ "integrity": "sha512-C6wW5L+b7ogSDVqymbkkvuW9kruN//YisMED04xzeBBqjHa2FYnmvOlS6Xj68xWQRgWvI9cIglsjFowH/RJyEA==",
+ "requires": {
+ "@webassemblyjs/helper-module-context": "1.9.0",
+ "@webassemblyjs/helper-wasm-bytecode": "1.9.0",
+ "@webassemblyjs/wast-parser": "1.9.0"
+ }
+ },
+ "@webassemblyjs/floating-point-hex-parser": {
+ "version": "1.9.0",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/floating-point-hex-parser/-/floating-point-hex-parser-1.9.0.tgz",
+ "integrity": "sha512-TG5qcFsS8QB4g4MhrxK5TqfdNe7Ey/7YL/xN+36rRjl/BlGE/NcBvJcqsRgCP6Z92mRE+7N50pRIi8SmKUbcQA=="
+ },
+ "@webassemblyjs/helper-api-error": {
+ "version": "1.9.0",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-api-error/-/helper-api-error-1.9.0.tgz",
+ "integrity": "sha512-NcMLjoFMXpsASZFxJ5h2HZRcEhDkvnNFOAKneP5RbKRzaWJN36NC4jqQHKwStIhGXu5mUWlUUk7ygdtrO8lbmw=="
+ },
+ "@webassemblyjs/helper-buffer": {
+ "version": "1.9.0",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-buffer/-/helper-buffer-1.9.0.tgz",
+ "integrity": "sha512-qZol43oqhq6yBPx7YM3m9Bv7WMV9Eevj6kMi6InKOuZxhw+q9hOkvq5e/PpKSiLfyetpaBnogSbNCfBwyB00CA=="
+ },
+ "@webassemblyjs/helper-code-frame": {
+ "version": "1.9.0",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-code-frame/-/helper-code-frame-1.9.0.tgz",
+ "integrity": "sha512-ERCYdJBkD9Vu4vtjUYe8LZruWuNIToYq/ME22igL+2vj2dQ2OOujIZr3MEFvfEaqKoVqpsFKAGsRdBSBjrIvZA==",
+ "requires": {
+ "@webassemblyjs/wast-printer": "1.9.0"
+ }
+ },
+ "@webassemblyjs/helper-fsm": {
+ "version": "1.9.0",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-fsm/-/helper-fsm-1.9.0.tgz",
+ "integrity": "sha512-OPRowhGbshCb5PxJ8LocpdX9Kl0uB4XsAjl6jH/dWKlk/mzsANvhwbiULsaiqT5GZGT9qinTICdj6PLuM5gslw=="
+ },
+ "@webassemblyjs/helper-module-context": {
+ "version": "1.9.0",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-module-context/-/helper-module-context-1.9.0.tgz",
+ "integrity": "sha512-MJCW8iGC08tMk2enck1aPW+BE5Cw8/7ph/VGZxwyvGbJwjktKkDK7vy7gAmMDx88D7mhDTCNKAW5tED+gZ0W8g==",
+ "requires": {
+ "@webassemblyjs/ast": "1.9.0"
+ }
+ },
+ "@webassemblyjs/helper-wasm-bytecode": {
+ "version": "1.9.0",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-bytecode/-/helper-wasm-bytecode-1.9.0.tgz",
+ "integrity": "sha512-R7FStIzyNcd7xKxCZH5lE0Bqy+hGTwS3LJjuv1ZVxd9O7eHCedSdrId/hMOd20I+v8wDXEn+bjfKDLzTepoaUw=="
+ },
+ "@webassemblyjs/helper-wasm-section": {
+ "version": "1.9.0",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-section/-/helper-wasm-section-1.9.0.tgz",
+ "integrity": "sha512-XnMB8l3ek4tvrKUUku+IVaXNHz2YsJyOOmz+MMkZvh8h1uSJpSen6vYnw3IoQ7WwEuAhL8Efjms1ZWjqh2agvw==",
+ "requires": {
+ "@webassemblyjs/ast": "1.9.0",
+ "@webassemblyjs/helper-buffer": "1.9.0",
+ "@webassemblyjs/helper-wasm-bytecode": "1.9.0",
+ "@webassemblyjs/wasm-gen": "1.9.0"
+ }
+ },
+ "@webassemblyjs/ieee754": {
+ "version": "1.9.0",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/ieee754/-/ieee754-1.9.0.tgz",
+ "integrity": "sha512-dcX8JuYU/gvymzIHc9DgxTzUUTLexWwt8uCTWP3otys596io0L5aW02Gb1RjYpx2+0Jus1h4ZFqjla7umFniTg==",
+ "requires": {
+ "@xtuc/ieee754": "^1.2.0"
+ }
+ },
+ "@webassemblyjs/leb128": {
+ "version": "1.9.0",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/leb128/-/leb128-1.9.0.tgz",
+ "integrity": "sha512-ENVzM5VwV1ojs9jam6vPys97B/S65YQtv/aanqnU7D8aSoHFX8GyhGg0CMfyKNIHBuAVjy3tlzd5QMMINa7wpw==",
+ "requires": {
+ "@xtuc/long": "4.2.2"
+ }
+ },
+ "@webassemblyjs/utf8": {
+ "version": "1.9.0",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/utf8/-/utf8-1.9.0.tgz",
+ "integrity": "sha512-GZbQlWtopBTP0u7cHrEx+73yZKrQoBMpwkGEIqlacljhXCkVM1kMQge/Mf+csMJAjEdSwhOyLAS0AoR3AG5P8w=="
+ },
+ "@webassemblyjs/wasm-edit": {
+ "version": "1.9.0",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-edit/-/wasm-edit-1.9.0.tgz",
+ "integrity": "sha512-FgHzBm80uwz5M8WKnMTn6j/sVbqilPdQXTWraSjBwFXSYGirpkSWE2R9Qvz9tNiTKQvoKILpCuTjBKzOIm0nxw==",
+ "requires": {
+ "@webassemblyjs/ast": "1.9.0",
+ "@webassemblyjs/helper-buffer": "1.9.0",
+ "@webassemblyjs/helper-wasm-bytecode": "1.9.0",
+ "@webassemblyjs/helper-wasm-section": "1.9.0",
+ "@webassemblyjs/wasm-gen": "1.9.0",
+ "@webassemblyjs/wasm-opt": "1.9.0",
+ "@webassemblyjs/wasm-parser": "1.9.0",
+ "@webassemblyjs/wast-printer": "1.9.0"
+ }
+ },
+ "@webassemblyjs/wasm-gen": {
+ "version": "1.9.0",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-gen/-/wasm-gen-1.9.0.tgz",
+ "integrity": "sha512-cPE3o44YzOOHvlsb4+E9qSqjc9Qf9Na1OO/BHFy4OI91XDE14MjFN4lTMezzaIWdPqHnsTodGGNP+iRSYfGkjA==",
+ "requires": {
+ "@webassemblyjs/ast": "1.9.0",
+ "@webassemblyjs/helper-wasm-bytecode": "1.9.0",
+ "@webassemblyjs/ieee754": "1.9.0",
+ "@webassemblyjs/leb128": "1.9.0",
+ "@webassemblyjs/utf8": "1.9.0"
+ }
+ },
+ "@webassemblyjs/wasm-opt": {
+ "version": "1.9.0",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-opt/-/wasm-opt-1.9.0.tgz",
+ "integrity": "sha512-Qkjgm6Anhm+OMbIL0iokO7meajkzQD71ioelnfPEj6r4eOFuqm4YC3VBPqXjFyyNwowzbMD+hizmprP/Fwkl2A==",
+ "requires": {
+ "@webassemblyjs/ast": "1.9.0",
+ "@webassemblyjs/helper-buffer": "1.9.0",
+ "@webassemblyjs/wasm-gen": "1.9.0",
+ "@webassemblyjs/wasm-parser": "1.9.0"
+ }
+ },
+ "@webassemblyjs/wasm-parser": {
+ "version": "1.9.0",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-parser/-/wasm-parser-1.9.0.tgz",
+ "integrity": "sha512-9+wkMowR2AmdSWQzsPEjFU7njh8HTO5MqO8vjwEHuM+AMHioNqSBONRdr0NQQ3dVQrzp0s8lTcYqzUdb7YgELA==",
+ "requires": {
+ "@webassemblyjs/ast": "1.9.0",
+ "@webassemblyjs/helper-api-error": "1.9.0",
+ "@webassemblyjs/helper-wasm-bytecode": "1.9.0",
+ "@webassemblyjs/ieee754": "1.9.0",
+ "@webassemblyjs/leb128": "1.9.0",
+ "@webassemblyjs/utf8": "1.9.0"
+ }
+ },
+ "@webassemblyjs/wast-parser": {
+ "version": "1.9.0",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/wast-parser/-/wast-parser-1.9.0.tgz",
+ "integrity": "sha512-qsqSAP3QQ3LyZjNC/0jBJ/ToSxfYJ8kYyuiGvtn/8MK89VrNEfwj7BPQzJVHi0jGTRK2dGdJ5PRqhtjzoww+bw==",
+ "requires": {
+ "@webassemblyjs/ast": "1.9.0",
+ "@webassemblyjs/floating-point-hex-parser": "1.9.0",
+ "@webassemblyjs/helper-api-error": "1.9.0",
+ "@webassemblyjs/helper-code-frame": "1.9.0",
+ "@webassemblyjs/helper-fsm": "1.9.0",
+ "@xtuc/long": "4.2.2"
+ }
+ },
+ "@webassemblyjs/wast-printer": {
+ "version": "1.9.0",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/wast-printer/-/wast-printer-1.9.0.tgz",
+ "integrity": "sha512-2J0nE95rHXHyQ24cWjMKJ1tqB/ds8z/cyeOZxJhcb+rW+SQASVjuznUSmdz5GpVJTzU8JkhYut0D3siFDD6wsA==",
+ "requires": {
+ "@webassemblyjs/ast": "1.9.0",
+ "@webassemblyjs/wast-parser": "1.9.0",
+ "@xtuc/long": "4.2.2"
+ }
+ },
+ "@xtuc/ieee754": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/@xtuc/ieee754/-/ieee754-1.2.0.tgz",
+ "integrity": "sha512-DX8nKgqcGwsc0eJSqYt5lwP4DH5FlHnmuWWBRy7X0NcaGR0ZtuyeESgMwTYVEtxmsNGY+qit4QYT/MIYTOTPeA=="
+ },
+ "@xtuc/long": {
+ "version": "4.2.2",
+ "resolved": "https://registry.npmjs.org/@xtuc/long/-/long-4.2.2.tgz",
+ "integrity": "sha512-NuHqBY1PB/D8xU6s/thBgOAiAP7HOYDQ32+BFZILJ8ivkUkAHQnWfn6WhL79Owj1qmUnoN/YPhktdIoucipkAQ=="
+ },
+ "abbrev": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/abbrev/-/abbrev-1.1.1.tgz",
+ "integrity": "sha512-nne9/IiQ/hzIhY6pdDnbBtz7DjPTKrY00P/zvPSm5pOFkl6xuGrGnXn/VtTNNfNtAfZ9/1RtehkszU9qcTii0Q=="
+ },
+ "accepts": {
+ "version": "1.3.7",
+ "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.7.tgz",
+ "integrity": "sha512-Il80Qs2WjYlJIBNzNkK6KYqlVMTbZLXgHx2oT0pU/fjRHyEp+PEfEPY0R3WCwAGVOtauxh1hOxNgIf5bv7dQpA==",
+ "requires": {
+ "mime-types": "~2.1.24",
+ "negotiator": "0.6.2"
+ }
+ },
+ "acorn": {
+ "version": "7.4.1",
+ "resolved": "https://registry.npmjs.org/acorn/-/acorn-7.4.1.tgz",
+ "integrity": "sha512-nQyp0o1/mNdbTO1PO6kHkwSrmgZ0MT/jCCpNiwbUjGoRN4dlBhqJtoQuCnEOKzgTVwg0ZWiCoQy6SxMebQVh8A=="
+ },
+ "agentkeepalive": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/agentkeepalive/-/agentkeepalive-2.2.0.tgz",
+ "integrity": "sha1-xdG9SxKQCPEWPyNvhuX66iAm4u8="
+ },
+ "ajv": {
+ "version": "6.12.6",
+ "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz",
+ "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==",
+ "requires": {
+ "fast-deep-equal": "^3.1.1",
+ "fast-json-stable-stringify": "^2.0.0",
+ "json-schema-traverse": "^0.4.1",
+ "uri-js": "^4.2.2"
+ }
+ },
+ "ajv-errors": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/ajv-errors/-/ajv-errors-1.0.1.tgz",
+ "integrity": "sha512-DCRfO/4nQ+89p/RK43i8Ezd41EqdGIU4ld7nGF8OQ14oc/we5rEntLCUa7+jrn3nn83BosfwZA0wb4pon2o8iQ==",
+ "requires": {}
+ },
+ "ajv-keywords": {
+ "version": "3.5.2",
+ "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz",
+ "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==",
+ "requires": {}
+ },
+ "algoliasearch": {
+ "version": "4.9.1",
+ "resolved": "https://registry.npmjs.org/algoliasearch/-/algoliasearch-4.9.1.tgz",
+ "integrity": "sha512-EeJUYXzBEhZSsL6tXc3hseLBCtlNLa1MZ4mlMK6EeX38yRjY5vgnFcNNml6uUhlOjvheKxgkKRpPWkxgL8Cqkg==",
+ "requires": {
+ "@algolia/cache-browser-local-storage": "4.9.1",
+ "@algolia/cache-common": "4.9.1",
+ "@algolia/cache-in-memory": "4.9.1",
+ "@algolia/client-account": "4.9.1",
+ "@algolia/client-analytics": "4.9.1",
+ "@algolia/client-common": "4.9.1",
+ "@algolia/client-recommendation": "4.9.1",
+ "@algolia/client-search": "4.9.1",
+ "@algolia/logger-common": "4.9.1",
+ "@algolia/logger-console": "4.9.1",
+ "@algolia/requester-browser-xhr": "4.9.1",
+ "@algolia/requester-common": "4.9.1",
+ "@algolia/requester-node-http": "4.9.1",
+ "@algolia/transporter": "4.9.1"
+ }
+ },
+ "alphanum-sort": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/alphanum-sort/-/alphanum-sort-1.0.2.tgz",
+ "integrity": "sha1-l6ERlkmyEa0zaR2fn0hqjsn74KM="
+ },
+ "ansi-align": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/ansi-align/-/ansi-align-3.0.0.tgz",
+ "integrity": "sha512-ZpClVKqXN3RGBmKibdfWzqCY4lnjEuoNzU5T0oEFpfd/z5qJHVarukridD4juLO2FXMiwUQxr9WqQtaYa8XRYw==",
+ "requires": {
+ "string-width": "^3.0.0"
+ },
+ "dependencies": {
+ "ansi-regex": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.0.tgz",
+ "integrity": "sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg=="
+ },
+ "emoji-regex": {
+ "version": "7.0.3",
+ "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-7.0.3.tgz",
+ "integrity": "sha512-CwBLREIQ7LvYFB0WyRvwhq5N5qPhc6PMjD6bYggFlI5YyDgl+0vxq5VHbMOFqLg7hfWzmu8T5Z1QofhmTIhItA=="
+ },
+ "is-fullwidth-code-point": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz",
+ "integrity": "sha1-o7MKXE8ZkYMWeqq5O+764937ZU8="
+ },
+ "string-width": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/string-width/-/string-width-3.1.0.tgz",
+ "integrity": "sha512-vafcv6KjVZKSgz06oM/H6GDBrAtz8vdhQakGjFIvNrHA6y3HCF1CInLy+QLq8dTJPQ1b+KDUqDFctkdRW44e1w==",
+ "requires": {
+ "emoji-regex": "^7.0.1",
+ "is-fullwidth-code-point": "^2.0.0",
+ "strip-ansi": "^5.1.0"
+ }
+ },
+ "strip-ansi": {
+ "version": "5.2.0",
+ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-5.2.0.tgz",
+ "integrity": "sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA==",
+ "requires": {
+ "ansi-regex": "^4.1.0"
+ }
+ }
+ }
+ },
+ "ansi-colors": {
+ "version": "3.2.4",
+ "resolved": "https://registry.npmjs.org/ansi-colors/-/ansi-colors-3.2.4.tgz",
+ "integrity": "sha512-hHUXGagefjN2iRrID63xckIvotOXOojhQKWIPUZ4mNUZ9nLZW+7FMNoE1lOkEhNWYsx/7ysGIuJYCiMAA9FnrA=="
+ },
+ "ansi-escapes": {
+ "version": "4.3.2",
+ "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-4.3.2.tgz",
+ "integrity": "sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ==",
+ "requires": {
+ "type-fest": "^0.21.3"
+ },
+ "dependencies": {
+ "type-fest": {
+ "version": "0.21.3",
+ "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.21.3.tgz",
+ "integrity": "sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w=="
+ }
+ }
+ },
+ "ansi-html": {
+ "version": "0.0.7",
+ "resolved": "https://registry.npmjs.org/ansi-html/-/ansi-html-0.0.7.tgz",
+ "integrity": "sha1-gTWEAhliqenm/QOflA0S9WynhZ4="
+ },
+ "ansi-regex": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.0.tgz",
+ "integrity": "sha512-bY6fj56OUQ0hU1KjFNDQuJFezqKdrAyFdIevADiqrWHwSlbmBNMHp5ak2f40Pm8JTFyM2mqxkG6ngkHO11f/lg=="
+ },
+ "ansi-styles": {
+ "version": "3.2.1",
+ "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz",
+ "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==",
+ "requires": {
+ "color-convert": "^1.9.0"
+ }
+ },
+ "anymatch": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-2.0.0.tgz",
+ "integrity": "sha512-5teOsQWABXHHBFP9y3skS5P3d/WfWXpv3FUpy+LorMrNYaT9pI4oLMQX7jzQ2KklNpGpWHzdCXTDT2Y3XGlZBw==",
+ "requires": {
+ "micromatch": "^3.1.4",
+ "normalize-path": "^2.1.1"
+ },
+ "dependencies": {
+ "normalize-path": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-2.1.1.tgz",
+ "integrity": "sha1-GrKLVW4Zg2Oowab35vogE3/mrtk=",
+ "requires": {
+ "remove-trailing-separator": "^1.0.1"
+ }
+ }
+ }
+ },
+ "aproba": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/aproba/-/aproba-1.2.0.tgz",
+ "integrity": "sha512-Y9J6ZjXtoYh8RnXVCMOU/ttDmk1aBjunq9vO0ta5x85WDQiQfUF9sIPBITdbiiIVcBo03Hi3jMxigBtsddlXRw=="
+ },
+ "argparse": {
+ "version": "1.0.10",
+ "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz",
+ "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==",
+ "requires": {
+ "sprintf-js": "~1.0.2"
+ }
+ },
+ "arr-diff": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/arr-diff/-/arr-diff-4.0.0.tgz",
+ "integrity": "sha1-1kYQdP6/7HHn4VI1dhoyml3HxSA="
+ },
+ "arr-flatten": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/arr-flatten/-/arr-flatten-1.1.0.tgz",
+ "integrity": "sha512-L3hKV5R/p5o81R7O02IGnwpDmkp6E982XhtbuwSe3O4qOtMMMtodicASA1Cny2U+aCXcNpml+m4dPsvsJ3jatg=="
+ },
+ "arr-union": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/arr-union/-/arr-union-3.1.0.tgz",
+ "integrity": "sha1-45sJrqne+Gao8gbiiK9jkZuuOcQ="
+ },
+ "array-flatten": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-2.1.2.tgz",
+ "integrity": "sha512-hNfzcOV8W4NdualtqBFPyVO+54DSJuZGY9qT4pRroB6S9e3iiido2ISIC5h9R2sPJ8H3FHCIiEnsv1lPXO3KtQ=="
+ },
+ "array-union": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/array-union/-/array-union-1.0.2.tgz",
+ "integrity": "sha1-mjRBDk9OPaI96jdb5b5w8kd47Dk=",
+ "requires": {
+ "array-uniq": "^1.0.1"
+ }
+ },
+ "array-uniq": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/array-uniq/-/array-uniq-1.0.3.tgz",
+ "integrity": "sha1-r2rId6Jcx/dOBYiUdThY39sk/bY="
+ },
+ "array-unique": {
+ "version": "0.3.2",
+ "resolved": "https://registry.npmjs.org/array-unique/-/array-unique-0.3.2.tgz",
+ "integrity": "sha1-qJS3XUvE9s1nnvMkSp/Y9Gri1Cg="
+ },
+ "asap": {
+ "version": "2.0.6",
+ "resolved": "https://registry.npmjs.org/asap/-/asap-2.0.6.tgz",
+ "integrity": "sha1-5QNHYR1+aQlDIIu9r+vLwvuGbUY="
+ },
+ "asn1": {
+ "version": "0.2.4",
+ "resolved": "https://registry.npmjs.org/asn1/-/asn1-0.2.4.tgz",
+ "integrity": "sha512-jxwzQpLQjSmWXgwaCZE9Nz+glAG01yF1QnWgbhGwHI5A6FRIEY6IVqtHhIepHqI7/kyEyQEagBC5mBEFlIYvdg==",
+ "requires": {
+ "safer-buffer": "~2.1.0"
+ }
+ },
+ "asn1.js": {
+ "version": "5.4.1",
+ "resolved": "https://registry.npmjs.org/asn1.js/-/asn1.js-5.4.1.tgz",
+ "integrity": "sha512-+I//4cYPccV8LdmBLiX8CYvf9Sp3vQsrqu2QNXRcrbiWvcx/UdlFiqUJJzxRQxgsZmvhXhn4cSKeSmoFjVdupA==",
+ "requires": {
+ "bn.js": "^4.0.0",
+ "inherits": "^2.0.1",
+ "minimalistic-assert": "^1.0.0",
+ "safer-buffer": "^2.1.0"
+ },
+ "dependencies": {
+ "bn.js": {
+ "version": "4.12.0",
+ "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.12.0.tgz",
+ "integrity": "sha512-c98Bf3tPniI+scsdk237ku1Dc3ujXQTSgyiPUDEOe7tRkhrqridvh8klBv0HCEso1OLOYcHuCv/cS6DNxKH+ZA=="
+ }
+ }
+ },
+ "assert": {
+ "version": "1.5.0",
+ "resolved": "https://registry.npmjs.org/assert/-/assert-1.5.0.tgz",
+ "integrity": "sha512-EDsgawzwoun2CZkCgtxJbv392v4nbk9XDD06zI+kQYoBM/3RBWLlEyJARDOmhAAosBjWACEkKL6S+lIZtcAubA==",
+ "requires": {
+ "object-assign": "^4.1.1",
+ "util": "0.10.3"
+ },
+ "dependencies": {
+ "inherits": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.1.tgz",
+ "integrity": "sha1-sX0I0ya0Qj5Wjv9xn5GwscvfafE="
+ },
+ "util": {
+ "version": "0.10.3",
+ "resolved": "https://registry.npmjs.org/util/-/util-0.10.3.tgz",
+ "integrity": "sha1-evsa/lCAUkZInj23/g7TeTNqwPk=",
+ "requires": {
+ "inherits": "2.0.1"
+ }
+ }
+ }
+ },
+ "assert-never": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/assert-never/-/assert-never-1.2.1.tgz",
+ "integrity": "sha512-TaTivMB6pYI1kXwrFlEhLeGfOqoDNdTxjCdwRfFFkEA30Eu+k48W34nlok2EYWJfFFzqaEmichdNM7th6M5HNw=="
+ },
+ "assert-plus": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/assert-plus/-/assert-plus-1.0.0.tgz",
+ "integrity": "sha1-8S4PPF13sLHN2RRpQuTpbB5N1SU="
+ },
+ "assign-symbols": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/assign-symbols/-/assign-symbols-1.0.0.tgz",
+ "integrity": "sha1-WWZ/QfrdTyDMvCu5a41Pf3jsA2c="
+ },
+ "async": {
+ "version": "2.6.3",
+ "resolved": "https://registry.npmjs.org/async/-/async-2.6.3.tgz",
+ "integrity": "sha512-zflvls11DCy+dQWzTW2dzuilv8Z5X/pjfmZOWba6TNIVDm+2UDaJmXSOXlasHKfNBs8oo3M0aT50fDEWfKZjXg==",
+ "requires": {
+ "lodash": "^4.17.14"
+ }
+ },
+ "async-each": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/async-each/-/async-each-1.0.3.tgz",
+ "integrity": "sha512-z/WhQ5FPySLdvREByI2vZiTWwCnF0moMJ1hK9YQwDTHKh6I7/uSckMetoRGb5UBZPC1z0jlw+n/XCgjeH7y1AQ=="
+ },
+ "async-limiter": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/async-limiter/-/async-limiter-1.0.1.tgz",
+ "integrity": "sha512-csOlWGAcRFJaI6m+F2WKdnMKr4HhdhFVBk0H/QbJFMCr+uO2kwohwXQPxw/9OCxp05r5ghVBFSyioixx3gfkNQ=="
+ },
+ "asynckit": {
+ "version": "0.4.0",
+ "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz",
+ "integrity": "sha1-x57Zf380y48robyXkLzDZkdLS3k="
+ },
+ "atob": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/atob/-/atob-2.1.2.tgz",
+ "integrity": "sha512-Wm6ukoaOGJi/73p/cl2GvLjTI5JM1k/O14isD73YML8StrH/7/lRFgmg8nICZgD3bZZvjwCGxtMOD3wWNAu8cg=="
+ },
+ "autocomplete.js": {
+ "version": "0.36.0",
+ "resolved": "https://registry.npmjs.org/autocomplete.js/-/autocomplete.js-0.36.0.tgz",
+ "integrity": "sha512-jEwUXnVMeCHHutUt10i/8ZiRaCb0Wo+ZyKxeGsYwBDtw6EJHqEeDrq4UwZRD8YBSvp3g6klP678il2eeiVXN2Q==",
+ "requires": {
+ "immediate": "^3.2.3"
+ }
+ },
+ "autoprefixer": {
+ "version": "9.8.6",
+ "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-9.8.6.tgz",
+ "integrity": "sha512-XrvP4VVHdRBCdX1S3WXVD8+RyG9qeb1D5Sn1DeLiG2xfSpzellk5k54xbUERJ3M5DggQxes39UGOTP8CFrEGbg==",
+ "requires": {
+ "browserslist": "^4.12.0",
+ "caniuse-lite": "^1.0.30001109",
+ "colorette": "^1.2.1",
+ "normalize-range": "^0.1.2",
+ "num2fraction": "^1.2.2",
+ "postcss": "^7.0.32",
+ "postcss-value-parser": "^4.1.0"
+ }
+ },
+ "aws-sign2": {
+ "version": "0.7.0",
+ "resolved": "https://registry.npmjs.org/aws-sign2/-/aws-sign2-0.7.0.tgz",
+ "integrity": "sha1-tG6JCTSpWR8tL2+G1+ap8bP+dqg="
+ },
+ "aws4": {
+ "version": "1.11.0",
+ "resolved": "https://registry.npmjs.org/aws4/-/aws4-1.11.0.tgz",
+ "integrity": "sha512-xh1Rl34h6Fi1DC2WWKfxUTVqRsNnr6LsKz2+hfwDxQJWmrx8+c7ylaqBMcHfl1U1r2dsifOvKX3LQuLNZ+XSvA=="
+ },
+ "axios": {
+ "version": "0.21.1",
+ "resolved": "https://registry.npmjs.org/axios/-/axios-0.21.1.tgz",
+ "integrity": "sha512-dKQiRHxGD9PPRIUNIWvZhPTPpl1rf/OxTYKsqKUDjBwYylTvV7SjSHJb9ratfyzM6wCdLCOYLzs73qpg5c4iGA==",
+ "requires": {
+ "follow-redirects": "^1.10.0"
+ }
+ },
+ "babel-loader": {
+ "version": "8.2.2",
+ "resolved": "https://registry.npmjs.org/babel-loader/-/babel-loader-8.2.2.tgz",
+ "integrity": "sha512-JvTd0/D889PQBtUXJ2PXaKU/pjZDMtHA9V2ecm+eNRmmBCMR09a+fmpGTNwnJtFmFl5Ei7Vy47LjBb+L0wQ99g==",
+ "requires": {
+ "find-cache-dir": "^3.3.1",
+ "loader-utils": "^1.4.0",
+ "make-dir": "^3.1.0",
+ "schema-utils": "^2.6.5"
+ }
+ },
+ "babel-plugin-dynamic-import-node": {
+ "version": "2.3.3",
+ "resolved": "https://registry.npmjs.org/babel-plugin-dynamic-import-node/-/babel-plugin-dynamic-import-node-2.3.3.tgz",
+ "integrity": "sha512-jZVI+s9Zg3IqA/kdi0i6UDCybUI3aSBLnglhYbSSjKlV7yF1F/5LWv8MakQmvYpnbJDS6fcBL2KzHSxNCMtWSQ==",
+ "requires": {
+ "object.assign": "^4.1.0"
+ }
+ },
+ "babel-plugin-polyfill-corejs2": {
+ "version": "0.2.0",
+ "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs2/-/babel-plugin-polyfill-corejs2-0.2.0.tgz",
+ "integrity": "sha512-9bNwiR0dS881c5SHnzCmmGlMkJLl0OUZvxrxHo9w/iNoRuqaPjqlvBf4HrovXtQs/au5yKkpcdgfT1cC5PAZwg==",
+ "requires": {
+ "@babel/compat-data": "^7.13.11",
+ "@babel/helper-define-polyfill-provider": "^0.2.0",
+ "semver": "^6.1.1"
+ }
+ },
+ "babel-plugin-polyfill-corejs3": {
+ "version": "0.2.0",
+ "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs3/-/babel-plugin-polyfill-corejs3-0.2.0.tgz",
+ "integrity": "sha512-zZyi7p3BCUyzNxLx8KV61zTINkkV65zVkDAFNZmrTCRVhjo1jAS+YLvDJ9Jgd/w2tsAviCwFHReYfxO3Iql8Yg==",
+ "requires": {
+ "@babel/helper-define-polyfill-provider": "^0.2.0",
+ "core-js-compat": "^3.9.1"
+ }
+ },
+ "babel-plugin-polyfill-regenerator": {
+ "version": "0.2.0",
+ "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-regenerator/-/babel-plugin-polyfill-regenerator-0.2.0.tgz",
+ "integrity": "sha512-J7vKbCuD2Xi/eEHxquHN14bXAW9CXtecwuLrOIDJtcZzTaPzV1VdEfoUf9AzcRBMolKUQKM9/GVojeh0hFiqMg==",
+ "requires": {
+ "@babel/helper-define-polyfill-provider": "^0.2.0"
+ }
+ },
+ "babel-walk": {
+ "version": "3.0.0-canary-5",
+ "resolved": "https://registry.npmjs.org/babel-walk/-/babel-walk-3.0.0-canary-5.tgz",
+ "integrity": "sha512-GAwkz0AihzY5bkwIY5QDR+LvsRQgB/B+1foMPvi0FZPMl5fjD7ICiznUiBdLYMH1QYe6vqu4gWYytZOccLouFw==",
+ "requires": {
+ "@babel/types": "^7.9.6"
+ }
+ },
+ "balanced-match": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz",
+ "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw=="
+ },
+ "base": {
+ "version": "0.11.2",
+ "resolved": "https://registry.npmjs.org/base/-/base-0.11.2.tgz",
+ "integrity": "sha512-5T6P4xPgpp0YDFvSWwEZ4NoE3aM4QBQXDzmVbraCkFj8zHM+mba8SyqB5DbZWyR7mYHo6Y7BdQo3MoA4m0TeQg==",
+ "requires": {
+ "cache-base": "^1.0.1",
+ "class-utils": "^0.3.5",
+ "component-emitter": "^1.2.1",
+ "define-property": "^1.0.0",
+ "isobject": "^3.0.1",
+ "mixin-deep": "^1.2.0",
+ "pascalcase": "^0.1.1"
+ },
+ "dependencies": {
+ "define-property": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/define-property/-/define-property-1.0.0.tgz",
+ "integrity": "sha1-dp66rz9KY6rTr56NMEybvnm/sOY=",
+ "requires": {
+ "is-descriptor": "^1.0.0"
+ }
+ }
+ }
+ },
+ "base64-js": {
+ "version": "1.5.1",
+ "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz",
+ "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA=="
+ },
+ "batch": {
+ "version": "0.6.1",
+ "resolved": "https://registry.npmjs.org/batch/-/batch-0.6.1.tgz",
+ "integrity": "sha1-3DQxT05nkxgJP8dgJyUl+UvyXBY="
+ },
+ "bcrypt-pbkdf": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.2.tgz",
+ "integrity": "sha1-pDAdOJtqQ/m2f/PKEaP2Y342Dp4=",
+ "requires": {
+ "tweetnacl": "^0.14.3"
+ }
+ },
+ "big.js": {
+ "version": "5.2.2",
+ "resolved": "https://registry.npmjs.org/big.js/-/big.js-5.2.2.tgz",
+ "integrity": "sha512-vyL2OymJxmarO8gxMr0mhChsO9QGwhynfuu4+MHTAW6czfq9humCB7rKpUjDd9YUiDPU4mzpyupFSvOClAwbmQ=="
+ },
+ "binary-extensions": {
+ "version": "1.13.1",
+ "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-1.13.1.tgz",
+ "integrity": "sha512-Un7MIEDdUC5gNpcGDV97op1Ywk748MpHcFTHoYs6qnj1Z3j7I53VG3nwZhKzoBZmbdRNnb6WRdFlwl7tSDuZGw=="
+ },
+ "bindings": {
+ "version": "1.5.0",
+ "resolved": "https://registry.npmjs.org/bindings/-/bindings-1.5.0.tgz",
+ "integrity": "sha512-p2q/t/mhvuOj/UeLlV6566GD/guowlr0hHxClI0W9m7MWYkL1F0hLo+0Aexs9HSPCtR1SXQ0TD3MMKrXZajbiQ==",
+ "optional": true,
+ "requires": {
+ "file-uri-to-path": "1.0.0"
+ }
+ },
+ "bluebird": {
+ "version": "3.7.2",
+ "resolved": "https://registry.npmjs.org/bluebird/-/bluebird-3.7.2.tgz",
+ "integrity": "sha512-XpNj6GDQzdfW+r2Wnn7xiSAd7TM3jzkxGXBGTtWKuSXv1xUV+azxAm8jdWZN06QTQk+2N2XB9jRDkvbmQmcRtg=="
+ },
+ "bn.js": {
+ "version": "5.2.0",
+ "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-5.2.0.tgz",
+ "integrity": "sha512-D7iWRBvnZE8ecXiLj/9wbxH7Tk79fAh8IHaTNq1RWRixsS02W+5qS+iE9yq6RYl0asXx5tw0bLhmT5pIfbSquw=="
+ },
+ "body-parser": {
+ "version": "1.19.0",
+ "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.19.0.tgz",
+ "integrity": "sha512-dhEPs72UPbDnAQJ9ZKMNTP6ptJaionhP5cBb541nXPlW60Jepo9RV/a4fX4XWW9CuFNK22krhrj1+rgzifNCsw==",
+ "requires": {
+ "bytes": "3.1.0",
+ "content-type": "~1.0.4",
+ "debug": "2.6.9",
+ "depd": "~1.1.2",
+ "http-errors": "1.7.2",
+ "iconv-lite": "0.4.24",
+ "on-finished": "~2.3.0",
+ "qs": "6.7.0",
+ "raw-body": "2.4.0",
+ "type-is": "~1.6.17"
+ },
+ "dependencies": {
+ "bytes": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.0.tgz",
+ "integrity": "sha512-zauLjrfCG+xvoyaqLoV8bLVXXNGC4JqlxFCutSDWA6fJrTo2ZuvLYTqZ7aHBLZSMOopbzwv8f+wZcVzfVTI2Dg=="
+ },
+ "qs": {
+ "version": "6.7.0",
+ "resolved": "https://registry.npmjs.org/qs/-/qs-6.7.0.tgz",
+ "integrity": "sha512-VCdBRNFTX1fyE7Nb6FYoURo/SPe62QCaAyzJvUjwRaIsc+NePBEniHlvxFmmX56+HZphIGtV0XeCirBtpDrTyQ=="
+ }
+ }
+ },
+ "bonjour": {
+ "version": "3.5.0",
+ "resolved": "https://registry.npmjs.org/bonjour/-/bonjour-3.5.0.tgz",
+ "integrity": "sha1-jokKGD2O6aI5OzhExpGkK897yfU=",
+ "requires": {
+ "array-flatten": "^2.1.0",
+ "deep-equal": "^1.0.1",
+ "dns-equal": "^1.0.0",
+ "dns-txt": "^2.0.2",
+ "multicast-dns": "^6.0.1",
+ "multicast-dns-service-types": "^1.1.0"
+ }
+ },
+ "boolbase": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/boolbase/-/boolbase-1.0.0.tgz",
+ "integrity": "sha1-aN/1++YMUes3cl6p4+0xDcwed24="
+ },
+ "boxen": {
+ "version": "4.2.0",
+ "resolved": "https://registry.npmjs.org/boxen/-/boxen-4.2.0.tgz",
+ "integrity": "sha512-eB4uT9RGzg2odpER62bBwSLvUeGC+WbRjjyyFhGsKnc8wp/m0+hQsMUvUe3H2V0D5vw0nBdO1hCJoZo5mKeuIQ==",
+ "requires": {
+ "ansi-align": "^3.0.0",
+ "camelcase": "^5.3.1",
+ "chalk": "^3.0.0",
+ "cli-boxes": "^2.2.0",
+ "string-width": "^4.1.0",
+ "term-size": "^2.1.0",
+ "type-fest": "^0.8.1",
+ "widest-line": "^3.1.0"
+ },
+ "dependencies": {
+ "ansi-styles": {
+ "version": "4.3.0",
+ "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz",
+ "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==",
+ "requires": {
+ "color-convert": "^2.0.1"
+ }
+ },
+ "camelcase": {
+ "version": "5.3.1",
+ "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz",
+ "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg=="
+ },
+ "chalk": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/chalk/-/chalk-3.0.0.tgz",
+ "integrity": "sha512-4D3B6Wf41KOYRFdszmDqMCGq5VV/uMAB273JILmO+3jAlh8X4qDtdtgCR3fxtbLEMzSx22QdhnDcJvu2u1fVwg==",
+ "requires": {
+ "ansi-styles": "^4.1.0",
+ "supports-color": "^7.1.0"
+ }
+ },
+ "color-convert": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz",
+ "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==",
+ "requires": {
+ "color-name": "~1.1.4"
+ }
+ },
+ "color-name": {
+ "version": "1.1.4",
+ "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz",
+ "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA=="
+ },
+ "has-flag": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz",
+ "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ=="
+ },
+ "supports-color": {
+ "version": "7.2.0",
+ "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz",
+ "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==",
+ "requires": {
+ "has-flag": "^4.0.0"
+ }
+ }
+ }
+ },
+ "brace-expansion": {
+ "version": "1.1.11",
+ "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz",
+ "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==",
+ "requires": {
+ "balanced-match": "^1.0.0",
+ "concat-map": "0.0.1"
+ }
+ },
+ "braces": {
+ "version": "2.3.2",
+ "resolved": "https://registry.npmjs.org/braces/-/braces-2.3.2.tgz",
+ "integrity": "sha512-aNdbnj9P8PjdXU4ybaWLK2IF3jc/EoDYbC7AazW6to3TRsfXxscC9UXOB5iDiEQrkyIbWp2SLQda4+QAa7nc3w==",
+ "requires": {
+ "arr-flatten": "^1.1.0",
+ "array-unique": "^0.3.2",
+ "extend-shallow": "^2.0.1",
+ "fill-range": "^4.0.0",
+ "isobject": "^3.0.1",
+ "repeat-element": "^1.1.2",
+ "snapdragon": "^0.8.1",
+ "snapdragon-node": "^2.0.1",
+ "split-string": "^3.0.2",
+ "to-regex": "^3.0.1"
+ }
+ },
+ "brorand": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/brorand/-/brorand-1.1.0.tgz",
+ "integrity": "sha1-EsJe/kCkXjwyPrhnWgoM5XsiNx8="
+ },
+ "browserify-aes": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/browserify-aes/-/browserify-aes-1.2.0.tgz",
+ "integrity": "sha512-+7CHXqGuspUn/Sl5aO7Ea0xWGAtETPXNSAjHo48JfLdPWcMng33Xe4znFvQweqc/uzk5zSOI3H52CYnjCfb5hA==",
+ "requires": {
+ "buffer-xor": "^1.0.3",
+ "cipher-base": "^1.0.0",
+ "create-hash": "^1.1.0",
+ "evp_bytestokey": "^1.0.3",
+ "inherits": "^2.0.1",
+ "safe-buffer": "^5.0.1"
+ }
+ },
+ "browserify-cipher": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/browserify-cipher/-/browserify-cipher-1.0.1.tgz",
+ "integrity": "sha512-sPhkz0ARKbf4rRQt2hTpAHqn47X3llLkUGn+xEJzLjwY8LRs2p0v7ljvI5EyoRO/mexrNunNECisZs+gw2zz1w==",
+ "requires": {
+ "browserify-aes": "^1.0.4",
+ "browserify-des": "^1.0.0",
+ "evp_bytestokey": "^1.0.0"
+ }
+ },
+ "browserify-des": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/browserify-des/-/browserify-des-1.0.2.tgz",
+ "integrity": "sha512-BioO1xf3hFwz4kc6iBhI3ieDFompMhrMlnDFC4/0/vd5MokpuAc3R+LYbwTA9A5Yc9pq9UYPqffKpW2ObuwX5A==",
+ "requires": {
+ "cipher-base": "^1.0.1",
+ "des.js": "^1.0.0",
+ "inherits": "^2.0.1",
+ "safe-buffer": "^5.1.2"
+ }
+ },
+ "browserify-rsa": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/browserify-rsa/-/browserify-rsa-4.1.0.tgz",
+ "integrity": "sha512-AdEER0Hkspgno2aR97SAf6vi0y0k8NuOpGnVH3O99rcA5Q6sh8QxcngtHuJ6uXwnfAXNM4Gn1Gb7/MV1+Ymbog==",
+ "requires": {
+ "bn.js": "^5.0.0",
+ "randombytes": "^2.0.1"
+ }
+ },
+ "browserify-sign": {
+ "version": "4.2.1",
+ "resolved": "https://registry.npmjs.org/browserify-sign/-/browserify-sign-4.2.1.tgz",
+ "integrity": "sha512-/vrA5fguVAKKAVTNJjgSm1tRQDHUU6DbwO9IROu/0WAzC8PKhucDSh18J0RMvVeHAn5puMd+QHC2erPRNf8lmg==",
+ "requires": {
+ "bn.js": "^5.1.1",
+ "browserify-rsa": "^4.0.1",
+ "create-hash": "^1.2.0",
+ "create-hmac": "^1.1.7",
+ "elliptic": "^6.5.3",
+ "inherits": "^2.0.4",
+ "parse-asn1": "^5.1.5",
+ "readable-stream": "^3.6.0",
+ "safe-buffer": "^5.2.0"
+ },
+ "dependencies": {
+ "readable-stream": {
+ "version": "3.6.0",
+ "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.0.tgz",
+ "integrity": "sha512-BViHy7LKeTz4oNnkcLJ+lVSL6vpiFeX6/d3oSH8zCW7UxP2onchk+vTGB143xuFjHS3deTgkKoXXymXqymiIdA==",
+ "requires": {
+ "inherits": "^2.0.3",
+ "string_decoder": "^1.1.1",
+ "util-deprecate": "^1.0.1"
+ }
+ },
+ "safe-buffer": {
+ "version": "5.2.1",
+ "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz",
+ "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ=="
+ }
+ }
+ },
+ "browserify-zlib": {
+ "version": "0.2.0",
+ "resolved": "https://registry.npmjs.org/browserify-zlib/-/browserify-zlib-0.2.0.tgz",
+ "integrity": "sha512-Z942RysHXmJrhqk88FmKBVq/v5tqmSkDz7p54G/MGyjMnCFFnC79XWNbg+Vta8W6Wb2qtSZTSxIGkJrRpCFEiA==",
+ "requires": {
+ "pako": "~1.0.5"
+ }
+ },
+ "browserslist": {
+ "version": "4.16.5",
+ "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.16.5.tgz",
+ "integrity": "sha512-C2HAjrM1AI/djrpAUU/tr4pml1DqLIzJKSLDBXBrNErl9ZCCTXdhwxdJjYc16953+mBWf7Lw+uUJgpgb8cN71A==",
+ "requires": {
+ "caniuse-lite": "^1.0.30001214",
+ "colorette": "^1.2.2",
+ "electron-to-chromium": "^1.3.719",
+ "escalade": "^3.1.1",
+ "node-releases": "^1.1.71"
+ }
+ },
+ "buffer": {
+ "version": "4.9.2",
+ "resolved": "https://registry.npmjs.org/buffer/-/buffer-4.9.2.tgz",
+ "integrity": "sha512-xq+q3SRMOxGivLhBNaUdC64hDTQwejJ+H0T/NB1XMtTVEwNTrfFF3gAxiyW0Bu/xWEGhjVKgUcMhCrUy2+uCWg==",
+ "requires": {
+ "base64-js": "^1.0.2",
+ "ieee754": "^1.1.4",
+ "isarray": "^1.0.0"
+ }
+ },
+ "buffer-from": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.1.tgz",
+ "integrity": "sha512-MQcXEUbCKtEo7bhqEs6560Hyd4XaovZlO/k9V3hjVUF/zwW7KBVdSK4gIt/bzwS9MbR5qob+F5jusZsb0YQK2A=="
+ },
+ "buffer-indexof": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/buffer-indexof/-/buffer-indexof-1.1.1.tgz",
+ "integrity": "sha512-4/rOEg86jivtPTeOUUT61jJO1Ya1TrR/OkqCSZDyq84WJh3LuuiphBYJN+fm5xufIk4XAFcEwte/8WzC8If/1g=="
+ },
+ "buffer-json": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/buffer-json/-/buffer-json-2.0.0.tgz",
+ "integrity": "sha512-+jjPFVqyfF1esi9fvfUs3NqM0pH1ziZ36VP4hmA/y/Ssfo/5w5xHKfTw9BwQjoJ1w/oVtpLomqwUHKdefGyuHw=="
+ },
+ "buffer-xor": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/buffer-xor/-/buffer-xor-1.0.3.tgz",
+ "integrity": "sha1-JuYe0UIvtw3ULm42cp7VHYVf6Nk="
+ },
+ "builtin-status-codes": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/builtin-status-codes/-/builtin-status-codes-3.0.0.tgz",
+ "integrity": "sha1-hZgoeOIbmOHGZCXgPQF0eI9Wnug="
+ },
+ "bytes": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.0.0.tgz",
+ "integrity": "sha1-0ygVQE1olpn4Wk6k+odV3ROpYEg="
+ },
+ "cac": {
+ "version": "6.7.3",
+ "resolved": "https://registry.npmjs.org/cac/-/cac-6.7.3.tgz",
+ "integrity": "sha512-ECVqVZh74qgSuZG9YOt2OJPI3wGcf+EwwuF/XIOYqZBD0KZYLtgPWqFPxmDPQ6joxI1nOlvVgRV6VT53Ooyocg=="
+ },
+ "cacache": {
+ "version": "12.0.4",
+ "resolved": "https://registry.npmjs.org/cacache/-/cacache-12.0.4.tgz",
+ "integrity": "sha512-a0tMB40oefvuInr4Cwb3GerbL9xTj1D5yg0T5xrjGCGyfvbxseIXX7BAO/u/hIXdafzOI5JC3wDwHyf24buOAQ==",
+ "requires": {
+ "bluebird": "^3.5.5",
+ "chownr": "^1.1.1",
+ "figgy-pudding": "^3.5.1",
+ "glob": "^7.1.4",
+ "graceful-fs": "^4.1.15",
+ "infer-owner": "^1.0.3",
+ "lru-cache": "^5.1.1",
+ "mississippi": "^3.0.0",
+ "mkdirp": "^0.5.1",
+ "move-concurrently": "^1.0.1",
+ "promise-inflight": "^1.0.1",
+ "rimraf": "^2.6.3",
+ "ssri": "^6.0.1",
+ "unique-filename": "^1.1.1",
+ "y18n": "^4.0.0"
+ },
+ "dependencies": {
+ "mkdirp": {
+ "version": "0.5.5",
+ "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz",
+ "integrity": "sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==",
+ "requires": {
+ "minimist": "^1.2.5"
+ }
+ }
+ }
+ },
+ "cache-base": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/cache-base/-/cache-base-1.0.1.tgz",
+ "integrity": "sha512-AKcdTnFSWATd5/GCPRxr2ChwIJ85CeyrEyjRHlKxQ56d4XJMGym0uAiKn0xbLOGOl3+yRpOTi484dVCEc5AUzQ==",
+ "requires": {
+ "collection-visit": "^1.0.0",
+ "component-emitter": "^1.2.1",
+ "get-value": "^2.0.6",
+ "has-value": "^1.0.0",
+ "isobject": "^3.0.1",
+ "set-value": "^2.0.0",
+ "to-object-path": "^0.3.0",
+ "union-value": "^1.0.0",
+ "unset-value": "^1.0.0"
+ }
+ },
+ "cache-loader": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/cache-loader/-/cache-loader-3.0.1.tgz",
+ "integrity": "sha512-HzJIvGiGqYsFUrMjAJNDbVZoG7qQA+vy9AIoKs7s9DscNfki0I589mf2w6/tW+kkFH3zyiknoWV5Jdynu6b/zw==",
+ "requires": {
+ "buffer-json": "^2.0.0",
+ "find-cache-dir": "^2.1.0",
+ "loader-utils": "^1.2.3",
+ "mkdirp": "^0.5.1",
+ "neo-async": "^2.6.1",
+ "schema-utils": "^1.0.0"
+ },
+ "dependencies": {
+ "find-cache-dir": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/find-cache-dir/-/find-cache-dir-2.1.0.tgz",
+ "integrity": "sha512-Tq6PixE0w/VMFfCgbONnkiQIVol/JJL7nRMi20fqzA4NRs9AfeqMGeRdPi3wIhYkxjeBaWh2rxwapn5Tu3IqOQ==",
+ "requires": {
+ "commondir": "^1.0.1",
+ "make-dir": "^2.0.0",
+ "pkg-dir": "^3.0.0"
+ }
+ },
+ "find-up": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz",
+ "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==",
+ "requires": {
+ "locate-path": "^3.0.0"
+ }
+ },
+ "locate-path": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz",
+ "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==",
+ "requires": {
+ "p-locate": "^3.0.0",
+ "path-exists": "^3.0.0"
+ }
+ },
+ "make-dir": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-2.1.0.tgz",
+ "integrity": "sha512-LS9X+dc8KLxXCb8dni79fLIIUA5VyZoyjSMCwTluaXA0o27cCK0bhXkpgw+sTXVpPy/lSO57ilRixqk0vDmtRA==",
+ "requires": {
+ "pify": "^4.0.1",
+ "semver": "^5.6.0"
+ }
+ },
+ "mkdirp": {
+ "version": "0.5.5",
+ "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz",
+ "integrity": "sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==",
+ "requires": {
+ "minimist": "^1.2.5"
+ }
+ },
+ "p-locate": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz",
+ "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==",
+ "requires": {
+ "p-limit": "^2.0.0"
+ }
+ },
+ "path-exists": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz",
+ "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU="
+ },
+ "pkg-dir": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-3.0.0.tgz",
+ "integrity": "sha512-/E57AYkoeQ25qkxMj5PBOVgF8Kiu/h7cYS30Z5+R7WaiCCBfLq58ZI/dSeaEKb9WVJV5n/03QwrN3IeWIFllvw==",
+ "requires": {
+ "find-up": "^3.0.0"
+ }
+ },
+ "schema-utils": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-1.0.0.tgz",
+ "integrity": "sha512-i27Mic4KovM/lnGsy8whRCHhc7VicJajAjTrYg11K9zfZXnYIt4k5F+kZkwjnrhKzLic/HLU4j11mjsz2G/75g==",
+ "requires": {
+ "ajv": "^6.1.0",
+ "ajv-errors": "^1.0.0",
+ "ajv-keywords": "^3.1.0"
+ }
+ },
+ "semver": {
+ "version": "5.7.1",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz",
+ "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ=="
+ }
+ }
+ },
+ "cacheable-request": {
+ "version": "6.1.0",
+ "resolved": "https://registry.npmjs.org/cacheable-request/-/cacheable-request-6.1.0.tgz",
+ "integrity": "sha512-Oj3cAGPCqOZX7Rz64Uny2GYAZNliQSqfbePrgAQ1wKAihYmCUnraBtJtKcGR4xz7wF+LoJC+ssFZvv5BgF9Igg==",
+ "requires": {
+ "clone-response": "^1.0.2",
+ "get-stream": "^5.1.0",
+ "http-cache-semantics": "^4.0.0",
+ "keyv": "^3.0.0",
+ "lowercase-keys": "^2.0.0",
+ "normalize-url": "^4.1.0",
+ "responselike": "^1.0.2"
+ },
+ "dependencies": {
+ "get-stream": {
+ "version": "5.2.0",
+ "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-5.2.0.tgz",
+ "integrity": "sha512-nBF+F1rAZVCu/p7rjzgA+Yb4lfYXrpl7a6VmJrU8wF9I1CKvP/QwPNZHnOlwbTkY6dvtFIzFMSyQXbLoTQPRpA==",
+ "requires": {
+ "pump": "^3.0.0"
+ }
+ },
+ "lowercase-keys": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-2.0.0.tgz",
+ "integrity": "sha512-tqNXrS78oMOE73NMxK4EMLQsQowWf8jKooH9g7xPavRT706R6bkQJ6DY2Te7QukaZsulxa30wQ7bk0pm4XiHmA=="
+ },
+ "normalize-url": {
+ "version": "4.5.0",
+ "resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-4.5.0.tgz",
+ "integrity": "sha512-2s47yzUxdexf1OhyRi4Em83iQk0aPvwTddtFz4hnSSw9dCEsLEGf6SwIO8ss/19S9iBb5sJaOuTvTGDeZI00BQ=="
+ }
+ }
+ },
+ "call-bind": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.2.tgz",
+ "integrity": "sha512-7O+FbCihrB5WGbFYesctwmTKae6rOiIzmz1icreWJ+0aA7LJfuqhEso2T9ncpcFtzMQtzXf2QGGueWJGTYsqrA==",
+ "requires": {
+ "function-bind": "^1.1.1",
+ "get-intrinsic": "^1.0.2"
+ }
+ },
+ "call-me-maybe": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/call-me-maybe/-/call-me-maybe-1.0.1.tgz",
+ "integrity": "sha1-JtII6onje1y95gJQoV8DHBak1ms="
+ },
+ "caller-callsite": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/caller-callsite/-/caller-callsite-2.0.0.tgz",
+ "integrity": "sha1-hH4PzgoiN1CpoCfFSzNzGtMVQTQ=",
+ "requires": {
+ "callsites": "^2.0.0"
+ }
+ },
+ "caller-path": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/caller-path/-/caller-path-2.0.0.tgz",
+ "integrity": "sha1-Ro+DBE42mrIBD6xfBs7uFbsssfQ=",
+ "requires": {
+ "caller-callsite": "^2.0.0"
+ }
+ },
+ "callsites": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/callsites/-/callsites-2.0.0.tgz",
+ "integrity": "sha1-BuuE8A7qQT2oav/vrL/7Ngk7PFA="
+ },
+ "camel-case": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/camel-case/-/camel-case-3.0.0.tgz",
+ "integrity": "sha1-yjw2iKTpzzpM2nd9xNy8cTJJz3M=",
+ "requires": {
+ "no-case": "^2.2.0",
+ "upper-case": "^1.1.1"
+ }
+ },
+ "camelcase": {
+ "version": "6.2.0",
+ "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.2.0.tgz",
+ "integrity": "sha512-c7wVvbw3f37nuobQNtgsgG9POC9qMbNuMQmTCqZv23b6MIz0fcYpBiOlv9gEN/hdLdnZTDQhg6e9Dq5M1vKvfg=="
+ },
+ "caniuse-api": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/caniuse-api/-/caniuse-api-3.0.0.tgz",
+ "integrity": "sha512-bsTwuIg/BZZK/vreVTYYbSWoe2F+71P7K5QGEX+pT250DZbfU1MQ5prOKpPR+LL6uWKK3KMwMCAS74QB3Um1uw==",
+ "requires": {
+ "browserslist": "^4.0.0",
+ "caniuse-lite": "^1.0.0",
+ "lodash.memoize": "^4.1.2",
+ "lodash.uniq": "^4.5.0"
+ }
+ },
+ "caniuse-lite": {
+ "version": "1.0.30001219",
+ "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001219.tgz",
+ "integrity": "sha512-c0yixVG4v9KBc/tQ2rlbB3A/bgBFRvl8h8M4IeUbqCca4gsiCfvtaheUssbnux/Mb66Vjz7x8yYjDgYcNQOhyQ=="
+ },
+ "caseless": {
+ "version": "0.12.0",
+ "resolved": "https://registry.npmjs.org/caseless/-/caseless-0.12.0.tgz",
+ "integrity": "sha1-G2gcIf+EAzyCZUMJBolCDRhxUdw="
+ },
+ "chalk": {
+ "version": "2.4.2",
+ "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz",
+ "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==",
+ "requires": {
+ "ansi-styles": "^3.2.1",
+ "escape-string-regexp": "^1.0.5",
+ "supports-color": "^5.3.0"
+ }
+ },
+ "character-parser": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/character-parser/-/character-parser-2.2.0.tgz",
+ "integrity": "sha1-x84o821LzZdE5f/CxfzeHHMmH8A=",
+ "requires": {
+ "is-regex": "^1.0.3"
+ }
+ },
+ "cheerio": {
+ "version": "1.0.0-rc.6",
+ "resolved": "https://registry.npmjs.org/cheerio/-/cheerio-1.0.0-rc.6.tgz",
+ "integrity": "sha512-hjx1XE1M/D5pAtMgvWwE21QClmAEeGHOIDfycgmndisdNgI6PE1cGRQkMGBcsbUbmEQyWu5PJLUcAOjtQS8DWw==",
+ "requires": {
+ "cheerio-select": "^1.3.0",
+ "dom-serializer": "^1.3.1",
+ "domhandler": "^4.1.0",
+ "htmlparser2": "^6.1.0",
+ "parse5": "^6.0.1",
+ "parse5-htmlparser2-tree-adapter": "^6.0.1"
+ }
+ },
+ "cheerio-select": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/cheerio-select/-/cheerio-select-1.4.0.tgz",
+ "integrity": "sha512-sobR3Yqz27L553Qa7cK6rtJlMDbiKPdNywtR95Sj/YgfpLfy0u6CGJuaBKe5YE/vTc23SCRKxWSdlon/w6I/Ew==",
+ "requires": {
+ "css-select": "^4.1.2",
+ "css-what": "^5.0.0",
+ "domelementtype": "^2.2.0",
+ "domhandler": "^4.2.0",
+ "domutils": "^2.6.0"
+ }
+ },
+ "chokidar": {
+ "version": "2.1.8",
+ "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-2.1.8.tgz",
+ "integrity": "sha512-ZmZUazfOzf0Nve7duiCKD23PFSCs4JPoYyccjUFF3aQkQadqBhfzhjkwBH2mNOG9cTBwhamM37EIsIkZw3nRgg==",
+ "requires": {
+ "anymatch": "^2.0.0",
+ "async-each": "^1.0.1",
+ "braces": "^2.3.2",
+ "fsevents": "^1.2.7",
+ "glob-parent": "^3.1.0",
+ "inherits": "^2.0.3",
+ "is-binary-path": "^1.0.0",
+ "is-glob": "^4.0.0",
+ "normalize-path": "^3.0.0",
+ "path-is-absolute": "^1.0.0",
+ "readdirp": "^2.2.1",
+ "upath": "^1.1.1"
+ }
+ },
+ "chownr": {
+ "version": "1.1.4",
+ "resolved": "https://registry.npmjs.org/chownr/-/chownr-1.1.4.tgz",
+ "integrity": "sha512-jJ0bqzaylmJtVnNgzTeSOs8DPavpbYgEr/b0YL8/2GO3xJEhInFmhKMUnEJQjZumK7KXGFhUy89PrsJWlakBVg=="
+ },
+ "chrome-trace-event": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/chrome-trace-event/-/chrome-trace-event-1.0.3.tgz",
+ "integrity": "sha512-p3KULyQg4S7NIHixdwbGX+nFHkoBiA4YQmyWtjb8XngSKV124nJmRysgAeujbUVb15vh+RvFUfCPqU7rXk+hZg=="
+ },
+ "ci-info": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-2.0.0.tgz",
+ "integrity": "sha512-5tK7EtrZ0N+OLFMthtqOj4fI2Jeb88C4CAZPu25LDVUgXJ0A3Js4PMGqrn0JU1W0Mh1/Z8wZzYPxqUrXeBboCQ=="
+ },
+ "cipher-base": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/cipher-base/-/cipher-base-1.0.4.tgz",
+ "integrity": "sha512-Kkht5ye6ZGmwv40uUDZztayT2ThLQGfnj/T71N/XzeZeo3nf8foyW7zGTsPYkEya3m5f3cAypH+qe7YOrM1U2Q==",
+ "requires": {
+ "inherits": "^2.0.1",
+ "safe-buffer": "^5.0.1"
+ }
+ },
+ "class-utils": {
+ "version": "0.3.6",
+ "resolved": "https://registry.npmjs.org/class-utils/-/class-utils-0.3.6.tgz",
+ "integrity": "sha512-qOhPa/Fj7s6TY8H8esGu5QNpMMQxz79h+urzrNYN6mn+9BnxlDGf5QZ+XeCDsxSjPqsSR56XOZOJmpeurnLMeg==",
+ "requires": {
+ "arr-union": "^3.1.0",
+ "define-property": "^0.2.5",
+ "isobject": "^3.0.0",
+ "static-extend": "^0.1.1"
+ },
+ "dependencies": {
+ "define-property": {
+ "version": "0.2.5",
+ "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz",
+ "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=",
+ "requires": {
+ "is-descriptor": "^0.1.0"
+ }
+ },
+ "is-accessor-descriptor": {
+ "version": "0.1.6",
+ "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-0.1.6.tgz",
+ "integrity": "sha1-qeEss66Nh2cn7u84Q/igiXtcmNY=",
+ "requires": {
+ "kind-of": "^3.0.2"
+ },
+ "dependencies": {
+ "kind-of": {
+ "version": "3.2.2",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz",
+ "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=",
+ "requires": {
+ "is-buffer": "^1.1.5"
+ }
+ }
+ }
+ },
+ "is-data-descriptor": {
+ "version": "0.1.4",
+ "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-0.1.4.tgz",
+ "integrity": "sha1-C17mSDiOLIYCgueT8YVv7D8wG1Y=",
+ "requires": {
+ "kind-of": "^3.0.2"
+ },
+ "dependencies": {
+ "kind-of": {
+ "version": "3.2.2",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz",
+ "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=",
+ "requires": {
+ "is-buffer": "^1.1.5"
+ }
+ }
+ }
+ },
+ "is-descriptor": {
+ "version": "0.1.6",
+ "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-0.1.6.tgz",
+ "integrity": "sha512-avDYr0SB3DwO9zsMov0gKCESFYqCnE4hq/4z3TdUlukEy5t9C0YRq7HLrsN52NAcqXKaepeCD0n+B0arnVG3Hg==",
+ "requires": {
+ "is-accessor-descriptor": "^0.1.6",
+ "is-data-descriptor": "^0.1.4",
+ "kind-of": "^5.0.0"
+ }
+ },
+ "kind-of": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-5.1.0.tgz",
+ "integrity": "sha512-NGEErnH6F2vUuXDh+OlbcKW7/wOcfdRHaZ7VWtqCztfHri/++YKmP51OdWeGPuqCOba6kk2OTe5d02VmTB80Pw=="
+ }
+ }
+ },
+ "clean-css": {
+ "version": "4.2.3",
+ "resolved": "https://registry.npmjs.org/clean-css/-/clean-css-4.2.3.tgz",
+ "integrity": "sha512-VcMWDN54ZN/DS+g58HYL5/n4Zrqe8vHJpGA8KdgUXFU4fuP/aHNw8eld9SyEIyabIMJX/0RaY/fplOo5hYLSFA==",
+ "requires": {
+ "source-map": "~0.6.0"
+ },
+ "dependencies": {
+ "source-map": {
+ "version": "0.6.1",
+ "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz",
+ "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g=="
+ }
+ }
+ },
+ "cli-boxes": {
+ "version": "2.2.1",
+ "resolved": "https://registry.npmjs.org/cli-boxes/-/cli-boxes-2.2.1.tgz",
+ "integrity": "sha512-y4coMcylgSCdVinjiDBuR8PCC2bLjyGTwEmPb9NHR/QaNU6EUOXcTY/s6VjGMD6ENSEaeQYHCY0GNGS5jfMwPw=="
+ },
+ "clipboard": {
+ "version": "2.0.8",
+ "resolved": "https://registry.npmjs.org/clipboard/-/clipboard-2.0.8.tgz",
+ "integrity": "sha512-Y6WO0unAIQp5bLmk1zdThRhgJt/x3ks6f30s3oE3H1mgIEU33XyQjEf8gsf6DxC7NPX8Y1SsNWjUjL/ywLnnbQ==",
+ "optional": true,
+ "requires": {
+ "good-listener": "^1.2.2",
+ "select": "^1.1.2",
+ "tiny-emitter": "^2.0.0"
+ }
+ },
+ "clipboard-copy": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/clipboard-copy/-/clipboard-copy-3.2.0.tgz",
+ "integrity": "sha512-vooFaGFL6ulEP1liiaWFBmmfuPm3cY3y7T9eB83ZTnYc/oFeAKsq3NcDrOkBC8XaauEE8zHQwI7k0+JSYiVQSQ=="
+ },
+ "cliui": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/cliui/-/cliui-5.0.0.tgz",
+ "integrity": "sha512-PYeGSEmmHM6zvoef2w8TPzlrnNpXIjTipYK780YswmIP9vjxmd6Y2a3CB2Ks6/AU8NHjZugXvo8w3oWM2qnwXA==",
+ "requires": {
+ "string-width": "^3.1.0",
+ "strip-ansi": "^5.2.0",
+ "wrap-ansi": "^5.1.0"
+ },
+ "dependencies": {
+ "ansi-regex": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.0.tgz",
+ "integrity": "sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg=="
+ },
+ "emoji-regex": {
+ "version": "7.0.3",
+ "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-7.0.3.tgz",
+ "integrity": "sha512-CwBLREIQ7LvYFB0WyRvwhq5N5qPhc6PMjD6bYggFlI5YyDgl+0vxq5VHbMOFqLg7hfWzmu8T5Z1QofhmTIhItA=="
+ },
+ "is-fullwidth-code-point": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz",
+ "integrity": "sha1-o7MKXE8ZkYMWeqq5O+764937ZU8="
+ },
+ "string-width": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/string-width/-/string-width-3.1.0.tgz",
+ "integrity": "sha512-vafcv6KjVZKSgz06oM/H6GDBrAtz8vdhQakGjFIvNrHA6y3HCF1CInLy+QLq8dTJPQ1b+KDUqDFctkdRW44e1w==",
+ "requires": {
+ "emoji-regex": "^7.0.1",
+ "is-fullwidth-code-point": "^2.0.0",
+ "strip-ansi": "^5.1.0"
+ }
+ },
+ "strip-ansi": {
+ "version": "5.2.0",
+ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-5.2.0.tgz",
+ "integrity": "sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA==",
+ "requires": {
+ "ansi-regex": "^4.1.0"
+ }
+ }
+ }
+ },
+ "clone-response": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/clone-response/-/clone-response-1.0.2.tgz",
+ "integrity": "sha1-0dyXOSAxTfZ/vrlCI7TuNQI56Ws=",
+ "requires": {
+ "mimic-response": "^1.0.0"
+ }
+ },
+ "coa": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/coa/-/coa-2.0.2.tgz",
+ "integrity": "sha512-q5/jG+YQnSy4nRTV4F7lPepBJZ8qBNJJDBuJdoejDyLXgmL7IEo+Le2JDZudFTFt7mrCqIRaSjws4ygRCTCAXA==",
+ "requires": {
+ "@types/q": "^1.5.1",
+ "chalk": "^2.4.1",
+ "q": "^1.1.2"
+ }
+ },
+ "collection-visit": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/collection-visit/-/collection-visit-1.0.0.tgz",
+ "integrity": "sha1-S8A3PBZLwykbTTaMgpzxqApZ3KA=",
+ "requires": {
+ "map-visit": "^1.0.0",
+ "object-visit": "^1.0.0"
+ }
+ },
+ "color": {
+ "version": "3.1.3",
+ "resolved": "https://registry.npmjs.org/color/-/color-3.1.3.tgz",
+ "integrity": "sha512-xgXAcTHa2HeFCGLE9Xs/R82hujGtu9Jd9x4NW3T34+OMs7VoPsjwzRczKHvTAHeJwWFwX5j15+MgAppE8ztObQ==",
+ "requires": {
+ "color-convert": "^1.9.1",
+ "color-string": "^1.5.4"
+ }
+ },
+ "color-convert": {
+ "version": "1.9.3",
+ "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz",
+ "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==",
+ "requires": {
+ "color-name": "1.1.3"
+ }
+ },
+ "color-name": {
+ "version": "1.1.3",
+ "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz",
+ "integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU="
+ },
+ "color-string": {
+ "version": "1.5.5",
+ "resolved": "https://registry.npmjs.org/color-string/-/color-string-1.5.5.tgz",
+ "integrity": "sha512-jgIoum0OfQfq9Whcfc2z/VhCNcmQjWbey6qBX0vqt7YICflUmBCh9E9CiQD5GSJ+Uehixm3NUwHVhqUAWRivZg==",
+ "requires": {
+ "color-name": "^1.0.0",
+ "simple-swizzle": "^0.2.2"
+ }
+ },
+ "colorette": {
+ "version": "1.2.2",
+ "resolved": "https://registry.npmjs.org/colorette/-/colorette-1.2.2.tgz",
+ "integrity": "sha512-MKGMzyfeuutC/ZJ1cba9NqcNpfeqMUcYmyF1ZFY6/Cn7CNSAKx6a+s48sqLqyAiZuaP2TcqMhoo+dlwFnVxT9w=="
+ },
+ "combined-stream": {
+ "version": "1.0.8",
+ "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz",
+ "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==",
+ "requires": {
+ "delayed-stream": "~1.0.0"
+ }
+ },
+ "commander": {
+ "version": "2.20.3",
+ "resolved": "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz",
+ "integrity": "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ=="
+ },
+ "commondir": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/commondir/-/commondir-1.0.1.tgz",
+ "integrity": "sha1-3dgA2gxmEnOTzKWVDqloo6rxJTs="
+ },
+ "component-emitter": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/component-emitter/-/component-emitter-1.3.0.tgz",
+ "integrity": "sha512-Rd3se6QB+sO1TwqZjscQrurpEPIfO0/yYnSin6Q/rD3mOutHvUrCAhJub3r90uNb+SESBuE0QYoB90YdfatsRg=="
+ },
+ "compressible": {
+ "version": "2.0.18",
+ "resolved": "https://registry.npmjs.org/compressible/-/compressible-2.0.18.tgz",
+ "integrity": "sha512-AF3r7P5dWxL8MxyITRMlORQNaOA2IkAFaTr4k7BUumjPtRpGDTZpl0Pb1XCO6JeDCBdp126Cgs9sMxqSjgYyRg==",
+ "requires": {
+ "mime-db": ">= 1.43.0 < 2"
+ }
+ },
+ "compression": {
+ "version": "1.7.4",
+ "resolved": "https://registry.npmjs.org/compression/-/compression-1.7.4.tgz",
+ "integrity": "sha512-jaSIDzP9pZVS4ZfQ+TzvtiWhdpFhE2RDHz8QJkpX9SIpLq88VueF5jJw6t+6CUQcAoA6t+x89MLrWAqpfDE8iQ==",
+ "requires": {
+ "accepts": "~1.3.5",
+ "bytes": "3.0.0",
+ "compressible": "~2.0.16",
+ "debug": "2.6.9",
+ "on-headers": "~1.0.2",
+ "safe-buffer": "5.1.2",
+ "vary": "~1.1.2"
+ }
+ },
+ "concat-map": {
+ "version": "0.0.1",
+ "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz",
+ "integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s="
+ },
+ "concat-stream": {
+ "version": "1.6.2",
+ "resolved": "https://registry.npmjs.org/concat-stream/-/concat-stream-1.6.2.tgz",
+ "integrity": "sha512-27HBghJxjiZtIk3Ycvn/4kbJk/1uZuJFfuPEns6LaEvpvG1f0hTea8lilrouyo9mVc2GWdcEZ8OLoGmSADlrCw==",
+ "requires": {
+ "buffer-from": "^1.0.0",
+ "inherits": "^2.0.3",
+ "readable-stream": "^2.2.2",
+ "typedarray": "^0.0.6"
+ }
+ },
+ "configstore": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/configstore/-/configstore-5.0.1.tgz",
+ "integrity": "sha512-aMKprgk5YhBNyH25hj8wGt2+D52Sw1DRRIzqBwLp2Ya9mFmY8KPvvtvmna8SxVR9JMZ4kzMD68N22vlaRpkeFA==",
+ "requires": {
+ "dot-prop": "^5.2.0",
+ "graceful-fs": "^4.1.2",
+ "make-dir": "^3.0.0",
+ "unique-string": "^2.0.0",
+ "write-file-atomic": "^3.0.0",
+ "xdg-basedir": "^4.0.0"
+ }
+ },
+ "connect-history-api-fallback": {
+ "version": "1.6.0",
+ "resolved": "https://registry.npmjs.org/connect-history-api-fallback/-/connect-history-api-fallback-1.6.0.tgz",
+ "integrity": "sha512-e54B99q/OUoH64zYYRf3HBP5z24G38h5D3qXu23JGRoigpX5Ss4r9ZnDk3g0Z8uQC2x2lPaJ+UlWBc1ZWBWdLg=="
+ },
+ "consola": {
+ "version": "2.15.3",
+ "resolved": "https://registry.npmjs.org/consola/-/consola-2.15.3.tgz",
+ "integrity": "sha512-9vAdYbHj6x2fLKC4+oPH0kFzY/orMZyG2Aj+kNylHxKGJ/Ed4dpNyAQYwJOdqO4zdM7XpVHmyejQDcQHrnuXbw=="
+ },
+ "console-browserify": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/console-browserify/-/console-browserify-1.2.0.tgz",
+ "integrity": "sha512-ZMkYO/LkF17QvCPqM0gxw8yUzigAOZOSWSHg91FH6orS7vcEj5dVZTidN2fQ14yBSdg97RqhSNwLUXInd52OTA=="
+ },
+ "consolidate": {
+ "version": "0.15.1",
+ "resolved": "https://registry.npmjs.org/consolidate/-/consolidate-0.15.1.tgz",
+ "integrity": "sha512-DW46nrsMJgy9kqAbPt5rKaCr7uFtpo4mSUvLHIUbJEjm0vo+aY5QLwBUq3FK4tRnJr/X0Psc0C4jf/h+HtXSMw==",
+ "requires": {
+ "bluebird": "^3.1.1"
+ }
+ },
+ "constantinople": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/constantinople/-/constantinople-4.0.1.tgz",
+ "integrity": "sha512-vCrqcSIq4//Gx74TXXCGnHpulY1dskqLTFGDmhrGxzeXL8lF8kvXv6mpNWlJj1uD4DW23D4ljAqbY4RRaaUZIw==",
+ "requires": {
+ "@babel/parser": "^7.6.0",
+ "@babel/types": "^7.6.1"
+ }
+ },
+ "constants-browserify": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/constants-browserify/-/constants-browserify-1.0.0.tgz",
+ "integrity": "sha1-wguW2MYXdIqvHBYCF2DNJ/y4y3U="
+ },
+ "content-disposition": {
+ "version": "0.5.3",
+ "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.3.tgz",
+ "integrity": "sha512-ExO0774ikEObIAEV9kDo50o+79VCUdEB6n6lzKgGwupcVeRlhrj3qGAfwq8G6uBJjkqLrhT0qEYFcWng8z1z0g==",
+ "requires": {
+ "safe-buffer": "5.1.2"
+ }
+ },
+ "content-type": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.4.tgz",
+ "integrity": "sha512-hIP3EEPs8tB9AT1L+NUqtwOAps4mk2Zob89MWXMHjHWg9milF/j4osnnQLXBCBFBk/tvIG/tUc9mOUJiPBhPXA=="
+ },
+ "convert-source-map": {
+ "version": "1.7.0",
+ "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-1.7.0.tgz",
+ "integrity": "sha512-4FJkXzKXEDB1snCFZlLP4gpC3JILicCpGbzG9f9G7tGqGCzETQ2hWPrcinA9oU4wtf2biUaEH5065UnMeR33oA==",
+ "requires": {
+ "safe-buffer": "~5.1.1"
+ }
+ },
+ "cookie": {
+ "version": "0.4.0",
+ "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.4.0.tgz",
+ "integrity": "sha512-+Hp8fLp57wnUSt0tY0tHEXh4voZRDnoIrZPqlo3DPiI4y9lwg/jqx+1Om94/W6ZaPDOUbnjOt/99w66zk+l1Xg=="
+ },
+ "cookie-signature": {
+ "version": "1.0.6",
+ "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.6.tgz",
+ "integrity": "sha1-4wOogrNCzD7oylE6eZmXNNqzriw="
+ },
+ "copy-concurrently": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/copy-concurrently/-/copy-concurrently-1.0.5.tgz",
+ "integrity": "sha512-f2domd9fsVDFtaFcbaRZuYXwtdmnzqbADSwhSWYxYB/Q8zsdUUFMXVRwXGDMWmbEzAn1kdRrtI1T/KTFOL4X2A==",
+ "requires": {
+ "aproba": "^1.1.1",
+ "fs-write-stream-atomic": "^1.0.8",
+ "iferr": "^0.1.5",
+ "mkdirp": "^0.5.1",
+ "rimraf": "^2.5.4",
+ "run-queue": "^1.0.0"
+ },
+ "dependencies": {
+ "mkdirp": {
+ "version": "0.5.5",
+ "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz",
+ "integrity": "sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==",
+ "requires": {
+ "minimist": "^1.2.5"
+ }
+ }
+ }
+ },
+ "copy-descriptor": {
+ "version": "0.1.1",
+ "resolved": "https://registry.npmjs.org/copy-descriptor/-/copy-descriptor-0.1.1.tgz",
+ "integrity": "sha1-Z29us8OZl8LuGsOpJP1hJHSPV40="
+ },
+ "copy-webpack-plugin": {
+ "version": "5.1.2",
+ "resolved": "https://registry.npmjs.org/copy-webpack-plugin/-/copy-webpack-plugin-5.1.2.tgz",
+ "integrity": "sha512-Uh7crJAco3AjBvgAy9Z75CjK8IG+gxaErro71THQ+vv/bl4HaQcpkexAY8KVW/T6D2W2IRr+couF/knIRkZMIQ==",
+ "requires": {
+ "cacache": "^12.0.3",
+ "find-cache-dir": "^2.1.0",
+ "glob-parent": "^3.1.0",
+ "globby": "^7.1.1",
+ "is-glob": "^4.0.1",
+ "loader-utils": "^1.2.3",
+ "minimatch": "^3.0.4",
+ "normalize-path": "^3.0.0",
+ "p-limit": "^2.2.1",
+ "schema-utils": "^1.0.0",
+ "serialize-javascript": "^4.0.0",
+ "webpack-log": "^2.0.0"
+ },
+ "dependencies": {
+ "find-cache-dir": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/find-cache-dir/-/find-cache-dir-2.1.0.tgz",
+ "integrity": "sha512-Tq6PixE0w/VMFfCgbONnkiQIVol/JJL7nRMi20fqzA4NRs9AfeqMGeRdPi3wIhYkxjeBaWh2rxwapn5Tu3IqOQ==",
+ "requires": {
+ "commondir": "^1.0.1",
+ "make-dir": "^2.0.0",
+ "pkg-dir": "^3.0.0"
+ }
+ },
+ "find-up": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz",
+ "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==",
+ "requires": {
+ "locate-path": "^3.0.0"
+ }
+ },
+ "globby": {
+ "version": "7.1.1",
+ "resolved": "https://registry.npmjs.org/globby/-/globby-7.1.1.tgz",
+ "integrity": "sha1-+yzP+UAfhgCUXfral0QMypcrhoA=",
+ "requires": {
+ "array-union": "^1.0.1",
+ "dir-glob": "^2.0.0",
+ "glob": "^7.1.2",
+ "ignore": "^3.3.5",
+ "pify": "^3.0.0",
+ "slash": "^1.0.0"
+ },
+ "dependencies": {
+ "pify": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/pify/-/pify-3.0.0.tgz",
+ "integrity": "sha1-5aSs0sEB/fPZpNB/DbxNtJ3SgXY="
+ }
+ }
+ },
+ "ignore": {
+ "version": "3.3.10",
+ "resolved": "https://registry.npmjs.org/ignore/-/ignore-3.3.10.tgz",
+ "integrity": "sha512-Pgs951kaMm5GXP7MOvxERINe3gsaVjUWFm+UZPSq9xYriQAksyhg0csnS0KXSNRD5NmNdapXEpjxG49+AKh/ug=="
+ },
+ "locate-path": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz",
+ "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==",
+ "requires": {
+ "p-locate": "^3.0.0",
+ "path-exists": "^3.0.0"
+ }
+ },
+ "make-dir": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-2.1.0.tgz",
+ "integrity": "sha512-LS9X+dc8KLxXCb8dni79fLIIUA5VyZoyjSMCwTluaXA0o27cCK0bhXkpgw+sTXVpPy/lSO57ilRixqk0vDmtRA==",
+ "requires": {
+ "pify": "^4.0.1",
+ "semver": "^5.6.0"
+ }
+ },
+ "p-locate": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz",
+ "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==",
+ "requires": {
+ "p-limit": "^2.0.0"
+ }
+ },
+ "path-exists": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz",
+ "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU="
+ },
+ "pkg-dir": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-3.0.0.tgz",
+ "integrity": "sha512-/E57AYkoeQ25qkxMj5PBOVgF8Kiu/h7cYS30Z5+R7WaiCCBfLq58ZI/dSeaEKb9WVJV5n/03QwrN3IeWIFllvw==",
+ "requires": {
+ "find-up": "^3.0.0"
+ }
+ },
+ "schema-utils": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-1.0.0.tgz",
+ "integrity": "sha512-i27Mic4KovM/lnGsy8whRCHhc7VicJajAjTrYg11K9zfZXnYIt4k5F+kZkwjnrhKzLic/HLU4j11mjsz2G/75g==",
+ "requires": {
+ "ajv": "^6.1.0",
+ "ajv-errors": "^1.0.0",
+ "ajv-keywords": "^3.1.0"
+ }
+ },
+ "semver": {
+ "version": "5.7.1",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz",
+ "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ=="
+ },
+ "slash": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/slash/-/slash-1.0.0.tgz",
+ "integrity": "sha1-xB8vbDn8FtHNF61LXYlhFK5HDVU="
+ }
+ }
+ },
+ "core-js": {
+ "version": "3.11.1",
+ "resolved": "https://registry.npmjs.org/core-js/-/core-js-3.11.1.tgz",
+ "integrity": "sha512-k93Isqg7e4txZWMGNYwevZL9MiogLk8pd1PtwrmFmi8IBq4GXqUaVW/a33Llt6amSI36uSjd0GWwc9pTT9ALlQ=="
+ },
+ "core-js-compat": {
+ "version": "3.11.1",
+ "resolved": "https://registry.npmjs.org/core-js-compat/-/core-js-compat-3.11.1.tgz",
+ "integrity": "sha512-aZ0e4tmlG/aOBHj92/TuOuZwp6jFvn1WNabU5VOVixzhu5t5Ao+JZkQOPlgNXu6ynwLrwJxklT4Gw1G1VGEh+g==",
+ "requires": {
+ "browserslist": "^4.16.5",
+ "semver": "7.0.0"
+ },
+ "dependencies": {
+ "semver": {
+ "version": "7.0.0",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-7.0.0.tgz",
+ "integrity": "sha512-+GB6zVA9LWh6zovYQLALHwv5rb2PHGlJi3lfiqIHxR0uuwCgefcOJc59v9fv1w8GbStwxuuqqAjI9NMAOOgq1A=="
+ }
+ }
+ },
+ "core-util-is": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz",
+ "integrity": "sha1-tf1UIgqivFq1eqtxQMlAdUUDwac="
+ },
+ "cosmiconfig": {
+ "version": "5.2.1",
+ "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-5.2.1.tgz",
+ "integrity": "sha512-H65gsXo1SKjf8zmrJ67eJk8aIRKV5ff2D4uKZIBZShbhGSpEmsQOPW/SKMKYhSTrqR7ufy6RP69rPogdaPh/kA==",
+ "requires": {
+ "import-fresh": "^2.0.0",
+ "is-directory": "^0.3.1",
+ "js-yaml": "^3.13.1",
+ "parse-json": "^4.0.0"
+ }
+ },
+ "create-ecdh": {
+ "version": "4.0.4",
+ "resolved": "https://registry.npmjs.org/create-ecdh/-/create-ecdh-4.0.4.tgz",
+ "integrity": "sha512-mf+TCx8wWc9VpuxfP2ht0iSISLZnt0JgWlrOKZiNqyUZWnjIaCIVNQArMHnCZKfEYRg6IM7A+NeJoN8gf/Ws0A==",
+ "requires": {
+ "bn.js": "^4.1.0",
+ "elliptic": "^6.5.3"
+ },
+ "dependencies": {
+ "bn.js": {
+ "version": "4.12.0",
+ "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.12.0.tgz",
+ "integrity": "sha512-c98Bf3tPniI+scsdk237ku1Dc3ujXQTSgyiPUDEOe7tRkhrqridvh8klBv0HCEso1OLOYcHuCv/cS6DNxKH+ZA=="
+ }
+ }
+ },
+ "create-hash": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/create-hash/-/create-hash-1.2.0.tgz",
+ "integrity": "sha512-z00bCGNHDG8mHAkP7CtT1qVu+bFQUPjYq/4Iv3C3kWjTFV10zIjfSoeqXo9Asws8gwSHDGj/hl2u4OGIjapeCg==",
+ "requires": {
+ "cipher-base": "^1.0.1",
+ "inherits": "^2.0.1",
+ "md5.js": "^1.3.4",
+ "ripemd160": "^2.0.1",
+ "sha.js": "^2.4.0"
+ }
+ },
+ "create-hmac": {
+ "version": "1.1.7",
+ "resolved": "https://registry.npmjs.org/create-hmac/-/create-hmac-1.1.7.tgz",
+ "integrity": "sha512-MJG9liiZ+ogc4TzUwuvbER1JRdgvUFSB5+VR/g5h82fGaIRWMWddtKBHi7/sVhfjQZ6SehlyhvQYrcYkaUIpLg==",
+ "requires": {
+ "cipher-base": "^1.0.3",
+ "create-hash": "^1.1.0",
+ "inherits": "^2.0.1",
+ "ripemd160": "^2.0.0",
+ "safe-buffer": "^5.0.1",
+ "sha.js": "^2.4.8"
+ }
+ },
+ "cross-spawn": {
+ "version": "6.0.5",
+ "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-6.0.5.tgz",
+ "integrity": "sha512-eTVLrBSt7fjbDygz805pMnstIs2VTBNkRm0qxZd+M7A5XDdxVRWO5MxGBXZhjY4cqLYLdtrGqRf8mBPmzwSpWQ==",
+ "requires": {
+ "nice-try": "^1.0.4",
+ "path-key": "^2.0.1",
+ "semver": "^5.5.0",
+ "shebang-command": "^1.2.0",
+ "which": "^1.2.9"
+ },
+ "dependencies": {
+ "semver": {
+ "version": "5.7.1",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz",
+ "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ=="
+ }
+ }
+ },
+ "crypto-browserify": {
+ "version": "3.12.0",
+ "resolved": "https://registry.npmjs.org/crypto-browserify/-/crypto-browserify-3.12.0.tgz",
+ "integrity": "sha512-fz4spIh+znjO2VjL+IdhEpRJ3YN6sMzITSBijk6FK2UvTqruSQW+/cCZTSNsMiZNvUeq0CqurF+dAbyiGOY6Wg==",
+ "requires": {
+ "browserify-cipher": "^1.0.0",
+ "browserify-sign": "^4.0.0",
+ "create-ecdh": "^4.0.0",
+ "create-hash": "^1.1.0",
+ "create-hmac": "^1.1.0",
+ "diffie-hellman": "^5.0.0",
+ "inherits": "^2.0.1",
+ "pbkdf2": "^3.0.3",
+ "public-encrypt": "^4.0.0",
+ "randombytes": "^2.0.0",
+ "randomfill": "^1.0.3"
+ }
+ },
+ "crypto-random-string": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/crypto-random-string/-/crypto-random-string-2.0.0.tgz",
+ "integrity": "sha512-v1plID3y9r/lPhviJ1wrXpLeyUIGAZ2SHNYTEapm7/8A9nLPoyvVp3RK/EPFqn5kEznyWgYZNsRtYYIWbuG8KA=="
+ },
+ "css": {
+ "version": "2.2.4",
+ "resolved": "https://registry.npmjs.org/css/-/css-2.2.4.tgz",
+ "integrity": "sha512-oUnjmWpy0niI3x/mPL8dVEI1l7MnG3+HHyRPHf+YFSbK+svOhXpmSOcDURUh2aOCgl2grzrOPt1nHLuCVFULLw==",
+ "requires": {
+ "inherits": "^2.0.3",
+ "source-map": "^0.6.1",
+ "source-map-resolve": "^0.5.2",
+ "urix": "^0.1.0"
+ },
+ "dependencies": {
+ "source-map": {
+ "version": "0.6.1",
+ "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz",
+ "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g=="
+ }
+ }
+ },
+ "css-color-names": {
+ "version": "0.0.4",
+ "resolved": "https://registry.npmjs.org/css-color-names/-/css-color-names-0.0.4.tgz",
+ "integrity": "sha1-gIrcLnnPhHOAabZGyyDsJ762KeA="
+ },
+ "css-declaration-sorter": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/css-declaration-sorter/-/css-declaration-sorter-4.0.1.tgz",
+ "integrity": "sha512-BcxQSKTSEEQUftYpBVnsH4SF05NTuBokb19/sBt6asXGKZ/6VP7PLG1CBCkFDYOnhXhPh0jMhO6xZ71oYHXHBA==",
+ "requires": {
+ "postcss": "^7.0.1",
+ "timsort": "^0.3.0"
+ }
+ },
+ "css-loader": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/css-loader/-/css-loader-2.1.1.tgz",
+ "integrity": "sha512-OcKJU/lt232vl1P9EEDamhoO9iKY3tIjY5GU+XDLblAykTdgs6Ux9P1hTHve8nFKy5KPpOXOsVI/hIwi3841+w==",
+ "requires": {
+ "camelcase": "^5.2.0",
+ "icss-utils": "^4.1.0",
+ "loader-utils": "^1.2.3",
+ "normalize-path": "^3.0.0",
+ "postcss": "^7.0.14",
+ "postcss-modules-extract-imports": "^2.0.0",
+ "postcss-modules-local-by-default": "^2.0.6",
+ "postcss-modules-scope": "^2.1.0",
+ "postcss-modules-values": "^2.0.0",
+ "postcss-value-parser": "^3.3.0",
+ "schema-utils": "^1.0.0"
+ },
+ "dependencies": {
+ "camelcase": {
+ "version": "5.3.1",
+ "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz",
+ "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg=="
+ },
+ "postcss-value-parser": {
+ "version": "3.3.1",
+ "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz",
+ "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ=="
+ },
+ "schema-utils": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-1.0.0.tgz",
+ "integrity": "sha512-i27Mic4KovM/lnGsy8whRCHhc7VicJajAjTrYg11K9zfZXnYIt4k5F+kZkwjnrhKzLic/HLU4j11mjsz2G/75g==",
+ "requires": {
+ "ajv": "^6.1.0",
+ "ajv-errors": "^1.0.0",
+ "ajv-keywords": "^3.1.0"
+ }
+ }
+ }
+ },
+ "css-parse": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/css-parse/-/css-parse-2.0.0.tgz",
+ "integrity": "sha1-pGjuZnwW2BzPBcWMONKpfHgNv9Q=",
+ "requires": {
+ "css": "^2.0.0"
+ }
+ },
+ "css-select": {
+ "version": "4.1.2",
+ "resolved": "https://registry.npmjs.org/css-select/-/css-select-4.1.2.tgz",
+ "integrity": "sha512-nu5ye2Hg/4ISq4XqdLY2bEatAcLIdt3OYGFc9Tm9n7VSlFBcfRv0gBNksHRgSdUDQGtN3XrZ94ztW+NfzkFSUw==",
+ "requires": {
+ "boolbase": "^1.0.0",
+ "css-what": "^5.0.0",
+ "domhandler": "^4.2.0",
+ "domutils": "^2.6.0",
+ "nth-check": "^2.0.0"
+ }
+ },
+ "css-select-base-adapter": {
+ "version": "0.1.1",
+ "resolved": "https://registry.npmjs.org/css-select-base-adapter/-/css-select-base-adapter-0.1.1.tgz",
+ "integrity": "sha512-jQVeeRG70QI08vSTwf1jHxp74JoZsr2XSgETae8/xC8ovSnL2WF87GTLO86Sbwdt2lK4Umg4HnnwMO4YF3Ce7w=="
+ },
+ "css-tree": {
+ "version": "1.0.0-alpha.37",
+ "resolved": "https://registry.npmjs.org/css-tree/-/css-tree-1.0.0-alpha.37.tgz",
+ "integrity": "sha512-DMxWJg0rnz7UgxKT0Q1HU/L9BeJI0M6ksor0OgqOnF+aRCDWg/N2641HmVyU9KVIu0OVVWOb2IpC9A+BJRnejg==",
+ "requires": {
+ "mdn-data": "2.0.4",
+ "source-map": "^0.6.1"
+ },
+ "dependencies": {
+ "source-map": {
+ "version": "0.6.1",
+ "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz",
+ "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g=="
+ }
+ }
+ },
+ "css-what": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/css-what/-/css-what-5.0.0.tgz",
+ "integrity": "sha512-qxyKHQvgKwzwDWC/rGbT821eJalfupxYW2qbSJSAtdSTimsr/MlaGONoNLllaUPZWf8QnbcKM/kPVYUQuEKAFA=="
+ },
+ "cssesc": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/cssesc/-/cssesc-3.0.0.tgz",
+ "integrity": "sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg=="
+ },
+ "cssnano": {
+ "version": "4.1.11",
+ "resolved": "https://registry.npmjs.org/cssnano/-/cssnano-4.1.11.tgz",
+ "integrity": "sha512-6gZm2htn7xIPJOHY824ERgj8cNPgPxyCSnkXc4v7YvNW+TdVfzgngHcEhy/8D11kUWRUMbke+tC+AUcUsnMz2g==",
+ "requires": {
+ "cosmiconfig": "^5.0.0",
+ "cssnano-preset-default": "^4.0.8",
+ "is-resolvable": "^1.0.0",
+ "postcss": "^7.0.0"
+ }
+ },
+ "cssnano-preset-default": {
+ "version": "4.0.8",
+ "resolved": "https://registry.npmjs.org/cssnano-preset-default/-/cssnano-preset-default-4.0.8.tgz",
+ "integrity": "sha512-LdAyHuq+VRyeVREFmuxUZR1TXjQm8QQU/ktoo/x7bz+SdOge1YKc5eMN6pRW7YWBmyq59CqYba1dJ5cUukEjLQ==",
+ "requires": {
+ "css-declaration-sorter": "^4.0.1",
+ "cssnano-util-raw-cache": "^4.0.1",
+ "postcss": "^7.0.0",
+ "postcss-calc": "^7.0.1",
+ "postcss-colormin": "^4.0.3",
+ "postcss-convert-values": "^4.0.1",
+ "postcss-discard-comments": "^4.0.2",
+ "postcss-discard-duplicates": "^4.0.2",
+ "postcss-discard-empty": "^4.0.1",
+ "postcss-discard-overridden": "^4.0.1",
+ "postcss-merge-longhand": "^4.0.11",
+ "postcss-merge-rules": "^4.0.3",
+ "postcss-minify-font-values": "^4.0.2",
+ "postcss-minify-gradients": "^4.0.2",
+ "postcss-minify-params": "^4.0.2",
+ "postcss-minify-selectors": "^4.0.2",
+ "postcss-normalize-charset": "^4.0.1",
+ "postcss-normalize-display-values": "^4.0.2",
+ "postcss-normalize-positions": "^4.0.2",
+ "postcss-normalize-repeat-style": "^4.0.2",
+ "postcss-normalize-string": "^4.0.2",
+ "postcss-normalize-timing-functions": "^4.0.2",
+ "postcss-normalize-unicode": "^4.0.1",
+ "postcss-normalize-url": "^4.0.1",
+ "postcss-normalize-whitespace": "^4.0.2",
+ "postcss-ordered-values": "^4.1.2",
+ "postcss-reduce-initial": "^4.0.3",
+ "postcss-reduce-transforms": "^4.0.2",
+ "postcss-svgo": "^4.0.3",
+ "postcss-unique-selectors": "^4.0.1"
+ }
+ },
+ "cssnano-util-get-arguments": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/cssnano-util-get-arguments/-/cssnano-util-get-arguments-4.0.0.tgz",
+ "integrity": "sha1-7ToIKZ8h11dBsg87gfGU7UnMFQ8="
+ },
+ "cssnano-util-get-match": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/cssnano-util-get-match/-/cssnano-util-get-match-4.0.0.tgz",
+ "integrity": "sha1-wOTKB/U4a7F+xeUiULT1lhNlFW0="
+ },
+ "cssnano-util-raw-cache": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/cssnano-util-raw-cache/-/cssnano-util-raw-cache-4.0.1.tgz",
+ "integrity": "sha512-qLuYtWK2b2Dy55I8ZX3ky1Z16WYsx544Q0UWViebptpwn/xDBmog2TLg4f+DBMg1rJ6JDWtn96WHbOKDWt1WQA==",
+ "requires": {
+ "postcss": "^7.0.0"
+ }
+ },
+ "cssnano-util-same-parent": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/cssnano-util-same-parent/-/cssnano-util-same-parent-4.0.1.tgz",
+ "integrity": "sha512-WcKx5OY+KoSIAxBW6UBBRay1U6vkYheCdjyVNDm85zt5K9mHoGOfsOsqIszfAqrQQFIIKgjh2+FDgIj/zsl21Q=="
+ },
+ "csso": {
+ "version": "4.2.0",
+ "resolved": "https://registry.npmjs.org/csso/-/csso-4.2.0.tgz",
+ "integrity": "sha512-wvlcdIbf6pwKEk7vHj8/Bkc0B4ylXZruLvOgs9doS5eOsOpuodOV2zJChSpkp+pRpYQLQMeF04nr3Z68Sta9jA==",
+ "requires": {
+ "css-tree": "^1.1.2"
+ },
+ "dependencies": {
+ "css-tree": {
+ "version": "1.1.3",
+ "resolved": "https://registry.npmjs.org/css-tree/-/css-tree-1.1.3.tgz",
+ "integrity": "sha512-tRpdppF7TRazZrjJ6v3stzv93qxRcSsFmW6cX0Zm2NVKpxE1WV1HblnghVv9TreireHkqI/VDEsfolRF1p6y7Q==",
+ "requires": {
+ "mdn-data": "2.0.14",
+ "source-map": "^0.6.1"
+ }
+ },
+ "mdn-data": {
+ "version": "2.0.14",
+ "resolved": "https://registry.npmjs.org/mdn-data/-/mdn-data-2.0.14.tgz",
+ "integrity": "sha512-dn6wd0uw5GsdswPFfsgMp5NSB0/aDe6fK94YJV/AJDYXL6HVLWBsxeq7js7Ad+mU2K9LAlwpk6kN2D5mwCPVow=="
+ },
+ "source-map": {
+ "version": "0.6.1",
+ "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz",
+ "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g=="
+ }
+ }
+ },
+ "cyclist": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/cyclist/-/cyclist-1.0.1.tgz",
+ "integrity": "sha1-WW6WmP0MgOEgOMK4LW6xs1tiJNk="
+ },
+ "dashdash": {
+ "version": "1.14.1",
+ "resolved": "https://registry.npmjs.org/dashdash/-/dashdash-1.14.1.tgz",
+ "integrity": "sha1-hTz6D3y+L+1d4gMmuN1YEDX24vA=",
+ "requires": {
+ "assert-plus": "^1.0.0"
+ }
+ },
+ "de-indent": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/de-indent/-/de-indent-1.0.2.tgz",
+ "integrity": "sha1-sgOOhG3DO6pXlhKNCAS0VbjB4h0="
+ },
+ "debug": {
+ "version": "2.6.9",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
+ "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
+ "requires": {
+ "ms": "2.0.0"
+ }
+ },
+ "decamelize": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/decamelize/-/decamelize-1.2.0.tgz",
+ "integrity": "sha1-9lNNFRSCabIDUue+4m9QH5oZEpA="
+ },
+ "decode-uri-component": {
+ "version": "0.2.0",
+ "resolved": "https://registry.npmjs.org/decode-uri-component/-/decode-uri-component-0.2.0.tgz",
+ "integrity": "sha1-6zkTMzRYd1y4TNGh+uBiEGu4dUU="
+ },
+ "decompress-response": {
+ "version": "3.3.0",
+ "resolved": "https://registry.npmjs.org/decompress-response/-/decompress-response-3.3.0.tgz",
+ "integrity": "sha1-gKTdMjdIOEv6JICDYirt7Jgq3/M=",
+ "requires": {
+ "mimic-response": "^1.0.0"
+ }
+ },
+ "deep-equal": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/deep-equal/-/deep-equal-1.1.1.tgz",
+ "integrity": "sha512-yd9c5AdiqVcR+JjcwUQb9DkhJc8ngNr0MahEBGvDiJw8puWab2yZlh+nkasOnZP+EGTAP6rRp2JzJhJZzvNF8g==",
+ "requires": {
+ "is-arguments": "^1.0.4",
+ "is-date-object": "^1.0.1",
+ "is-regex": "^1.0.4",
+ "object-is": "^1.0.1",
+ "object-keys": "^1.1.1",
+ "regexp.prototype.flags": "^1.2.0"
+ }
+ },
+ "deep-extend": {
+ "version": "0.6.0",
+ "resolved": "https://registry.npmjs.org/deep-extend/-/deep-extend-0.6.0.tgz",
+ "integrity": "sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA=="
+ },
+ "deepmerge": {
+ "version": "1.5.2",
+ "resolved": "https://registry.npmjs.org/deepmerge/-/deepmerge-1.5.2.tgz",
+ "integrity": "sha512-95k0GDqvBjZavkuvzx/YqVLv/6YYa17fz6ILMSf7neqQITCPbnfEnQvEgMPNjH4kgobe7+WIL0yJEHku+H3qtQ=="
+ },
+ "default-gateway": {
+ "version": "4.2.0",
+ "resolved": "https://registry.npmjs.org/default-gateway/-/default-gateway-4.2.0.tgz",
+ "integrity": "sha512-h6sMrVB1VMWVrW13mSc6ia/DwYYw5MN6+exNu1OaJeFac5aSAvwM7lZ0NVfTABuSkQelr4h5oebg3KB1XPdjgA==",
+ "requires": {
+ "execa": "^1.0.0",
+ "ip-regex": "^2.1.0"
+ }
+ },
+ "defer-to-connect": {
+ "version": "1.1.3",
+ "resolved": "https://registry.npmjs.org/defer-to-connect/-/defer-to-connect-1.1.3.tgz",
+ "integrity": "sha512-0ISdNousHvZT2EiFlZeZAHBUvSxmKswVCEf8hW7KWgG4a8MVEu/3Vb6uWYozkjylyCxe0JBIiRB1jV45S70WVQ=="
+ },
+ "define-properties": {
+ "version": "1.1.3",
+ "resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.1.3.tgz",
+ "integrity": "sha512-3MqfYKj2lLzdMSf8ZIZE/V+Zuy+BgD6f164e8K2w7dgnpKArBDerGYpM46IYYcjnkdPNMjPk9A6VFB8+3SKlXQ==",
+ "requires": {
+ "object-keys": "^1.0.12"
+ }
+ },
+ "define-property": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/define-property/-/define-property-2.0.2.tgz",
+ "integrity": "sha512-jwK2UV4cnPpbcG7+VRARKTZPUWowwXA8bzH5NP6ud0oeAxyYPuGZUAC7hMugpCdz4BeSZl2Dl9k66CHJ/46ZYQ==",
+ "requires": {
+ "is-descriptor": "^1.0.2",
+ "isobject": "^3.0.1"
+ }
+ },
+ "del": {
+ "version": "4.1.1",
+ "resolved": "https://registry.npmjs.org/del/-/del-4.1.1.tgz",
+ "integrity": "sha512-QwGuEUouP2kVwQenAsOof5Fv8K9t3D8Ca8NxcXKrIpEHjTXK5J2nXLdP+ALI1cgv8wj7KuwBhTwBkOZSJKM5XQ==",
+ "requires": {
+ "@types/glob": "^7.1.1",
+ "globby": "^6.1.0",
+ "is-path-cwd": "^2.0.0",
+ "is-path-in-cwd": "^2.0.0",
+ "p-map": "^2.0.0",
+ "pify": "^4.0.1",
+ "rimraf": "^2.6.3"
+ },
+ "dependencies": {
+ "globby": {
+ "version": "6.1.0",
+ "resolved": "https://registry.npmjs.org/globby/-/globby-6.1.0.tgz",
+ "integrity": "sha1-9abXDoOV4hyFj7BInWTfAkJNUGw=",
+ "requires": {
+ "array-union": "^1.0.1",
+ "glob": "^7.0.3",
+ "object-assign": "^4.0.1",
+ "pify": "^2.0.0",
+ "pinkie-promise": "^2.0.0"
+ },
+ "dependencies": {
+ "pify": {
+ "version": "2.3.0",
+ "resolved": "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz",
+ "integrity": "sha1-7RQaasBDqEnqWISY59yosVMw6Qw="
+ }
+ }
+ }
+ }
+ },
+ "delayed-stream": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz",
+ "integrity": "sha1-3zrhmayt+31ECqrgsp4icrJOxhk="
+ },
+ "delegate": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/delegate/-/delegate-3.2.0.tgz",
+ "integrity": "sha512-IofjkYBZaZivn0V8nnsMJGBr4jVLxHDheKSW88PyxS5QC4Vo9ZbZVvhzlSxY87fVq3STR6r+4cGepyHkcWOQSw==",
+ "optional": true
+ },
+ "depd": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/depd/-/depd-1.1.2.tgz",
+ "integrity": "sha1-m81S4UwJd2PnSbJ0xDRu0uVgtak="
+ },
+ "des.js": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/des.js/-/des.js-1.0.1.tgz",
+ "integrity": "sha512-Q0I4pfFrv2VPd34/vfLrFOoRmlYj3OV50i7fskps1jZWK1kApMWWT9G6RRUeYedLcBDIhnSDaUvJMb3AhUlaEA==",
+ "requires": {
+ "inherits": "^2.0.1",
+ "minimalistic-assert": "^1.0.0"
+ }
+ },
+ "destroy": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/destroy/-/destroy-1.0.4.tgz",
+ "integrity": "sha1-l4hXRCxEdJ5CBmE+N5RiBYJqvYA="
+ },
+ "detect-node": {
+ "version": "2.0.5",
+ "resolved": "https://registry.npmjs.org/detect-node/-/detect-node-2.0.5.tgz",
+ "integrity": "sha512-qi86tE6hRcFHy8jI1m2VG+LaPUR1LhqDa5G8tVjuUXmOrpuAgqsA1pN0+ldgr3aKUH+QLI9hCY/OcRYisERejw=="
+ },
+ "diffie-hellman": {
+ "version": "5.0.3",
+ "resolved": "https://registry.npmjs.org/diffie-hellman/-/diffie-hellman-5.0.3.tgz",
+ "integrity": "sha512-kqag/Nl+f3GwyK25fhUMYj81BUOrZ9IuJsjIcDE5icNM9FJHAVm3VcUDxdLPoQtTuUylWm6ZIknYJwwaPxsUzg==",
+ "requires": {
+ "bn.js": "^4.1.0",
+ "miller-rabin": "^4.0.0",
+ "randombytes": "^2.0.0"
+ },
+ "dependencies": {
+ "bn.js": {
+ "version": "4.12.0",
+ "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.12.0.tgz",
+ "integrity": "sha512-c98Bf3tPniI+scsdk237ku1Dc3ujXQTSgyiPUDEOe7tRkhrqridvh8klBv0HCEso1OLOYcHuCv/cS6DNxKH+ZA=="
+ }
+ }
+ },
+ "dir-glob": {
+ "version": "2.2.2",
+ "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-2.2.2.tgz",
+ "integrity": "sha512-f9LBi5QWzIW3I6e//uxZoLBlUt9kcp66qo0sSCxL6YZKc75R1c4MFCoe/LaZiBGmgujvQdxc5Bn3QhfyvK5Hsw==",
+ "requires": {
+ "path-type": "^3.0.0"
+ }
+ },
+ "dns-equal": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/dns-equal/-/dns-equal-1.0.0.tgz",
+ "integrity": "sha1-s55/HabrCnW6nBcySzR1PEfgZU0="
+ },
+ "dns-packet": {
+ "version": "1.3.1",
+ "resolved": "https://registry.npmjs.org/dns-packet/-/dns-packet-1.3.1.tgz",
+ "integrity": "sha512-0UxfQkMhYAUaZI+xrNZOz/as5KgDU0M/fQ9b6SpkyLbk3GEswDi6PADJVaYJradtRVsRIlF1zLyOodbcTCDzUg==",
+ "requires": {
+ "ip": "^1.1.0",
+ "safe-buffer": "^5.0.1"
+ }
+ },
+ "dns-txt": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/dns-txt/-/dns-txt-2.0.2.tgz",
+ "integrity": "sha1-uR2Ab10nGI5Ks+fRB9iBocxGQrY=",
+ "requires": {
+ "buffer-indexof": "^1.0.0"
+ }
+ },
+ "docsearch.js": {
+ "version": "2.6.3",
+ "resolved": "https://registry.npmjs.org/docsearch.js/-/docsearch.js-2.6.3.tgz",
+ "integrity": "sha512-GN+MBozuyz664ycpZY0ecdQE0ND/LSgJKhTLA0/v3arIS3S1Rpf2OJz6A35ReMsm91V5apcmzr5/kM84cvUg+A==",
+ "requires": {
+ "algoliasearch": "^3.24.5",
+ "autocomplete.js": "0.36.0",
+ "hogan.js": "^3.0.2",
+ "request": "^2.87.0",
+ "stack-utils": "^1.0.1",
+ "to-factory": "^1.0.0",
+ "zepto": "^1.2.0"
+ },
+ "dependencies": {
+ "algoliasearch": {
+ "version": "3.35.1",
+ "resolved": "https://registry.npmjs.org/algoliasearch/-/algoliasearch-3.35.1.tgz",
+ "integrity": "sha512-K4yKVhaHkXfJ/xcUnil04xiSrB8B8yHZoFEhWNpXg23eiCnqvTZw1tn/SqvdsANlYHLJlKl0qi3I/Q2Sqo7LwQ==",
+ "requires": {
+ "agentkeepalive": "^2.2.0",
+ "debug": "^2.6.9",
+ "envify": "^4.0.0",
+ "es6-promise": "^4.1.0",
+ "events": "^1.1.0",
+ "foreach": "^2.0.5",
+ "global": "^4.3.2",
+ "inherits": "^2.0.1",
+ "isarray": "^2.0.1",
+ "load-script": "^1.0.0",
+ "object-keys": "^1.0.11",
+ "querystring-es3": "^0.2.1",
+ "reduce": "^1.0.1",
+ "semver": "^5.1.0",
+ "tunnel-agent": "^0.6.0"
+ }
+ },
+ "events": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/events/-/events-1.1.1.tgz",
+ "integrity": "sha1-nr23Y1rQmccNzEwqH1AEKI6L2SQ="
+ },
+ "isarray": {
+ "version": "2.0.5",
+ "resolved": "https://registry.npmjs.org/isarray/-/isarray-2.0.5.tgz",
+ "integrity": "sha512-xHjhDr3cNBK0BzdUJSPXZntQUx/mwMS5Rw4A7lPJ90XGAO6ISP/ePDNuo0vhqOZU+UD5JoodwCAAoZQd3FeAKw=="
+ },
+ "semver": {
+ "version": "5.7.1",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz",
+ "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ=="
+ }
+ }
+ },
+ "doctypes": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/doctypes/-/doctypes-1.1.0.tgz",
+ "integrity": "sha1-6oCxBqh1OHdOijpKWv4pPeSJ4Kk="
+ },
+ "dom-converter": {
+ "version": "0.2.0",
+ "resolved": "https://registry.npmjs.org/dom-converter/-/dom-converter-0.2.0.tgz",
+ "integrity": "sha512-gd3ypIPfOMr9h5jIKq8E3sHOTCjeirnl0WK5ZdS1AW0Odt0b1PaWaHdJ4Qk4klv+YB9aJBS7mESXjFoDQPu6DA==",
+ "requires": {
+ "utila": "~0.4"
+ }
+ },
+ "dom-serializer": {
+ "version": "1.3.1",
+ "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-1.3.1.tgz",
+ "integrity": "sha512-Pv2ZluG5ife96udGgEDovOOOA5UELkltfJpnIExPrAk1LTvecolUGn6lIaoLh86d83GiB86CjzciMd9BuRB71Q==",
+ "requires": {
+ "domelementtype": "^2.0.1",
+ "domhandler": "^4.0.0",
+ "entities": "^2.0.0"
+ }
+ },
+ "dom-walk": {
+ "version": "0.1.2",
+ "resolved": "https://registry.npmjs.org/dom-walk/-/dom-walk-0.1.2.tgz",
+ "integrity": "sha512-6QvTW9mrGeIegrFXdtQi9pk7O/nSK6lSdXW2eqUspN5LWD7UTji2Fqw5V2YLjBpHEoU9Xl/eUWNpDeZvoyOv2w=="
+ },
+ "domain-browser": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/domain-browser/-/domain-browser-1.2.0.tgz",
+ "integrity": "sha512-jnjyiM6eRyZl2H+W8Q/zLMA481hzi0eszAaBUzIVnmYVDBbnLxVNnfu1HgEBvCbL+71FrxMl3E6lpKH7Ge3OXA=="
+ },
+ "domelementtype": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-2.2.0.tgz",
+ "integrity": "sha512-DtBMo82pv1dFtUmHyr48beiuq792Sxohr+8Hm9zoxklYPfa6n0Z3Byjj2IV7bmr2IyqClnqEQhfgHJJ5QF0R5A=="
+ },
+ "domhandler": {
+ "version": "4.2.0",
+ "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-4.2.0.tgz",
+ "integrity": "sha512-zk7sgt970kzPks2Bf+dwT/PLzghLnsivb9CcxkvR8Mzr66Olr0Ofd8neSbglHJHaHa2MadfoSdNlKYAaafmWfA==",
+ "requires": {
+ "domelementtype": "^2.2.0"
+ }
+ },
+ "domutils": {
+ "version": "2.6.0",
+ "resolved": "https://registry.npmjs.org/domutils/-/domutils-2.6.0.tgz",
+ "integrity": "sha512-y0BezHuy4MDYxh6OvolXYsH+1EMGmFbwv5FKW7ovwMG6zTPWqNPq3WF9ayZssFq+UlKdffGLbOEaghNdaOm1WA==",
+ "requires": {
+ "dom-serializer": "^1.0.1",
+ "domelementtype": "^2.2.0",
+ "domhandler": "^4.2.0"
+ }
+ },
+ "dot-prop": {
+ "version": "5.3.0",
+ "resolved": "https://registry.npmjs.org/dot-prop/-/dot-prop-5.3.0.tgz",
+ "integrity": "sha512-QM8q3zDe58hqUqjraQOmzZ1LIH9SWQJTlEKCH4kJ2oQvLZk7RbQXvtDM2XEq3fwkV9CCvvH4LA0AV+ogFsBM2Q==",
+ "requires": {
+ "is-obj": "^2.0.0"
+ }
+ },
+ "duplexer3": {
+ "version": "0.1.4",
+ "resolved": "https://registry.npmjs.org/duplexer3/-/duplexer3-0.1.4.tgz",
+ "integrity": "sha1-7gHdHKwO08vH/b6jfcCo8c4ALOI="
+ },
+ "duplexify": {
+ "version": "3.7.1",
+ "resolved": "https://registry.npmjs.org/duplexify/-/duplexify-3.7.1.tgz",
+ "integrity": "sha512-07z8uv2wMyS51kKhD1KsdXJg5WQ6t93RneqRxUHnskXVtlYYkLqM0gqStQZ3pj073g687jPCHrqNfCzawLYh5g==",
+ "requires": {
+ "end-of-stream": "^1.0.0",
+ "inherits": "^2.0.1",
+ "readable-stream": "^2.0.0",
+ "stream-shift": "^1.0.0"
+ }
+ },
+ "ecc-jsbn": {
+ "version": "0.1.2",
+ "resolved": "https://registry.npmjs.org/ecc-jsbn/-/ecc-jsbn-0.1.2.tgz",
+ "integrity": "sha1-OoOpBOVDUyh4dMVkt1SThoSamMk=",
+ "requires": {
+ "jsbn": "~0.1.0",
+ "safer-buffer": "^2.1.0"
+ }
+ },
+ "ee-first": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz",
+ "integrity": "sha1-WQxhFWsK4vTwJVcyoViyZrxWsh0="
+ },
+ "electron-to-chromium": {
+ "version": "1.3.723",
+ "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.3.723.tgz",
+ "integrity": "sha512-L+WXyXI7c7+G1V8ANzRsPI5giiimLAUDC6Zs1ojHHPhYXb3k/iTABFmWjivEtsWrRQymjnO66/rO2ZTABGdmWg=="
+ },
+ "elliptic": {
+ "version": "6.5.4",
+ "resolved": "https://registry.npmjs.org/elliptic/-/elliptic-6.5.4.tgz",
+ "integrity": "sha512-iLhC6ULemrljPZb+QutR5TQGB+pdW6KGD5RSegS+8sorOZT+rdQFbsQFJgvN3eRqNALqJer4oQ16YvJHlU8hzQ==",
+ "requires": {
+ "bn.js": "^4.11.9",
+ "brorand": "^1.1.0",
+ "hash.js": "^1.0.0",
+ "hmac-drbg": "^1.0.1",
+ "inherits": "^2.0.4",
+ "minimalistic-assert": "^1.0.1",
+ "minimalistic-crypto-utils": "^1.0.1"
+ },
+ "dependencies": {
+ "bn.js": {
+ "version": "4.12.0",
+ "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.12.0.tgz",
+ "integrity": "sha512-c98Bf3tPniI+scsdk237ku1Dc3ujXQTSgyiPUDEOe7tRkhrqridvh8klBv0HCEso1OLOYcHuCv/cS6DNxKH+ZA=="
+ }
+ }
+ },
+ "emoji-regex": {
+ "version": "8.0.0",
+ "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz",
+ "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A=="
+ },
+ "emojis-list": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/emojis-list/-/emojis-list-3.0.0.tgz",
+ "integrity": "sha512-/kyM18EfinwXZbno9FyUGeFh87KC8HRQBQGildHZbEuRyWFOmv1U10o9BBp8XVZDVNNuQKyIGIu5ZYAAXJ0V2Q=="
+ },
+ "encodeurl": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz",
+ "integrity": "sha1-rT/0yG7C0CkyL1oCw6mmBslbP1k="
+ },
+ "end-of-stream": {
+ "version": "1.4.4",
+ "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.4.tgz",
+ "integrity": "sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==",
+ "requires": {
+ "once": "^1.4.0"
+ }
+ },
+ "enhanced-resolve": {
+ "version": "4.5.0",
+ "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-4.5.0.tgz",
+ "integrity": "sha512-Nv9m36S/vxpsI+Hc4/ZGRs0n9mXqSWGGq49zxb/cJfPAQMbUtttJAlNPS4AQzaBdw/pKskw5bMbekT/Y7W/Wlg==",
+ "requires": {
+ "graceful-fs": "^4.1.2",
+ "memory-fs": "^0.5.0",
+ "tapable": "^1.0.0"
+ },
+ "dependencies": {
+ "memory-fs": {
+ "version": "0.5.0",
+ "resolved": "https://registry.npmjs.org/memory-fs/-/memory-fs-0.5.0.tgz",
+ "integrity": "sha512-jA0rdU5KoQMC0e6ppoNRtpp6vjFq6+NY7r8hywnC7V+1Xj/MtHwGIbB1QaK/dunyjWteJzmkpd7ooeWg10T7GA==",
+ "requires": {
+ "errno": "^0.1.3",
+ "readable-stream": "^2.0.1"
+ }
+ }
+ }
+ },
+ "entities": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/entities/-/entities-2.1.0.tgz",
+ "integrity": "sha512-hCx1oky9PFrJ611mf0ifBLBRW8lUUVRlFolb5gWRfIELabBlbp9xZvrqZLZAs+NxFnbfQoeGd8wDkygjg7U85w=="
+ },
+ "envify": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/envify/-/envify-4.1.0.tgz",
+ "integrity": "sha512-IKRVVoAYr4pIx4yIWNsz9mOsboxlNXiu7TNBnem/K/uTHdkyzXWDzHCK7UTolqBbgaBz0tQHsD3YNls0uIIjiw==",
+ "requires": {
+ "esprima": "^4.0.0",
+ "through": "~2.3.4"
+ }
+ },
+ "envinfo": {
+ "version": "7.8.1",
+ "resolved": "https://registry.npmjs.org/envinfo/-/envinfo-7.8.1.tgz",
+ "integrity": "sha512-/o+BXHmB7ocbHEAs6F2EnG0ogybVVUdkRunTT2glZU9XAaGmhqskrvKwqXuDfNjEO0LZKWdejEEpnq8aM0tOaw=="
+ },
+ "errno": {
+ "version": "0.1.8",
+ "resolved": "https://registry.npmjs.org/errno/-/errno-0.1.8.tgz",
+ "integrity": "sha512-dJ6oBr5SQ1VSd9qkk7ByRgb/1SH4JZjCHSW/mr63/QcXO9zLVxvJ6Oy13nio03rxpSnVDDjFor75SjVeZWPW/A==",
+ "requires": {
+ "prr": "~1.0.1"
+ }
+ },
+ "error-ex": {
+ "version": "1.3.2",
+ "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz",
+ "integrity": "sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==",
+ "requires": {
+ "is-arrayish": "^0.2.1"
+ }
+ },
+ "es-abstract": {
+ "version": "1.18.0",
+ "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.18.0.tgz",
+ "integrity": "sha512-LJzK7MrQa8TS0ja2w3YNLzUgJCGPdPOV1yVvezjNnS89D+VR08+Szt2mz3YB2Dck/+w5tfIq/RoUAFqJJGM2yw==",
+ "requires": {
+ "call-bind": "^1.0.2",
+ "es-to-primitive": "^1.2.1",
+ "function-bind": "^1.1.1",
+ "get-intrinsic": "^1.1.1",
+ "has": "^1.0.3",
+ "has-symbols": "^1.0.2",
+ "is-callable": "^1.2.3",
+ "is-negative-zero": "^2.0.1",
+ "is-regex": "^1.1.2",
+ "is-string": "^1.0.5",
+ "object-inspect": "^1.9.0",
+ "object-keys": "^1.1.1",
+ "object.assign": "^4.1.2",
+ "string.prototype.trimend": "^1.0.4",
+ "string.prototype.trimstart": "^1.0.4",
+ "unbox-primitive": "^1.0.0"
+ }
+ },
+ "es-to-primitive": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/es-to-primitive/-/es-to-primitive-1.2.1.tgz",
+ "integrity": "sha512-QCOllgZJtaUo9miYBcLChTUaHNjJF3PYs1VidD7AwiEj1kYxKeQTctLAezAOH5ZKRH0g2IgPn6KwB4IT8iRpvA==",
+ "requires": {
+ "is-callable": "^1.1.4",
+ "is-date-object": "^1.0.1",
+ "is-symbol": "^1.0.2"
+ }
+ },
+ "es6-promise": {
+ "version": "4.2.8",
+ "resolved": "https://registry.npmjs.org/es6-promise/-/es6-promise-4.2.8.tgz",
+ "integrity": "sha512-HJDGx5daxeIvxdBxvG2cb9g4tEvwIk3i8+nhX0yGrYmZUzbkdg8QbDevheDB8gd0//uPj4c1EQua8Q+MViT0/w=="
+ },
+ "escalade": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz",
+ "integrity": "sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw=="
+ },
+ "escape-goat": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/escape-goat/-/escape-goat-2.1.1.tgz",
+ "integrity": "sha512-8/uIhbG12Csjy2JEW7D9pHbreaVaS/OpN3ycnyvElTdwM5n6GY6W6e2IPemfvGZeUMqZ9A/3GqIZMgKnBhAw/Q=="
+ },
+ "escape-html": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz",
+ "integrity": "sha1-Aljq5NPQwJdN4cFpGI7wBR0dGYg="
+ },
+ "escape-string-regexp": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz",
+ "integrity": "sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ="
+ },
+ "eslint-scope": {
+ "version": "4.0.3",
+ "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-4.0.3.tgz",
+ "integrity": "sha512-p7VutNr1O/QrxysMo3E45FjYDTeXBy0iTltPFNSqKAIfjDSXC+4dj+qfyuD8bfAXrW/y6lW3O76VaYNPKfpKrg==",
+ "requires": {
+ "esrecurse": "^4.1.0",
+ "estraverse": "^4.1.1"
+ }
+ },
+ "esm": {
+ "version": "3.2.25",
+ "resolved": "https://registry.npmjs.org/esm/-/esm-3.2.25.tgz",
+ "integrity": "sha512-U1suiZ2oDVWv4zPO56S0NcR5QriEahGtdN2OR6FiOG4WJvcjBVFB0qI4+eKoWFH483PKGuLuu6V8Z4T5g63UVA=="
+ },
+ "esprima": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz",
+ "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A=="
+ },
+ "esrecurse": {
+ "version": "4.3.0",
+ "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz",
+ "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==",
+ "requires": {
+ "estraverse": "^5.2.0"
+ },
+ "dependencies": {
+ "estraverse": {
+ "version": "5.2.0",
+ "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.2.0.tgz",
+ "integrity": "sha512-BxbNGGNm0RyRYvUdHpIwv9IWzeM9XClbOxwoATuFdOE7ZE6wHL+HQ5T8hoPM+zHvmKzzsEqhgy0GrQ5X13afiQ=="
+ }
+ }
+ },
+ "estraverse": {
+ "version": "4.3.0",
+ "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-4.3.0.tgz",
+ "integrity": "sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw=="
+ },
+ "esutils": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz",
+ "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g=="
+ },
+ "etag": {
+ "version": "1.8.1",
+ "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz",
+ "integrity": "sha1-Qa4u62XvpiJorr/qg6x9eSmbCIc="
+ },
+ "eventemitter3": {
+ "version": "4.0.7",
+ "resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-4.0.7.tgz",
+ "integrity": "sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw=="
+ },
+ "events": {
+ "version": "3.3.0",
+ "resolved": "https://registry.npmjs.org/events/-/events-3.3.0.tgz",
+ "integrity": "sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q=="
+ },
+ "eventsource": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/eventsource/-/eventsource-1.1.0.tgz",
+ "integrity": "sha512-VSJjT5oCNrFvCS6igjzPAt5hBzQ2qPBFIbJ03zLI9SE0mxwZpMw6BfJrbFHm1a141AavMEB8JHmBhWAd66PfCg==",
+ "requires": {
+ "original": "^1.0.0"
+ }
+ },
+ "evp_bytestokey": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/evp_bytestokey/-/evp_bytestokey-1.0.3.tgz",
+ "integrity": "sha512-/f2Go4TognH/KvCISP7OUsHn85hT9nUkxxA9BEWxFn+Oj9o8ZNLm/40hdlgSLyuOimsrTKLUMEorQexp/aPQeA==",
+ "requires": {
+ "md5.js": "^1.3.4",
+ "safe-buffer": "^5.1.1"
+ }
+ },
+ "execa": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/execa/-/execa-1.0.0.tgz",
+ "integrity": "sha512-adbxcyWV46qiHyvSp50TKt05tB4tK3HcmF7/nxfAdhnox83seTDbwnaqKO4sXRy7roHAIFqJP/Rw/AuEbX61LA==",
+ "requires": {
+ "cross-spawn": "^6.0.0",
+ "get-stream": "^4.0.0",
+ "is-stream": "^1.1.0",
+ "npm-run-path": "^2.0.0",
+ "p-finally": "^1.0.0",
+ "signal-exit": "^3.0.0",
+ "strip-eof": "^1.0.0"
+ }
+ },
+ "expand-brackets": {
+ "version": "2.1.4",
+ "resolved": "https://registry.npmjs.org/expand-brackets/-/expand-brackets-2.1.4.tgz",
+ "integrity": "sha1-t3c14xXOMPa27/D4OwQVGiJEliI=",
+ "requires": {
+ "debug": "^2.3.3",
+ "define-property": "^0.2.5",
+ "extend-shallow": "^2.0.1",
+ "posix-character-classes": "^0.1.0",
+ "regex-not": "^1.0.0",
+ "snapdragon": "^0.8.1",
+ "to-regex": "^3.0.1"
+ },
+ "dependencies": {
+ "define-property": {
+ "version": "0.2.5",
+ "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz",
+ "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=",
+ "requires": {
+ "is-descriptor": "^0.1.0"
+ }
+ },
+ "is-accessor-descriptor": {
+ "version": "0.1.6",
+ "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-0.1.6.tgz",
+ "integrity": "sha1-qeEss66Nh2cn7u84Q/igiXtcmNY=",
+ "requires": {
+ "kind-of": "^3.0.2"
+ },
+ "dependencies": {
+ "kind-of": {
+ "version": "3.2.2",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz",
+ "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=",
+ "requires": {
+ "is-buffer": "^1.1.5"
+ }
+ }
+ }
+ },
+ "is-data-descriptor": {
+ "version": "0.1.4",
+ "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-0.1.4.tgz",
+ "integrity": "sha1-C17mSDiOLIYCgueT8YVv7D8wG1Y=",
+ "requires": {
+ "kind-of": "^3.0.2"
+ },
+ "dependencies": {
+ "kind-of": {
+ "version": "3.2.2",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz",
+ "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=",
+ "requires": {
+ "is-buffer": "^1.1.5"
+ }
+ }
+ }
+ },
+ "is-descriptor": {
+ "version": "0.1.6",
+ "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-0.1.6.tgz",
+ "integrity": "sha512-avDYr0SB3DwO9zsMov0gKCESFYqCnE4hq/4z3TdUlukEy5t9C0YRq7HLrsN52NAcqXKaepeCD0n+B0arnVG3Hg==",
+ "requires": {
+ "is-accessor-descriptor": "^0.1.6",
+ "is-data-descriptor": "^0.1.4",
+ "kind-of": "^5.0.0"
+ }
+ },
+ "kind-of": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-5.1.0.tgz",
+ "integrity": "sha512-NGEErnH6F2vUuXDh+OlbcKW7/wOcfdRHaZ7VWtqCztfHri/++YKmP51OdWeGPuqCOba6kk2OTe5d02VmTB80Pw=="
+ }
+ }
+ },
+ "express": {
+ "version": "4.17.1",
+ "resolved": "https://registry.npmjs.org/express/-/express-4.17.1.tgz",
+ "integrity": "sha512-mHJ9O79RqluphRrcw2X/GTh3k9tVv8YcoyY4Kkh4WDMUYKRZUq0h1o0w2rrrxBqM7VoeUVqgb27xlEMXTnYt4g==",
+ "requires": {
+ "accepts": "~1.3.7",
+ "array-flatten": "1.1.1",
+ "body-parser": "1.19.0",
+ "content-disposition": "0.5.3",
+ "content-type": "~1.0.4",
+ "cookie": "0.4.0",
+ "cookie-signature": "1.0.6",
+ "debug": "2.6.9",
+ "depd": "~1.1.2",
+ "encodeurl": "~1.0.2",
+ "escape-html": "~1.0.3",
+ "etag": "~1.8.1",
+ "finalhandler": "~1.1.2",
+ "fresh": "0.5.2",
+ "merge-descriptors": "1.0.1",
+ "methods": "~1.1.2",
+ "on-finished": "~2.3.0",
+ "parseurl": "~1.3.3",
+ "path-to-regexp": "0.1.7",
+ "proxy-addr": "~2.0.5",
+ "qs": "6.7.0",
+ "range-parser": "~1.2.1",
+ "safe-buffer": "5.1.2",
+ "send": "0.17.1",
+ "serve-static": "1.14.1",
+ "setprototypeof": "1.1.1",
+ "statuses": "~1.5.0",
+ "type-is": "~1.6.18",
+ "utils-merge": "1.0.1",
+ "vary": "~1.1.2"
+ },
+ "dependencies": {
+ "array-flatten": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz",
+ "integrity": "sha1-ml9pkFGx5wczKPKgCJaLZOopVdI="
+ },
+ "qs": {
+ "version": "6.7.0",
+ "resolved": "https://registry.npmjs.org/qs/-/qs-6.7.0.tgz",
+ "integrity": "sha512-VCdBRNFTX1fyE7Nb6FYoURo/SPe62QCaAyzJvUjwRaIsc+NePBEniHlvxFmmX56+HZphIGtV0XeCirBtpDrTyQ=="
+ }
+ }
+ },
+ "extend": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz",
+ "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g=="
+ },
+ "extend-shallow": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz",
+ "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=",
+ "requires": {
+ "is-extendable": "^0.1.0"
+ }
+ },
+ "extglob": {
+ "version": "2.0.4",
+ "resolved": "https://registry.npmjs.org/extglob/-/extglob-2.0.4.tgz",
+ "integrity": "sha512-Nmb6QXkELsuBr24CJSkilo6UHHgbekK5UiZgfE6UHD3Eb27YC6oD+bhcT+tJ6cl8dmsgdQxnWlcry8ksBIBLpw==",
+ "requires": {
+ "array-unique": "^0.3.2",
+ "define-property": "^1.0.0",
+ "expand-brackets": "^2.1.4",
+ "extend-shallow": "^2.0.1",
+ "fragment-cache": "^0.2.1",
+ "regex-not": "^1.0.0",
+ "snapdragon": "^0.8.1",
+ "to-regex": "^3.0.1"
+ },
+ "dependencies": {
+ "define-property": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/define-property/-/define-property-1.0.0.tgz",
+ "integrity": "sha1-dp66rz9KY6rTr56NMEybvnm/sOY=",
+ "requires": {
+ "is-descriptor": "^1.0.0"
+ }
+ }
+ }
+ },
+ "extsprintf": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/extsprintf/-/extsprintf-1.3.0.tgz",
+ "integrity": "sha1-lpGEQOMEGnpBT4xS48V06zw+HgU="
+ },
+ "fast-deep-equal": {
+ "version": "3.1.3",
+ "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz",
+ "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q=="
+ },
+ "fast-glob": {
+ "version": "2.2.7",
+ "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-2.2.7.tgz",
+ "integrity": "sha512-g1KuQwHOZAmOZMuBtHdxDtju+T2RT8jgCC9aANsbpdiDDTSnjgfuVsIBNKbUeJI3oKMRExcfNDtJl4OhbffMsw==",
+ "requires": {
+ "@mrmlnc/readdir-enhanced": "^2.2.1",
+ "@nodelib/fs.stat": "^1.1.2",
+ "glob-parent": "^3.1.0",
+ "is-glob": "^4.0.0",
+ "merge2": "^1.2.3",
+ "micromatch": "^3.1.10"
+ }
+ },
+ "fast-json-stable-stringify": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz",
+ "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw=="
+ },
+ "faye-websocket": {
+ "version": "0.11.3",
+ "resolved": "https://registry.npmjs.org/faye-websocket/-/faye-websocket-0.11.3.tgz",
+ "integrity": "sha512-D2y4bovYpzziGgbHYtGCMjlJM36vAl/y+xUyn1C+FVx8szd1E+86KwVw6XvYSzOP8iMpm1X0I4xJD+QtUb36OA==",
+ "requires": {
+ "websocket-driver": ">=0.5.1"
+ }
+ },
+ "figgy-pudding": {
+ "version": "3.5.2",
+ "resolved": "https://registry.npmjs.org/figgy-pudding/-/figgy-pudding-3.5.2.tgz",
+ "integrity": "sha512-0btnI/H8f2pavGMN8w40mlSKOfTK2SVJmBfBeVIj3kNw0swwgzyRq0d5TJVOwodFmtvpPeWPN/MCcfuWF0Ezbw=="
+ },
+ "figures": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/figures/-/figures-3.2.0.tgz",
+ "integrity": "sha512-yaduQFRKLXYOGgEn6AZau90j3ggSOyiqXU0F9JZfeXYhNa+Jk4X+s45A2zg5jns87GAFa34BBm2kXw4XpNcbdg==",
+ "requires": {
+ "escape-string-regexp": "^1.0.5"
+ }
+ },
+ "file-loader": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/file-loader/-/file-loader-3.0.1.tgz",
+ "integrity": "sha512-4sNIOXgtH/9WZq4NvlfU3Opn5ynUsqBwSLyM+I7UOwdGigTBYfVVQEwe/msZNX/j4pCJTIM14Fsw66Svo1oVrw==",
+ "requires": {
+ "loader-utils": "^1.0.2",
+ "schema-utils": "^1.0.0"
+ },
+ "dependencies": {
+ "schema-utils": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-1.0.0.tgz",
+ "integrity": "sha512-i27Mic4KovM/lnGsy8whRCHhc7VicJajAjTrYg11K9zfZXnYIt4k5F+kZkwjnrhKzLic/HLU4j11mjsz2G/75g==",
+ "requires": {
+ "ajv": "^6.1.0",
+ "ajv-errors": "^1.0.0",
+ "ajv-keywords": "^3.1.0"
+ }
+ }
+ }
+ },
+ "file-uri-to-path": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/file-uri-to-path/-/file-uri-to-path-1.0.0.tgz",
+ "integrity": "sha512-0Zt+s3L7Vf1biwWZ29aARiVYLx7iMGnEUl9x33fbB/j3jR81u/O2LbqK+Bm1CDSNDKVtJ/YjwY7TUd5SkeLQLw==",
+ "optional": true
+ },
+ "fill-range": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-4.0.0.tgz",
+ "integrity": "sha1-1USBHUKPmOsGpj3EAtJAPDKMOPc=",
+ "requires": {
+ "extend-shallow": "^2.0.1",
+ "is-number": "^3.0.0",
+ "repeat-string": "^1.6.1",
+ "to-regex-range": "^2.1.0"
+ }
+ },
+ "finalhandler": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.1.2.tgz",
+ "integrity": "sha512-aAWcW57uxVNrQZqFXjITpW3sIUQmHGG3qSb9mUah9MgMC4NeWhNOlNjXEYq3HjRAvL6arUviZGGJsBg6z0zsWA==",
+ "requires": {
+ "debug": "2.6.9",
+ "encodeurl": "~1.0.2",
+ "escape-html": "~1.0.3",
+ "on-finished": "~2.3.0",
+ "parseurl": "~1.3.3",
+ "statuses": "~1.5.0",
+ "unpipe": "~1.0.0"
+ }
+ },
+ "find-cache-dir": {
+ "version": "3.3.1",
+ "resolved": "https://registry.npmjs.org/find-cache-dir/-/find-cache-dir-3.3.1.tgz",
+ "integrity": "sha512-t2GDMt3oGC/v+BMwzmllWDuJF/xcDtE5j/fCGbqDD7OLuJkj0cfh1YSA5VKPvwMeLFLNDBkwOKZ2X85jGLVftQ==",
+ "requires": {
+ "commondir": "^1.0.1",
+ "make-dir": "^3.0.2",
+ "pkg-dir": "^4.1.0"
+ }
+ },
+ "find-up": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz",
+ "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==",
+ "requires": {
+ "locate-path": "^5.0.0",
+ "path-exists": "^4.0.0"
+ }
+ },
+ "flush-write-stream": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/flush-write-stream/-/flush-write-stream-1.1.1.tgz",
+ "integrity": "sha512-3Z4XhFZ3992uIq0XOqb9AreonueSYphE6oYbpt5+3u06JWklbsPkNv3ZKkP9Bz/r+1MWCaMoSQ28P85+1Yc77w==",
+ "requires": {
+ "inherits": "^2.0.3",
+ "readable-stream": "^2.3.6"
+ }
+ },
+ "follow-redirects": {
+ "version": "1.14.0",
+ "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.14.0.tgz",
+ "integrity": "sha512-0vRwd7RKQBTt+mgu87mtYeofLFZpTas2S9zY+jIeuLJMNvudIgF52nr19q40HOwH5RrhWIPuj9puybzSJiRrVg=="
+ },
+ "for-in": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/for-in/-/for-in-1.0.2.tgz",
+ "integrity": "sha1-gQaNKVqBQuwKxybG4iAMMPttXoA="
+ },
+ "foreach": {
+ "version": "2.0.5",
+ "resolved": "https://registry.npmjs.org/foreach/-/foreach-2.0.5.tgz",
+ "integrity": "sha1-C+4AUBiusmDQo6865ljdATbsG5k="
+ },
+ "forever-agent": {
+ "version": "0.6.1",
+ "resolved": "https://registry.npmjs.org/forever-agent/-/forever-agent-0.6.1.tgz",
+ "integrity": "sha1-+8cfDEGt6zf5bFd60e1C2P2sypE="
+ },
+ "form-data": {
+ "version": "2.3.3",
+ "resolved": "https://registry.npmjs.org/form-data/-/form-data-2.3.3.tgz",
+ "integrity": "sha512-1lLKB2Mu3aGP1Q/2eCOx0fNbRMe7XdwktwOruhfqqd0rIJWwN4Dh+E3hrPSlDCXnSR7UtZ1N38rVXm+6+MEhJQ==",
+ "requires": {
+ "asynckit": "^0.4.0",
+ "combined-stream": "^1.0.6",
+ "mime-types": "^2.1.12"
+ }
+ },
+ "forwarded": {
+ "version": "0.1.2",
+ "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.1.2.tgz",
+ "integrity": "sha1-mMI9qxF1ZXuMBXPozszZGw/xjIQ="
+ },
+ "fragment-cache": {
+ "version": "0.2.1",
+ "resolved": "https://registry.npmjs.org/fragment-cache/-/fragment-cache-0.2.1.tgz",
+ "integrity": "sha1-QpD60n8T6Jvn8zeZxrxaCr//DRk=",
+ "requires": {
+ "map-cache": "^0.2.2"
+ }
+ },
+ "fresh": {
+ "version": "0.5.2",
+ "resolved": "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz",
+ "integrity": "sha1-PYyt2Q2XZWn6g1qx+OSyOhBWBac="
+ },
+ "from2": {
+ "version": "2.3.0",
+ "resolved": "https://registry.npmjs.org/from2/-/from2-2.3.0.tgz",
+ "integrity": "sha1-i/tVAr3kpNNs/e6gB/zKIdfjgq8=",
+ "requires": {
+ "inherits": "^2.0.1",
+ "readable-stream": "^2.0.0"
+ }
+ },
+ "fs-extra": {
+ "version": "7.0.1",
+ "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-7.0.1.tgz",
+ "integrity": "sha512-YJDaCJZEnBmcbw13fvdAM9AwNOJwOzrE4pqMqBq5nFiEqXUqHwlK4B+3pUw6JNvfSPtX05xFHtYy/1ni01eGCw==",
+ "requires": {
+ "graceful-fs": "^4.1.2",
+ "jsonfile": "^4.0.0",
+ "universalify": "^0.1.0"
+ }
+ },
+ "fs-write-stream-atomic": {
+ "version": "1.0.10",
+ "resolved": "https://registry.npmjs.org/fs-write-stream-atomic/-/fs-write-stream-atomic-1.0.10.tgz",
+ "integrity": "sha1-tH31NJPvkR33VzHnCp3tAYnbQMk=",
+ "requires": {
+ "graceful-fs": "^4.1.2",
+ "iferr": "^0.1.5",
+ "imurmurhash": "^0.1.4",
+ "readable-stream": "1 || 2"
+ }
+ },
+ "fs.realpath": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz",
+ "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8="
+ },
+ "fsevents": {
+ "version": "1.2.13",
+ "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-1.2.13.tgz",
+ "integrity": "sha512-oWb1Z6mkHIskLzEJ/XWX0srkpkTQ7vaopMQkyaEIoq0fmtFVxOthb8cCxeT+p3ynTdkk/RZwbgG4brR5BeWECw==",
+ "optional": true,
+ "requires": {
+ "bindings": "^1.5.0",
+ "nan": "^2.12.1"
+ }
+ },
+ "function-bind": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz",
+ "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A=="
+ },
+ "fuse.js": {
+ "version": "3.6.1",
+ "resolved": "https://registry.npmjs.org/fuse.js/-/fuse.js-3.6.1.tgz",
+ "integrity": "sha512-hT9yh/tiinkmirKrlv4KWOjztdoZo1mx9Qh4KvWqC7isoXwdUY3PNWUxceF4/qO9R6riA2C29jdTOeQOIROjgw=="
+ },
+ "gensync": {
+ "version": "1.0.0-beta.2",
+ "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz",
+ "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg=="
+ },
+ "get-caller-file": {
+ "version": "2.0.5",
+ "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz",
+ "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg=="
+ },
+ "get-intrinsic": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.1.1.tgz",
+ "integrity": "sha512-kWZrnVM42QCiEA2Ig1bG8zjoIMOgxWwYCEeNdwY6Tv/cOSeGpcoX4pXHfKUxNKVoArnrEr2e9srnAxxGIraS9Q==",
+ "requires": {
+ "function-bind": "^1.1.1",
+ "has": "^1.0.3",
+ "has-symbols": "^1.0.1"
+ }
+ },
+ "get-stream": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-4.1.0.tgz",
+ "integrity": "sha512-GMat4EJ5161kIy2HevLlr4luNjBgvmj413KaQA7jt4V8B4RDsfpHk7WQ9GVqfYyyx8OS/L66Kox+rJRNklLK7w==",
+ "requires": {
+ "pump": "^3.0.0"
+ }
+ },
+ "get-value": {
+ "version": "2.0.6",
+ "resolved": "https://registry.npmjs.org/get-value/-/get-value-2.0.6.tgz",
+ "integrity": "sha1-3BXKHGcjh8p2vTesCjlbogQqLCg="
+ },
+ "getpass": {
+ "version": "0.1.7",
+ "resolved": "https://registry.npmjs.org/getpass/-/getpass-0.1.7.tgz",
+ "integrity": "sha1-Xv+OPmhNVprkyysSgmBOi6YhSfo=",
+ "requires": {
+ "assert-plus": "^1.0.0"
+ }
+ },
+ "glob": {
+ "version": "7.1.6",
+ "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.6.tgz",
+ "integrity": "sha512-LwaxwyZ72Lk7vZINtNNrywX0ZuLyStrdDtabefZKAY5ZGJhVtgdznluResxNmPitE0SAO+O26sWTHeKSI2wMBA==",
+ "requires": {
+ "fs.realpath": "^1.0.0",
+ "inflight": "^1.0.4",
+ "inherits": "2",
+ "minimatch": "^3.0.4",
+ "once": "^1.3.0",
+ "path-is-absolute": "^1.0.0"
+ }
+ },
+ "glob-parent": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-3.1.0.tgz",
+ "integrity": "sha1-nmr2KZ2NO9K9QEMIMr0RPfkGxa4=",
+ "requires": {
+ "is-glob": "^3.1.0",
+ "path-dirname": "^1.0.0"
+ },
+ "dependencies": {
+ "is-glob": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-3.1.0.tgz",
+ "integrity": "sha1-e6WuJCF4BKxwcHuWkiVnSGzD6Eo=",
+ "requires": {
+ "is-extglob": "^2.1.0"
+ }
+ }
+ }
+ },
+ "glob-to-regexp": {
+ "version": "0.3.0",
+ "resolved": "https://registry.npmjs.org/glob-to-regexp/-/glob-to-regexp-0.3.0.tgz",
+ "integrity": "sha1-jFoUlNIGbFcMw7/kSWF1rMTVAqs="
+ },
+ "global": {
+ "version": "4.4.0",
+ "resolved": "https://registry.npmjs.org/global/-/global-4.4.0.tgz",
+ "integrity": "sha512-wv/LAoHdRE3BeTGz53FAamhGlPLhlssK45usmGFThIi4XqnBmjKQ16u+RNbP7WvigRZDxUsM0J3gcQ5yicaL0w==",
+ "requires": {
+ "min-document": "^2.19.0",
+ "process": "^0.11.10"
+ }
+ },
+ "global-dirs": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/global-dirs/-/global-dirs-2.1.0.tgz",
+ "integrity": "sha512-MG6kdOUh/xBnyo9cJFeIKkLEc1AyFq42QTU4XiX51i2NEdxLxLWXIjEjmqKeSuKR7pAZjTqUVoT2b2huxVLgYQ==",
+ "requires": {
+ "ini": "1.3.7"
+ }
+ },
+ "globals": {
+ "version": "11.12.0",
+ "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz",
+ "integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA=="
+ },
+ "globby": {
+ "version": "9.2.0",
+ "resolved": "https://registry.npmjs.org/globby/-/globby-9.2.0.tgz",
+ "integrity": "sha512-ollPHROa5mcxDEkwg6bPt3QbEf4pDQSNtd6JPL1YvOvAo/7/0VAm9TccUeoTmarjPw4pfUthSCqcyfNB1I3ZSg==",
+ "requires": {
+ "@types/glob": "^7.1.1",
+ "array-union": "^1.0.2",
+ "dir-glob": "^2.2.2",
+ "fast-glob": "^2.2.6",
+ "glob": "^7.1.3",
+ "ignore": "^4.0.3",
+ "pify": "^4.0.1",
+ "slash": "^2.0.0"
+ }
+ },
+ "good-listener": {
+ "version": "1.2.2",
+ "resolved": "https://registry.npmjs.org/good-listener/-/good-listener-1.2.2.tgz",
+ "integrity": "sha1-1TswzfkxPf+33JoNR3CWqm0UXFA=",
+ "optional": true,
+ "requires": {
+ "delegate": "^3.1.2"
+ }
+ },
+ "got": {
+ "version": "9.6.0",
+ "resolved": "https://registry.npmjs.org/got/-/got-9.6.0.tgz",
+ "integrity": "sha512-R7eWptXuGYxwijs0eV+v3o6+XH1IqVK8dJOEecQfTmkncw9AV4dcw/Dhxi8MdlqPthxxpZyizMzyg8RTmEsG+Q==",
+ "requires": {
+ "@sindresorhus/is": "^0.14.0",
+ "@szmarczak/http-timer": "^1.1.2",
+ "cacheable-request": "^6.0.0",
+ "decompress-response": "^3.3.0",
+ "duplexer3": "^0.1.4",
+ "get-stream": "^4.1.0",
+ "lowercase-keys": "^1.0.1",
+ "mimic-response": "^1.0.1",
+ "p-cancelable": "^1.0.0",
+ "to-readable-stream": "^1.0.0",
+ "url-parse-lax": "^3.0.0"
+ }
+ },
+ "graceful-fs": {
+ "version": "4.2.6",
+ "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.6.tgz",
+ "integrity": "sha512-nTnJ528pbqxYanhpDYsi4Rd8MAeaBA67+RZ10CM1m3bTAVFEDcd5AuA4a6W5YkGZ1iNXHzZz8T6TBKLeBuNriQ=="
+ },
+ "gray-matter": {
+ "version": "4.0.3",
+ "resolved": "https://registry.npmjs.org/gray-matter/-/gray-matter-4.0.3.tgz",
+ "integrity": "sha512-5v6yZd4JK3eMI3FqqCouswVqwugaA9r4dNZB1wwcmrD02QkV5H0y7XBQW8QwQqEaZY1pM9aqORSORhJRdNK44Q==",
+ "requires": {
+ "js-yaml": "^3.13.1",
+ "kind-of": "^6.0.2",
+ "section-matter": "^1.0.0",
+ "strip-bom-string": "^1.0.0"
+ }
+ },
+ "handle-thing": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/handle-thing/-/handle-thing-2.0.1.tgz",
+ "integrity": "sha512-9Qn4yBxelxoh2Ow62nP+Ka/kMnOXRi8BXnRaUwezLNhqelnN49xKz4F/dPP8OYLxLxq6JDtZb2i9XznUQbNPTg=="
+ },
+ "har-schema": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/har-schema/-/har-schema-2.0.0.tgz",
+ "integrity": "sha1-qUwiJOvKwEeCoNkDVSHyRzW37JI="
+ },
+ "har-validator": {
+ "version": "5.1.5",
+ "resolved": "https://registry.npmjs.org/har-validator/-/har-validator-5.1.5.tgz",
+ "integrity": "sha512-nmT2T0lljbxdQZfspsno9hgrG3Uir6Ks5afism62poxqBM6sDnMEuPmzTq8XN0OEwqKLLdh1jQI3qyE66Nzb3w==",
+ "requires": {
+ "ajv": "^6.12.3",
+ "har-schema": "^2.0.0"
+ }
+ },
+ "has": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/has/-/has-1.0.3.tgz",
+ "integrity": "sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==",
+ "requires": {
+ "function-bind": "^1.1.1"
+ }
+ },
+ "has-ansi": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/has-ansi/-/has-ansi-2.0.0.tgz",
+ "integrity": "sha1-NPUEnOHs3ysGSa8+8k5F7TVBbZE=",
+ "requires": {
+ "ansi-regex": "^2.0.0"
+ },
+ "dependencies": {
+ "ansi-regex": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-2.1.1.tgz",
+ "integrity": "sha1-w7M6te42DYbg5ijwRorn7yfWVN8="
+ }
+ }
+ },
+ "has-bigints": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/has-bigints/-/has-bigints-1.0.1.tgz",
+ "integrity": "sha512-LSBS2LjbNBTf6287JEbEzvJgftkF5qFkmCo9hDRpAzKhUOlJ+hx8dd4USs00SgsUNwc4617J9ki5YtEClM2ffA=="
+ },
+ "has-flag": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz",
+ "integrity": "sha1-tdRU3CGZriJWmfNGfloH87lVuv0="
+ },
+ "has-symbols": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.2.tgz",
+ "integrity": "sha512-chXa79rL/UC2KlX17jo3vRGz0azaWEx5tGqZg5pO3NUyEJVB17dMruQlzCCOfUvElghKcm5194+BCRvi2Rv/Gw=="
+ },
+ "has-value": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/has-value/-/has-value-1.0.0.tgz",
+ "integrity": "sha1-GLKB2lhbHFxR3vJMkw7SmgvmsXc=",
+ "requires": {
+ "get-value": "^2.0.6",
+ "has-values": "^1.0.0",
+ "isobject": "^3.0.0"
+ }
+ },
+ "has-values": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/has-values/-/has-values-1.0.0.tgz",
+ "integrity": "sha1-lbC2P+whRmGab+V/51Yo1aOe/k8=",
+ "requires": {
+ "is-number": "^3.0.0",
+ "kind-of": "^4.0.0"
+ },
+ "dependencies": {
+ "kind-of": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-4.0.0.tgz",
+ "integrity": "sha1-IIE989cSkosgc3hpGkUGb65y3Vc=",
+ "requires": {
+ "is-buffer": "^1.1.5"
+ }
+ }
+ }
+ },
+ "has-yarn": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/has-yarn/-/has-yarn-2.1.0.tgz",
+ "integrity": "sha512-UqBRqi4ju7T+TqGNdqAO0PaSVGsDGJUBQvk9eUWNGRY1CFGDzYhLWoM7JQEemnlvVcv/YEmc2wNW8BC24EnUsw=="
+ },
+ "hash-base": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/hash-base/-/hash-base-3.1.0.tgz",
+ "integrity": "sha512-1nmYp/rhMDiE7AYkDw+lLwlAzz0AntGIe51F3RfFfEqyQ3feY2eI/NcwC6umIQVOASPMsWJLJScWKSSvzL9IVA==",
+ "requires": {
+ "inherits": "^2.0.4",
+ "readable-stream": "^3.6.0",
+ "safe-buffer": "^5.2.0"
+ },
+ "dependencies": {
+ "readable-stream": {
+ "version": "3.6.0",
+ "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.0.tgz",
+ "integrity": "sha512-BViHy7LKeTz4oNnkcLJ+lVSL6vpiFeX6/d3oSH8zCW7UxP2onchk+vTGB143xuFjHS3deTgkKoXXymXqymiIdA==",
+ "requires": {
+ "inherits": "^2.0.3",
+ "string_decoder": "^1.1.1",
+ "util-deprecate": "^1.0.1"
+ }
+ },
+ "safe-buffer": {
+ "version": "5.2.1",
+ "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz",
+ "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ=="
+ }
+ }
+ },
+ "hash-sum": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/hash-sum/-/hash-sum-1.0.2.tgz",
+ "integrity": "sha1-M7QHd3VMZDJXPBIMw4CLvRDUfwQ="
+ },
+ "hash.js": {
+ "version": "1.1.7",
+ "resolved": "https://registry.npmjs.org/hash.js/-/hash.js-1.1.7.tgz",
+ "integrity": "sha512-taOaskGt4z4SOANNseOviYDvjEJinIkRgmp7LbKP2YTTmVxWBl87s/uzK9r+44BclBSp2X7K1hqeNfz9JbBeXA==",
+ "requires": {
+ "inherits": "^2.0.3",
+ "minimalistic-assert": "^1.0.1"
+ }
+ },
+ "he": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/he/-/he-1.2.0.tgz",
+ "integrity": "sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw=="
+ },
+ "hex-color-regex": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/hex-color-regex/-/hex-color-regex-1.1.0.tgz",
+ "integrity": "sha512-l9sfDFsuqtOqKDsQdqrMRk0U85RZc0RtOR9yPI7mRVOa4FsR/BVnZ0shmQRM96Ji99kYZP/7hn1cedc1+ApsTQ=="
+ },
+ "hmac-drbg": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/hmac-drbg/-/hmac-drbg-1.0.1.tgz",
+ "integrity": "sha1-0nRXAQJabHdabFRXk+1QL8DGSaE=",
+ "requires": {
+ "hash.js": "^1.0.3",
+ "minimalistic-assert": "^1.0.0",
+ "minimalistic-crypto-utils": "^1.0.1"
+ }
+ },
+ "hogan.js": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/hogan.js/-/hogan.js-3.0.2.tgz",
+ "integrity": "sha1-TNnhq9QpQUbnZ55B14mHMrAse/0=",
+ "requires": {
+ "mkdirp": "0.3.0",
+ "nopt": "1.0.10"
+ },
+ "dependencies": {
+ "mkdirp": {
+ "version": "0.3.0",
+ "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.3.0.tgz",
+ "integrity": "sha1-G79asbqCevI1dRQ0kEJkVfSB/h4="
+ }
+ }
+ },
+ "hotkeys-js": {
+ "version": "3.8.1",
+ "resolved": "https://registry.npmjs.org/hotkeys-js/-/hotkeys-js-3.8.1.tgz",
+ "integrity": "sha512-YlhVQtyG9f1b7GhtzdhR0Pl+cImD1ZrKI6zYUa7QLd0zuThiL7RzZ+ANJyy7z+kmcCpNYBf5PjBa3CjiQ5PFpw=="
+ },
+ "hpack.js": {
+ "version": "2.1.6",
+ "resolved": "https://registry.npmjs.org/hpack.js/-/hpack.js-2.1.6.tgz",
+ "integrity": "sha1-h3dMCUnlE/QuhFdbPEVoH63ioLI=",
+ "requires": {
+ "inherits": "^2.0.1",
+ "obuf": "^1.0.0",
+ "readable-stream": "^2.0.1",
+ "wbuf": "^1.1.0"
+ }
+ },
+ "hsl-regex": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/hsl-regex/-/hsl-regex-1.0.0.tgz",
+ "integrity": "sha1-1JMwx4ntgZ4nakwNJy3/owsY/m4="
+ },
+ "hsla-regex": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/hsla-regex/-/hsla-regex-1.0.0.tgz",
+ "integrity": "sha1-wc56MWjIxmFAM6S194d/OyJfnDg="
+ },
+ "html-entities": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/html-entities/-/html-entities-1.4.0.tgz",
+ "integrity": "sha512-8nxjcBcd8wovbeKx7h3wTji4e6+rhaVuPNpMqwWgnHh+N9ToqsCs6XztWRBPQ+UtzsoMAdKZtUENoVzU/EMtZA=="
+ },
+ "html-minifier": {
+ "version": "3.5.21",
+ "resolved": "https://registry.npmjs.org/html-minifier/-/html-minifier-3.5.21.tgz",
+ "integrity": "sha512-LKUKwuJDhxNa3uf/LPR/KVjm/l3rBqtYeCOAekvG8F1vItxMUpueGd94i/asDDr8/1u7InxzFA5EeGjhhG5mMA==",
+ "requires": {
+ "camel-case": "3.0.x",
+ "clean-css": "4.2.x",
+ "commander": "2.17.x",
+ "he": "1.2.x",
+ "param-case": "2.1.x",
+ "relateurl": "0.2.x",
+ "uglify-js": "3.4.x"
+ },
+ "dependencies": {
+ "commander": {
+ "version": "2.17.1",
+ "resolved": "https://registry.npmjs.org/commander/-/commander-2.17.1.tgz",
+ "integrity": "sha512-wPMUt6FnH2yzG95SA6mzjQOEKUU3aLaDEmzs1ti+1E9h+CsrZghRlqEM/EJ4KscsQVG8uNN4uVreUeT8+drlgg=="
+ }
+ }
+ },
+ "html-tags": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/html-tags/-/html-tags-3.1.0.tgz",
+ "integrity": "sha512-1qYz89hW3lFDEazhjW0yVAV87lw8lVkrJocr72XmBkMKsoSVJCQx3W8BXsC7hO2qAt8BoVjYjtAcZ9perqGnNg=="
+ },
+ "htmlparser2": {
+ "version": "6.1.0",
+ "resolved": "https://registry.npmjs.org/htmlparser2/-/htmlparser2-6.1.0.tgz",
+ "integrity": "sha512-gyyPk6rgonLFEDGoeRgQNaEUvdJ4ktTmmUh/h2t7s+M8oPpIPxgNACWa+6ESR57kXstwqPiCut0V8NRpcwgU7A==",
+ "requires": {
+ "domelementtype": "^2.0.1",
+ "domhandler": "^4.0.0",
+ "domutils": "^2.5.2",
+ "entities": "^2.0.0"
+ }
+ },
+ "http-cache-semantics": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/http-cache-semantics/-/http-cache-semantics-4.1.0.tgz",
+ "integrity": "sha512-carPklcUh7ROWRK7Cv27RPtdhYhUsela/ue5/jKzjegVvXDqM2ILE9Q2BGn9JZJh1g87cp56su/FgQSzcWS8cQ=="
+ },
+ "http-deceiver": {
+ "version": "1.2.7",
+ "resolved": "https://registry.npmjs.org/http-deceiver/-/http-deceiver-1.2.7.tgz",
+ "integrity": "sha1-+nFolEq5pRnTN8sL7HKE3D5yPYc="
+ },
+ "http-errors": {
+ "version": "1.7.2",
+ "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-1.7.2.tgz",
+ "integrity": "sha512-uUQBt3H/cSIVfch6i1EuPNy/YsRSOUBXTVfZ+yR7Zjez3qjBz6i9+i4zjNaoqcoFVI4lQJ5plg63TvGfRSDCRg==",
+ "requires": {
+ "depd": "~1.1.2",
+ "inherits": "2.0.3",
+ "setprototypeof": "1.1.1",
+ "statuses": ">= 1.5.0 < 2",
+ "toidentifier": "1.0.0"
+ },
+ "dependencies": {
+ "inherits": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz",
+ "integrity": "sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4="
+ }
+ }
+ },
+ "http-parser-js": {
+ "version": "0.5.3",
+ "resolved": "https://registry.npmjs.org/http-parser-js/-/http-parser-js-0.5.3.tgz",
+ "integrity": "sha512-t7hjvef/5HEK7RWTdUzVUhl8zkEu+LlaE0IYzdMuvbSDipxBRpOn4Uhw8ZyECEa808iVT8XCjzo6xmYt4CiLZg=="
+ },
+ "http-proxy": {
+ "version": "1.18.1",
+ "resolved": "https://registry.npmjs.org/http-proxy/-/http-proxy-1.18.1.tgz",
+ "integrity": "sha512-7mz/721AbnJwIVbnaSv1Cz3Am0ZLT/UBwkC92VlxhXv/k/BBQfM2fXElQNC27BVGr0uwUpplYPQM9LnaBMR5NQ==",
+ "requires": {
+ "eventemitter3": "^4.0.0",
+ "follow-redirects": "^1.0.0",
+ "requires-port": "^1.0.0"
+ }
+ },
+ "http-proxy-middleware": {
+ "version": "0.19.1",
+ "resolved": "https://registry.npmjs.org/http-proxy-middleware/-/http-proxy-middleware-0.19.1.tgz",
+ "integrity": "sha512-yHYTgWMQO8VvwNS22eLLloAkvungsKdKTLO8AJlftYIKNfJr3GK3zK0ZCfzDDGUBttdGc8xFy1mCitvNKQtC3Q==",
+ "requires": {
+ "http-proxy": "^1.17.0",
+ "is-glob": "^4.0.0",
+ "lodash": "^4.17.11",
+ "micromatch": "^3.1.10"
+ }
+ },
+ "http-signature": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/http-signature/-/http-signature-1.2.0.tgz",
+ "integrity": "sha1-muzZJRFHcvPZW2WmCruPfBj7rOE=",
+ "requires": {
+ "assert-plus": "^1.0.0",
+ "jsprim": "^1.2.2",
+ "sshpk": "^1.7.0"
+ }
+ },
+ "https-browserify": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/https-browserify/-/https-browserify-1.0.0.tgz",
+ "integrity": "sha1-7AbBDgo0wPL68Zn3/X/Hj//QPHM="
+ },
+ "iconv-lite": {
+ "version": "0.4.24",
+ "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz",
+ "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==",
+ "requires": {
+ "safer-buffer": ">= 2.1.2 < 3"
+ }
+ },
+ "icss-replace-symbols": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/icss-replace-symbols/-/icss-replace-symbols-1.1.0.tgz",
+ "integrity": "sha1-Bupvg2ead0njhs/h/oEq5dsiPe0="
+ },
+ "icss-utils": {
+ "version": "4.1.1",
+ "resolved": "https://registry.npmjs.org/icss-utils/-/icss-utils-4.1.1.tgz",
+ "integrity": "sha512-4aFq7wvWyMHKgxsH8QQtGpvbASCf+eM3wPRLI6R+MgAnTCZ6STYsRvttLvRWK0Nfif5piF394St3HeJDaljGPA==",
+ "requires": {
+ "postcss": "^7.0.14"
+ }
+ },
+ "ieee754": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.2.1.tgz",
+ "integrity": "sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA=="
+ },
+ "iferr": {
+ "version": "0.1.5",
+ "resolved": "https://registry.npmjs.org/iferr/-/iferr-0.1.5.tgz",
+ "integrity": "sha1-xg7taebY/bazEEofy8ocGS3FtQE="
+ },
+ "ignore": {
+ "version": "4.0.6",
+ "resolved": "https://registry.npmjs.org/ignore/-/ignore-4.0.6.tgz",
+ "integrity": "sha512-cyFDKrqc/YdcWFniJhzI42+AzS+gNwmUzOSFcRCQYwySuBBBy/KjuxWLZ/FHEH6Moq1NizMOBWyTcv8O4OZIMg=="
+ },
+ "immediate": {
+ "version": "3.3.0",
+ "resolved": "https://registry.npmjs.org/immediate/-/immediate-3.3.0.tgz",
+ "integrity": "sha512-HR7EVodfFUdQCTIeySw+WDRFJlPcLOJbXfwwZ7Oom6tjsvZ3bOkCDJHehQC3nxJrv7+f9XecwazynjU8e4Vw3Q=="
+ },
+ "import-cwd": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/import-cwd/-/import-cwd-2.1.0.tgz",
+ "integrity": "sha1-qmzzbnInYShcs3HsZRn1PiQ1sKk=",
+ "requires": {
+ "import-from": "^2.1.0"
+ }
+ },
+ "import-fresh": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-2.0.0.tgz",
+ "integrity": "sha1-2BNVwVYS04bGH53dOSLUMEgipUY=",
+ "requires": {
+ "caller-path": "^2.0.0",
+ "resolve-from": "^3.0.0"
+ }
+ },
+ "import-from": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/import-from/-/import-from-2.1.0.tgz",
+ "integrity": "sha1-M1238qev/VOqpHHUuAId7ja387E=",
+ "requires": {
+ "resolve-from": "^3.0.0"
+ }
+ },
+ "import-lazy": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/import-lazy/-/import-lazy-2.1.0.tgz",
+ "integrity": "sha1-BWmOPUXIjo1+nZLLBYTnfwlvPkM="
+ },
+ "import-local": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/import-local/-/import-local-2.0.0.tgz",
+ "integrity": "sha512-b6s04m3O+s3CGSbqDIyP4R6aAwAeYlVq9+WUWep6iHa8ETRf9yei1U48C5MmfJmV9AiLYYBKPMq/W+/WRpQmCQ==",
+ "requires": {
+ "pkg-dir": "^3.0.0",
+ "resolve-cwd": "^2.0.0"
+ },
+ "dependencies": {
+ "find-up": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz",
+ "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==",
+ "requires": {
+ "locate-path": "^3.0.0"
+ }
+ },
+ "locate-path": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz",
+ "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==",
+ "requires": {
+ "p-locate": "^3.0.0",
+ "path-exists": "^3.0.0"
+ }
+ },
+ "p-locate": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz",
+ "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==",
+ "requires": {
+ "p-limit": "^2.0.0"
+ }
+ },
+ "path-exists": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz",
+ "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU="
+ },
+ "pkg-dir": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-3.0.0.tgz",
+ "integrity": "sha512-/E57AYkoeQ25qkxMj5PBOVgF8Kiu/h7cYS30Z5+R7WaiCCBfLq58ZI/dSeaEKb9WVJV5n/03QwrN3IeWIFllvw==",
+ "requires": {
+ "find-up": "^3.0.0"
+ }
+ }
+ }
+ },
+ "imurmurhash": {
+ "version": "0.1.4",
+ "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz",
+ "integrity": "sha1-khi5srkoojixPcT7a21XbyMUU+o="
+ },
+ "indexes-of": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/indexes-of/-/indexes-of-1.0.1.tgz",
+ "integrity": "sha1-8w9xbI4r00bHtn0985FVZqfAVgc="
+ },
+ "infer-owner": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/infer-owner/-/infer-owner-1.0.4.tgz",
+ "integrity": "sha512-IClj+Xz94+d7irH5qRyfJonOdfTzuDaifE6ZPWfx0N0+/ATZCbuTPq2prFl526urkQd90WyUKIh1DfBQ2hMz9A=="
+ },
+ "inflight": {
+ "version": "1.0.6",
+ "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz",
+ "integrity": "sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk=",
+ "requires": {
+ "once": "^1.3.0",
+ "wrappy": "1"
+ }
+ },
+ "inherits": {
+ "version": "2.0.4",
+ "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz",
+ "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ=="
+ },
+ "ini": {
+ "version": "1.3.7",
+ "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.7.tgz",
+ "integrity": "sha512-iKpRpXP+CrP2jyrxvg1kMUpXDyRUFDWurxbnVT1vQPx+Wz9uCYsMIqYuSBLV+PAaZG/d7kRLKRFc9oDMsH+mFQ=="
+ },
+ "internal-ip": {
+ "version": "4.3.0",
+ "resolved": "https://registry.npmjs.org/internal-ip/-/internal-ip-4.3.0.tgz",
+ "integrity": "sha512-S1zBo1D6zcsyuC6PMmY5+55YMILQ9av8lotMx447Bq6SAgo/sDK6y6uUKmuYhW7eacnIhFfsPmCNYdDzsnnDCg==",
+ "requires": {
+ "default-gateway": "^4.2.0",
+ "ipaddr.js": "^1.9.0"
+ }
+ },
+ "ip": {
+ "version": "1.1.5",
+ "resolved": "https://registry.npmjs.org/ip/-/ip-1.1.5.tgz",
+ "integrity": "sha1-vd7XARQpCCjAoDnnLvJfWq7ENUo="
+ },
+ "ip-regex": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/ip-regex/-/ip-regex-2.1.0.tgz",
+ "integrity": "sha1-+ni/XS5pE8kRzp+BnuUUa7bYROk="
+ },
+ "ipaddr.js": {
+ "version": "1.9.1",
+ "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.1.tgz",
+ "integrity": "sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g=="
+ },
+ "is-absolute-url": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/is-absolute-url/-/is-absolute-url-2.1.0.tgz",
+ "integrity": "sha1-UFMN+4T8yap9vnhS6Do3uTufKqY="
+ },
+ "is-accessor-descriptor": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz",
+ "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==",
+ "requires": {
+ "kind-of": "^6.0.0"
+ }
+ },
+ "is-arguments": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/is-arguments/-/is-arguments-1.1.0.tgz",
+ "integrity": "sha512-1Ij4lOMPl/xB5kBDn7I+b2ttPMKa8szhEIrXDuXQD/oe3HJLTLhqhgGspwgyGd6MOywBUqVvYicF72lkgDnIHg==",
+ "requires": {
+ "call-bind": "^1.0.0"
+ }
+ },
+ "is-arrayish": {
+ "version": "0.2.1",
+ "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz",
+ "integrity": "sha1-d8mYQFJ6qOyxqLppe4BkWnqSap0="
+ },
+ "is-bigint": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/is-bigint/-/is-bigint-1.0.1.tgz",
+ "integrity": "sha512-J0ELF4yHFxHy0cmSxZuheDOz2luOdVvqjwmEcj8H/L1JHeuEDSDbeRP+Dk9kFVk5RTFzbucJ2Kb9F7ixY2QaCg=="
+ },
+ "is-binary-path": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-1.0.1.tgz",
+ "integrity": "sha1-dfFmQrSA8YenEcgUFh/TpKdlWJg=",
+ "requires": {
+ "binary-extensions": "^1.0.0"
+ }
+ },
+ "is-boolean-object": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/is-boolean-object/-/is-boolean-object-1.1.0.tgz",
+ "integrity": "sha512-a7Uprx8UtD+HWdyYwnD1+ExtTgqQtD2k/1yJgtXP6wnMm8byhkoTZRl+95LLThpzNZJ5aEvi46cdH+ayMFRwmA==",
+ "requires": {
+ "call-bind": "^1.0.0"
+ }
+ },
+ "is-buffer": {
+ "version": "1.1.6",
+ "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-1.1.6.tgz",
+ "integrity": "sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w=="
+ },
+ "is-callable": {
+ "version": "1.2.3",
+ "resolved": "https://registry.npmjs.org/is-callable/-/is-callable-1.2.3.tgz",
+ "integrity": "sha512-J1DcMe8UYTBSrKezuIUTUwjXsho29693unXM2YhJUTR2txK/eG47bvNa/wipPFmZFgr/N6f1GA66dv0mEyTIyQ=="
+ },
+ "is-ci": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/is-ci/-/is-ci-2.0.0.tgz",
+ "integrity": "sha512-YfJT7rkpQB0updsdHLGWrvhBJfcfzNNawYDNIyQXJz0IViGf75O8EBPKSdvw2rF+LGCsX4FZ8tcr3b19LcZq4w==",
+ "requires": {
+ "ci-info": "^2.0.0"
+ }
+ },
+ "is-color-stop": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/is-color-stop/-/is-color-stop-1.1.0.tgz",
+ "integrity": "sha1-z/9HGu5N1cnhWFmPvhKWe1za00U=",
+ "requires": {
+ "css-color-names": "^0.0.4",
+ "hex-color-regex": "^1.1.0",
+ "hsl-regex": "^1.0.0",
+ "hsla-regex": "^1.0.0",
+ "rgb-regex": "^1.0.1",
+ "rgba-regex": "^1.0.0"
+ }
+ },
+ "is-core-module": {
+ "version": "2.3.0",
+ "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.3.0.tgz",
+ "integrity": "sha512-xSphU2KG9867tsYdLD4RWQ1VqdFl4HTO9Thf3I/3dLEfr0dbPTWKsuCKrgqMljg4nPE+Gq0VCnzT3gr0CyBmsw==",
+ "requires": {
+ "has": "^1.0.3"
+ }
+ },
+ "is-data-descriptor": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz",
+ "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==",
+ "requires": {
+ "kind-of": "^6.0.0"
+ }
+ },
+ "is-date-object": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/is-date-object/-/is-date-object-1.0.2.tgz",
+ "integrity": "sha512-USlDT524woQ08aoZFzh3/Z6ch9Y/EWXEHQ/AaRN0SkKq4t2Jw2R2339tSXmwuVoY7LLlBCbOIlx2myP/L5zk0g=="
+ },
+ "is-descriptor": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz",
+ "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==",
+ "requires": {
+ "is-accessor-descriptor": "^1.0.0",
+ "is-data-descriptor": "^1.0.0",
+ "kind-of": "^6.0.2"
+ }
+ },
+ "is-directory": {
+ "version": "0.3.1",
+ "resolved": "https://registry.npmjs.org/is-directory/-/is-directory-0.3.1.tgz",
+ "integrity": "sha1-YTObbyR1/Hcv2cnYP1yFddwVSuE="
+ },
+ "is-expression": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/is-expression/-/is-expression-4.0.0.tgz",
+ "integrity": "sha512-zMIXX63sxzG3XrkHkrAPvm/OVZVSCPNkwMHU8oTX7/U3AL78I0QXCEICXUM13BIa8TYGZ68PiTKfQz3yaTNr4A==",
+ "requires": {
+ "acorn": "^7.1.1",
+ "object-assign": "^4.1.1"
+ }
+ },
+ "is-extendable": {
+ "version": "0.1.1",
+ "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-0.1.1.tgz",
+ "integrity": "sha1-YrEQ4omkcUGOPsNqYX1HLjAd/Ik="
+ },
+ "is-extglob": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz",
+ "integrity": "sha1-qIwCU1eR8C7TfHahueqXc8gz+MI="
+ },
+ "is-fullwidth-code-point": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz",
+ "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg=="
+ },
+ "is-glob": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.1.tgz",
+ "integrity": "sha512-5G0tKtBTFImOqDnLB2hG6Bp2qcKEFduo4tZu9MT/H6NQv/ghhy30o55ufafxJ/LdH79LLs2Kfrn85TLKyA7BUg==",
+ "requires": {
+ "is-extglob": "^2.1.1"
+ }
+ },
+ "is-installed-globally": {
+ "version": "0.3.2",
+ "resolved": "https://registry.npmjs.org/is-installed-globally/-/is-installed-globally-0.3.2.tgz",
+ "integrity": "sha512-wZ8x1js7Ia0kecP/CHM/3ABkAmujX7WPvQk6uu3Fly/Mk44pySulQpnHG46OMjHGXApINnV4QhY3SWnECO2z5g==",
+ "requires": {
+ "global-dirs": "^2.0.1",
+ "is-path-inside": "^3.0.1"
+ }
+ },
+ "is-negative-zero": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/is-negative-zero/-/is-negative-zero-2.0.1.tgz",
+ "integrity": "sha512-2z6JzQvZRa9A2Y7xC6dQQm4FSTSTNWjKIYYTt4246eMTJmIo0Q+ZyOsU66X8lxK1AbB92dFeglPLrhwpeRKO6w=="
+ },
+ "is-npm": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/is-npm/-/is-npm-4.0.0.tgz",
+ "integrity": "sha512-96ECIfh9xtDDlPylNPXhzjsykHsMJZ18ASpaWzQyBr4YRTcVjUvzaHayDAES2oU/3KpljhHUjtSRNiDwi0F0ig=="
+ },
+ "is-number": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/is-number/-/is-number-3.0.0.tgz",
+ "integrity": "sha1-JP1iAaR4LPUFYcgQJ2r8fRLXEZU=",
+ "requires": {
+ "kind-of": "^3.0.2"
+ },
+ "dependencies": {
+ "kind-of": {
+ "version": "3.2.2",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz",
+ "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=",
+ "requires": {
+ "is-buffer": "^1.1.5"
+ }
+ }
+ }
+ },
+ "is-number-object": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/is-number-object/-/is-number-object-1.0.4.tgz",
+ "integrity": "sha512-zohwelOAur+5uXtk8O3GPQ1eAcu4ZX3UwxQhUlfFFMNpUd83gXgjbhJh6HmB6LUNV/ieOLQuDwJO3dWJosUeMw=="
+ },
+ "is-obj": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/is-obj/-/is-obj-2.0.0.tgz",
+ "integrity": "sha512-drqDG3cbczxxEJRoOXcOjtdp1J/lyp1mNn0xaznRs8+muBhgQcrnbspox5X5fOw0HnMnbfDzvnEMEtqDEJEo8w=="
+ },
+ "is-path-cwd": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/is-path-cwd/-/is-path-cwd-2.2.0.tgz",
+ "integrity": "sha512-w942bTcih8fdJPJmQHFzkS76NEP8Kzzvmw92cXsazb8intwLqPibPPdXf4ANdKV3rYMuuQYGIWtvz9JilB3NFQ=="
+ },
+ "is-path-in-cwd": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/is-path-in-cwd/-/is-path-in-cwd-2.1.0.tgz",
+ "integrity": "sha512-rNocXHgipO+rvnP6dk3zI20RpOtrAM/kzbB258Uw5BWr3TpXi861yzjo16Dn4hUox07iw5AyeMLHWsujkjzvRQ==",
+ "requires": {
+ "is-path-inside": "^2.1.0"
+ },
+ "dependencies": {
+ "is-path-inside": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-2.1.0.tgz",
+ "integrity": "sha512-wiyhTzfDWsvwAW53OBWF5zuvaOGlZ6PwYxAbPVDhpm+gM09xKQGjBq/8uYN12aDvMxnAnq3dxTyoSoRNmg5YFg==",
+ "requires": {
+ "path-is-inside": "^1.0.2"
+ }
+ }
+ }
+ },
+ "is-path-inside": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-3.0.3.tgz",
+ "integrity": "sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ=="
+ },
+ "is-plain-obj": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-1.1.0.tgz",
+ "integrity": "sha1-caUMhCnfync8kqOQpKA7OfzVHT4="
+ },
+ "is-plain-object": {
+ "version": "2.0.4",
+ "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-2.0.4.tgz",
+ "integrity": "sha512-h5PpgXkWitc38BBMYawTYMWJHFZJVnBquFE57xFpjB8pJFiF6gZ+bU+WyI/yqXiFR5mdLsgYNaPe8uao6Uv9Og==",
+ "requires": {
+ "isobject": "^3.0.1"
+ }
+ },
+ "is-promise": {
+ "version": "2.2.2",
+ "resolved": "https://registry.npmjs.org/is-promise/-/is-promise-2.2.2.tgz",
+ "integrity": "sha512-+lP4/6lKUBfQjZ2pdxThZvLUAafmZb8OAxFb8XXtiQmS35INgr85hdOGoEs124ez1FCnZJt6jau/T+alh58QFQ=="
+ },
+ "is-regex": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/is-regex/-/is-regex-1.1.2.tgz",
+ "integrity": "sha512-axvdhb5pdhEVThqJzYXwMlVuZwC+FF2DpcOhTS+y/8jVq4trxyPgfcwIxIKiyeuLlSQYKkmUaPQJ8ZE4yNKXDg==",
+ "requires": {
+ "call-bind": "^1.0.2",
+ "has-symbols": "^1.0.1"
+ }
+ },
+ "is-resolvable": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/is-resolvable/-/is-resolvable-1.1.0.tgz",
+ "integrity": "sha512-qgDYXFSR5WvEfuS5dMj6oTMEbrrSaM0CrFk2Yiq/gXnBvD9pMa2jGXxyhGLfvhZpuMZe18CJpFxAt3CRs42NMg=="
+ },
+ "is-stream": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-1.1.0.tgz",
+ "integrity": "sha1-EtSj3U5o4Lec6428hBc66A2RykQ="
+ },
+ "is-string": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/is-string/-/is-string-1.0.5.tgz",
+ "integrity": "sha512-buY6VNRjhQMiF1qWDouloZlQbRhDPCebwxSjxMjxgemYT46YMd2NR0/H+fBhEfWX4A/w9TBJ+ol+okqJKFE6vQ=="
+ },
+ "is-symbol": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/is-symbol/-/is-symbol-1.0.3.tgz",
+ "integrity": "sha512-OwijhaRSgqvhm/0ZdAcXNZt9lYdKFpcRDT5ULUuYXPoT794UNOdU+gpT6Rzo7b4V2HUl/op6GqY894AZwv9faQ==",
+ "requires": {
+ "has-symbols": "^1.0.1"
+ }
+ },
+ "is-typedarray": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/is-typedarray/-/is-typedarray-1.0.0.tgz",
+ "integrity": "sha1-5HnICFjfDBsR3dppQPlgEfzaSpo="
+ },
+ "is-windows": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/is-windows/-/is-windows-1.0.2.tgz",
+ "integrity": "sha512-eXK1UInq2bPmjyX6e3VHIzMLobc4J94i4AWn+Hpq3OU5KkrRC96OAcR3PRJ/pGu6m8TRnBHP9dkXQVsT/COVIA=="
+ },
+ "is-wsl": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/is-wsl/-/is-wsl-1.1.0.tgz",
+ "integrity": "sha1-HxbkqiKwTRM2tmGIpmrzxgDDpm0="
+ },
+ "is-yarn-global": {
+ "version": "0.3.0",
+ "resolved": "https://registry.npmjs.org/is-yarn-global/-/is-yarn-global-0.3.0.tgz",
+ "integrity": "sha512-VjSeb/lHmkoyd8ryPVIKvOCn4D1koMqY+vqyjjUfc3xyKtP4dYOxM44sZrnqQSzSds3xyOrUTLTC9LVCVgLngw=="
+ },
+ "isarray": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz",
+ "integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE="
+ },
+ "isexe": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz",
+ "integrity": "sha1-6PvzdNxVb/iUehDcsFctYz8s+hA="
+ },
+ "isobject": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz",
+ "integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8="
+ },
+ "isstream": {
+ "version": "0.1.2",
+ "resolved": "https://registry.npmjs.org/isstream/-/isstream-0.1.2.tgz",
+ "integrity": "sha1-R+Y/evVa+m+S4VAOaQ64uFKcCZo="
+ },
+ "javascript-stringify": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/javascript-stringify/-/javascript-stringify-2.1.0.tgz",
+ "integrity": "sha512-JVAfqNPTvNq3sB/VHQJAFxN/sPgKnsKrCwyRt15zwNCdrMMJDdcEOdubuy+DuJYYdm0ox1J4uzEuYKkN+9yhVg=="
+ },
+ "js-base64": {
+ "version": "2.6.4",
+ "resolved": "https://registry.npmjs.org/js-base64/-/js-base64-2.6.4.tgz",
+ "integrity": "sha512-pZe//GGmwJndub7ZghVHz7vjb2LgC1m8B07Au3eYqeqv9emhESByMXxaEgkUkEqJe87oBbSniGYoQNIBklc7IQ=="
+ },
+ "js-stringify": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/js-stringify/-/js-stringify-1.0.2.tgz",
+ "integrity": "sha1-Fzb939lyTyijaCrcYjCufk6Weds="
+ },
+ "js-tokens": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz",
+ "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ=="
+ },
+ "js-yaml": {
+ "version": "3.14.1",
+ "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz",
+ "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==",
+ "requires": {
+ "argparse": "^1.0.7",
+ "esprima": "^4.0.0"
+ }
+ },
+ "jsbn": {
+ "version": "0.1.1",
+ "resolved": "https://registry.npmjs.org/jsbn/-/jsbn-0.1.1.tgz",
+ "integrity": "sha1-peZUwuWi3rXyAdls77yoDA7y9RM="
+ },
+ "jsesc": {
+ "version": "2.5.2",
+ "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-2.5.2.tgz",
+ "integrity": "sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA=="
+ },
+ "json-buffer": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.0.tgz",
+ "integrity": "sha1-Wx85evx11ne96Lz8Dkfh+aPZqJg="
+ },
+ "json-parse-better-errors": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/json-parse-better-errors/-/json-parse-better-errors-1.0.2.tgz",
+ "integrity": "sha512-mrqyZKfX5EhL7hvqcV6WG1yYjnjeuYDzDhhcAAUrq8Po85NBQBJP+ZDUT75qZQ98IkUoBqdkExkukOU7Ts2wrw=="
+ },
+ "json-schema": {
+ "version": "0.2.3",
+ "resolved": "https://registry.npmjs.org/json-schema/-/json-schema-0.2.3.tgz",
+ "integrity": "sha1-tIDIkuWaLwWVTOcnvT8qTogvnhM="
+ },
+ "json-schema-traverse": {
+ "version": "0.4.1",
+ "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz",
+ "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg=="
+ },
+ "json-stringify-safe": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz",
+ "integrity": "sha1-Epai1Y/UXxmg9s4B1lcB4sc1tus="
+ },
+ "json3": {
+ "version": "3.3.3",
+ "resolved": "https://registry.npmjs.org/json3/-/json3-3.3.3.tgz",
+ "integrity": "sha512-c7/8mbUsKigAbLkD5B010BK4D9LZm7A1pNItkEwiUZRpIN66exu/e7YQWysGun+TRKaJp8MhemM+VkfWv42aCA=="
+ },
+ "json5": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/json5/-/json5-1.0.1.tgz",
+ "integrity": "sha512-aKS4WQjPenRxiQsC93MNfjx+nbF4PAdYzmd/1JIj8HYzqfbu86beTuNgXDzPknWk0n0uARlyewZo4s++ES36Ow==",
+ "requires": {
+ "minimist": "^1.2.0"
+ }
+ },
+ "jsonfile": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-4.0.0.tgz",
+ "integrity": "sha1-h3Gq4HmbZAdrdmQPygWPnBDjPss=",
+ "requires": {
+ "graceful-fs": "^4.1.6"
+ }
+ },
+ "jsonp": {
+ "version": "0.2.1",
+ "resolved": "https://registry.npmjs.org/jsonp/-/jsonp-0.2.1.tgz",
+ "integrity": "sha1-pltPoPEL2nGaBUQep7lMVfPhW64=",
+ "requires": {
+ "debug": "^2.1.3"
+ }
+ },
+ "jsprim": {
+ "version": "1.4.1",
+ "resolved": "https://registry.npmjs.org/jsprim/-/jsprim-1.4.1.tgz",
+ "integrity": "sha1-MT5mvB5cwG5Di8G3SZwuXFastqI=",
+ "requires": {
+ "assert-plus": "1.0.0",
+ "extsprintf": "1.3.0",
+ "json-schema": "0.2.3",
+ "verror": "1.10.0"
+ }
+ },
+ "jstransformer": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/jstransformer/-/jstransformer-1.0.0.tgz",
+ "integrity": "sha1-7Yvwkh4vPx7U1cGkT2hwntJHIsM=",
+ "requires": {
+ "is-promise": "^2.0.0",
+ "promise": "^7.0.1"
+ }
+ },
+ "keyv": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/keyv/-/keyv-3.1.0.tgz",
+ "integrity": "sha512-9ykJ/46SN/9KPM/sichzQ7OvXyGDYKGTaDlKMGCAlg2UK8KRy4jb0d8sFc+0Tt0YYnThq8X2RZgCg74RPxgcVA==",
+ "requires": {
+ "json-buffer": "3.0.0"
+ }
+ },
+ "killable": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/killable/-/killable-1.0.1.tgz",
+ "integrity": "sha512-LzqtLKlUwirEUyl/nicirVmNiPvYs7l5n8wOPP7fyJVpUPkvCnW/vuiXGpylGUlnPDnB7311rARzAt3Mhswpjg=="
+ },
+ "kind-of": {
+ "version": "6.0.3",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz",
+ "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw=="
+ },
+ "last-call-webpack-plugin": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/last-call-webpack-plugin/-/last-call-webpack-plugin-3.0.0.tgz",
+ "integrity": "sha512-7KI2l2GIZa9p2spzPIVZBYyNKkN+e/SQPpnjlTiPhdbDW3F86tdKKELxKpzJ5sgU19wQWsACULZmpTPYHeWO5w==",
+ "requires": {
+ "lodash": "^4.17.5",
+ "webpack-sources": "^1.1.0"
+ }
+ },
+ "latest-version": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/latest-version/-/latest-version-5.1.0.tgz",
+ "integrity": "sha512-weT+r0kTkRQdCdYCNtkMwWXQTMEswKrFBkm4ckQOMVhhqhIMI1UT2hMj+1iigIhgSZm5gTmrRXBNoGUgaTY1xA==",
+ "requires": {
+ "package-json": "^6.3.0"
+ }
+ },
+ "linkify-it": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/linkify-it/-/linkify-it-3.0.2.tgz",
+ "integrity": "sha512-gDBO4aHNZS6coiZCKVhSNh43F9ioIL4JwRjLZPkoLIY4yZFwg264Y5lu2x6rb1Js42Gh6Yqm2f6L2AJcnkzinQ==",
+ "requires": {
+ "uc.micro": "^1.0.1"
+ }
+ },
+ "load-script": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/load-script/-/load-script-1.0.0.tgz",
+ "integrity": "sha1-BJGTngvuVkPuSUp+PaPSuscMbKQ="
+ },
+ "loader-runner": {
+ "version": "2.4.0",
+ "resolved": "https://registry.npmjs.org/loader-runner/-/loader-runner-2.4.0.tgz",
+ "integrity": "sha512-Jsmr89RcXGIwivFY21FcRrisYZfvLMTWx5kOLc+JTxtpBOG6xML0vzbc6SEQG2FO9/4Fc3wW4LVcB5DmGflaRw=="
+ },
+ "loader-utils": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/loader-utils/-/loader-utils-1.4.0.tgz",
+ "integrity": "sha512-qH0WSMBtn/oHuwjy/NucEgbx5dbxxnxup9s4PVXJUDHZBQY+s0NWA9rJf53RBnQZxfch7euUui7hpoAPvALZdA==",
+ "requires": {
+ "big.js": "^5.2.2",
+ "emojis-list": "^3.0.0",
+ "json5": "^1.0.1"
+ }
+ },
+ "locate-path": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz",
+ "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==",
+ "requires": {
+ "p-locate": "^4.1.0"
+ }
+ },
+ "lodash": {
+ "version": "4.17.21",
+ "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz",
+ "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg=="
+ },
+ "lodash._reinterpolate": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/lodash._reinterpolate/-/lodash._reinterpolate-3.0.0.tgz",
+ "integrity": "sha1-DM8tiRZq8Ds2Y8eWU4t1rG4RTZ0="
+ },
+ "lodash.chunk": {
+ "version": "4.2.0",
+ "resolved": "https://registry.npmjs.org/lodash.chunk/-/lodash.chunk-4.2.0.tgz",
+ "integrity": "sha1-ZuXOH3btJ7QwPYxlEujRIW6BBrw="
+ },
+ "lodash.clonedeep": {
+ "version": "4.5.0",
+ "resolved": "https://registry.npmjs.org/lodash.clonedeep/-/lodash.clonedeep-4.5.0.tgz",
+ "integrity": "sha1-4j8/nE+Pvd6HJSnBBxhXoIblzO8="
+ },
+ "lodash.debounce": {
+ "version": "4.0.8",
+ "resolved": "https://registry.npmjs.org/lodash.debounce/-/lodash.debounce-4.0.8.tgz",
+ "integrity": "sha1-gteb/zCmfEAF/9XiUVMArZyk168="
+ },
+ "lodash.kebabcase": {
+ "version": "4.1.1",
+ "resolved": "https://registry.npmjs.org/lodash.kebabcase/-/lodash.kebabcase-4.1.1.tgz",
+ "integrity": "sha1-hImxyw0p/4gZXM7KRI/21swpXDY="
+ },
+ "lodash.memoize": {
+ "version": "4.1.2",
+ "resolved": "https://registry.npmjs.org/lodash.memoize/-/lodash.memoize-4.1.2.tgz",
+ "integrity": "sha1-vMbEmkKihA7Zl/Mj6tpezRguC/4="
+ },
+ "lodash.padstart": {
+ "version": "4.6.1",
+ "resolved": "https://registry.npmjs.org/lodash.padstart/-/lodash.padstart-4.6.1.tgz",
+ "integrity": "sha1-0uPuv/DZ05rVD1y9G1KnvOa7YRs="
+ },
+ "lodash.sortby": {
+ "version": "4.7.0",
+ "resolved": "https://registry.npmjs.org/lodash.sortby/-/lodash.sortby-4.7.0.tgz",
+ "integrity": "sha1-7dFMgk4sycHgsKG0K7UhBRakJDg="
+ },
+ "lodash.template": {
+ "version": "4.5.0",
+ "resolved": "https://registry.npmjs.org/lodash.template/-/lodash.template-4.5.0.tgz",
+ "integrity": "sha512-84vYFxIkmidUiFxidA/KjjH9pAycqW+h980j7Fuz5qxRtO9pgB7MDFTdys1N7A5mcucRiDyEq4fusljItR1T/A==",
+ "requires": {
+ "lodash._reinterpolate": "^3.0.0",
+ "lodash.templatesettings": "^4.0.0"
+ }
+ },
+ "lodash.templatesettings": {
+ "version": "4.2.0",
+ "resolved": "https://registry.npmjs.org/lodash.templatesettings/-/lodash.templatesettings-4.2.0.tgz",
+ "integrity": "sha512-stgLz+i3Aa9mZgnjr/O+v9ruKZsPsndy7qPZOchbqk2cnTU1ZaldKK+v7m54WoKIyxiuMZTKT2H81F8BeAc3ZQ==",
+ "requires": {
+ "lodash._reinterpolate": "^3.0.0"
+ }
+ },
+ "lodash.uniq": {
+ "version": "4.5.0",
+ "resolved": "https://registry.npmjs.org/lodash.uniq/-/lodash.uniq-4.5.0.tgz",
+ "integrity": "sha1-0CJTc662Uq3BvILklFM5qEJ1R3M="
+ },
+ "loglevel": {
+ "version": "1.7.1",
+ "resolved": "https://registry.npmjs.org/loglevel/-/loglevel-1.7.1.tgz",
+ "integrity": "sha512-Hesni4s5UkWkwCGJMQGAh71PaLUmKFM60dHvq0zi/vDhhrzuk+4GgNbTXJ12YYQJn6ZKBDNIjYcuQGKudvqrIw=="
+ },
+ "lower-case": {
+ "version": "1.1.4",
+ "resolved": "https://registry.npmjs.org/lower-case/-/lower-case-1.1.4.tgz",
+ "integrity": "sha1-miyr0bno4K6ZOkv31YdcOcQujqw="
+ },
+ "lowercase-keys": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-1.0.1.tgz",
+ "integrity": "sha512-G2Lj61tXDnVFFOi8VZds+SoQjtQC3dgokKdDG2mTm1tx4m50NUHBOZSBwQQHyy0V12A0JTG4icfZQH+xPyh8VA=="
+ },
+ "lru-cache": {
+ "version": "5.1.1",
+ "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz",
+ "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==",
+ "requires": {
+ "yallist": "^3.0.2"
+ }
+ },
+ "make-dir": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-3.1.0.tgz",
+ "integrity": "sha512-g3FeP20LNwhALb/6Cz6Dd4F2ngze0jz7tbzrD2wAV+o9FeNHe4rL+yK2md0J/fiSf1sa1ADhXqi5+oVwOM/eGw==",
+ "requires": {
+ "semver": "^6.0.0"
+ }
+ },
+ "map-cache": {
+ "version": "0.2.2",
+ "resolved": "https://registry.npmjs.org/map-cache/-/map-cache-0.2.2.tgz",
+ "integrity": "sha1-wyq9C9ZSXZsFFkW7TyasXcmKDb8="
+ },
+ "map-visit": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/map-visit/-/map-visit-1.0.0.tgz",
+ "integrity": "sha1-7Nyo8TFE5mDxtb1B8S80edmN+48=",
+ "requires": {
+ "object-visit": "^1.0.0"
+ }
+ },
+ "markdown-it": {
+ "version": "12.0.6",
+ "resolved": "https://registry.npmjs.org/markdown-it/-/markdown-it-12.0.6.tgz",
+ "integrity": "sha512-qv3sVLl4lMT96LLtR7xeRJX11OUFjsaD5oVat2/SNBIb21bJXwal2+SklcRbTwGwqWpWH/HRtYavOoJE+seL8w==",
+ "requires": {
+ "argparse": "^2.0.1",
+ "entities": "~2.1.0",
+ "linkify-it": "^3.0.1",
+ "mdurl": "^1.0.1",
+ "uc.micro": "^1.0.5"
+ },
+ "dependencies": {
+ "argparse": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz",
+ "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q=="
+ }
+ }
+ },
+ "markdown-it-anchor": {
+ "version": "5.3.0",
+ "resolved": "https://registry.npmjs.org/markdown-it-anchor/-/markdown-it-anchor-5.3.0.tgz",
+ "integrity": "sha512-/V1MnLL/rgJ3jkMWo84UR+K+jF1cxNG1a+KwqeXqTIJ+jtA8aWSHuigx8lTzauiIjBDbwF3NcWQMotd0Dm39jA==",
+ "requires": {}
+ },
+ "markdown-it-attrs": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/markdown-it-attrs/-/markdown-it-attrs-3.0.3.tgz",
+ "integrity": "sha512-cLnICU2t61skNCr4Wih/sdza+UbQcqJGZwvqAypnbWA284nzDm+Gpc90iaRk/JjsIy4emag5v3s0rXFhFBWhCA==",
+ "requires": {}
+ },
+ "markdown-it-chain": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/markdown-it-chain/-/markdown-it-chain-1.3.0.tgz",
+ "integrity": "sha512-XClV8I1TKy8L2qsT9iX3qiV+50ZtcInGXI80CA+DP62sMs7hXlyV/RM3hfwy5O3Ad0sJm9xIwQELgANfESo8mQ==",
+ "requires": {
+ "webpack-chain": "^4.9.0"
+ },
+ "dependencies": {
+ "javascript-stringify": {
+ "version": "1.6.0",
+ "resolved": "https://registry.npmjs.org/javascript-stringify/-/javascript-stringify-1.6.0.tgz",
+ "integrity": "sha1-FC0RHzpuPa6PSpr9d9RYVbWpzOM="
+ },
+ "webpack-chain": {
+ "version": "4.12.1",
+ "resolved": "https://registry.npmjs.org/webpack-chain/-/webpack-chain-4.12.1.tgz",
+ "integrity": "sha512-BCfKo2YkDe2ByqkEWe1Rw+zko4LsyS75LVr29C6xIrxAg9JHJ4pl8kaIZ396SUSNp6b4815dRZPSTAS8LlURRQ==",
+ "requires": {
+ "deepmerge": "^1.5.2",
+ "javascript-stringify": "^1.6.0"
+ }
+ }
+ }
+ },
+ "markdown-it-container": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/markdown-it-container/-/markdown-it-container-2.0.0.tgz",
+ "integrity": "sha1-ABm0P9Au7+zi8ZYKKJX7qBpARpU="
+ },
+ "markdown-it-emoji": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/markdown-it-emoji/-/markdown-it-emoji-1.4.0.tgz",
+ "integrity": "sha1-m+4OmpkKljupbfaYDE/dsF37Tcw="
+ },
+ "markdown-it-table-of-contents": {
+ "version": "0.4.4",
+ "resolved": "https://registry.npmjs.org/markdown-it-table-of-contents/-/markdown-it-table-of-contents-0.4.4.tgz",
+ "integrity": "sha512-TAIHTHPwa9+ltKvKPWulm/beozQU41Ab+FIefRaQV1NRnpzwcV9QOe6wXQS5WLivm5Q/nlo0rl6laGkMDZE7Gw=="
+ },
+ "md5.js": {
+ "version": "1.3.5",
+ "resolved": "https://registry.npmjs.org/md5.js/-/md5.js-1.3.5.tgz",
+ "integrity": "sha512-xitP+WxNPcTTOgnTJcrhM0xvdPepipPSf3I8EIpGKeFLjt3PlJLIDG3u8EX53ZIubkb+5U2+3rELYpEhHhzdkg==",
+ "requires": {
+ "hash-base": "^3.0.0",
+ "inherits": "^2.0.1",
+ "safe-buffer": "^5.1.2"
+ }
+ },
+ "mdn-data": {
+ "version": "2.0.4",
+ "resolved": "https://registry.npmjs.org/mdn-data/-/mdn-data-2.0.4.tgz",
+ "integrity": "sha512-iV3XNKw06j5Q7mi6h+9vbx23Tv7JkjEVgKHW4pimwyDGWm0OIQntJJ+u1C6mg6mK1EaTv42XQ7w76yuzH7M2cA=="
+ },
+ "mdurl": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/mdurl/-/mdurl-1.0.1.tgz",
+ "integrity": "sha1-/oWy7HWlkDfyrf7BAP1sYBdhFS4="
+ },
+ "media-typer": {
+ "version": "0.3.0",
+ "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz",
+ "integrity": "sha1-hxDXrwqmJvj/+hzgAWhUUmMlV0g="
+ },
+ "memory-fs": {
+ "version": "0.4.1",
+ "resolved": "https://registry.npmjs.org/memory-fs/-/memory-fs-0.4.1.tgz",
+ "integrity": "sha1-OpoguEYlI+RHz7x+i7gO1me/xVI=",
+ "requires": {
+ "errno": "^0.1.3",
+ "readable-stream": "^2.0.1"
+ }
+ },
+ "merge-descriptors": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.1.tgz",
+ "integrity": "sha1-sAqqVW3YtEVoFQ7J0blT8/kMu2E="
+ },
+ "merge-source-map": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/merge-source-map/-/merge-source-map-1.1.0.tgz",
+ "integrity": "sha512-Qkcp7P2ygktpMPh2mCQZaf3jhN6D3Z/qVZHSdWvQ+2Ef5HgRAPBO57A77+ENm0CPx2+1Ce/MYKi3ymqdfuqibw==",
+ "requires": {
+ "source-map": "^0.6.1"
+ },
+ "dependencies": {
+ "source-map": {
+ "version": "0.6.1",
+ "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz",
+ "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g=="
+ }
+ }
+ },
+ "merge2": {
+ "version": "1.4.1",
+ "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz",
+ "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg=="
+ },
+ "methods": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz",
+ "integrity": "sha1-VSmk1nZUE07cxSZmVoNbD4Ua/O4="
+ },
+ "micromatch": {
+ "version": "3.1.10",
+ "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-3.1.10.tgz",
+ "integrity": "sha512-MWikgl9n9M3w+bpsY3He8L+w9eF9338xRl8IAO5viDizwSzziFEyUzo2xrrloB64ADbTf8uA8vRqqttDTOmccg==",
+ "requires": {
+ "arr-diff": "^4.0.0",
+ "array-unique": "^0.3.2",
+ "braces": "^2.3.1",
+ "define-property": "^2.0.2",
+ "extend-shallow": "^3.0.2",
+ "extglob": "^2.0.4",
+ "fragment-cache": "^0.2.1",
+ "kind-of": "^6.0.2",
+ "nanomatch": "^1.2.9",
+ "object.pick": "^1.3.0",
+ "regex-not": "^1.0.0",
+ "snapdragon": "^0.8.1",
+ "to-regex": "^3.0.2"
+ },
+ "dependencies": {
+ "extend-shallow": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-3.0.2.tgz",
+ "integrity": "sha1-Jqcarwc7OfshJxcnRhMcJwQCjbg=",
+ "requires": {
+ "assign-symbols": "^1.0.0",
+ "is-extendable": "^1.0.1"
+ }
+ },
+ "is-extendable": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz",
+ "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==",
+ "requires": {
+ "is-plain-object": "^2.0.4"
+ }
+ }
+ }
+ },
+ "miller-rabin": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/miller-rabin/-/miller-rabin-4.0.1.tgz",
+ "integrity": "sha512-115fLhvZVqWwHPbClyntxEVfVDfl9DLLTuJvq3g2O/Oxi8AiNouAHvDSzHS0viUJc+V5vm3eq91Xwqn9dp4jRA==",
+ "requires": {
+ "bn.js": "^4.0.0",
+ "brorand": "^1.0.1"
+ },
+ "dependencies": {
+ "bn.js": {
+ "version": "4.12.0",
+ "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.12.0.tgz",
+ "integrity": "sha512-c98Bf3tPniI+scsdk237ku1Dc3ujXQTSgyiPUDEOe7tRkhrqridvh8klBv0HCEso1OLOYcHuCv/cS6DNxKH+ZA=="
+ }
+ }
+ },
+ "mime": {
+ "version": "2.5.2",
+ "resolved": "https://registry.npmjs.org/mime/-/mime-2.5.2.tgz",
+ "integrity": "sha512-tqkh47FzKeCPD2PUiPB6pkbMzsCasjxAfC62/Wap5qrUWcb+sFasXUC5I3gYM5iBM8v/Qpn4UK0x+j0iHyFPDg=="
+ },
+ "mime-db": {
+ "version": "1.47.0",
+ "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.47.0.tgz",
+ "integrity": "sha512-QBmA/G2y+IfeS4oktet3qRZ+P5kPhCKRXxXnQEudYqUaEioAU1/Lq2us3D/t1Jfo4hE9REQPrbB7K5sOczJVIw=="
+ },
+ "mime-types": {
+ "version": "2.1.30",
+ "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.30.tgz",
+ "integrity": "sha512-crmjA4bLtR8m9qLpHvgxSChT+XoSlZi8J4n/aIdn3z92e/U47Z0V/yl+Wh9W046GgFVAmoNR/fmdbZYcSSIUeg==",
+ "requires": {
+ "mime-db": "1.47.0"
+ }
+ },
+ "mimic-response": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-1.0.1.tgz",
+ "integrity": "sha512-j5EctnkH7amfV/q5Hgmoal1g2QHFJRraOtmx0JpIqkxhBhI/lJSl1nMpQ45hVarwNETOoWEimndZ4QK0RHxuxQ=="
+ },
+ "min-document": {
+ "version": "2.19.0",
+ "resolved": "https://registry.npmjs.org/min-document/-/min-document-2.19.0.tgz",
+ "integrity": "sha1-e9KC4/WELtKVu3SM3Z8f+iyCRoU=",
+ "requires": {
+ "dom-walk": "^0.1.0"
+ }
+ },
+ "mini-css-extract-plugin": {
+ "version": "0.6.0",
+ "resolved": "https://registry.npmjs.org/mini-css-extract-plugin/-/mini-css-extract-plugin-0.6.0.tgz",
+ "integrity": "sha512-79q5P7YGI6rdnVyIAV4NXpBQJFWdkzJxCim3Kog4078fM0piAaFlwocqbejdWtLW1cEzCexPrh6EdyFsPgVdAw==",
+ "requires": {
+ "loader-utils": "^1.1.0",
+ "normalize-url": "^2.0.1",
+ "schema-utils": "^1.0.0",
+ "webpack-sources": "^1.1.0"
+ },
+ "dependencies": {
+ "schema-utils": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-1.0.0.tgz",
+ "integrity": "sha512-i27Mic4KovM/lnGsy8whRCHhc7VicJajAjTrYg11K9zfZXnYIt4k5F+kZkwjnrhKzLic/HLU4j11mjsz2G/75g==",
+ "requires": {
+ "ajv": "^6.1.0",
+ "ajv-errors": "^1.0.0",
+ "ajv-keywords": "^3.1.0"
+ }
+ }
+ }
+ },
+ "minimalistic-assert": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/minimalistic-assert/-/minimalistic-assert-1.0.1.tgz",
+ "integrity": "sha512-UtJcAD4yEaGtjPezWuO9wC4nwUnVH/8/Im3yEHQP4b67cXlD/Qr9hdITCU1xDbSEXg2XKNaP8jsReV7vQd00/A=="
+ },
+ "minimalistic-crypto-utils": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/minimalistic-crypto-utils/-/minimalistic-crypto-utils-1.0.1.tgz",
+ "integrity": "sha1-9sAMHAsIIkblxNmd+4x8CDsrWCo="
+ },
+ "minimatch": {
+ "version": "3.0.4",
+ "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz",
+ "integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==",
+ "requires": {
+ "brace-expansion": "^1.1.7"
+ }
+ },
+ "minimist": {
+ "version": "1.2.5",
+ "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.5.tgz",
+ "integrity": "sha512-FM9nNUYrRBAELZQT3xeZQ7fmMOBg6nWNmJKTcgsJeaLstP/UODVpGsr5OhXhhXg6f+qtJ8uiZ+PUxkDWcgIXLw=="
+ },
+ "mississippi": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/mississippi/-/mississippi-3.0.0.tgz",
+ "integrity": "sha512-x471SsVjUtBRtcvd4BzKE9kFC+/2TeWgKCgw0bZcw1b9l2X3QX5vCWgF+KaZaYm87Ss//rHnWryupDrgLvmSkA==",
+ "requires": {
+ "concat-stream": "^1.5.0",
+ "duplexify": "^3.4.2",
+ "end-of-stream": "^1.1.0",
+ "flush-write-stream": "^1.0.0",
+ "from2": "^2.1.0",
+ "parallel-transform": "^1.1.0",
+ "pump": "^3.0.0",
+ "pumpify": "^1.3.3",
+ "stream-each": "^1.1.0",
+ "through2": "^2.0.0"
+ }
+ },
+ "mixin-deep": {
+ "version": "1.3.2",
+ "resolved": "https://registry.npmjs.org/mixin-deep/-/mixin-deep-1.3.2.tgz",
+ "integrity": "sha512-WRoDn//mXBiJ1H40rqa3vH0toePwSsGb45iInWlTySa+Uu4k3tYUSxa2v1KqAiLtvlrSzaExqS1gtk96A9zvEA==",
+ "requires": {
+ "for-in": "^1.0.2",
+ "is-extendable": "^1.0.1"
+ },
+ "dependencies": {
+ "is-extendable": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz",
+ "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==",
+ "requires": {
+ "is-plain-object": "^2.0.4"
+ }
+ }
+ }
+ },
+ "mkdirp": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-1.0.4.tgz",
+ "integrity": "sha512-vVqVZQyf3WLx2Shd0qJ9xuvqgAyKPLAiqITEtqW0oIUjzo3PePDd6fW9iFz30ef7Ysp/oiWqbhszeGWW2T6Gzw=="
+ },
+ "move-concurrently": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/move-concurrently/-/move-concurrently-1.0.1.tgz",
+ "integrity": "sha1-viwAX9oy4LKa8fBdfEszIUxwH5I=",
+ "requires": {
+ "aproba": "^1.1.1",
+ "copy-concurrently": "^1.0.0",
+ "fs-write-stream-atomic": "^1.0.8",
+ "mkdirp": "^0.5.1",
+ "rimraf": "^2.5.4",
+ "run-queue": "^1.0.3"
+ },
+ "dependencies": {
+ "mkdirp": {
+ "version": "0.5.5",
+ "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz",
+ "integrity": "sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==",
+ "requires": {
+ "minimist": "^1.2.5"
+ }
+ }
+ }
+ },
+ "ms": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
+ "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g="
+ },
+ "multicast-dns": {
+ "version": "6.2.3",
+ "resolved": "https://registry.npmjs.org/multicast-dns/-/multicast-dns-6.2.3.tgz",
+ "integrity": "sha512-ji6J5enbMyGRHIAkAOu3WdV8nggqviKCEKtXcOqfphZZtQrmHKycfynJ2V7eVPUA4NhJ6V7Wf4TmGbTwKE9B6g==",
+ "requires": {
+ "dns-packet": "^1.3.1",
+ "thunky": "^1.0.2"
+ }
+ },
+ "multicast-dns-service-types": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/multicast-dns-service-types/-/multicast-dns-service-types-1.1.0.tgz",
+ "integrity": "sha1-iZ8R2WhuXgXLkbNdXw5jt3PPyQE="
+ },
+ "nan": {
+ "version": "2.14.2",
+ "resolved": "https://registry.npmjs.org/nan/-/nan-2.14.2.tgz",
+ "integrity": "sha512-M2ufzIiINKCuDfBSAUr1vWQ+vuVcA9kqx8JJUsbQi6yf1uGRyb7HfpdfUr5qLXf3B/t8dPvcjhKMmlfnP47EzQ==",
+ "optional": true
+ },
+ "nanomatch": {
+ "version": "1.2.13",
+ "resolved": "https://registry.npmjs.org/nanomatch/-/nanomatch-1.2.13.tgz",
+ "integrity": "sha512-fpoe2T0RbHwBTBUOftAfBPaDEi06ufaUai0mE6Yn1kacc3SnTErfb/h+X94VXzI64rKFHYImXSvdwGGCmwOqCA==",
+ "requires": {
+ "arr-diff": "^4.0.0",
+ "array-unique": "^0.3.2",
+ "define-property": "^2.0.2",
+ "extend-shallow": "^3.0.2",
+ "fragment-cache": "^0.2.1",
+ "is-windows": "^1.0.2",
+ "kind-of": "^6.0.2",
+ "object.pick": "^1.3.0",
+ "regex-not": "^1.0.0",
+ "snapdragon": "^0.8.1",
+ "to-regex": "^3.0.1"
+ },
+ "dependencies": {
+ "extend-shallow": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-3.0.2.tgz",
+ "integrity": "sha1-Jqcarwc7OfshJxcnRhMcJwQCjbg=",
+ "requires": {
+ "assign-symbols": "^1.0.0",
+ "is-extendable": "^1.0.1"
+ }
+ },
+ "is-extendable": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz",
+ "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==",
+ "requires": {
+ "is-plain-object": "^2.0.4"
+ }
+ }
+ }
+ },
+ "negotiator": {
+ "version": "0.6.2",
+ "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.2.tgz",
+ "integrity": "sha512-hZXc7K2e+PgeI1eDBe/10Ard4ekbfrrqG8Ep+8Jmf4JID2bNg7NvCPOZN+kfF574pFQI7mum2AUqDidoKqcTOw=="
+ },
+ "neo-async": {
+ "version": "2.6.2",
+ "resolved": "https://registry.npmjs.org/neo-async/-/neo-async-2.6.2.tgz",
+ "integrity": "sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw=="
+ },
+ "nice-try": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/nice-try/-/nice-try-1.0.5.tgz",
+ "integrity": "sha512-1nh45deeb5olNY7eX82BkPO7SSxR5SSYJiPTrTdFUVYwAl8CKMA5N9PjTYkHiRjisVcxcQ1HXdLhx2qxxJzLNQ=="
+ },
+ "no-case": {
+ "version": "2.3.2",
+ "resolved": "https://registry.npmjs.org/no-case/-/no-case-2.3.2.tgz",
+ "integrity": "sha512-rmTZ9kz+f3rCvK2TD1Ue/oZlns7OGoIWP4fc3llxxRXlOkHKoWPPWJOfFYpITabSow43QJbRIoHQXtt10VldyQ==",
+ "requires": {
+ "lower-case": "^1.1.1"
+ }
+ },
+ "node-forge": {
+ "version": "0.10.0",
+ "resolved": "https://registry.npmjs.org/node-forge/-/node-forge-0.10.0.tgz",
+ "integrity": "sha512-PPmu8eEeG9saEUvI97fm4OYxXVB6bFvyNTyiUOBichBpFG8A1Ljw3bY62+5oOjDEMHRnd0Y7HQ+x7uzxOzC6JA=="
+ },
+ "node-libs-browser": {
+ "version": "2.2.1",
+ "resolved": "https://registry.npmjs.org/node-libs-browser/-/node-libs-browser-2.2.1.tgz",
+ "integrity": "sha512-h/zcD8H9kaDZ9ALUWwlBUDo6TKF8a7qBSCSEGfjTVIYeqsioSKaAX+BN7NgiMGp6iSIXZ3PxgCu8KS3b71YK5Q==",
+ "requires": {
+ "assert": "^1.1.1",
+ "browserify-zlib": "^0.2.0",
+ "buffer": "^4.3.0",
+ "console-browserify": "^1.1.0",
+ "constants-browserify": "^1.0.0",
+ "crypto-browserify": "^3.11.0",
+ "domain-browser": "^1.1.1",
+ "events": "^3.0.0",
+ "https-browserify": "^1.0.0",
+ "os-browserify": "^0.3.0",
+ "path-browserify": "0.0.1",
+ "process": "^0.11.10",
+ "punycode": "^1.2.4",
+ "querystring-es3": "^0.2.0",
+ "readable-stream": "^2.3.3",
+ "stream-browserify": "^2.0.1",
+ "stream-http": "^2.7.2",
+ "string_decoder": "^1.0.0",
+ "timers-browserify": "^2.0.4",
+ "tty-browserify": "0.0.0",
+ "url": "^0.11.0",
+ "util": "^0.11.0",
+ "vm-browserify": "^1.0.1"
+ },
+ "dependencies": {
+ "punycode": {
+ "version": "1.4.1",
+ "resolved": "https://registry.npmjs.org/punycode/-/punycode-1.4.1.tgz",
+ "integrity": "sha1-wNWmOycYgArY4esPpSachN1BhF4="
+ }
+ }
+ },
+ "node-releases": {
+ "version": "1.1.71",
+ "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-1.1.71.tgz",
+ "integrity": "sha512-zR6HoT6LrLCRBwukmrVbHv0EpEQjksO6GmFcZQQuCAy139BEsoVKPYnf3jongYW83fAa1torLGYwxxky/p28sg=="
+ },
+ "nopt": {
+ "version": "1.0.10",
+ "resolved": "https://registry.npmjs.org/nopt/-/nopt-1.0.10.tgz",
+ "integrity": "sha1-bd0hvSoxQXuScn3Vhfim83YI6+4=",
+ "requires": {
+ "abbrev": "1"
+ }
+ },
+ "normalize-path": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz",
+ "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA=="
+ },
+ "normalize-range": {
+ "version": "0.1.2",
+ "resolved": "https://registry.npmjs.org/normalize-range/-/normalize-range-0.1.2.tgz",
+ "integrity": "sha1-LRDAa9/TEuqXd2laTShDlFa3WUI="
+ },
+ "normalize-url": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-2.0.1.tgz",
+ "integrity": "sha512-D6MUW4K/VzoJ4rJ01JFKxDrtY1v9wrgzCX5f2qj/lzH1m/lW6MhUZFKerVsnyjOhOsYzI9Kqqak+10l4LvLpMw==",
+ "requires": {
+ "prepend-http": "^2.0.0",
+ "query-string": "^5.0.1",
+ "sort-keys": "^2.0.0"
+ }
+ },
+ "npm-run-path": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-2.0.2.tgz",
+ "integrity": "sha1-NakjLfo11wZ7TLLd8jV7GHFTbF8=",
+ "requires": {
+ "path-key": "^2.0.0"
+ }
+ },
+ "nprogress": {
+ "version": "0.2.0",
+ "resolved": "https://registry.npmjs.org/nprogress/-/nprogress-0.2.0.tgz",
+ "integrity": "sha1-y480xTIT2JVyP8urkH6UIq28r7E="
+ },
+ "nth-check": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/nth-check/-/nth-check-2.0.0.tgz",
+ "integrity": "sha512-i4sc/Kj8htBrAiH1viZ0TgU8Y5XqCaV/FziYK6TBczxmeKm3AEFWqqF3195yKudrarqy7Zu80Ra5dobFjn9X/Q==",
+ "requires": {
+ "boolbase": "^1.0.0"
+ }
+ },
+ "num2fraction": {
+ "version": "1.2.2",
+ "resolved": "https://registry.npmjs.org/num2fraction/-/num2fraction-1.2.2.tgz",
+ "integrity": "sha1-b2gragJ6Tp3fpFZM0lidHU5mnt4="
+ },
+ "oauth-sign": {
+ "version": "0.9.0",
+ "resolved": "https://registry.npmjs.org/oauth-sign/-/oauth-sign-0.9.0.tgz",
+ "integrity": "sha512-fexhUFFPTGV8ybAtSIGbV6gOkSv8UtRbDBnAyLQw4QPKkgNlsH2ByPGtMUqdWkos6YCRmAqViwgZrJc/mRDzZQ=="
+ },
+ "object-assign": {
+ "version": "4.1.1",
+ "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz",
+ "integrity": "sha1-IQmtx5ZYh8/AXLvUQsrIv7s2CGM="
+ },
+ "object-copy": {
+ "version": "0.1.0",
+ "resolved": "https://registry.npmjs.org/object-copy/-/object-copy-0.1.0.tgz",
+ "integrity": "sha1-fn2Fi3gb18mRpBupde04EnVOmYw=",
+ "requires": {
+ "copy-descriptor": "^0.1.0",
+ "define-property": "^0.2.5",
+ "kind-of": "^3.0.3"
+ },
+ "dependencies": {
+ "define-property": {
+ "version": "0.2.5",
+ "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz",
+ "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=",
+ "requires": {
+ "is-descriptor": "^0.1.0"
+ }
+ },
+ "is-accessor-descriptor": {
+ "version": "0.1.6",
+ "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-0.1.6.tgz",
+ "integrity": "sha1-qeEss66Nh2cn7u84Q/igiXtcmNY=",
+ "requires": {
+ "kind-of": "^3.0.2"
+ }
+ },
+ "is-data-descriptor": {
+ "version": "0.1.4",
+ "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-0.1.4.tgz",
+ "integrity": "sha1-C17mSDiOLIYCgueT8YVv7D8wG1Y=",
+ "requires": {
+ "kind-of": "^3.0.2"
+ }
+ },
+ "is-descriptor": {
+ "version": "0.1.6",
+ "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-0.1.6.tgz",
+ "integrity": "sha512-avDYr0SB3DwO9zsMov0gKCESFYqCnE4hq/4z3TdUlukEy5t9C0YRq7HLrsN52NAcqXKaepeCD0n+B0arnVG3Hg==",
+ "requires": {
+ "is-accessor-descriptor": "^0.1.6",
+ "is-data-descriptor": "^0.1.4",
+ "kind-of": "^5.0.0"
+ },
+ "dependencies": {
+ "kind-of": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-5.1.0.tgz",
+ "integrity": "sha512-NGEErnH6F2vUuXDh+OlbcKW7/wOcfdRHaZ7VWtqCztfHri/++YKmP51OdWeGPuqCOba6kk2OTe5d02VmTB80Pw=="
+ }
+ }
+ },
+ "kind-of": {
+ "version": "3.2.2",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz",
+ "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=",
+ "requires": {
+ "is-buffer": "^1.1.5"
+ }
+ }
+ }
+ },
+ "object-inspect": {
+ "version": "1.10.2",
+ "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.10.2.tgz",
+ "integrity": "sha512-gz58rdPpadwztRrPjZE9DZLOABUpTGdcANUgOwBFO1C+HZZhePoP83M65WGDmbpwFYJSWqavbl4SgDn4k8RYTA=="
+ },
+ "object-is": {
+ "version": "1.1.5",
+ "resolved": "https://registry.npmjs.org/object-is/-/object-is-1.1.5.tgz",
+ "integrity": "sha512-3cyDsyHgtmi7I7DfSSI2LDp6SK2lwvtbg0p0R1e0RvTqF5ceGx+K2dfSjm1bKDMVCFEDAQvy+o8c6a7VujOddw==",
+ "requires": {
+ "call-bind": "^1.0.2",
+ "define-properties": "^1.1.3"
+ }
+ },
+ "object-keys": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.1.1.tgz",
+ "integrity": "sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA=="
+ },
+ "object-visit": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/object-visit/-/object-visit-1.0.1.tgz",
+ "integrity": "sha1-95xEk68MU3e1n+OdOV5BBC3QRbs=",
+ "requires": {
+ "isobject": "^3.0.0"
+ }
+ },
+ "object.assign": {
+ "version": "4.1.2",
+ "resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.2.tgz",
+ "integrity": "sha512-ixT2L5THXsApyiUPYKmW+2EHpXXe5Ii3M+f4e+aJFAHao5amFRW6J0OO6c/LU8Be47utCx2GL89hxGB6XSmKuQ==",
+ "requires": {
+ "call-bind": "^1.0.0",
+ "define-properties": "^1.1.3",
+ "has-symbols": "^1.0.1",
+ "object-keys": "^1.1.1"
+ }
+ },
+ "object.getownpropertydescriptors": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/object.getownpropertydescriptors/-/object.getownpropertydescriptors-2.1.2.tgz",
+ "integrity": "sha512-WtxeKSzfBjlzL+F9b7M7hewDzMwy+C8NRssHd1YrNlzHzIDrXcXiNOMrezdAEM4UXixgV+vvnyBeN7Rygl2ttQ==",
+ "requires": {
+ "call-bind": "^1.0.2",
+ "define-properties": "^1.1.3",
+ "es-abstract": "^1.18.0-next.2"
+ }
+ },
+ "object.pick": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/object.pick/-/object.pick-1.3.0.tgz",
+ "integrity": "sha1-h6EKxMFpS9Lhy/U1kaZhQftd10c=",
+ "requires": {
+ "isobject": "^3.0.1"
+ }
+ },
+ "object.values": {
+ "version": "1.1.3",
+ "resolved": "https://registry.npmjs.org/object.values/-/object.values-1.1.3.tgz",
+ "integrity": "sha512-nkF6PfDB9alkOUxpf1HNm/QlkeW3SReqL5WXeBLpEJJnlPSvRaDQpW3gQTksTN3fgJX4hL42RzKyOin6ff3tyw==",
+ "requires": {
+ "call-bind": "^1.0.2",
+ "define-properties": "^1.1.3",
+ "es-abstract": "^1.18.0-next.2",
+ "has": "^1.0.3"
+ }
+ },
+ "obuf": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/obuf/-/obuf-1.1.2.tgz",
+ "integrity": "sha512-PX1wu0AmAdPqOL1mWhqmlOd8kOIZQwGZw6rh7uby9fTc5lhaOWFLX3I6R1hrF9k3zUY40e6igsLGkDXK92LJNg=="
+ },
+ "on-finished": {
+ "version": "2.3.0",
+ "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.3.0.tgz",
+ "integrity": "sha1-IPEzZIGwg811M3mSoWlxqi2QaUc=",
+ "requires": {
+ "ee-first": "1.1.1"
+ }
+ },
+ "on-headers": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/on-headers/-/on-headers-1.0.2.tgz",
+ "integrity": "sha512-pZAE+FJLoyITytdqK0U5s+FIpjN0JP3OzFi/u8Rx+EV5/W+JTWGXG8xFzevE7AjBfDqHv/8vL8qQsIhHnqRkrA=="
+ },
+ "once": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz",
+ "integrity": "sha1-WDsap3WWHUsROsF9nFC6753Xa9E=",
+ "requires": {
+ "wrappy": "1"
+ }
+ },
+ "opencollective-postinstall": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/opencollective-postinstall/-/opencollective-postinstall-2.0.3.tgz",
+ "integrity": "sha512-8AV/sCtuzUeTo8gQK5qDZzARrulB3egtLzFgteqB2tcT4Mw7B8Kt7JcDHmltjz6FOAHsvTevk70gZEbhM4ZS9Q=="
+ },
+ "opn": {
+ "version": "5.5.0",
+ "resolved": "https://registry.npmjs.org/opn/-/opn-5.5.0.tgz",
+ "integrity": "sha512-PqHpggC9bLV0VeWcdKhkpxY+3JTzetLSqTCWL/z/tFIbI6G8JCjondXklT1JinczLz2Xib62sSp0T/gKT4KksA==",
+ "requires": {
+ "is-wsl": "^1.1.0"
+ }
+ },
+ "optimize-css-assets-webpack-plugin": {
+ "version": "5.0.4",
+ "resolved": "https://registry.npmjs.org/optimize-css-assets-webpack-plugin/-/optimize-css-assets-webpack-plugin-5.0.4.tgz",
+ "integrity": "sha512-wqd6FdI2a5/FdoiCNNkEvLeA//lHHfG24Ln2Xm2qqdIk4aOlsR18jwpyOihqQ8849W3qu2DX8fOYxpvTMj+93A==",
+ "requires": {
+ "cssnano": "^4.1.10",
+ "last-call-webpack-plugin": "^3.0.0"
+ }
+ },
+ "original": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/original/-/original-1.0.2.tgz",
+ "integrity": "sha512-hyBVl6iqqUOJ8FqRe+l/gS8H+kKYjrEndd5Pm1MfBtsEKA038HkkdbAl/72EAXGyonD/PFsvmVG+EvcIpliMBg==",
+ "requires": {
+ "url-parse": "^1.4.3"
+ }
+ },
+ "os-browserify": {
+ "version": "0.3.0",
+ "resolved": "https://registry.npmjs.org/os-browserify/-/os-browserify-0.3.0.tgz",
+ "integrity": "sha1-hUNzx/XCMVkU/Jv8a9gjj92h7Cc="
+ },
+ "p-cancelable": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/p-cancelable/-/p-cancelable-1.1.0.tgz",
+ "integrity": "sha512-s73XxOZ4zpt1edZYZzvhqFa6uvQc1vwUa0K0BdtIZgQMAJj9IbebH+JkgKZc9h+B05PKHLOTl4ajG1BmNrVZlw=="
+ },
+ "p-finally": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/p-finally/-/p-finally-1.0.0.tgz",
+ "integrity": "sha1-P7z7FbiZpEEjs0ttzBi3JDNqLK4="
+ },
+ "p-limit": {
+ "version": "2.3.0",
+ "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz",
+ "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==",
+ "requires": {
+ "p-try": "^2.0.0"
+ }
+ },
+ "p-locate": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz",
+ "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==",
+ "requires": {
+ "p-limit": "^2.2.0"
+ }
+ },
+ "p-map": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/p-map/-/p-map-2.1.0.tgz",
+ "integrity": "sha512-y3b8Kpd8OAN444hxfBbFfj1FY/RjtTd8tzYwhUqNYXx0fXx2iX4maP4Qr6qhIKbQXI02wTLAda4fYUbDagTUFw=="
+ },
+ "p-retry": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/p-retry/-/p-retry-3.0.1.tgz",
+ "integrity": "sha512-XE6G4+YTTkT2a0UWb2kjZe8xNwf8bIbnqpc/IS/idOBVhyves0mK5OJgeocjx7q5pvX/6m23xuzVPYT1uGM73w==",
+ "requires": {
+ "retry": "^0.12.0"
+ }
+ },
+ "p-try": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz",
+ "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ=="
+ },
+ "package-json": {
+ "version": "6.5.0",
+ "resolved": "https://registry.npmjs.org/package-json/-/package-json-6.5.0.tgz",
+ "integrity": "sha512-k3bdm2n25tkyxcjSKzB5x8kfVxlMdgsbPr0GkZcwHsLpba6cBjqCt1KlcChKEvxHIcTB1FVMuwoijZ26xex5MQ==",
+ "requires": {
+ "got": "^9.6.0",
+ "registry-auth-token": "^4.0.0",
+ "registry-url": "^5.0.0",
+ "semver": "^6.2.0"
+ }
+ },
+ "pako": {
+ "version": "1.0.11",
+ "resolved": "https://registry.npmjs.org/pako/-/pako-1.0.11.tgz",
+ "integrity": "sha512-4hLB8Py4zZce5s4yd9XzopqwVv/yGNhV1Bl8NTmCq1763HeK2+EwVTv+leGeL13Dnh2wfbqowVPXCIO0z4taYw=="
+ },
+ "parallel-transform": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/parallel-transform/-/parallel-transform-1.2.0.tgz",
+ "integrity": "sha512-P2vSmIu38uIlvdcU7fDkyrxj33gTUy/ABO5ZUbGowxNCopBq/OoD42bP4UmMrJoPyk4Uqf0mu3mtWBhHCZD8yg==",
+ "requires": {
+ "cyclist": "^1.0.1",
+ "inherits": "^2.0.3",
+ "readable-stream": "^2.1.5"
+ }
+ },
+ "param-case": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/param-case/-/param-case-2.1.1.tgz",
+ "integrity": "sha1-35T9jPZTHs915r75oIWPvHK+Ikc=",
+ "requires": {
+ "no-case": "^2.2.0"
+ }
+ },
+ "parse-asn1": {
+ "version": "5.1.6",
+ "resolved": "https://registry.npmjs.org/parse-asn1/-/parse-asn1-5.1.6.tgz",
+ "integrity": "sha512-RnZRo1EPU6JBnra2vGHj0yhp6ebyjBZpmUCLHWiFhxlzvBCCpAuZ7elsBp1PVAbQN0/04VD/19rfzlBSwLstMw==",
+ "requires": {
+ "asn1.js": "^5.2.0",
+ "browserify-aes": "^1.0.0",
+ "evp_bytestokey": "^1.0.0",
+ "pbkdf2": "^3.0.3",
+ "safe-buffer": "^5.1.1"
+ }
+ },
+ "parse-json": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-4.0.0.tgz",
+ "integrity": "sha1-vjX1Qlvh9/bHRxhPmKeIy5lHfuA=",
+ "requires": {
+ "error-ex": "^1.3.1",
+ "json-parse-better-errors": "^1.0.1"
+ }
+ },
+ "parse5": {
+ "version": "6.0.1",
+ "resolved": "https://registry.npmjs.org/parse5/-/parse5-6.0.1.tgz",
+ "integrity": "sha512-Ofn/CTFzRGTTxwpNEs9PP93gXShHcTq255nzRYSKe8AkVpZY7e1fpmTfOyoIvjP5HG7Z2ZM7VS9PPhQGW2pOpw=="
+ },
+ "parse5-htmlparser2-tree-adapter": {
+ "version": "6.0.1",
+ "resolved": "https://registry.npmjs.org/parse5-htmlparser2-tree-adapter/-/parse5-htmlparser2-tree-adapter-6.0.1.tgz",
+ "integrity": "sha512-qPuWvbLgvDGilKc5BoicRovlT4MtYT6JfJyBOMDsKoiT+GiuP5qyrPCnR9HcPECIJJmZh5jRndyNThnhhb/vlA==",
+ "requires": {
+ "parse5": "^6.0.1"
+ }
+ },
+ "parseurl": {
+ "version": "1.3.3",
+ "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz",
+ "integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ=="
+ },
+ "pascalcase": {
+ "version": "0.1.1",
+ "resolved": "https://registry.npmjs.org/pascalcase/-/pascalcase-0.1.1.tgz",
+ "integrity": "sha1-s2PlXoAGym/iF4TS2yK9FdeRfxQ="
+ },
+ "path-browserify": {
+ "version": "0.0.1",
+ "resolved": "https://registry.npmjs.org/path-browserify/-/path-browserify-0.0.1.tgz",
+ "integrity": "sha512-BapA40NHICOS+USX9SN4tyhq+A2RrN/Ws5F0Z5aMHDp98Fl86lX8Oti8B7uN93L4Ifv4fHOEA+pQw87gmMO/lQ=="
+ },
+ "path-dirname": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/path-dirname/-/path-dirname-1.0.2.tgz",
+ "integrity": "sha1-zDPSTVJeCZpTiMAzbG4yuRYGCeA="
+ },
+ "path-exists": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz",
+ "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w=="
+ },
+ "path-is-absolute": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz",
+ "integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18="
+ },
+ "path-is-inside": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/path-is-inside/-/path-is-inside-1.0.2.tgz",
+ "integrity": "sha1-NlQX3t5EQw0cEa9hAn+s8HS9/FM="
+ },
+ "path-key": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/path-key/-/path-key-2.0.1.tgz",
+ "integrity": "sha1-QRyttXTFoUDTpLGRDUDYDMn0C0A="
+ },
+ "path-parse": {
+ "version": "1.0.6",
+ "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.6.tgz",
+ "integrity": "sha512-GSmOT2EbHrINBf9SR7CDELwlJ8AENk3Qn7OikK4nFYAu3Ote2+JYNVvkpAEQm3/TLNEJFD/xZJjzyxg3KBWOzw=="
+ },
+ "path-to-regexp": {
+ "version": "0.1.7",
+ "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.7.tgz",
+ "integrity": "sha1-32BBeABfUi8V60SQ5yR6G/qmf4w="
+ },
+ "path-type": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/path-type/-/path-type-3.0.0.tgz",
+ "integrity": "sha512-T2ZUsdZFHgA3u4e5PfPbjd7HDDpxPnQb5jN0SrDsjNSuVXHJqtwTnWqG0B1jZrgmJ/7lj1EmVIByWt1gxGkWvg==",
+ "requires": {
+ "pify": "^3.0.0"
+ },
+ "dependencies": {
+ "pify": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/pify/-/pify-3.0.0.tgz",
+ "integrity": "sha1-5aSs0sEB/fPZpNB/DbxNtJ3SgXY="
+ }
+ }
+ },
+ "pbkdf2": {
+ "version": "3.1.2",
+ "resolved": "https://registry.npmjs.org/pbkdf2/-/pbkdf2-3.1.2.tgz",
+ "integrity": "sha512-iuh7L6jA7JEGu2WxDwtQP1ddOpaJNC4KlDEFfdQajSGgGPNi4OyDc2R7QnbY2bR9QjBVGwgvTdNJZoE7RaxUMA==",
+ "requires": {
+ "create-hash": "^1.1.2",
+ "create-hmac": "^1.1.4",
+ "ripemd160": "^2.0.1",
+ "safe-buffer": "^5.0.1",
+ "sha.js": "^2.4.8"
+ }
+ },
+ "performance-now": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/performance-now/-/performance-now-2.1.0.tgz",
+ "integrity": "sha1-Ywn04OX6kT7BxpMHrjZLSzd8nns="
+ },
+ "picomatch": {
+ "version": "2.2.3",
+ "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.2.3.tgz",
+ "integrity": "sha512-KpELjfwcCDUb9PeigTs2mBJzXUPzAuP2oPcA989He8Rte0+YUAjw1JVedDhuTKPkHjSYzMN3npC9luThGYEKdg==",
+ "optional": true
+ },
+ "pify": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/pify/-/pify-4.0.1.tgz",
+ "integrity": "sha512-uB80kBFb/tfd68bVleG9T5GGsGPjJrLAUpR5PZIrhBnIaRTQRjqdJSsIKkOP6OAIFbj7GOrcudc5pNjZ+geV2g=="
+ },
+ "pinkie": {
+ "version": "2.0.4",
+ "resolved": "https://registry.npmjs.org/pinkie/-/pinkie-2.0.4.tgz",
+ "integrity": "sha1-clVrgM+g1IqXToDnckjoDtT3+HA="
+ },
+ "pinkie-promise": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/pinkie-promise/-/pinkie-promise-2.0.1.tgz",
+ "integrity": "sha1-ITXW36ejWMBprJsXh3YogihFD/o=",
+ "requires": {
+ "pinkie": "^2.0.0"
+ }
+ },
+ "pkg-dir": {
+ "version": "4.2.0",
+ "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-4.2.0.tgz",
+ "integrity": "sha512-HRDzbaKjC+AOWVXxAU/x54COGeIv9eb+6CkDSQoNTt4XyWoIJvuPsXizxu/Fr23EiekbtZwmh1IcIG/l/a10GQ==",
+ "requires": {
+ "find-up": "^4.0.0"
+ }
+ },
+ "portfinder": {
+ "version": "1.0.28",
+ "resolved": "https://registry.npmjs.org/portfinder/-/portfinder-1.0.28.tgz",
+ "integrity": "sha512-Se+2isanIcEqf2XMHjyUKskczxbPH7dQnlMjXX6+dybayyHvAf/TCgyMRlzf/B6QDhAEFOGes0pzRo3by4AbMA==",
+ "requires": {
+ "async": "^2.6.2",
+ "debug": "^3.1.1",
+ "mkdirp": "^0.5.5"
+ },
+ "dependencies": {
+ "debug": {
+ "version": "3.2.7",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz",
+ "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==",
+ "requires": {
+ "ms": "^2.1.1"
+ }
+ },
+ "mkdirp": {
+ "version": "0.5.5",
+ "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz",
+ "integrity": "sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==",
+ "requires": {
+ "minimist": "^1.2.5"
+ }
+ },
+ "ms": {
+ "version": "2.1.3",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
+ "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA=="
+ }
+ }
+ },
+ "posix-character-classes": {
+ "version": "0.1.1",
+ "resolved": "https://registry.npmjs.org/posix-character-classes/-/posix-character-classes-0.1.1.tgz",
+ "integrity": "sha1-AerA/jta9xoqbAL+q7jB/vfgDqs="
+ },
+ "postcss": {
+ "version": "7.0.35",
+ "resolved": "https://registry.npmjs.org/postcss/-/postcss-7.0.35.tgz",
+ "integrity": "sha512-3QT8bBJeX/S5zKTTjTCIjRF3If4avAT6kqxcASlTWEtAFCb9NH0OUxNDfgZSWdP5fJnBYCMEWkIFfWeugjzYMg==",
+ "requires": {
+ "chalk": "^2.4.2",
+ "source-map": "^0.6.1",
+ "supports-color": "^6.1.0"
+ },
+ "dependencies": {
+ "source-map": {
+ "version": "0.6.1",
+ "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz",
+ "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g=="
+ },
+ "supports-color": {
+ "version": "6.1.0",
+ "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-6.1.0.tgz",
+ "integrity": "sha512-qe1jfm1Mg7Nq/NSh6XE24gPXROEVsWHxC1LIx//XNlD9iw7YZQGjZNjYN7xGaEG6iKdA8EtNFW6R0gjnVXp+wQ==",
+ "requires": {
+ "has-flag": "^3.0.0"
+ }
+ }
+ }
+ },
+ "postcss-calc": {
+ "version": "7.0.5",
+ "resolved": "https://registry.npmjs.org/postcss-calc/-/postcss-calc-7.0.5.tgz",
+ "integrity": "sha512-1tKHutbGtLtEZF6PT4JSihCHfIVldU72mZ8SdZHIYriIZ9fh9k9aWSppaT8rHsyI3dX+KSR+W+Ix9BMY3AODrg==",
+ "requires": {
+ "postcss": "^7.0.27",
+ "postcss-selector-parser": "^6.0.2",
+ "postcss-value-parser": "^4.0.2"
+ }
+ },
+ "postcss-colormin": {
+ "version": "4.0.3",
+ "resolved": "https://registry.npmjs.org/postcss-colormin/-/postcss-colormin-4.0.3.tgz",
+ "integrity": "sha512-WyQFAdDZpExQh32j0U0feWisZ0dmOtPl44qYmJKkq9xFWY3p+4qnRzCHeNrkeRhwPHz9bQ3mo0/yVkaply0MNw==",
+ "requires": {
+ "browserslist": "^4.0.0",
+ "color": "^3.0.0",
+ "has": "^1.0.0",
+ "postcss": "^7.0.0",
+ "postcss-value-parser": "^3.0.0"
+ },
+ "dependencies": {
+ "postcss-value-parser": {
+ "version": "3.3.1",
+ "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz",
+ "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ=="
+ }
+ }
+ },
+ "postcss-convert-values": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/postcss-convert-values/-/postcss-convert-values-4.0.1.tgz",
+ "integrity": "sha512-Kisdo1y77KUC0Jmn0OXU/COOJbzM8cImvw1ZFsBgBgMgb1iL23Zs/LXRe3r+EZqM3vGYKdQ2YJVQ5VkJI+zEJQ==",
+ "requires": {
+ "postcss": "^7.0.0",
+ "postcss-value-parser": "^3.0.0"
+ },
+ "dependencies": {
+ "postcss-value-parser": {
+ "version": "3.3.1",
+ "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz",
+ "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ=="
+ }
+ }
+ },
+ "postcss-discard-comments": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/postcss-discard-comments/-/postcss-discard-comments-4.0.2.tgz",
+ "integrity": "sha512-RJutN259iuRf3IW7GZyLM5Sw4GLTOH8FmsXBnv8Ab/Tc2k4SR4qbV4DNbyyY4+Sjo362SyDmW2DQ7lBSChrpkg==",
+ "requires": {
+ "postcss": "^7.0.0"
+ }
+ },
+ "postcss-discard-duplicates": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/postcss-discard-duplicates/-/postcss-discard-duplicates-4.0.2.tgz",
+ "integrity": "sha512-ZNQfR1gPNAiXZhgENFfEglF93pciw0WxMkJeVmw8eF+JZBbMD7jp6C67GqJAXVZP2BWbOztKfbsdmMp/k8c6oQ==",
+ "requires": {
+ "postcss": "^7.0.0"
+ }
+ },
+ "postcss-discard-empty": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/postcss-discard-empty/-/postcss-discard-empty-4.0.1.tgz",
+ "integrity": "sha512-B9miTzbznhDjTfjvipfHoqbWKwd0Mj+/fL5s1QOz06wufguil+Xheo4XpOnc4NqKYBCNqqEzgPv2aPBIJLox0w==",
+ "requires": {
+ "postcss": "^7.0.0"
+ }
+ },
+ "postcss-discard-overridden": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/postcss-discard-overridden/-/postcss-discard-overridden-4.0.1.tgz",
+ "integrity": "sha512-IYY2bEDD7g1XM1IDEsUT4//iEYCxAmP5oDSFMVU/JVvT7gh+l4fmjciLqGgwjdWpQIdb0Che2VX00QObS5+cTg==",
+ "requires": {
+ "postcss": "^7.0.0"
+ }
+ },
+ "postcss-load-config": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/postcss-load-config/-/postcss-load-config-2.1.2.tgz",
+ "integrity": "sha512-/rDeGV6vMUo3mwJZmeHfEDvwnTKKqQ0S7OHUi/kJvvtx3aWtyWG2/0ZWnzCt2keEclwN6Tf0DST2v9kITdOKYw==",
+ "requires": {
+ "cosmiconfig": "^5.0.0",
+ "import-cwd": "^2.0.0"
+ }
+ },
+ "postcss-loader": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/postcss-loader/-/postcss-loader-3.0.0.tgz",
+ "integrity": "sha512-cLWoDEY5OwHcAjDnkyRQzAXfs2jrKjXpO/HQFcc5b5u/r7aa471wdmChmwfnv7x2u840iat/wi0lQ5nbRgSkUA==",
+ "requires": {
+ "loader-utils": "^1.1.0",
+ "postcss": "^7.0.0",
+ "postcss-load-config": "^2.0.0",
+ "schema-utils": "^1.0.0"
+ },
+ "dependencies": {
+ "schema-utils": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-1.0.0.tgz",
+ "integrity": "sha512-i27Mic4KovM/lnGsy8whRCHhc7VicJajAjTrYg11K9zfZXnYIt4k5F+kZkwjnrhKzLic/HLU4j11mjsz2G/75g==",
+ "requires": {
+ "ajv": "^6.1.0",
+ "ajv-errors": "^1.0.0",
+ "ajv-keywords": "^3.1.0"
+ }
+ }
+ }
+ },
+ "postcss-merge-longhand": {
+ "version": "4.0.11",
+ "resolved": "https://registry.npmjs.org/postcss-merge-longhand/-/postcss-merge-longhand-4.0.11.tgz",
+ "integrity": "sha512-alx/zmoeXvJjp7L4mxEMjh8lxVlDFX1gqWHzaaQewwMZiVhLo42TEClKaeHbRf6J7j82ZOdTJ808RtN0ZOZwvw==",
+ "requires": {
+ "css-color-names": "0.0.4",
+ "postcss": "^7.0.0",
+ "postcss-value-parser": "^3.0.0",
+ "stylehacks": "^4.0.0"
+ },
+ "dependencies": {
+ "postcss-value-parser": {
+ "version": "3.3.1",
+ "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz",
+ "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ=="
+ }
+ }
+ },
+ "postcss-merge-rules": {
+ "version": "4.0.3",
+ "resolved": "https://registry.npmjs.org/postcss-merge-rules/-/postcss-merge-rules-4.0.3.tgz",
+ "integrity": "sha512-U7e3r1SbvYzO0Jr3UT/zKBVgYYyhAz0aitvGIYOYK5CPmkNih+WDSsS5tvPrJ8YMQYlEMvsZIiqmn7HdFUaeEQ==",
+ "requires": {
+ "browserslist": "^4.0.0",
+ "caniuse-api": "^3.0.0",
+ "cssnano-util-same-parent": "^4.0.0",
+ "postcss": "^7.0.0",
+ "postcss-selector-parser": "^3.0.0",
+ "vendors": "^1.0.0"
+ },
+ "dependencies": {
+ "postcss-selector-parser": {
+ "version": "3.1.2",
+ "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-3.1.2.tgz",
+ "integrity": "sha512-h7fJ/5uWuRVyOtkO45pnt1Ih40CEleeyCHzipqAZO2e5H20g25Y48uYnFUiShvY4rZWNJ/Bib/KVPmanaCtOhA==",
+ "requires": {
+ "dot-prop": "^5.2.0",
+ "indexes-of": "^1.0.1",
+ "uniq": "^1.0.1"
+ }
+ }
+ }
+ },
+ "postcss-minify-font-values": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/postcss-minify-font-values/-/postcss-minify-font-values-4.0.2.tgz",
+ "integrity": "sha512-j85oO6OnRU9zPf04+PZv1LYIYOprWm6IA6zkXkrJXyRveDEuQggG6tvoy8ir8ZwjLxLuGfNkCZEQG7zan+Hbtg==",
+ "requires": {
+ "postcss": "^7.0.0",
+ "postcss-value-parser": "^3.0.0"
+ },
+ "dependencies": {
+ "postcss-value-parser": {
+ "version": "3.3.1",
+ "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz",
+ "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ=="
+ }
+ }
+ },
+ "postcss-minify-gradients": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/postcss-minify-gradients/-/postcss-minify-gradients-4.0.2.tgz",
+ "integrity": "sha512-qKPfwlONdcf/AndP1U8SJ/uzIJtowHlMaSioKzebAXSG4iJthlWC9iSWznQcX4f66gIWX44RSA841HTHj3wK+Q==",
+ "requires": {
+ "cssnano-util-get-arguments": "^4.0.0",
+ "is-color-stop": "^1.0.0",
+ "postcss": "^7.0.0",
+ "postcss-value-parser": "^3.0.0"
+ },
+ "dependencies": {
+ "postcss-value-parser": {
+ "version": "3.3.1",
+ "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz",
+ "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ=="
+ }
+ }
+ },
+ "postcss-minify-params": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/postcss-minify-params/-/postcss-minify-params-4.0.2.tgz",
+ "integrity": "sha512-G7eWyzEx0xL4/wiBBJxJOz48zAKV2WG3iZOqVhPet/9geefm/Px5uo1fzlHu+DOjT+m0Mmiz3jkQzVHe6wxAWg==",
+ "requires": {
+ "alphanum-sort": "^1.0.0",
+ "browserslist": "^4.0.0",
+ "cssnano-util-get-arguments": "^4.0.0",
+ "postcss": "^7.0.0",
+ "postcss-value-parser": "^3.0.0",
+ "uniqs": "^2.0.0"
+ },
+ "dependencies": {
+ "postcss-value-parser": {
+ "version": "3.3.1",
+ "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz",
+ "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ=="
+ }
+ }
+ },
+ "postcss-minify-selectors": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/postcss-minify-selectors/-/postcss-minify-selectors-4.0.2.tgz",
+ "integrity": "sha512-D5S1iViljXBj9kflQo4YutWnJmwm8VvIsU1GeXJGiG9j8CIg9zs4voPMdQDUmIxetUOh60VilsNzCiAFTOqu3g==",
+ "requires": {
+ "alphanum-sort": "^1.0.0",
+ "has": "^1.0.0",
+ "postcss": "^7.0.0",
+ "postcss-selector-parser": "^3.0.0"
+ },
+ "dependencies": {
+ "postcss-selector-parser": {
+ "version": "3.1.2",
+ "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-3.1.2.tgz",
+ "integrity": "sha512-h7fJ/5uWuRVyOtkO45pnt1Ih40CEleeyCHzipqAZO2e5H20g25Y48uYnFUiShvY4rZWNJ/Bib/KVPmanaCtOhA==",
+ "requires": {
+ "dot-prop": "^5.2.0",
+ "indexes-of": "^1.0.1",
+ "uniq": "^1.0.1"
+ }
+ }
+ }
+ },
+ "postcss-modules-extract-imports": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/postcss-modules-extract-imports/-/postcss-modules-extract-imports-2.0.0.tgz",
+ "integrity": "sha512-LaYLDNS4SG8Q5WAWqIJgdHPJrDDr/Lv775rMBFUbgjTz6j34lUznACHcdRWroPvXANP2Vj7yNK57vp9eFqzLWQ==",
+ "requires": {
+ "postcss": "^7.0.5"
+ }
+ },
+ "postcss-modules-local-by-default": {
+ "version": "2.0.6",
+ "resolved": "https://registry.npmjs.org/postcss-modules-local-by-default/-/postcss-modules-local-by-default-2.0.6.tgz",
+ "integrity": "sha512-oLUV5YNkeIBa0yQl7EYnxMgy4N6noxmiwZStaEJUSe2xPMcdNc8WmBQuQCx18H5psYbVxz8zoHk0RAAYZXP9gA==",
+ "requires": {
+ "postcss": "^7.0.6",
+ "postcss-selector-parser": "^6.0.0",
+ "postcss-value-parser": "^3.3.1"
+ },
+ "dependencies": {
+ "postcss-value-parser": {
+ "version": "3.3.1",
+ "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz",
+ "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ=="
+ }
+ }
+ },
+ "postcss-modules-scope": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/postcss-modules-scope/-/postcss-modules-scope-2.2.0.tgz",
+ "integrity": "sha512-YyEgsTMRpNd+HmyC7H/mh3y+MeFWevy7V1evVhJWewmMbjDHIbZbOXICC2y+m1xI1UVfIT1HMW/O04Hxyu9oXQ==",
+ "requires": {
+ "postcss": "^7.0.6",
+ "postcss-selector-parser": "^6.0.0"
+ }
+ },
+ "postcss-modules-values": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/postcss-modules-values/-/postcss-modules-values-2.0.0.tgz",
+ "integrity": "sha512-Ki7JZa7ff1N3EIMlPnGTZfUMe69FFwiQPnVSXC9mnn3jozCRBYIxiZd44yJOV2AmabOo4qFf8s0dC/+lweG7+w==",
+ "requires": {
+ "icss-replace-symbols": "^1.1.0",
+ "postcss": "^7.0.6"
+ }
+ },
+ "postcss-normalize-charset": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/postcss-normalize-charset/-/postcss-normalize-charset-4.0.1.tgz",
+ "integrity": "sha512-gMXCrrlWh6G27U0hF3vNvR3w8I1s2wOBILvA87iNXaPvSNo5uZAMYsZG7XjCUf1eVxuPfyL4TJ7++SGZLc9A3g==",
+ "requires": {
+ "postcss": "^7.0.0"
+ }
+ },
+ "postcss-normalize-display-values": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/postcss-normalize-display-values/-/postcss-normalize-display-values-4.0.2.tgz",
+ "integrity": "sha512-3F2jcsaMW7+VtRMAqf/3m4cPFhPD3EFRgNs18u+k3lTJJlVe7d0YPO+bnwqo2xg8YiRpDXJI2u8A0wqJxMsQuQ==",
+ "requires": {
+ "cssnano-util-get-match": "^4.0.0",
+ "postcss": "^7.0.0",
+ "postcss-value-parser": "^3.0.0"
+ },
+ "dependencies": {
+ "postcss-value-parser": {
+ "version": "3.3.1",
+ "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz",
+ "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ=="
+ }
+ }
+ },
+ "postcss-normalize-positions": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/postcss-normalize-positions/-/postcss-normalize-positions-4.0.2.tgz",
+ "integrity": "sha512-Dlf3/9AxpxE+NF1fJxYDeggi5WwV35MXGFnnoccP/9qDtFrTArZ0D0R+iKcg5WsUd8nUYMIl8yXDCtcrT8JrdA==",
+ "requires": {
+ "cssnano-util-get-arguments": "^4.0.0",
+ "has": "^1.0.0",
+ "postcss": "^7.0.0",
+ "postcss-value-parser": "^3.0.0"
+ },
+ "dependencies": {
+ "postcss-value-parser": {
+ "version": "3.3.1",
+ "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz",
+ "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ=="
+ }
+ }
+ },
+ "postcss-normalize-repeat-style": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/postcss-normalize-repeat-style/-/postcss-normalize-repeat-style-4.0.2.tgz",
+ "integrity": "sha512-qvigdYYMpSuoFs3Is/f5nHdRLJN/ITA7huIoCyqqENJe9PvPmLhNLMu7QTjPdtnVf6OcYYO5SHonx4+fbJE1+Q==",
+ "requires": {
+ "cssnano-util-get-arguments": "^4.0.0",
+ "cssnano-util-get-match": "^4.0.0",
+ "postcss": "^7.0.0",
+ "postcss-value-parser": "^3.0.0"
+ },
+ "dependencies": {
+ "postcss-value-parser": {
+ "version": "3.3.1",
+ "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz",
+ "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ=="
+ }
+ }
+ },
+ "postcss-normalize-string": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/postcss-normalize-string/-/postcss-normalize-string-4.0.2.tgz",
+ "integrity": "sha512-RrERod97Dnwqq49WNz8qo66ps0swYZDSb6rM57kN2J+aoyEAJfZ6bMx0sx/F9TIEX0xthPGCmeyiam/jXif0eA==",
+ "requires": {
+ "has": "^1.0.0",
+ "postcss": "^7.0.0",
+ "postcss-value-parser": "^3.0.0"
+ },
+ "dependencies": {
+ "postcss-value-parser": {
+ "version": "3.3.1",
+ "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz",
+ "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ=="
+ }
+ }
+ },
+ "postcss-normalize-timing-functions": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/postcss-normalize-timing-functions/-/postcss-normalize-timing-functions-4.0.2.tgz",
+ "integrity": "sha512-acwJY95edP762e++00Ehq9L4sZCEcOPyaHwoaFOhIwWCDfik6YvqsYNxckee65JHLKzuNSSmAdxwD2Cud1Z54A==",
+ "requires": {
+ "cssnano-util-get-match": "^4.0.0",
+ "postcss": "^7.0.0",
+ "postcss-value-parser": "^3.0.0"
+ },
+ "dependencies": {
+ "postcss-value-parser": {
+ "version": "3.3.1",
+ "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz",
+ "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ=="
+ }
+ }
+ },
+ "postcss-normalize-unicode": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/postcss-normalize-unicode/-/postcss-normalize-unicode-4.0.1.tgz",
+ "integrity": "sha512-od18Uq2wCYn+vZ/qCOeutvHjB5jm57ToxRaMeNuf0nWVHaP9Hua56QyMF6fs/4FSUnVIw0CBPsU0K4LnBPwYwg==",
+ "requires": {
+ "browserslist": "^4.0.0",
+ "postcss": "^7.0.0",
+ "postcss-value-parser": "^3.0.0"
+ },
+ "dependencies": {
+ "postcss-value-parser": {
+ "version": "3.3.1",
+ "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz",
+ "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ=="
+ }
+ }
+ },
+ "postcss-normalize-url": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/postcss-normalize-url/-/postcss-normalize-url-4.0.1.tgz",
+ "integrity": "sha512-p5oVaF4+IHwu7VpMan/SSpmpYxcJMtkGppYf0VbdH5B6hN8YNmVyJLuY9FmLQTzY3fag5ESUUHDqM+heid0UVA==",
+ "requires": {
+ "is-absolute-url": "^2.0.0",
+ "normalize-url": "^3.0.0",
+ "postcss": "^7.0.0",
+ "postcss-value-parser": "^3.0.0"
+ },
+ "dependencies": {
+ "normalize-url": {
+ "version": "3.3.0",
+ "resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-3.3.0.tgz",
+ "integrity": "sha512-U+JJi7duF1o+u2pynbp2zXDW2/PADgC30f0GsHZtRh+HOcXHnw137TrNlyxxRvWW5fjKd3bcLHPxofWuCjaeZg=="
+ },
+ "postcss-value-parser": {
+ "version": "3.3.1",
+ "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz",
+ "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ=="
+ }
+ }
+ },
+ "postcss-normalize-whitespace": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/postcss-normalize-whitespace/-/postcss-normalize-whitespace-4.0.2.tgz",
+ "integrity": "sha512-tO8QIgrsI3p95r8fyqKV+ufKlSHh9hMJqACqbv2XknufqEDhDvbguXGBBqxw9nsQoXWf0qOqppziKJKHMD4GtA==",
+ "requires": {
+ "postcss": "^7.0.0",
+ "postcss-value-parser": "^3.0.0"
+ },
+ "dependencies": {
+ "postcss-value-parser": {
+ "version": "3.3.1",
+ "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz",
+ "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ=="
+ }
+ }
+ },
+ "postcss-ordered-values": {
+ "version": "4.1.2",
+ "resolved": "https://registry.npmjs.org/postcss-ordered-values/-/postcss-ordered-values-4.1.2.tgz",
+ "integrity": "sha512-2fCObh5UanxvSxeXrtLtlwVThBvHn6MQcu4ksNT2tsaV2Fg76R2CV98W7wNSlX+5/pFwEyaDwKLLoEV7uRybAw==",
+ "requires": {
+ "cssnano-util-get-arguments": "^4.0.0",
+ "postcss": "^7.0.0",
+ "postcss-value-parser": "^3.0.0"
+ },
+ "dependencies": {
+ "postcss-value-parser": {
+ "version": "3.3.1",
+ "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz",
+ "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ=="
+ }
+ }
+ },
+ "postcss-reduce-initial": {
+ "version": "4.0.3",
+ "resolved": "https://registry.npmjs.org/postcss-reduce-initial/-/postcss-reduce-initial-4.0.3.tgz",
+ "integrity": "sha512-gKWmR5aUulSjbzOfD9AlJiHCGH6AEVLaM0AV+aSioxUDd16qXP1PCh8d1/BGVvpdWn8k/HiK7n6TjeoXN1F7DA==",
+ "requires": {
+ "browserslist": "^4.0.0",
+ "caniuse-api": "^3.0.0",
+ "has": "^1.0.0",
+ "postcss": "^7.0.0"
+ }
+ },
+ "postcss-reduce-transforms": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/postcss-reduce-transforms/-/postcss-reduce-transforms-4.0.2.tgz",
+ "integrity": "sha512-EEVig1Q2QJ4ELpJXMZR8Vt5DQx8/mo+dGWSR7vWXqcob2gQLyQGsionYcGKATXvQzMPn6DSN1vTN7yFximdIAg==",
+ "requires": {
+ "cssnano-util-get-match": "^4.0.0",
+ "has": "^1.0.0",
+ "postcss": "^7.0.0",
+ "postcss-value-parser": "^3.0.0"
+ },
+ "dependencies": {
+ "postcss-value-parser": {
+ "version": "3.3.1",
+ "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz",
+ "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ=="
+ }
+ }
+ },
+ "postcss-safe-parser": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/postcss-safe-parser/-/postcss-safe-parser-4.0.2.tgz",
+ "integrity": "sha512-Uw6ekxSWNLCPesSv/cmqf2bY/77z11O7jZGPax3ycZMFU/oi2DMH9i89AdHc1tRwFg/arFoEwX0IS3LCUxJh1g==",
+ "requires": {
+ "postcss": "^7.0.26"
+ }
+ },
+ "postcss-selector-parser": {
+ "version": "6.0.5",
+ "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.0.5.tgz",
+ "integrity": "sha512-aFYPoYmXbZ1V6HZaSvat08M97A8HqO6Pjz+PiNpw/DhuRrC72XWAdp3hL6wusDCN31sSmcZyMGa2hZEuX+Xfhg==",
+ "requires": {
+ "cssesc": "^3.0.0",
+ "util-deprecate": "^1.0.2"
+ }
+ },
+ "postcss-svgo": {
+ "version": "4.0.3",
+ "resolved": "https://registry.npmjs.org/postcss-svgo/-/postcss-svgo-4.0.3.tgz",
+ "integrity": "sha512-NoRbrcMWTtUghzuKSoIm6XV+sJdvZ7GZSc3wdBN0W19FTtp2ko8NqLsgoh/m9CzNhU3KLPvQmjIwtaNFkaFTvw==",
+ "requires": {
+ "postcss": "^7.0.0",
+ "postcss-value-parser": "^3.0.0",
+ "svgo": "^1.0.0"
+ },
+ "dependencies": {
+ "postcss-value-parser": {
+ "version": "3.3.1",
+ "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz",
+ "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ=="
+ }
+ }
+ },
+ "postcss-unique-selectors": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/postcss-unique-selectors/-/postcss-unique-selectors-4.0.1.tgz",
+ "integrity": "sha512-+JanVaryLo9QwZjKrmJgkI4Fn8SBgRO6WXQBJi7KiAVPlmxikB5Jzc4EvXMT2H0/m0RjrVVm9rGNhZddm/8Spg==",
+ "requires": {
+ "alphanum-sort": "^1.0.0",
+ "postcss": "^7.0.0",
+ "uniqs": "^2.0.0"
+ }
+ },
+ "postcss-value-parser": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-4.1.0.tgz",
+ "integrity": "sha512-97DXOFbQJhk71ne5/Mt6cOu6yxsSfM0QGQyl0L25Gca4yGWEGJaig7l7gbCX623VqTBNGLRLaVUCnNkcedlRSQ=="
+ },
+ "prepend-http": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/prepend-http/-/prepend-http-2.0.0.tgz",
+ "integrity": "sha1-6SQ0v6XqjBn0HN/UAddBo8gZ2Jc="
+ },
+ "prettier": {
+ "version": "1.19.1",
+ "resolved": "https://registry.npmjs.org/prettier/-/prettier-1.19.1.tgz",
+ "integrity": "sha512-s7PoyDv/II1ObgQunCbB9PdLmUcBZcnWOcxDh7O0N/UwDEsHyqkW+Qh28jW+mVuCdx7gLB0BotYI1Y6uI9iyew==",
+ "optional": true
+ },
+ "pretty-error": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/pretty-error/-/pretty-error-2.1.2.tgz",
+ "integrity": "sha512-EY5oDzmsX5wvuynAByrmY0P0hcp+QpnAKbJng2A2MPjVKXCxrDSUkzghVJ4ZGPIv+JC4gX8fPUWscC0RtjsWGw==",
+ "requires": {
+ "lodash": "^4.17.20",
+ "renderkid": "^2.0.4"
+ }
+ },
+ "pretty-time": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/pretty-time/-/pretty-time-1.1.0.tgz",
+ "integrity": "sha512-28iF6xPQrP8Oa6uxE6a1biz+lWeTOAPKggvjB8HAs6nVMKZwf5bG++632Dx614hIWgUPkgivRfG+a8uAXGTIbA=="
+ },
+ "prismjs": {
+ "version": "1.23.0",
+ "resolved": "https://registry.npmjs.org/prismjs/-/prismjs-1.23.0.tgz",
+ "integrity": "sha512-c29LVsqOaLbBHuIbsTxaKENh1N2EQBOHaWv7gkHN4dgRbxSREqDnDbtFJYdpPauS4YCplMSNCABQ6Eeor69bAA==",
+ "requires": {
+ "clipboard": "^2.0.0"
+ }
+ },
+ "process": {
+ "version": "0.11.10",
+ "resolved": "https://registry.npmjs.org/process/-/process-0.11.10.tgz",
+ "integrity": "sha1-czIwDoQBYb2j5podHZGn1LwW8YI="
+ },
+ "process-nextick-args": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz",
+ "integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag=="
+ },
+ "promise": {
+ "version": "7.3.1",
+ "resolved": "https://registry.npmjs.org/promise/-/promise-7.3.1.tgz",
+ "integrity": "sha512-nolQXZ/4L+bP/UGlkfaIujX9BKxGwmQ9OT4mOt5yvy8iK1h3wqTEJCijzGANTCCl9nWjY41juyAn2K3Q1hLLTg==",
+ "requires": {
+ "asap": "~2.0.3"
+ }
+ },
+ "promise-inflight": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/promise-inflight/-/promise-inflight-1.0.1.tgz",
+ "integrity": "sha1-mEcocL8igTL8vdhoEputEsPAKeM="
+ },
+ "proxy-addr": {
+ "version": "2.0.6",
+ "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.6.tgz",
+ "integrity": "sha512-dh/frvCBVmSsDYzw6n926jv974gddhkFPfiN8hPOi30Wax25QZyZEGveluCgliBnqmuM+UJmBErbAUFIoDbjOw==",
+ "requires": {
+ "forwarded": "~0.1.2",
+ "ipaddr.js": "1.9.1"
+ }
+ },
+ "prr": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/prr/-/prr-1.0.1.tgz",
+ "integrity": "sha1-0/wRS6BplaRexok/SEzrHXj19HY="
+ },
+ "pseudomap": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/pseudomap/-/pseudomap-1.0.2.tgz",
+ "integrity": "sha1-8FKijacOYYkX7wqKw0wa5aaChrM="
+ },
+ "psl": {
+ "version": "1.8.0",
+ "resolved": "https://registry.npmjs.org/psl/-/psl-1.8.0.tgz",
+ "integrity": "sha512-RIdOzyoavK+hA18OGGWDqUTsCLhtA7IcZ/6NCs4fFJaHBDab+pDDmDIByWFRQJq2Cd7r1OoQxBGKOaztq+hjIQ=="
+ },
+ "public-encrypt": {
+ "version": "4.0.3",
+ "resolved": "https://registry.npmjs.org/public-encrypt/-/public-encrypt-4.0.3.tgz",
+ "integrity": "sha512-zVpa8oKZSz5bTMTFClc1fQOnyyEzpl5ozpi1B5YcvBrdohMjH2rfsBtyXcuNuwjsDIXmBYlF2N5FlJYhR29t8Q==",
+ "requires": {
+ "bn.js": "^4.1.0",
+ "browserify-rsa": "^4.0.0",
+ "create-hash": "^1.1.0",
+ "parse-asn1": "^5.0.0",
+ "randombytes": "^2.0.1",
+ "safe-buffer": "^5.1.2"
+ },
+ "dependencies": {
+ "bn.js": {
+ "version": "4.12.0",
+ "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.12.0.tgz",
+ "integrity": "sha512-c98Bf3tPniI+scsdk237ku1Dc3ujXQTSgyiPUDEOe7tRkhrqridvh8klBv0HCEso1OLOYcHuCv/cS6DNxKH+ZA=="
+ }
+ }
+ },
+ "pug": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/pug/-/pug-3.0.2.tgz",
+ "integrity": "sha512-bp0I/hiK1D1vChHh6EfDxtndHji55XP/ZJKwsRqrz6lRia6ZC2OZbdAymlxdVFwd1L70ebrVJw4/eZ79skrIaw==",
+ "requires": {
+ "pug-code-gen": "^3.0.2",
+ "pug-filters": "^4.0.0",
+ "pug-lexer": "^5.0.1",
+ "pug-linker": "^4.0.0",
+ "pug-load": "^3.0.0",
+ "pug-parser": "^6.0.0",
+ "pug-runtime": "^3.0.1",
+ "pug-strip-comments": "^2.0.0"
+ }
+ },
+ "pug-attrs": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/pug-attrs/-/pug-attrs-3.0.0.tgz",
+ "integrity": "sha512-azINV9dUtzPMFQktvTXciNAfAuVh/L/JCl0vtPCwvOA21uZrC08K/UnmrL+SXGEVc1FwzjW62+xw5S/uaLj6cA==",
+ "requires": {
+ "constantinople": "^4.0.1",
+ "js-stringify": "^1.0.2",
+ "pug-runtime": "^3.0.0"
+ }
+ },
+ "pug-code-gen": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/pug-code-gen/-/pug-code-gen-3.0.2.tgz",
+ "integrity": "sha512-nJMhW16MbiGRiyR4miDTQMRWDgKplnHyeLvioEJYbk1RsPI3FuA3saEP8uwnTb2nTJEKBU90NFVWJBk4OU5qyg==",
+ "requires": {
+ "constantinople": "^4.0.1",
+ "doctypes": "^1.1.0",
+ "js-stringify": "^1.0.2",
+ "pug-attrs": "^3.0.0",
+ "pug-error": "^2.0.0",
+ "pug-runtime": "^3.0.0",
+ "void-elements": "^3.1.0",
+ "with": "^7.0.0"
+ }
+ },
+ "pug-error": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/pug-error/-/pug-error-2.0.0.tgz",
+ "integrity": "sha512-sjiUsi9M4RAGHktC1drQfCr5C5eriu24Lfbt4s+7SykztEOwVZtbFk1RRq0tzLxcMxMYTBR+zMQaG07J/btayQ=="
+ },
+ "pug-filters": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/pug-filters/-/pug-filters-4.0.0.tgz",
+ "integrity": "sha512-yeNFtq5Yxmfz0f9z2rMXGw/8/4i1cCFecw/Q7+D0V2DdtII5UvqE12VaZ2AY7ri6o5RNXiweGH79OCq+2RQU4A==",
+ "requires": {
+ "constantinople": "^4.0.1",
+ "jstransformer": "1.0.0",
+ "pug-error": "^2.0.0",
+ "pug-walk": "^2.0.0",
+ "resolve": "^1.15.1"
+ }
+ },
+ "pug-lexer": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/pug-lexer/-/pug-lexer-5.0.1.tgz",
+ "integrity": "sha512-0I6C62+keXlZPZkOJeVam9aBLVP2EnbeDw3An+k0/QlqdwH6rv8284nko14Na7c0TtqtogfWXcRoFE4O4Ff20w==",
+ "requires": {
+ "character-parser": "^2.2.0",
+ "is-expression": "^4.0.0",
+ "pug-error": "^2.0.0"
+ }
+ },
+ "pug-linker": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/pug-linker/-/pug-linker-4.0.0.tgz",
+ "integrity": "sha512-gjD1yzp0yxbQqnzBAdlhbgoJL5qIFJw78juN1NpTLt/mfPJ5VgC4BvkoD3G23qKzJtIIXBbcCt6FioLSFLOHdw==",
+ "requires": {
+ "pug-error": "^2.0.0",
+ "pug-walk": "^2.0.0"
+ }
+ },
+ "pug-load": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/pug-load/-/pug-load-3.0.0.tgz",
+ "integrity": "sha512-OCjTEnhLWZBvS4zni/WUMjH2YSUosnsmjGBB1An7CsKQarYSWQ0GCVyd4eQPMFJqZ8w9xgs01QdiZXKVjk92EQ==",
+ "requires": {
+ "object-assign": "^4.1.1",
+ "pug-walk": "^2.0.0"
+ }
+ },
+ "pug-parser": {
+ "version": "6.0.0",
+ "resolved": "https://registry.npmjs.org/pug-parser/-/pug-parser-6.0.0.tgz",
+ "integrity": "sha512-ukiYM/9cH6Cml+AOl5kETtM9NR3WulyVP2y4HOU45DyMim1IeP/OOiyEWRr6qk5I5klpsBnbuHpwKmTx6WURnw==",
+ "requires": {
+ "pug-error": "^2.0.0",
+ "token-stream": "1.0.0"
+ }
+ },
+ "pug-plain-loader": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/pug-plain-loader/-/pug-plain-loader-1.1.0.tgz",
+ "integrity": "sha512-1nYgIJLaahRuHJHhzSPODV44aZfb00bO7kiJiMkke6Hj4SVZftuvx6shZ4BOokk50dJc2RSFqNUBOlus0dniFQ==",
+ "requires": {
+ "loader-utils": "^1.1.0"
+ }
+ },
+ "pug-runtime": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/pug-runtime/-/pug-runtime-3.0.1.tgz",
+ "integrity": "sha512-L50zbvrQ35TkpHwv0G6aLSuueDRwc/97XdY8kL3tOT0FmhgG7UypU3VztfV/LATAvmUfYi4wNxSajhSAeNN+Kg=="
+ },
+ "pug-strip-comments": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/pug-strip-comments/-/pug-strip-comments-2.0.0.tgz",
+ "integrity": "sha512-zo8DsDpH7eTkPHCXFeAk1xZXJbyoTfdPlNR0bK7rpOMuhBYb0f5qUVCO1xlsitYd3w5FQTK7zpNVKb3rZoUrrQ==",
+ "requires": {
+ "pug-error": "^2.0.0"
+ }
+ },
+ "pug-walk": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/pug-walk/-/pug-walk-2.0.0.tgz",
+ "integrity": "sha512-yYELe9Q5q9IQhuvqsZNwA5hfPkMJ8u92bQLIMcsMxf/VADjNtEYptU+inlufAFYcWdHlwNfZOEnOOQrZrcyJCQ=="
+ },
+ "pump": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/pump/-/pump-3.0.0.tgz",
+ "integrity": "sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww==",
+ "requires": {
+ "end-of-stream": "^1.1.0",
+ "once": "^1.3.1"
+ }
+ },
+ "pumpify": {
+ "version": "1.5.1",
+ "resolved": "https://registry.npmjs.org/pumpify/-/pumpify-1.5.1.tgz",
+ "integrity": "sha512-oClZI37HvuUJJxSKKrC17bZ9Cu0ZYhEAGPsPUy9KlMUmv9dKX2o77RUmq7f3XjIxbwyGwYzbzQ1L2Ks8sIradQ==",
+ "requires": {
+ "duplexify": "^3.6.0",
+ "inherits": "^2.0.3",
+ "pump": "^2.0.0"
+ },
+ "dependencies": {
+ "pump": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/pump/-/pump-2.0.1.tgz",
+ "integrity": "sha512-ruPMNRkN3MHP1cWJc9OWr+T/xDP0jhXYCLfJcBuX54hhfIBnaQmAUMfDcG4DM5UMWByBbJY69QSphm3jtDKIkA==",
+ "requires": {
+ "end-of-stream": "^1.1.0",
+ "once": "^1.3.1"
+ }
+ }
+ }
+ },
+ "punycode": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.1.1.tgz",
+ "integrity": "sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A=="
+ },
+ "pupa": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/pupa/-/pupa-2.1.1.tgz",
+ "integrity": "sha512-l1jNAspIBSFqbT+y+5FosojNpVpF94nlI+wDUpqP9enwOTfHx9f0gh5nB96vl+6yTpsJsypeNrwfzPrKuHB41A==",
+ "requires": {
+ "escape-goat": "^2.0.0"
+ }
+ },
+ "q": {
+ "version": "1.5.1",
+ "resolved": "https://registry.npmjs.org/q/-/q-1.5.1.tgz",
+ "integrity": "sha1-fjL3W0E4EpHQRhHxvxQQmsAGUdc="
+ },
+ "qs": {
+ "version": "6.5.2",
+ "resolved": "https://registry.npmjs.org/qs/-/qs-6.5.2.tgz",
+ "integrity": "sha512-N5ZAX4/LxJmF+7wN74pUD6qAh9/wnvdQcjq9TZjevvXzSUo7bfmw91saqMjzGS2xq91/odN2dW/WOl7qQHNDGA=="
+ },
+ "query-string": {
+ "version": "5.1.1",
+ "resolved": "https://registry.npmjs.org/query-string/-/query-string-5.1.1.tgz",
+ "integrity": "sha512-gjWOsm2SoGlgLEdAGt7a6slVOk9mGiXmPFMqrEhLQ68rhQuBnpfs3+EmlvqKyxnCo9/PPlF+9MtY02S1aFg+Jw==",
+ "requires": {
+ "decode-uri-component": "^0.2.0",
+ "object-assign": "^4.1.0",
+ "strict-uri-encode": "^1.0.0"
+ }
+ },
+ "querystring": {
+ "version": "0.2.1",
+ "resolved": "https://registry.npmjs.org/querystring/-/querystring-0.2.1.tgz",
+ "integrity": "sha512-wkvS7mL/JMugcup3/rMitHmd9ecIGd2lhFhK9N3UUQ450h66d1r3Y9nvXzQAW1Lq+wyx61k/1pfKS5KuKiyEbg=="
+ },
+ "querystring-es3": {
+ "version": "0.2.1",
+ "resolved": "https://registry.npmjs.org/querystring-es3/-/querystring-es3-0.2.1.tgz",
+ "integrity": "sha1-nsYfeQSYdXB9aUFFlv2Qek1xHnM="
+ },
+ "querystringify": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/querystringify/-/querystringify-2.2.0.tgz",
+ "integrity": "sha512-FIqgj2EUvTa7R50u0rGsyTftzjYmv/a3hO345bZNrqabNqjtgiDMgmo4mkUjd+nzU5oF3dClKqFIPUKybUyqoQ=="
+ },
+ "randombytes": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz",
+ "integrity": "sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==",
+ "requires": {
+ "safe-buffer": "^5.1.0"
+ }
+ },
+ "randomfill": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/randomfill/-/randomfill-1.0.4.tgz",
+ "integrity": "sha512-87lcbR8+MhcWcUiQ+9e+Rwx8MyR2P7qnt15ynUlbm3TU/fjbgz4GsvfSUDTemtCCtVCqb4ZcEFlyPNTh9bBTLw==",
+ "requires": {
+ "randombytes": "^2.0.5",
+ "safe-buffer": "^5.1.0"
+ }
+ },
+ "range-parser": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz",
+ "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg=="
+ },
+ "raw-body": {
+ "version": "2.4.0",
+ "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.4.0.tgz",
+ "integrity": "sha512-4Oz8DUIwdvoa5qMJelxipzi/iJIi40O5cGV1wNYp5hvZP8ZN0T+jiNkL0QepXs+EsQ9XJ8ipEDoiH70ySUJP3Q==",
+ "requires": {
+ "bytes": "3.1.0",
+ "http-errors": "1.7.2",
+ "iconv-lite": "0.4.24",
+ "unpipe": "1.0.0"
+ },
+ "dependencies": {
+ "bytes": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.0.tgz",
+ "integrity": "sha512-zauLjrfCG+xvoyaqLoV8bLVXXNGC4JqlxFCutSDWA6fJrTo2ZuvLYTqZ7aHBLZSMOopbzwv8f+wZcVzfVTI2Dg=="
+ }
+ }
+ },
+ "rc": {
+ "version": "1.2.8",
+ "resolved": "https://registry.npmjs.org/rc/-/rc-1.2.8.tgz",
+ "integrity": "sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw==",
+ "requires": {
+ "deep-extend": "^0.6.0",
+ "ini": "~1.3.0",
+ "minimist": "^1.2.0",
+ "strip-json-comments": "~2.0.1"
+ }
+ },
+ "readable-stream": {
+ "version": "2.3.7",
+ "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.7.tgz",
+ "integrity": "sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw==",
+ "requires": {
+ "core-util-is": "~1.0.0",
+ "inherits": "~2.0.3",
+ "isarray": "~1.0.0",
+ "process-nextick-args": "~2.0.0",
+ "safe-buffer": "~5.1.1",
+ "string_decoder": "~1.1.1",
+ "util-deprecate": "~1.0.1"
+ }
+ },
+ "readdirp": {
+ "version": "2.2.1",
+ "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-2.2.1.tgz",
+ "integrity": "sha512-1JU/8q+VgFZyxwrJ+SVIOsh+KywWGpds3NTqikiKpDMZWScmAYyKIgqkO+ARvNWJfXeXR1zxz7aHF4u4CyH6vQ==",
+ "requires": {
+ "graceful-fs": "^4.1.11",
+ "micromatch": "^3.1.10",
+ "readable-stream": "^2.0.2"
+ }
+ },
+ "reduce": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/reduce/-/reduce-1.0.2.tgz",
+ "integrity": "sha512-xX7Fxke/oHO5IfZSk77lvPa/7bjMh9BuCk4OOoX5XTXrM7s0Z+MkPfSDfz0q7r91BhhGSs8gii/VEN/7zhCPpQ==",
+ "requires": {
+ "object-keys": "^1.1.0"
+ }
+ },
+ "regenerate": {
+ "version": "1.4.2",
+ "resolved": "https://registry.npmjs.org/regenerate/-/regenerate-1.4.2.tgz",
+ "integrity": "sha512-zrceR/XhGYU/d/opr2EKO7aRHUeiBI8qjtfHqADTwZd6Szfy16la6kqD0MIUs5z5hx6AaKa+PixpPrR289+I0A=="
+ },
+ "regenerate-unicode-properties": {
+ "version": "8.2.0",
+ "resolved": "https://registry.npmjs.org/regenerate-unicode-properties/-/regenerate-unicode-properties-8.2.0.tgz",
+ "integrity": "sha512-F9DjY1vKLo/tPePDycuH3dn9H1OTPIkVD9Kz4LODu+F2C75mgjAJ7x/gwy6ZcSNRAAkhNlJSOHRe8k3p+K9WhA==",
+ "requires": {
+ "regenerate": "^1.4.0"
+ }
+ },
+ "regenerator-runtime": {
+ "version": "0.13.7",
+ "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.13.7.tgz",
+ "integrity": "sha512-a54FxoJDIr27pgf7IgeQGxmqUNYrcV338lf/6gH456HZ/PhX+5BcwHXG9ajESmwe6WRO0tAzRUrRmNONWgkrew=="
+ },
+ "regenerator-transform": {
+ "version": "0.14.5",
+ "resolved": "https://registry.npmjs.org/regenerator-transform/-/regenerator-transform-0.14.5.tgz",
+ "integrity": "sha512-eOf6vka5IO151Jfsw2NO9WpGX58W6wWmefK3I1zEGr0lOD0u8rwPaNqQL1aRxUaxLeKO3ArNh3VYg1KbaD+FFw==",
+ "requires": {
+ "@babel/runtime": "^7.8.4"
+ }
+ },
+ "regex-not": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/regex-not/-/regex-not-1.0.2.tgz",
+ "integrity": "sha512-J6SDjUgDxQj5NusnOtdFxDwN/+HWykR8GELwctJ7mdqhcyy1xEc4SRFHUXvxTp661YaVKAjfRLZ9cCqS6tn32A==",
+ "requires": {
+ "extend-shallow": "^3.0.2",
+ "safe-regex": "^1.1.0"
+ },
+ "dependencies": {
+ "extend-shallow": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-3.0.2.tgz",
+ "integrity": "sha1-Jqcarwc7OfshJxcnRhMcJwQCjbg=",
+ "requires": {
+ "assign-symbols": "^1.0.0",
+ "is-extendable": "^1.0.1"
+ }
+ },
+ "is-extendable": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz",
+ "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==",
+ "requires": {
+ "is-plain-object": "^2.0.4"
+ }
+ }
+ }
+ },
+ "regexp.prototype.flags": {
+ "version": "1.3.1",
+ "resolved": "https://registry.npmjs.org/regexp.prototype.flags/-/regexp.prototype.flags-1.3.1.tgz",
+ "integrity": "sha512-JiBdRBq91WlY7uRJ0ds7R+dU02i6LKi8r3BuQhNXn+kmeLN+EfHhfjqMRis1zJxnlu88hq/4dx0P2OP3APRTOA==",
+ "requires": {
+ "call-bind": "^1.0.2",
+ "define-properties": "^1.1.3"
+ }
+ },
+ "regexpu-core": {
+ "version": "4.7.1",
+ "resolved": "https://registry.npmjs.org/regexpu-core/-/regexpu-core-4.7.1.tgz",
+ "integrity": "sha512-ywH2VUraA44DZQuRKzARmw6S66mr48pQVva4LBeRhcOltJ6hExvWly5ZjFLYo67xbIxb6W1q4bAGtgfEl20zfQ==",
+ "requires": {
+ "regenerate": "^1.4.0",
+ "regenerate-unicode-properties": "^8.2.0",
+ "regjsgen": "^0.5.1",
+ "regjsparser": "^0.6.4",
+ "unicode-match-property-ecmascript": "^1.0.4",
+ "unicode-match-property-value-ecmascript": "^1.2.0"
+ }
+ },
+ "registry-auth-token": {
+ "version": "4.2.1",
+ "resolved": "https://registry.npmjs.org/registry-auth-token/-/registry-auth-token-4.2.1.tgz",
+ "integrity": "sha512-6gkSb4U6aWJB4SF2ZvLb76yCBjcvufXBqvvEx1HbmKPkutswjW1xNVRY0+daljIYRbogN7O0etYSlbiaEQyMyw==",
+ "requires": {
+ "rc": "^1.2.8"
+ }
+ },
+ "registry-url": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/registry-url/-/registry-url-5.1.0.tgz",
+ "integrity": "sha512-8acYXXTI0AkQv6RAOjE3vOaIXZkT9wo4LOFbBKYQEEnnMNBpKqdUrI6S4NT0KPIo/WVvJ5tE/X5LF/TQUf0ekw==",
+ "requires": {
+ "rc": "^1.2.8"
+ }
+ },
+ "regjsgen": {
+ "version": "0.5.2",
+ "resolved": "https://registry.npmjs.org/regjsgen/-/regjsgen-0.5.2.tgz",
+ "integrity": "sha512-OFFT3MfrH90xIW8OOSyUrk6QHD5E9JOTeGodiJeBS3J6IwlgzJMNE/1bZklWz5oTg+9dCMyEetclvCVXOPoN3A=="
+ },
+ "regjsparser": {
+ "version": "0.6.9",
+ "resolved": "https://registry.npmjs.org/regjsparser/-/regjsparser-0.6.9.tgz",
+ "integrity": "sha512-ZqbNRz1SNjLAiYuwY0zoXW8Ne675IX5q+YHioAGbCw4X96Mjl2+dcX9B2ciaeyYjViDAfvIjFpQjJgLttTEERQ==",
+ "requires": {
+ "jsesc": "~0.5.0"
+ },
+ "dependencies": {
+ "jsesc": {
+ "version": "0.5.0",
+ "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-0.5.0.tgz",
+ "integrity": "sha1-597mbjXW/Bb3EP6R1c9p9w8IkR0="
+ }
+ }
+ },
+ "relateurl": {
+ "version": "0.2.7",
+ "resolved": "https://registry.npmjs.org/relateurl/-/relateurl-0.2.7.tgz",
+ "integrity": "sha1-VNvzd+UUQKypCkzSdGANP/LYiKk="
+ },
+ "remove-trailing-separator": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/remove-trailing-separator/-/remove-trailing-separator-1.1.0.tgz",
+ "integrity": "sha1-wkvOKig62tW8P1jg1IJJuSN52O8="
+ },
+ "renderkid": {
+ "version": "2.0.5",
+ "resolved": "https://registry.npmjs.org/renderkid/-/renderkid-2.0.5.tgz",
+ "integrity": "sha512-ccqoLg+HLOHq1vdfYNm4TBeaCDIi1FLt3wGojTDSvdewUv65oTmI3cnT2E4hRjl1gzKZIPK+KZrXzlUYKnR+vQ==",
+ "requires": {
+ "css-select": "^2.0.2",
+ "dom-converter": "^0.2",
+ "htmlparser2": "^3.10.1",
+ "lodash": "^4.17.20",
+ "strip-ansi": "^3.0.0"
+ },
+ "dependencies": {
+ "ansi-regex": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-2.1.1.tgz",
+ "integrity": "sha1-w7M6te42DYbg5ijwRorn7yfWVN8="
+ },
+ "css-select": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/css-select/-/css-select-2.1.0.tgz",
+ "integrity": "sha512-Dqk7LQKpwLoH3VovzZnkzegqNSuAziQyNZUcrdDM401iY+R5NkGBXGmtO05/yaXQziALuPogeG0b7UAgjnTJTQ==",
+ "requires": {
+ "boolbase": "^1.0.0",
+ "css-what": "^3.2.1",
+ "domutils": "^1.7.0",
+ "nth-check": "^1.0.2"
+ }
+ },
+ "css-what": {
+ "version": "3.4.2",
+ "resolved": "https://registry.npmjs.org/css-what/-/css-what-3.4.2.tgz",
+ "integrity": "sha512-ACUm3L0/jiZTqfzRM3Hi9Q8eZqd6IK37mMWPLz9PJxkLWllYeRf+EHUSHYEtFop2Eqytaq1FizFVh7XfBnXCDQ=="
+ },
+ "dom-serializer": {
+ "version": "0.2.2",
+ "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-0.2.2.tgz",
+ "integrity": "sha512-2/xPb3ORsQ42nHYiSunXkDjPLBaEj/xTwUO4B7XCZQTRk7EBtTOPaygh10YAAh2OI1Qrp6NWfpAhzswj0ydt9g==",
+ "requires": {
+ "domelementtype": "^2.0.1",
+ "entities": "^2.0.0"
+ }
+ },
+ "domhandler": {
+ "version": "2.4.2",
+ "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-2.4.2.tgz",
+ "integrity": "sha512-JiK04h0Ht5u/80fdLMCEmV4zkNh2BcoMFBmZ/91WtYZ8qVXSKjiw7fXMgFPnHcSZgOo3XdinHvmnDUeMf5R4wA==",
+ "requires": {
+ "domelementtype": "1"
+ },
+ "dependencies": {
+ "domelementtype": {
+ "version": "1.3.1",
+ "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-1.3.1.tgz",
+ "integrity": "sha512-BSKB+TSpMpFI/HOxCNr1O8aMOTZ8hT3pM3GQ0w/mWRmkhEDSFJkkyzz4XQsBV44BChwGkrDfMyjVD0eA2aFV3w=="
+ }
+ }
+ },
+ "domutils": {
+ "version": "1.7.0",
+ "resolved": "https://registry.npmjs.org/domutils/-/domutils-1.7.0.tgz",
+ "integrity": "sha512-Lgd2XcJ/NjEw+7tFvfKxOzCYKZsdct5lczQ2ZaQY8Djz7pfAD3Gbp8ySJWtreII/vDlMVmxwa6pHmdxIYgttDg==",
+ "requires": {
+ "dom-serializer": "0",
+ "domelementtype": "1"
+ },
+ "dependencies": {
+ "domelementtype": {
+ "version": "1.3.1",
+ "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-1.3.1.tgz",
+ "integrity": "sha512-BSKB+TSpMpFI/HOxCNr1O8aMOTZ8hT3pM3GQ0w/mWRmkhEDSFJkkyzz4XQsBV44BChwGkrDfMyjVD0eA2aFV3w=="
+ }
+ }
+ },
+ "htmlparser2": {
+ "version": "3.10.1",
+ "resolved": "https://registry.npmjs.org/htmlparser2/-/htmlparser2-3.10.1.tgz",
+ "integrity": "sha512-IgieNijUMbkDovyoKObU1DUhm1iwNYE/fuifEoEHfd1oZKZDaONBSkal7Y01shxsM49R4XaMdGez3WnF9UfiCQ==",
+ "requires": {
+ "domelementtype": "^1.3.1",
+ "domhandler": "^2.3.0",
+ "domutils": "^1.5.1",
+ "entities": "^1.1.1",
+ "inherits": "^2.0.1",
+ "readable-stream": "^3.1.1"
+ },
+ "dependencies": {
+ "domelementtype": {
+ "version": "1.3.1",
+ "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-1.3.1.tgz",
+ "integrity": "sha512-BSKB+TSpMpFI/HOxCNr1O8aMOTZ8hT3pM3GQ0w/mWRmkhEDSFJkkyzz4XQsBV44BChwGkrDfMyjVD0eA2aFV3w=="
+ },
+ "entities": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/entities/-/entities-1.1.2.tgz",
+ "integrity": "sha512-f2LZMYl1Fzu7YSBKg+RoROelpOaNrcGmE9AZubeDfrCEia483oW4MI4VyFd5VNHIgQ/7qm1I0wUHK1eJnn2y2w=="
+ }
+ }
+ },
+ "nth-check": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/nth-check/-/nth-check-1.0.2.tgz",
+ "integrity": "sha512-WeBOdju8SnzPN5vTUJYxYUxLeXpCaVP5i5e0LF8fg7WORF2Wd7wFX/pk0tYZk7s8T+J7VLy0Da6J1+wCT0AtHg==",
+ "requires": {
+ "boolbase": "~1.0.0"
+ }
+ },
+ "readable-stream": {
+ "version": "3.6.0",
+ "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.0.tgz",
+ "integrity": "sha512-BViHy7LKeTz4oNnkcLJ+lVSL6vpiFeX6/d3oSH8zCW7UxP2onchk+vTGB143xuFjHS3deTgkKoXXymXqymiIdA==",
+ "requires": {
+ "inherits": "^2.0.3",
+ "string_decoder": "^1.1.1",
+ "util-deprecate": "^1.0.1"
+ }
+ },
+ "strip-ansi": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-3.0.1.tgz",
+ "integrity": "sha1-ajhfuIU9lS1f8F0Oiq+UJ43GPc8=",
+ "requires": {
+ "ansi-regex": "^2.0.0"
+ }
+ }
+ }
+ },
+ "repeat-element": {
+ "version": "1.1.4",
+ "resolved": "https://registry.npmjs.org/repeat-element/-/repeat-element-1.1.4.tgz",
+ "integrity": "sha512-LFiNfRcSu7KK3evMyYOuCzv3L10TW7yC1G2/+StMjK8Y6Vqd2MG7r/Qjw4ghtuCOjFvlnms/iMmLqpvW/ES/WQ=="
+ },
+ "repeat-string": {
+ "version": "1.6.1",
+ "resolved": "https://registry.npmjs.org/repeat-string/-/repeat-string-1.6.1.tgz",
+ "integrity": "sha1-jcrkcOHIirwtYA//Sndihtp15jc="
+ },
+ "request": {
+ "version": "2.88.2",
+ "resolved": "https://registry.npmjs.org/request/-/request-2.88.2.tgz",
+ "integrity": "sha512-MsvtOrfG9ZcrOwAW+Qi+F6HbD0CWXEh9ou77uOb7FM2WPhwT7smM833PzanhJLsgXjN89Ir6V2PczXNnMpwKhw==",
+ "requires": {
+ "aws-sign2": "~0.7.0",
+ "aws4": "^1.8.0",
+ "caseless": "~0.12.0",
+ "combined-stream": "~1.0.6",
+ "extend": "~3.0.2",
+ "forever-agent": "~0.6.1",
+ "form-data": "~2.3.2",
+ "har-validator": "~5.1.3",
+ "http-signature": "~1.2.0",
+ "is-typedarray": "~1.0.0",
+ "isstream": "~0.1.2",
+ "json-stringify-safe": "~5.0.1",
+ "mime-types": "~2.1.19",
+ "oauth-sign": "~0.9.0",
+ "performance-now": "^2.1.0",
+ "qs": "~6.5.2",
+ "safe-buffer": "^5.1.2",
+ "tough-cookie": "~2.5.0",
+ "tunnel-agent": "^0.6.0",
+ "uuid": "^3.3.2"
+ }
+ },
+ "require-directory": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz",
+ "integrity": "sha1-jGStX9MNqxyXbiNE/+f3kqam30I="
+ },
+ "require-main-filename": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/require-main-filename/-/require-main-filename-2.0.0.tgz",
+ "integrity": "sha512-NKN5kMDylKuldxYLSUfrbo5Tuzh4hd+2E8NPPX02mZtn1VuREQToYe/ZdlJy+J3uCpfaiGF05e7B8W0iXbQHmg=="
+ },
+ "requires-port": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/requires-port/-/requires-port-1.0.0.tgz",
+ "integrity": "sha1-kl0mAdOaxIXgkc8NpcbmlNw9yv8="
+ },
+ "resolve": {
+ "version": "1.20.0",
+ "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.20.0.tgz",
+ "integrity": "sha512-wENBPt4ySzg4ybFQW2TT1zMQucPK95HSh/nq2CFTZVOGut2+pQvSsgtda4d26YrYcr067wjbmzOG8byDPBX63A==",
+ "requires": {
+ "is-core-module": "^2.2.0",
+ "path-parse": "^1.0.6"
+ }
+ },
+ "resolve-cwd": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/resolve-cwd/-/resolve-cwd-2.0.0.tgz",
+ "integrity": "sha1-AKn3OHVW4nA46uIyyqNypqWbZlo=",
+ "requires": {
+ "resolve-from": "^3.0.0"
+ }
+ },
+ "resolve-from": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-3.0.0.tgz",
+ "integrity": "sha1-six699nWiBvItuZTM17rywoYh0g="
+ },
+ "resolve-url": {
+ "version": "0.2.1",
+ "resolved": "https://registry.npmjs.org/resolve-url/-/resolve-url-0.2.1.tgz",
+ "integrity": "sha1-LGN/53yJOv0qZj/iGqkIAGjiBSo="
+ },
+ "responselike": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/responselike/-/responselike-1.0.2.tgz",
+ "integrity": "sha1-kYcg7ztjHFZCvgaPFa3lpG9Loec=",
+ "requires": {
+ "lowercase-keys": "^1.0.0"
+ }
+ },
+ "ret": {
+ "version": "0.1.15",
+ "resolved": "https://registry.npmjs.org/ret/-/ret-0.1.15.tgz",
+ "integrity": "sha512-TTlYpa+OL+vMMNG24xSlQGEJ3B/RzEfUlLct7b5G/ytav+wPrplCpVMFuwzXbkecJrb6IYo1iFb0S9v37754mg=="
+ },
+ "retry": {
+ "version": "0.12.0",
+ "resolved": "https://registry.npmjs.org/retry/-/retry-0.12.0.tgz",
+ "integrity": "sha1-G0KmJmoh8HQh0bC1S33BZ7AcATs="
+ },
+ "rgb-regex": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/rgb-regex/-/rgb-regex-1.0.1.tgz",
+ "integrity": "sha1-wODWiC3w4jviVKR16O3UGRX+rrE="
+ },
+ "rgba-regex": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/rgba-regex/-/rgba-regex-1.0.0.tgz",
+ "integrity": "sha1-QzdOLiyglosO8VI0YLfXMP8i7rM="
+ },
+ "rimraf": {
+ "version": "2.7.1",
+ "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-2.7.1.tgz",
+ "integrity": "sha512-uWjbaKIK3T1OSVptzX7Nl6PvQ3qAGtKEtVRjRuazjfL3Bx5eI409VZSqgND+4UNnmzLVdPj9FqFJNPqBZFve4w==",
+ "requires": {
+ "glob": "^7.1.3"
+ }
+ },
+ "ripemd160": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/ripemd160/-/ripemd160-2.0.2.tgz",
+ "integrity": "sha512-ii4iagi25WusVoiC4B4lq7pbXfAp3D9v5CwfkY33vffw2+pkDjY1D8GaN7spsxvCSx8dkPqOZCEZyfxcmJG2IA==",
+ "requires": {
+ "hash-base": "^3.0.0",
+ "inherits": "^2.0.1"
+ }
+ },
+ "run-queue": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/run-queue/-/run-queue-1.0.3.tgz",
+ "integrity": "sha1-6Eg5bwV9Ij8kOGkkYY4laUFh7Ec=",
+ "requires": {
+ "aproba": "^1.1.1"
+ }
+ },
+ "safe-buffer": {
+ "version": "5.1.2",
+ "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz",
+ "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g=="
+ },
+ "safe-regex": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/safe-regex/-/safe-regex-1.1.0.tgz",
+ "integrity": "sha1-QKNmnzsHfR6UPURinhV91IAjvy4=",
+ "requires": {
+ "ret": "~0.1.10"
+ }
+ },
+ "safer-buffer": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz",
+ "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg=="
+ },
+ "sax": {
+ "version": "1.2.4",
+ "resolved": "https://registry.npmjs.org/sax/-/sax-1.2.4.tgz",
+ "integrity": "sha512-NqVDv9TpANUjFm0N8uM5GxL36UgKi9/atZw+x7YFnQ8ckwFGKrl4xX4yWtrey3UJm5nP1kUbnYgLopqWNSRhWw=="
+ },
+ "schema-utils": {
+ "version": "2.7.1",
+ "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-2.7.1.tgz",
+ "integrity": "sha512-SHiNtMOUGWBQJwzISiVYKu82GiV4QYGePp3odlY1tuKO7gPtphAT5R/py0fA6xtbgLL/RvtJZnU9b8s0F1q0Xg==",
+ "requires": {
+ "@types/json-schema": "^7.0.5",
+ "ajv": "^6.12.4",
+ "ajv-keywords": "^3.5.2"
+ }
+ },
+ "section-matter": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/section-matter/-/section-matter-1.0.0.tgz",
+ "integrity": "sha512-vfD3pmTzGpufjScBh50YHKzEu2lxBWhVEHsNGoEXmCmn2hKGfeNLYMzCJpe8cD7gqX7TJluOVpBkAequ6dgMmA==",
+ "requires": {
+ "extend-shallow": "^2.0.1",
+ "kind-of": "^6.0.0"
+ }
+ },
+ "select": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/select/-/select-1.1.2.tgz",
+ "integrity": "sha1-DnNQrN7ICxEIUoeG7B1EGNEbOW0=",
+ "optional": true
+ },
+ "select-hose": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/select-hose/-/select-hose-2.0.0.tgz",
+ "integrity": "sha1-Yl2GWPhlr0Psliv8N2o3NZpJlMo="
+ },
+ "selfsigned": {
+ "version": "1.10.8",
+ "resolved": "https://registry.npmjs.org/selfsigned/-/selfsigned-1.10.8.tgz",
+ "integrity": "sha512-2P4PtieJeEwVgTU9QEcwIRDQ/mXJLX8/+I3ur+Pg16nS8oNbrGxEso9NyYWy8NAmXiNl4dlAp5MwoNeCWzON4w==",
+ "requires": {
+ "node-forge": "^0.10.0"
+ }
+ },
+ "semver": {
+ "version": "6.3.0",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz",
+ "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw=="
+ },
+ "semver-diff": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/semver-diff/-/semver-diff-3.1.1.tgz",
+ "integrity": "sha512-GX0Ix/CJcHyB8c4ykpHGIAvLyOwOobtM/8d+TQkAd81/bEjgPHrfba41Vpesr7jX/t8Uh+R3EX9eAS5be+jQYg==",
+ "requires": {
+ "semver": "^6.3.0"
+ }
+ },
+ "send": {
+ "version": "0.17.1",
+ "resolved": "https://registry.npmjs.org/send/-/send-0.17.1.tgz",
+ "integrity": "sha512-BsVKsiGcQMFwT8UxypobUKyv7irCNRHk1T0G680vk88yf6LBByGcZJOTJCrTP2xVN6yI+XjPJcNuE3V4fT9sAg==",
+ "requires": {
+ "debug": "2.6.9",
+ "depd": "~1.1.2",
+ "destroy": "~1.0.4",
+ "encodeurl": "~1.0.2",
+ "escape-html": "~1.0.3",
+ "etag": "~1.8.1",
+ "fresh": "0.5.2",
+ "http-errors": "~1.7.2",
+ "mime": "1.6.0",
+ "ms": "2.1.1",
+ "on-finished": "~2.3.0",
+ "range-parser": "~1.2.1",
+ "statuses": "~1.5.0"
+ },
+ "dependencies": {
+ "mime": {
+ "version": "1.6.0",
+ "resolved": "https://registry.npmjs.org/mime/-/mime-1.6.0.tgz",
+ "integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg=="
+ },
+ "ms": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.1.tgz",
+ "integrity": "sha512-tgp+dl5cGk28utYktBsrFqA7HKgrhgPsg6Z/EfhWI4gl1Hwq8B/GmY/0oXZ6nF8hDVesS/FpnYaD/kOWhYQvyg=="
+ }
+ }
+ },
+ "serialize-javascript": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-4.0.0.tgz",
+ "integrity": "sha512-GaNA54380uFefWghODBWEGisLZFj00nS5ACs6yHa9nLqlLpVLO8ChDGeKRjZnV4Nh4n0Qi7nhYZD/9fCPzEqkw==",
+ "requires": {
+ "randombytes": "^2.1.0"
+ }
+ },
+ "serve-index": {
+ "version": "1.9.1",
+ "resolved": "https://registry.npmjs.org/serve-index/-/serve-index-1.9.1.tgz",
+ "integrity": "sha1-03aNabHn2C5c4FD/9bRTvqEqkjk=",
+ "requires": {
+ "accepts": "~1.3.4",
+ "batch": "0.6.1",
+ "debug": "2.6.9",
+ "escape-html": "~1.0.3",
+ "http-errors": "~1.6.2",
+ "mime-types": "~2.1.17",
+ "parseurl": "~1.3.2"
+ },
+ "dependencies": {
+ "http-errors": {
+ "version": "1.6.3",
+ "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-1.6.3.tgz",
+ "integrity": "sha1-i1VoC7S+KDoLW/TqLjhYC+HZMg0=",
+ "requires": {
+ "depd": "~1.1.2",
+ "inherits": "2.0.3",
+ "setprototypeof": "1.1.0",
+ "statuses": ">= 1.4.0 < 2"
+ }
+ },
+ "inherits": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz",
+ "integrity": "sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4="
+ },
+ "setprototypeof": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.1.0.tgz",
+ "integrity": "sha512-BvE/TwpZX4FXExxOxZyRGQQv651MSwmWKZGqvmPcRIjDqWub67kTKuIMx43cZZrS/cBBzwBcNDWoFxt2XEFIpQ=="
+ }
+ }
+ },
+ "serve-static": {
+ "version": "1.14.1",
+ "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.14.1.tgz",
+ "integrity": "sha512-JMrvUwE54emCYWlTI+hGrGv5I8dEwmco/00EvkzIIsR7MqrHonbD9pO2MOfFnpFntl7ecpZs+3mW+XbQZu9QCg==",
+ "requires": {
+ "encodeurl": "~1.0.2",
+ "escape-html": "~1.0.3",
+ "parseurl": "~1.3.3",
+ "send": "0.17.1"
+ }
+ },
+ "set-blocking": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/set-blocking/-/set-blocking-2.0.0.tgz",
+ "integrity": "sha1-BF+XgtARrppoA93TgrJDkrPYkPc="
+ },
+ "set-value": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/set-value/-/set-value-2.0.1.tgz",
+ "integrity": "sha512-JxHc1weCN68wRY0fhCoXpyK55m/XPHafOmK4UWD7m2CI14GMcFypt4w/0+NV5f/ZMby2F6S2wwA7fgynh9gWSw==",
+ "requires": {
+ "extend-shallow": "^2.0.1",
+ "is-extendable": "^0.1.1",
+ "is-plain-object": "^2.0.3",
+ "split-string": "^3.0.1"
+ }
+ },
+ "setimmediate": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/setimmediate/-/setimmediate-1.0.5.tgz",
+ "integrity": "sha1-KQy7Iy4waULX1+qbg3Mqt4VvgoU="
+ },
+ "setprototypeof": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.1.1.tgz",
+ "integrity": "sha512-JvdAWfbXeIGaZ9cILp38HntZSFSo3mWg6xGcJJsd+d4aRMOqauag1C63dJfDw7OaMYwEbHMOxEZ1lqVRYP2OAw=="
+ },
+ "sha.js": {
+ "version": "2.4.11",
+ "resolved": "https://registry.npmjs.org/sha.js/-/sha.js-2.4.11.tgz",
+ "integrity": "sha512-QMEp5B7cftE7APOjk5Y6xgrbWu+WkLVQwk8JNjZ8nKRciZaByEW6MubieAiToS7+dwvrjGhH8jRXz3MVd0AYqQ==",
+ "requires": {
+ "inherits": "^2.0.1",
+ "safe-buffer": "^5.0.1"
+ }
+ },
+ "shebang-command": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-1.2.0.tgz",
+ "integrity": "sha1-RKrGW2lbAzmJaMOfNj/uXer98eo=",
+ "requires": {
+ "shebang-regex": "^1.0.0"
+ }
+ },
+ "shebang-regex": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-1.0.0.tgz",
+ "integrity": "sha1-2kL0l0DAtC2yypcoVxyxkMmO/qM="
+ },
+ "signal-exit": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.3.tgz",
+ "integrity": "sha512-VUJ49FC8U1OxwZLxIbTTrDvLnf/6TDgxZcK8wxR8zs13xpx7xbG60ndBlhNrFi2EMuFRoeDoJO7wthSLq42EjA=="
+ },
+ "simple-swizzle": {
+ "version": "0.2.2",
+ "resolved": "https://registry.npmjs.org/simple-swizzle/-/simple-swizzle-0.2.2.tgz",
+ "integrity": "sha1-pNprY1/8zMoz9w0Xy5JZLeleVXo=",
+ "requires": {
+ "is-arrayish": "^0.3.1"
+ },
+ "dependencies": {
+ "is-arrayish": {
+ "version": "0.3.2",
+ "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.3.2.tgz",
+ "integrity": "sha512-eVRqCvVlZbuw3GrM63ovNSNAeA1K16kaR/LRY/92w0zxQ5/1YzwblUX652i4Xs9RwAGjW9d9y6X88t8OaAJfWQ=="
+ }
+ }
+ },
+ "sitemap": {
+ "version": "3.2.2",
+ "resolved": "https://registry.npmjs.org/sitemap/-/sitemap-3.2.2.tgz",
+ "integrity": "sha512-TModL/WU4m2q/mQcrDgNANn0P4LwprM9MMvG4hu5zP4c6IIKs2YLTu6nXXnNr8ODW/WFtxKggiJ1EGn2W0GNmg==",
+ "requires": {
+ "lodash.chunk": "^4.2.0",
+ "lodash.padstart": "^4.6.1",
+ "whatwg-url": "^7.0.0",
+ "xmlbuilder": "^13.0.0"
+ }
+ },
+ "slash": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/slash/-/slash-2.0.0.tgz",
+ "integrity": "sha512-ZYKh3Wh2z1PpEXWr0MpSBZ0V6mZHAQfYevttO11c51CaWjGTaadiKZ+wVt1PbMlDV5qhMFslpZCemhwOK7C89A=="
+ },
+ "smoothscroll-polyfill": {
+ "version": "0.4.4",
+ "resolved": "https://registry.npmjs.org/smoothscroll-polyfill/-/smoothscroll-polyfill-0.4.4.tgz",
+ "integrity": "sha512-TK5ZA9U5RqCwMpfoMq/l1mrH0JAR7y7KRvOBx0n2869aLxch+gT9GhN3yUfjiw+d/DiF1mKo14+hd62JyMmoBg=="
+ },
+ "snapdragon": {
+ "version": "0.8.2",
+ "resolved": "https://registry.npmjs.org/snapdragon/-/snapdragon-0.8.2.tgz",
+ "integrity": "sha512-FtyOnWN/wCHTVXOMwvSv26d+ko5vWlIDD6zoUJ7LW8vh+ZBC8QdljveRP+crNrtBwioEUWy/4dMtbBjA4ioNlg==",
+ "requires": {
+ "base": "^0.11.1",
+ "debug": "^2.2.0",
+ "define-property": "^0.2.5",
+ "extend-shallow": "^2.0.1",
+ "map-cache": "^0.2.2",
+ "source-map": "^0.5.6",
+ "source-map-resolve": "^0.5.0",
+ "use": "^3.1.0"
+ },
+ "dependencies": {
+ "define-property": {
+ "version": "0.2.5",
+ "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz",
+ "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=",
+ "requires": {
+ "is-descriptor": "^0.1.0"
+ }
+ },
+ "is-accessor-descriptor": {
+ "version": "0.1.6",
+ "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-0.1.6.tgz",
+ "integrity": "sha1-qeEss66Nh2cn7u84Q/igiXtcmNY=",
+ "requires": {
+ "kind-of": "^3.0.2"
+ },
+ "dependencies": {
+ "kind-of": {
+ "version": "3.2.2",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz",
+ "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=",
+ "requires": {
+ "is-buffer": "^1.1.5"
+ }
+ }
+ }
+ },
+ "is-data-descriptor": {
+ "version": "0.1.4",
+ "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-0.1.4.tgz",
+ "integrity": "sha1-C17mSDiOLIYCgueT8YVv7D8wG1Y=",
+ "requires": {
+ "kind-of": "^3.0.2"
+ },
+ "dependencies": {
+ "kind-of": {
+ "version": "3.2.2",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz",
+ "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=",
+ "requires": {
+ "is-buffer": "^1.1.5"
+ }
+ }
+ }
+ },
+ "is-descriptor": {
+ "version": "0.1.6",
+ "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-0.1.6.tgz",
+ "integrity": "sha512-avDYr0SB3DwO9zsMov0gKCESFYqCnE4hq/4z3TdUlukEy5t9C0YRq7HLrsN52NAcqXKaepeCD0n+B0arnVG3Hg==",
+ "requires": {
+ "is-accessor-descriptor": "^0.1.6",
+ "is-data-descriptor": "^0.1.4",
+ "kind-of": "^5.0.0"
+ }
+ },
+ "kind-of": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-5.1.0.tgz",
+ "integrity": "sha512-NGEErnH6F2vUuXDh+OlbcKW7/wOcfdRHaZ7VWtqCztfHri/++YKmP51OdWeGPuqCOba6kk2OTe5d02VmTB80Pw=="
+ },
+ "source-map": {
+ "version": "0.5.7",
+ "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz",
+ "integrity": "sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w="
+ }
+ }
+ },
+ "snapdragon-node": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/snapdragon-node/-/snapdragon-node-2.1.1.tgz",
+ "integrity": "sha512-O27l4xaMYt/RSQ5TR3vpWCAB5Kb/czIcqUFOM/C4fYcLnbZUc1PkjTAMjof2pBWaSTwOUd6qUHcFGVGj7aIwnw==",
+ "requires": {
+ "define-property": "^1.0.0",
+ "isobject": "^3.0.0",
+ "snapdragon-util": "^3.0.1"
+ },
+ "dependencies": {
+ "define-property": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/define-property/-/define-property-1.0.0.tgz",
+ "integrity": "sha1-dp66rz9KY6rTr56NMEybvnm/sOY=",
+ "requires": {
+ "is-descriptor": "^1.0.0"
+ }
+ }
+ }
+ },
+ "snapdragon-util": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/snapdragon-util/-/snapdragon-util-3.0.1.tgz",
+ "integrity": "sha512-mbKkMdQKsjX4BAL4bRYTj21edOf8cN7XHdYUJEe+Zn99hVEYcMvKPct1IqNe7+AZPirn8BCDOQBHQZknqmKlZQ==",
+ "requires": {
+ "kind-of": "^3.2.0"
+ },
+ "dependencies": {
+ "kind-of": {
+ "version": "3.2.2",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz",
+ "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=",
+ "requires": {
+ "is-buffer": "^1.1.5"
+ }
+ }
+ }
+ },
+ "sockjs": {
+ "version": "0.3.21",
+ "resolved": "https://registry.npmjs.org/sockjs/-/sockjs-0.3.21.tgz",
+ "integrity": "sha512-DhbPFGpxjc6Z3I+uX07Id5ZO2XwYsWOrYjaSeieES78cq+JaJvVe5q/m1uvjIQhXinhIeCFRH6JgXe+mvVMyXw==",
+ "requires": {
+ "faye-websocket": "^0.11.3",
+ "uuid": "^3.4.0",
+ "websocket-driver": "^0.7.4"
+ }
+ },
+ "sockjs-client": {
+ "version": "1.5.1",
+ "resolved": "https://registry.npmjs.org/sockjs-client/-/sockjs-client-1.5.1.tgz",
+ "integrity": "sha512-VnVAb663fosipI/m6pqRXakEOw7nvd7TUgdr3PlR/8V2I95QIdwT8L4nMxhyU8SmDBHYXU1TOElaKOmKLfYzeQ==",
+ "requires": {
+ "debug": "^3.2.6",
+ "eventsource": "^1.0.7",
+ "faye-websocket": "^0.11.3",
+ "inherits": "^2.0.4",
+ "json3": "^3.3.3",
+ "url-parse": "^1.5.1"
+ },
+ "dependencies": {
+ "debug": {
+ "version": "3.2.7",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz",
+ "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==",
+ "requires": {
+ "ms": "^2.1.1"
+ }
+ },
+ "ms": {
+ "version": "2.1.3",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
+ "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA=="
+ }
+ }
+ },
+ "sort-keys": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/sort-keys/-/sort-keys-2.0.0.tgz",
+ "integrity": "sha1-ZYU1WEhh7JfXMNbPQYIuH1ZoQSg=",
+ "requires": {
+ "is-plain-obj": "^1.0.0"
+ }
+ },
+ "source-list-map": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/source-list-map/-/source-list-map-2.0.1.tgz",
+ "integrity": "sha512-qnQ7gVMxGNxsiL4lEuJwe/To8UnK7fAnmbGEEH8RpLouuKbeEm0lhbQVFIrNSuB+G7tVrAlVsZgETT5nljf+Iw=="
+ },
+ "source-map": {
+ "version": "0.7.3",
+ "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.7.3.tgz",
+ "integrity": "sha512-CkCj6giN3S+n9qrYiBTX5gystlENnRW5jZeNLHpe6aue+SrHcG5VYwujhW9s4dY31mEGsxBDrHR6oI69fTXsaQ=="
+ },
+ "source-map-resolve": {
+ "version": "0.5.3",
+ "resolved": "https://registry.npmjs.org/source-map-resolve/-/source-map-resolve-0.5.3.tgz",
+ "integrity": "sha512-Htz+RnsXWk5+P2slx5Jh3Q66vhQj1Cllm0zvnaY98+NFx+Dv2CF/f5O/t8x+KaNdrdIAsruNzoh/KpialbqAnw==",
+ "requires": {
+ "atob": "^2.1.2",
+ "decode-uri-component": "^0.2.0",
+ "resolve-url": "^0.2.1",
+ "source-map-url": "^0.4.0",
+ "urix": "^0.1.0"
+ }
+ },
+ "source-map-support": {
+ "version": "0.5.19",
+ "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.19.tgz",
+ "integrity": "sha512-Wonm7zOCIJzBGQdB+thsPar0kYuCIzYvxZwlBa87yi/Mdjv7Tip2cyVbLj5o0cFPN4EVkuTwb3GDDyUx2DGnGw==",
+ "requires": {
+ "buffer-from": "^1.0.0",
+ "source-map": "^0.6.0"
+ },
+ "dependencies": {
+ "source-map": {
+ "version": "0.6.1",
+ "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz",
+ "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g=="
+ }
+ }
+ },
+ "source-map-url": {
+ "version": "0.4.1",
+ "resolved": "https://registry.npmjs.org/source-map-url/-/source-map-url-0.4.1.tgz",
+ "integrity": "sha512-cPiFOTLUKvJFIg4SKVScy4ilPPW6rFgMgfuZJPNoDuMs3nC1HbMUycBoJw77xFIp6z1UJQJOfx6C9GMH80DiTw=="
+ },
+ "spdy": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/spdy/-/spdy-4.0.2.tgz",
+ "integrity": "sha512-r46gZQZQV+Kl9oItvl1JZZqJKGr+oEkB08A6BzkiR7593/7IbtuncXHd2YoYeTsG4157ZssMu9KYvUHLcjcDoA==",
+ "requires": {
+ "debug": "^4.1.0",
+ "handle-thing": "^2.0.0",
+ "http-deceiver": "^1.2.7",
+ "select-hose": "^2.0.0",
+ "spdy-transport": "^3.0.0"
+ },
+ "dependencies": {
+ "debug": {
+ "version": "4.3.1",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz",
+ "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==",
+ "requires": {
+ "ms": "2.1.2"
+ }
+ },
+ "ms": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz",
+ "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w=="
+ }
+ }
+ },
+ "spdy-transport": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/spdy-transport/-/spdy-transport-3.0.0.tgz",
+ "integrity": "sha512-hsLVFE5SjA6TCisWeJXFKniGGOpBgMLmerfO2aCyCU5s7nJ/rpAepqmFifv/GCbSbueEeAJJnmSQ2rKC/g8Fcw==",
+ "requires": {
+ "debug": "^4.1.0",
+ "detect-node": "^2.0.4",
+ "hpack.js": "^2.1.6",
+ "obuf": "^1.1.2",
+ "readable-stream": "^3.0.6",
+ "wbuf": "^1.7.3"
+ },
+ "dependencies": {
+ "debug": {
+ "version": "4.3.1",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz",
+ "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==",
+ "requires": {
+ "ms": "2.1.2"
+ }
+ },
+ "ms": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz",
+ "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w=="
+ },
+ "readable-stream": {
+ "version": "3.6.0",
+ "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.0.tgz",
+ "integrity": "sha512-BViHy7LKeTz4oNnkcLJ+lVSL6vpiFeX6/d3oSH8zCW7UxP2onchk+vTGB143xuFjHS3deTgkKoXXymXqymiIdA==",
+ "requires": {
+ "inherits": "^2.0.3",
+ "string_decoder": "^1.1.1",
+ "util-deprecate": "^1.0.1"
+ }
+ }
+ }
+ },
+ "split-string": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/split-string/-/split-string-3.1.0.tgz",
+ "integrity": "sha512-NzNVhJDYpwceVVii8/Hu6DKfD2G+NrQHlS/V/qgv763EYudVwEcMQNxd2lh+0VrUByXN/oJkl5grOhYWvQUYiw==",
+ "requires": {
+ "extend-shallow": "^3.0.0"
+ },
+ "dependencies": {
+ "extend-shallow": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-3.0.2.tgz",
+ "integrity": "sha1-Jqcarwc7OfshJxcnRhMcJwQCjbg=",
+ "requires": {
+ "assign-symbols": "^1.0.0",
+ "is-extendable": "^1.0.1"
+ }
+ },
+ "is-extendable": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz",
+ "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==",
+ "requires": {
+ "is-plain-object": "^2.0.4"
+ }
+ }
+ }
+ },
+ "sprintf-js": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz",
+ "integrity": "sha1-BOaSb2YolTVPPdAVIDYzuFcpfiw="
+ },
+ "sshpk": {
+ "version": "1.16.1",
+ "resolved": "https://registry.npmjs.org/sshpk/-/sshpk-1.16.1.tgz",
+ "integrity": "sha512-HXXqVUq7+pcKeLqqZj6mHFUMvXtOJt1uoUx09pFW6011inTMxqI8BA8PM95myrIyyKwdnzjdFjLiE6KBPVtJIg==",
+ "requires": {
+ "asn1": "~0.2.3",
+ "assert-plus": "^1.0.0",
+ "bcrypt-pbkdf": "^1.0.0",
+ "dashdash": "^1.12.0",
+ "ecc-jsbn": "~0.1.1",
+ "getpass": "^0.1.1",
+ "jsbn": "~0.1.0",
+ "safer-buffer": "^2.0.2",
+ "tweetnacl": "~0.14.0"
+ }
+ },
+ "ssri": {
+ "version": "6.0.2",
+ "resolved": "https://registry.npmjs.org/ssri/-/ssri-6.0.2.tgz",
+ "integrity": "sha512-cepbSq/neFK7xB6A50KHN0xHDotYzq58wWCa5LeWqnPrHG8GzfEjO/4O8kpmcGW+oaxkvhEJCWgbgNk4/ZV93Q==",
+ "requires": {
+ "figgy-pudding": "^3.5.1"
+ }
+ },
+ "stable": {
+ "version": "0.1.8",
+ "resolved": "https://registry.npmjs.org/stable/-/stable-0.1.8.tgz",
+ "integrity": "sha512-ji9qxRnOVfcuLDySj9qzhGSEFVobyt1kIOSkj1qZzYLzq7Tos/oUUWvotUPQLlrsidqsK6tBH89Bc9kL5zHA6w=="
+ },
+ "stack-utils": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/stack-utils/-/stack-utils-1.0.5.tgz",
+ "integrity": "sha512-KZiTzuV3CnSnSvgMRrARVCj+Ht7rMbauGDK0LdVFRGyenwdylpajAp4Q0i6SX8rEmbTpMMf6ryq2gb8pPq2WgQ==",
+ "requires": {
+ "escape-string-regexp": "^2.0.0"
+ },
+ "dependencies": {
+ "escape-string-regexp": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-2.0.0.tgz",
+ "integrity": "sha512-UpzcLCXolUWcNu5HtVMHYdXJjArjsF9C0aNnquZYY4uW/Vu0miy5YoWvbV345HauVvcAUnpRuhMMcqTcGOY2+w=="
+ }
+ }
+ },
+ "static-extend": {
+ "version": "0.1.2",
+ "resolved": "https://registry.npmjs.org/static-extend/-/static-extend-0.1.2.tgz",
+ "integrity": "sha1-YICcOcv/VTNyJv1eC1IPNB8ftcY=",
+ "requires": {
+ "define-property": "^0.2.5",
+ "object-copy": "^0.1.0"
+ },
+ "dependencies": {
+ "define-property": {
+ "version": "0.2.5",
+ "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz",
+ "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=",
+ "requires": {
+ "is-descriptor": "^0.1.0"
+ }
+ },
+ "is-accessor-descriptor": {
+ "version": "0.1.6",
+ "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-0.1.6.tgz",
+ "integrity": "sha1-qeEss66Nh2cn7u84Q/igiXtcmNY=",
+ "requires": {
+ "kind-of": "^3.0.2"
+ },
+ "dependencies": {
+ "kind-of": {
+ "version": "3.2.2",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz",
+ "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=",
+ "requires": {
+ "is-buffer": "^1.1.5"
+ }
+ }
+ }
+ },
+ "is-data-descriptor": {
+ "version": "0.1.4",
+ "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-0.1.4.tgz",
+ "integrity": "sha1-C17mSDiOLIYCgueT8YVv7D8wG1Y=",
+ "requires": {
+ "kind-of": "^3.0.2"
+ },
+ "dependencies": {
+ "kind-of": {
+ "version": "3.2.2",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz",
+ "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=",
+ "requires": {
+ "is-buffer": "^1.1.5"
+ }
+ }
+ }
+ },
+ "is-descriptor": {
+ "version": "0.1.6",
+ "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-0.1.6.tgz",
+ "integrity": "sha512-avDYr0SB3DwO9zsMov0gKCESFYqCnE4hq/4z3TdUlukEy5t9C0YRq7HLrsN52NAcqXKaepeCD0n+B0arnVG3Hg==",
+ "requires": {
+ "is-accessor-descriptor": "^0.1.6",
+ "is-data-descriptor": "^0.1.4",
+ "kind-of": "^5.0.0"
+ }
+ },
+ "kind-of": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-5.1.0.tgz",
+ "integrity": "sha512-NGEErnH6F2vUuXDh+OlbcKW7/wOcfdRHaZ7VWtqCztfHri/++YKmP51OdWeGPuqCOba6kk2OTe5d02VmTB80Pw=="
+ }
+ }
+ },
+ "statuses": {
+ "version": "1.5.0",
+ "resolved": "https://registry.npmjs.org/statuses/-/statuses-1.5.0.tgz",
+ "integrity": "sha1-Fhx9rBd2Wf2YEfQ3cfqZOBR4Yow="
+ },
+ "std-env": {
+ "version": "2.3.0",
+ "resolved": "https://registry.npmjs.org/std-env/-/std-env-2.3.0.tgz",
+ "integrity": "sha512-4qT5B45+Kjef2Z6pE0BkskzsH0GO7GrND0wGlTM1ioUe3v0dGYx9ZJH0Aro/YyA8fqQ5EyIKDRjZojJYMFTflw==",
+ "requires": {
+ "ci-info": "^3.0.0"
+ },
+ "dependencies": {
+ "ci-info": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-3.1.1.tgz",
+ "integrity": "sha512-kdRWLBIJwdsYJWYJFtAFFYxybguqeF91qpZaggjG5Nf8QKdizFG2hjqvaTXbxFIcYbSaD74KpAXv6BSm17DHEQ=="
+ }
+ }
+ },
+ "stream-browserify": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/stream-browserify/-/stream-browserify-2.0.2.tgz",
+ "integrity": "sha512-nX6hmklHs/gr2FuxYDltq8fJA1GDlxKQCz8O/IM4atRqBH8OORmBNgfvW5gG10GT/qQ9u0CzIvr2X5Pkt6ntqg==",
+ "requires": {
+ "inherits": "~2.0.1",
+ "readable-stream": "^2.0.2"
+ }
+ },
+ "stream-each": {
+ "version": "1.2.3",
+ "resolved": "https://registry.npmjs.org/stream-each/-/stream-each-1.2.3.tgz",
+ "integrity": "sha512-vlMC2f8I2u/bZGqkdfLQW/13Zihpej/7PmSiMQsbYddxuTsJp8vRe2x2FvVExZg7FaOds43ROAuFJwPR4MTZLw==",
+ "requires": {
+ "end-of-stream": "^1.1.0",
+ "stream-shift": "^1.0.0"
+ }
+ },
+ "stream-http": {
+ "version": "2.8.3",
+ "resolved": "https://registry.npmjs.org/stream-http/-/stream-http-2.8.3.tgz",
+ "integrity": "sha512-+TSkfINHDo4J+ZobQLWiMouQYB+UVYFttRA94FpEzzJ7ZdqcL4uUUQ7WkdkI4DSozGmgBUE/a47L+38PenXhUw==",
+ "requires": {
+ "builtin-status-codes": "^3.0.0",
+ "inherits": "^2.0.1",
+ "readable-stream": "^2.3.6",
+ "to-arraybuffer": "^1.0.0",
+ "xtend": "^4.0.0"
+ }
+ },
+ "stream-shift": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/stream-shift/-/stream-shift-1.0.1.tgz",
+ "integrity": "sha512-AiisoFqQ0vbGcZgQPY1cdP2I76glaVA/RauYR4G4thNFgkTqr90yXTo4LYX60Jl+sIlPNHHdGSwo01AvbKUSVQ=="
+ },
+ "strict-uri-encode": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/strict-uri-encode/-/strict-uri-encode-1.1.0.tgz",
+ "integrity": "sha1-J5siXfHVgrH1TmWt3UNS4Y+qBxM="
+ },
+ "string_decoder": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz",
+ "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==",
+ "requires": {
+ "safe-buffer": "~5.1.0"
+ }
+ },
+ "string-width": {
+ "version": "4.2.2",
+ "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.2.tgz",
+ "integrity": "sha512-XBJbT3N4JhVumXE0eoLU9DCjcaF92KLNqTmFCnG1pf8duUxFGwtP6AD6nkjw9a3IdiRtL3E2w3JDiE/xi3vOeA==",
+ "requires": {
+ "emoji-regex": "^8.0.0",
+ "is-fullwidth-code-point": "^3.0.0",
+ "strip-ansi": "^6.0.0"
+ }
+ },
+ "string.prototype.trimend": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/string.prototype.trimend/-/string.prototype.trimend-1.0.4.tgz",
+ "integrity": "sha512-y9xCjw1P23Awk8EvTpcyL2NIr1j7wJ39f+k6lvRnSMz+mz9CGz9NYPelDk42kOz6+ql8xjfK8oYzy3jAP5QU5A==",
+ "requires": {
+ "call-bind": "^1.0.2",
+ "define-properties": "^1.1.3"
+ }
+ },
+ "string.prototype.trimstart": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/string.prototype.trimstart/-/string.prototype.trimstart-1.0.4.tgz",
+ "integrity": "sha512-jh6e984OBfvxS50tdY2nRZnoC5/mLFKOREQfw8t5yytkoUsJRNxvI/E39qu1sD0OtWI3OC0XgKSmcWwziwYuZw==",
+ "requires": {
+ "call-bind": "^1.0.2",
+ "define-properties": "^1.1.3"
+ }
+ },
+ "strip-ansi": {
+ "version": "6.0.0",
+ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.0.tgz",
+ "integrity": "sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w==",
+ "requires": {
+ "ansi-regex": "^5.0.0"
+ }
+ },
+ "strip-bom-string": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/strip-bom-string/-/strip-bom-string-1.0.0.tgz",
+ "integrity": "sha1-5SEekiQ2n7uB1jOi8ABE3IztrZI="
+ },
+ "strip-eof": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/strip-eof/-/strip-eof-1.0.0.tgz",
+ "integrity": "sha1-u0P/VZim6wXYm1n80SnJgzE2Br8="
+ },
+ "strip-json-comments": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-2.0.1.tgz",
+ "integrity": "sha1-PFMZQukIwml8DsNEhYwobHygpgo="
+ },
+ "stylehacks": {
+ "version": "4.0.3",
+ "resolved": "https://registry.npmjs.org/stylehacks/-/stylehacks-4.0.3.tgz",
+ "integrity": "sha512-7GlLk9JwlElY4Y6a/rmbH2MhVlTyVmiJd1PfTCqFaIBEGMYNsrO/v3SeGTdhBThLg4Z+NbOk/qFMwCa+J+3p/g==",
+ "requires": {
+ "browserslist": "^4.0.0",
+ "postcss": "^7.0.0",
+ "postcss-selector-parser": "^3.0.0"
+ },
+ "dependencies": {
+ "postcss-selector-parser": {
+ "version": "3.1.2",
+ "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-3.1.2.tgz",
+ "integrity": "sha512-h7fJ/5uWuRVyOtkO45pnt1Ih40CEleeyCHzipqAZO2e5H20g25Y48uYnFUiShvY4rZWNJ/Bib/KVPmanaCtOhA==",
+ "requires": {
+ "dot-prop": "^5.2.0",
+ "indexes-of": "^1.0.1",
+ "uniq": "^1.0.1"
+ }
+ }
+ }
+ },
+ "stylus": {
+ "version": "0.54.8",
+ "resolved": "https://registry.npmjs.org/stylus/-/stylus-0.54.8.tgz",
+ "integrity": "sha512-vr54Or4BZ7pJafo2mpf0ZcwA74rpuYCZbxrHBsH8kbcXOwSfvBFwsRfpGO5OD5fhG5HDCFW737PKaawI7OqEAg==",
+ "requires": {
+ "css-parse": "~2.0.0",
+ "debug": "~3.1.0",
+ "glob": "^7.1.6",
+ "mkdirp": "~1.0.4",
+ "safer-buffer": "^2.1.2",
+ "sax": "~1.2.4",
+ "semver": "^6.3.0",
+ "source-map": "^0.7.3"
+ },
+ "dependencies": {
+ "debug": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-3.1.0.tgz",
+ "integrity": "sha512-OX8XqP7/1a9cqkxYw2yXss15f26NKWBpDXQd0/uK/KPqdQhxbPa994hnzjcE2VqQpDslf55723cKPUOGSmMY3g==",
+ "requires": {
+ "ms": "2.0.0"
+ }
+ }
+ }
+ },
+ "stylus-loader": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/stylus-loader/-/stylus-loader-3.0.2.tgz",
+ "integrity": "sha512-+VomPdZ6a0razP+zinir61yZgpw2NfljeSsdUF5kJuEzlo3khXhY19Fn6l8QQz1GRJGtMCo8nG5C04ePyV7SUA==",
+ "requires": {
+ "loader-utils": "^1.0.2",
+ "lodash.clonedeep": "^4.5.0",
+ "when": "~3.6.x"
+ }
+ },
+ "supports-color": {
+ "version": "5.5.0",
+ "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz",
+ "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==",
+ "requires": {
+ "has-flag": "^3.0.0"
+ }
+ },
+ "svg-tags": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/svg-tags/-/svg-tags-1.0.0.tgz",
+ "integrity": "sha1-WPcc7jvVGbWdSyqEO2x95krAR2Q="
+ },
+ "svgo": {
+ "version": "1.3.2",
+ "resolved": "https://registry.npmjs.org/svgo/-/svgo-1.3.2.tgz",
+ "integrity": "sha512-yhy/sQYxR5BkC98CY7o31VGsg014AKLEPxdfhora76l36hD9Rdy5NZA/Ocn6yayNPgSamYdtX2rFJdcv07AYVw==",
+ "requires": {
+ "chalk": "^2.4.1",
+ "coa": "^2.0.2",
+ "css-select": "^2.0.0",
+ "css-select-base-adapter": "^0.1.1",
+ "css-tree": "1.0.0-alpha.37",
+ "csso": "^4.0.2",
+ "js-yaml": "^3.13.1",
+ "mkdirp": "~0.5.1",
+ "object.values": "^1.1.0",
+ "sax": "~1.2.4",
+ "stable": "^0.1.8",
+ "unquote": "~1.1.1",
+ "util.promisify": "~1.0.0"
+ },
+ "dependencies": {
+ "css-select": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/css-select/-/css-select-2.1.0.tgz",
+ "integrity": "sha512-Dqk7LQKpwLoH3VovzZnkzegqNSuAziQyNZUcrdDM401iY+R5NkGBXGmtO05/yaXQziALuPogeG0b7UAgjnTJTQ==",
+ "requires": {
+ "boolbase": "^1.0.0",
+ "css-what": "^3.2.1",
+ "domutils": "^1.7.0",
+ "nth-check": "^1.0.2"
+ }
+ },
+ "css-what": {
+ "version": "3.4.2",
+ "resolved": "https://registry.npmjs.org/css-what/-/css-what-3.4.2.tgz",
+ "integrity": "sha512-ACUm3L0/jiZTqfzRM3Hi9Q8eZqd6IK37mMWPLz9PJxkLWllYeRf+EHUSHYEtFop2Eqytaq1FizFVh7XfBnXCDQ=="
+ },
+ "dom-serializer": {
+ "version": "0.2.2",
+ "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-0.2.2.tgz",
+ "integrity": "sha512-2/xPb3ORsQ42nHYiSunXkDjPLBaEj/xTwUO4B7XCZQTRk7EBtTOPaygh10YAAh2OI1Qrp6NWfpAhzswj0ydt9g==",
+ "requires": {
+ "domelementtype": "^2.0.1",
+ "entities": "^2.0.0"
+ }
+ },
+ "domutils": {
+ "version": "1.7.0",
+ "resolved": "https://registry.npmjs.org/domutils/-/domutils-1.7.0.tgz",
+ "integrity": "sha512-Lgd2XcJ/NjEw+7tFvfKxOzCYKZsdct5lczQ2ZaQY8Djz7pfAD3Gbp8ySJWtreII/vDlMVmxwa6pHmdxIYgttDg==",
+ "requires": {
+ "dom-serializer": "0",
+ "domelementtype": "1"
+ },
+ "dependencies": {
+ "domelementtype": {
+ "version": "1.3.1",
+ "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-1.3.1.tgz",
+ "integrity": "sha512-BSKB+TSpMpFI/HOxCNr1O8aMOTZ8hT3pM3GQ0w/mWRmkhEDSFJkkyzz4XQsBV44BChwGkrDfMyjVD0eA2aFV3w=="
+ }
+ }
+ },
+ "mkdirp": {
+ "version": "0.5.5",
+ "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz",
+ "integrity": "sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==",
+ "requires": {
+ "minimist": "^1.2.5"
+ }
+ },
+ "nth-check": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/nth-check/-/nth-check-1.0.2.tgz",
+ "integrity": "sha512-WeBOdju8SnzPN5vTUJYxYUxLeXpCaVP5i5e0LF8fg7WORF2Wd7wFX/pk0tYZk7s8T+J7VLy0Da6J1+wCT0AtHg==",
+ "requires": {
+ "boolbase": "~1.0.0"
+ }
+ }
+ }
+ },
+ "tapable": {
+ "version": "1.1.3",
+ "resolved": "https://registry.npmjs.org/tapable/-/tapable-1.1.3.tgz",
+ "integrity": "sha512-4WK/bYZmj8xLr+HUCODHGF1ZFzsYffasLUgEiMBY4fgtltdO6B4WJtlSbPaDTLpYTcGVwM2qLnFTICEcNxs3kA=="
+ },
+ "term-size": {
+ "version": "2.2.1",
+ "resolved": "https://registry.npmjs.org/term-size/-/term-size-2.2.1.tgz",
+ "integrity": "sha512-wK0Ri4fOGjv/XPy8SBHZChl8CM7uMc5VML7SqiQ0zG7+J5Vr+RMQDoHa2CNT6KHUnTGIXH34UDMkPzAUyapBZg=="
+ },
+ "terser": {
+ "version": "4.8.0",
+ "resolved": "https://registry.npmjs.org/terser/-/terser-4.8.0.tgz",
+ "integrity": "sha512-EAPipTNeWsb/3wLPeup1tVPaXfIaU68xMnVdPafIL1TV05OhASArYyIfFvnvJCNrR2NIOvDVNNTFRa+Re2MWyw==",
+ "requires": {
+ "commander": "^2.20.0",
+ "source-map": "~0.6.1",
+ "source-map-support": "~0.5.12"
+ },
+ "dependencies": {
+ "source-map": {
+ "version": "0.6.1",
+ "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz",
+ "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g=="
+ }
+ }
+ },
+ "terser-webpack-plugin": {
+ "version": "1.4.5",
+ "resolved": "https://registry.npmjs.org/terser-webpack-plugin/-/terser-webpack-plugin-1.4.5.tgz",
+ "integrity": "sha512-04Rfe496lN8EYruwi6oPQkG0vo8C+HT49X687FZnpPF0qMAIHONI6HEXYPKDOE8e5HjXTyKfqRd/agHtH0kOtw==",
+ "requires": {
+ "cacache": "^12.0.2",
+ "find-cache-dir": "^2.1.0",
+ "is-wsl": "^1.1.0",
+ "schema-utils": "^1.0.0",
+ "serialize-javascript": "^4.0.0",
+ "source-map": "^0.6.1",
+ "terser": "^4.1.2",
+ "webpack-sources": "^1.4.0",
+ "worker-farm": "^1.7.0"
+ },
+ "dependencies": {
+ "find-cache-dir": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/find-cache-dir/-/find-cache-dir-2.1.0.tgz",
+ "integrity": "sha512-Tq6PixE0w/VMFfCgbONnkiQIVol/JJL7nRMi20fqzA4NRs9AfeqMGeRdPi3wIhYkxjeBaWh2rxwapn5Tu3IqOQ==",
+ "requires": {
+ "commondir": "^1.0.1",
+ "make-dir": "^2.0.0",
+ "pkg-dir": "^3.0.0"
+ }
+ },
+ "find-up": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz",
+ "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==",
+ "requires": {
+ "locate-path": "^3.0.0"
+ }
+ },
+ "locate-path": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz",
+ "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==",
+ "requires": {
+ "p-locate": "^3.0.0",
+ "path-exists": "^3.0.0"
+ }
+ },
+ "make-dir": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-2.1.0.tgz",
+ "integrity": "sha512-LS9X+dc8KLxXCb8dni79fLIIUA5VyZoyjSMCwTluaXA0o27cCK0bhXkpgw+sTXVpPy/lSO57ilRixqk0vDmtRA==",
+ "requires": {
+ "pify": "^4.0.1",
+ "semver": "^5.6.0"
+ }
+ },
+ "p-locate": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz",
+ "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==",
+ "requires": {
+ "p-limit": "^2.0.0"
+ }
+ },
+ "path-exists": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz",
+ "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU="
+ },
+ "pkg-dir": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-3.0.0.tgz",
+ "integrity": "sha512-/E57AYkoeQ25qkxMj5PBOVgF8Kiu/h7cYS30Z5+R7WaiCCBfLq58ZI/dSeaEKb9WVJV5n/03QwrN3IeWIFllvw==",
+ "requires": {
+ "find-up": "^3.0.0"
+ }
+ },
+ "schema-utils": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-1.0.0.tgz",
+ "integrity": "sha512-i27Mic4KovM/lnGsy8whRCHhc7VicJajAjTrYg11K9zfZXnYIt4k5F+kZkwjnrhKzLic/HLU4j11mjsz2G/75g==",
+ "requires": {
+ "ajv": "^6.1.0",
+ "ajv-errors": "^1.0.0",
+ "ajv-keywords": "^3.1.0"
+ }
+ },
+ "semver": {
+ "version": "5.7.1",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz",
+ "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ=="
+ },
+ "source-map": {
+ "version": "0.6.1",
+ "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz",
+ "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g=="
+ }
+ }
+ },
+ "text-table": {
+ "version": "0.2.0",
+ "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz",
+ "integrity": "sha1-f17oI66AUgfACvLfSoTsP8+lcLQ="
+ },
+ "through": {
+ "version": "2.3.8",
+ "resolved": "https://registry.npmjs.org/through/-/through-2.3.8.tgz",
+ "integrity": "sha1-DdTJ/6q8NXlgsbckEV1+Doai4fU="
+ },
+ "through2": {
+ "version": "2.0.5",
+ "resolved": "https://registry.npmjs.org/through2/-/through2-2.0.5.tgz",
+ "integrity": "sha512-/mrRod8xqpA+IHSLyGCQ2s8SPHiCDEeQJSep1jqLYeEUClOFG2Qsh+4FU6G9VeqpZnGW/Su8LQGc4YKni5rYSQ==",
+ "requires": {
+ "readable-stream": "~2.3.6",
+ "xtend": "~4.0.1"
+ }
+ },
+ "thunky": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/thunky/-/thunky-1.1.0.tgz",
+ "integrity": "sha512-eHY7nBftgThBqOyHGVN+l8gF0BucP09fMo0oO/Lb0w1OF80dJv+lDVpXG60WMQvkcxAkNybKsrEIE3ZtKGmPrA=="
+ },
+ "timers-browserify": {
+ "version": "2.0.12",
+ "resolved": "https://registry.npmjs.org/timers-browserify/-/timers-browserify-2.0.12.tgz",
+ "integrity": "sha512-9phl76Cqm6FhSX9Xe1ZUAMLtm1BLkKj2Qd5ApyWkXzsMRaA7dgr81kf4wJmQf/hAvg8EEyJxDo3du/0KlhPiKQ==",
+ "requires": {
+ "setimmediate": "^1.0.4"
+ }
+ },
+ "timsort": {
+ "version": "0.3.0",
+ "resolved": "https://registry.npmjs.org/timsort/-/timsort-0.3.0.tgz",
+ "integrity": "sha1-QFQRqOfmM5/mTbmiNN4R3DHgK9Q="
+ },
+ "tiny-cookie": {
+ "version": "2.3.2",
+ "resolved": "https://registry.npmjs.org/tiny-cookie/-/tiny-cookie-2.3.2.tgz",
+ "integrity": "sha512-qbymkVh+6+Gc/c9sqnvbG+dOHH6bschjphK3SHgIfT6h/t+63GBL37JXNoXEc6u/+BcwU6XmaWUuf19ouLVtPg=="
+ },
+ "tiny-emitter": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/tiny-emitter/-/tiny-emitter-2.1.0.tgz",
+ "integrity": "sha512-NB6Dk1A9xgQPMoGqC5CVXn123gWyte215ONT5Pp5a0yt4nlEoO1ZWeCwpncaekPHXO60i47ihFnZPiRPjRMq4Q==",
+ "optional": true
+ },
+ "to-arraybuffer": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/to-arraybuffer/-/to-arraybuffer-1.0.1.tgz",
+ "integrity": "sha1-fSKbH8xjfkZsoIEYCDanqr/4P0M="
+ },
+ "to-factory": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/to-factory/-/to-factory-1.0.0.tgz",
+ "integrity": "sha1-hzivi9lxIK0dQEeXKtpVY7+UebE="
+ },
+ "to-fast-properties": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-2.0.0.tgz",
+ "integrity": "sha1-3F5pjL0HkmW8c+A3doGk5Og/YW4="
+ },
+ "to-object-path": {
+ "version": "0.3.0",
+ "resolved": "https://registry.npmjs.org/to-object-path/-/to-object-path-0.3.0.tgz",
+ "integrity": "sha1-KXWIt7Dn4KwI4E5nL4XB9JmeF68=",
+ "requires": {
+ "kind-of": "^3.0.2"
+ },
+ "dependencies": {
+ "kind-of": {
+ "version": "3.2.2",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz",
+ "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=",
+ "requires": {
+ "is-buffer": "^1.1.5"
+ }
+ }
+ }
+ },
+ "to-readable-stream": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/to-readable-stream/-/to-readable-stream-1.0.0.tgz",
+ "integrity": "sha512-Iq25XBt6zD5npPhlLVXGFN3/gyR2/qODcKNNyTMd4vbm39HUaOiAM4PMq0eMVC/Tkxz+Zjdsc55g9yyz+Yq00Q=="
+ },
+ "to-regex": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/to-regex/-/to-regex-3.0.2.tgz",
+ "integrity": "sha512-FWtleNAtZ/Ki2qtqej2CXTOayOH9bHDQF+Q48VpWyDXjbYxA4Yz8iDB31zXOBUlOHHKidDbqGVrTUvQMPmBGBw==",
+ "requires": {
+ "define-property": "^2.0.2",
+ "extend-shallow": "^3.0.2",
+ "regex-not": "^1.0.2",
+ "safe-regex": "^1.1.0"
+ },
+ "dependencies": {
+ "extend-shallow": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-3.0.2.tgz",
+ "integrity": "sha1-Jqcarwc7OfshJxcnRhMcJwQCjbg=",
+ "requires": {
+ "assign-symbols": "^1.0.0",
+ "is-extendable": "^1.0.1"
+ }
+ },
+ "is-extendable": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz",
+ "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==",
+ "requires": {
+ "is-plain-object": "^2.0.4"
+ }
+ }
+ }
+ },
+ "to-regex-range": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-2.1.1.tgz",
+ "integrity": "sha1-fIDBe53+vlmeJzZ+DU3VWQFB2zg=",
+ "requires": {
+ "is-number": "^3.0.0",
+ "repeat-string": "^1.6.1"
+ }
+ },
+ "toidentifier": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.0.tgz",
+ "integrity": "sha512-yaOH/Pk/VEhBWWTlhI+qXxDFXlejDGcQipMlyxda9nthulaxLZUNcUqFxokp0vcYnvteJln5FNQDRrxj3YcbVw=="
+ },
+ "token-stream": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/token-stream/-/token-stream-1.0.0.tgz",
+ "integrity": "sha1-zCAOqyYT9BZtJ/+a/HylbUnfbrQ="
+ },
+ "toml": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/toml/-/toml-3.0.0.tgz",
+ "integrity": "sha512-y/mWCZinnvxjTKYhJ+pYxwD0mRLVvOtdS2Awbgxln6iEnt4rk0yBxeSBHkGJcPucRiG0e55mwWp+g/05rsrd6w=="
+ },
+ "toposort": {
+ "version": "1.0.7",
+ "resolved": "https://registry.npmjs.org/toposort/-/toposort-1.0.7.tgz",
+ "integrity": "sha1-LmhELZ9k7HILjMieZEOsbKqVACk="
+ },
+ "tough-cookie": {
+ "version": "2.5.0",
+ "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-2.5.0.tgz",
+ "integrity": "sha512-nlLsUzgm1kfLXSXfRZMc1KLAugd4hqJHDTvc2hDIwS3mZAfMEuMbc03SujMF+GEcpaX/qboeycw6iO8JwVv2+g==",
+ "requires": {
+ "psl": "^1.1.28",
+ "punycode": "^2.1.1"
+ }
+ },
+ "tr46": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/tr46/-/tr46-1.0.1.tgz",
+ "integrity": "sha1-qLE/1r/SSJUZZ0zN5VujaTtwbQk=",
+ "requires": {
+ "punycode": "^2.1.0"
+ }
+ },
+ "tty-browserify": {
+ "version": "0.0.0",
+ "resolved": "https://registry.npmjs.org/tty-browserify/-/tty-browserify-0.0.0.tgz",
+ "integrity": "sha1-oVe6QC2iTpv5V/mqadUk7tQpAaY="
+ },
+ "tunnel-agent": {
+ "version": "0.6.0",
+ "resolved": "https://registry.npmjs.org/tunnel-agent/-/tunnel-agent-0.6.0.tgz",
+ "integrity": "sha1-J6XeoGs2sEoKmWZ3SykIaPD8QP0=",
+ "requires": {
+ "safe-buffer": "^5.0.1"
+ }
+ },
+ "tweetnacl": {
+ "version": "0.14.5",
+ "resolved": "https://registry.npmjs.org/tweetnacl/-/tweetnacl-0.14.5.tgz",
+ "integrity": "sha1-WuaBd/GS1EViadEIr6k/+HQ/T2Q="
+ },
+ "type-fest": {
+ "version": "0.8.1",
+ "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.8.1.tgz",
+ "integrity": "sha512-4dbzIzqvjtgiM5rw1k5rEHtBANKmdudhGyBEajN01fEyhaAIhsoKNy6y7+IN93IfpFtwY9iqi7kD+xwKhQsNJA=="
+ },
+ "type-is": {
+ "version": "1.6.18",
+ "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz",
+ "integrity": "sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==",
+ "requires": {
+ "media-typer": "0.3.0",
+ "mime-types": "~2.1.24"
+ }
+ },
+ "typedarray": {
+ "version": "0.0.6",
+ "resolved": "https://registry.npmjs.org/typedarray/-/typedarray-0.0.6.tgz",
+ "integrity": "sha1-hnrHTjhkGHsdPUfZlqeOxciDB3c="
+ },
+ "typedarray-to-buffer": {
+ "version": "3.1.5",
+ "resolved": "https://registry.npmjs.org/typedarray-to-buffer/-/typedarray-to-buffer-3.1.5.tgz",
+ "integrity": "sha512-zdu8XMNEDepKKR+XYOXAVPtWui0ly0NtohUscw+UmaHiAWT8hrV1rr//H6V+0DvJ3OQ19S979M0laLfX8rm82Q==",
+ "requires": {
+ "is-typedarray": "^1.0.0"
+ }
+ },
+ "uc.micro": {
+ "version": "1.0.6",
+ "resolved": "https://registry.npmjs.org/uc.micro/-/uc.micro-1.0.6.tgz",
+ "integrity": "sha512-8Y75pvTYkLJW2hWQHXxoqRgV7qb9B+9vFEtidML+7koHUFapnVJAZ6cKs+Qjz5Aw3aZWHMC6u0wJE3At+nSGwA=="
+ },
+ "uglify-js": {
+ "version": "3.4.10",
+ "resolved": "https://registry.npmjs.org/uglify-js/-/uglify-js-3.4.10.tgz",
+ "integrity": "sha512-Y2VsbPVs0FIshJztycsO2SfPk7/KAF/T72qzv9u5EpQ4kB2hQoHlhNQTsNyy6ul7lQtqJN/AoWeS23OzEiEFxw==",
+ "requires": {
+ "commander": "~2.19.0",
+ "source-map": "~0.6.1"
+ },
+ "dependencies": {
+ "commander": {
+ "version": "2.19.0",
+ "resolved": "https://registry.npmjs.org/commander/-/commander-2.19.0.tgz",
+ "integrity": "sha512-6tvAOO+D6OENvRAh524Dh9jcfKTYDQAqvqezbCW82xj5X0pSrcpxtvRKHLG0yBY6SD7PSDrJaj+0AiOcKVd1Xg=="
+ },
+ "source-map": {
+ "version": "0.6.1",
+ "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz",
+ "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g=="
+ }
+ }
+ },
+ "unbox-primitive": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/unbox-primitive/-/unbox-primitive-1.0.1.tgz",
+ "integrity": "sha512-tZU/3NqK3dA5gpE1KtyiJUrEB0lxnGkMFHptJ7q6ewdZ8s12QrODwNbhIJStmJkd1QDXa1NRA8aF2A1zk/Ypyw==",
+ "requires": {
+ "function-bind": "^1.1.1",
+ "has-bigints": "^1.0.1",
+ "has-symbols": "^1.0.2",
+ "which-boxed-primitive": "^1.0.2"
+ }
+ },
+ "unicode-canonical-property-names-ecmascript": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/unicode-canonical-property-names-ecmascript/-/unicode-canonical-property-names-ecmascript-1.0.4.tgz",
+ "integrity": "sha512-jDrNnXWHd4oHiTZnx/ZG7gtUTVp+gCcTTKr8L0HjlwphROEW3+Him+IpvC+xcJEFegapiMZyZe02CyuOnRmbnQ=="
+ },
+ "unicode-match-property-ecmascript": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/unicode-match-property-ecmascript/-/unicode-match-property-ecmascript-1.0.4.tgz",
+ "integrity": "sha512-L4Qoh15vTfntsn4P1zqnHulG0LdXgjSO035fEpdtp6YxXhMT51Q6vgM5lYdG/5X3MjS+k/Y9Xw4SFCY9IkR0rg==",
+ "requires": {
+ "unicode-canonical-property-names-ecmascript": "^1.0.4",
+ "unicode-property-aliases-ecmascript": "^1.0.4"
+ }
+ },
+ "unicode-match-property-value-ecmascript": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/unicode-match-property-value-ecmascript/-/unicode-match-property-value-ecmascript-1.2.0.tgz",
+ "integrity": "sha512-wjuQHGQVofmSJv1uVISKLE5zO2rNGzM/KCYZch/QQvez7C1hUhBIuZ701fYXExuufJFMPhv2SyL8CyoIfMLbIQ=="
+ },
+ "unicode-property-aliases-ecmascript": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/unicode-property-aliases-ecmascript/-/unicode-property-aliases-ecmascript-1.1.0.tgz",
+ "integrity": "sha512-PqSoPh/pWetQ2phoj5RLiaqIk4kCNwoV3CI+LfGmWLKI3rE3kl1h59XpX2BjgDrmbxD9ARtQobPGU1SguCYuQg=="
+ },
+ "union-value": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/union-value/-/union-value-1.0.1.tgz",
+ "integrity": "sha512-tJfXmxMeWYnczCVs7XAEvIV7ieppALdyepWMkHkwciRpZraG/xwT+s2JN8+pr1+8jCRf80FFzvr+MpQeeoF4Xg==",
+ "requires": {
+ "arr-union": "^3.1.0",
+ "get-value": "^2.0.6",
+ "is-extendable": "^0.1.1",
+ "set-value": "^2.0.1"
+ }
+ },
+ "uniq": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/uniq/-/uniq-1.0.1.tgz",
+ "integrity": "sha1-sxxa6CVIRKOoKBVBzisEuGWnNP8="
+ },
+ "uniqs": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/uniqs/-/uniqs-2.0.0.tgz",
+ "integrity": "sha1-/+3ks2slKQaW5uFl1KWe25mOawI="
+ },
+ "unique-filename": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/unique-filename/-/unique-filename-1.1.1.tgz",
+ "integrity": "sha512-Vmp0jIp2ln35UTXuryvjzkjGdRyf9b2lTXuSYUiPmzRcl3FDtYqAwOnTJkAngD9SWhnoJzDbTKwaOrZ+STtxNQ==",
+ "requires": {
+ "unique-slug": "^2.0.0"
+ }
+ },
+ "unique-slug": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/unique-slug/-/unique-slug-2.0.2.tgz",
+ "integrity": "sha512-zoWr9ObaxALD3DOPfjPSqxt4fnZiWblxHIgeWqW8x7UqDzEtHEQLzji2cuJYQFCU6KmoJikOYAZlrTHHebjx2w==",
+ "requires": {
+ "imurmurhash": "^0.1.4"
+ }
+ },
+ "unique-string": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/unique-string/-/unique-string-2.0.0.tgz",
+ "integrity": "sha512-uNaeirEPvpZWSgzwsPGtU2zVSTrn/8L5q/IexZmH0eH6SA73CmAA5U4GwORTxQAZs95TAXLNqeLoPPNO5gZfWg==",
+ "requires": {
+ "crypto-random-string": "^2.0.0"
+ }
+ },
+ "universalify": {
+ "version": "0.1.2",
+ "resolved": "https://registry.npmjs.org/universalify/-/universalify-0.1.2.tgz",
+ "integrity": "sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg=="
+ },
+ "unpipe": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz",
+ "integrity": "sha1-sr9O6FFKrmFltIF4KdIbLvSZBOw="
+ },
+ "unquote": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/unquote/-/unquote-1.1.1.tgz",
+ "integrity": "sha1-j97XMk7G6IoP+LkF58CYzcCG1UQ="
+ },
+ "unset-value": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/unset-value/-/unset-value-1.0.0.tgz",
+ "integrity": "sha1-g3aHP30jNRef+x5vw6jtDfyKtVk=",
+ "requires": {
+ "has-value": "^0.3.1",
+ "isobject": "^3.0.0"
+ },
+ "dependencies": {
+ "has-value": {
+ "version": "0.3.1",
+ "resolved": "https://registry.npmjs.org/has-value/-/has-value-0.3.1.tgz",
+ "integrity": "sha1-ex9YutpiyoJ+wKIHgCVlSEWZXh8=",
+ "requires": {
+ "get-value": "^2.0.3",
+ "has-values": "^0.1.4",
+ "isobject": "^2.0.0"
+ },
+ "dependencies": {
+ "isobject": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/isobject/-/isobject-2.1.0.tgz",
+ "integrity": "sha1-8GVWEJaj8dou9GJy+BXIQNh+DIk=",
+ "requires": {
+ "isarray": "1.0.0"
+ }
+ }
+ }
+ },
+ "has-values": {
+ "version": "0.1.4",
+ "resolved": "https://registry.npmjs.org/has-values/-/has-values-0.1.4.tgz",
+ "integrity": "sha1-bWHeldkd/Km5oCCJrThL/49it3E="
+ }
+ }
+ },
+ "upath": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/upath/-/upath-1.2.0.tgz",
+ "integrity": "sha512-aZwGpamFO61g3OlfT7OQCHqhGnW43ieH9WZeP7QxN/G/jS4jfqUkZxoryvJgVPEcrl5NL/ggHsSmLMHuH64Lhg=="
+ },
+ "update-notifier": {
+ "version": "4.1.3",
+ "resolved": "https://registry.npmjs.org/update-notifier/-/update-notifier-4.1.3.tgz",
+ "integrity": "sha512-Yld6Z0RyCYGB6ckIjffGOSOmHXj1gMeE7aROz4MG+XMkmixBX4jUngrGXNYz7wPKBmtoD4MnBa2Anu7RSKht/A==",
+ "requires": {
+ "boxen": "^4.2.0",
+ "chalk": "^3.0.0",
+ "configstore": "^5.0.1",
+ "has-yarn": "^2.1.0",
+ "import-lazy": "^2.1.0",
+ "is-ci": "^2.0.0",
+ "is-installed-globally": "^0.3.1",
+ "is-npm": "^4.0.0",
+ "is-yarn-global": "^0.3.0",
+ "latest-version": "^5.0.0",
+ "pupa": "^2.0.1",
+ "semver-diff": "^3.1.1",
+ "xdg-basedir": "^4.0.0"
+ },
+ "dependencies": {
+ "ansi-styles": {
+ "version": "4.3.0",
+ "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz",
+ "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==",
+ "requires": {
+ "color-convert": "^2.0.1"
+ }
+ },
+ "chalk": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/chalk/-/chalk-3.0.0.tgz",
+ "integrity": "sha512-4D3B6Wf41KOYRFdszmDqMCGq5VV/uMAB273JILmO+3jAlh8X4qDtdtgCR3fxtbLEMzSx22QdhnDcJvu2u1fVwg==",
+ "requires": {
+ "ansi-styles": "^4.1.0",
+ "supports-color": "^7.1.0"
+ }
+ },
+ "color-convert": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz",
+ "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==",
+ "requires": {
+ "color-name": "~1.1.4"
+ }
+ },
+ "color-name": {
+ "version": "1.1.4",
+ "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz",
+ "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA=="
+ },
+ "has-flag": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz",
+ "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ=="
+ },
+ "supports-color": {
+ "version": "7.2.0",
+ "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz",
+ "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==",
+ "requires": {
+ "has-flag": "^4.0.0"
+ }
+ }
+ }
+ },
+ "upper-case": {
+ "version": "1.1.3",
+ "resolved": "https://registry.npmjs.org/upper-case/-/upper-case-1.1.3.tgz",
+ "integrity": "sha1-9rRQHC7EzdJrp4vnIilh3ndiFZg="
+ },
+ "uri-js": {
+ "version": "4.4.1",
+ "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz",
+ "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==",
+ "requires": {
+ "punycode": "^2.1.0"
+ }
+ },
+ "urix": {
+ "version": "0.1.0",
+ "resolved": "https://registry.npmjs.org/urix/-/urix-0.1.0.tgz",
+ "integrity": "sha1-2pN/emLiH+wf0Y1Js1wpNQZ6bHI="
+ },
+ "url": {
+ "version": "0.11.0",
+ "resolved": "https://registry.npmjs.org/url/-/url-0.11.0.tgz",
+ "integrity": "sha1-ODjpfPxgUh63PFJajlW/3Z4uKPE=",
+ "requires": {
+ "punycode": "1.3.2",
+ "querystring": "0.2.0"
+ },
+ "dependencies": {
+ "punycode": {
+ "version": "1.3.2",
+ "resolved": "https://registry.npmjs.org/punycode/-/punycode-1.3.2.tgz",
+ "integrity": "sha1-llOgNvt8HuQjQvIyXM7v6jkmxI0="
+ },
+ "querystring": {
+ "version": "0.2.0",
+ "resolved": "https://registry.npmjs.org/querystring/-/querystring-0.2.0.tgz",
+ "integrity": "sha1-sgmEkgO7Jd+CDadW50cAWHhSFiA="
+ }
+ }
+ },
+ "url-loader": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/url-loader/-/url-loader-1.1.2.tgz",
+ "integrity": "sha512-dXHkKmw8FhPqu8asTc1puBfe3TehOCo2+RmOOev5suNCIYBcT626kxiWg1NBVkwc4rO8BGa7gP70W7VXuqHrjg==",
+ "requires": {
+ "loader-utils": "^1.1.0",
+ "mime": "^2.0.3",
+ "schema-utils": "^1.0.0"
+ },
+ "dependencies": {
+ "schema-utils": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-1.0.0.tgz",
+ "integrity": "sha512-i27Mic4KovM/lnGsy8whRCHhc7VicJajAjTrYg11K9zfZXnYIt4k5F+kZkwjnrhKzLic/HLU4j11mjsz2G/75g==",
+ "requires": {
+ "ajv": "^6.1.0",
+ "ajv-errors": "^1.0.0",
+ "ajv-keywords": "^3.1.0"
+ }
+ }
+ }
+ },
+ "url-parse": {
+ "version": "1.5.1",
+ "resolved": "https://registry.npmjs.org/url-parse/-/url-parse-1.5.1.tgz",
+ "integrity": "sha512-HOfCOUJt7iSYzEx/UqgtwKRMC6EU91NFhsCHMv9oM03VJcVo2Qrp8T8kI9D7amFf1cu+/3CEhgb3rF9zL7k85Q==",
+ "requires": {
+ "querystringify": "^2.1.1",
+ "requires-port": "^1.0.0"
+ }
+ },
+ "url-parse-lax": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/url-parse-lax/-/url-parse-lax-3.0.0.tgz",
+ "integrity": "sha1-FrXK/Afb42dsGxmZF3gj1lA6yww=",
+ "requires": {
+ "prepend-http": "^2.0.0"
+ }
+ },
+ "use": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/use/-/use-3.1.1.tgz",
+ "integrity": "sha512-cwESVXlO3url9YWlFW/TA9cshCEhtu7IKJ/p5soJ/gGpj7vbvFrAY/eIioQ6Dw23KjZhYgiIo8HOs1nQ2vr/oQ=="
+ },
+ "util": {
+ "version": "0.11.1",
+ "resolved": "https://registry.npmjs.org/util/-/util-0.11.1.tgz",
+ "integrity": "sha512-HShAsny+zS2TZfaXxD9tYj4HQGlBezXZMZuM/S5PKLLoZkShZiGk9o5CzukI1LVHZvjdvZ2Sj1aW/Ndn2NB/HQ==",
+ "requires": {
+ "inherits": "2.0.3"
+ },
+ "dependencies": {
+ "inherits": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz",
+ "integrity": "sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4="
+ }
+ }
+ },
+ "util-deprecate": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz",
+ "integrity": "sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8="
+ },
+ "util.promisify": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/util.promisify/-/util.promisify-1.0.1.tgz",
+ "integrity": "sha512-g9JpC/3He3bm38zsLupWryXHoEcS22YHthuPQSJdMy6KNrzIRzWqcsHzD/WUnqe45whVou4VIsPew37DoXWNrA==",
+ "requires": {
+ "define-properties": "^1.1.3",
+ "es-abstract": "^1.17.2",
+ "has-symbols": "^1.0.1",
+ "object.getownpropertydescriptors": "^2.1.0"
+ }
+ },
+ "utila": {
+ "version": "0.4.0",
+ "resolved": "https://registry.npmjs.org/utila/-/utila-0.4.0.tgz",
+ "integrity": "sha1-ihagXURWV6Oupe7MWxKk+lN5dyw="
+ },
+ "utils-merge": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz",
+ "integrity": "sha1-n5VxD1CiZ5R7LMwSR0HBAoQn5xM="
+ },
+ "uuid": {
+ "version": "3.4.0",
+ "resolved": "https://registry.npmjs.org/uuid/-/uuid-3.4.0.tgz",
+ "integrity": "sha512-HjSDRw6gZE5JMggctHBcjVak08+KEVhSIiDzFnT9S9aegmp85S/bReBVTb4QTFaRNptJ9kuYaNhnbNEOkbKb/A=="
+ },
+ "v-runtime-template": {
+ "version": "1.10.0",
+ "resolved": "https://registry.npmjs.org/v-runtime-template/-/v-runtime-template-1.10.0.tgz",
+ "integrity": "sha512-WLlq9jUepSfUrMEenw3mn7FDXX6hhbl11JjC1OKhwLzifHzVrY5a696TUHDPyj9jke3GGnR7b+2T3od/RL5cww=="
+ },
+ "vary": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz",
+ "integrity": "sha1-IpnwLG3tMNSllhsLn3RSShj2NPw="
+ },
+ "vendors": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/vendors/-/vendors-1.0.4.tgz",
+ "integrity": "sha512-/juG65kTL4Cy2su4P8HjtkTxk6VmJDiOPBufWniqQ6wknac6jNiXS9vU+hO3wgusiyqWlzTbVHi0dyJqRONg3w=="
+ },
+ "verror": {
+ "version": "1.10.0",
+ "resolved": "https://registry.npmjs.org/verror/-/verror-1.10.0.tgz",
+ "integrity": "sha1-OhBcoXBTr1XW4nDB+CiGguGNpAA=",
+ "requires": {
+ "assert-plus": "^1.0.0",
+ "core-util-is": "1.0.2",
+ "extsprintf": "^1.2.0"
+ }
+ },
+ "vm-browserify": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/vm-browserify/-/vm-browserify-1.1.2.tgz",
+ "integrity": "sha512-2ham8XPWTONajOR0ohOKOHXkm3+gaBmGut3SRuu75xLd/RRaY6vqgh8NBYYk7+RW3u5AtzPQZG8F10LHkl0lAQ=="
+ },
+ "void-elements": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/void-elements/-/void-elements-3.1.0.tgz",
+ "integrity": "sha1-YU9/v42AHwu18GYfWy9XhXUOTwk="
+ },
+ "vue": {
+ "version": "2.6.12",
+ "resolved": "https://registry.npmjs.org/vue/-/vue-2.6.12.tgz",
+ "integrity": "sha512-uhmLFETqPPNyuLLbsKz6ioJ4q7AZHzD8ZVFNATNyICSZouqP2Sz0rotWQC8UNBF6VGSCs5abnKJoStA6JbCbfg=="
+ },
+ "vue-hot-reload-api": {
+ "version": "2.3.4",
+ "resolved": "https://registry.npmjs.org/vue-hot-reload-api/-/vue-hot-reload-api-2.3.4.tgz",
+ "integrity": "sha512-BXq3jwIagosjgNVae6tkHzzIk6a8MHFtzAdwhnV5VlvPTFxDCvIttgSiHWjdGoTJvXtmRu5HacExfdarRcFhog=="
+ },
+ "vue-loader": {
+ "version": "15.9.6",
+ "resolved": "https://registry.npmjs.org/vue-loader/-/vue-loader-15.9.6.tgz",
+ "integrity": "sha512-j0cqiLzwbeImIC6nVIby2o/ABAWhlppyL/m5oJ67R5MloP0hj/DtFgb0Zmq3J9CG7AJ+AXIvHVnJAPBvrLyuDg==",
+ "requires": {
+ "@vue/component-compiler-utils": "^3.1.0",
+ "hash-sum": "^1.0.2",
+ "loader-utils": "^1.1.0",
+ "vue-hot-reload-api": "^2.3.0",
+ "vue-style-loader": "^4.1.0"
+ }
+ },
+ "vue-router": {
+ "version": "3.5.1",
+ "resolved": "https://registry.npmjs.org/vue-router/-/vue-router-3.5.1.tgz",
+ "integrity": "sha512-RRQNLT8Mzr8z7eL4p7BtKvRaTSGdCbTy2+Mm5HTJvLGYSSeG9gDzNasJPP/yOYKLy+/cLG/ftrqq5fvkFwBJEw=="
+ },
+ "vue-server-renderer": {
+ "version": "2.6.12",
+ "resolved": "https://registry.npmjs.org/vue-server-renderer/-/vue-server-renderer-2.6.12.tgz",
+ "integrity": "sha512-3LODaOsnQx7iMFTBLjki8xSyOxhCtbZ+nQie0wWY4iOVeEtTg1a3YQAjd82WvKxrWHHTshjvLb7OXMc2/dYuxw==",
+ "requires": {
+ "chalk": "^1.1.3",
+ "hash-sum": "^1.0.2",
+ "he": "^1.1.0",
+ "lodash.template": "^4.5.0",
+ "lodash.uniq": "^4.5.0",
+ "resolve": "^1.2.0",
+ "serialize-javascript": "^3.1.0",
+ "source-map": "0.5.6"
+ },
+ "dependencies": {
+ "ansi-regex": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-2.1.1.tgz",
+ "integrity": "sha1-w7M6te42DYbg5ijwRorn7yfWVN8="
+ },
+ "ansi-styles": {
+ "version": "2.2.1",
+ "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-2.2.1.tgz",
+ "integrity": "sha1-tDLdM1i2NM914eRmQ2gkBTPB3b4="
+ },
+ "chalk": {
+ "version": "1.1.3",
+ "resolved": "https://registry.npmjs.org/chalk/-/chalk-1.1.3.tgz",
+ "integrity": "sha1-qBFcVeSnAv5NFQq9OHKCKn4J/Jg=",
+ "requires": {
+ "ansi-styles": "^2.2.1",
+ "escape-string-regexp": "^1.0.2",
+ "has-ansi": "^2.0.0",
+ "strip-ansi": "^3.0.0",
+ "supports-color": "^2.0.0"
+ }
+ },
+ "serialize-javascript": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-3.1.0.tgz",
+ "integrity": "sha512-JIJT1DGiWmIKhzRsG91aS6Ze4sFUrYbltlkg2onR5OrnNM02Kl/hnY/T4FN2omvyeBbQmMJv+K4cPOpGzOTFBg==",
+ "requires": {
+ "randombytes": "^2.1.0"
+ }
+ },
+ "source-map": {
+ "version": "0.5.6",
+ "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.6.tgz",
+ "integrity": "sha1-dc449SvwczxafwwRjYEzSiu19BI="
+ },
+ "strip-ansi": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-3.0.1.tgz",
+ "integrity": "sha1-ajhfuIU9lS1f8F0Oiq+UJ43GPc8=",
+ "requires": {
+ "ansi-regex": "^2.0.0"
+ }
+ },
+ "supports-color": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-2.0.0.tgz",
+ "integrity": "sha1-U10EXOa2Nj+kARcIRimZXp3zJMc="
+ }
+ }
+ },
+ "vue-style-loader": {
+ "version": "4.1.3",
+ "resolved": "https://registry.npmjs.org/vue-style-loader/-/vue-style-loader-4.1.3.tgz",
+ "integrity": "sha512-sFuh0xfbtpRlKfm39ss/ikqs9AbKCoXZBpHeVZ8Tx650o0k0q/YCM7FRvigtxpACezfq6af+a7JeqVTWvncqDg==",
+ "requires": {
+ "hash-sum": "^1.0.2",
+ "loader-utils": "^1.0.2"
+ }
+ },
+ "vue-template-compiler": {
+ "version": "2.6.12",
+ "resolved": "https://registry.npmjs.org/vue-template-compiler/-/vue-template-compiler-2.6.12.tgz",
+ "integrity": "sha512-OzzZ52zS41YUbkCBfdXShQTe69j1gQDZ9HIX8miuC9C3rBCk9wIRjLiZZLrmX9V+Ftq/YEyv1JaVr5Y/hNtByg==",
+ "requires": {
+ "de-indent": "^1.0.2",
+ "he": "^1.1.0"
+ }
+ },
+ "vue-template-es2015-compiler": {
+ "version": "1.9.1",
+ "resolved": "https://registry.npmjs.org/vue-template-es2015-compiler/-/vue-template-es2015-compiler-1.9.1.tgz",
+ "integrity": "sha512-4gDntzrifFnCEvyoO8PqyJDmguXgVPxKiIxrBKjIowvL9l+N66196+72XVYR8BBf1Uv1Fgt3bGevJ+sEmxfZzw=="
+ },
+ "vuepress": {
+ "version": "1.8.2",
+ "resolved": "https://registry.npmjs.org/vuepress/-/vuepress-1.8.2.tgz",
+ "integrity": "sha512-BU1lUDwsA3ghf7a9ga4dsf0iTc++Z/l7BR1kUagHWVBHw7HNRgRDfAZBDDQXhllMILVToIxaTifpne9mSi94OA==",
+ "requires": {
+ "@vuepress/core": "1.8.2",
+ "@vuepress/theme-default": "1.8.2",
+ "cac": "^6.5.6",
+ "envinfo": "^7.2.0",
+ "opencollective-postinstall": "^2.0.2",
+ "update-notifier": "^4.0.0"
+ }
+ },
+ "vuepress-html-webpack-plugin": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/vuepress-html-webpack-plugin/-/vuepress-html-webpack-plugin-3.2.0.tgz",
+ "integrity": "sha512-BebAEl1BmWlro3+VyDhIOCY6Gef2MCBllEVAP3NUAtMguiyOwo/dClbwJ167WYmcxHJKLl7b0Chr9H7fpn1d0A==",
+ "requires": {
+ "html-minifier": "^3.2.3",
+ "loader-utils": "^0.2.16",
+ "lodash": "^4.17.3",
+ "pretty-error": "^2.0.2",
+ "tapable": "^1.0.0",
+ "toposort": "^1.0.0",
+ "util.promisify": "1.0.0"
+ },
+ "dependencies": {
+ "big.js": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/big.js/-/big.js-3.2.0.tgz",
+ "integrity": "sha512-+hN/Zh2D08Mx65pZ/4g5bsmNiZUuChDiQfTUQ7qJr4/kuopCr88xZsAXv6mBoZEsUI4OuGHlX59qE94K2mMW8Q=="
+ },
+ "emojis-list": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/emojis-list/-/emojis-list-2.1.0.tgz",
+ "integrity": "sha1-TapNnbAPmBmIDHn6RXrlsJof04k="
+ },
+ "json5": {
+ "version": "0.5.1",
+ "resolved": "https://registry.npmjs.org/json5/-/json5-0.5.1.tgz",
+ "integrity": "sha1-Hq3nrMASA0rYTiOWdn6tn6VJWCE="
+ },
+ "loader-utils": {
+ "version": "0.2.17",
+ "resolved": "https://registry.npmjs.org/loader-utils/-/loader-utils-0.2.17.tgz",
+ "integrity": "sha1-+G5jdNQyBabmxg6RlvF8Apm/s0g=",
+ "requires": {
+ "big.js": "^3.1.3",
+ "emojis-list": "^2.0.0",
+ "json5": "^0.5.0",
+ "object-assign": "^4.0.1"
+ }
+ },
+ "util.promisify": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/util.promisify/-/util.promisify-1.0.0.tgz",
+ "integrity": "sha512-i+6qA2MPhvoKLuxnJNpXAGhg7HphQOSUq2LKMZD0m15EiskXUkMvKdF4Uui0WYeCUGea+o2cw/ZuwehtfsrNkA==",
+ "requires": {
+ "define-properties": "^1.1.2",
+ "object.getownpropertydescriptors": "^2.0.3"
+ }
+ }
+ }
+ },
+ "vuepress-plugin-container": {
+ "version": "2.1.5",
+ "resolved": "https://registry.npmjs.org/vuepress-plugin-container/-/vuepress-plugin-container-2.1.5.tgz",
+ "integrity": "sha512-TQrDX/v+WHOihj3jpilVnjXu9RcTm6m8tzljNJwYhxnJUW0WWQ0hFLcDTqTBwgKIFdEiSxVOmYE+bJX/sq46MA==",
+ "requires": {
+ "@vuepress/shared-utils": "^1.2.0",
+ "markdown-it-container": "^2.0.0"
+ }
+ },
+ "vuepress-plugin-google-tag-manager": {
+ "version": "0.0.5",
+ "resolved": "https://registry.npmjs.org/vuepress-plugin-google-tag-manager/-/vuepress-plugin-google-tag-manager-0.0.5.tgz",
+ "integrity": "sha512-Hm1GNDdNmc4Vs9c3OMfTtHicB/oZWNCmzMFPdlOObVN1OjizIjImdm+LZIwiVKVndT2TQ4BPhMx7HQkovmD2Lg=="
+ },
+ "vuepress-plugin-sitemap": {
+ "version": "2.3.1",
+ "resolved": "https://registry.npmjs.org/vuepress-plugin-sitemap/-/vuepress-plugin-sitemap-2.3.1.tgz",
+ "integrity": "sha512-n+8lbukhrKrsI9H/EX0EBgkE1pn85LAQFvQ5dIvrZP4Kz6JxPOPPNTQmZMhahQV1tXbLZQCEN7A1WZH4x+arJQ==",
+ "requires": {
+ "sitemap": "^3.0.0"
+ }
+ },
+ "vuepress-plugin-smooth-scroll": {
+ "version": "0.0.3",
+ "resolved": "https://registry.npmjs.org/vuepress-plugin-smooth-scroll/-/vuepress-plugin-smooth-scroll-0.0.3.tgz",
+ "integrity": "sha512-qsQkDftLVFLe8BiviIHaLV0Ea38YLZKKonDGsNQy1IE0wllFpFIEldWD8frWZtDFdx6b/O3KDMgVQ0qp5NjJCg==",
+ "requires": {
+ "smoothscroll-polyfill": "^0.4.3"
+ }
+ },
+ "vuepress-theme-cosmos": {
+ "version": "1.0.182",
+ "resolved": "https://registry.npmjs.org/vuepress-theme-cosmos/-/vuepress-theme-cosmos-1.0.182.tgz",
+ "integrity": "sha512-Mc1ZOsSqLGgbB9xEXsx5QkHUBkKXOoDgkjrp5iX+fwmM4TCmR4MWbTlKpEzfzsxZ1DuixtwVkv0MT+eNvD2Lfw==",
+ "requires": {
+ "@cosmos-ui/vue": "^0.35.0",
+ "@vuepress/plugin-google-analytics": "1.7.1",
+ "algoliasearch": "^4.2.0",
+ "axios": "^0.21.0",
+ "cheerio": "^1.0.0-rc.3",
+ "clipboard-copy": "^3.1.0",
+ "entities": "2.1.0",
+ "esm": "^3.2.25",
+ "gray-matter": "^4.0.2",
+ "hotkeys-js": "3.8.1",
+ "jsonp": "^0.2.1",
+ "markdown-it": "^12.0.0",
+ "markdown-it-attrs": "^3.0.3",
+ "prismjs": "^1.22.0",
+ "pug": "^3.0.1",
+ "pug-plain-loader": "^1.0.0",
+ "stylus": "^0.54.8",
+ "stylus-loader": "^3.0.2",
+ "tiny-cookie": "^2.3.2",
+ "v-runtime-template": "^1.10.0",
+ "vuepress": "^1.5.4",
+ "vuepress-plugin-google-tag-manager": "0.0.5",
+ "vuepress-plugin-sitemap": "^2.3.1"
+ }
+ },
+ "watchpack": {
+ "version": "1.7.5",
+ "resolved": "https://registry.npmjs.org/watchpack/-/watchpack-1.7.5.tgz",
+ "integrity": "sha512-9P3MWk6SrKjHsGkLT2KHXdQ/9SNkyoJbabxnKOoJepsvJjJG8uYTR3yTPxPQvNDI3w4Nz1xnE0TLHK4RIVe/MQ==",
+ "requires": {
+ "chokidar": "^3.4.1",
+ "graceful-fs": "^4.1.2",
+ "neo-async": "^2.5.0",
+ "watchpack-chokidar2": "^2.0.1"
+ },
+ "dependencies": {
+ "anymatch": {
+ "version": "3.1.2",
+ "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.2.tgz",
+ "integrity": "sha512-P43ePfOAIupkguHUycrc4qJ9kz8ZiuOUijaETwX7THt0Y/GNK7v0aa8rY816xWjZ7rJdA5XdMcpVFTKMq+RvWg==",
+ "optional": true,
+ "requires": {
+ "normalize-path": "^3.0.0",
+ "picomatch": "^2.0.4"
+ }
+ },
+ "binary-extensions": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.2.0.tgz",
+ "integrity": "sha512-jDctJ/IVQbZoJykoeHbhXpOlNBqGNcwXJKJog42E5HDPUwQTSdjCHdihjj0DlnheQ7blbT6dHOafNAiS8ooQKA==",
+ "optional": true
+ },
+ "braces": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz",
+ "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==",
+ "optional": true,
+ "requires": {
+ "fill-range": "^7.0.1"
+ }
+ },
+ "chokidar": {
+ "version": "3.5.1",
+ "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.5.1.tgz",
+ "integrity": "sha512-9+s+Od+W0VJJzawDma/gvBNQqkTiqYTWLuZoyAsivsI4AaWTCzHG06/TMjsf1cYe9Cb97UCEhjz7HvnPk2p/tw==",
+ "optional": true,
+ "requires": {
+ "anymatch": "~3.1.1",
+ "braces": "~3.0.2",
+ "fsevents": "~2.3.1",
+ "glob-parent": "~5.1.0",
+ "is-binary-path": "~2.1.0",
+ "is-glob": "~4.0.1",
+ "normalize-path": "~3.0.0",
+ "readdirp": "~3.5.0"
+ }
+ },
+ "fill-range": {
+ "version": "7.0.1",
+ "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz",
+ "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==",
+ "optional": true,
+ "requires": {
+ "to-regex-range": "^5.0.1"
+ }
+ },
+ "fsevents": {
+ "version": "2.3.2",
+ "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.2.tgz",
+ "integrity": "sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==",
+ "optional": true
+ },
+ "glob-parent": {
+ "version": "5.1.2",
+ "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz",
+ "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==",
+ "optional": true,
+ "requires": {
+ "is-glob": "^4.0.1"
+ }
+ },
+ "is-binary-path": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz",
+ "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==",
+ "optional": true,
+ "requires": {
+ "binary-extensions": "^2.0.0"
+ }
+ },
+ "is-number": {
+ "version": "7.0.0",
+ "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz",
+ "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==",
+ "optional": true
+ },
+ "readdirp": {
+ "version": "3.5.0",
+ "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.5.0.tgz",
+ "integrity": "sha512-cMhu7c/8rdhkHXWsY+osBhfSy0JikwpHK/5+imo+LpeasTF8ouErHrlYkwT0++njiyuDvc7OFY5T3ukvZ8qmFQ==",
+ "optional": true,
+ "requires": {
+ "picomatch": "^2.2.1"
+ }
+ },
+ "to-regex-range": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz",
+ "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==",
+ "optional": true,
+ "requires": {
+ "is-number": "^7.0.0"
+ }
+ }
+ }
+ },
+ "watchpack-chokidar2": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/watchpack-chokidar2/-/watchpack-chokidar2-2.0.1.tgz",
+ "integrity": "sha512-nCFfBIPKr5Sh61s4LPpy1Wtfi0HE8isJ3d2Yb5/Ppw2P2B/3eVSEBjKfN0fmHJSK14+31KwMKmcrzs2GM4P0Ww==",
+ "optional": true,
+ "requires": {
+ "chokidar": "^2.1.8"
+ }
+ },
+ "wbuf": {
+ "version": "1.7.3",
+ "resolved": "https://registry.npmjs.org/wbuf/-/wbuf-1.7.3.tgz",
+ "integrity": "sha512-O84QOnr0icsbFGLS0O3bI5FswxzRr8/gHwWkDlQFskhSPryQXvrTMxjxGP4+iWYoauLoBvfDpkrOauZ+0iZpDA==",
+ "requires": {
+ "minimalistic-assert": "^1.0.0"
+ }
+ },
+ "webidl-conversions": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-4.0.2.tgz",
+ "integrity": "sha512-YQ+BmxuTgd6UXZW3+ICGfyqRyHXVlD5GtQr5+qjiNW7bF0cqrzX500HVXPBOvgXb5YnzDd+h0zqyv61KUD7+Sg=="
+ },
+ "webpack": {
+ "version": "4.46.0",
+ "resolved": "https://registry.npmjs.org/webpack/-/webpack-4.46.0.tgz",
+ "integrity": "sha512-6jJuJjg8znb/xRItk7bkT0+Q7AHCYjjFnvKIWQPkNIOyRqoCGvkOs0ipeQzrqz4l5FtN5ZI/ukEHroeX/o1/5Q==",
+ "requires": {
+ "@webassemblyjs/ast": "1.9.0",
+ "@webassemblyjs/helper-module-context": "1.9.0",
+ "@webassemblyjs/wasm-edit": "1.9.0",
+ "@webassemblyjs/wasm-parser": "1.9.0",
+ "acorn": "^6.4.1",
+ "ajv": "^6.10.2",
+ "ajv-keywords": "^3.4.1",
+ "chrome-trace-event": "^1.0.2",
+ "enhanced-resolve": "^4.5.0",
+ "eslint-scope": "^4.0.3",
+ "json-parse-better-errors": "^1.0.2",
+ "loader-runner": "^2.4.0",
+ "loader-utils": "^1.2.3",
+ "memory-fs": "^0.4.1",
+ "micromatch": "^3.1.10",
+ "mkdirp": "^0.5.3",
+ "neo-async": "^2.6.1",
+ "node-libs-browser": "^2.2.1",
+ "schema-utils": "^1.0.0",
+ "tapable": "^1.1.3",
+ "terser-webpack-plugin": "^1.4.3",
+ "watchpack": "^1.7.4",
+ "webpack-sources": "^1.4.1"
+ },
+ "dependencies": {
+ "acorn": {
+ "version": "6.4.2",
+ "resolved": "https://registry.npmjs.org/acorn/-/acorn-6.4.2.tgz",
+ "integrity": "sha512-XtGIhXwF8YM8bJhGxG5kXgjkEuNGLTkoYqVE+KMR+aspr4KGYmKYg7yUe3KghyQ9yheNwLnjmzh/7+gfDBmHCQ=="
+ },
+ "mkdirp": {
+ "version": "0.5.5",
+ "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz",
+ "integrity": "sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==",
+ "requires": {
+ "minimist": "^1.2.5"
+ }
+ },
+ "schema-utils": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-1.0.0.tgz",
+ "integrity": "sha512-i27Mic4KovM/lnGsy8whRCHhc7VicJajAjTrYg11K9zfZXnYIt4k5F+kZkwjnrhKzLic/HLU4j11mjsz2G/75g==",
+ "requires": {
+ "ajv": "^6.1.0",
+ "ajv-errors": "^1.0.0",
+ "ajv-keywords": "^3.1.0"
+ }
+ }
+ }
+ },
+ "webpack-chain": {
+ "version": "6.5.1",
+ "resolved": "https://registry.npmjs.org/webpack-chain/-/webpack-chain-6.5.1.tgz",
+ "integrity": "sha512-7doO/SRtLu8q5WM0s7vPKPWX580qhi0/yBHkOxNkv50f6qB76Zy9o2wRTrrPULqYTvQlVHuvbA8v+G5ayuUDsA==",
+ "requires": {
+ "deepmerge": "^1.5.2",
+ "javascript-stringify": "^2.0.1"
+ }
+ },
+ "webpack-dev-middleware": {
+ "version": "3.7.3",
+ "resolved": "https://registry.npmjs.org/webpack-dev-middleware/-/webpack-dev-middleware-3.7.3.tgz",
+ "integrity": "sha512-djelc/zGiz9nZj/U7PTBi2ViorGJXEWo/3ltkPbDyxCXhhEXkW0ce99falaok4TPj+AsxLiXJR0EBOb0zh9fKQ==",
+ "requires": {
+ "memory-fs": "^0.4.1",
+ "mime": "^2.4.4",
+ "mkdirp": "^0.5.1",
+ "range-parser": "^1.2.1",
+ "webpack-log": "^2.0.0"
+ },
+ "dependencies": {
+ "mkdirp": {
+ "version": "0.5.5",
+ "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz",
+ "integrity": "sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==",
+ "requires": {
+ "minimist": "^1.2.5"
+ }
+ }
+ }
+ },
+ "webpack-dev-server": {
+ "version": "3.11.2",
+ "resolved": "https://registry.npmjs.org/webpack-dev-server/-/webpack-dev-server-3.11.2.tgz",
+ "integrity": "sha512-A80BkuHRQfCiNtGBS1EMf2ChTUs0x+B3wGDFmOeT4rmJOHhHTCH2naNxIHhmkr0/UillP4U3yeIyv1pNp+QDLQ==",
+ "requires": {
+ "ansi-html": "0.0.7",
+ "bonjour": "^3.5.0",
+ "chokidar": "^2.1.8",
+ "compression": "^1.7.4",
+ "connect-history-api-fallback": "^1.6.0",
+ "debug": "^4.1.1",
+ "del": "^4.1.1",
+ "express": "^4.17.1",
+ "html-entities": "^1.3.1",
+ "http-proxy-middleware": "0.19.1",
+ "import-local": "^2.0.0",
+ "internal-ip": "^4.3.0",
+ "ip": "^1.1.5",
+ "is-absolute-url": "^3.0.3",
+ "killable": "^1.0.1",
+ "loglevel": "^1.6.8",
+ "opn": "^5.5.0",
+ "p-retry": "^3.0.1",
+ "portfinder": "^1.0.26",
+ "schema-utils": "^1.0.0",
+ "selfsigned": "^1.10.8",
+ "semver": "^6.3.0",
+ "serve-index": "^1.9.1",
+ "sockjs": "^0.3.21",
+ "sockjs-client": "^1.5.0",
+ "spdy": "^4.0.2",
+ "strip-ansi": "^3.0.1",
+ "supports-color": "^6.1.0",
+ "url": "^0.11.0",
+ "webpack-dev-middleware": "^3.7.2",
+ "webpack-log": "^2.0.0",
+ "ws": "^6.2.1",
+ "yargs": "^13.3.2"
+ },
+ "dependencies": {
+ "ansi-regex": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-2.1.1.tgz",
+ "integrity": "sha1-w7M6te42DYbg5ijwRorn7yfWVN8="
+ },
+ "debug": {
+ "version": "4.3.1",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz",
+ "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==",
+ "requires": {
+ "ms": "2.1.2"
+ }
+ },
+ "is-absolute-url": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/is-absolute-url/-/is-absolute-url-3.0.3.tgz",
+ "integrity": "sha512-opmNIX7uFnS96NtPmhWQgQx6/NYFgsUXYMllcfzwWKUMwfo8kku1TvE6hkNcH+Q1ts5cMVrsY7j0bxXQDciu9Q=="
+ },
+ "ms": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz",
+ "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w=="
+ },
+ "schema-utils": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-1.0.0.tgz",
+ "integrity": "sha512-i27Mic4KovM/lnGsy8whRCHhc7VicJajAjTrYg11K9zfZXnYIt4k5F+kZkwjnrhKzLic/HLU4j11mjsz2G/75g==",
+ "requires": {
+ "ajv": "^6.1.0",
+ "ajv-errors": "^1.0.0",
+ "ajv-keywords": "^3.1.0"
+ }
+ },
+ "strip-ansi": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-3.0.1.tgz",
+ "integrity": "sha1-ajhfuIU9lS1f8F0Oiq+UJ43GPc8=",
+ "requires": {
+ "ansi-regex": "^2.0.0"
+ }
+ },
+ "supports-color": {
+ "version": "6.1.0",
+ "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-6.1.0.tgz",
+ "integrity": "sha512-qe1jfm1Mg7Nq/NSh6XE24gPXROEVsWHxC1LIx//XNlD9iw7YZQGjZNjYN7xGaEG6iKdA8EtNFW6R0gjnVXp+wQ==",
+ "requires": {
+ "has-flag": "^3.0.0"
+ }
+ }
+ }
+ },
+ "webpack-log": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/webpack-log/-/webpack-log-2.0.0.tgz",
+ "integrity": "sha512-cX8G2vR/85UYG59FgkoMamwHUIkSSlV3bBMRsbxVXVUk2j6NleCKjQ/WE9eYg9WY4w25O9w8wKP4rzNZFmUcUg==",
+ "requires": {
+ "ansi-colors": "^3.0.0",
+ "uuid": "^3.3.2"
+ }
+ },
+ "webpack-merge": {
+ "version": "4.2.2",
+ "resolved": "https://registry.npmjs.org/webpack-merge/-/webpack-merge-4.2.2.tgz",
+ "integrity": "sha512-TUE1UGoTX2Cd42j3krGYqObZbOD+xF7u28WB7tfUordytSjbWTIjK/8V0amkBfTYN4/pB/GIDlJZZ657BGG19g==",
+ "requires": {
+ "lodash": "^4.17.15"
+ }
+ },
+ "webpack-sources": {
+ "version": "1.4.3",
+ "resolved": "https://registry.npmjs.org/webpack-sources/-/webpack-sources-1.4.3.tgz",
+ "integrity": "sha512-lgTS3Xhv1lCOKo7SA5TjKXMjpSM4sBjNV5+q2bqesbSPs5FjGmU6jjtBSkX9b4qW87vDIsCIlUPOEhbZrMdjeQ==",
+ "requires": {
+ "source-list-map": "^2.0.0",
+ "source-map": "~0.6.1"
+ },
+ "dependencies": {
+ "source-map": {
+ "version": "0.6.1",
+ "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz",
+ "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g=="
+ }
+ }
+ },
+ "webpackbar": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/webpackbar/-/webpackbar-3.2.0.tgz",
+ "integrity": "sha512-PC4o+1c8gWWileUfwabe0gqptlXUDJd5E0zbpr2xHP1VSOVlZVPBZ8j6NCR8zM5zbKdxPhctHXahgpNK1qFDPw==",
+ "requires": {
+ "ansi-escapes": "^4.1.0",
+ "chalk": "^2.4.1",
+ "consola": "^2.6.0",
+ "figures": "^3.0.0",
+ "pretty-time": "^1.1.0",
+ "std-env": "^2.2.1",
+ "text-table": "^0.2.0",
+ "wrap-ansi": "^5.1.0"
+ }
+ },
+ "websocket-driver": {
+ "version": "0.7.4",
+ "resolved": "https://registry.npmjs.org/websocket-driver/-/websocket-driver-0.7.4.tgz",
+ "integrity": "sha512-b17KeDIQVjvb0ssuSDF2cYXSg2iztliJ4B9WdsuB6J952qCPKmnVq4DyW5motImXHDC1cBT/1UezrJVsKw5zjg==",
+ "requires": {
+ "http-parser-js": ">=0.5.1",
+ "safe-buffer": ">=5.1.0",
+ "websocket-extensions": ">=0.1.1"
+ }
+ },
+ "websocket-extensions": {
+ "version": "0.1.4",
+ "resolved": "https://registry.npmjs.org/websocket-extensions/-/websocket-extensions-0.1.4.tgz",
+ "integrity": "sha512-OqedPIGOfsDlo31UNwYbCFMSaO9m9G/0faIHj5/dZFDMFqPTcx6UwqyOy3COEaEOg/9VsGIpdqn62W5KhoKSpg=="
+ },
+ "whatwg-url": {
+ "version": "7.1.0",
+ "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-7.1.0.tgz",
+ "integrity": "sha512-WUu7Rg1DroM7oQvGWfOiAK21n74Gg+T4elXEQYkOhtyLeWiJFoOGLXPKI/9gzIie9CtwVLm8wtw6YJdKyxSjeg==",
+ "requires": {
+ "lodash.sortby": "^4.7.0",
+ "tr46": "^1.0.1",
+ "webidl-conversions": "^4.0.2"
+ }
+ },
+ "when": {
+ "version": "3.6.4",
+ "resolved": "https://registry.npmjs.org/when/-/when-3.6.4.tgz",
+ "integrity": "sha1-RztRfsFZ4rhQBUl6E5g/CVQS404="
+ },
+ "which": {
+ "version": "1.3.1",
+ "resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz",
+ "integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==",
+ "requires": {
+ "isexe": "^2.0.0"
+ }
+ },
+ "which-boxed-primitive": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/which-boxed-primitive/-/which-boxed-primitive-1.0.2.tgz",
+ "integrity": "sha512-bwZdv0AKLpplFY2KZRX6TvyuN7ojjr7lwkg6ml0roIy9YeuSr7JS372qlNW18UQYzgYK9ziGcerWqZOmEn9VNg==",
+ "requires": {
+ "is-bigint": "^1.0.1",
+ "is-boolean-object": "^1.1.0",
+ "is-number-object": "^1.0.4",
+ "is-string": "^1.0.5",
+ "is-symbol": "^1.0.3"
+ }
+ },
+ "which-module": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/which-module/-/which-module-2.0.0.tgz",
+ "integrity": "sha1-2e8H3Od7mQK4o6j6SzHD4/fm6Ho="
+ },
+ "widest-line": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/widest-line/-/widest-line-3.1.0.tgz",
+ "integrity": "sha512-NsmoXalsWVDMGupxZ5R08ka9flZjjiLvHVAWYOKtiKM8ujtZWr9cRffak+uSE48+Ob8ObalXpwyeUiyDD6QFgg==",
+ "requires": {
+ "string-width": "^4.0.0"
+ }
+ },
+ "with": {
+ "version": "7.0.2",
+ "resolved": "https://registry.npmjs.org/with/-/with-7.0.2.tgz",
+ "integrity": "sha512-RNGKj82nUPg3g5ygxkQl0R937xLyho1J24ItRCBTr/m1YnZkzJy1hUiHUJrc/VlsDQzsCnInEGSg3bci0Lmd4w==",
+ "requires": {
+ "@babel/parser": "^7.9.6",
+ "@babel/types": "^7.9.6",
+ "assert-never": "^1.2.1",
+ "babel-walk": "3.0.0-canary-5"
+ }
+ },
+ "worker-farm": {
+ "version": "1.7.0",
+ "resolved": "https://registry.npmjs.org/worker-farm/-/worker-farm-1.7.0.tgz",
+ "integrity": "sha512-rvw3QTZc8lAxyVrqcSGVm5yP/IJ2UcB3U0graE3LCFoZ0Yn2x4EoVSqJKdB/T5M+FLcRPjz4TDacRf3OCfNUzw==",
+ "requires": {
+ "errno": "~0.1.7"
+ }
+ },
+ "wrap-ansi": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-5.1.0.tgz",
+ "integrity": "sha512-QC1/iN/2/RPVJ5jYK8BGttj5z83LmSKmvbvrXPNCLZSEb32KKVDJDl/MOt2N01qU2H/FkzEa9PKto1BqDjtd7Q==",
+ "requires": {
+ "ansi-styles": "^3.2.0",
+ "string-width": "^3.0.0",
+ "strip-ansi": "^5.0.0"
+ },
+ "dependencies": {
+ "ansi-regex": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.0.tgz",
+ "integrity": "sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg=="
+ },
+ "emoji-regex": {
+ "version": "7.0.3",
+ "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-7.0.3.tgz",
+ "integrity": "sha512-CwBLREIQ7LvYFB0WyRvwhq5N5qPhc6PMjD6bYggFlI5YyDgl+0vxq5VHbMOFqLg7hfWzmu8T5Z1QofhmTIhItA=="
+ },
+ "is-fullwidth-code-point": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz",
+ "integrity": "sha1-o7MKXE8ZkYMWeqq5O+764937ZU8="
+ },
+ "string-width": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/string-width/-/string-width-3.1.0.tgz",
+ "integrity": "sha512-vafcv6KjVZKSgz06oM/H6GDBrAtz8vdhQakGjFIvNrHA6y3HCF1CInLy+QLq8dTJPQ1b+KDUqDFctkdRW44e1w==",
+ "requires": {
+ "emoji-regex": "^7.0.1",
+ "is-fullwidth-code-point": "^2.0.0",
+ "strip-ansi": "^5.1.0"
+ }
+ },
+ "strip-ansi": {
+ "version": "5.2.0",
+ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-5.2.0.tgz",
+ "integrity": "sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA==",
+ "requires": {
+ "ansi-regex": "^4.1.0"
+ }
+ }
+ }
+ },
+ "wrappy": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz",
+ "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8="
+ },
+ "write-file-atomic": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-3.0.3.tgz",
+ "integrity": "sha512-AvHcyZ5JnSfq3ioSyjrBkH9yW4m7Ayk8/9My/DD9onKeu/94fwrMocemO2QAJFAlnnDN+ZDS+ZjAR5ua1/PV/Q==",
+ "requires": {
+ "imurmurhash": "^0.1.4",
+ "is-typedarray": "^1.0.0",
+ "signal-exit": "^3.0.2",
+ "typedarray-to-buffer": "^3.1.5"
+ }
+ },
+ "ws": {
+ "version": "6.2.1",
+ "resolved": "https://registry.npmjs.org/ws/-/ws-6.2.1.tgz",
+ "integrity": "sha512-GIyAXC2cB7LjvpgMt9EKS2ldqr0MTrORaleiOno6TweZ6r3TKtoFQWay/2PceJ3RuBasOHzXNn5Lrw1X0bEjqA==",
+ "requires": {
+ "async-limiter": "~1.0.0"
+ }
+ },
+ "xdg-basedir": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/xdg-basedir/-/xdg-basedir-4.0.0.tgz",
+ "integrity": "sha512-PSNhEJDejZYV7h50BohL09Er9VaIefr2LMAf3OEmpCkjOi34eYyQYAXUTjEQtZJTKcF0E2UKTh+osDLsgNim9Q=="
+ },
+ "xmlbuilder": {
+ "version": "13.0.2",
+ "resolved": "https://registry.npmjs.org/xmlbuilder/-/xmlbuilder-13.0.2.tgz",
+ "integrity": "sha512-Eux0i2QdDYKbdbA6AM6xE4m6ZTZr4G4xF9kahI2ukSEMCzwce2eX9WlTI5J3s+NU7hpasFsr8hWIONae7LluAQ=="
+ },
+ "xtend": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz",
+ "integrity": "sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ=="
+ },
+ "y18n": {
+ "version": "4.0.3",
+ "resolved": "https://registry.npmjs.org/y18n/-/y18n-4.0.3.tgz",
+ "integrity": "sha512-JKhqTOwSrqNA1NY5lSztJ1GrBiUodLMmIZuLiDaMRJ+itFd+ABVE8XBjOvIWL+rSqNDC74LCSFmlb/U4UZ4hJQ=="
+ },
+ "yallist": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz",
+ "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g=="
+ },
+ "yargs": {
+ "version": "13.3.2",
+ "resolved": "https://registry.npmjs.org/yargs/-/yargs-13.3.2.tgz",
+ "integrity": "sha512-AX3Zw5iPruN5ie6xGRIDgqkT+ZhnRlZMLMHAs8tg7nRruy2Nb+i5o9bwghAogtM08q1dpr2LVoS8KSTMYpWXUw==",
+ "requires": {
+ "cliui": "^5.0.0",
+ "find-up": "^3.0.0",
+ "get-caller-file": "^2.0.1",
+ "require-directory": "^2.1.1",
+ "require-main-filename": "^2.0.0",
+ "set-blocking": "^2.0.0",
+ "string-width": "^3.0.0",
+ "which-module": "^2.0.0",
+ "y18n": "^4.0.0",
+ "yargs-parser": "^13.1.2"
+ },
+ "dependencies": {
+ "ansi-regex": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.0.tgz",
+ "integrity": "sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg=="
+ },
+ "emoji-regex": {
+ "version": "7.0.3",
+ "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-7.0.3.tgz",
+ "integrity": "sha512-CwBLREIQ7LvYFB0WyRvwhq5N5qPhc6PMjD6bYggFlI5YyDgl+0vxq5VHbMOFqLg7hfWzmu8T5Z1QofhmTIhItA=="
+ },
+ "find-up": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz",
+ "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==",
+ "requires": {
+ "locate-path": "^3.0.0"
+ }
+ },
+ "is-fullwidth-code-point": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz",
+ "integrity": "sha1-o7MKXE8ZkYMWeqq5O+764937ZU8="
+ },
+ "locate-path": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz",
+ "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==",
+ "requires": {
+ "p-locate": "^3.0.0",
+ "path-exists": "^3.0.0"
+ }
+ },
+ "p-locate": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz",
+ "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==",
+ "requires": {
+ "p-limit": "^2.0.0"
+ }
+ },
+ "path-exists": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz",
+ "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU="
+ },
+ "string-width": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/string-width/-/string-width-3.1.0.tgz",
+ "integrity": "sha512-vafcv6KjVZKSgz06oM/H6GDBrAtz8vdhQakGjFIvNrHA6y3HCF1CInLy+QLq8dTJPQ1b+KDUqDFctkdRW44e1w==",
+ "requires": {
+ "emoji-regex": "^7.0.1",
+ "is-fullwidth-code-point": "^2.0.0",
+ "strip-ansi": "^5.1.0"
+ }
+ },
+ "strip-ansi": {
+ "version": "5.2.0",
+ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-5.2.0.tgz",
+ "integrity": "sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA==",
+ "requires": {
+ "ansi-regex": "^4.1.0"
+ }
+ }
+ }
+ },
+ "yargs-parser": {
+ "version": "13.1.2",
+ "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-13.1.2.tgz",
+ "integrity": "sha512-3lbsNRf/j+A4QuSZfDRA7HRSfWrzO0YjqTJd5kjAq37Zep1CEgaYmrH9Q3GwPiB9cHyd1Y1UwggGhJGoxipbzg==",
+ "requires": {
+ "camelcase": "^5.0.0",
+ "decamelize": "^1.2.0"
+ },
+ "dependencies": {
+ "camelcase": {
+ "version": "5.3.1",
+ "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz",
+ "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg=="
+ }
+ }
+ },
+ "zepto": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/zepto/-/zepto-1.2.0.tgz",
+ "integrity": "sha1-4Se9nmb9hGvl6rSME5SIL3wOT5g="
+ }
+ }
+}
diff --git a/docs/package.json b/docs/package.json
new file mode 100644
index 00000000..0770590f
--- /dev/null
+++ b/docs/package.json
@@ -0,0 +1,18 @@
+{
+ "name": "docs",
+ "version": "1.0.0",
+ "description": "IBC-GO Documentation",
+ "main": "index.js",
+ "scripts": {
+ "serve": "trap 'exit 0' SIGINT; vuepress dev --no-cache",
+ "postserve": "./post.sh",
+ "prebuild": "./pre.sh",
+ "build": "trap 'exit 0' SIGINT; vuepress build --no-cache",
+ "postbuild": "./post.sh"
+ },
+ "author": "",
+ "license": "ISC",
+ "dependencies": {
+ "vuepress-theme-cosmos": "^1.0.182"
+ }
+}
diff --git a/docs/post.sh b/docs/post.sh
new file mode 100755
index 00000000..2662dd1a
--- /dev/null
+++ b/docs/post.sh
@@ -0,0 +1,3 @@
+#!/usr/bin/env bash
+
+rm -rf modules
diff --git a/docs/protodoc-markdown.tmpl b/docs/protodoc-markdown.tmpl
index e69de29b..28201837 100644
--- a/docs/protodoc-markdown.tmpl
+++ b/docs/protodoc-markdown.tmpl
@@ -0,0 +1,105 @@
+
+# Protobuf Documentation
+
+
+## Table of Contents
+{{range .Files}}
+{{$file_name := .Name}}- [{{.Name}}](#{{.Name}})
+ {{- if .Messages }}
+ {{range .Messages}} - [{{.LongName}}](#{{.FullName}})
+ {{end}}
+ {{- end -}}
+ {{- if .Enums }}
+ {{range .Enums}} - [{{.LongName}}](#{{.FullName}})
+ {{end}}
+ {{- end -}}
+ {{- if .Extensions }}
+ {{range .Extensions}} - [File-level Extensions](#{{$file_name}}-extensions)
+ {{end}}
+ {{- end -}}
+ {{- if .Services }}
+ {{range .Services}} - [{{.Name}}](#{{.FullName}})
+ {{end}}
+ {{- end -}}
+{{end}}
+- [Scalar Value Types](#scalar-value-types)
+
+{{range .Files}}
+{{$file_name := .Name}}
+
+Top
+
+## {{.Name}}
+{{.Description}}
+
+{{range .Messages}}
+
+
+### {{.LongName}}
+{{.Description}}
+
+{{if .HasFields}}
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+{{range .Fields -}}
+ | `{{.Name}}` | [{{.LongType}}](#{{.FullType}}) | {{.Label}} | {{if (index .Options "deprecated"|default false)}}**Deprecated.** {{end}}{{nobr .Description}}{{if .DefaultValue}} Default: {{.DefaultValue}}{{end}} |
+{{end}}
+{{end}}
+
+{{if .HasExtensions}}
+| Extension | Type | Base | Number | Description |
+| --------- | ---- | ---- | ------ | ----------- |
+{{range .Extensions -}}
+ | `{{.Name}}` | {{.LongType}} | {{.ContainingLongType}} | {{.Number}} | {{nobr .Description}}{{if .DefaultValue}} Default: {{.DefaultValue}}{{end}} |
+{{end}}
+{{end}}
+
+{{end}}
+
+{{range .Enums}}
+
+
+### {{.LongName}}
+{{.Description}}
+
+| Name | Number | Description |
+| ---- | ------ | ----------- |
+{{range .Values -}}
+ | {{.Name}} | {{.Number}} | {{nobr .Description}} |
+{{end}}
+
+{{end}}
+
+{{if .HasExtensions}}
+
+
+### File-level Extensions
+| Extension | Type | Base | Number | Description |
+| --------- | ---- | ---- | ------ | ----------- |
+{{range .Extensions -}}
+ | `{{.Name}}` | {{.LongType}} | {{.ContainingLongType}} | {{.Number}} | {{nobr .Description}}{{if .DefaultValue}} Default: `{{.DefaultValue}}`{{end}} |
+{{end}}
+{{end}}
+
+{{range .Services}}
+
+
+### {{.Name}}
+{{.Description}}
+
+| Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint |
+| ----------- | ------------ | ------------- | ------------| ------- | -------- |
+{{range .Methods -}}
+ | `{{.Name}}` | [{{.RequestLongType}}](#{{.RequestFullType}}){{if .RequestStreaming}} stream{{end}} | [{{.ResponseLongType}}](#{{.ResponseFullType}}){{if .ResponseStreaming}} stream{{end}} | {{nobr .Description}} | {{with (index .Options "google.api.http")}}{{range .Rules}}{{.Method}}|{{.Pattern}}{{end}}{{end}}|
+{{end}}
+{{end}}
+
+{{end}}
+
+## Scalar Value Types
+
+| .proto Type | Notes | C++ | Java | Python | Go | C# | PHP | Ruby |
+| ----------- | ----- | --- | ---- | ------ | -- | -- | --- | ---- |
+{{range .Scalars -}}
+ | {{.ProtoType}} | {{.Notes}} | {{.CppType}} | {{.JavaType}} | {{.PythonType}} | {{.GoType}} | {{.CSharp}} | {{.PhpType}} | {{.RubyType}} |
+{{end}}
diff --git a/scripts/protocgen.sh b/scripts/protocgen.sh
index 656cff26..1c22cdda 100755
--- a/scripts/protocgen.sh
+++ b/scripts/protocgen.sh
@@ -27,11 +27,11 @@ done
# command to generate docs using protoc-gen-doc
buf protoc \
--I "proto" \
--I "third_party/proto" \
---doc_out=./docs/ibc \
---doc_opt=./docs/protodoc-markdown.tmpl,proto-docs.md \
-$(find "$(pwd)/proto" -maxdepth 5 -name '*.proto')
+ -I "proto" \
+ -I "third_party/proto" \
+ --doc_out=./docs/ibc \
+ --doc_opt=./docs/protodoc-markdown.tmpl,proto-docs.md \
+ $(find "$(pwd)/proto" -maxdepth 5 -name '*.proto')
go mod tidy
From 1ff0da6694d08e494a9c87d5b9737ab5312677a0 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?colin=20axn=C3=A9r?=
<25233464+colin-axner@users.noreply.github.com>
Date: Thu, 6 May 2021 12:41:08 +0200
Subject: [PATCH 047/393] Update main to SDK release (#79)
* update main to latest commit of SDK
* bump to SDK commit 0c2d4a86fd3b2acb7bda7f09b9a68bab67a566ee
* fix build/tests
* update SDK commit and fix simapp
* use sdk v0.43.0-alpha1 release
* update SDK version to commit '711976e'
* fix build
* bump SDK commit to 'e3e89f52607b9d205e1ddea4af2d47e98e0ff2b1'
* add SDK change into migrations
---
docs/ibc/upgrades/developer-guide.md | 2 +-
docs/migrations/ibc-migration-043.md | 9 +
go.mod | 14 +-
go.sum | 83 +++++---
modules/apps/transfer/keeper/encoding.go | 8 +-
modules/apps/transfer/keeper/keeper.go | 4 +-
modules/apps/transfer/module.go | 8 +-
modules/core/02-client/abci_test.go | 2 +-
modules/core/02-client/keeper/keeper.go | 4 +-
modules/core/02-client/keeper/keeper_test.go | 4 +-
.../core/02-client/keeper/proposal_test.go | 13 +-
modules/core/02-client/types/encoding.go | 22 +-
modules/core/02-client/types/proposal.go | 6 +-
modules/core/02-client/types/proposal_test.go | 15 --
.../core/03-connection/client/utils/utils.go | 6 +-
.../core/03-connection/keeper/grpc_query.go | 2 +-
modules/core/03-connection/keeper/keeper.go | 14 +-
.../core/03-connection/simulation/decoder.go | 10 +-
.../03-connection/simulation/decoder_test.go | 4 +-
modules/core/03-connection/types/msgs_test.go | 2 +-
modules/core/04-channel/client/utils/utils.go | 2 +-
modules/core/04-channel/keeper/grpc_query.go | 4 +-
modules/core/04-channel/keeper/keeper.go | 10 +-
modules/core/04-channel/simulation/decoder.go | 6 +-
.../04-channel/simulation/decoder_test.go | 2 +-
modules/core/04-channel/types/msgs_test.go | 2 +-
modules/core/04-channel/types/packet.go | 2 +-
modules/core/client/query.go | 2 +-
modules/core/exported/client.go | 28 +--
modules/core/keeper/keeper.go | 6 +-
modules/core/module.go | 8 +-
modules/core/simulation/decoder_test.go | 4 +-
.../06-solomachine/spec/01_concepts.md | 8 +-
.../06-solomachine/types/client_state.go | 28 +--
.../06-solomachine/types/client_state_test.go | 16 +-
.../06-solomachine/types/codec.go | 22 +-
.../types/misbehaviour_handle.go | 4 +-
.../types/misbehaviour_handle_test.go | 8 +-
.../06-solomachine/types/proof.go | 74 +++----
.../06-solomachine/types/proposal_handle.go | 2 +-
.../06-solomachine/types/solomachine_test.go | 2 +-
.../06-solomachine/types/update.go | 4 +-
.../06-solomachine/types/update_test.go | 4 +-
.../07-tendermint/types/client_state.go | 28 +--
.../types/misbehaviour_handle.go | 2 +-
.../07-tendermint/types/proposal_handle.go | 2 +-
.../07-tendermint/types/store.go | 10 +-
.../07-tendermint/types/tendermint_test.go | 2 +-
.../07-tendermint/types/update.go | 2 +-
.../07-tendermint/types/upgrade.go | 6 +-
.../09-localhost/types/client_state.go | 32 +--
.../09-localhost/types/client_state_test.go | 8 +-
.../09-localhost/types/localhost_test.go | 2 +-
testing/README.md | 4 +-
testing/app.go | 2 +-
testing/chain.go | 8 +-
testing/mock/mock.go | 8 +-
testing/sdk_test.go | 14 +-
testing/simapp/app.go | 73 ++-----
testing/simapp/app_test.go | 193 ------------------
testing/simapp/genesis.go | 2 +-
testing/simapp/params/encoding.go | 2 +-
testing/simapp/simd/cmd/cmd_test.go | 2 +-
testing/simapp/simd/cmd/genaccounts.go | 4 +-
testing/simapp/simd/cmd/genaccounts_test.go | 2 +-
testing/simapp/simd/cmd/root.go | 21 +-
testing/simapp/simd/cmd/testnet.go | 18 +-
testing/simapp/state.go | 6 +-
testing/simapp/utils.go | 2 +-
testing/simapp/utils_test.go | 2 +-
testing/solomachine.go | 14 +-
71 files changed, 372 insertions(+), 579 deletions(-)
delete mode 100644 testing/simapp/app_test.go
diff --git a/docs/ibc/upgrades/developer-guide.md b/docs/ibc/upgrades/developer-guide.md
index 998cb276..d41b3346 100644
--- a/docs/ibc/upgrades/developer-guide.md
+++ b/docs/ibc/upgrades/developer-guide.md
@@ -19,7 +19,7 @@ The IBC protocol allows client implementations to provide a path to upgrading cl
// may be cancelled or modified before the last planned height.
VerifyUpgradeAndUpdateState(
ctx sdk.Context,
- cdc codec.BinaryMarshaler,
+ cdc codec.BinaryCodec,
store sdk.KVStore,
newClient ClientState,
newConsState ConsensusState,
diff --git a/docs/migrations/ibc-migration-043.md b/docs/migrations/ibc-migration-043.md
index fbbe4323..f2d20966 100644
--- a/docs/migrations/ibc-migration-043.md
+++ b/docs/migrations/ibc-migration-043.md
@@ -107,3 +107,12 @@ The `OnRecvPacket` callback has been modified to only return the acknowledgement
## IBC Event changes
The `packet_data` attribute has been deprecated in favor of `packet_data_hex`, in order to provide standardized encoding/decoding of packet data in events. While the `packet_data` event still exists, all relayers and IBC Event consumers are strongly encouraged to switch over to using `packet_data_hex` as soon as possible.
+
+## Relevant SDK changes
+
+* (codec) [\#9226](https://github.com/cosmos/cosmos-sdk/pull/9226) Rename codec interfaces and methods to follow general Go interface conventions (see the sketch after this list):
+ * `codec.Marshaler` → `codec.Codec` (this defines objects which serialize other objects)
+ * `codec.BinaryMarshaler` → `codec.BinaryCodec`
+ * `codec.JSONMarshaler` → `codec.JSONCodec`
+ * Removed `BinaryBare` suffix from `BinaryCodec` methods (`MarshalBinaryBare`, `UnmarshalBinaryBare`, ...)
+ * Removed `Binary` infix from `BinaryCodec` methods (`MarshalBinaryLengthPrefixed`, `UnmarshalBinaryLengthPrefixed`, ...)
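To make the renames above concrete, here is a minimal sketch in Go. It assumes the post-rename `codec.BinaryCodec` interface from the Cosmos SDK and this repository's `channeltypes.Packet` type; the helper name `roundTripPacket` is illustrative only and not part of either codebase.

```go
package example

import (
	"github.com/cosmos/cosmos-sdk/codec"

	channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
)

// roundTripPacket marshals and unmarshals a packet using the renamed
// BinaryCodec methods: Marshal/Unmarshal replace the earlier
// MarshalBinaryBare/UnmarshalBinaryBare calls.
func roundTripPacket(cdc codec.BinaryCodec, packet channeltypes.Packet) (channeltypes.Packet, error) {
	bz, err := cdc.Marshal(&packet) // previously cdc.MarshalBinaryBare(&packet)
	if err != nil {
		return channeltypes.Packet{}, err
	}

	var decoded channeltypes.Packet
	// previously cdc.UnmarshalBinaryBare(bz, &decoded)
	if err := cdc.Unmarshal(bz, &decoded); err != nil {
		return channeltypes.Packet{}, err
	}
	return decoded, nil
}
```

Similarly, the `packet_data_hex` attribute mentioned above carries the hex encoding of the raw packet data bytes. A hedged sketch of decoding it for a transfer packet, assuming the transfer module's `ModuleCdc` JSON codec and the illustrative helper name `decodePacketDataHex`:

```go
package example

import (
	"encoding/hex"

	transfertypes "github.com/cosmos/ibc-go/modules/apps/transfer/types"
)

// decodePacketDataHex converts the packet_data_hex event attribute back into
// a FungibleTokenPacketData: the attribute is hex-encoded JSON, so it is
// hex-decoded first and then unmarshaled from JSON.
func decodePacketDataHex(attr string) (transfertypes.FungibleTokenPacketData, error) {
	bz, err := hex.DecodeString(attr)
	if err != nil {
		return transfertypes.FungibleTokenPacketData{}, err
	}

	var data transfertypes.FungibleTokenPacketData
	if err := transfertypes.ModuleCdc.UnmarshalJSON(bz, &data); err != nil {
		return transfertypes.FungibleTokenPacketData{}, err
	}
	return data, nil
}
```

This mirrors the JSON round trip the transfer module itself uses for packet data, so event consumers that switch to `packet_data_hex` work with exactly the bytes carried in the packet.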
diff --git a/go.mod b/go.mod
index f9c9b147..06831989 100644
--- a/go.mod
+++ b/go.mod
@@ -5,11 +5,11 @@ module github.com/cosmos/ibc-go
replace github.com/gogo/protobuf => github.com/regen-network/protobuf v1.3.3-alpha.regen.1
require (
- github.com/armon/go-metrics v0.3.6
- github.com/confio/ics23/go v0.6.3
- github.com/cosmos/cosmos-sdk v0.42.0-alpha1.0.20210301172302-05ce78935a9b
+ github.com/armon/go-metrics v0.3.7
+ github.com/confio/ics23/go v0.6.6
+ github.com/cosmos/cosmos-sdk v0.43.0-alpha1.0.20210504090054-e3e89f52607b
github.com/gogo/protobuf v1.3.3
- github.com/golang/protobuf v1.4.3
+ github.com/golang/protobuf v1.5.2
github.com/gorilla/mux v1.8.0
github.com/grpc-ecosystem/grpc-gateway v1.16.0
github.com/pkg/errors v0.9.1
@@ -18,9 +18,9 @@ require (
github.com/spf13/cobra v1.1.3
github.com/spf13/viper v1.7.1
github.com/stretchr/testify v1.7.0
- github.com/tendermint/tendermint v0.34.8
+ github.com/tendermint/tendermint v0.34.10
github.com/tendermint/tm-db v0.6.4
google.golang.org/genproto v0.0.0-20210114201628-6edceaf6022f
- google.golang.org/grpc v1.36.0
- google.golang.org/protobuf v1.25.0
+ google.golang.org/grpc v1.37.0
+ google.golang.org/protobuf v1.26.0
)
diff --git a/go.sum b/go.sum
index 8461dc8a..d1d4a9ef 100644
--- a/go.sum
+++ b/go.sum
@@ -62,8 +62,8 @@ github.com/aristanetworks/goarista v0.0.0-20170210015632-ea17b1a17847/go.mod h1:
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
-github.com/armon/go-metrics v0.3.6 h1:x/tmtOF9cDBoXH7XoAGOz2qqm1DknFD1590XmD/DUJ8=
-github.com/armon/go-metrics v0.3.6/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc=
+github.com/armon/go-metrics v0.3.7 h1:c/oCtWzYpboy6+6f6LjXRlyW7NwA2SWf+a9KMlHq/bM=
+github.com/armon/go-metrics v0.3.7/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A=
github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU=
@@ -115,8 +115,9 @@ github.com/coinbase/rosetta-sdk-go v0.5.8/go.mod h1:xd4wYUhV3LkY78SPH8BUhc88rXfn
github.com/coinbase/rosetta-sdk-go v0.5.9 h1:CuGQE3HFmYwdEACJnuOtVI9cofqPsGvq6FdFIzaOPKI=
github.com/coinbase/rosetta-sdk-go v0.5.9/go.mod h1:xd4wYUhV3LkY78SPH8BUhc88rXfn2jYgN9BfiSjbcvM=
github.com/confio/ics23/go v0.0.0-20200817220745-f173e6211efb/go.mod h1:E45NqnlpxGnpfTWL/xauN7MRwEE28T4Dd4uraToOaKg=
-github.com/confio/ics23/go v0.6.3 h1:PuGK2V1NJWZ8sSkNDq91jgT/cahFEW9RGp4Y5jxulf0=
github.com/confio/ics23/go v0.6.3/go.mod h1:E45NqnlpxGnpfTWL/xauN7MRwEE28T4Dd4uraToOaKg=
+github.com/confio/ics23/go v0.6.6 h1:pkOy18YxxJ/r0XFDCnrl4Bjv6h4LkBSpLS6F38mrKL8=
+github.com/confio/ics23/go v0.6.6/go.mod h1:E45NqnlpxGnpfTWL/xauN7MRwEE28T4Dd4uraToOaKg=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
@@ -127,8 +128,8 @@ github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-github.com/cosmos/cosmos-sdk v0.42.0-alpha1.0.20210301172302-05ce78935a9b h1:zKLvd77wFDC+1mcSOW0sZ3TYYWLcj+GmMwneI+SLkOA=
-github.com/cosmos/cosmos-sdk v0.42.0-alpha1.0.20210301172302-05ce78935a9b/go.mod h1:7mfToqDfAuY5qgVxJaB5DKksOIewhoFMPDEjV/4cu8A=
+github.com/cosmos/cosmos-sdk v0.43.0-alpha1.0.20210504090054-e3e89f52607b h1:5AOuxO9jaK+89wdCMj2e75GC1qQRXeYGYOUfqdN/Kb0=
+github.com/cosmos/cosmos-sdk v0.43.0-alpha1.0.20210504090054-e3e89f52607b/go.mod h1:u/SsFuAiyrBlVafgDQF/hbLzeEaA7w9XmKw3QiJm3G4=
github.com/cosmos/go-bip39 v0.0.0-20180819234021-555e2067c45d/go.mod h1:tSxLoYXyBmiFeKpvmq4dzayMdCjCnu8uqmCysIGBT2Y=
github.com/cosmos/go-bip39 v1.0.0 h1:pcomnQdrdH22njcAatO0yWojsUnCO3y2tNoV1cb6hHY=
github.com/cosmos/go-bip39 v1.0.0/go.mod h1:RNJv0H/pOIVgxw6KS7QeX2a0Uo0aKUlfhZ4xuwvCdJw=
@@ -186,6 +187,7 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/ethereum/go-ethereum v1.9.23/go.mod h1:JIfVb6esrqALTExdz9hRYvrP0xBDf6wCncIu1hNwHpM=
github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51 h1:0JZ+dUmQeA8IIVUMzysrX4/AKuQwWhV2dYQuPZdvdSQ=
@@ -269,8 +271,10 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
+github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.2-0.20200707131729-196ae77b8a26/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
@@ -284,13 +288,17 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.1.1-0.20200604201612-c04b05f3adfa h1:Q75Upo5UN4JbPFURXZ8nLKYUvF85dyFRop/vQ0Rv+64=
github.com/google/gofuzz v1.1.1-0.20200604201612-c04b05f3adfa/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/orderedcode v0.0.1 h1:UzfcAexk9Vhv8+9pNOgRu41f16lHq725vPwnSeiG/Us=
+github.com/google/orderedcode v0.0.1/go.mod h1:iVyU4/qPKHY5h/wSd6rZZCDcLJNxiWO6dvsYES2Sb20=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
@@ -300,6 +308,7 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gordonklaus/ineffassign v0.0.0-20200309095847-7953dde2c7bf/go.mod h1:cuNKsD1zp2v6XfE/orVX2QE1LC+i254ceGcVeDT3pTU=
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4=
github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q=
@@ -317,8 +326,9 @@ github.com/graph-gophers/graphql-go v0.0.0-20191115155744-f33e81362277/go.mod h1
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-middleware v1.2.1/go.mod h1:EaizFBKfUKtMIF5iaDEhniwNedqGo9FuLFzppDr3uwI=
-github.com/grpc-ecosystem/go-grpc-middleware v1.2.2 h1:FlFbCRLd5Jr4iYXZufAvgWN6Ao0JrI5chLINnUXDDr0=
github.com/grpc-ecosystem/go-grpc-middleware v1.2.2/go.mod h1:EaizFBKfUKtMIF5iaDEhniwNedqGo9FuLFzppDr3uwI=
+github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw=
+github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
@@ -379,6 +389,8 @@ github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod
github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
+github.com/jhump/protoreflect v1.8.2 h1:k2xE7wcUomeqwY0LDCYA16y4WWfyTcMx5mKhk0d4ua0=
+github.com/jhump/protoreflect v1.8.2/go.mod h1:7GcYQDdMU/O/BBrl/cX6PNHpXh6cenjd8pneu5yW7Tg=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmhodges/levigo v1.0.0 h1:q5EC36kV79HWeTBWsod3mG11EgStG3qArTKcvlksN1U=
github.com/jmhodges/levigo v1.0.0/go.mod h1:Q6Qx+uH3RAqyK4rFQroq9RL7mdkABMcfhEI+nNuzMJQ=
@@ -426,8 +438,8 @@ github.com/lucasjones/reggen v0.0.0-20180717132126-cdb49ff09d77/go.mod h1:5ELEyG
github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
-github.com/magiconair/properties v1.8.4 h1:8KGKTcQQGm0Kv7vEbKFErAoAOFyyacLStRtQSeYtvkY=
-github.com/magiconair/properties v1.8.4/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
+github.com/magiconair/properties v1.8.5 h1:b6kJs+EmPFMYGkow9GiUyCyOvIwYetYJ3fSaWak/Gls=
+github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.0/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
@@ -484,6 +496,7 @@ github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxzi
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
+github.com/nishanths/predeclared v0.0.0-20200524104333-86fad755b4d3/go.mod h1:nt3d53pc1VYcphSCIaYAJtnPYnr3Zyn8fMq2wvPGPso=
github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs=
@@ -511,8 +524,8 @@ github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxS
github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
-github.com/otiai10/copy v1.5.0 h1:SoXDGnlTUZoqB/wSuj/Y5L6T5i6iN4YRAcMCd+JnLNU=
-github.com/otiai10/copy v1.5.0/go.mod h1:XWfuS3CrI0R6IE0FbgHsEazaXO8G0LpMp9o8tos0x4E=
+github.com/otiai10/copy v1.5.1 h1:a/cs2E1/1V0az8K5nblbl+ymEa4E11AfaOLMar8V34w=
+github.com/otiai10/copy v1.5.1/go.mod h1:XWfuS3CrI0R6IE0FbgHsEazaXO8G0LpMp9o8tos0x4E=
github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE=
github.com/otiai10/curr v1.0.0 h1:TJIWdbX0B+kpNagQrjgq8bCMrbhiuX73M2XwgtDMoOI=
github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs=
@@ -551,8 +564,8 @@ github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeD
github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.8.0/go.mod h1:O9VU6huf47PktckDQfMTX0Y8tY0/7TSWwj+ITvv0TnM=
-github.com/prometheus/client_golang v1.9.0 h1:Rrch9mh17XcxvEu9D9DEpb4isxjGBtcevQjKvxPRQIU=
-github.com/prometheus/client_golang v1.9.0/go.mod h1:FqZLKOZnGdFAhOK4nqGHa7D66IdsO+O441Eve7ptJDU=
+github.com/prometheus/client_golang v1.10.0 h1:/o0BDeWzLWXNZ+4q5gXltUvaMpJqckTa+jTNoB+z4cg=
+github.com/prometheus/client_golang v1.10.0/go.mod h1:WJM3cc3yu7XKBKa/I8WeZm+V3eltZnBwfENSU7mdogU=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@@ -568,17 +581,18 @@ github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt2
github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.14.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s=
-github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s=
-github.com/prometheus/common v0.18.0 h1:WCVKW7aL6LEe1uryfI9dnEc2ZqNB1Fn0ok930v0iL1Y=
github.com/prometheus/common v0.18.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s=
+github.com/prometheus/common v0.23.0 h1:GXWvPYuTUenIa+BhOq/x+L/QZzCqASkVRny5KTlPDGM=
+github.com/prometheus/common v0.23.0/go.mod h1:H6QK/N6XVT42whUeIdI3dp36w49c+/iMDk7UAI2qm7Q=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
-github.com/prometheus/procfs v0.2.0 h1:wH4vA7pcjKuZzjF7lM8awk4fnuJO6idemZXoKnULUx4=
github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
+github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4=
+github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/tsdb v0.6.2-0.20190402121629-4f204dcbc150/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/rakyll/statik v0.1.7 h1:OF3QCZUuyPxuGEP7B4ypUa7sB/iHtqOTDYZXGM8KOdQ=
@@ -599,8 +613,8 @@ github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/rs/xhandler v0.0.0-20160618193221-ed27b6fd6521/go.mod h1:RvLn4FgxWubrpZHtQLnOf6EwhN2hEMusxZOhcW9H3UQ=
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
-github.com/rs/zerolog v1.20.0 h1:38k9hgtUBdxFwE34yS8rTHmHBa4eN16E4DJlv177LNs=
-github.com/rs/zerolog v1.20.0/go.mod h1:IzD0RJ65iWH0w97OQQebJEvTZYvsCUm9WVLWBQrJRjo=
+github.com/rs/zerolog v1.21.0 h1:Q3vdXlfLNT+OftyBHsU0Y445MD+8m8axjKgf2si0QcM=
+github.com/rs/zerolog v1.21.0/go.mod h1:ZPhntP/xmq1nnND05hhpAh2QMhSsA4UN3MGZ6O2J3hM=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
@@ -673,8 +687,8 @@ github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c h1:g+WoO5jjkqGAzH
github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c/go.mod h1:ahpPrc7HpcfEWDQRZEmnXMzHY03mLDYMCxeDzy46i+8=
github.com/tendermint/btcd v0.1.1 h1:0VcxPfflS2zZ3RiOAHkBiFUcPvbtRj5O7zHmcJWHV7s=
github.com/tendermint/btcd v0.1.1/go.mod h1:DC6/m53jtQzr/NFmMNEu0rxf18/ktVoVtMrnDD5pN+U=
-github.com/tendermint/cosmos-rosetta-gateway v0.3.0-rc2 h1:crekJuQ57yIBDuKd3/dMJ00ZvOHURuv9RGJSi2hWTW4=
-github.com/tendermint/cosmos-rosetta-gateway v0.3.0-rc2/go.mod h1:gBPw8WV2Erm4UGHlBRiM3zaEBst4bsuihmMCNQdgP/s=
+github.com/tendermint/cosmos-rosetta-gateway v0.3.0-rc2.0.20210304154332-87d6ca4410df h1:hoMLrOS4WyyMM+Y+iWdGu94o0zzp6Q43y7v89Q1/OIw=
+github.com/tendermint/cosmos-rosetta-gateway v0.3.0-rc2.0.20210304154332-87d6ca4410df/go.mod h1:gBPw8WV2Erm4UGHlBRiM3zaEBst4bsuihmMCNQdgP/s=
github.com/tendermint/crypto v0.0.0-20191022145703-50d29ede1e15 h1:hqAk8riJvK4RMWx1aInLzndwxKalgi5rTqgfXxOxbEI=
github.com/tendermint/crypto v0.0.0-20191022145703-50d29ede1e15/go.mod h1:z4YtwM70uOnk8h0pjJYlj3zdYwi9l03By6iAIF5j/Pk=
github.com/tendermint/go-amino v0.16.0 h1:GyhmgQKvqF82e2oZeuMSp9JTN0N09emoSZlb2lyGa2E=
@@ -682,8 +696,8 @@ github.com/tendermint/go-amino v0.16.0/go.mod h1:TQU0M1i/ImAo+tYpZi73AU3V/dKeCoM
github.com/tendermint/tendermint v0.34.0-rc4/go.mod h1:yotsojf2C1QBOw4dZrTcxbyxmPUrT4hNuOQWX9XUwB4=
github.com/tendermint/tendermint v0.34.0-rc6/go.mod h1:ugzyZO5foutZImv0Iyx/gOFCX6mjJTgbLHTwi17VDVg=
github.com/tendermint/tendermint v0.34.0/go.mod h1:Aj3PIipBFSNO21r+Lq3TtzQ+uKESxkbA3yo/INM4QwQ=
-github.com/tendermint/tendermint v0.34.8 h1:PMWgUx47FrNTsfhxCWzoiIlVAC1SE9+WBlnsF9oQW0I=
-github.com/tendermint/tendermint v0.34.8/go.mod h1:JVuu3V1ZexOaZG8VJMRl8lnfrGw6hEB2TVnoUwKRbss=
+github.com/tendermint/tendermint v0.34.10 h1:wBOc/It8sh/pVH9np2V5fBvRmIyFN/bUrGPx+eAHexs=
+github.com/tendermint/tendermint v0.34.10/go.mod h1:aeHL7alPh4uTBIJQ8mgFEE8VwJLXI1VD3rVOmH2Mcy0=
github.com/tendermint/tm-db v0.6.2/go.mod h1:GYtQ67SUvATOcoY8/+x6ylk8Qo02BQyLrAs+yAcLvGI=
github.com/tendermint/tm-db v0.6.3/go.mod h1:lfA1dL9/Y/Y8wwyPp2NMLyn5P5Ptr/gvDFNWtrCWSf8=
github.com/tendermint/tm-db v0.6.4 h1:3N2jlnYQkXNQclQwd/eKV/NzlqPlfK21cpRRIx80XXQ=
@@ -710,6 +724,7 @@ github.com/wsddn/go-ecdh v0.0.0-20161211032359-48726bab9208/go.mod h1:IotVbo4F+m
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/zondax/hid v0.9.0 h1:eiT3P6vNxAEVxXMw66eZUAAnU2zD33JBkfG/EnfAKl8=
github.com/zondax/hid v0.9.0/go.mod h1:l5wttcP0jwtdLjqjMMWFVEE7d1zO0jvSPA9OPZxWpEM=
@@ -822,6 +837,7 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -867,8 +883,10 @@ golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e h1:AyodaIpKjppX+cBfTASF2E1US3H2JFBj920Ot3rtDjs=
-golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210309074719-68d13333faf2 h1:46ULzRKLh1CwgRq2dC5SlBzEqqNCi8rreOZnNrbqcIY=
+golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221 h1:/ZHdbVpdR/jk3g30/d4yUL0JU9kksj8+F/bnQUVLGDM=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -895,18 +913,21 @@ golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgw
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20190828213141-aed303cbaa74/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200117012304-6edc0a871e69/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200522201501-cb1345f3a375/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200717024301-6ddee64345a6/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -964,8 +985,8 @@ google.golang.org/grpc v1.32.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/grpc v1.36.0 h1:o1bcQ6imQMIOpdrO3SWf2z5RV72WbDwdXuK0MDlc8As=
-google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.37.0 h1:uSZWeQJX5j11bIQ4AJoj+McDBo29cY1MCoC1wO3ts+c=
+google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -975,8 +996,11 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
-google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
+google.golang.org/protobuf v1.25.1-0.20200805231151-a709e31e5d12/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
+google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
+google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk=
+google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -1018,6 +1042,7 @@ honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
+honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
nhooyr.io/websocket v1.8.6 h1:s+C3xAMLwGmlI31Nyn/eAehUlZPwfYZu2JXM621Q5/k=
nhooyr.io/websocket v1.8.6/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
diff --git a/modules/apps/transfer/keeper/encoding.go b/modules/apps/transfer/keeper/encoding.go
index 30e2ff4d..78403694 100644
--- a/modules/apps/transfer/keeper/encoding.go
+++ b/modules/apps/transfer/keeper/encoding.go
@@ -8,7 +8,7 @@ import (
// raw encoded bytes.
func (k Keeper) UnmarshalDenomTrace(bz []byte) (types.DenomTrace, error) {
var denomTrace types.DenomTrace
- if err := k.cdc.UnmarshalBinaryBare(bz, &denomTrace); err != nil {
+ if err := k.cdc.Unmarshal(bz, &denomTrace); err != nil {
return types.DenomTrace{}, err
}
@@ -19,18 +19,18 @@ func (k Keeper) UnmarshalDenomTrace(bz []byte) (types.DenomTrace, error) {
// raw encoded bytes. It panics on error.
func (k Keeper) MustUnmarshalDenomTrace(bz []byte) types.DenomTrace {
var denomTrace types.DenomTrace
- k.cdc.MustUnmarshalBinaryBare(bz, &denomTrace)
+ k.cdc.MustUnmarshal(bz, &denomTrace)
return denomTrace
}
// MarshalDenomTrace attempts to encode an DenomTrace object and returns the
// raw encoded bytes.
func (k Keeper) MarshalDenomTrace(denomTrace types.DenomTrace) ([]byte, error) {
- return k.cdc.MarshalBinaryBare(&denomTrace)
+ return k.cdc.Marshal(&denomTrace)
}
// MustMarshalDenomTrace attempts to encode an DenomTrace object and returns the
// raw encoded bytes. It panics on error.
func (k Keeper) MustMarshalDenomTrace(denomTrace types.DenomTrace) []byte {
- return k.cdc.MustMarshalBinaryBare(&denomTrace)
+ return k.cdc.MustMarshal(&denomTrace)
}
diff --git a/modules/apps/transfer/keeper/keeper.go b/modules/apps/transfer/keeper/keeper.go
index be69ca3f..2ec1b68f 100644
--- a/modules/apps/transfer/keeper/keeper.go
+++ b/modules/apps/transfer/keeper/keeper.go
@@ -20,7 +20,7 @@ import (
// Keeper defines the IBC fungible transfer keeper
type Keeper struct {
storeKey sdk.StoreKey
- cdc codec.BinaryMarshaler
+ cdc codec.BinaryCodec
paramSpace paramtypes.Subspace
channelKeeper types.ChannelKeeper
@@ -32,7 +32,7 @@ type Keeper struct {
// NewKeeper creates a new IBC transfer Keeper instance
func NewKeeper(
- cdc codec.BinaryMarshaler, key sdk.StoreKey, paramSpace paramtypes.Subspace,
+ cdc codec.BinaryCodec, key sdk.StoreKey, paramSpace paramtypes.Subspace,
channelKeeper types.ChannelKeeper, portKeeper types.PortKeeper,
authKeeper types.AccountKeeper, bankKeeper types.BankKeeper, scopedKeeper capabilitykeeper.ScopedKeeper,
) Keeper {
diff --git a/modules/apps/transfer/module.go b/modules/apps/transfer/module.go
index a8080aad..c19a740b 100644
--- a/modules/apps/transfer/module.go
+++ b/modules/apps/transfer/module.go
@@ -56,12 +56,12 @@ func (AppModuleBasic) RegisterInterfaces(registry codectypes.InterfaceRegistry)
// DefaultGenesis returns default genesis state as raw bytes for the ibc
// transfer module.
-func (AppModuleBasic) DefaultGenesis(cdc codec.JSONMarshaler) json.RawMessage {
+func (AppModuleBasic) DefaultGenesis(cdc codec.JSONCodec) json.RawMessage {
return cdc.MustMarshalJSON(types.DefaultGenesisState())
}
// ValidateGenesis performs genesis state validation for the ibc transfer module.
-func (AppModuleBasic) ValidateGenesis(cdc codec.JSONMarshaler, config client.TxEncodingConfig, bz json.RawMessage) error {
+func (AppModuleBasic) ValidateGenesis(cdc codec.JSONCodec, config client.TxEncodingConfig, bz json.RawMessage) error {
var gs types.GenesisState
if err := cdc.UnmarshalJSON(bz, &gs); err != nil {
return fmt.Errorf("failed to unmarshal %s genesis state: %w", types.ModuleName, err)
@@ -130,7 +130,7 @@ func (am AppModule) RegisterServices(cfg module.Configurator) {
// InitGenesis performs genesis initialization for the ibc-transfer module. It returns
// no validator updates.
-func (am AppModule) InitGenesis(ctx sdk.Context, cdc codec.JSONMarshaler, data json.RawMessage) []abci.ValidatorUpdate {
+func (am AppModule) InitGenesis(ctx sdk.Context, cdc codec.JSONCodec, data json.RawMessage) []abci.ValidatorUpdate {
var genesisState types.GenesisState
cdc.MustUnmarshalJSON(data, &genesisState)
am.keeper.InitGenesis(ctx, genesisState)
@@ -139,7 +139,7 @@ func (am AppModule) InitGenesis(ctx sdk.Context, cdc codec.JSONMarshaler, data j
// ExportGenesis returns the exported genesis state as raw bytes for the ibc-transfer
// module.
-func (am AppModule) ExportGenesis(ctx sdk.Context, cdc codec.JSONMarshaler) json.RawMessage {
+func (am AppModule) ExportGenesis(ctx sdk.Context, cdc codec.JSONCodec) json.RawMessage {
gs := am.keeper.ExportGenesis(ctx)
return cdc.MustMarshalJSON(gs)
}
diff --git a/modules/core/02-client/abci_test.go b/modules/core/02-client/abci_test.go
index 0bbdf489..cad34f3b 100644
--- a/modules/core/02-client/abci_test.go
+++ b/modules/core/02-client/abci_test.go
@@ -70,7 +70,7 @@ func (suite *ClientTestSuite) TestBeginBlockerConsensusState() {
}
// set upgrade plan in the upgrade store
store := suite.chainA.GetContext().KVStore(suite.chainA.GetSimApp().GetKey(upgradetypes.StoreKey))
- bz := suite.chainA.App.AppCodec().MustMarshalBinaryBare(plan)
+ bz := suite.chainA.App.AppCodec().MustMarshal(plan)
store.Set(upgradetypes.PlanKey(), bz)
nextValsHash := []byte("nextValsHash")
diff --git a/modules/core/02-client/keeper/keeper.go b/modules/core/02-client/keeper/keeper.go
index d19360dc..2211216a 100644
--- a/modules/core/02-client/keeper/keeper.go
+++ b/modules/core/02-client/keeper/keeper.go
@@ -25,14 +25,14 @@ import (
// state information
type Keeper struct {
storeKey sdk.StoreKey
- cdc codec.BinaryMarshaler
+ cdc codec.BinaryCodec
paramSpace paramtypes.Subspace
stakingKeeper types.StakingKeeper
upgradeKeeper types.UpgradeKeeper
}
// NewKeeper creates a new NewKeeper instance
-func NewKeeper(cdc codec.BinaryMarshaler, key sdk.StoreKey, paramSpace paramtypes.Subspace, sk types.StakingKeeper, uk types.UpgradeKeeper) Keeper {
+func NewKeeper(cdc codec.BinaryCodec, key sdk.StoreKey, paramSpace paramtypes.Subspace, sk types.StakingKeeper, uk types.UpgradeKeeper) Keeper {
// set KeyTable if it has not already been set
if !paramSpace.HasKeyTable() {
paramSpace = paramSpace.WithKeyTable(types.ParamKeyTable())
diff --git a/modules/core/02-client/keeper/keeper_test.go b/modules/core/02-client/keeper/keeper_test.go
index 676967cb..9d318eb7 100644
--- a/modules/core/02-client/keeper/keeper_test.go
+++ b/modules/core/02-client/keeper/keeper_test.go
@@ -55,7 +55,7 @@ type KeeperTestSuite struct {
chainA *ibctesting.TestChain
chainB *ibctesting.TestChain
- cdc codec.Marshaler
+ cdc codec.Codec
ctx sdk.Context
keeper *keeper.Keeper
consensusState *ibctmtypes.ConsensusState
@@ -112,7 +112,7 @@ func (suite *KeeperTestSuite) SetupTest() {
val.Tokens = sdk.NewInt(rand.Int63())
validators = append(validators, val)
- hi := stakingtypes.NewHistoricalInfo(suite.ctx.BlockHeader(), validators)
+ hi := stakingtypes.NewHistoricalInfo(suite.ctx.BlockHeader(), validators, sdk.DefaultPowerReduction)
app.StakingKeeper.SetHistoricalInfo(suite.ctx, int64(i), &hi)
}
diff --git a/modules/core/02-client/keeper/proposal_test.go b/modules/core/02-client/keeper/proposal_test.go
index 5e4eca40..cee39c3c 100644
--- a/modules/core/02-client/keeper/proposal_test.go
+++ b/modules/core/02-client/keeper/proposal_test.go
@@ -179,17 +179,6 @@ func (suite *KeeperTestSuite) TestHandleUpgradeProposal() {
}
}, false,
},
- {
- "schedule upgrade fails - plan sets time and height", func() {
- plan = upgradetypes.Plan{
- Name: "invalid plan",
- Height: 1000,
- Time: suite.chainA.GetContext().BlockTime(),
- }
- content, err = types.NewUpgradeProposal(ibctesting.Title, ibctesting.Description, plan, upgradedClientState)
- suite.Require().NoError(err)
- }, false,
- },
}
for _, tc := range testCases {
@@ -215,7 +204,7 @@ func (suite *KeeperTestSuite) TestHandleUpgradeProposal() {
if oldPlan.Height != 0 {
// set upgrade plan in the upgrade store
store := suite.chainA.GetContext().KVStore(suite.chainA.GetSimApp().GetKey(upgradetypes.StoreKey))
- bz := suite.chainA.App.AppCodec().MustMarshalBinaryBare(&oldPlan)
+ bz := suite.chainA.App.AppCodec().MustMarshal(&oldPlan)
store.Set(upgradetypes.PlanKey(), bz)
bz, err := types.MarshalClientState(suite.chainA.App.AppCodec(), upgradedClientState)
diff --git a/modules/core/02-client/types/encoding.go b/modules/core/02-client/types/encoding.go
index 327dd163..c3ec255b 100644
--- a/modules/core/02-client/types/encoding.go
+++ b/modules/core/02-client/types/encoding.go
@@ -9,7 +9,7 @@ import (
// MustUnmarshalClientState attempts to decode and return an ClientState object from
// raw encoded bytes. It panics on error.
-func MustUnmarshalClientState(cdc codec.BinaryMarshaler, bz []byte) exported.ClientState {
+func MustUnmarshalClientState(cdc codec.BinaryCodec, bz []byte) exported.ClientState {
clientState, err := UnmarshalClientState(cdc, bz)
if err != nil {
panic(fmt.Errorf("failed to decode client state: %w", err))
@@ -20,7 +20,7 @@ func MustUnmarshalClientState(cdc codec.BinaryMarshaler, bz []byte) exported.Cli
// MustMarshalClientState attempts to encode an ClientState object and returns the
// raw encoded bytes. It panics on error.
-func MustMarshalClientState(cdc codec.BinaryMarshaler, clientState exported.ClientState) []byte {
+func MustMarshalClientState(cdc codec.BinaryCodec, clientState exported.ClientState) []byte {
bz, err := MarshalClientState(cdc, clientState)
if err != nil {
panic(fmt.Errorf("failed to encode client state: %w", err))
@@ -30,14 +30,14 @@ func MustMarshalClientState(cdc codec.BinaryMarshaler, clientState exported.Clie
}
// MarshalClientState protobuf serializes an ClientState interface
-func MarshalClientState(cdc codec.BinaryMarshaler, clientStateI exported.ClientState) ([]byte, error) {
+func MarshalClientState(cdc codec.BinaryCodec, clientStateI exported.ClientState) ([]byte, error) {
return cdc.MarshalInterface(clientStateI)
}
// UnmarshalClientState returns an ClientState interface from raw encoded clientState
// bytes of a Proto-based ClientState type. An error is returned upon decoding
// failure.
-func UnmarshalClientState(cdc codec.BinaryMarshaler, bz []byte) (exported.ClientState, error) {
+func UnmarshalClientState(cdc codec.BinaryCodec, bz []byte) (exported.ClientState, error) {
var clientState exported.ClientState
if err := cdc.UnmarshalInterface(bz, &clientState); err != nil {
return nil, err
@@ -48,7 +48,7 @@ func UnmarshalClientState(cdc codec.BinaryMarshaler, bz []byte) (exported.Client
// MustUnmarshalConsensusState attempts to decode and return an ConsensusState object from
// raw encoded bytes. It panics on error.
-func MustUnmarshalConsensusState(cdc codec.BinaryMarshaler, bz []byte) exported.ConsensusState {
+func MustUnmarshalConsensusState(cdc codec.BinaryCodec, bz []byte) exported.ConsensusState {
consensusState, err := UnmarshalConsensusState(cdc, bz)
if err != nil {
panic(fmt.Errorf("failed to decode consensus state: %w", err))
@@ -59,7 +59,7 @@ func MustUnmarshalConsensusState(cdc codec.BinaryMarshaler, bz []byte) exported.
// MustMarshalConsensusState attempts to encode a ConsensusState object and returns the
// raw encoded bytes. It panics on error.
-func MustMarshalConsensusState(cdc codec.BinaryMarshaler, consensusState exported.ConsensusState) []byte {
+func MustMarshalConsensusState(cdc codec.BinaryCodec, consensusState exported.ConsensusState) []byte {
bz, err := MarshalConsensusState(cdc, consensusState)
if err != nil {
panic(fmt.Errorf("failed to encode consensus state: %w", err))
@@ -69,14 +69,14 @@ func MustMarshalConsensusState(cdc codec.BinaryMarshaler, consensusState exporte
}
// MarshalConsensusState protobuf serializes a ConsensusState interface
-func MarshalConsensusState(cdc codec.BinaryMarshaler, cs exported.ConsensusState) ([]byte, error) {
+func MarshalConsensusState(cdc codec.BinaryCodec, cs exported.ConsensusState) ([]byte, error) {
return cdc.MarshalInterface(cs)
}
// UnmarshalConsensusState returns a ConsensusState interface from raw encoded consensus state
// bytes of a Proto-based ConsensusState type. An error is returned upon decoding
// failure.
-func UnmarshalConsensusState(cdc codec.BinaryMarshaler, bz []byte) (exported.ConsensusState, error) {
+func UnmarshalConsensusState(cdc codec.BinaryCodec, bz []byte) (exported.ConsensusState, error) {
var consensusState exported.ConsensusState
if err := cdc.UnmarshalInterface(bz, &consensusState); err != nil {
return nil, err
@@ -86,13 +86,13 @@ func UnmarshalConsensusState(cdc codec.BinaryMarshaler, bz []byte) (exported.Con
}
// MarshalHeader protobuf serializes a Header interface
-func MarshalHeader(cdc codec.BinaryMarshaler, h exported.Header) ([]byte, error) {
+func MarshalHeader(cdc codec.BinaryCodec, h exported.Header) ([]byte, error) {
return cdc.MarshalInterface(h)
}
// MustMarshalHeader attempts to encode a Header object and returns the
// raw encoded bytes. It panics on error.
-func MustMarshalHeader(cdc codec.BinaryMarshaler, header exported.Header) []byte {
+func MustMarshalHeader(cdc codec.BinaryCodec, header exported.Header) []byte {
bz, err := MarshalHeader(cdc, header)
if err != nil {
panic(fmt.Errorf("failed to encode header: %w", err))
@@ -103,7 +103,7 @@ func MustMarshalHeader(cdc codec.BinaryMarshaler, header exported.Header) []byte
// UnmarshalHeader returns a Header interface from raw proto encoded header bytes.
// An error is returned upon decoding failure.
-func UnmarshalHeader(cdc codec.BinaryMarshaler, bz []byte) (exported.Header, error) {
+func UnmarshalHeader(cdc codec.BinaryCodec, bz []byte) (exported.Header, error) {
var header exported.Header
if err := cdc.UnmarshalInterface(bz, &header); err != nil {
return nil, err
diff --git a/modules/core/02-client/types/proposal.go b/modules/core/02-client/types/proposal.go
index 4946d374..3d10a925 100644
--- a/modules/core/02-client/types/proposal.go
+++ b/modules/core/02-client/types/proposal.go
@@ -6,8 +6,8 @@ import (
codectypes "github.com/cosmos/cosmos-sdk/codec/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
govtypes "github.com/cosmos/cosmos-sdk/x/gov/types"
- "github.com/cosmos/ibc-go/modules/core/exported"
upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
+ "github.com/cosmos/ibc-go/modules/core/exported"
)
const (
@@ -111,10 +111,6 @@ func (up *UpgradeProposal) ValidateBasic() error {
return err
}
- if up.Plan.Time.Unix() > 0 {
- return sdkerrors.Wrap(ErrInvalidUpgradeProposal, "IBC chain upgrades must only set height")
- }
-
if up.Plan.Height <= 0 {
return sdkerrors.Wrap(ErrInvalidUpgradeProposal, "IBC chain upgrades must set a positive height")
}
diff --git a/modules/core/02-client/types/proposal_test.go b/modules/core/02-client/types/proposal_test.go
index 76aee117..56d6103c 100644
--- a/modules/core/02-client/types/proposal_test.go
+++ b/modules/core/02-client/types/proposal_test.go
@@ -2,7 +2,6 @@ package types_test
import (
"fmt"
- "time"
"github.com/cosmos/cosmos-sdk/codec"
codectypes "github.com/cosmos/cosmos-sdk/codec/types"
@@ -129,13 +128,6 @@ func (suite *TypesTestSuite) TestUpgradeProposalValidateBasic() {
}, false,
},
- {
- "fails plan validate basic, height and time is 0", func() {
- invalidPlan := upgradetypes.Plan{Name: "ibc upgrade"}
- proposal, err = types.NewUpgradeProposal(ibctesting.Title, ibctesting.Description, invalidPlan, cs)
- suite.Require().NoError(err)
- }, false,
- },
{
"plan height is zero", func() {
invalidPlan := upgradetypes.Plan{Name: "ibc upgrade", Height: 0}
@@ -143,13 +135,6 @@ func (suite *TypesTestSuite) TestUpgradeProposalValidateBasic() {
suite.Require().NoError(err)
}, false,
},
- {
- "plan time is not set to 0", func() {
- invalidPlan := upgradetypes.Plan{Name: "ibc upgrade", Time: time.Now()}
- proposal, err = types.NewUpgradeProposal(ibctesting.Title, ibctesting.Description, invalidPlan, cs)
- suite.Require().NoError(err)
- }, false,
- },
{
"client state is nil", func() {
proposal = &types.UpgradeProposal{
diff --git a/modules/core/03-connection/client/utils/utils.go b/modules/core/03-connection/client/utils/utils.go
index 1091236f..e4a108ad 100644
--- a/modules/core/03-connection/client/utils/utils.go
+++ b/modules/core/03-connection/client/utils/utils.go
@@ -53,7 +53,7 @@ func queryConnectionABCI(clientCtx client.Context, connectionID string) (*types.
cdc := codec.NewProtoCodec(clientCtx.InterfaceRegistry)
var connection types.ConnectionEnd
- if err := cdc.UnmarshalBinaryBare(value, &connection); err != nil {
+ if err := cdc.Unmarshal(value, &connection); err != nil {
return nil, err
}
@@ -92,7 +92,7 @@ func queryClientConnectionsABCI(clientCtx client.Context, clientID string) (*typ
}
var paths []string
- if err := clientCtx.LegacyAmino.UnmarshalBinaryBare(value, &paths); err != nil {
+ if err := clientCtx.LegacyAmino.Unmarshal(value, &paths); err != nil {
return nil, err
}
@@ -215,5 +215,5 @@ func ParseProof(cdc *codec.LegacyAmino, arg string) ([]byte, error) {
}
}
- return cdc.MarshalBinaryBare(&merkleProof)
+ return cdc.Marshal(&merkleProof)
}
diff --git a/modules/core/03-connection/keeper/grpc_query.go b/modules/core/03-connection/keeper/grpc_query.go
index e43af8d3..244250f2 100644
--- a/modules/core/03-connection/keeper/grpc_query.go
+++ b/modules/core/03-connection/keeper/grpc_query.go
@@ -55,7 +55,7 @@ func (q Keeper) Connections(c context.Context, req *types.QueryConnectionsReques
pageRes, err := query.Paginate(store, req.Pagination, func(key, value []byte) error {
var result types.ConnectionEnd
- if err := q.cdc.UnmarshalBinaryBare(value, &result); err != nil {
+ if err := q.cdc.Unmarshal(value, &result); err != nil {
return err
}
diff --git a/modules/core/03-connection/keeper/keeper.go b/modules/core/03-connection/keeper/keeper.go
index 235b92b7..49747b56 100644
--- a/modules/core/03-connection/keeper/keeper.go
+++ b/modules/core/03-connection/keeper/keeper.go
@@ -19,12 +19,12 @@ type Keeper struct {
types.QueryServer
storeKey sdk.StoreKey
- cdc codec.BinaryMarshaler
+ cdc codec.BinaryCodec
clientKeeper types.ClientKeeper
}
// NewKeeper creates a new IBC connection Keeper instance
-func NewKeeper(cdc codec.BinaryMarshaler, key sdk.StoreKey, ck types.ClientKeeper) Keeper {
+func NewKeeper(cdc codec.BinaryCodec, key sdk.StoreKey, ck types.ClientKeeper) Keeper {
return Keeper{
storeKey: key,
cdc: cdc,
@@ -62,7 +62,7 @@ func (k Keeper) GetConnection(ctx sdk.Context, connectionID string) (types.Conne
}
var connection types.ConnectionEnd
- k.cdc.MustUnmarshalBinaryBare(bz, &connection)
+ k.cdc.MustUnmarshal(bz, &connection)
return connection, true
}
@@ -70,7 +70,7 @@ func (k Keeper) GetConnection(ctx sdk.Context, connectionID string) (types.Conne
// SetConnection sets a connection to the store
func (k Keeper) SetConnection(ctx sdk.Context, connectionID string, connection types.ConnectionEnd) {
store := ctx.KVStore(k.storeKey)
- bz := k.cdc.MustMarshalBinaryBare(&connection)
+ bz := k.cdc.MustMarshal(&connection)
store.Set(host.ConnectionKey(connectionID), bz)
}
@@ -101,7 +101,7 @@ func (k Keeper) GetClientConnectionPaths(ctx sdk.Context, clientID string) ([]st
}
var clientPaths types.ClientPaths
- k.cdc.MustUnmarshalBinaryBare(bz, &clientPaths)
+ k.cdc.MustUnmarshal(bz, &clientPaths)
return clientPaths.Paths, true
}
@@ -109,7 +109,7 @@ func (k Keeper) GetClientConnectionPaths(ctx sdk.Context, clientID string) ([]st
func (k Keeper) SetClientConnectionPaths(ctx sdk.Context, clientID string, paths []string) {
store := ctx.KVStore(k.storeKey)
clientPaths := types.ClientPaths{Paths: paths}
- bz := k.cdc.MustMarshalBinaryBare(&clientPaths)
+ bz := k.cdc.MustMarshal(&clientPaths)
store.Set(host.ClientConnectionsKey(clientID), bz)
}
@@ -160,7 +160,7 @@ func (k Keeper) IterateConnections(ctx sdk.Context, cb func(types.IdentifiedConn
defer iterator.Close()
for ; iterator.Valid(); iterator.Next() {
var connection types.ConnectionEnd
- k.cdc.MustUnmarshalBinaryBare(iterator.Value(), &connection)
+ k.cdc.MustUnmarshal(iterator.Value(), &connection)
connectionID := host.MustParseConnectionPath(string(iterator.Key()))
identifiedConnection := types.NewIdentifiedConnection(connectionID, connection)
diff --git a/modules/core/03-connection/simulation/decoder.go b/modules/core/03-connection/simulation/decoder.go
index 8c485230..8b331cb5 100644
--- a/modules/core/03-connection/simulation/decoder.go
+++ b/modules/core/03-connection/simulation/decoder.go
@@ -12,18 +12,18 @@ import (
// NewDecodeStore returns a decoder function closure that unmarshals the KVPair's
// Value to the corresponding connection type.
-func NewDecodeStore(cdc codec.BinaryMarshaler, kvA, kvB kv.Pair) (string, bool) {
+func NewDecodeStore(cdc codec.BinaryCodec, kvA, kvB kv.Pair) (string, bool) {
switch {
case bytes.HasPrefix(kvA.Key, host.KeyClientStorePrefix) && bytes.HasSuffix(kvA.Key, []byte(host.KeyConnectionPrefix)):
var clientConnectionsA, clientConnectionsB types.ClientPaths
- cdc.MustUnmarshalBinaryBare(kvA.Value, &clientConnectionsA)
- cdc.MustUnmarshalBinaryBare(kvB.Value, &clientConnectionsB)
+ cdc.MustUnmarshal(kvA.Value, &clientConnectionsA)
+ cdc.MustUnmarshal(kvB.Value, &clientConnectionsB)
return fmt.Sprintf("ClientPaths A: %v\nClientPaths B: %v", clientConnectionsA, clientConnectionsB), true
case bytes.HasPrefix(kvA.Key, []byte(host.KeyConnectionPrefix)):
var connectionA, connectionB types.ConnectionEnd
- cdc.MustUnmarshalBinaryBare(kvA.Value, &connectionA)
- cdc.MustUnmarshalBinaryBare(kvB.Value, &connectionB)
+ cdc.MustUnmarshal(kvA.Value, &connectionA)
+ cdc.MustUnmarshal(kvB.Value, &connectionB)
return fmt.Sprintf("ConnectionEnd A: %v\nConnectionEnd B: %v", connectionA, connectionB), true
default:
diff --git a/modules/core/03-connection/simulation/decoder_test.go b/modules/core/03-connection/simulation/decoder_test.go
index e9b72657..981da400 100644
--- a/modules/core/03-connection/simulation/decoder_test.go
+++ b/modules/core/03-connection/simulation/decoder_test.go
@@ -32,11 +32,11 @@ func TestDecodeStore(t *testing.T) {
Pairs: []kv.Pair{
{
Key: host.ClientConnectionsKey(connection.ClientId),
- Value: cdc.MustMarshalBinaryBare(&paths),
+ Value: cdc.MustMarshal(&paths),
},
{
Key: host.ConnectionKey(connectionID),
- Value: cdc.MustMarshalBinaryBare(&connection),
+ Value: cdc.MustMarshal(&connection),
},
{
Key: []byte{0x99},
diff --git a/modules/core/03-connection/types/msgs_test.go b/modules/core/03-connection/types/msgs_test.go
index 1875804d..57c12352 100644
--- a/modules/core/03-connection/types/msgs_test.go
+++ b/modules/core/03-connection/types/msgs_test.go
@@ -65,7 +65,7 @@ func (suite *MsgTestSuite) SetupTest() {
merkleProof, err := commitmenttypes.ConvertProofs(res.ProofOps)
suite.Require().NoError(err)
- proof, err := app.AppCodec().MarshalBinaryBare(&merkleProof)
+ proof, err := app.AppCodec().Marshal(&merkleProof)
suite.Require().NoError(err)
suite.proof = proof
diff --git a/modules/core/04-channel/client/utils/utils.go b/modules/core/04-channel/client/utils/utils.go
index f1384ad7..ec70a8f6 100644
--- a/modules/core/04-channel/client/utils/utils.go
+++ b/modules/core/04-channel/client/utils/utils.go
@@ -50,7 +50,7 @@ func queryChannelABCI(clientCtx client.Context, portID, channelID string) (*type
cdc := codec.NewProtoCodec(clientCtx.InterfaceRegistry)
var channel types.Channel
- if err := cdc.UnmarshalBinaryBare(value, &channel); err != nil {
+ if err := cdc.Unmarshal(value, &channel); err != nil {
return nil, err
}
diff --git a/modules/core/04-channel/keeper/grpc_query.go b/modules/core/04-channel/keeper/grpc_query.go
index d7c29a4f..5f215591 100644
--- a/modules/core/04-channel/keeper/grpc_query.go
+++ b/modules/core/04-channel/keeper/grpc_query.go
@@ -56,7 +56,7 @@ func (q Keeper) Channels(c context.Context, req *types.QueryChannelsRequest) (*t
pageRes, err := query.Paginate(store, req.Pagination, func(key, value []byte) error {
var result types.Channel
- if err := q.cdc.UnmarshalBinaryBare(value, &result); err != nil {
+ if err := q.cdc.Unmarshal(value, &result); err != nil {
return err
}
@@ -99,7 +99,7 @@ func (q Keeper) ConnectionChannels(c context.Context, req *types.QueryConnection
pageRes, err := query.Paginate(store, req.Pagination, func(key, value []byte) error {
var result types.Channel
- if err := q.cdc.UnmarshalBinaryBare(value, &result); err != nil {
+ if err := q.cdc.Unmarshal(value, &result); err != nil {
return err
}
diff --git a/modules/core/04-channel/keeper/keeper.go b/modules/core/04-channel/keeper/keeper.go
index 3ffe8ed4..2250ef6e 100644
--- a/modules/core/04-channel/keeper/keeper.go
+++ b/modules/core/04-channel/keeper/keeper.go
@@ -26,7 +26,7 @@ type Keeper struct {
types.QueryServer
storeKey sdk.StoreKey
- cdc codec.BinaryMarshaler
+ cdc codec.BinaryCodec
clientKeeper types.ClientKeeper
connectionKeeper types.ConnectionKeeper
portKeeper types.PortKeeper
@@ -35,7 +35,7 @@ type Keeper struct {
// NewKeeper creates a new IBC channel Keeper instance
func NewKeeper(
- cdc codec.BinaryMarshaler, key sdk.StoreKey,
+ cdc codec.BinaryCodec, key sdk.StoreKey,
clientKeeper types.ClientKeeper, connectionKeeper types.ConnectionKeeper,
portKeeper types.PortKeeper, scopedKeeper capabilitykeeper.ScopedKeeper,
) Keeper {
@@ -73,14 +73,14 @@ func (k Keeper) GetChannel(ctx sdk.Context, portID, channelID string) (types.Cha
}
var channel types.Channel
- k.cdc.MustUnmarshalBinaryBare(bz, &channel)
+ k.cdc.MustUnmarshal(bz, &channel)
return channel, true
}
// SetChannel sets a channel to the store
func (k Keeper) SetChannel(ctx sdk.Context, portID, channelID string, channel types.Channel) {
store := ctx.KVStore(k.storeKey)
- bz := k.cdc.MustMarshalBinaryBare(&channel)
+ bz := k.cdc.MustMarshal(&channel)
store.Set(host.ChannelKey(portID, channelID), bz)
}
@@ -362,7 +362,7 @@ func (k Keeper) IterateChannels(ctx sdk.Context, cb func(types.IdentifiedChannel
defer iterator.Close()
for ; iterator.Valid(); iterator.Next() {
var channel types.Channel
- k.cdc.MustUnmarshalBinaryBare(iterator.Value(), &channel)
+ k.cdc.MustUnmarshal(iterator.Value(), &channel)
portID, channelID := host.MustParseChannelPath(string(iterator.Key()))
identifiedChannel := types.NewIdentifiedChannel(portID, channelID, channel)
diff --git a/modules/core/04-channel/simulation/decoder.go b/modules/core/04-channel/simulation/decoder.go
index d96e95f3..59e56e96 100644
--- a/modules/core/04-channel/simulation/decoder.go
+++ b/modules/core/04-channel/simulation/decoder.go
@@ -13,12 +13,12 @@ import (
// NewDecodeStore returns a decoder function closure that unmarshals the KVPair's
// Value to the corresponding channel type.
-func NewDecodeStore(cdc codec.BinaryMarshaler, kvA, kvB kv.Pair) (string, bool) {
+func NewDecodeStore(cdc codec.BinaryCodec, kvA, kvB kv.Pair) (string, bool) {
switch {
case bytes.HasPrefix(kvA.Key, []byte(host.KeyChannelEndPrefix)):
var channelA, channelB types.Channel
- cdc.MustUnmarshalBinaryBare(kvA.Value, &channelA)
- cdc.MustUnmarshalBinaryBare(kvB.Value, &channelB)
+ cdc.MustUnmarshal(kvA.Value, &channelA)
+ cdc.MustUnmarshal(kvB.Value, &channelB)
return fmt.Sprintf("Channel A: %v\nChannel B: %v", channelA, channelB), true
case bytes.HasPrefix(kvA.Key, []byte(host.KeyNextSeqSendPrefix)):
diff --git a/modules/core/04-channel/simulation/decoder_test.go b/modules/core/04-channel/simulation/decoder_test.go
index 5b6b83d2..0f6f83a9 100644
--- a/modules/core/04-channel/simulation/decoder_test.go
+++ b/modules/core/04-channel/simulation/decoder_test.go
@@ -32,7 +32,7 @@ func TestDecodeStore(t *testing.T) {
Pairs: []kv.Pair{
{
Key: host.ChannelKey(portID, channelID),
- Value: cdc.MustMarshalBinaryBare(&channel),
+ Value: cdc.MustMarshal(&channel),
},
{
Key: host.NextSequenceSendKey(portID, channelID),
diff --git a/modules/core/04-channel/types/msgs_test.go b/modules/core/04-channel/types/msgs_test.go
index daa2195b..a296520e 100644
--- a/modules/core/04-channel/types/msgs_test.go
+++ b/modules/core/04-channel/types/msgs_test.go
@@ -95,7 +95,7 @@ func (suite *TypesTestSuite) SetupTest() {
merkleProof, err := commitmenttypes.ConvertProofs(res.ProofOps)
suite.Require().NoError(err)
- proof, err := app.AppCodec().MarshalBinaryBare(&merkleProof)
+ proof, err := app.AppCodec().Marshal(&merkleProof)
suite.Require().NoError(err)
suite.proof = proof
diff --git a/modules/core/04-channel/types/packet.go b/modules/core/04-channel/types/packet.go
index 092f2b6b..5e9c56e6 100644
--- a/modules/core/04-channel/types/packet.go
+++ b/modules/core/04-channel/types/packet.go
@@ -15,7 +15,7 @@ import (
// sha256_hash(timeout_timestamp + timeout_height.RevisionNumber + timeout_height.RevisionHeight + sha256_hash(data))
// from a given packet. This results in a fixed length preimage.
// NOTE: sdk.Uint64ToBigEndian sets the uint64 to a slice of length 8.
-func CommitPacket(cdc codec.BinaryMarshaler, packet exported.PacketI) []byte {
+func CommitPacket(cdc codec.BinaryCodec, packet exported.PacketI) []byte {
timeoutHeight := packet.GetTimeoutHeight()
buf := sdk.Uint64ToBigEndian(packet.GetTimeoutTimestamp())
diff --git a/modules/core/client/query.go b/modules/core/client/query.go
index fbfeae04..4b954189 100644
--- a/modules/core/client/query.go
+++ b/modules/core/client/query.go
@@ -57,7 +57,7 @@ func QueryTendermintProof(clientCtx client.Context, key []byte) ([]byte, []byte,
cdc := codec.NewProtoCodec(clientCtx.InterfaceRegistry)
- proofBz, err := cdc.MarshalBinaryBare(&merkleProof)
+ proofBz, err := cdc.Marshal(&merkleProof)
if err != nil {
return nil, nil, clienttypes.Height{}, err
}
diff --git a/modules/core/exported/client.go b/modules/core/exported/client.go
index 890a93d1..b79106c1 100644
--- a/modules/core/exported/client.go
+++ b/modules/core/exported/client.go
@@ -51,20 +51,20 @@ type ClientState interface {
// Initialization function
// Clients must validate the initial consensus state, and may store any client-specific metadata
// necessary for correct light client operation
- Initialize(sdk.Context, codec.BinaryMarshaler, sdk.KVStore, ConsensusState) error
+ Initialize(sdk.Context, codec.BinaryCodec, sdk.KVStore, ConsensusState) error
// Status function
// Clients must return their status. Only Active clients are allowed to process packets.
- Status(ctx sdk.Context, clientStore sdk.KVStore, cdc codec.BinaryMarshaler) Status
+ Status(ctx sdk.Context, clientStore sdk.KVStore, cdc codec.BinaryCodec) Status
// Genesis function
ExportMetadata(sdk.KVStore) []GenesisMetadata
// Update and Misbehaviour functions
- CheckHeaderAndUpdateState(sdk.Context, codec.BinaryMarshaler, sdk.KVStore, Header) (ClientState, ConsensusState, error)
- CheckMisbehaviourAndUpdateState(sdk.Context, codec.BinaryMarshaler, sdk.KVStore, Misbehaviour) (ClientState, error)
- CheckSubstituteAndUpdateState(ctx sdk.Context, cdc codec.BinaryMarshaler, subjectClientStore, substituteClientStore sdk.KVStore, substituteClient ClientState, height Height) (ClientState, error)
+ CheckHeaderAndUpdateState(sdk.Context, codec.BinaryCodec, sdk.KVStore, Header) (ClientState, ConsensusState, error)
+ CheckMisbehaviourAndUpdateState(sdk.Context, codec.BinaryCodec, sdk.KVStore, Misbehaviour) (ClientState, error)
+ CheckSubstituteAndUpdateState(ctx sdk.Context, cdc codec.BinaryCodec, subjectClientStore, substituteClientStore sdk.KVStore, substituteClient ClientState, height Height) (ClientState, error)
// Upgrade functions
// NOTE: proof heights are not included as upgrade to a new revision is expected to pass only on the last
@@ -74,7 +74,7 @@ type ClientState interface {
// may be cancelled or modified before the last planned height.
VerifyUpgradeAndUpdateState(
ctx sdk.Context,
- cdc codec.BinaryMarshaler,
+ cdc codec.BinaryCodec,
store sdk.KVStore,
newClient ClientState,
newConsState ConsensusState,
@@ -90,7 +90,7 @@ type ClientState interface {
VerifyClientState(
store sdk.KVStore,
- cdc codec.BinaryMarshaler,
+ cdc codec.BinaryCodec,
height Height,
prefix Prefix,
counterpartyClientIdentifier string,
@@ -99,7 +99,7 @@ type ClientState interface {
) error
VerifyClientConsensusState(
store sdk.KVStore,
- cdc codec.BinaryMarshaler,
+ cdc codec.BinaryCodec,
height Height,
counterpartyClientIdentifier string,
consensusHeight Height,
@@ -109,7 +109,7 @@ type ClientState interface {
) error
VerifyConnectionState(
store sdk.KVStore,
- cdc codec.BinaryMarshaler,
+ cdc codec.BinaryCodec,
height Height,
prefix Prefix,
proof []byte,
@@ -118,7 +118,7 @@ type ClientState interface {
) error
VerifyChannelState(
store sdk.KVStore,
- cdc codec.BinaryMarshaler,
+ cdc codec.BinaryCodec,
height Height,
prefix Prefix,
proof []byte,
@@ -128,7 +128,7 @@ type ClientState interface {
) error
VerifyPacketCommitment(
store sdk.KVStore,
- cdc codec.BinaryMarshaler,
+ cdc codec.BinaryCodec,
height Height,
currentTimestamp uint64,
delayPeriod uint64,
@@ -141,7 +141,7 @@ type ClientState interface {
) error
VerifyPacketAcknowledgement(
store sdk.KVStore,
- cdc codec.BinaryMarshaler,
+ cdc codec.BinaryCodec,
height Height,
currentTimestamp uint64,
delayPeriod uint64,
@@ -154,7 +154,7 @@ type ClientState interface {
) error
VerifyPacketReceiptAbsence(
store sdk.KVStore,
- cdc codec.BinaryMarshaler,
+ cdc codec.BinaryCodec,
height Height,
currentTimestamp uint64,
delayPeriod uint64,
@@ -166,7 +166,7 @@ type ClientState interface {
) error
VerifyNextSequenceRecv(
store sdk.KVStore,
- cdc codec.BinaryMarshaler,
+ cdc codec.BinaryCodec,
height Height,
currentTimestamp uint64,
delayPeriod uint64,
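Every verification hook on the `ClientState` interface now takes `codec.BinaryCodec`, so light client implementations and their callers change in lockstep. A minimal sketch of a call site against the updated interface (the wrapper function and its arguments are illustrative, not part of this patch):

```go
package example

import (
	"github.com/cosmos/cosmos-sdk/codec"
	sdk "github.com/cosmos/cosmos-sdk/types"

	"github.com/cosmos/ibc-go/modules/core/exported"
)

// statusOf queries a client's status through the updated interface, which
// now threads codec.BinaryCodec instead of codec.BinaryMarshaler.
func statusOf(ctx sdk.Context, clientStore sdk.KVStore, cdc codec.BinaryCodec, cs exported.ClientState) exported.Status {
	return cs.Status(ctx, clientStore, cdc)
}
```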
diff --git a/modules/core/keeper/keeper.go b/modules/core/keeper/keeper.go
index df83f1ff..0320bb46 100644
--- a/modules/core/keeper/keeper.go
+++ b/modules/core/keeper/keeper.go
@@ -21,7 +21,7 @@ type Keeper struct {
// implements gRPC QueryServer interface
types.QueryServer
- cdc codec.BinaryMarshaler
+ cdc codec.BinaryCodec
ClientKeeper clientkeeper.Keeper
ConnectionKeeper connectionkeeper.Keeper
@@ -32,7 +32,7 @@ type Keeper struct {
// NewKeeper creates a new ibc Keeper
func NewKeeper(
- cdc codec.BinaryMarshaler, key sdk.StoreKey, paramSpace paramtypes.Subspace,
+ cdc codec.BinaryCodec, key sdk.StoreKey, paramSpace paramtypes.Subspace,
stakingKeeper clienttypes.StakingKeeper, upgradeKeeper clienttypes.UpgradeKeeper,
scopedKeeper capabilitykeeper.ScopedKeeper,
) *Keeper {
@@ -51,7 +51,7 @@ func NewKeeper(
}
// Codec returns the IBC module codec.
-func (k Keeper) Codec() codec.BinaryMarshaler {
+func (k Keeper) Codec() codec.BinaryCodec {
return k.cdc
}
diff --git a/modules/core/module.go b/modules/core/module.go
index 45c53abb..c00d6448 100644
--- a/modules/core/module.go
+++ b/modules/core/module.go
@@ -50,12 +50,12 @@ func (AppModuleBasic) RegisterLegacyAminoCodec(*codec.LegacyAmino) {}
// DefaultGenesis returns default genesis state as raw bytes for the ibc
// module.
-func (AppModuleBasic) DefaultGenesis(cdc codec.JSONMarshaler) json.RawMessage {
+func (AppModuleBasic) DefaultGenesis(cdc codec.JSONCodec) json.RawMessage {
return cdc.MustMarshalJSON(types.DefaultGenesisState())
}
// ValidateGenesis performs genesis state validation for the ibc module.
-func (AppModuleBasic) ValidateGenesis(cdc codec.JSONMarshaler, config client.TxEncodingConfig, bz json.RawMessage) error {
+func (AppModuleBasic) ValidateGenesis(cdc codec.JSONCodec, config client.TxEncodingConfig, bz json.RawMessage) error {
var gs types.GenesisState
if err := cdc.UnmarshalJSON(bz, &gs); err != nil {
return fmt.Errorf("failed to unmarshal %s genesis state: %w", host.ModuleName, err)
@@ -140,7 +140,7 @@ func (am AppModule) RegisterServices(cfg module.Configurator) {
// InitGenesis performs genesis initialization for the ibc module. It returns
// no validator updates.
-func (am AppModule) InitGenesis(ctx sdk.Context, cdc codec.JSONMarshaler, bz json.RawMessage) []abci.ValidatorUpdate {
+func (am AppModule) InitGenesis(ctx sdk.Context, cdc codec.JSONCodec, bz json.RawMessage) []abci.ValidatorUpdate {
var gs types.GenesisState
err := cdc.UnmarshalJSON(bz, &gs)
if err != nil {
@@ -152,7 +152,7 @@ func (am AppModule) InitGenesis(ctx sdk.Context, cdc codec.JSONMarshaler, bz jso
// ExportGenesis returns the exported genesis state as raw bytes for the ibc
// module.
-func (am AppModule) ExportGenesis(ctx sdk.Context, cdc codec.JSONMarshaler) json.RawMessage {
+func (am AppModule) ExportGenesis(ctx sdk.Context, cdc codec.JSONCodec) json.RawMessage {
return cdc.MustMarshalJSON(ExportGenesis(ctx, *am.keeper))
}
diff --git a/modules/core/simulation/decoder_test.go b/modules/core/simulation/decoder_test.go
index 0817d8f2..af023fed 100644
--- a/modules/core/simulation/decoder_test.go
+++ b/modules/core/simulation/decoder_test.go
@@ -45,11 +45,11 @@ func TestDecodeStore(t *testing.T) {
},
{
Key: host.ConnectionKey(connectionID),
- Value: app.IBCKeeper.Codec().MustMarshalBinaryBare(&connection),
+ Value: app.IBCKeeper.Codec().MustMarshal(&connection),
},
{
Key: host.ChannelKey(portID, channelID),
- Value: app.IBCKeeper.Codec().MustMarshalBinaryBare(&channel),
+ Value: app.IBCKeeper.Codec().MustMarshal(&channel),
},
{
Key: []byte{0x99},
diff --git a/modules/light-clients/06-solomachine/spec/01_concepts.md b/modules/light-clients/06-solomachine/spec/01_concepts.md
index a121803f..49b6ddaa 100644
--- a/modules/light-clients/06-solomachine/spec/01_concepts.md
+++ b/modules/light-clients/06-solomachine/spec/01_concepts.md
@@ -53,7 +53,7 @@ data := &ClientStateData{
ClientState: any,
}
-dataBz, err := cdc.MarshalBinaryBare(data)
+dataBz, err := cdc.Marshal(data)
```
The helper functions `...DataBytes()` in [proofs.go](../types/proofs.go) handle this
@@ -72,7 +72,7 @@ signBytes := &SignBytes{
Data: dataBz,
}
-signBz, err := cdc.MarshalBinaryBare(signBytes)
+signBz, err := cdc.Marshal(signBytes)
```
The helper functions `...SignBytes()` in [proofs.go](../types/proofs.go) handle this functionality.
@@ -91,7 +91,7 @@ sigData := &signing.SingleSignatureData{
}
protoSigData := signing.SignatureDataToProto(sigData)
-bz, err := cdc.MarshalBinaryBare(protoSigData)
+bz, err := cdc.Marshal(protoSigData)
```
4. Construct a `TimestampedSignatureData` and marshal it. The marshaled result can be passed in
@@ -105,7 +105,7 @@ timestampedSignatureData := &types.TimestampedSignatureData{
Timestamp: solomachine.Time,
}
-proof, err := cdc.MarshalBinaryBare(timestampedSignatureData)
+proof, err := cdc.Marshal(timestampedSignatureData)
```
NOTE: At the end of this process, the sequence associated with the key needs to be updated.
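The final step above wraps the proto-encoded signature in a `TimestampedSignatureData` and marshals it into the proof. A minimal sketch of that step with the renamed method, assuming this repository's solo machine types package and that `bz` holds the proto-encoded signature data and `timestamp` the signing time:

```go
package example

import (
	"github.com/cosmos/cosmos-sdk/codec"

	solomachinetypes "github.com/cosmos/ibc-go/modules/light-clients/06-solomachine/types"
)

// buildProof assembles the TimestampedSignatureData proof; cdc.Marshal
// replaces the former cdc.MarshalBinaryBare call shown above.
func buildProof(cdc codec.BinaryCodec, bz []byte, timestamp uint64) ([]byte, error) {
	timestampedSignatureData := &solomachinetypes.TimestampedSignatureData{
		SignatureData: bz,
		Timestamp:     timestamp,
	}
	return cdc.Marshal(timestampedSignatureData)
}
```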
diff --git a/modules/light-clients/06-solomachine/types/client_state.go b/modules/light-clients/06-solomachine/types/client_state.go
index a45ccc31..7ce5fa1f 100644
--- a/modules/light-clients/06-solomachine/types/client_state.go
+++ b/modules/light-clients/06-solomachine/types/client_state.go
@@ -44,7 +44,7 @@ func (cs ClientState) GetLatestHeight() exported.Height {
// The client may be:
// - Active: if frozen sequence is 0
// - Frozen: otherwise solo machine is frozen
-func (cs ClientState) Status(_ sdk.Context, _ sdk.KVStore, _ codec.BinaryMarshaler) exported.Status {
+func (cs ClientState) Status(_ sdk.Context, _ sdk.KVStore, _ codec.BinaryCodec) exported.Status {
if cs.FrozenSequence != 0 {
return exported.Frozen
}
@@ -89,7 +89,7 @@ func (cs ClientState) ZeroCustomFields() exported.ClientState {
}
// Initialize will check that initial consensus state is equal to the latest consensus state of the initial client.
-func (cs ClientState) Initialize(_ sdk.Context, _ codec.BinaryMarshaler, _ sdk.KVStore, consState exported.ConsensusState) error {
+func (cs ClientState) Initialize(_ sdk.Context, _ codec.BinaryCodec, _ sdk.KVStore, consState exported.ConsensusState) error {
if !reflect.DeepEqual(cs.ConsensusState, consState) {
return sdkerrors.Wrapf(clienttypes.ErrInvalidConsensus, "consensus state in initial client does not equal initial consensus state. expected: %s, got: %s",
cs.ConsensusState, consState)
@@ -104,7 +104,7 @@ func (cs ClientState) ExportMetadata(_ sdk.KVStore) []exported.GenesisMetadata {
// VerifyUpgradeAndUpdateState returns an error since solomachine client does not support upgrades
func (cs ClientState) VerifyUpgradeAndUpdateState(
- _ sdk.Context, _ codec.BinaryMarshaler, _ sdk.KVStore,
+ _ sdk.Context, _ codec.BinaryCodec, _ sdk.KVStore,
_ exported.ClientState, _ exported.ConsensusState, _, _ []byte,
) (exported.ClientState, exported.ConsensusState, error) {
return nil, nil, sdkerrors.Wrap(clienttypes.ErrInvalidUpgradeClient, "cannot upgrade solomachine client")
@@ -114,7 +114,7 @@ func (cs ClientState) VerifyUpgradeAndUpdateState(
// stored on the solo machine.
func (cs *ClientState) VerifyClientState(
store sdk.KVStore,
- cdc codec.BinaryMarshaler,
+ cdc codec.BinaryCodec,
height exported.Height,
prefix exported.Prefix,
counterpartyClientIdentifier string,
@@ -151,7 +151,7 @@ func (cs *ClientState) VerifyClientState(
// running chain stored on the solo machine.
func (cs *ClientState) VerifyClientConsensusState(
store sdk.KVStore,
- cdc codec.BinaryMarshaler,
+ cdc codec.BinaryCodec,
height exported.Height,
counterpartyClientIdentifier string,
consensusHeight exported.Height,
@@ -189,7 +189,7 @@ func (cs *ClientState) VerifyClientConsensusState(
// specified connection end stored on the target machine.
func (cs *ClientState) VerifyConnectionState(
store sdk.KVStore,
- cdc codec.BinaryMarshaler,
+ cdc codec.BinaryCodec,
height exported.Height,
prefix exported.Prefix,
proof []byte,
@@ -226,7 +226,7 @@ func (cs *ClientState) VerifyConnectionState(
// channel end, under the specified port, stored on the target machine.
func (cs *ClientState) VerifyChannelState(
store sdk.KVStore,
- cdc codec.BinaryMarshaler,
+ cdc codec.BinaryCodec,
height exported.Height,
prefix exported.Prefix,
proof []byte,
@@ -264,7 +264,7 @@ func (cs *ClientState) VerifyChannelState(
// the specified port, specified channel, and specified sequence.
func (cs *ClientState) VerifyPacketCommitment(
store sdk.KVStore,
- cdc codec.BinaryMarshaler,
+ cdc codec.BinaryCodec,
height exported.Height,
_ uint64,
_ uint64,
@@ -305,7 +305,7 @@ func (cs *ClientState) VerifyPacketCommitment(
// acknowledgement at the specified port, specified channel, and specified sequence.
func (cs *ClientState) VerifyPacketAcknowledgement(
store sdk.KVStore,
- cdc codec.BinaryMarshaler,
+ cdc codec.BinaryCodec,
height exported.Height,
_ uint64,
_ uint64,
@@ -347,7 +347,7 @@ func (cs *ClientState) VerifyPacketAcknowledgement(
// specified sequence.
func (cs *ClientState) VerifyPacketReceiptAbsence(
store sdk.KVStore,
- cdc codec.BinaryMarshaler,
+ cdc codec.BinaryCodec,
height exported.Height,
_ uint64,
_ uint64,
@@ -387,7 +387,7 @@ func (cs *ClientState) VerifyPacketReceiptAbsence(
// received of the specified channel at the specified port.
func (cs *ClientState) VerifyNextSequenceRecv(
store sdk.KVStore,
- cdc codec.BinaryMarshaler,
+ cdc codec.BinaryCodec,
height exported.Height,
_ uint64,
_ uint64,
@@ -428,7 +428,7 @@ func (cs *ClientState) VerifyNextSequenceRecv(
// consensus state, the unmarshalled proof representing the signature and timestamp
// along with the solo-machine sequence encoded in the proofHeight.
func produceVerificationArgs(
- cdc codec.BinaryMarshaler,
+ cdc codec.BinaryCodec,
cs *ClientState,
height exported.Height,
prefix exported.Prefix,
@@ -457,7 +457,7 @@ func produceVerificationArgs(
}
timestampedSigData := &TimestampedSignatureData{}
- if err := cdc.UnmarshalBinaryBare(proof, timestampedSigData); err != nil {
+ if err := cdc.Unmarshal(proof, timestampedSigData); err != nil {
return nil, nil, 0, 0, sdkerrors.Wrapf(err, "failed to unmarshal proof into type %T", timestampedSigData)
}
@@ -497,7 +497,7 @@ func produceVerificationArgs(
}
// sets the client state to the store
-func setClientState(store sdk.KVStore, cdc codec.BinaryMarshaler, clientState exported.ClientState) {
+func setClientState(store sdk.KVStore, cdc codec.BinaryCodec, clientState exported.ClientState) {
bz := clienttypes.MustMarshalClientState(cdc, clientState)
store.Set([]byte(host.KeyClientState), bz)
}
diff --git a/modules/light-clients/06-solomachine/types/client_state_test.go b/modules/light-clients/06-solomachine/types/client_state_test.go
index bd2587e3..a3e6b703 100644
--- a/modules/light-clients/06-solomachine/types/client_state_test.go
+++ b/modules/light-clients/06-solomachine/types/client_state_test.go
@@ -164,7 +164,7 @@ func (suite *SoloMachineTestSuite) TestVerifyClientState() {
Timestamp: solomachine.Time,
}
- proof, err := suite.chainA.Codec.MarshalBinaryBare(signatureDoc)
+ proof, err := suite.chainA.Codec.Marshal(signatureDoc)
suite.Require().NoError(err)
testCases := []struct {
@@ -294,7 +294,7 @@ func (suite *SoloMachineTestSuite) TestVerifyClientConsensusState() {
Timestamp: solomachine.Time,
}
- proof, err := suite.chainA.Codec.MarshalBinaryBare(signatureDoc)
+ proof, err := suite.chainA.Codec.Marshal(signatureDoc)
suite.Require().NoError(err)
testCases := []struct {
@@ -420,7 +420,7 @@ func (suite *SoloMachineTestSuite) TestVerifyConnectionState() {
Timestamp: solomachine.Time,
}
- proof, err := suite.chainA.Codec.MarshalBinaryBare(signatureDoc)
+ proof, err := suite.chainA.Codec.Marshal(signatureDoc)
suite.Require().NoError(err)
testCases := []struct {
@@ -510,7 +510,7 @@ func (suite *SoloMachineTestSuite) TestVerifyChannelState() {
Timestamp: solomachine.Time,
}
- proof, err := suite.chainA.Codec.MarshalBinaryBare(signatureDoc)
+ proof, err := suite.chainA.Codec.Marshal(signatureDoc)
suite.Require().NoError(err)
testCases := []struct {
@@ -599,7 +599,7 @@ func (suite *SoloMachineTestSuite) TestVerifyPacketCommitment() {
Timestamp: solomachine.Time,
}
- proof, err := suite.chainA.Codec.MarshalBinaryBare(signatureDoc)
+ proof, err := suite.chainA.Codec.Marshal(signatureDoc)
suite.Require().NoError(err)
testCases := []struct {
@@ -686,7 +686,7 @@ func (suite *SoloMachineTestSuite) TestVerifyPacketAcknowledgement() {
Timestamp: solomachine.Time,
}
- proof, err := suite.chainA.Codec.MarshalBinaryBare(signatureDoc)
+ proof, err := suite.chainA.Codec.Marshal(signatureDoc)
suite.Require().NoError(err)
testCases := []struct {
@@ -773,7 +773,7 @@ func (suite *SoloMachineTestSuite) TestVerifyPacketReceiptAbsence() {
Timestamp: solomachine.Time,
}
- proof, err := suite.chainA.Codec.MarshalBinaryBare(signatureDoc)
+ proof, err := suite.chainA.Codec.Marshal(signatureDoc)
suite.Require().NoError(err)
testCases := []struct {
@@ -860,7 +860,7 @@ func (suite *SoloMachineTestSuite) TestVerifyNextSeqRecv() {
Timestamp: solomachine.Time,
}
- proof, err := suite.chainA.Codec.MarshalBinaryBare(signatureDoc)
+ proof, err := suite.chainA.Codec.Marshal(signatureDoc)
suite.Require().NoError(err)
testCases := []struct {
diff --git a/modules/light-clients/06-solomachine/types/codec.go b/modules/light-clients/06-solomachine/types/codec.go
index 833c9c37..0b691025 100644
--- a/modules/light-clients/06-solomachine/types/codec.go
+++ b/modules/light-clients/06-solomachine/types/codec.go
@@ -30,9 +30,9 @@ func RegisterInterfaces(registry codectypes.InterfaceRegistry) {
)
}
-func UnmarshalSignatureData(cdc codec.BinaryMarshaler, data []byte) (signing.SignatureData, error) {
+func UnmarshalSignatureData(cdc codec.BinaryCodec, data []byte) (signing.SignatureData, error) {
protoSigData := &signing.SignatureDescriptor_Data{}
- if err := cdc.UnmarshalBinaryBare(data, protoSigData); err != nil {
+ if err := cdc.Unmarshal(data, protoSigData); err != nil {
return nil, sdkerrors.Wrapf(err, "failed to unmarshal proof into type %T", protoSigData)
}
@@ -43,7 +43,7 @@ func UnmarshalSignatureData(cdc codec.BinaryMarshaler, data []byte) (signing.Sig
// UnmarshalDataByType attempts to unmarshal the data to the specified type. An error is
// returned if it fails.
-func UnmarshalDataByType(cdc codec.BinaryMarshaler, dataType DataType, data []byte) (Data, error) {
+func UnmarshalDataByType(cdc codec.BinaryCodec, dataType DataType, data []byte) (Data, error) {
if len(data) == 0 {
return nil, sdkerrors.Wrap(ErrInvalidSignatureAndData, "data cannot be empty")
}
@@ -54,7 +54,7 @@ func UnmarshalDataByType(cdc codec.BinaryMarshaler, dataType DataType, data []by
case CLIENT:
clientData := &ClientStateData{}
- if err := cdc.UnmarshalBinaryBare(data, clientData); err != nil {
+ if err := cdc.Unmarshal(data, clientData); err != nil {
return nil, err
}
@@ -66,7 +66,7 @@ func UnmarshalDataByType(cdc codec.BinaryMarshaler, dataType DataType, data []by
case CONSENSUS:
consensusData := &ConsensusStateData{}
- if err := cdc.UnmarshalBinaryBare(data, consensusData); err != nil {
+ if err := cdc.Unmarshal(data, consensusData); err != nil {
return nil, err
}
@@ -78,7 +78,7 @@ func UnmarshalDataByType(cdc codec.BinaryMarshaler, dataType DataType, data []by
case CONNECTION:
connectionData := &ConnectionStateData{}
- if err := cdc.UnmarshalBinaryBare(data, connectionData); err != nil {
+ if err := cdc.Unmarshal(data, connectionData); err != nil {
return nil, err
}
@@ -86,7 +86,7 @@ func UnmarshalDataByType(cdc codec.BinaryMarshaler, dataType DataType, data []by
case CHANNEL:
channelData := &ChannelStateData{}
- if err := cdc.UnmarshalBinaryBare(data, channelData); err != nil {
+ if err := cdc.Unmarshal(data, channelData); err != nil {
return nil, err
}
@@ -94,7 +94,7 @@ func UnmarshalDataByType(cdc codec.BinaryMarshaler, dataType DataType, data []by
case PACKETCOMMITMENT:
commitmentData := &PacketCommitmentData{}
- if err := cdc.UnmarshalBinaryBare(data, commitmentData); err != nil {
+ if err := cdc.Unmarshal(data, commitmentData); err != nil {
return nil, err
}
@@ -102,7 +102,7 @@ func UnmarshalDataByType(cdc codec.BinaryMarshaler, dataType DataType, data []by
case PACKETACKNOWLEDGEMENT:
ackData := &PacketAcknowledgementData{}
- if err := cdc.UnmarshalBinaryBare(data, ackData); err != nil {
+ if err := cdc.Unmarshal(data, ackData); err != nil {
return nil, err
}
@@ -110,7 +110,7 @@ func UnmarshalDataByType(cdc codec.BinaryMarshaler, dataType DataType, data []by
case PACKETRECEIPTABSENCE:
receiptAbsenceData := &PacketReceiptAbsenceData{}
- if err := cdc.UnmarshalBinaryBare(data, receiptAbsenceData); err != nil {
+ if err := cdc.Unmarshal(data, receiptAbsenceData); err != nil {
return nil, err
}
@@ -118,7 +118,7 @@ func UnmarshalDataByType(cdc codec.BinaryMarshaler, dataType DataType, data []by
case NEXTSEQUENCERECV:
nextSeqRecvData := &NextSequenceRecvData{}
- if err := cdc.UnmarshalBinaryBare(data, nextSeqRecvData); err != nil {
+ if err := cdc.Unmarshal(data, nextSeqRecvData); err != nil {
return nil, err
}
diff --git a/modules/light-clients/06-solomachine/types/misbehaviour_handle.go b/modules/light-clients/06-solomachine/types/misbehaviour_handle.go
index 2597e5e3..b50c3883 100644
--- a/modules/light-clients/06-solomachine/types/misbehaviour_handle.go
+++ b/modules/light-clients/06-solomachine/types/misbehaviour_handle.go
@@ -16,7 +16,7 @@ import (
// order processing dependent.
func (cs ClientState) CheckMisbehaviourAndUpdateState(
ctx sdk.Context,
- cdc codec.BinaryMarshaler,
+ cdc codec.BinaryCodec,
clientStore sdk.KVStore,
misbehaviour exported.Misbehaviour,
) (exported.ClientState, error) {
@@ -53,7 +53,7 @@ func (cs ClientState) CheckMisbehaviourAndUpdateState(
// verifySignatureAndData verifies that the currently registered public key has signed
// over the provided data and that the data is valid. The data is valid if it can be
// unmarshaled into the specified data type.
-func verifySignatureAndData(cdc codec.BinaryMarshaler, clientState ClientState, misbehaviour *Misbehaviour, sigAndData *SignatureAndData) error {
+func verifySignatureAndData(cdc codec.BinaryCodec, clientState ClientState, misbehaviour *Misbehaviour, sigAndData *SignatureAndData) error {
// do not check misbehaviour timestamp since we want to allow processing of past misbehaviour
diff --git a/modules/light-clients/06-solomachine/types/misbehaviour_handle_test.go b/modules/light-clients/06-solomachine/types/misbehaviour_handle_test.go
index 9f35f7ed..cb13f4a7 100644
--- a/modules/light-clients/06-solomachine/types/misbehaviour_handle_test.go
+++ b/modules/light-clients/06-solomachine/types/misbehaviour_handle_test.go
@@ -120,7 +120,7 @@ func (suite *SoloMachineTestSuite) TestCheckMisbehaviourAndUpdateState() {
Data: msg,
}
- data, err := suite.chainA.Codec.MarshalBinaryBare(signBytes)
+ data, err := suite.chainA.Codec.Marshal(signBytes)
suite.Require().NoError(err)
sig := solomachine.GenerateSignature(data)
@@ -148,7 +148,7 @@ func (suite *SoloMachineTestSuite) TestCheckMisbehaviourAndUpdateState() {
Data: msg,
}
- data, err := suite.chainA.Codec.MarshalBinaryBare(signBytes)
+ data, err := suite.chainA.Codec.Marshal(signBytes)
suite.Require().NoError(err)
sig := solomachine.GenerateSignature(data)
@@ -209,7 +209,7 @@ func (suite *SoloMachineTestSuite) TestCheckMisbehaviourAndUpdateState() {
Data: msg,
}
- data, err := suite.chainA.Codec.MarshalBinaryBare(signBytes)
+ data, err := suite.chainA.Codec.Marshal(signBytes)
suite.Require().NoError(err)
sig := solomachine.GenerateSignature(data)
@@ -228,7 +228,7 @@ func (suite *SoloMachineTestSuite) TestCheckMisbehaviourAndUpdateState() {
DataType: types.CLIENT,
Data: msg,
}
- data, err = suite.chainA.Codec.MarshalBinaryBare(signBytes)
+ data, err = suite.chainA.Codec.Marshal(signBytes)
suite.Require().NoError(err)
sig = solomachine.GenerateSignature(data)
diff --git a/modules/light-clients/06-solomachine/types/proof.go b/modules/light-clients/06-solomachine/types/proof.go
index 785a7b99..315195fb 100644
--- a/modules/light-clients/06-solomachine/types/proof.go
+++ b/modules/light-clients/06-solomachine/types/proof.go
@@ -50,7 +50,7 @@ func VerifySignature(pubKey cryptotypes.PubKey, signBytes []byte, sigData signin
// MisbehaviourSignBytes returns the sign bytes for verification of misbehaviour.
func MisbehaviourSignBytes(
- cdc codec.BinaryMarshaler,
+ cdc codec.BinaryCodec,
sequence, timestamp uint64,
diversifier string,
dataType DataType,
@@ -63,12 +63,12 @@ func MisbehaviourSignBytes(
Data: data,
}
- return cdc.MarshalBinaryBare(signBytes)
+ return cdc.Marshal(signBytes)
}
// HeaderSignBytes returns the sign bytes for verification of the header.
func HeaderSignBytes(
- cdc codec.BinaryMarshaler,
+ cdc codec.BinaryCodec,
header *Header,
) ([]byte, error) {
data := &HeaderData{
@@ -76,7 +76,7 @@ func HeaderSignBytes(
NewDiversifier: header.NewDiversifier,
}
- dataBz, err := cdc.MarshalBinaryBare(data)
+ dataBz, err := cdc.Marshal(data)
if err != nil {
return nil, err
}
@@ -89,13 +89,13 @@ func HeaderSignBytes(
Data: dataBz,
}
- return cdc.MarshalBinaryBare(signBytes)
+ return cdc.Marshal(signBytes)
}
// ClientStateSignBytes returns the sign bytes for verification of the
// client state.
func ClientStateSignBytes(
- cdc codec.BinaryMarshaler,
+ cdc codec.BinaryCodec,
sequence, timestamp uint64,
diversifier string,
path commitmenttypes.MerklePath,
@@ -114,13 +114,13 @@ func ClientStateSignBytes(
Data: dataBz,
}
- return cdc.MarshalBinaryBare(signBytes)
+ return cdc.Marshal(signBytes)
}
// ClientStateDataBytes returns the client state data bytes used in constructing
// SignBytes.
func ClientStateDataBytes(
- cdc codec.BinaryMarshaler,
+ cdc codec.BinaryCodec,
path commitmenttypes.MerklePath, // nolint: interfacer
clientState exported.ClientState,
) ([]byte, error) {
@@ -134,7 +134,7 @@ func ClientStateDataBytes(
ClientState: any,
}
- dataBz, err := cdc.MarshalBinaryBare(data)
+ dataBz, err := cdc.Marshal(data)
if err != nil {
return nil, err
}
@@ -145,7 +145,7 @@ func ClientStateDataBytes(
// ConsensusStateSignBytes returns the sign bytes for verification of the
// consensus state.
func ConsensusStateSignBytes(
- cdc codec.BinaryMarshaler,
+ cdc codec.BinaryCodec,
sequence, timestamp uint64,
diversifier string,
path commitmenttypes.MerklePath,
@@ -164,13 +164,13 @@ func ConsensusStateSignBytes(
Data: dataBz,
}
- return cdc.MarshalBinaryBare(signBytes)
+ return cdc.Marshal(signBytes)
}
// ConsensusStateDataBytes returns the consensus state data bytes used in constructing
// SignBytes.
func ConsensusStateDataBytes(
- cdc codec.BinaryMarshaler,
+ cdc codec.BinaryCodec,
path commitmenttypes.MerklePath, // nolint: interfacer
consensusState exported.ConsensusState,
) ([]byte, error) {
@@ -184,7 +184,7 @@ func ConsensusStateDataBytes(
ConsensusState: any,
}
- dataBz, err := cdc.MarshalBinaryBare(data)
+ dataBz, err := cdc.Marshal(data)
if err != nil {
return nil, err
}
@@ -195,7 +195,7 @@ func ConsensusStateDataBytes(
// ConnectionStateSignBytes returns the sign bytes for verification of the
// connection state.
func ConnectionStateSignBytes(
- cdc codec.BinaryMarshaler,
+ cdc codec.BinaryCodec,
sequence, timestamp uint64,
diversifier string,
path commitmenttypes.MerklePath,
@@ -214,13 +214,13 @@ func ConnectionStateSignBytes(
Data: dataBz,
}
- return cdc.MarshalBinaryBare(signBytes)
+ return cdc.Marshal(signBytes)
}
// ConnectionStateDataBytes returns the connection state data bytes used in constructing
// SignBytes.
func ConnectionStateDataBytes(
- cdc codec.BinaryMarshaler,
+ cdc codec.BinaryCodec,
path commitmenttypes.MerklePath, // nolint: interfacer
connectionEnd exported.ConnectionI,
) ([]byte, error) {
@@ -237,7 +237,7 @@ func ConnectionStateDataBytes(
Connection: &connection,
}
- dataBz, err := cdc.MarshalBinaryBare(data)
+ dataBz, err := cdc.Marshal(data)
if err != nil {
return nil, err
}
@@ -248,7 +248,7 @@ func ConnectionStateDataBytes(
// ChannelStateSignBytes returns the sign bytes for verification of the
// channel state.
func ChannelStateSignBytes(
- cdc codec.BinaryMarshaler,
+ cdc codec.BinaryCodec,
sequence, timestamp uint64,
diversifier string,
path commitmenttypes.MerklePath,
@@ -267,13 +267,13 @@ func ChannelStateSignBytes(
Data: dataBz,
}
- return cdc.MarshalBinaryBare(signBytes)
+ return cdc.Marshal(signBytes)
}
// ChannelStateDataBytes returns the channel state data bytes used in constructing
// SignBytes.
func ChannelStateDataBytes(
- cdc codec.BinaryMarshaler,
+ cdc codec.BinaryCodec,
path commitmenttypes.MerklePath, // nolint: interfacer
channelEnd exported.ChannelI,
) ([]byte, error) {
@@ -289,7 +289,7 @@ func ChannelStateDataBytes(
Channel: &channel,
}
- dataBz, err := cdc.MarshalBinaryBare(data)
+ dataBz, err := cdc.Marshal(data)
if err != nil {
return nil, err
}
@@ -300,7 +300,7 @@ func ChannelStateDataBytes(
// PacketCommitmentSignBytes returns the sign bytes for verification of the
// packet commitment.
func PacketCommitmentSignBytes(
- cdc codec.BinaryMarshaler,
+ cdc codec.BinaryCodec,
sequence, timestamp uint64,
diversifier string,
path commitmenttypes.MerklePath,
@@ -319,13 +319,13 @@ func PacketCommitmentSignBytes(
Data: dataBz,
}
- return cdc.MarshalBinaryBare(signBytes)
+ return cdc.Marshal(signBytes)
}
// PacketCommitmentDataBytes returns the packet commitment data bytes used in constructing
// SignBytes.
func PacketCommitmentDataBytes(
- cdc codec.BinaryMarshaler,
+ cdc codec.BinaryCodec,
path commitmenttypes.MerklePath, // nolint: interfacer
commitmentBytes []byte,
) ([]byte, error) {
@@ -334,7 +334,7 @@ func PacketCommitmentDataBytes(
Commitment: commitmentBytes,
}
- dataBz, err := cdc.MarshalBinaryBare(data)
+ dataBz, err := cdc.Marshal(data)
if err != nil {
return nil, err
}
@@ -345,7 +345,7 @@ func PacketCommitmentDataBytes(
// PacketAcknowledgementSignBytes returns the sign bytes for verification of
// the acknowledgement.
func PacketAcknowledgementSignBytes(
- cdc codec.BinaryMarshaler,
+ cdc codec.BinaryCodec,
sequence, timestamp uint64,
diversifier string,
path commitmenttypes.MerklePath,
@@ -364,13 +364,13 @@ func PacketAcknowledgementSignBytes(
Data: dataBz,
}
- return cdc.MarshalBinaryBare(signBytes)
+ return cdc.Marshal(signBytes)
}
// PacketAcknowledgementDataBytes returns the packet acknowledgement data bytes used in constructing
// SignBytes.
func PacketAcknowledgementDataBytes(
- cdc codec.BinaryMarshaler,
+ cdc codec.BinaryCodec,
path commitmenttypes.MerklePath, // nolint: interfacer
acknowledgement []byte,
) ([]byte, error) {
@@ -379,7 +379,7 @@ func PacketAcknowledgementDataBytes(
Acknowledgement: acknowledgement,
}
- dataBz, err := cdc.MarshalBinaryBare(data)
+ dataBz, err := cdc.Marshal(data)
if err != nil {
return nil, err
}
@@ -390,7 +390,7 @@ func PacketAcknowledgementDataBytes(
// PacketReceiptAbsenceSignBytes returns the sign bytes for verification
// of the absence of a receipt.
func PacketReceiptAbsenceSignBytes(
- cdc codec.BinaryMarshaler,
+ cdc codec.BinaryCodec,
sequence, timestamp uint64,
diversifier string,
path commitmenttypes.MerklePath,
@@ -408,20 +408,20 @@ func PacketReceiptAbsenceSignBytes(
Data: dataBz,
}
- return cdc.MarshalBinaryBare(signBytes)
+ return cdc.Marshal(signBytes)
}
// PacketReceiptAbsenceDataBytes returns the packet receipt absence data bytes
// used in constructing SignBytes.
func PacketReceiptAbsenceDataBytes(
- cdc codec.BinaryMarshaler,
+ cdc codec.BinaryCodec,
path commitmenttypes.MerklePath, // nolint: interfacer
) ([]byte, error) {
data := &PacketReceiptAbsenceData{
Path: []byte(path.String()),
}
- dataBz, err := cdc.MarshalBinaryBare(data)
+ dataBz, err := cdc.Marshal(data)
if err != nil {
return nil, err
}
@@ -432,7 +432,7 @@ func PacketReceiptAbsenceDataBytes(
// NextSequenceRecvSignBytes returns the sign bytes for verification of the next
// sequence to be received.
func NextSequenceRecvSignBytes(
- cdc codec.BinaryMarshaler,
+ cdc codec.BinaryCodec,
sequence, timestamp uint64,
diversifier string,
path commitmenttypes.MerklePath,
@@ -451,13 +451,13 @@ func NextSequenceRecvSignBytes(
Data: dataBz,
}
- return cdc.MarshalBinaryBare(signBytes)
+ return cdc.Marshal(signBytes)
}
// NextSequenceRecvDataBytes returns the next sequence recv data bytes used in constructing
// SignBytes.
func NextSequenceRecvDataBytes(
- cdc codec.BinaryMarshaler,
+ cdc codec.BinaryCodec,
path commitmenttypes.MerklePath, // nolint: interfacer
nextSequenceRecv uint64,
) ([]byte, error) {
@@ -466,7 +466,7 @@ func NextSequenceRecvDataBytes(
NextSeqRecv: nextSequenceRecv,
}
- dataBz, err := cdc.MarshalBinaryBare(data)
+ dataBz, err := cdc.Marshal(data)
if err != nil {
return nil, err
}
diff --git a/modules/light-clients/06-solomachine/types/proposal_handle.go b/modules/light-clients/06-solomachine/types/proposal_handle.go
index da96673c..342ab2e6 100644
--- a/modules/light-clients/06-solomachine/types/proposal_handle.go
+++ b/modules/light-clients/06-solomachine/types/proposal_handle.go
@@ -18,7 +18,7 @@ import (
// the substitute is not a solo machine, or the current public key equals
// the new public key.
func (cs ClientState) CheckSubstituteAndUpdateState(
- ctx sdk.Context, cdc codec.BinaryMarshaler, subjectClientStore,
+ ctx sdk.Context, cdc codec.BinaryCodec, subjectClientStore,
_ sdk.KVStore, substituteClient exported.ClientState,
_ exported.Height,
) (exported.ClientState, error) {
diff --git a/modules/light-clients/06-solomachine/types/solomachine_test.go b/modules/light-clients/06-solomachine/types/solomachine_test.go
index 2f8e559a..b6f7e818 100644
--- a/modules/light-clients/06-solomachine/types/solomachine_test.go
+++ b/modules/light-clients/06-solomachine/types/solomachine_test.go
@@ -58,7 +58,7 @@ func (suite *SoloMachineTestSuite) GetSequenceFromStore() uint64 {
}
func (suite *SoloMachineTestSuite) GetInvalidProof() []byte {
- invalidProof, err := suite.chainA.Codec.MarshalBinaryBare(&types.TimestampedSignatureData{Timestamp: suite.solomachine.Time})
+ invalidProof, err := suite.chainA.Codec.Marshal(&types.TimestampedSignatureData{Timestamp: suite.solomachine.Time})
suite.Require().NoError(err)
return invalidProof
diff --git a/modules/light-clients/06-solomachine/types/update.go b/modules/light-clients/06-solomachine/types/update.go
index fcd54954..32d2788c 100644
--- a/modules/light-clients/06-solomachine/types/update.go
+++ b/modules/light-clients/06-solomachine/types/update.go
@@ -15,7 +15,7 @@ import (
// - the header timestamp is less than the consensus state timestamp
// - the currently registered public key did not provide the update signature
func (cs ClientState) CheckHeaderAndUpdateState(
- ctx sdk.Context, cdc codec.BinaryMarshaler, clientStore sdk.KVStore,
+ ctx sdk.Context, cdc codec.BinaryCodec, clientStore sdk.KVStore,
header exported.Header,
) (exported.ClientState, exported.ConsensusState, error) {
smHeader, ok := header.(*Header)
@@ -34,7 +34,7 @@ func (cs ClientState) CheckHeaderAndUpdateState(
}
// checkHeader checks if the Solo Machine update signature is valid.
-func checkHeader(cdc codec.BinaryMarshaler, clientState *ClientState, header *Header) error {
+func checkHeader(cdc codec.BinaryCodec, clientState *ClientState, header *Header) error {
// assert update sequence is current sequence
if header.Sequence != clientState.Sequence {
return sdkerrors.Wrapf(
diff --git a/modules/light-clients/06-solomachine/types/update_test.go b/modules/light-clients/06-solomachine/types/update_test.go
index e9f3db3a..5df5cb72 100644
--- a/modules/light-clients/06-solomachine/types/update_test.go
+++ b/modules/light-clients/06-solomachine/types/update_test.go
@@ -100,7 +100,7 @@ func (suite *SoloMachineTestSuite) TestCheckHeaderAndUpdateState() {
NewDiversifier: h.NewDiversifier,
}
- dataBz, err := suite.chainA.Codec.MarshalBinaryBare(data)
+ dataBz, err := suite.chainA.Codec.Marshal(data)
suite.Require().NoError(err)
// generate invalid signature
@@ -112,7 +112,7 @@ func (suite *SoloMachineTestSuite) TestCheckHeaderAndUpdateState() {
Data: dataBz,
}
- signBz, err := suite.chainA.Codec.MarshalBinaryBare(signBytes)
+ signBz, err := suite.chainA.Codec.Marshal(signBytes)
suite.Require().NoError(err)
sig := solomachine.GenerateSignature(signBz)
diff --git a/modules/light-clients/07-tendermint/types/client_state.go b/modules/light-clients/07-tendermint/types/client_state.go
index e16a9754..00d919ae 100644
--- a/modules/light-clients/07-tendermint/types/client_state.go
+++ b/modules/light-clients/07-tendermint/types/client_state.go
@@ -69,7 +69,7 @@ func (cs ClientState) GetLatestHeight() exported.Height {
func (cs ClientState) Status(
ctx sdk.Context,
clientStore sdk.KVStore,
- cdc codec.BinaryMarshaler,
+ cdc codec.BinaryCodec,
) exported.Status {
if !cs.FrozenHeight.IsZero() {
return exported.Frozen
@@ -172,7 +172,7 @@ func (cs ClientState) ZeroCustomFields() exported.ClientState {
// Initialize will check that initial consensus state is a Tendermint consensus state
// and will store ProcessedTime for initial consensus state as ctx.BlockTime()
-func (cs ClientState) Initialize(ctx sdk.Context, _ codec.BinaryMarshaler, clientStore sdk.KVStore, consState exported.ConsensusState) error {
+func (cs ClientState) Initialize(ctx sdk.Context, _ codec.BinaryCodec, clientStore sdk.KVStore, consState exported.ConsensusState) error {
if _, ok := consState.(*ConsensusState); !ok {
return sdkerrors.Wrapf(clienttypes.ErrInvalidConsensus, "invalid initial consensus state. expected type: %T, got: %T",
&ConsensusState{}, consState)
@@ -186,7 +186,7 @@ func (cs ClientState) Initialize(ctx sdk.Context, _ codec.BinaryMarshaler, clien
// stored on the target machine
func (cs ClientState) VerifyClientState(
store sdk.KVStore,
- cdc codec.BinaryMarshaler,
+ cdc codec.BinaryCodec,
height exported.Height,
prefix exported.Prefix,
counterpartyClientIdentifier string,
@@ -225,7 +225,7 @@ func (cs ClientState) VerifyClientState(
// Tendermint client stored on the target machine.
func (cs ClientState) VerifyClientConsensusState(
store sdk.KVStore,
- cdc codec.BinaryMarshaler,
+ cdc codec.BinaryCodec,
height exported.Height,
counterpartyClientIdentifier string,
consensusHeight exported.Height,
@@ -269,7 +269,7 @@ func (cs ClientState) VerifyClientConsensusState(
// specified connection end stored on the target machine.
func (cs ClientState) VerifyConnectionState(
store sdk.KVStore,
- cdc codec.BinaryMarshaler,
+ cdc codec.BinaryCodec,
height exported.Height,
prefix exported.Prefix,
proof []byte,
@@ -292,7 +292,7 @@ func (cs ClientState) VerifyConnectionState(
return sdkerrors.Wrapf(sdkerrors.ErrInvalidType, "invalid connection type %T", connectionEnd)
}
- bz, err := cdc.MarshalBinaryBare(&connection)
+ bz, err := cdc.Marshal(&connection)
if err != nil {
return err
}
@@ -308,7 +308,7 @@ func (cs ClientState) VerifyConnectionState(
// channel end, under the specified port, stored on the target machine.
func (cs ClientState) VerifyChannelState(
store sdk.KVStore,
- cdc codec.BinaryMarshaler,
+ cdc codec.BinaryCodec,
height exported.Height,
prefix exported.Prefix,
proof []byte,
@@ -332,7 +332,7 @@ func (cs ClientState) VerifyChannelState(
return sdkerrors.Wrapf(sdkerrors.ErrInvalidType, "invalid channel type %T", channel)
}
- bz, err := cdc.MarshalBinaryBare(&channelEnd)
+ bz, err := cdc.Marshal(&channelEnd)
if err != nil {
return err
}
@@ -348,7 +348,7 @@ func (cs ClientState) VerifyChannelState(
// the specified port, specified channel, and specified sequence.
func (cs ClientState) VerifyPacketCommitment(
store sdk.KVStore,
- cdc codec.BinaryMarshaler,
+ cdc codec.BinaryCodec,
height exported.Height,
currentTimestamp uint64,
delayPeriod uint64,
@@ -386,7 +386,7 @@ func (cs ClientState) VerifyPacketCommitment(
// acknowledgement at the specified port, specified channel, and specified sequence.
func (cs ClientState) VerifyPacketAcknowledgement(
store sdk.KVStore,
- cdc codec.BinaryMarshaler,
+ cdc codec.BinaryCodec,
height exported.Height,
currentTimestamp uint64,
delayPeriod uint64,
@@ -425,7 +425,7 @@ func (cs ClientState) VerifyPacketAcknowledgement(
// specified sequence.
func (cs ClientState) VerifyPacketReceiptAbsence(
store sdk.KVStore,
- cdc codec.BinaryMarshaler,
+ cdc codec.BinaryCodec,
height exported.Height,
currentTimestamp uint64,
delayPeriod uint64,
@@ -462,7 +462,7 @@ func (cs ClientState) VerifyPacketReceiptAbsence(
// received of the specified channel at the specified port.
func (cs ClientState) VerifyNextSequenceRecv(
store sdk.KVStore,
- cdc codec.BinaryMarshaler,
+ cdc codec.BinaryCodec,
height exported.Height,
currentTimestamp uint64,
delayPeriod uint64,
@@ -519,7 +519,7 @@ func verifyDelayPeriodPassed(store sdk.KVStore, proofHeight exported.Height, cur
// merkle proof, the consensus state and an error if one occurred.
func produceVerificationArgs(
store sdk.KVStore,
- cdc codec.BinaryMarshaler,
+ cdc codec.BinaryCodec,
cs ClientState,
height exported.Height,
prefix exported.Prefix,
@@ -545,7 +545,7 @@ func produceVerificationArgs(
return commitmenttypes.MerkleProof{}, nil, sdkerrors.Wrap(commitmenttypes.ErrInvalidProof, "proof cannot be empty")
}
- if err = cdc.UnmarshalBinaryBare(proof, &merkleProof); err != nil {
+ if err = cdc.Unmarshal(proof, &merkleProof); err != nil {
return commitmenttypes.MerkleProof{}, nil, sdkerrors.Wrap(commitmenttypes.ErrInvalidProof, "failed to unmarshal proof into commitment merkle proof")
}
diff --git a/modules/light-clients/07-tendermint/types/misbehaviour_handle.go b/modules/light-clients/07-tendermint/types/misbehaviour_handle.go
index 0cda7858..fa00dad9 100644
--- a/modules/light-clients/07-tendermint/types/misbehaviour_handle.go
+++ b/modules/light-clients/07-tendermint/types/misbehaviour_handle.go
@@ -21,7 +21,7 @@ import (
// to misbehaviour.Header2
func (cs ClientState) CheckMisbehaviourAndUpdateState(
ctx sdk.Context,
- cdc codec.BinaryMarshaler,
+ cdc codec.BinaryCodec,
clientStore sdk.KVStore,
misbehaviour exported.Misbehaviour,
) (exported.ClientState, error) {
diff --git a/modules/light-clients/07-tendermint/types/proposal_handle.go b/modules/light-clients/07-tendermint/types/proposal_handle.go
index f913ce9b..55c00d0f 100644
--- a/modules/light-clients/07-tendermint/types/proposal_handle.go
+++ b/modules/light-clients/07-tendermint/types/proposal_handle.go
@@ -26,7 +26,7 @@ import (
// Note, that even if the subject is updated to the state of the substitute, an error may be
// returned if the updated client state is invalid or the client is expired.
func (cs ClientState) CheckSubstituteAndUpdateState(
- ctx sdk.Context, cdc codec.BinaryMarshaler, subjectClientStore,
+ ctx sdk.Context, cdc codec.BinaryCodec, subjectClientStore,
substituteClientStore sdk.KVStore, substituteClient exported.ClientState,
initialHeight exported.Height,
) (exported.ClientState, error) {
diff --git a/modules/light-clients/07-tendermint/types/store.go b/modules/light-clients/07-tendermint/types/store.go
index 726fdfd6..68793621 100644
--- a/modules/light-clients/07-tendermint/types/store.go
+++ b/modules/light-clients/07-tendermint/types/store.go
@@ -39,7 +39,7 @@ var (
)
// SetConsensusState stores the consensus state at the given height.
-func SetConsensusState(clientStore sdk.KVStore, cdc codec.BinaryMarshaler, consensusState *ConsensusState, height exported.Height) {
+func SetConsensusState(clientStore sdk.KVStore, cdc codec.BinaryCodec, consensusState *ConsensusState, height exported.Height) {
key := host.ConsensusStateKey(height)
val := clienttypes.MustMarshalConsensusState(cdc, consensusState)
clientStore.Set(key, val)
@@ -47,7 +47,7 @@ func SetConsensusState(clientStore sdk.KVStore, cdc codec.BinaryMarshaler, conse
// GetConsensusState retrieves the consensus state from the client prefixed
// store. An error is returned if the consensus state does not exist.
-func GetConsensusState(store sdk.KVStore, cdc codec.BinaryMarshaler, height exported.Height) (*ConsensusState, error) {
+func GetConsensusState(store sdk.KVStore, cdc codec.BinaryCodec, height exported.Height) (*ConsensusState, error) {
bz := store.Get(host.ConsensusStateKey(height))
if bz == nil {
return nil, sdkerrors.Wrapf(
@@ -187,7 +187,7 @@ func IterateConsensusStateAscending(clientStore sdk.KVStore, cb func(height expo
// GetNextConsensusState returns the lowest consensus state that is larger than the given height.
// The Iterator returns a storetypes.Iterator which iterates from start (inclusive) to end (exclusive).
// Thus, to get the next consensus state, we must first call iterator.Next() and then get the value.
-func GetNextConsensusState(clientStore sdk.KVStore, cdc codec.BinaryMarshaler, height exported.Height) (*ConsensusState, bool) {
+func GetNextConsensusState(clientStore sdk.KVStore, cdc codec.BinaryCodec, height exported.Height) (*ConsensusState, bool) {
iterateStore := prefix.NewStore(clientStore, []byte(KeyIterateConsensusStatePrefix))
iterator := iterateStore.Iterator(bigEndianHeightBytes(height), nil)
defer iterator.Close()
@@ -205,7 +205,7 @@ func GetNextConsensusState(clientStore sdk.KVStore, cdc codec.BinaryMarshaler, h
// GetPreviousConsensusState returns the highest consensus state that is lower than the given height.
// The Iterator returns a storetypes.Iterator which iterates from the end (exclusive) to start (inclusive).
// Thus, to get the previous consensus state, we call iterator.Value() immediately.
-func GetPreviousConsensusState(clientStore sdk.KVStore, cdc codec.BinaryMarshaler, height exported.Height) (*ConsensusState, bool) {
+func GetPreviousConsensusState(clientStore sdk.KVStore, cdc codec.BinaryCodec, height exported.Height) (*ConsensusState, bool) {
iterateStore := prefix.NewStore(clientStore, []byte(KeyIterateConsensusStatePrefix))
iterator := iterateStore.ReverseIterator(nil, bigEndianHeightBytes(height))
defer iterator.Close()
@@ -220,7 +220,7 @@ func GetPreviousConsensusState(clientStore sdk.KVStore, cdc codec.BinaryMarshale
}
// Helper function for GetNextConsensusState and GetPreviousConsensusState
-func getTmConsensusState(clientStore sdk.KVStore, cdc codec.BinaryMarshaler, key []byte) (*ConsensusState, bool) {
+func getTmConsensusState(clientStore sdk.KVStore, cdc codec.BinaryCodec, key []byte) (*ConsensusState, bool) {
bz := clientStore.Get(key)
if bz == nil {
return nil, false
diff --git a/modules/light-clients/07-tendermint/types/tendermint_test.go b/modules/light-clients/07-tendermint/types/tendermint_test.go
index b42f564c..7bb8977c 100644
--- a/modules/light-clients/07-tendermint/types/tendermint_test.go
+++ b/modules/light-clients/07-tendermint/types/tendermint_test.go
@@ -45,7 +45,7 @@ type TendermintTestSuite struct {
// TODO: deprecate usage in favor of testing package
ctx sdk.Context
- cdc codec.Marshaler
+ cdc codec.Codec
privVal tmtypes.PrivValidator
valSet *tmtypes.ValidatorSet
valsHash tmbytes.HexBytes
diff --git a/modules/light-clients/07-tendermint/types/update.go b/modules/light-clients/07-tendermint/types/update.go
index 18cda8fb..bfabb324 100644
--- a/modules/light-clients/07-tendermint/types/update.go
+++ b/modules/light-clients/07-tendermint/types/update.go
@@ -43,7 +43,7 @@ import (
// that consensus state will be pruned from store along with all associated metadata. This will prevent the client store from
// becoming bloated with expired consensus states that can no longer be used for updates and packet verification.
func (cs ClientState) CheckHeaderAndUpdateState(
- ctx sdk.Context, cdc codec.BinaryMarshaler, clientStore sdk.KVStore,
+ ctx sdk.Context, cdc codec.BinaryCodec, clientStore sdk.KVStore,
header exported.Header,
) (exported.ClientState, exported.ConsensusState, error) {
tmHeader, ok := header.(*Header)
diff --git a/modules/light-clients/07-tendermint/types/upgrade.go b/modules/light-clients/07-tendermint/types/upgrade.go
index 788a4a80..ab5ebcf4 100644
--- a/modules/light-clients/07-tendermint/types/upgrade.go
+++ b/modules/light-clients/07-tendermint/types/upgrade.go
@@ -24,7 +24,7 @@ import (
// - any Tendermint chain specified parameter in upgraded client such as ChainID, UnbondingPeriod,
// and ProofSpecs do not match parameters set by committed client
func (cs ClientState) VerifyUpgradeAndUpdateState(
- ctx sdk.Context, cdc codec.BinaryMarshaler, clientStore sdk.KVStore,
+ ctx sdk.Context, cdc codec.BinaryCodec, clientStore sdk.KVStore,
upgradedClient exported.ClientState, upgradedConsState exported.ConsensusState,
proofUpgradeClient, proofUpgradeConsState []byte,
) (exported.ClientState, exported.ConsensusState, error) {
@@ -56,10 +56,10 @@ func (cs ClientState) VerifyUpgradeAndUpdateState(
// unmarshal proofs
var merkleProofClient, merkleProofConsState commitmenttypes.MerkleProof
- if err := cdc.UnmarshalBinaryBare(proofUpgradeClient, &merkleProofClient); err != nil {
+ if err := cdc.Unmarshal(proofUpgradeClient, &merkleProofClient); err != nil {
return nil, nil, sdkerrors.Wrapf(commitmenttypes.ErrInvalidProof, "could not unmarshal client merkle proof: %v", err)
}
- if err := cdc.UnmarshalBinaryBare(proofUpgradeConsState, &merkleProofConsState); err != nil {
+ if err := cdc.Unmarshal(proofUpgradeConsState, &merkleProofConsState); err != nil {
return nil, nil, sdkerrors.Wrapf(commitmenttypes.ErrInvalidProof, "could not unmarshal consensus state merkle proof: %v", err)
}
diff --git a/modules/light-clients/09-localhost/types/client_state.go b/modules/light-clients/09-localhost/types/client_state.go
index 6ef27ba8..67da74d9 100644
--- a/modules/light-clients/09-localhost/types/client_state.go
+++ b/modules/light-clients/09-localhost/types/client_state.go
@@ -44,7 +44,7 @@ func (cs ClientState) GetLatestHeight() exported.Height {
}
// Status always returns Active. The localhost status cannot be changed.
-func (cs ClientState) Status(_ sdk.Context, _ sdk.KVStore, _ codec.BinaryMarshaler,
+func (cs ClientState) Status(_ sdk.Context, _ sdk.KVStore, _ codec.BinaryCodec,
) exported.Status {
return exported.Active
}
@@ -76,7 +76,7 @@ func (cs ClientState) ZeroCustomFields() exported.ClientState {
}
// Initialize ensures that initial consensus state for localhost is nil
-func (cs ClientState) Initialize(_ sdk.Context, _ codec.BinaryMarshaler, _ sdk.KVStore, consState exported.ConsensusState) error {
+func (cs ClientState) Initialize(_ sdk.Context, _ codec.BinaryCodec, _ sdk.KVStore, consState exported.ConsensusState) error {
if consState != nil {
return sdkerrors.Wrap(clienttypes.ErrInvalidConsensus, "initial consensus state for localhost must be nil.")
}
@@ -90,7 +90,7 @@ func (cs ClientState) ExportMetadata(_ sdk.KVStore) []exported.GenesisMetadata {
// CheckHeaderAndUpdateState updates the localhost client. It only needs access to the context
func (cs *ClientState) CheckHeaderAndUpdateState(
- ctx sdk.Context, _ codec.BinaryMarshaler, _ sdk.KVStore, _ exported.Header,
+ ctx sdk.Context, _ codec.BinaryCodec, _ sdk.KVStore, _ exported.Header,
) (exported.ClientState, exported.ConsensusState, error) {
// use the chain ID from context since the localhost client is from the running chain (i.e. self).
cs.ChainId = ctx.ChainID()
@@ -103,7 +103,7 @@ func (cs *ClientState) CheckHeaderAndUpdateState(
// Since localhost is the client of the running chain, misbehaviour cannot be submitted to it
// Thus, CheckMisbehaviourAndUpdateState returns an error for localhost
func (cs ClientState) CheckMisbehaviourAndUpdateState(
- _ sdk.Context, _ codec.BinaryMarshaler, _ sdk.KVStore, _ exported.Misbehaviour,
+ _ sdk.Context, _ codec.BinaryCodec, _ sdk.KVStore, _ exported.Misbehaviour,
) (exported.ClientState, error) {
return nil, sdkerrors.Wrap(clienttypes.ErrInvalidMisbehaviour, "cannot submit misbehaviour to localhost client")
}
@@ -111,7 +111,7 @@ func (cs ClientState) CheckMisbehaviourAndUpdateState(
// CheckSubstituteAndUpdateState returns an error. The localhost cannot be modified by
// proposals.
func (cs ClientState) CheckSubstituteAndUpdateState(
- ctx sdk.Context, _ codec.BinaryMarshaler, _, _ sdk.KVStore,
+ ctx sdk.Context, _ codec.BinaryCodec, _, _ sdk.KVStore,
_ exported.ClientState, _ exported.Height,
) (exported.ClientState, error) {
return nil, sdkerrors.Wrap(clienttypes.ErrUpdateClientFailed, "cannot update localhost client with a proposal")
@@ -119,7 +119,7 @@ func (cs ClientState) CheckSubstituteAndUpdateState(
// VerifyUpgradeAndUpdateState returns an error since localhost cannot be upgraded
func (cs ClientState) VerifyUpgradeAndUpdateState(
- _ sdk.Context, _ codec.BinaryMarshaler, _ sdk.KVStore,
+ _ sdk.Context, _ codec.BinaryCodec, _ sdk.KVStore,
_ exported.ClientState, _ exported.ConsensusState, _, _ []byte,
) (exported.ClientState, exported.ConsensusState, error) {
return nil, nil, sdkerrors.Wrap(clienttypes.ErrInvalidUpgradeClient, "cannot upgrade localhost client")
@@ -127,7 +127,7 @@ func (cs ClientState) VerifyUpgradeAndUpdateState(
// VerifyClientState verifies that the localhost client state is stored locally
func (cs ClientState) VerifyClientState(
- store sdk.KVStore, cdc codec.BinaryMarshaler,
+ store sdk.KVStore, cdc codec.BinaryCodec,
_ exported.Height, _ exported.Prefix, _ string, _ []byte, clientState exported.ClientState,
) error {
path := host.KeyClientState
@@ -151,7 +151,7 @@ func (cs ClientState) VerifyClientState(
// VerifyClientConsensusState returns nil since a local host client does not store consensus
// states.
func (cs ClientState) VerifyClientConsensusState(
- sdk.KVStore, codec.BinaryMarshaler,
+ sdk.KVStore, codec.BinaryCodec,
exported.Height, string, exported.Height, exported.Prefix,
[]byte, exported.ConsensusState,
) error {
@@ -162,7 +162,7 @@ func (cs ClientState) VerifyClientConsensusState(
// specified connection end stored locally.
func (cs ClientState) VerifyConnectionState(
store sdk.KVStore,
- cdc codec.BinaryMarshaler,
+ cdc codec.BinaryCodec,
_ exported.Height,
_ exported.Prefix,
_ []byte,
@@ -176,7 +176,7 @@ func (cs ClientState) VerifyConnectionState(
}
var prevConnection connectiontypes.ConnectionEnd
- err := cdc.UnmarshalBinaryBare(bz, &prevConnection)
+ err := cdc.Unmarshal(bz, &prevConnection)
if err != nil {
return err
}
@@ -195,7 +195,7 @@ func (cs ClientState) VerifyConnectionState(
// channel end, under the specified port, stored on the local machine.
func (cs ClientState) VerifyChannelState(
store sdk.KVStore,
- cdc codec.BinaryMarshaler,
+ cdc codec.BinaryCodec,
_ exported.Height,
prefix exported.Prefix,
_ []byte,
@@ -210,7 +210,7 @@ func (cs ClientState) VerifyChannelState(
}
var prevChannel channeltypes.Channel
- err := cdc.UnmarshalBinaryBare(bz, &prevChannel)
+ err := cdc.Unmarshal(bz, &prevChannel)
if err != nil {
return err
}
@@ -229,7 +229,7 @@ func (cs ClientState) VerifyChannelState(
// the specified port, specified channel, and specified sequence.
func (cs ClientState) VerifyPacketCommitment(
store sdk.KVStore,
- _ codec.BinaryMarshaler,
+ _ codec.BinaryCodec,
_ exported.Height,
_ uint64,
_ uint64,
@@ -261,7 +261,7 @@ func (cs ClientState) VerifyPacketCommitment(
// acknowledgement at the specified port, specified channel, and specified sequence.
func (cs ClientState) VerifyPacketAcknowledgement(
store sdk.KVStore,
- _ codec.BinaryMarshaler,
+ _ codec.BinaryCodec,
_ exported.Height,
_ uint64,
_ uint64,
@@ -294,7 +294,7 @@ func (cs ClientState) VerifyPacketAcknowledgement(
// specified sequence.
func (cs ClientState) VerifyPacketReceiptAbsence(
store sdk.KVStore,
- _ codec.BinaryMarshaler,
+ _ codec.BinaryCodec,
_ exported.Height,
_ uint64,
_ uint64,
@@ -318,7 +318,7 @@ func (cs ClientState) VerifyPacketReceiptAbsence(
// received of the specified channel at the specified port.
func (cs ClientState) VerifyNextSequenceRecv(
store sdk.KVStore,
- _ codec.BinaryMarshaler,
+ _ codec.BinaryCodec,
_ exported.Height,
_ uint64,
_ uint64,
diff --git a/modules/light-clients/09-localhost/types/client_state_test.go b/modules/light-clients/09-localhost/types/client_state_test.go
index f9832303..358ac294 100644
--- a/modules/light-clients/09-localhost/types/client_state_test.go
+++ b/modules/light-clients/09-localhost/types/client_state_test.go
@@ -199,7 +199,7 @@ func (suite *LocalhostTestSuite) TestVerifyConnectionState() {
name: "proof verification success",
clientState: types.NewClientState("chainID", clientHeight),
malleate: func() {
- bz, err := suite.cdc.MarshalBinaryBare(&conn1)
+ bz, err := suite.cdc.Marshal(&conn1)
suite.Require().NoError(err)
suite.store.Set(host.ConnectionKey(testConnectionID), bz)
},
@@ -226,7 +226,7 @@ func (suite *LocalhostTestSuite) TestVerifyConnectionState() {
name: "proof verification failed: different connection stored",
clientState: types.NewClientState("chainID", clientHeight),
malleate: func() {
- bz, err := suite.cdc.MarshalBinaryBare(&conn2)
+ bz, err := suite.cdc.Marshal(&conn2)
suite.Require().NoError(err)
suite.store.Set(host.ConnectionKey(testConnectionID), bz)
},
@@ -271,7 +271,7 @@ func (suite *LocalhostTestSuite) TestVerifyChannelState() {
name: "proof verification success",
clientState: types.NewClientState("chainID", clientHeight),
malleate: func() {
- bz, err := suite.cdc.MarshalBinaryBare(&ch1)
+ bz, err := suite.cdc.Marshal(&ch1)
suite.Require().NoError(err)
suite.store.Set(host.ChannelKey(testPortID, testChannelID), bz)
},
@@ -299,7 +299,7 @@ func (suite *LocalhostTestSuite) TestVerifyChannelState() {
name: "proof verification failed: different channel stored",
clientState: types.NewClientState("chainID", clientHeight),
malleate: func() {
- bz, err := suite.cdc.MarshalBinaryBare(&ch2)
+ bz, err := suite.cdc.Marshal(&ch2)
suite.Require().NoError(err)
suite.store.Set(host.ChannelKey(testPortID, testChannelID), bz)
diff --git a/modules/light-clients/09-localhost/types/localhost_test.go b/modules/light-clients/09-localhost/types/localhost_test.go
index b4c267b6..46c29daa 100644
--- a/modules/light-clients/09-localhost/types/localhost_test.go
+++ b/modules/light-clients/09-localhost/types/localhost_test.go
@@ -24,7 +24,7 @@ var (
type LocalhostTestSuite struct {
suite.Suite
- cdc codec.Marshaler
+ cdc codec.Codec
ctx sdk.Context
store sdk.KVStore
}
diff --git a/testing/README.md b/testing/README.md
index 7808980b..17023bfc 100644
--- a/testing/README.md
+++ b/testing/README.md
@@ -54,7 +54,7 @@ type TestingApp interface {
GetTxConfig() client.TxConfig
// Implemented by SimApp
- AppCodec() codec.Marshaler
+ AppCodec() codec.Codec
// Implemented by BaseApp
LastCommitID() sdk.CommitID
@@ -102,7 +102,7 @@ Your application may need to define `AppCodec()` if it does not already exist:
//
// NOTE: This is solely to be used for testing purposes as it may be desirable
// for modules to register their own custom testing types.
-func (app *SimApp) AppCodec() codec.Marshaler {
+func (app *SimApp) AppCodec() codec.Codec {
return app.appCodec
}
```
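With the rename, `AppCodec()` returns `codec.Codec`, whose binary methods are `Marshal`/`MustMarshal` rather than the `...BinaryBare` variants. A minimal sketch of a test helper built on the interface above, assuming the testing package is importable as `ibctesting` (the helper and its argument names are illustrative):

```go
package example

import (
	channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
	ibctesting "github.com/cosmos/ibc-go/testing"
)

// marshalChannel encodes a channel end with the app codec exposed by the
// TestingApp interface; MustMarshal replaces MustMarshalBinaryBare.
func marshalChannel(app ibctesting.TestingApp, channel channeltypes.Channel) []byte {
	return app.AppCodec().MustMarshal(&channel)
}
```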
diff --git a/testing/app.go b/testing/app.go
index fb4ecbcb..f14178b9 100644
--- a/testing/app.go
+++ b/testing/app.go
@@ -40,7 +40,7 @@ type TestingApp interface {
GetTxConfig() client.TxConfig
// Implemented by SimApp
- AppCodec() codec.Marshaler
+ AppCodec() codec.Codec
// Implemented by BaseApp
LastCommitID() sdk.CommitID
diff --git a/testing/chain.go b/testing/chain.go
index 92228eed..c012f383 100644
--- a/testing/chain.go
+++ b/testing/chain.go
@@ -51,7 +51,7 @@ type TestChain struct {
CurrentHeader tmproto.Header // header for current block height
QueryServer types.QueryServer
TxConfig client.TxConfig
- Codec codec.BinaryMarshaler
+ Codec codec.BinaryCodec
Vals *tmtypes.ValidatorSet
Signers []tmtypes.PrivValidator
@@ -153,7 +153,7 @@ func (chain *TestChain) QueryProofAtHeight(key []byte, height int64) ([]byte, cl
merkleProof, err := commitmenttypes.ConvertProofs(res.ProofOps)
require.NoError(chain.t, err)
- proof, err := chain.App.AppCodec().MarshalBinaryBare(&merkleProof)
+ proof, err := chain.App.AppCodec().Marshal(&merkleProof)
require.NoError(chain.t, err)
revision := clienttypes.ParseChainID(chain.ChainID)
@@ -177,7 +177,7 @@ func (chain *TestChain) QueryUpgradeProof(key []byte, height uint64) ([]byte, cl
merkleProof, err := commitmenttypes.ConvertProofs(res.ProofOps)
require.NoError(chain.t, err)
- proof, err := chain.App.AppCodec().MarshalBinaryBare(&merkleProof)
+ proof, err := chain.App.AppCodec().Marshal(&merkleProof)
require.NoError(chain.t, err)
revision := clienttypes.ParseChainID(chain.ChainID)
@@ -289,7 +289,7 @@ func (chain *TestChain) GetValsAtHeight(height int64) (*tmtypes.ValidatorSet, bo
valSet := stakingtypes.Validators(histInfo.Valset)
- tmValidators, err := teststaking.ToTmValidators(valSet)
+ tmValidators, err := teststaking.ToTmValidators(valSet, sdk.DefaultPowerReduction)
if err != nil {
panic(err)
}
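The `MarshalBinaryBare` → `Marshal` change in these helpers is purely a method rename on the binary codec; the wire format (length-unprefixed protobuf) is unchanged. A self-contained sketch, assuming a `ProtoCodec` and a gogo-generated message:

```go
package example

import (
	"fmt"

	"github.com/cosmos/cosmos-sdk/codec"
	codectypes "github.com/cosmos/cosmos-sdk/codec/types"
	gogotypes "github.com/gogo/protobuf/types"
)

// demoMarshal shows the renamed API: cdc.Marshal/cdc.Unmarshal replace
// cdc.MarshalBinaryBare/cdc.UnmarshalBinaryBare with identical output.
func demoMarshal() error {
	cdc := codec.NewProtoCodec(codectypes.NewInterfaceRegistry())

	value := gogotypes.UInt64Value{Value: 42}
	bz, err := cdc.Marshal(&value) // previously cdc.MarshalBinaryBare(&value)
	if err != nil {
		return err
	}

	var decoded gogotypes.UInt64Value
	if err := cdc.Unmarshal(bz, &decoded); err != nil { // previously UnmarshalBinaryBare
		return err
	}

	fmt.Println(decoded.Value)
	return nil
}
```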
diff --git a/testing/mock/mock.go b/testing/mock/mock.go
index 5bd1b123..d6673d6d 100644
--- a/testing/mock/mock.go
+++ b/testing/mock/mock.go
@@ -55,12 +55,12 @@ func (AppModuleBasic) RegisterLegacyAminoCodec(*codec.LegacyAmino) {}
func (AppModuleBasic) RegisterInterfaces(registry codectypes.InterfaceRegistry) {}
// DefaultGenesis implements AppModuleBasic interface.
-func (AppModuleBasic) DefaultGenesis(cdc codec.JSONMarshaler) json.RawMessage {
+func (AppModuleBasic) DefaultGenesis(cdc codec.JSONCodec) json.RawMessage {
return nil
}
// ValidateGenesis implements the AppModuleBasic interface.
-func (AppModuleBasic) ValidateGenesis(codec.JSONMarshaler, client.TxEncodingConfig, json.RawMessage) error {
+func (AppModuleBasic) ValidateGenesis(codec.JSONCodec, client.TxEncodingConfig, json.RawMessage) error {
return nil
}
@@ -117,7 +117,7 @@ func (am AppModule) LegacyQuerierHandler(*codec.LegacyAmino) sdk.Querier {
func (am AppModule) RegisterServices(module.Configurator) {}
// InitGenesis implements the AppModule interface.
-func (am AppModule) InitGenesis(ctx sdk.Context, cdc codec.JSONMarshaler, data json.RawMessage) []abci.ValidatorUpdate {
+func (am AppModule) InitGenesis(ctx sdk.Context, cdc codec.JSONCodec, data json.RawMessage) []abci.ValidatorUpdate {
// bind mock port ID
cap := am.portKeeper.BindPort(ctx, ModuleName)
am.scopedKeeper.ClaimCapability(ctx, cap, host.PortPath(ModuleName))
@@ -126,7 +126,7 @@ func (am AppModule) InitGenesis(ctx sdk.Context, cdc codec.JSONMarshaler, data j
}
// ExportGenesis implements the AppModule interface.
-func (am AppModule) ExportGenesis(ctx sdk.Context, cdc codec.JSONMarshaler) json.RawMessage {
+func (am AppModule) ExportGenesis(ctx sdk.Context, cdc codec.JSONCodec) json.RawMessage {
return nil
}
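The mock module above returns `nil` from its genesis hooks, so only the signatures change there. For a real module, the renamed `codec.JSONCodec` is used exactly as the old `codec.JSONMarshaler` was; a sketch assuming the transfer module's types package:

```go
package example

import (
	"encoding/json"

	"github.com/cosmos/cosmos-sdk/codec"
	transfertypes "github.com/cosmos/ibc-go/modules/apps/transfer/types"
)

// defaultGenesis mirrors AppModuleBasic.DefaultGenesis after the rename:
// the parameter is now codec.JSONCodec instead of codec.JSONMarshaler.
func defaultGenesis(cdc codec.JSONCodec) json.RawMessage {
	return cdc.MustMarshalJSON(transfertypes.DefaultGenesisState())
}
```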
diff --git a/testing/sdk_test.go b/testing/sdk_test.go
index 2375a2bc..caef571c 100644
--- a/testing/sdk_test.go
+++ b/testing/sdk_test.go
@@ -111,9 +111,9 @@ func DefaultConfig() network.Config {
NumValidators: 4,
BondDenom: sdk.DefaultBondDenom,
MinGasPrices: fmt.Sprintf("0.000006%s", sdk.DefaultBondDenom),
- AccountTokens: sdk.TokensFromConsensusPower(1000),
- StakingTokens: sdk.TokensFromConsensusPower(500),
- BondedTokens: sdk.TokensFromConsensusPower(100),
+ AccountTokens: sdk.TokensFromConsensusPower(1000, sdk.DefaultPowerReduction),
+ StakingTokens: sdk.TokensFromConsensusPower(500, sdk.DefaultPowerReduction),
+ BondedTokens: sdk.TokensFromConsensusPower(100, sdk.DefaultPowerReduction),
PruningStrategy: storetypes.PruningOptionNothing,
CleanupDir: true,
SigningAlgo: string(hd.Secp256k1Type),
@@ -231,7 +231,7 @@ func (s *IntegrationTestSuite) TestLegacyRestErrMessages() {
fmt.Sprintf("--%s=%s", flags.FlagFees, sdk.NewCoins(sdk.NewCoin(s.cfg.BondDenom, sdk.NewInt(10))).String()),
fmt.Sprintf("--gas=%d", flags.DefaultGasLimit),
fmt.Sprintf("--%s=%s", flags.FlagFrom, val.Address.String()),
- fmt.Sprintf("--%s=foobar", flags.FlagMemo),
+ fmt.Sprintf("--%s=foobar", flags.FlagNote),
},
uint32(8),
},
@@ -246,7 +246,7 @@ func (s *IntegrationTestSuite) TestLegacyRestErrMessages() {
fmt.Sprintf("--%s=%s", flags.FlagFees, sdk.NewCoins(sdk.NewCoin(s.cfg.BondDenom, sdk.NewInt(10))).String()),
fmt.Sprintf("--gas=%d", flags.DefaultGasLimit),
fmt.Sprintf("--%s=%s", flags.FlagFrom, val.Address.String()),
- fmt.Sprintf("--%s=foobar", flags.FlagMemo),
+ fmt.Sprintf("--%s=foobar", flags.FlagNote),
},
uint32(0),
},
@@ -257,7 +257,7 @@ func (s *IntegrationTestSuite) TestLegacyRestErrMessages() {
out, err := clitestutil.ExecTestCLICmd(val.ClientCtx, tc.cmd, tc.args)
s.Require().NoError(err)
var txRes sdk.TxResponse
- s.Require().NoError(val.ClientCtx.JSONMarshaler.UnmarshalJSON(out.Bytes(), &txRes))
+ s.Require().NoError(val.ClientCtx.JSONCodec.UnmarshalJSON(out.Bytes(), &txRes))
s.Require().Equal(tc.code, txRes.Code)
s.Require().NoError(s.network.WaitForNextBlock())
@@ -310,7 +310,7 @@ func (s *IntegrationTestSuite) testQueryIBCTx(txRes sdk.TxResponse, cmd *cobra.C
s.Require().NoError(err)
var getTxRes txtypes.GetTxResponse
- s.Require().NoError(val.ClientCtx.JSONMarshaler.UnmarshalJSON(grpcJSON, &getTxRes))
+ s.Require().NoError(val.ClientCtx.JSONCodec.UnmarshalJSON(grpcJSON, &getTxRes))
s.Require().Equal(getTxRes.Tx.Body.Memo, "foobar")
// generate broadcast only txn.
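`sdk.TokensFromConsensusPower` now takes the power-reduction factor explicitly instead of reading a package-level default. With `sdk.DefaultPowerReduction` (10^6) the result matches the old one-argument form:

```go
package main

import (
	"fmt"

	sdk "github.com/cosmos/cosmos-sdk/types"
)

func main() {
	// One unit of consensus power corresponds to DefaultPowerReduction (10^6)
	// base tokens; callers now pass the reduction explicitly.
	tokens := sdk.TokensFromConsensusPower(100, sdk.DefaultPowerReduction)
	fmt.Println(tokens.String()) // 100000000
}
```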
diff --git a/testing/simapp/app.go b/testing/simapp/app.go
index dc7de557..7eb60307 100644
--- a/testing/simapp/app.go
+++ b/testing/simapp/app.go
@@ -56,7 +56,6 @@ import (
evidencekeeper "github.com/cosmos/cosmos-sdk/x/evidence/keeper"
evidencetypes "github.com/cosmos/cosmos-sdk/x/evidence/types"
feegrant "github.com/cosmos/cosmos-sdk/x/feegrant"
- feegrantante "github.com/cosmos/cosmos-sdk/x/feegrant/ante"
feegrantkeeper "github.com/cosmos/cosmos-sdk/x/feegrant/keeper"
feegranttypes "github.com/cosmos/cosmos-sdk/x/feegrant/types"
"github.com/cosmos/cosmos-sdk/x/genutil"
@@ -146,11 +145,6 @@ var (
govtypes.ModuleName: {authtypes.Burner},
ibctransfertypes.ModuleName: {authtypes.Minter, authtypes.Burner},
}
-
- // module accounts that are allowed to receive tokens
- allowedReceivingModAcc = map[string]bool{
- distrtypes.ModuleName: true,
- }
)
var (
@@ -164,7 +158,7 @@ var (
type SimApp struct {
*baseapp.BaseApp
legacyAmino *codec.LegacyAmino
- appCodec codec.Marshaler
+ appCodec codec.Codec
interfaceRegistry types.InterfaceRegistry
invCheckPeriod uint
@@ -229,7 +223,7 @@ func NewSimApp(
bApp := baseapp.NewBaseApp(appName, logger, db, encodingConfig.TxConfig.TxDecoder(), baseAppOptions...)
bApp.SetCommitMultiStoreTracer(traceStore)
- bApp.SetAppVersion(version.Version)
+ bApp.SetVersion(version.Version)
bApp.SetInterfaceRegistry(interfaceRegistry)
keys := sdk.NewKVStoreKeys(
@@ -271,7 +265,7 @@ func NewSimApp(
appCodec, keys[authtypes.StoreKey], app.GetSubspace(authtypes.ModuleName), authtypes.ProtoBaseAccount, maccPerms,
)
app.BankKeeper = bankkeeper.NewBaseKeeper(
- appCodec, keys[banktypes.StoreKey], app.AccountKeeper, app.GetSubspace(banktypes.ModuleName), app.BlockedAddrs(),
+ appCodec, keys[banktypes.StoreKey], app.AccountKeeper, app.GetSubspace(banktypes.ModuleName), app.ModuleAccountAddrs(),
)
stakingKeeper := stakingkeeper.NewKeeper(
appCodec, keys[stakingtypes.StoreKey], app.AccountKeeper, app.BankKeeper, app.GetSubspace(stakingtypes.ModuleName),
@@ -292,7 +286,7 @@ func NewSimApp(
)
app.FeeGrantKeeper = feegrantkeeper.NewKeeper(appCodec, keys[feegranttypes.StoreKey], app.AccountKeeper)
- app.UpgradeKeeper = upgradekeeper.NewKeeper(skipUpgradeHeights, keys[upgradetypes.StoreKey], appCodec, homePath)
+ app.UpgradeKeeper = upgradekeeper.NewKeeper(skipUpgradeHeights, keys[upgradetypes.StoreKey], appCodec, homePath, app.BaseApp)
// register the staking hooks
// NOTE: stakingKeeper above is passed by reference, so that it will contain these hooks
@@ -401,7 +395,7 @@ func NewSimApp(
app.mm.RegisterInvariants(&app.CrisisKeeper)
app.mm.RegisterRoutes(app.Router(), app.QueryRouter(), encodingConfig.Amino)
- app.configurator = module.NewConfigurator(app.MsgServiceRouter(), app.GRPCQueryRouter())
+ app.configurator = module.NewConfigurator(app.appCodec, app.MsgServiceRouter(), app.GRPCQueryRouter())
app.mm.RegisterServices(app.configurator)
// add test gRPC service for testing gRPC queries in isolation
@@ -438,12 +432,21 @@ func NewSimApp(
// initialize BaseApp
app.SetInitChainer(app.InitChainer)
app.SetBeginBlocker(app.BeginBlocker)
- app.SetAnteHandler(
- feegrantante.NewAnteHandler(
- app.AccountKeeper, app.BankKeeper, app.FeeGrantKeeper, ante.DefaultSigVerificationGasConsumer,
- encodingConfig.TxConfig.SignModeHandler(),
- ),
+ anteHandler, err := ante.NewAnteHandler(
+ ante.HandlerOptions{
+ AccountKeeper: app.AccountKeeper,
+ BankKeeper: app.BankKeeper,
+ SignModeHandler: encodingConfig.TxConfig.SignModeHandler(),
+ FeegrantKeeper: app.FeeGrantKeeper,
+ SigGasConsumer: ante.DefaultSigVerificationGasConsumer,
+ },
)
+ if err != nil {
+ panic(err)
+ }
+
+ app.SetAnteHandler(anteHandler)
+
app.SetEndBlocker(app.EndBlocker)
if loadLatest {
@@ -491,6 +494,7 @@ func (app *SimApp) InitChainer(ctx sdk.Context, req abci.RequestInitChain) abci.
if err := json.Unmarshal(req.AppStateBytes, &genesisState); err != nil {
panic(err)
}
+ app.UpgradeKeeper.SetModuleVersionMap(ctx, app.mm.GetVersionMap())
return app.mm.InitGenesis(ctx, app.appCodec, genesisState)
}
@@ -509,17 +513,6 @@ func (app *SimApp) ModuleAccountAddrs() map[string]bool {
return modAccAddrs
}
-// BlockedAddrs returns all the app's module account addresses that are not
-// allowed to receive external tokens.
-func (app *SimApp) BlockedAddrs() map[string]bool {
- blockedAddrs := make(map[string]bool)
- for acc := range maccPerms {
- blockedAddrs[authtypes.NewModuleAddress(acc).String()] = !allowedReceivingModAcc[acc]
- }
-
- return blockedAddrs
-}
-
// LegacyAmino returns SimApp's amino codec.
//
// NOTE: This is solely to be used for testing purposes as it may be desirable
@@ -532,7 +525,7 @@ func (app *SimApp) LegacyAmino() *codec.LegacyAmino {
//
// NOTE: This is solely to be used for testing purposes as it may be desirable
// for modules to register their own custom testing types.
-func (app *SimApp) AppCodec() codec.Marshaler {
+func (app *SimApp) AppCodec() codec.Codec {
return app.appCodec
}
@@ -634,28 +627,6 @@ func (app *SimApp) RegisterTendermintService(clientCtx client.Context) {
tmservice.RegisterTendermintService(app.BaseApp.GRPCQueryRouter(), clientCtx, app.interfaceRegistry)
}
-// RunMigrations performs in-place store migrations for all modules. This
-// function MUST be only called by x/upgrade UpgradeHandler.
-//
-// `migrateFromVersions` is a map of moduleName to fromVersion (unit64), where
-// fromVersion denotes the version from which we should migrate the module, the
-// target version being the module's latest ConsensusVersion.
-//
-// Example:
-// cfg := module.NewConfigurator(...)
-// app.UpgradeKeeper.SetUpgradeHandler("store-migration", func(ctx sdk.Context, plan upgradetypes.Plan) {
-// err := app.RunMigrations(ctx, module.MigrationMap{
-// "bank": 1, // Migrate x/bank from v1 to current x/bank's ConsensusVersion
-// "staking": 8, // Migrate x/staking from v8 to current x/staking's ConsensusVersion
-// })
-// if err != nil {
-// panic(err)
-// }
-// })
-func (app *SimApp) RunMigrations(ctx sdk.Context, migrateFromVersions module.MigrationMap) error {
- return app.mm.RunMigrations(ctx, app.configurator, migrateFromVersions)
-}
-
// RegisterSwaggerAPI registers swagger route with API Server
func RegisterSwaggerAPI(ctx client.Context, rtr *mux.Router) {
statikFS, err := fs.New()
@@ -677,7 +648,7 @@ func GetMaccPerms() map[string][]string {
}
// initParamsKeeper init params keeper and its subspaces
-func initParamsKeeper(appCodec codec.BinaryMarshaler, legacyAmino *codec.LegacyAmino, key, tkey sdk.StoreKey) paramskeeper.Keeper {
+func initParamsKeeper(appCodec codec.BinaryCodec, legacyAmino *codec.LegacyAmino, key, tkey sdk.StoreKey) paramskeeper.Keeper {
paramsKeeper := paramskeeper.NewKeeper(appCodec, legacyAmino, key, tkey)
paramsKeeper.Subspace(authtypes.ModuleName)
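The largest behavioural change in this file is the ante handler wiring: the feegrant-specific constructor from `x/feegrant/ante` is gone, and `x/auth/ante.NewAnteHandler` now accepts a `HandlerOptions` struct and returns an error. A standalone sketch of the same wiring, assuming the keepers and tx config already exist:

```go
package example

import (
	"github.com/cosmos/cosmos-sdk/client"
	sdk "github.com/cosmos/cosmos-sdk/types"
	"github.com/cosmos/cosmos-sdk/x/auth/ante"
	authkeeper "github.com/cosmos/cosmos-sdk/x/auth/keeper"
	bankkeeper "github.com/cosmos/cosmos-sdk/x/bank/keeper"
	feegrantkeeper "github.com/cosmos/cosmos-sdk/x/feegrant/keeper"
)

// newAnteHandler mirrors the SetAnteHandler wiring above: the options struct
// replaces the removed x/feegrant/ante constructor, and the returned error
// must be handled by the caller (SimApp panics on it during app construction).
func newAnteHandler(
	ak authkeeper.AccountKeeper,
	bk bankkeeper.Keeper,
	fk feegrantkeeper.Keeper,
	txConfig client.TxConfig,
) (sdk.AnteHandler, error) {
	return ante.NewAnteHandler(ante.HandlerOptions{
		AccountKeeper:   ak,
		BankKeeper:      bk,
		FeegrantKeeper:  fk,
		SignModeHandler: txConfig.SignModeHandler(),
		SigGasConsumer:  ante.DefaultSigVerificationGasConsumer,
	})
}
```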
diff --git a/testing/simapp/app_test.go b/testing/simapp/app_test.go
deleted file mode 100644
index 0e6adcc1..00000000
--- a/testing/simapp/app_test.go
+++ /dev/null
@@ -1,193 +0,0 @@
-package simapp
-
-import (
- "encoding/json"
- "os"
- "testing"
-
- "github.com/stretchr/testify/require"
- abci "github.com/tendermint/tendermint/abci/types"
- "github.com/tendermint/tendermint/libs/log"
- tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
- dbm "github.com/tendermint/tm-db"
-
- "github.com/cosmos/cosmos-sdk/baseapp"
- sdk "github.com/cosmos/cosmos-sdk/types"
- "github.com/cosmos/cosmos-sdk/types/module"
- "github.com/cosmos/cosmos-sdk/x/auth"
- "github.com/cosmos/cosmos-sdk/x/auth/vesting"
- "github.com/cosmos/cosmos-sdk/x/authz"
- banktypes "github.com/cosmos/cosmos-sdk/x/bank/types"
- "github.com/cosmos/cosmos-sdk/x/capability"
- "github.com/cosmos/cosmos-sdk/x/crisis"
- "github.com/cosmos/cosmos-sdk/x/distribution"
- "github.com/cosmos/cosmos-sdk/x/evidence"
- feegrant "github.com/cosmos/cosmos-sdk/x/feegrant"
- "github.com/cosmos/cosmos-sdk/x/genutil"
- "github.com/cosmos/cosmos-sdk/x/gov"
- "github.com/cosmos/cosmos-sdk/x/mint"
- "github.com/cosmos/cosmos-sdk/x/params"
- "github.com/cosmos/cosmos-sdk/x/slashing"
- "github.com/cosmos/cosmos-sdk/x/staking"
- "github.com/cosmos/cosmos-sdk/x/upgrade"
- transfer "github.com/cosmos/ibc-go/modules/apps/transfer"
- ibc "github.com/cosmos/ibc-go/modules/core"
-)
-
-func TestSimAppExportAndBlockedAddrs(t *testing.T) {
- encCfg := MakeTestEncodingConfig()
- db := dbm.NewMemDB()
- app := NewSimApp(log.NewTMLogger(log.NewSyncWriter(os.Stdout)), db, nil, true, map[int64]bool{}, DefaultNodeHome, 0, encCfg, EmptyAppOptions{})
-
- for acc := range maccPerms {
- require.Equal(t, !allowedReceivingModAcc[acc], app.BankKeeper.BlockedAddr(app.AccountKeeper.GetModuleAddress(acc)),
- "ensure that blocked addresses are properly set in bank keeper")
- }
-
- genesisState := NewDefaultGenesisState(encCfg.Marshaler)
- stateBytes, err := json.MarshalIndent(genesisState, "", " ")
- require.NoError(t, err)
-
- // Initialize the chain
- app.InitChain(
- abci.RequestInitChain{
- Validators: []abci.ValidatorUpdate{},
- AppStateBytes: stateBytes,
- },
- )
- app.Commit()
-
- // Making a new app object with the db, so that initchain hasn't been called
- app2 := NewSimApp(log.NewTMLogger(log.NewSyncWriter(os.Stdout)), db, nil, true, map[int64]bool{}, DefaultNodeHome, 0, encCfg, EmptyAppOptions{})
- _, err = app2.ExportAppStateAndValidators(false, []string{})
- require.NoError(t, err, "ExportAppStateAndValidators should not have an error")
-}
-
-func TestGetMaccPerms(t *testing.T) {
- dup := GetMaccPerms()
- require.Equal(t, maccPerms, dup, "duplicated module account permissions differed from actual module account permissions")
-}
-
-func TestRunMigrations(t *testing.T) {
- db := dbm.NewMemDB()
- encCfg := MakeTestEncodingConfig()
- logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
- app := NewSimApp(logger, db, nil, true, map[int64]bool{}, DefaultNodeHome, 0, encCfg, EmptyAppOptions{})
-
- // Create a new baseapp and configurator for the purpose of this test.
- bApp := baseapp.NewBaseApp(appName, logger, db, encCfg.TxConfig.TxDecoder())
- bApp.SetCommitMultiStoreTracer(nil)
- bApp.SetInterfaceRegistry(encCfg.InterfaceRegistry)
- app.BaseApp = bApp
- app.configurator = module.NewConfigurator(app.MsgServiceRouter(), app.GRPCQueryRouter())
-
- // We register all modules on the Configurator, except x/bank. x/bank will
- // serve as the test subject on which we run the migration tests.
- //
- // The loop below is the same as calling `RegisterServices` on
- // ModuleManager, except that we skip x/bank.
- for _, module := range app.mm.Modules {
- if module.Name() == banktypes.ModuleName {
- continue
- }
-
- module.RegisterServices(app.configurator)
- }
-
- // Initialize the chain
- app.InitChain(abci.RequestInitChain{})
- app.Commit()
-
- testCases := []struct {
- name string
- moduleName string
- forVersion uint64
- expRegErr bool // errors while registering migration
- expRegErrMsg string
- expRunErr bool // errors while running migration
- expRunErrMsg string
- expCalled int
- }{
- {
- "cannot register migration for version 0",
- "bank", 0,
- true, "module migration versions should start at 1: invalid version", false, "", 0,
- },
- {
- "throws error on RunMigrations if no migration registered for bank",
- "", 1,
- false, "", true, "no migrations found for module bank: not found", 0,
- },
- {
- "can register and run migration handler for x/bank",
- "bank", 1,
- false, "", false, "", 1,
- },
- {
- "cannot register migration handler for same module & forVersion",
- "bank", 1,
- true, "another migration for module bank and version 1 already exists: internal logic error", false, "", 0,
- },
- }
-
- for _, tc := range testCases {
- t.Run(tc.name, func(t *testing.T) {
- var err error
-
- // Since it's very hard to test actual in-place store migrations in
- // tests (due to the difficulty of maintaining multiple versions of a
- // module), we're just testing here that the migration logic is
- // called.
- called := 0
-
- if tc.moduleName != "" {
- // Register migration for module from version `forVersion` to `forVersion+1`.
- err = app.configurator.RegisterMigration(tc.moduleName, tc.forVersion, func(sdk.Context) error {
- called++
-
- return nil
- })
-
- if tc.expRegErr {
- require.EqualError(t, err, tc.expRegErrMsg)
-
- return
- }
- }
- require.NoError(t, err)
-
- // Run migrations only for bank. That's why we put the initial
- // version for bank as 1, and for all other modules, we put as
- // their latest ConsensusVersion.
- err = app.RunMigrations(
- app.NewContext(true, tmproto.Header{Height: app.LastBlockHeight()}),
- module.MigrationMap{
- "bank": 1,
- "auth": auth.AppModule{}.ConsensusVersion(),
- "authz": authz.AppModule{}.ConsensusVersion(),
- "staking": staking.AppModule{}.ConsensusVersion(),
- "mint": mint.AppModule{}.ConsensusVersion(),
- "distribution": distribution.AppModule{}.ConsensusVersion(),
- "slashing": slashing.AppModule{}.ConsensusVersion(),
- "gov": gov.AppModule{}.ConsensusVersion(),
- "params": params.AppModule{}.ConsensusVersion(),
- "ibc": ibc.AppModule{}.ConsensusVersion(),
- "upgrade": upgrade.AppModule{}.ConsensusVersion(),
- "vesting": vesting.AppModule{}.ConsensusVersion(),
- "feegrant": feegrant.AppModule{}.ConsensusVersion(),
- "transfer": transfer.AppModule{}.ConsensusVersion(),
- "evidence": evidence.AppModule{}.ConsensusVersion(),
- "crisis": crisis.AppModule{}.ConsensusVersion(),
- "genutil": genutil.AppModule{}.ConsensusVersion(),
- "capability": capability.AppModule{}.ConsensusVersion(),
- },
- )
- if tc.expRunErr {
- require.EqualError(t, err, tc.expRunErrMsg)
- } else {
- require.NoError(t, err)
- require.Equal(t, tc.expCalled, called)
- }
- })
- }
-}
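The deleted `RunMigrations` wrapper and its test reflect the SDK moving migration orchestration into `x/upgrade`: `InitChainer` now seeds the module version map (see `SetModuleVersionMap` above), and migrations run inside an upgrade handler. A sketch of the replacement pattern, written as if it lived in `testing/simapp` and assuming the upgrade-handler signature of the SDK version pinned in `go.mod`:

```go
package simapp

import (
	sdk "github.com/cosmos/cosmos-sdk/types"
	"github.com/cosmos/cosmos-sdk/types/module"
	upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
)

// registerUpgradeHandler sketches the pattern that replaces the removed
// app.RunMigrations wrapper: the handler receives the stored module version
// map (seeded by SetModuleVersionMap in InitChainer) and forwards it to the
// module manager, which migrates each module up to its ConsensusVersion.
func registerUpgradeHandler(app *SimApp) {
	app.UpgradeKeeper.SetUpgradeHandler("example-upgrade",
		func(ctx sdk.Context, plan upgradetypes.Plan, fromVM module.VersionMap) (module.VersionMap, error) {
			return app.mm.RunMigrations(ctx, app.configurator, fromVM)
		},
	)
}
```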
diff --git a/testing/simapp/genesis.go b/testing/simapp/genesis.go
index dbb4e01c..772e452d 100644
--- a/testing/simapp/genesis.go
+++ b/testing/simapp/genesis.go
@@ -16,6 +16,6 @@ import (
type GenesisState map[string]json.RawMessage
// NewDefaultGenesisState generates the default state for the application.
-func NewDefaultGenesisState(cdc codec.JSONMarshaler) GenesisState {
+func NewDefaultGenesisState(cdc codec.JSONCodec) GenesisState {
return ModuleBasics.DefaultGenesis(cdc)
}
diff --git a/testing/simapp/params/encoding.go b/testing/simapp/params/encoding.go
index 698408da..3d634abf 100644
--- a/testing/simapp/params/encoding.go
+++ b/testing/simapp/params/encoding.go
@@ -10,7 +10,7 @@ import (
// This is provided for compatibility between protobuf and amino implementations.
type EncodingConfig struct {
InterfaceRegistry types.InterfaceRegistry
- Marshaler codec.Marshaler
+ Marshaler codec.Codec
TxConfig client.TxConfig
Amino *codec.LegacyAmino
}
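For context, the `Marshaler` field is in practice a `codec.ProtoCodec`. A sketch of a hypothetical `makeEncodingConfig` helper that populates the struct after the rename (it mirrors what the SDK's simapp params package does):

```go
package params

import (
	"github.com/cosmos/cosmos-sdk/codec"
	codectypes "github.com/cosmos/cosmos-sdk/codec/types"
	"github.com/cosmos/cosmos-sdk/x/auth/tx"
)

// makeEncodingConfig is a hypothetical helper showing how the struct above is
// typically filled in: Marshaler now holds a codec.Codec (a *ProtoCodec in
// practice) instead of the old codec.Marshaler interface.
func makeEncodingConfig() EncodingConfig {
	amino := codec.NewLegacyAmino()
	registry := codectypes.NewInterfaceRegistry()
	protoCodec := codec.NewProtoCodec(registry)
	txCfg := tx.NewTxConfig(protoCodec, tx.DefaultSignModes)

	return EncodingConfig{
		InterfaceRegistry: registry,
		Marshaler:         protoCodec,
		TxConfig:          txCfg,
		Amino:             amino,
	}
}
```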
diff --git a/testing/simapp/simd/cmd/cmd_test.go b/testing/simapp/simd/cmd/cmd_test.go
index 1ae137a6..363d06d8 100644
--- a/testing/simapp/simd/cmd/cmd_test.go
+++ b/testing/simapp/simd/cmd/cmd_test.go
@@ -7,9 +7,9 @@ import (
"github.com/stretchr/testify/require"
svrcmd "github.com/cosmos/cosmos-sdk/server/cmd"
- "github.com/cosmos/cosmos-sdk/x/genutil/client/cli"
"github.com/cosmos/ibc-go/testing/simapp"
"github.com/cosmos/ibc-go/testing/simapp/simd/cmd"
+ "github.com/cosmos/cosmos-sdk/x/genutil/client/cli"
)
func TestInitCmd(t *testing.T) {
diff --git a/testing/simapp/simd/cmd/genaccounts.go b/testing/simapp/simd/cmd/genaccounts.go
index 57de144c..5c04059b 100644
--- a/testing/simapp/simd/cmd/genaccounts.go
+++ b/testing/simapp/simd/cmd/genaccounts.go
@@ -40,8 +40,8 @@ contain valid denominations. Accounts may optionally be supplied with vesting pa
Args: cobra.ExactArgs(2),
RunE: func(cmd *cobra.Command, args []string) error {
clientCtx := client.GetClientContextFromCmd(cmd)
- depCdc := clientCtx.JSONMarshaler
- cdc := depCdc.(codec.Marshaler)
+ depCdc := clientCtx.JSONCodec
+ cdc := depCdc.(codec.Codec)
serverCtx := server.GetServerContextFromCmd(cmd)
config := serverCtx.Config
diff --git a/testing/simapp/simd/cmd/genaccounts_test.go b/testing/simapp/simd/cmd/genaccounts_test.go
index d1265b1f..cba1016b 100644
--- a/testing/simapp/simd/cmd/genaccounts_test.go
+++ b/testing/simapp/simd/cmd/genaccounts_test.go
@@ -63,7 +63,7 @@ func TestAddGenesisAccountCmd(t *testing.T) {
require.NoError(t, err)
serverCtx := server.NewContext(viper.New(), cfg, logger)
- clientCtx := client.Context{}.WithJSONMarshaler(appCodec).WithHomeDir(home)
+ clientCtx := client.Context{}.WithJSONCodec(appCodec).WithHomeDir(home)
ctx := context.Background()
ctx = context.WithValue(ctx, client.ClientContextKey, &clientCtx)
diff --git a/testing/simapp/simd/cmd/root.go b/testing/simapp/simd/cmd/root.go
index af9d3c1b..e195ba6b 100644
--- a/testing/simapp/simd/cmd/root.go
+++ b/testing/simapp/simd/cmd/root.go
@@ -14,6 +14,7 @@ import (
"github.com/cosmos/cosmos-sdk/baseapp"
"github.com/cosmos/cosmos-sdk/client"
+ config "github.com/cosmos/cosmos-sdk/client/config"
"github.com/cosmos/cosmos-sdk/client/debug"
"github.com/cosmos/cosmos-sdk/client/flags"
"github.com/cosmos/cosmos-sdk/client/keys"
@@ -23,7 +24,6 @@ import (
"github.com/cosmos/cosmos-sdk/snapshots"
"github.com/cosmos/cosmos-sdk/store"
sdk "github.com/cosmos/cosmos-sdk/types"
- authclient "github.com/cosmos/cosmos-sdk/x/auth/client"
authcmd "github.com/cosmos/cosmos-sdk/x/auth/client/cli"
"github.com/cosmos/cosmos-sdk/x/auth/types"
vestingcli "github.com/cosmos/cosmos-sdk/x/auth/vesting/client/cli"
@@ -39,19 +39,31 @@ import (
func NewRootCmd() (*cobra.Command, params.EncodingConfig) {
encodingConfig := simapp.MakeTestEncodingConfig()
initClientCtx := client.Context{}.
- WithJSONMarshaler(encodingConfig.Marshaler).
+ WithJSONCodec(encodingConfig.Marshaler).
WithInterfaceRegistry(encodingConfig.InterfaceRegistry).
WithTxConfig(encodingConfig.TxConfig).
WithLegacyAmino(encodingConfig.Amino).
WithInput(os.Stdin).
WithAccountRetriever(types.AccountRetriever{}).
WithBroadcastMode(flags.BroadcastBlock).
- WithHomeDir(simapp.DefaultNodeHome)
+ WithHomeDir(simapp.DefaultNodeHome).
+ WithViper("") // In simapp, we don't use any prefix for env variables.
rootCmd := &cobra.Command{
Use: "simd",
Short: "simulation app",
PersistentPreRunE: func(cmd *cobra.Command, _ []string) error {
+ // set the default command outputs
+ cmd.SetOut(cmd.OutOrStdout())
+ cmd.SetErr(cmd.ErrOrStderr())
+
+ initClientCtx = client.ReadHomeFlag(initClientCtx, cmd)
+
+ initClientCtx, err := config.ReadFromClientConfig(initClientCtx)
+ if err != nil {
+ return err
+ }
+
if err := client.SetCmdClientContextHandler(initClientCtx, cmd); err != nil {
return err
}
@@ -66,8 +78,6 @@ func NewRootCmd() (*cobra.Command, params.EncodingConfig) {
}
func initRootCmd(rootCmd *cobra.Command, encodingConfig params.EncodingConfig) {
- authclient.Codec = encodingConfig.Marshaler
-
rootCmd.AddCommand(
genutilcli.InitCmd(simapp.ModuleBasics, simapp.DefaultNodeHome),
genutilcli.CollectGenTxsCmd(banktypes.GenesisBalancesIterator{}, simapp.DefaultNodeHome),
@@ -78,6 +88,7 @@ func initRootCmd(rootCmd *cobra.Command, encodingConfig params.EncodingConfig) {
tmcli.NewCompletionCmd(rootCmd, true),
testnetCmd(simapp.ModuleBasics, banktypes.GenesisBalancesIterator{}),
debug.Cmd(),
+ config.Cmd(),
)
a := appCreator{encodingConfig}
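The root command changes switch the client context to the renamed `WithJSONCodec` setter and opt out of an environment-variable prefix via `WithViper("")`. A trimmed-down sketch of the same construction, with a hypothetical `newClientContext` helper:

```go
package example

import (
	"os"

	"github.com/cosmos/cosmos-sdk/client"
	"github.com/cosmos/cosmos-sdk/client/flags"
	"github.com/cosmos/cosmos-sdk/codec"
	codectypes "github.com/cosmos/cosmos-sdk/codec/types"
)

// newClientContext is a hypothetical helper isolating the context setup used
// above: WithJSONCodec replaces WithJSONMarshaler, and WithViper("") means no
// environment-variable prefix is used when reading client configuration.
func newClientContext(homeDir string) client.Context {
	registry := codectypes.NewInterfaceRegistry()
	cdc := codec.NewProtoCodec(registry)

	return client.Context{}.
		WithJSONCodec(cdc).
		WithInterfaceRegistry(registry).
		WithInput(os.Stdin).
		WithBroadcastMode(flags.BroadcastBlock).
		WithHomeDir(homeDir).
		WithViper("")
}
```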
diff --git a/testing/simapp/simd/cmd/testnet.go b/testing/simapp/simd/cmd/testnet.go
index 0717b398..d188253a 100644
--- a/testing/simapp/simd/cmd/testnet.go
+++ b/testing/simapp/simd/cmd/testnet.go
@@ -196,8 +196,8 @@ func InitTestnet(
return err
}
- accTokens := sdk.TokensFromConsensusPower(1000)
- accStakingTokens := sdk.TokensFromConsensusPower(500)
+ accTokens := sdk.TokensFromConsensusPower(1000, sdk.DefaultPowerReduction)
+ accStakingTokens := sdk.TokensFromConsensusPower(500, sdk.DefaultPowerReduction)
coins := sdk.Coins{
sdk.NewCoin(fmt.Sprintf("%stoken", nodeDirName), accTokens),
sdk.NewCoin(sdk.DefaultBondDenom, accStakingTokens),
@@ -206,7 +206,7 @@ func InitTestnet(
genBalances = append(genBalances, banktypes.Balance{Address: addr.String(), Coins: coins.Sort()})
genAccounts = append(genAccounts, authtypes.NewBaseAccount(addr, nil, 0, 0))
- valTokens := sdk.TokensFromConsensusPower(100)
+ valTokens := sdk.TokensFromConsensusPower(100, sdk.DefaultPowerReduction)
createValMsg, err := stakingtypes.NewMsgCreateValidator(
sdk.ValAddress(addr),
valPubKeys[i],
@@ -271,11 +271,11 @@ func initGenFiles(
genFiles []string, numValidators int,
) error {
- appGenState := mbm.DefaultGenesis(clientCtx.JSONMarshaler)
+ appGenState := mbm.DefaultGenesis(clientCtx.JSONCodec)
// set the accounts in the genesis state
var authGenState authtypes.GenesisState
- clientCtx.JSONMarshaler.MustUnmarshalJSON(appGenState[authtypes.ModuleName], &authGenState)
+ clientCtx.JSONCodec.MustUnmarshalJSON(appGenState[authtypes.ModuleName], &authGenState)
accounts, err := authtypes.PackAccounts(genAccounts)
if err != nil {
@@ -283,14 +283,14 @@ func initGenFiles(
}
authGenState.Accounts = accounts
- appGenState[authtypes.ModuleName] = clientCtx.JSONMarshaler.MustMarshalJSON(&authGenState)
+ appGenState[authtypes.ModuleName] = clientCtx.JSONCodec.MustMarshalJSON(&authGenState)
// set the balances in the genesis state
var bankGenState banktypes.GenesisState
- clientCtx.JSONMarshaler.MustUnmarshalJSON(appGenState[banktypes.ModuleName], &bankGenState)
+ clientCtx.JSONCodec.MustUnmarshalJSON(appGenState[banktypes.ModuleName], &bankGenState)
bankGenState.Balances = genBalances
- appGenState[banktypes.ModuleName] = clientCtx.JSONMarshaler.MustMarshalJSON(&bankGenState)
+ appGenState[banktypes.ModuleName] = clientCtx.JSONCodec.MustMarshalJSON(&bankGenState)
appGenStateJSON, err := json.MarshalIndent(appGenState, "", " ")
if err != nil {
@@ -337,7 +337,7 @@ func collectGenFiles(
return err
}
- nodeAppState, err := genutil.GenAppStateFromConfig(clientCtx.JSONMarshaler, clientCtx.TxConfig, nodeConfig, initCfg, *genDoc, genBalIterator)
+ nodeAppState, err := genutil.GenAppStateFromConfig(clientCtx.JSONCodec, clientCtx.TxConfig, nodeConfig, initCfg, *genDoc, genBalIterator)
if err != nil {
return err
}
diff --git a/testing/simapp/state.go b/testing/simapp/state.go
index 6b52fb45..ebf1d301 100644
--- a/testing/simapp/state.go
+++ b/testing/simapp/state.go
@@ -25,7 +25,7 @@ import (
// AppStateFn returns the initial application state using a genesis or the simulation parameters.
// It panics if the user provides files for both of them.
// If a file is not given for the genesis or the sim params, it creates a randomized one.
-func AppStateFn(cdc codec.JSONMarshaler, simManager *module.SimulationManager) simtypes.AppStateFn {
+func AppStateFn(cdc codec.JSONCodec, simManager *module.SimulationManager) simtypes.AppStateFn {
return func(r *rand.Rand, accs []simtypes.Account, config simtypes.Config,
) (appState json.RawMessage, simAccs []simtypes.Account, chainID string, genesisTimestamp time.Time) {
@@ -129,7 +129,7 @@ func AppStateFn(cdc codec.JSONMarshaler, simManager *module.SimulationManager) s
// AppStateRandomizedFn creates calls each module's GenesisState generator function
// and creates the simulation params
func AppStateRandomizedFn(
- simManager *module.SimulationManager, r *rand.Rand, cdc codec.JSONMarshaler,
+ simManager *module.SimulationManager, r *rand.Rand, cdc codec.JSONCodec,
accs []simtypes.Account, genesisTimestamp time.Time, appParams simtypes.AppParams,
) (json.RawMessage, []simtypes.Account) {
numAccs := int64(len(accs))
@@ -183,7 +183,7 @@ func AppStateRandomizedFn(
// AppStateFromGenesisFileFn util function to generate the genesis AppState
// from a genesis.json file.
-func AppStateFromGenesisFileFn(r io.Reader, cdc codec.JSONMarshaler, genesisFile string) (tmtypes.GenesisDoc, []simtypes.Account) {
+func AppStateFromGenesisFileFn(r io.Reader, cdc codec.JSONCodec, genesisFile string) (tmtypes.GenesisDoc, []simtypes.Account) {
bytes, err := ioutil.ReadFile(genesisFile)
if err != nil {
panic(err)
diff --git a/testing/simapp/utils.go b/testing/simapp/utils.go
index cac61e94..8ee1a1f7 100644
--- a/testing/simapp/utils.go
+++ b/testing/simapp/utils.go
@@ -49,7 +49,7 @@ func SetupSimulation(dirPrefix, dbName string) (simtypes.Config, dbm.DB, string,
// SimulationOperations retrieves the simulation params from the provided file path
// and returns all the modules weighted operations
-func SimulationOperations(app App, cdc codec.JSONMarshaler, config simtypes.Config) []simtypes.WeightedOperation {
+func SimulationOperations(app App, cdc codec.JSONCodec, config simtypes.Config) []simtypes.WeightedOperation {
simState := module.SimulationState{
AppParams: make(simtypes.AppParams),
Cdc: cdc,
diff --git a/testing/simapp/utils_test.go b/testing/simapp/utils_test.go
index 6d8bb21f..0240c482 100644
--- a/testing/simapp/utils_test.go
+++ b/testing/simapp/utils_test.go
@@ -41,7 +41,7 @@ func TestGetSimulationLog(t *testing.T) {
},
{
authtypes.StoreKey,
- []kv.Pair{{Key: authtypes.GlobalAccountNumberKey, Value: cdc.MustMarshalBinaryBare(uint64(10))}},
+ []kv.Pair{{Key: authtypes.GlobalAccountNumberKey, Value: cdc.MustMarshal(uint64(10))}},
"10",
},
{
diff --git a/testing/solomachine.go b/testing/solomachine.go
index ff4cc651..75d4b58b 100644
--- a/testing/solomachine.go
+++ b/testing/solomachine.go
@@ -24,7 +24,7 @@ import (
type Solomachine struct {
t *testing.T
- cdc codec.BinaryMarshaler
+ cdc codec.BinaryCodec
ClientID string
PrivateKeys []cryptotypes.PrivKey // keys used for signing
PublicKeys []cryptotypes.PubKey // keys used for generating solo machine pub key
@@ -37,7 +37,7 @@ type Solomachine struct {
// NewSolomachine returns a new solomachine instance with an `nKeys` amount of
// generated private/public key pairs and a sequence starting at 1. If nKeys
// is greater than 1 then a multisig public key is used.
-func NewSolomachine(t *testing.T, cdc codec.BinaryMarshaler, clientID, diversifier string, nKeys uint64) *Solomachine {
+func NewSolomachine(t *testing.T, cdc codec.BinaryCodec, clientID, diversifier string, nKeys uint64) *Solomachine {
privKeys, pubKeys, pk := GenerateKeys(t, nKeys)
return &Solomachine{
@@ -119,7 +119,7 @@ func (solo *Solomachine) CreateHeader() *solomachinetypes.Header {
NewDiversifier: solo.Diversifier,
}
- dataBz, err := solo.cdc.MarshalBinaryBare(data)
+ dataBz, err := solo.cdc.Marshal(data)
require.NoError(solo.t, err)
signBytes := &solomachinetypes.SignBytes{
@@ -130,7 +130,7 @@ func (solo *Solomachine) CreateHeader() *solomachinetypes.Header {
Data: dataBz,
}
- bz, err := solo.cdc.MarshalBinaryBare(signBytes)
+ bz, err := solo.cdc.Marshal(signBytes)
require.NoError(solo.t, err)
sig := solo.GenerateSignature(bz)
@@ -171,7 +171,7 @@ func (solo *Solomachine) CreateMisbehaviour() *solomachinetypes.Misbehaviour {
Data: dataOne,
}
- bz, err := solo.cdc.MarshalBinaryBare(signBytes)
+ bz, err := solo.cdc.Marshal(signBytes)
require.NoError(solo.t, err)
sig := solo.GenerateSignature(bz)
@@ -193,7 +193,7 @@ func (solo *Solomachine) CreateMisbehaviour() *solomachinetypes.Misbehaviour {
Data: dataTwo,
}
- bz, err = solo.cdc.MarshalBinaryBare(signBytes)
+ bz, err = solo.cdc.Marshal(signBytes)
require.NoError(solo.t, err)
sig = solo.GenerateSignature(bz)
@@ -241,7 +241,7 @@ func (solo *Solomachine) GenerateSignature(signBytes []byte) []byte {
}
protoSigData := signing.SignatureDataToProto(sigData)
- bz, err := solo.cdc.MarshalBinaryBare(protoSigData)
+ bz, err := solo.cdc.Marshal(protoSigData)
require.NoError(solo.t, err)
return bz
From 146bc93795ad4354f22afa7239dd25e293b79748 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Thu, 6 May 2021 12:55:56 +0200
Subject: [PATCH 048/393] Bump github.com/armon/go-metrics from 0.3.7 to 0.3.8
(#155)
Bumps [github.com/armon/go-metrics](https://github.com/armon/go-metrics) from 0.3.7 to 0.3.8.
- [Release notes](https://github.com/armon/go-metrics/releases)
- [Commits](https://github.com/armon/go-metrics/compare/v0.3.7...v0.3.8)
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
go.mod | 2 +-
go.sum | 3 ++-
2 files changed, 3 insertions(+), 2 deletions(-)
diff --git a/go.mod b/go.mod
index 06831989..f1e91d76 100644
--- a/go.mod
+++ b/go.mod
@@ -5,7 +5,7 @@ module github.com/cosmos/ibc-go
replace github.com/gogo/protobuf => github.com/regen-network/protobuf v1.3.3-alpha.regen.1
require (
- github.com/armon/go-metrics v0.3.7
+ github.com/armon/go-metrics v0.3.8
github.com/confio/ics23/go v0.6.6
github.com/cosmos/cosmos-sdk v0.43.0-alpha1.0.20210504090054-e3e89f52607b
github.com/gogo/protobuf v1.3.3
diff --git a/go.sum b/go.sum
index d1d4a9ef..016cc726 100644
--- a/go.sum
+++ b/go.sum
@@ -62,8 +62,9 @@ github.com/aristanetworks/goarista v0.0.0-20170210015632-ea17b1a17847/go.mod h1:
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
-github.com/armon/go-metrics v0.3.7 h1:c/oCtWzYpboy6+6f6LjXRlyW7NwA2SWf+a9KMlHq/bM=
github.com/armon/go-metrics v0.3.7/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc=
+github.com/armon/go-metrics v0.3.8 h1:oOxq3KPj0WhCuy50EhzwiyMyG2ovRQZpZLXQuOh2a/M=
+github.com/armon/go-metrics v0.3.8/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A=
github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU=
From 6537f248812780adf553a2aca63944607ea32468 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?colin=20axn=C3=A9r?=
<25233464+colin-axner@users.noreply.github.com>
Date: Thu, 6 May 2021 19:20:29 +0200
Subject: [PATCH 049/393] Revert proto package naming apps -> applications
(#157)
* rename proto apps -> applications
* regenerate protobuf files
---
docs/ibc/proto-docs.md | 98 +++++++-------
modules/apps/transfer/types/genesis.pb.go | 53 ++++----
modules/apps/transfer/types/query.pb.go | 123 +++++++++---------
modules/apps/transfer/types/query.pb.gw.go | 2 +-
modules/apps/transfer/types/transfer.pb.go | 70 +++++-----
modules/apps/transfer/types/tx.pb.go | 94 ++++++-------
.../transfer/v1/genesis.proto | 4 +-
.../transfer/v1/query.proto | 4 +-
.../transfer/v1/transfer.proto | 2 +-
.../transfer/v1/tx.proto | 2 +-
10 files changed, 229 insertions(+), 223 deletions(-)
rename proto/ibc/{apps => applications}/transfer/v1/genesis.proto (85%)
rename proto/ibc/{apps => applications}/transfer/v1/query.proto (96%)
rename proto/ibc/{apps => applications}/transfer/v1/transfer.proto (97%)
rename proto/ibc/{apps => applications}/transfer/v1/tx.proto (97%)
diff --git a/docs/ibc/proto-docs.md b/docs/ibc/proto-docs.md
index aee597f6..ecacd9f4 100644
--- a/docs/ibc/proto-docs.md
+++ b/docs/ibc/proto-docs.md
@@ -4,23 +4,23 @@
## Table of Contents
-- [ibc/apps/transfer/v1/transfer.proto](#ibc/apps/transfer/v1/transfer.proto)
- - [DenomTrace](#ibc.apps.transfer.v1.DenomTrace)
- - [FungibleTokenPacketData](#ibc.apps.transfer.v1.FungibleTokenPacketData)
- - [Params](#ibc.apps.transfer.v1.Params)
+- [ibc/applications/transfer/v1/transfer.proto](#ibc/applications/transfer/v1/transfer.proto)
+ - [DenomTrace](#ibc.applications.transfer.v1.DenomTrace)
+ - [FungibleTokenPacketData](#ibc.applications.transfer.v1.FungibleTokenPacketData)
+ - [Params](#ibc.applications.transfer.v1.Params)
-- [ibc/apps/transfer/v1/genesis.proto](#ibc/apps/transfer/v1/genesis.proto)
- - [GenesisState](#ibc.apps.transfer.v1.GenesisState)
+- [ibc/applications/transfer/v1/genesis.proto](#ibc/applications/transfer/v1/genesis.proto)
+ - [GenesisState](#ibc.applications.transfer.v1.GenesisState)
-- [ibc/apps/transfer/v1/query.proto](#ibc/apps/transfer/v1/query.proto)
- - [QueryDenomTraceRequest](#ibc.apps.transfer.v1.QueryDenomTraceRequest)
- - [QueryDenomTraceResponse](#ibc.apps.transfer.v1.QueryDenomTraceResponse)
- - [QueryDenomTracesRequest](#ibc.apps.transfer.v1.QueryDenomTracesRequest)
- - [QueryDenomTracesResponse](#ibc.apps.transfer.v1.QueryDenomTracesResponse)
- - [QueryParamsRequest](#ibc.apps.transfer.v1.QueryParamsRequest)
- - [QueryParamsResponse](#ibc.apps.transfer.v1.QueryParamsResponse)
+- [ibc/applications/transfer/v1/query.proto](#ibc/applications/transfer/v1/query.proto)
+ - [QueryDenomTraceRequest](#ibc.applications.transfer.v1.QueryDenomTraceRequest)
+ - [QueryDenomTraceResponse](#ibc.applications.transfer.v1.QueryDenomTraceResponse)
+ - [QueryDenomTracesRequest](#ibc.applications.transfer.v1.QueryDenomTracesRequest)
+ - [QueryDenomTracesResponse](#ibc.applications.transfer.v1.QueryDenomTracesResponse)
+ - [QueryParamsRequest](#ibc.applications.transfer.v1.QueryParamsRequest)
+ - [QueryParamsResponse](#ibc.applications.transfer.v1.QueryParamsResponse)
- - [Query](#ibc.apps.transfer.v1.Query)
+ - [Query](#ibc.applications.transfer.v1.Query)
- [ibc/core/client/v1/client.proto](#ibc/core/client/v1/client.proto)
- [ClientConsensusStates](#ibc.core.client.v1.ClientConsensusStates)
@@ -31,11 +31,11 @@
- [Params](#ibc.core.client.v1.Params)
- [UpgradeProposal](#ibc.core.client.v1.UpgradeProposal)
-- [ibc/apps/transfer/v1/tx.proto](#ibc/apps/transfer/v1/tx.proto)
- - [MsgTransfer](#ibc.apps.transfer.v1.MsgTransfer)
- - [MsgTransferResponse](#ibc.apps.transfer.v1.MsgTransferResponse)
+- [ibc/applications/transfer/v1/tx.proto](#ibc/applications/transfer/v1/tx.proto)
+ - [MsgTransfer](#ibc.applications.transfer.v1.MsgTransfer)
+ - [MsgTransferResponse](#ibc.applications.transfer.v1.MsgTransferResponse)
- - [Msg](#ibc.apps.transfer.v1.Msg)
+ - [Msg](#ibc.applications.transfer.v1.Msg)
- [ibc/core/channel/v1/channel.proto](#ibc/core/channel/v1/channel.proto)
- [Acknowledgement](#ibc.core.channel.v1.Acknowledgement)
@@ -225,14 +225,14 @@
-
+
Top
-## ibc/apps/transfer/v1/transfer.proto
+## ibc/applications/transfer/v1/transfer.proto
-
+
### DenomTrace
DenomTrace contains the base denomination for ICS20 fungible tokens and the
@@ -249,7 +249,7 @@ source tracing information path.
-
+
### FungibleTokenPacketData
FungibleTokenPacketData defines a struct for the packet payload
@@ -269,7 +269,7 @@ https://github.com/cosmos/ics/tree/master/spec/ics-020-fungible-token-transfer#d
-
+
### Params
Params defines the set of IBC transfer parameters.
@@ -297,14 +297,14 @@ parameter for the denomination to false.
-
+
Top
-## ibc/apps/transfer/v1/genesis.proto
+## ibc/applications/transfer/v1/genesis.proto
-
+
### GenesisState
GenesisState defines the ibc-transfer genesis state
@@ -313,8 +313,8 @@ GenesisState defines the ibc-transfer genesis state
| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `port_id` | [string](#string) | | |
-| `denom_traces` | [DenomTrace](#ibc.apps.transfer.v1.DenomTrace) | repeated | |
-| `params` | [Params](#ibc.apps.transfer.v1.Params) | | |
+| `denom_traces` | [DenomTrace](#ibc.applications.transfer.v1.DenomTrace) | repeated | |
+| `params` | [Params](#ibc.applications.transfer.v1.Params) | | |
@@ -330,14 +330,14 @@ GenesisState defines the ibc-transfer genesis state
-
+
Top
-## ibc/apps/transfer/v1/query.proto
+## ibc/applications/transfer/v1/query.proto
-
+
### QueryDenomTraceRequest
QueryDenomTraceRequest is the request type for the Query/DenomTrace RPC
@@ -353,7 +353,7 @@ method
-
+
### QueryDenomTraceResponse
QueryDenomTraceResponse is the response type for the Query/DenomTrace RPC
@@ -362,14 +362,14 @@ method.
| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
-| `denom_trace` | [DenomTrace](#ibc.apps.transfer.v1.DenomTrace) | | denom_trace returns the requested denomination trace information. |
+| `denom_trace` | [DenomTrace](#ibc.applications.transfer.v1.DenomTrace) | | denom_trace returns the requested denomination trace information. |
-
+
### QueryDenomTracesRequest
QueryConnectionsRequest is the request type for the Query/DenomTraces RPC
@@ -385,7 +385,7 @@ method
-
+
### QueryDenomTracesResponse
QueryConnectionsResponse is the response type for the Query/DenomTraces RPC
@@ -394,7 +394,7 @@ method.
| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
-| `denom_traces` | [DenomTrace](#ibc.apps.transfer.v1.DenomTrace) | repeated | denom_traces returns all denominations trace information. |
+| `denom_traces` | [DenomTrace](#ibc.applications.transfer.v1.DenomTrace) | repeated | denom_traces returns all denominations trace information. |
| `pagination` | [cosmos.base.query.v1beta1.PageResponse](#cosmos.base.query.v1beta1.PageResponse) | | pagination defines the pagination in the response. |
@@ -402,7 +402,7 @@ method.
-
+
### QueryParamsRequest
QueryParamsRequest is the request type for the Query/Params RPC method.
@@ -412,7 +412,7 @@ QueryParamsRequest is the request type for the Query/Params RPC method.
-
+
### QueryParamsResponse
QueryParamsResponse is the response type for the Query/Params RPC method.
@@ -420,7 +420,7 @@ QueryParamsResponse is the response type for the Query/Params RPC method.
| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
-| `params` | [Params](#ibc.apps.transfer.v1.Params) | | params defines the parameters of the module. |
+| `params` | [Params](#ibc.applications.transfer.v1.Params) | | params defines the parameters of the module. |
@@ -433,16 +433,16 @@ QueryParamsResponse is the response type for the Query/Params RPC method.
-
+
### Query
Query defines the gRPC querier service.
| Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint |
| ----------- | ------------ | ------------- | ------------| ------- | -------- |
-| `DenomTrace` | [QueryDenomTraceRequest](#ibc.apps.transfer.v1.QueryDenomTraceRequest) | [QueryDenomTraceResponse](#ibc.apps.transfer.v1.QueryDenomTraceResponse) | DenomTrace queries a denomination trace information. | GET|/ibc/apps/transfer/v1/denom_traces/{hash}|
-| `DenomTraces` | [QueryDenomTracesRequest](#ibc.apps.transfer.v1.QueryDenomTracesRequest) | [QueryDenomTracesResponse](#ibc.apps.transfer.v1.QueryDenomTracesResponse) | DenomTraces queries all denomination traces. | GET|/ibc/apps/transfer/v1/denom_traces|
-| `Params` | [QueryParamsRequest](#ibc.apps.transfer.v1.QueryParamsRequest) | [QueryParamsResponse](#ibc.apps.transfer.v1.QueryParamsResponse) | Params queries all parameters of the ibc-transfer module. | GET|/ibc/apps/transfer/v1/params|
+| `DenomTrace` | [QueryDenomTraceRequest](#ibc.applications.transfer.v1.QueryDenomTraceRequest) | [QueryDenomTraceResponse](#ibc.applications.transfer.v1.QueryDenomTraceResponse) | DenomTrace queries a denomination trace information. | GET|/ibc/apps/transfer/v1/denom_traces/{hash}|
+| `DenomTraces` | [QueryDenomTracesRequest](#ibc.applications.transfer.v1.QueryDenomTracesRequest) | [QueryDenomTracesResponse](#ibc.applications.transfer.v1.QueryDenomTracesResponse) | DenomTraces queries all denomination traces. | GET|/ibc/apps/transfer/v1/denom_traces|
+| `Params` | [QueryParamsRequest](#ibc.applications.transfer.v1.QueryParamsRequest) | [QueryParamsResponse](#ibc.applications.transfer.v1.QueryParamsResponse) | Params queries all parameters of the ibc-transfer module. | GET|/ibc/apps/transfer/v1/params|
@@ -598,14 +598,14 @@ upgrade.
-
+
Top
-## ibc/apps/transfer/v1/tx.proto
+## ibc/applications/transfer/v1/tx.proto
-
+
### MsgTransfer
MsgTransfer defines a msg to transfer fungible tokens (i.e Coins) between
@@ -628,7 +628,7 @@ https://github.com/cosmos/ics/tree/master/spec/ics-020-fungible-token-transfer#d
-
+
### MsgTransferResponse
MsgTransferResponse defines the Msg/Transfer response type.
@@ -644,14 +644,14 @@ MsgTransferResponse defines the Msg/Transfer response type.
-
+
### Msg
Msg defines the ibc/transfer Msg service.
| Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint |
| ----------- | ------------ | ------------- | ------------| ------- | -------- |
-| `Transfer` | [MsgTransfer](#ibc.apps.transfer.v1.MsgTransfer) | [MsgTransferResponse](#ibc.apps.transfer.v1.MsgTransferResponse) | Transfer defines a rpc handler method for MsgTransfer. | |
+| `Transfer` | [MsgTransfer](#ibc.applications.transfer.v1.MsgTransfer) | [MsgTransferResponse](#ibc.applications.transfer.v1.MsgTransferResponse) | Transfer defines a rpc handler method for MsgTransfer. | |
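The practical effect of the proto package revert is on fully-qualified names: gRPC clients now call `/ibc.applications.transfer.v1.Query/...`, while the HTTP routes in the table above stay under `/ibc/apps/transfer/v1/...`. A sketch of a params query against the renamed service, assuming a node exposing gRPC on `localhost:9090`:

```go
package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc"

	transfertypes "github.com/cosmos/ibc-go/modules/apps/transfer/types"
)

// main queries the transfer module params over gRPC. The generated client
// resolves to the renamed full method name
// /ibc.applications.transfer.v1.Query/Params; the endpoint address is a
// placeholder.
func main() {
	conn, err := grpc.Dial("localhost:9090", grpc.WithInsecure())
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	queryClient := transfertypes.NewQueryClient(conn)
	res, err := queryClient.Params(context.Background(), &transfertypes.QueryParamsRequest{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("send_enabled=%t receive_enabled=%t\n", res.Params.SendEnabled, res.Params.ReceiveEnabled)
}
```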
diff --git a/modules/apps/transfer/types/genesis.pb.go b/modules/apps/transfer/types/genesis.pb.go
index b0a9942e..94eb0108 100644
--- a/modules/apps/transfer/types/genesis.pb.go
+++ b/modules/apps/transfer/types/genesis.pb.go
@@ -1,5 +1,5 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: ibc/apps/transfer/v1/genesis.proto
+// source: ibc/applications/transfer/v1/genesis.proto
package types
@@ -34,7 +34,7 @@ func (m *GenesisState) Reset() { *m = GenesisState{} }
func (m *GenesisState) String() string { return proto.CompactTextString(m) }
func (*GenesisState) ProtoMessage() {}
func (*GenesisState) Descriptor() ([]byte, []int) {
- return fileDescriptor_33776620329d43dd, []int{0}
+ return fileDescriptor_a4f788affd5bea89, []int{0}
}
func (m *GenesisState) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -85,35 +85,36 @@ func (m *GenesisState) GetParams() Params {
}
func init() {
- proto.RegisterType((*GenesisState)(nil), "ibc.apps.transfer.v1.GenesisState")
+ proto.RegisterType((*GenesisState)(nil), "ibc.applications.transfer.v1.GenesisState")
}
func init() {
- proto.RegisterFile("ibc/apps/transfer/v1/genesis.proto", fileDescriptor_33776620329d43dd)
+ proto.RegisterFile("ibc/applications/transfer/v1/genesis.proto", fileDescriptor_a4f788affd5bea89)
}
-var fileDescriptor_33776620329d43dd = []byte{
- // 314 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0xca, 0x4c, 0x4a, 0xd6,
- 0x4f, 0x2c, 0x28, 0x28, 0xd6, 0x2f, 0x29, 0x4a, 0xcc, 0x2b, 0x4e, 0x4b, 0x2d, 0xd2, 0x2f, 0x33,
- 0xd4, 0x4f, 0x4f, 0xcd, 0x4b, 0x2d, 0xce, 0x2c, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12,
- 0xc9, 0x4c, 0x4a, 0xd6, 0x03, 0xa9, 0xd1, 0x83, 0xa9, 0xd1, 0x2b, 0x33, 0x94, 0x52, 0xc6, 0xaa,
- 0x13, 0xae, 0x02, 0xac, 0x55, 0x4a, 0x24, 0x3d, 0x3f, 0x3d, 0x1f, 0xcc, 0xd4, 0x07, 0xb1, 0x20,
- 0xa2, 0x4a, 0x8f, 0x19, 0xb9, 0x78, 0xdc, 0x21, 0x56, 0x04, 0x97, 0x24, 0x96, 0xa4, 0x0a, 0x69,
- 0x73, 0xb1, 0x17, 0xe4, 0x17, 0x95, 0xc4, 0x67, 0xa6, 0x48, 0x30, 0x2a, 0x30, 0x6a, 0x70, 0x3a,
- 0x09, 0x7d, 0xba, 0x27, 0xcf, 0x57, 0x99, 0x98, 0x9b, 0x63, 0xa5, 0x04, 0x95, 0x50, 0x0a, 0x62,
- 0x03, 0xb1, 0x3c, 0x53, 0x84, 0xb2, 0xb8, 0x78, 0x52, 0x52, 0xf3, 0xf2, 0x73, 0xe3, 0x4b, 0x8a,
- 0x12, 0x93, 0x53, 0x8b, 0x25, 0x98, 0x14, 0x98, 0x35, 0xb8, 0x8d, 0x14, 0xf4, 0xb0, 0xb9, 0x52,
- 0xcf, 0x05, 0xa4, 0x32, 0x04, 0xa4, 0xd0, 0x49, 0xf5, 0xc4, 0x3d, 0x79, 0x86, 0x4f, 0xf7, 0xe4,
- 0x85, 0x21, 0xe6, 0x22, 0x9b, 0xa1, 0xb4, 0xea, 0xbe, 0x3c, 0x1b, 0x58, 0x55, 0x71, 0x10, 0x77,
- 0x0a, 0x5c, 0x4b, 0xb1, 0x90, 0x15, 0x17, 0x5b, 0x41, 0x62, 0x51, 0x62, 0x6e, 0xb1, 0x04, 0xb3,
- 0x02, 0xa3, 0x06, 0xb7, 0x91, 0x0c, 0x76, 0x5b, 0x02, 0xc0, 0x6a, 0x9c, 0x58, 0x40, 0x36, 0x04,
- 0x41, 0x75, 0x38, 0xf9, 0x9d, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, 0x72,
- 0x8c, 0x13, 0x1e, 0xcb, 0x31, 0x5c, 0x78, 0x2c, 0xc7, 0x70, 0xe3, 0xb1, 0x1c, 0x43, 0x94, 0x49,
- 0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x7e, 0x72, 0x7e, 0x71, 0x6e, 0x7e,
- 0xb1, 0x7e, 0x66, 0x52, 0xb2, 0x6e, 0x7a, 0xbe, 0x7e, 0x6e, 0x7e, 0x4a, 0x69, 0x4e, 0x6a, 0x31,
- 0x5a, 0xb8, 0x96, 0x54, 0x16, 0xa4, 0x16, 0x27, 0xb1, 0x81, 0x03, 0xcf, 0x18, 0x10, 0x00, 0x00,
- 0xff, 0xff, 0x48, 0x6d, 0x4a, 0x3d, 0xb3, 0x01, 0x00, 0x00,
+var fileDescriptor_a4f788affd5bea89 = []byte{
+ // 323 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x90, 0xc1, 0x4a, 0xf3, 0x40,
+ 0x14, 0x85, 0x33, 0x7f, 0x7f, 0x22, 0xa6, 0xc5, 0x45, 0x74, 0x51, 0x8a, 0x24, 0x25, 0x28, 0x04,
+ 0x8b, 0x33, 0xb4, 0xba, 0x72, 0x19, 0x04, 0x71, 0x23, 0x52, 0x5d, 0xb9, 0x29, 0x93, 0xc9, 0x18,
+ 0x07, 0x9a, 0xdc, 0x30, 0x77, 0x5a, 0xe8, 0x5b, 0xf8, 0x1c, 0x3e, 0x49, 0x97, 0x5d, 0xba, 0xaa,
+ 0xd2, 0xbe, 0x41, 0x7d, 0x01, 0x49, 0x5a, 0x4b, 0x57, 0xdd, 0x1d, 0x66, 0xbe, 0x73, 0xce, 0xe5,
+ 0x38, 0x17, 0x2a, 0x16, 0x8c, 0x17, 0xc5, 0x50, 0x09, 0x6e, 0x14, 0xe4, 0xc8, 0x8c, 0xe6, 0x39,
+ 0xbe, 0x4a, 0xcd, 0xc6, 0x5d, 0x96, 0xca, 0x5c, 0xa2, 0x42, 0x5a, 0x68, 0x30, 0xe0, 0x9e, 0xaa,
+ 0x58, 0xd0, 0x5d, 0x96, 0xfe, 0xb1, 0x74, 0xdc, 0x6d, 0x75, 0xf6, 0x26, 0x6d, 0xc9, 0x2a, 0xaa,
+ 0x75, 0x92, 0x42, 0x0a, 0x95, 0x64, 0xa5, 0x5a, 0xbf, 0x06, 0x3f, 0xc4, 0x69, 0xdc, 0xad, 0x2b,
+ 0x9f, 0x0c, 0x37, 0xd2, 0xed, 0x38, 0x07, 0x05, 0x68, 0x33, 0x50, 0x49, 0x93, 0xb4, 0x49, 0x78,
+ 0x18, 0xb9, 0xab, 0xb9, 0x7f, 0x34, 0xe1, 0xd9, 0xf0, 0x26, 0xd8, 0x7c, 0x04, 0x7d, 0xbb, 0x54,
+ 0xf7, 0x89, 0xab, 0x9d, 0x46, 0x22, 0x73, 0xc8, 0x06, 0x46, 0x73, 0x21, 0xb1, 0xf9, 0xaf, 0x5d,
+ 0x0b, 0xeb, 0xbd, 0x90, 0xee, 0xbb, 0x9a, 0xde, 0x96, 0x8e, 0xe7, 0xd2, 0x10, 0x9d, 0x4f, 0xe7,
+ 0xbe, 0xb5, 0x9a, 0xfb, 0xc7, 0xeb, 0xfc, 0xdd, 0xac, 0xe0, 0xe3, 0xcb, 0xb7, 0x2b, 0x0a, 0xfb,
+ 0xf5, 0x64, 0x6b, 0x41, 0x37, 0x72, 0xec, 0x82, 0x6b, 0x9e, 0x61, 0xb3, 0xd6, 0x26, 0x61, 0xbd,
+ 0x77, 0xb6, 0xbf, 0xed, 0xb1, 0x62, 0xa3, 0xff, 0x65, 0x53, 0x7f, 0xe3, 0x8c, 0x1e, 0xa6, 0x0b,
+ 0x8f, 0xcc, 0x16, 0x1e, 0xf9, 0x5e, 0x78, 0xe4, 0x7d, 0xe9, 0x59, 0xb3, 0xa5, 0x67, 0x7d, 0x2e,
+ 0x3d, 0xeb, 0xe5, 0x3a, 0x55, 0xe6, 0x6d, 0x14, 0x53, 0x01, 0x19, 0x13, 0x80, 0x19, 0x20, 0x53,
+ 0xb1, 0xb8, 0x4c, 0x81, 0x65, 0x90, 0x8c, 0x86, 0x12, 0xcb, 0xbd, 0x77, 0x76, 0x36, 0x93, 0x42,
+ 0x62, 0x6c, 0x57, 0x63, 0x5e, 0xfd, 0x06, 0x00, 0x00, 0xff, 0xff, 0xb1, 0x5d, 0xce, 0xa9, 0xdb,
+ 0x01, 0x00, 0x00,
}
func (m *GenesisState) Marshal() (dAtA []byte, err error) {
diff --git a/modules/apps/transfer/types/query.pb.go b/modules/apps/transfer/types/query.pb.go
index 2b99d429..082172d2 100644
--- a/modules/apps/transfer/types/query.pb.go
+++ b/modules/apps/transfer/types/query.pb.go
@@ -1,5 +1,5 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: ibc/apps/transfer/v1/query.proto
+// source: ibc/applications/transfer/v1/query.proto
package types
@@ -41,7 +41,7 @@ func (m *QueryDenomTraceRequest) Reset() { *m = QueryDenomTraceRequest{}
func (m *QueryDenomTraceRequest) String() string { return proto.CompactTextString(m) }
func (*QueryDenomTraceRequest) ProtoMessage() {}
func (*QueryDenomTraceRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_8bfda59865efaa24, []int{0}
+ return fileDescriptor_a638e2800a01538c, []int{0}
}
func (m *QueryDenomTraceRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -88,7 +88,7 @@ func (m *QueryDenomTraceResponse) Reset() { *m = QueryDenomTraceResponse
func (m *QueryDenomTraceResponse) String() string { return proto.CompactTextString(m) }
func (*QueryDenomTraceResponse) ProtoMessage() {}
func (*QueryDenomTraceResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_8bfda59865efaa24, []int{1}
+ return fileDescriptor_a638e2800a01538c, []int{1}
}
func (m *QueryDenomTraceResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -135,7 +135,7 @@ func (m *QueryDenomTracesRequest) Reset() { *m = QueryDenomTracesRequest
func (m *QueryDenomTracesRequest) String() string { return proto.CompactTextString(m) }
func (*QueryDenomTracesRequest) ProtoMessage() {}
func (*QueryDenomTracesRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_8bfda59865efaa24, []int{2}
+ return fileDescriptor_a638e2800a01538c, []int{2}
}
func (m *QueryDenomTracesRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -184,7 +184,7 @@ func (m *QueryDenomTracesResponse) Reset() { *m = QueryDenomTracesRespon
func (m *QueryDenomTracesResponse) String() string { return proto.CompactTextString(m) }
func (*QueryDenomTracesResponse) ProtoMessage() {}
func (*QueryDenomTracesResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_8bfda59865efaa24, []int{3}
+ return fileDescriptor_a638e2800a01538c, []int{3}
}
func (m *QueryDenomTracesResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -235,7 +235,7 @@ func (m *QueryParamsRequest) Reset() { *m = QueryParamsRequest{} }
func (m *QueryParamsRequest) String() string { return proto.CompactTextString(m) }
func (*QueryParamsRequest) ProtoMessage() {}
func (*QueryParamsRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_8bfda59865efaa24, []int{4}
+ return fileDescriptor_a638e2800a01538c, []int{4}
}
func (m *QueryParamsRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -274,7 +274,7 @@ func (m *QueryParamsResponse) Reset() { *m = QueryParamsResponse{} }
func (m *QueryParamsResponse) String() string { return proto.CompactTextString(m) }
func (*QueryParamsResponse) ProtoMessage() {}
func (*QueryParamsResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_8bfda59865efaa24, []int{5}
+ return fileDescriptor_a638e2800a01538c, []int{5}
}
func (m *QueryParamsResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -311,51 +311,54 @@ func (m *QueryParamsResponse) GetParams() *Params {
}
func init() {
- proto.RegisterType((*QueryDenomTraceRequest)(nil), "ibc.apps.transfer.v1.QueryDenomTraceRequest")
- proto.RegisterType((*QueryDenomTraceResponse)(nil), "ibc.apps.transfer.v1.QueryDenomTraceResponse")
- proto.RegisterType((*QueryDenomTracesRequest)(nil), "ibc.apps.transfer.v1.QueryDenomTracesRequest")
- proto.RegisterType((*QueryDenomTracesResponse)(nil), "ibc.apps.transfer.v1.QueryDenomTracesResponse")
- proto.RegisterType((*QueryParamsRequest)(nil), "ibc.apps.transfer.v1.QueryParamsRequest")
- proto.RegisterType((*QueryParamsResponse)(nil), "ibc.apps.transfer.v1.QueryParamsResponse")
-}
-
-func init() { proto.RegisterFile("ibc/apps/transfer/v1/query.proto", fileDescriptor_8bfda59865efaa24) }
-
-var fileDescriptor_8bfda59865efaa24 = []byte{
- // 523 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x94, 0xc1, 0x6a, 0xd4, 0x40,
- 0x18, 0xc7, 0x77, 0x5a, 0x5d, 0xf0, 0x5b, 0xf1, 0x30, 0x16, 0x5d, 0xc2, 0x92, 0x2e, 0xb1, 0x68,
- 0x5b, 0xdb, 0x19, 0x52, 0xfb, 0x02, 0x16, 0xd1, 0x83, 0x20, 0x75, 0xf5, 0x24, 0x82, 0x4c, 0xb2,
- 0x63, 0x36, 0xd0, 0x64, 0xd2, 0xcc, 0xec, 0x42, 0x11, 0x2f, 0xfa, 0x02, 0x82, 0x17, 0x0f, 0x3e,
- 0x81, 0x4f, 0xe0, 0x1b, 0xd8, 0x63, 0xc1, 0x8b, 0x27, 0x95, 0x5d, 0x1f, 0x44, 0x32, 0x33, 0xe9,
- 0x66, 0xdd, 0x60, 0x73, 0x1b, 0x26, 0xff, 0xef, 0xfb, 0x7e, 0xff, 0xff, 0x37, 0x04, 0xfa, 0x71,
- 0x10, 0x52, 0x96, 0x65, 0x92, 0xaa, 0x9c, 0xa5, 0xf2, 0x35, 0xcf, 0xe9, 0xc4, 0xa7, 0xc7, 0x63,
- 0x9e, 0x9f, 0x90, 0x2c, 0x17, 0x4a, 0xe0, 0xb5, 0x38, 0x08, 0x49, 0xa1, 0x20, 0xa5, 0x82, 0x4c,
- 0x7c, 0x67, 0x2d, 0x12, 0x91, 0xd0, 0x02, 0x5a, 0x9c, 0x8c, 0xd6, 0xd9, 0x0e, 0x85, 0x4c, 0x84,
- 0xa4, 0x01, 0x93, 0xdc, 0x34, 0xa1, 0x13, 0x3f, 0xe0, 0x8a, 0xf9, 0x34, 0x63, 0x51, 0x9c, 0x32,
- 0x15, 0x8b, 0xd4, 0x6a, 0x6f, 0xd5, 0x4e, 0x3e, 0x9f, 0x61, 0x44, 0xbd, 0x48, 0x88, 0xe8, 0x88,
- 0x53, 0x96, 0xc5, 0x94, 0xa5, 0xa9, 0x50, 0xba, 0x83, 0x34, 0x5f, 0xbd, 0x1d, 0xb8, 0xf1, 0xb4,
- 0x18, 0xf2, 0x80, 0xa7, 0x22, 0x79, 0x9e, 0xb3, 0x90, 0x0f, 0xf8, 0xf1, 0x98, 0x4b, 0x85, 0x31,
- 0x5c, 0x1a, 0x31, 0x39, 0xea, 0xa2, 0x3e, 0xda, 0xbc, 0x32, 0xd0, 0x67, 0xef, 0x25, 0xdc, 0x5c,
- 0x52, 0xcb, 0x4c, 0xa4, 0x92, 0xe3, 0xfb, 0xd0, 0x19, 0x16, 0xb7, 0xaf, 0x54, 0x71, 0xad, 0xab,
- 0x3a, 0x7b, 0x7d, 0x52, 0xe7, 0x9c, 0x54, 0xca, 0x61, 0x78, 0x7e, 0xf6, 0xd8, 0x52, 0x77, 0x59,
- 0xc2, 0x3c, 0x04, 0x98, 0xbb, 0xb7, 0xcd, 0x6f, 0x13, 0x13, 0x15, 0x29, 0xa2, 0x22, 0x26, 0x6f,
- 0x1b, 0x15, 0x39, 0x64, 0x51, 0x69, 0x64, 0x50, 0xa9, 0xf4, 0xbe, 0x22, 0xe8, 0x2e, 0xcf, 0xb0,
- 0x16, 0x9e, 0xc1, 0xd5, 0x8a, 0x05, 0xd9, 0x45, 0xfd, 0xd5, 0x26, 0x1e, 0x0e, 0xae, 0x9d, 0xfe,
- 0x5c, 0x6f, 0x7d, 0xf9, 0xb5, 0xde, 0xb6, 0xfd, 0x3a, 0x73, 0x4f, 0x12, 0x3f, 0x5a, 0x20, 0x5f,
- 0xd1, 0xe4, 0x77, 0x2e, 0x24, 0x37, 0x44, 0x0b, 0xe8, 0x6b, 0x80, 0x35, 0xf9, 0x21, 0xcb, 0x59,
- 0x52, 0x06, 0xe3, 0x3d, 0x86, 0xeb, 0x0b, 0xb7, 0xd6, 0xca, 0x3e, 0xb4, 0x33, 0x7d, 0x63, 0xb3,
- 0xea, 0xd5, 0x9b, 0xb0, 0x55, 0x56, 0xbb, 0xf7, 0x6d, 0x15, 0x2e, 0xeb, 0x6e, 0xf8, 0x33, 0x02,
- 0x98, 0x3b, 0xc4, 0x3b, 0xf5, 0xe5, 0xf5, 0x2f, 0xc7, 0xd9, 0x6d, 0xa8, 0x36, 0xac, 0x9e, 0xff,
- 0xee, 0xfb, 0x9f, 0x8f, 0x2b, 0x77, 0xf1, 0x16, 0xad, 0x7d, 0xce, 0xd5, 0x95, 0xd0, 0x37, 0xc5,
- 0x33, 0x7c, 0x8b, 0x3f, 0x21, 0xe8, 0x54, 0x36, 0x88, 0x9b, 0x4d, 0x2c, 0x43, 0x73, 0x48, 0x53,
- 0xb9, 0x25, 0xdc, 0xd6, 0x84, 0x1b, 0xd8, 0xbb, 0x98, 0x10, 0xbf, 0x47, 0xd0, 0x36, 0xb1, 0xe2,
- 0xcd, 0xff, 0x8c, 0x59, 0xd8, 0xa2, 0xb3, 0xd5, 0x40, 0x69, 0x59, 0x36, 0x34, 0x8b, 0x8b, 0x7b,
- 0xf5, 0x2c, 0x66, 0x93, 0x07, 0x4f, 0x4e, 0xa7, 0x2e, 0x3a, 0x9b, 0xba, 0xe8, 0xf7, 0xd4, 0x45,
- 0x1f, 0x66, 0x6e, 0xeb, 0x6c, 0xe6, 0xb6, 0x7e, 0xcc, 0xdc, 0xd6, 0x8b, 0xfd, 0x28, 0x56, 0xa3,
- 0x71, 0x40, 0x42, 0x91, 0x50, 0xfb, 0xab, 0x89, 0x83, 0x70, 0x37, 0x12, 0x34, 0x11, 0xc3, 0xf1,
- 0x11, 0x97, 0xff, 0xf4, 0x54, 0x27, 0x19, 0x97, 0x41, 0x5b, 0xff, 0x2d, 0xee, 0xfd, 0x0d, 0x00,
- 0x00, 0xff, 0xff, 0xf9, 0xce, 0xc4, 0xab, 0xec, 0x04, 0x00, 0x00,
+ proto.RegisterType((*QueryDenomTraceRequest)(nil), "ibc.applications.transfer.v1.QueryDenomTraceRequest")
+ proto.RegisterType((*QueryDenomTraceResponse)(nil), "ibc.applications.transfer.v1.QueryDenomTraceResponse")
+ proto.RegisterType((*QueryDenomTracesRequest)(nil), "ibc.applications.transfer.v1.QueryDenomTracesRequest")
+ proto.RegisterType((*QueryDenomTracesResponse)(nil), "ibc.applications.transfer.v1.QueryDenomTracesResponse")
+ proto.RegisterType((*QueryParamsRequest)(nil), "ibc.applications.transfer.v1.QueryParamsRequest")
+ proto.RegisterType((*QueryParamsResponse)(nil), "ibc.applications.transfer.v1.QueryParamsResponse")
+}
+
+func init() {
+ proto.RegisterFile("ibc/applications/transfer/v1/query.proto", fileDescriptor_a638e2800a01538c)
+}
+
+var fileDescriptor_a638e2800a01538c = []byte{
+ // 529 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0x4f, 0x8b, 0xd3, 0x40,
+ 0x14, 0xef, 0xec, 0x6a, 0xc1, 0x57, 0xf1, 0x30, 0x2e, 0x5a, 0x42, 0xc9, 0x2e, 0xa1, 0x68, 0xdd,
+ 0xd5, 0x19, 0xb3, 0xae, 0x9e, 0x3c, 0x2d, 0xa2, 0x78, 0x91, 0xb5, 0x7a, 0xd2, 0x83, 0x4c, 0xd2,
+ 0x31, 0x0d, 0x34, 0x99, 0x6c, 0x26, 0x2d, 0x2c, 0xe2, 0xc5, 0x4f, 0x20, 0xec, 0x97, 0x10, 0xf1,
+ 0x43, 0x78, 0xdc, 0xe3, 0x82, 0x17, 0x4f, 0x2a, 0xad, 0xdf, 0xc1, 0xab, 0x64, 0x66, 0xb2, 0x4d,
+ 0xec, 0xd2, 0x9a, 0xdb, 0xf0, 0xf2, 0x7e, 0xef, 0xf7, 0xe7, 0x3d, 0x02, 0xbd, 0xd0, 0xf3, 0x29,
+ 0x4b, 0x92, 0x51, 0xe8, 0xb3, 0x2c, 0x14, 0xb1, 0xa4, 0x59, 0xca, 0x62, 0xf9, 0x96, 0xa7, 0x74,
+ 0xe2, 0xd2, 0xc3, 0x31, 0x4f, 0x8f, 0x48, 0x92, 0x8a, 0x4c, 0xe0, 0x4e, 0xe8, 0xf9, 0xa4, 0xdc,
+ 0x49, 0x8a, 0x4e, 0x32, 0x71, 0xad, 0x8d, 0x40, 0x04, 0x42, 0x35, 0xd2, 0xfc, 0xa5, 0x31, 0xd6,
+ 0xb6, 0x2f, 0x64, 0x24, 0x24, 0xf5, 0x98, 0xe4, 0x7a, 0x18, 0x9d, 0xb8, 0x1e, 0xcf, 0x98, 0x4b,
+ 0x13, 0x16, 0x84, 0xb1, 0x1a, 0x64, 0x7a, 0x77, 0x96, 0x2a, 0x39, 0xe3, 0xd2, 0xcd, 0x9d, 0x40,
+ 0x88, 0x60, 0xc4, 0x29, 0x4b, 0x42, 0xca, 0xe2, 0x58, 0x64, 0x46, 0x92, 0xfa, 0xea, 0xdc, 0x86,
+ 0x6b, 0xcf, 0x73, 0xb2, 0x47, 0x3c, 0x16, 0xd1, 0xcb, 0x94, 0xf9, 0xbc, 0xcf, 0x0f, 0xc7, 0x5c,
+ 0x66, 0x18, 0xc3, 0x85, 0x21, 0x93, 0xc3, 0x36, 0xda, 0x42, 0xbd, 0x4b, 0x7d, 0xf5, 0x76, 0x06,
+ 0x70, 0x7d, 0xa1, 0x5b, 0x26, 0x22, 0x96, 0x1c, 0x3f, 0x85, 0xd6, 0x20, 0xaf, 0xbe, 0xc9, 0xf2,
+ 0xb2, 0x42, 0xb5, 0x76, 0x7b, 0x64, 0x59, 0x12, 0xa4, 0x34, 0x06, 0x06, 0x67, 0x6f, 0x87, 0x2d,
+ 0xb0, 0xc8, 0x42, 0xd4, 0x63, 0x80, 0x79, 0x1a, 0x86, 0xe4, 0x06, 0xd1, 0xd1, 0x91, 0x3c, 0x3a,
+ 0xa2, 0xf7, 0x60, 0xa2, 0x23, 0x07, 0x2c, 0x28, 0x0c, 0xf5, 0x4b, 0x48, 0xe7, 0x2b, 0x82, 0xf6,
+ 0x22, 0x87, 0xb1, 0xf2, 0x1a, 0x2e, 0x97, 0xac, 0xc8, 0x36, 0xda, 0x5a, 0xaf, 0xe3, 0x65, 0xff,
+ 0xca, 0xc9, 0x8f, 0xcd, 0xc6, 0xe7, 0x9f, 0x9b, 0x4d, 0x33, 0xb7, 0x35, 0xf7, 0x26, 0xf1, 0x93,
+ 0x8a, 0x83, 0x35, 0xe5, 0xe0, 0xe6, 0x4a, 0x07, 0x5a, 0x59, 0xc5, 0xc2, 0x06, 0x60, 0xe5, 0xe0,
+ 0x80, 0xa5, 0x2c, 0x2a, 0x02, 0x72, 0x5e, 0xc0, 0xd5, 0x4a, 0xd5, 0x58, 0x7a, 0x08, 0xcd, 0x44,
+ 0x55, 0x4c, 0x66, 0xdd, 0xe5, 0x66, 0x0c, 0xda, 0x60, 0x76, 0xff, 0xac, 0xc3, 0x45, 0x35, 0x15,
+ 0x7f, 0x41, 0x00, 0x73, 0xa7, 0x78, 0x6f, 0xf9, 0x98, 0xf3, 0x2f, 0xcb, 0xba, 0x5f, 0x13, 0xa5,
+ 0x3d, 0x38, 0xee, 0x87, 0x6f, 0xbf, 0x8f, 0xd7, 0x76, 0xf0, 0x2d, 0x6a, 0xce, 0xbf, 0x7a, 0xf6,
+ 0xe5, 0x95, 0xd1, 0x77, 0xf9, 0xb9, 0xbe, 0xc7, 0x9f, 0x10, 0xb4, 0x4a, 0x1b, 0xc6, 0xf5, 0x98,
+ 0x8b, 0x50, 0xad, 0x07, 0x75, 0x61, 0x46, 0xf1, 0xb6, 0x52, 0xdc, 0xc5, 0xce, 0x6a, 0xc5, 0xf8,
+ 0x18, 0x41, 0x53, 0xc7, 0x8e, 0xef, 0xfe, 0x07, 0x5d, 0x65, 0xeb, 0x96, 0x5b, 0x03, 0x61, 0xb4,
+ 0x75, 0x95, 0x36, 0x1b, 0x77, 0xce, 0xd7, 0xa6, 0x37, 0xbf, 0xff, 0xec, 0x64, 0x6a, 0xa3, 0xd3,
+ 0xa9, 0x8d, 0x7e, 0x4d, 0x6d, 0xf4, 0x71, 0x66, 0x37, 0x4e, 0x67, 0x76, 0xe3, 0xfb, 0xcc, 0x6e,
+ 0xbc, 0xda, 0x0b, 0xc2, 0x6c, 0x38, 0xf6, 0x88, 0x2f, 0x22, 0x6a, 0x7e, 0x5d, 0xa1, 0xe7, 0xdf,
+ 0x09, 0x04, 0x8d, 0xc4, 0x60, 0x3c, 0xe2, 0xf2, 0x9f, 0x99, 0xd9, 0x51, 0xc2, 0xa5, 0xd7, 0x54,
+ 0x7f, 0x9d, 0x7b, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xb9, 0x6f, 0xd8, 0x1c, 0x4c, 0x05, 0x00,
+ 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
@@ -388,7 +391,7 @@ func NewQueryClient(cc grpc1.ClientConn) QueryClient {
func (c *queryClient) DenomTrace(ctx context.Context, in *QueryDenomTraceRequest, opts ...grpc.CallOption) (*QueryDenomTraceResponse, error) {
out := new(QueryDenomTraceResponse)
- err := c.cc.Invoke(ctx, "/ibc.apps.transfer.v1.Query/DenomTrace", in, out, opts...)
+ err := c.cc.Invoke(ctx, "/ibc.applications.transfer.v1.Query/DenomTrace", in, out, opts...)
if err != nil {
return nil, err
}
@@ -397,7 +400,7 @@ func (c *queryClient) DenomTrace(ctx context.Context, in *QueryDenomTraceRequest
func (c *queryClient) DenomTraces(ctx context.Context, in *QueryDenomTracesRequest, opts ...grpc.CallOption) (*QueryDenomTracesResponse, error) {
out := new(QueryDenomTracesResponse)
- err := c.cc.Invoke(ctx, "/ibc.apps.transfer.v1.Query/DenomTraces", in, out, opts...)
+ err := c.cc.Invoke(ctx, "/ibc.applications.transfer.v1.Query/DenomTraces", in, out, opts...)
if err != nil {
return nil, err
}
@@ -406,7 +409,7 @@ func (c *queryClient) DenomTraces(ctx context.Context, in *QueryDenomTracesReque
func (c *queryClient) Params(ctx context.Context, in *QueryParamsRequest, opts ...grpc.CallOption) (*QueryParamsResponse, error) {
out := new(QueryParamsResponse)
- err := c.cc.Invoke(ctx, "/ibc.apps.transfer.v1.Query/Params", in, out, opts...)
+ err := c.cc.Invoke(ctx, "/ibc.applications.transfer.v1.Query/Params", in, out, opts...)
if err != nil {
return nil, err
}
@@ -451,7 +454,7 @@ func _Query_DenomTrace_Handler(srv interface{}, ctx context.Context, dec func(in
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/ibc.apps.transfer.v1.Query/DenomTrace",
+ FullMethod: "/ibc.applications.transfer.v1.Query/DenomTrace",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(QueryServer).DenomTrace(ctx, req.(*QueryDenomTraceRequest))
@@ -469,7 +472,7 @@ func _Query_DenomTraces_Handler(srv interface{}, ctx context.Context, dec func(i
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/ibc.apps.transfer.v1.Query/DenomTraces",
+ FullMethod: "/ibc.applications.transfer.v1.Query/DenomTraces",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(QueryServer).DenomTraces(ctx, req.(*QueryDenomTracesRequest))
@@ -487,7 +490,7 @@ func _Query_Params_Handler(srv interface{}, ctx context.Context, dec func(interf
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/ibc.apps.transfer.v1.Query/Params",
+ FullMethod: "/ibc.applications.transfer.v1.Query/Params",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(QueryServer).Params(ctx, req.(*QueryParamsRequest))
@@ -496,7 +499,7 @@ func _Query_Params_Handler(srv interface{}, ctx context.Context, dec func(interf
}
var _Query_serviceDesc = grpc.ServiceDesc{
- ServiceName: "ibc.apps.transfer.v1.Query",
+ ServiceName: "ibc.applications.transfer.v1.Query",
HandlerType: (*QueryServer)(nil),
Methods: []grpc.MethodDesc{
{
@@ -513,7 +516,7 @@ var _Query_serviceDesc = grpc.ServiceDesc{
},
},
Streams: []grpc.StreamDesc{},
- Metadata: "ibc/apps/transfer/v1/query.proto",
+ Metadata: "ibc/applications/transfer/v1/query.proto",
}
func (m *QueryDenomTraceRequest) Marshal() (dAtA []byte, err error) {
diff --git a/modules/apps/transfer/types/query.pb.gw.go b/modules/apps/transfer/types/query.pb.gw.go
index b8d2eb74..0cd48139 100644
--- a/modules/apps/transfer/types/query.pb.gw.go
+++ b/modules/apps/transfer/types/query.pb.gw.go
@@ -1,5 +1,5 @@
// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
-// source: ibc/apps/transfer/v1/query.proto
+// source: ibc/applications/transfer/v1/query.proto
/*
Package types is a reverse proxy.
diff --git a/modules/apps/transfer/types/transfer.pb.go b/modules/apps/transfer/types/transfer.pb.go
index 7bd2109e..f03b128f 100644
--- a/modules/apps/transfer/types/transfer.pb.go
+++ b/modules/apps/transfer/types/transfer.pb.go
@@ -1,5 +1,5 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: ibc/apps/transfer/v1/transfer.proto
+// source: ibc/applications/transfer/v1/transfer.proto
package types
@@ -41,7 +41,7 @@ func (m *FungibleTokenPacketData) Reset() { *m = FungibleTokenPacketData
func (m *FungibleTokenPacketData) String() string { return proto.CompactTextString(m) }
func (*FungibleTokenPacketData) ProtoMessage() {}
func (*FungibleTokenPacketData) Descriptor() ([]byte, []int) {
- return fileDescriptor_1df6ef24f87610d6, []int{0}
+ return fileDescriptor_5041673e96e97901, []int{0}
}
func (m *FungibleTokenPacketData) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -112,7 +112,7 @@ func (m *DenomTrace) Reset() { *m = DenomTrace{} }
func (m *DenomTrace) String() string { return proto.CompactTextString(m) }
func (*DenomTrace) ProtoMessage() {}
func (*DenomTrace) Descriptor() ([]byte, []int) {
- return fileDescriptor_1df6ef24f87610d6, []int{1}
+ return fileDescriptor_5041673e96e97901, []int{1}
}
func (m *DenomTrace) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -172,7 +172,7 @@ func (m *Params) Reset() { *m = Params{} }
func (m *Params) String() string { return proto.CompactTextString(m) }
func (*Params) ProtoMessage() {}
func (*Params) Descriptor() ([]byte, []int) {
- return fileDescriptor_1df6ef24f87610d6, []int{2}
+ return fileDescriptor_5041673e96e97901, []int{2}
}
func (m *Params) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -216,40 +216,40 @@ func (m *Params) GetReceiveEnabled() bool {
}
func init() {
- proto.RegisterType((*FungibleTokenPacketData)(nil), "ibc.apps.transfer.v1.FungibleTokenPacketData")
- proto.RegisterType((*DenomTrace)(nil), "ibc.apps.transfer.v1.DenomTrace")
- proto.RegisterType((*Params)(nil), "ibc.apps.transfer.v1.Params")
+ proto.RegisterType((*FungibleTokenPacketData)(nil), "ibc.applications.transfer.v1.FungibleTokenPacketData")
+ proto.RegisterType((*DenomTrace)(nil), "ibc.applications.transfer.v1.DenomTrace")
+ proto.RegisterType((*Params)(nil), "ibc.applications.transfer.v1.Params")
}
func init() {
- proto.RegisterFile("ibc/apps/transfer/v1/transfer.proto", fileDescriptor_1df6ef24f87610d6)
-}
-
-var fileDescriptor_1df6ef24f87610d6 = []byte{
- // 357 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0x91, 0xc1, 0x6a, 0xea, 0x40,
- 0x14, 0x86, 0x8d, 0xd7, 0x2b, 0x3a, 0xf7, 0x72, 0x2f, 0x4c, 0x45, 0x83, 0xd0, 0x28, 0xe9, 0xc6,
- 0x4d, 0x13, 0xa4, 0x5d, 0xb9, 0x29, 0x58, 0xdb, 0x65, 0x91, 0xe0, 0xaa, 0x1b, 0x99, 0x99, 0x9c,
- 0xc6, 0x60, 0x92, 0x09, 0x33, 0x13, 0x41, 0xfa, 0x04, 0xdd, 0xf5, 0xb1, 0xba, 0x74, 0xd9, 0x95,
- 0x14, 0x7d, 0x03, 0x9f, 0xa0, 0x64, 0x12, 0x42, 0x71, 0x77, 0xbe, 0xf3, 0xff, 0xff, 0x39, 0x07,
- 0x0e, 0xba, 0x0a, 0x29, 0x73, 0x49, 0x9a, 0x4a, 0x57, 0x09, 0x92, 0xc8, 0x17, 0x10, 0xee, 0x66,
- 0x5c, 0xd5, 0x4e, 0x2a, 0xb8, 0xe2, 0xb8, 0x13, 0x52, 0xe6, 0xe4, 0x26, 0xa7, 0x12, 0x36, 0xe3,
- 0x7e, 0x27, 0xe0, 0x01, 0xd7, 0x06, 0x37, 0xaf, 0x0a, 0xaf, 0xfd, 0x8a, 0x7a, 0x8f, 0x59, 0x12,
- 0x84, 0x34, 0x82, 0x05, 0x5f, 0x43, 0x32, 0x27, 0x6c, 0x0d, 0x6a, 0x46, 0x14, 0xc1, 0x1d, 0xf4,
- 0xdb, 0x87, 0x84, 0xc7, 0xa6, 0x31, 0x34, 0x46, 0x6d, 0xaf, 0x00, 0xdc, 0x45, 0x4d, 0x12, 0xf3,
- 0x2c, 0x51, 0x66, 0x7d, 0x68, 0x8c, 0x1a, 0x5e, 0x49, 0x79, 0x5f, 0x42, 0xe2, 0x83, 0x30, 0x7f,
- 0x69, 0x7b, 0x49, 0xb8, 0x8f, 0x5a, 0x02, 0x18, 0x84, 0x1b, 0x10, 0x66, 0x43, 0x2b, 0x15, 0xdb,
- 0x77, 0x08, 0xcd, 0xf2, 0xa1, 0x0b, 0x41, 0x18, 0x60, 0x8c, 0x1a, 0x29, 0x51, 0xab, 0x72, 0x9d,
- 0xae, 0xf1, 0x25, 0x42, 0x94, 0x48, 0x58, 0x16, 0x87, 0xd4, 0xb5, 0xd2, 0xce, 0x3b, 0x3a, 0x67,
- 0xbf, 0x19, 0xa8, 0x39, 0x27, 0x82, 0xc4, 0x12, 0x4f, 0xd0, 0xdf, 0x7c, 0xe3, 0x12, 0x12, 0x42,
- 0x23, 0xf0, 0xf5, 0x94, 0xd6, 0xb4, 0x77, 0xda, 0x0f, 0x2e, 0xb6, 0x24, 0x8e, 0x26, 0xf6, 0x4f,
- 0xd5, 0xf6, 0xfe, 0xe4, 0xf8, 0x50, 0x10, 0xbe, 0x47, 0xff, 0xcb, 0x9b, 0xaa, 0x78, 0x5d, 0xc7,
- 0xfb, 0xa7, 0xfd, 0xa0, 0x5b, 0xc4, 0xcf, 0x0c, 0xb6, 0xf7, 0xaf, 0xec, 0x94, 0x43, 0xa6, 0x4f,
- 0x1f, 0x07, 0xcb, 0xd8, 0x1d, 0x2c, 0xe3, 0xeb, 0x60, 0x19, 0xef, 0x47, 0xab, 0xb6, 0x3b, 0x5a,
- 0xb5, 0xcf, 0xa3, 0x55, 0x7b, 0xbe, 0x0d, 0x42, 0xb5, 0xca, 0xa8, 0xc3, 0x78, 0xec, 0x32, 0x2e,
- 0x63, 0x2e, 0xdd, 0x90, 0xb2, 0xeb, 0x80, 0xbb, 0x31, 0xf7, 0xb3, 0x08, 0xe4, 0xd9, 0x47, 0xd5,
- 0x36, 0x05, 0x49, 0x9b, 0xfa, 0x41, 0x37, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xc3, 0x53, 0x27,
- 0x2c, 0xf3, 0x01, 0x00, 0x00,
+ proto.RegisterFile("ibc/applications/transfer/v1/transfer.proto", fileDescriptor_5041673e96e97901)
+}
+
+var fileDescriptor_5041673e96e97901 = []byte{
+ // 365 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0x91, 0xc1, 0xaa, 0xda, 0x40,
+ 0x14, 0x86, 0x8d, 0xb5, 0xa2, 0xd3, 0xd2, 0xc2, 0x54, 0x34, 0x48, 0x1b, 0x25, 0x2b, 0xa1, 0x34,
+ 0x41, 0xda, 0x95, 0x9b, 0x82, 0xb5, 0x5d, 0x16, 0x09, 0xae, 0xba, 0x91, 0x99, 0xc9, 0x69, 0x1c,
+ 0x4c, 0x66, 0xc2, 0xcc, 0x44, 0x90, 0x3e, 0x41, 0x77, 0x7d, 0xac, 0x2e, 0x5d, 0xde, 0x95, 0x5c,
+ 0xf4, 0x0d, 0x7c, 0x82, 0x4b, 0x26, 0x21, 0xc8, 0xdd, 0x9d, 0xef, 0x9c, 0xff, 0x3f, 0xe7, 0xc0,
+ 0x8f, 0x3e, 0x72, 0xca, 0x42, 0x92, 0xe7, 0x29, 0x67, 0xc4, 0x70, 0x29, 0x74, 0x68, 0x14, 0x11,
+ 0xfa, 0x37, 0xa8, 0xf0, 0x30, 0x6f, 0xea, 0x20, 0x57, 0xd2, 0x48, 0xfc, 0x9e, 0x53, 0x16, 0xdc,
+ 0x8b, 0x83, 0x46, 0x70, 0x98, 0x8f, 0x07, 0x89, 0x4c, 0xa4, 0x15, 0x86, 0x65, 0x55, 0x79, 0xfc,
+ 0x3f, 0x68, 0xf4, 0xa3, 0x10, 0x09, 0xa7, 0x29, 0x6c, 0xe4, 0x1e, 0xc4, 0x9a, 0xb0, 0x3d, 0x98,
+ 0x15, 0x31, 0x04, 0x0f, 0xd0, 0xcb, 0x18, 0x84, 0xcc, 0x5c, 0x67, 0xea, 0xcc, 0xfa, 0x51, 0x05,
+ 0x78, 0x88, 0xba, 0x24, 0x93, 0x85, 0x30, 0x6e, 0x7b, 0xea, 0xcc, 0x3a, 0x51, 0x4d, 0x65, 0x5f,
+ 0x83, 0x88, 0x41, 0xb9, 0x2f, 0xac, 0xbc, 0x26, 0x3c, 0x46, 0x3d, 0x05, 0x0c, 0xf8, 0x01, 0x94,
+ 0xdb, 0xb1, 0x93, 0x86, 0xfd, 0xaf, 0x08, 0xad, 0xca, 0xa5, 0x1b, 0x45, 0x18, 0x60, 0x8c, 0x3a,
+ 0x39, 0x31, 0xbb, 0xfa, 0x9c, 0xad, 0xf1, 0x07, 0x84, 0x28, 0xd1, 0xb0, 0xad, 0x1e, 0x69, 0xdb,
+ 0x49, 0xbf, 0xec, 0x58, 0x9f, 0xff, 0xd7, 0x41, 0xdd, 0x35, 0x51, 0x24, 0xd3, 0x78, 0x81, 0x5e,
+ 0x97, 0x17, 0xb7, 0x20, 0x08, 0x4d, 0x21, 0xb6, 0x5b, 0x7a, 0xcb, 0xd1, 0xed, 0x3c, 0x79, 0x77,
+ 0x24, 0x59, 0xba, 0xf0, 0xef, 0xa7, 0x7e, 0xf4, 0xaa, 0xc4, 0xef, 0x15, 0xe1, 0x6f, 0xe8, 0x6d,
+ 0xfd, 0x53, 0x63, 0x6f, 0x5b, 0xfb, 0xf8, 0x76, 0x9e, 0x0c, 0x2b, 0xfb, 0x33, 0x81, 0x1f, 0xbd,
+ 0xa9, 0x3b, 0xf5, 0x92, 0xe5, 0xcf, 0xff, 0x17, 0xcf, 0x39, 0x5d, 0x3c, 0xe7, 0xf1, 0xe2, 0x39,
+ 0xff, 0xae, 0x5e, 0xeb, 0x74, 0xf5, 0x5a, 0x0f, 0x57, 0xaf, 0xf5, 0xeb, 0x4b, 0xc2, 0xcd, 0xae,
+ 0xa0, 0x01, 0x93, 0x59, 0xc8, 0xa4, 0xce, 0xa4, 0x0e, 0x39, 0x65, 0x9f, 0x12, 0x19, 0x66, 0x32,
+ 0x2e, 0x52, 0xd0, 0x65, 0xc2, 0x77, 0xc9, 0x9a, 0x63, 0x0e, 0x9a, 0x76, 0x6d, 0x40, 0x9f, 0x9f,
+ 0x02, 0x00, 0x00, 0xff, 0xff, 0xbd, 0xbc, 0x5f, 0xc9, 0x03, 0x02, 0x00, 0x00,
}
func (m *FungibleTokenPacketData) Marshal() (dAtA []byte, err error) {
diff --git a/modules/apps/transfer/types/tx.pb.go b/modules/apps/transfer/types/tx.pb.go
index 2cfb9f6e..b8b14d0d 100644
--- a/modules/apps/transfer/types/tx.pb.go
+++ b/modules/apps/transfer/types/tx.pb.go
@@ -1,5 +1,5 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: ibc/apps/transfer/v1/tx.proto
+// source: ibc/applications/transfer/v1/tx.proto
package types
@@ -56,7 +56,7 @@ func (m *MsgTransfer) Reset() { *m = MsgTransfer{} }
func (m *MsgTransfer) String() string { return proto.CompactTextString(m) }
func (*MsgTransfer) ProtoMessage() {}
func (*MsgTransfer) Descriptor() ([]byte, []int) {
- return fileDescriptor_05d96e007505da4e, []int{0}
+ return fileDescriptor_7401ed9bed2f8e09, []int{0}
}
func (m *MsgTransfer) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -93,7 +93,7 @@ func (m *MsgTransferResponse) Reset() { *m = MsgTransferResponse{} }
func (m *MsgTransferResponse) String() string { return proto.CompactTextString(m) }
func (*MsgTransferResponse) ProtoMessage() {}
func (*MsgTransferResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_05d96e007505da4e, []int{1}
+ return fileDescriptor_7401ed9bed2f8e09, []int{1}
}
func (m *MsgTransferResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -123,45 +123,47 @@ func (m *MsgTransferResponse) XXX_DiscardUnknown() {
var xxx_messageInfo_MsgTransferResponse proto.InternalMessageInfo
func init() {
- proto.RegisterType((*MsgTransfer)(nil), "ibc.apps.transfer.v1.MsgTransfer")
- proto.RegisterType((*MsgTransferResponse)(nil), "ibc.apps.transfer.v1.MsgTransferResponse")
-}
-
-func init() { proto.RegisterFile("ibc/apps/transfer/v1/tx.proto", fileDescriptor_05d96e007505da4e) }
-
-var fileDescriptor_05d96e007505da4e = []byte{
- // 483 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x92, 0x3f, 0x6f, 0xd3, 0x40,
- 0x18, 0xc6, 0x6d, 0x92, 0x86, 0x70, 0x51, 0x2b, 0x38, 0xda, 0xca, 0x8d, 0xa8, 0x1d, 0x3c, 0x85,
- 0x81, 0x3b, 0xa5, 0x80, 0x90, 0x3a, 0xa1, 0x74, 0x81, 0xa1, 0x08, 0x59, 0x1d, 0x10, 0x4b, 0xb0,
- 0xaf, 0x2f, 0xce, 0x89, 0xd8, 0x67, 0xdd, 0x5d, 0x2c, 0xfa, 0x0d, 0x18, 0xf9, 0x08, 0x9d, 0xf9,
- 0x24, 0x1d, 0x3b, 0x32, 0x45, 0x28, 0x59, 0x98, 0xf3, 0x09, 0xd0, 0xd9, 0x97, 0x90, 0x20, 0x24,
- 0x26, 0xdf, 0xfb, 0x3e, 0xbf, 0xd7, 0x8f, 0xde, 0x3f, 0xe8, 0x98, 0x27, 0x8c, 0xc6, 0x45, 0xa1,
- 0xa8, 0x96, 0x71, 0xae, 0x3e, 0x81, 0xa4, 0xe5, 0x80, 0xea, 0x2f, 0xa4, 0x90, 0x42, 0x0b, 0xbc,
- 0xcf, 0x13, 0x46, 0x8c, 0x4c, 0x56, 0x32, 0x29, 0x07, 0xdd, 0xfd, 0x54, 0xa4, 0xa2, 0x02, 0xa8,
- 0x79, 0xd5, 0x6c, 0xd7, 0x67, 0x42, 0x65, 0x42, 0xd1, 0x24, 0x56, 0x40, 0xcb, 0x41, 0x02, 0x3a,
- 0x1e, 0x50, 0x26, 0x78, 0x6e, 0xf5, 0xc0, 0x58, 0x31, 0x21, 0x81, 0xb2, 0x09, 0x87, 0x5c, 0x1b,
- 0xa3, 0xfa, 0x55, 0x03, 0xe1, 0xf7, 0x06, 0xea, 0x9c, 0xab, 0xf4, 0xc2, 0x3a, 0xe1, 0x97, 0xa8,
- 0xa3, 0xc4, 0x54, 0x32, 0x18, 0x15, 0x42, 0x6a, 0xcf, 0xed, 0xb9, 0xfd, 0x7b, 0xc3, 0xc3, 0xe5,
- 0x2c, 0xc0, 0x57, 0x71, 0x36, 0x39, 0x0d, 0x37, 0xc4, 0x30, 0x42, 0x75, 0xf4, 0x4e, 0x48, 0x8d,
- 0x5f, 0xa1, 0x3d, 0xab, 0xb1, 0x71, 0x9c, 0xe7, 0x30, 0xf1, 0xee, 0x54, 0xb5, 0x47, 0xcb, 0x59,
- 0x70, 0xb0, 0x55, 0x6b, 0xf5, 0x30, 0xda, 0xad, 0x13, 0x67, 0x75, 0x8c, 0x5f, 0xa0, 0x1d, 0x2d,
- 0x3e, 0x43, 0xee, 0x35, 0x7a, 0x6e, 0xbf, 0x73, 0x72, 0x44, 0xea, 0xde, 0x88, 0xe9, 0x8d, 0xd8,
- 0xde, 0xc8, 0x99, 0xe0, 0xf9, 0xb0, 0x79, 0x33, 0x0b, 0x9c, 0xa8, 0xa6, 0xf1, 0x21, 0x6a, 0x29,
- 0xc8, 0x2f, 0x41, 0x7a, 0x4d, 0x63, 0x18, 0xd9, 0x08, 0x77, 0x51, 0x5b, 0x02, 0x03, 0x5e, 0x82,
- 0xf4, 0x76, 0x2a, 0x65, 0x1d, 0xe3, 0x8f, 0x68, 0x4f, 0xf3, 0x0c, 0xc4, 0x54, 0x8f, 0xc6, 0xc0,
- 0xd3, 0xb1, 0xf6, 0x5a, 0x95, 0x67, 0x97, 0x98, 0xd9, 0x9b, 0x79, 0x11, 0x3b, 0xa5, 0x72, 0x40,
- 0x5e, 0x57, 0xc4, 0xf0, 0xd8, 0x98, 0xfe, 0x69, 0x66, 0xbb, 0x3e, 0x8c, 0x76, 0x6d, 0xa2, 0xa6,
- 0xf1, 0x1b, 0xf4, 0x60, 0x45, 0x98, 0xaf, 0xd2, 0x71, 0x56, 0x78, 0x77, 0x7b, 0x6e, 0xbf, 0x39,
- 0x7c, 0xb4, 0x9c, 0x05, 0xde, 0xf6, 0x4f, 0xd6, 0x48, 0x18, 0xdd, 0xb7, 0xb9, 0x8b, 0x55, 0xea,
- 0xb4, 0xfd, 0xf5, 0x3a, 0x70, 0x7e, 0x5d, 0x07, 0x4e, 0x78, 0x80, 0x1e, 0x6e, 0xec, 0x2a, 0x02,
- 0x55, 0x88, 0x5c, 0xc1, 0xc9, 0x08, 0x35, 0xce, 0x55, 0x8a, 0xdf, 0xa3, 0xf6, 0x7a, 0x8d, 0x8f,
- 0xc9, 0xbf, 0x8e, 0x88, 0x6c, 0x54, 0x77, 0x9f, 0xfc, 0x17, 0x59, 0x19, 0x0c, 0xdf, 0xde, 0xcc,
- 0x7d, 0xf7, 0x76, 0xee, 0xbb, 0x3f, 0xe7, 0xbe, 0xfb, 0x6d, 0xe1, 0x3b, 0xb7, 0x0b, 0xdf, 0xf9,
- 0xb1, 0xf0, 0x9d, 0x0f, 0xcf, 0x53, 0xae, 0xc7, 0xd3, 0x84, 0x30, 0x91, 0x51, 0x7b, 0x8a, 0x3c,
- 0x61, 0x4f, 0x53, 0x41, 0x33, 0x71, 0x39, 0x9d, 0x80, 0xfa, 0xeb, 0xce, 0xf5, 0x55, 0x01, 0x2a,
- 0x69, 0x55, 0xb7, 0xf7, 0xec, 0x77, 0x00, 0x00, 0x00, 0xff, 0xff, 0xe7, 0x82, 0x6b, 0x1c, 0x09,
- 0x03, 0x00, 0x00,
+ proto.RegisterType((*MsgTransfer)(nil), "ibc.applications.transfer.v1.MsgTransfer")
+ proto.RegisterType((*MsgTransferResponse)(nil), "ibc.applications.transfer.v1.MsgTransferResponse")
+}
+
+func init() {
+ proto.RegisterFile("ibc/applications/transfer/v1/tx.proto", fileDescriptor_7401ed9bed2f8e09)
+}
+
+var fileDescriptor_7401ed9bed2f8e09 = []byte{
+ // 491 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0x31, 0x6f, 0xd3, 0x40,
+ 0x14, 0xc7, 0x6d, 0x92, 0x86, 0x70, 0x51, 0x2b, 0x30, 0xb4, 0x72, 0xa3, 0x62, 0x47, 0x96, 0x90,
+ 0xc2, 0xc0, 0x9d, 0x5c, 0x40, 0x48, 0x9d, 0x50, 0xba, 0xc0, 0x50, 0x84, 0xac, 0x4e, 0x2c, 0xc5,
+ 0xbe, 0x1e, 0xce, 0x89, 0xf8, 0x9e, 0x75, 0x77, 0x89, 0xe8, 0x37, 0x60, 0xe4, 0x23, 0x74, 0xe6,
+ 0x93, 0x74, 0xec, 0xc8, 0x14, 0xa1, 0x64, 0x61, 0xce, 0x27, 0x40, 0x67, 0x5f, 0x82, 0xb3, 0xa0,
+ 0x4e, 0xbe, 0xf7, 0xfe, 0xbf, 0x77, 0x7f, 0xbd, 0x7b, 0xcf, 0xe8, 0x19, 0xcf, 0x28, 0x49, 0xcb,
+ 0x72, 0xc2, 0x69, 0xaa, 0x39, 0x08, 0x45, 0xb4, 0x4c, 0x85, 0xfa, 0xc2, 0x24, 0x99, 0xc5, 0x44,
+ 0x7f, 0xc3, 0xa5, 0x04, 0x0d, 0xde, 0x11, 0xcf, 0x28, 0x6e, 0x62, 0x78, 0x8d, 0xe1, 0x59, 0xdc,
+ 0x7f, 0x92, 0x43, 0x0e, 0x15, 0x48, 0xcc, 0xa9, 0xae, 0xe9, 0x07, 0x14, 0x54, 0x01, 0x8a, 0x64,
+ 0xa9, 0x62, 0x64, 0x16, 0x67, 0x4c, 0xa7, 0x31, 0xa1, 0xc0, 0x85, 0xd5, 0x43, 0x63, 0x4d, 0x41,
+ 0x32, 0x42, 0x27, 0x9c, 0x09, 0x6d, 0x0c, 0xeb, 0x53, 0x0d, 0x44, 0x3f, 0x5b, 0xa8, 0x77, 0xa6,
+ 0xf2, 0x73, 0xeb, 0xe4, 0xbd, 0x41, 0x3d, 0x05, 0x53, 0x49, 0xd9, 0x45, 0x09, 0x52, 0xfb, 0xee,
+ 0xc0, 0x1d, 0x3e, 0x18, 0x1d, 0xac, 0xe6, 0xa1, 0x77, 0x95, 0x16, 0x93, 0x93, 0xa8, 0x21, 0x46,
+ 0x09, 0xaa, 0xa3, 0x8f, 0x20, 0xb5, 0xf7, 0x16, 0xed, 0x59, 0x8d, 0x8e, 0x53, 0x21, 0xd8, 0xc4,
+ 0xbf, 0x57, 0xd5, 0x1e, 0xae, 0xe6, 0xe1, 0xfe, 0x56, 0xad, 0xd5, 0xa3, 0x64, 0xb7, 0x4e, 0x9c,
+ 0xd6, 0xb1, 0xf7, 0x1a, 0xed, 0x68, 0xf8, 0xca, 0x84, 0xdf, 0x1a, 0xb8, 0xc3, 0xde, 0xf1, 0x21,
+ 0xae, 0x7b, 0xc3, 0xa6, 0x37, 0x6c, 0x7b, 0xc3, 0xa7, 0xc0, 0xc5, 0xa8, 0x7d, 0x33, 0x0f, 0x9d,
+ 0xa4, 0xa6, 0xbd, 0x03, 0xd4, 0x51, 0x4c, 0x5c, 0x32, 0xe9, 0xb7, 0x8d, 0x61, 0x62, 0x23, 0xaf,
+ 0x8f, 0xba, 0x92, 0x51, 0xc6, 0x67, 0x4c, 0xfa, 0x3b, 0x95, 0xb2, 0x89, 0xbd, 0xcf, 0x68, 0x4f,
+ 0xf3, 0x82, 0xc1, 0x54, 0x5f, 0x8c, 0x19, 0xcf, 0xc7, 0xda, 0xef, 0x54, 0x9e, 0x7d, 0x6c, 0x66,
+ 0x60, 0xde, 0x0b, 0xdb, 0x57, 0x9a, 0xc5, 0xf8, 0x5d, 0x45, 0x8c, 0x9e, 0x1a, 0xd3, 0x7f, 0xcd,
+ 0x6c, 0xd7, 0x47, 0xc9, 0xae, 0x4d, 0xd4, 0xb4, 0xf7, 0x1e, 0x3d, 0x5a, 0x13, 0xe6, 0xab, 0x74,
+ 0x5a, 0x94, 0xfe, 0xfd, 0x81, 0x3b, 0x6c, 0x8f, 0x8e, 0x56, 0xf3, 0xd0, 0xdf, 0xbe, 0x64, 0x83,
+ 0x44, 0xc9, 0x43, 0x9b, 0x3b, 0x5f, 0xa7, 0x4e, 0xba, 0xdf, 0xaf, 0x43, 0xe7, 0xcf, 0x75, 0xe8,
+ 0x44, 0xfb, 0xe8, 0x71, 0x63, 0x56, 0x09, 0x53, 0x25, 0x08, 0xc5, 0x8e, 0x01, 0xb5, 0xce, 0x54,
+ 0xee, 0x8d, 0x51, 0x77, 0x33, 0xc6, 0xe7, 0xf8, 0x7f, 0xcb, 0x84, 0x1b, 0xb7, 0xf4, 0xe3, 0x3b,
+ 0xa3, 0x6b, 0xc3, 0xd1, 0x87, 0x9b, 0x45, 0xe0, 0xde, 0x2e, 0x02, 0xf7, 0xf7, 0x22, 0x70, 0x7f,
+ 0x2c, 0x03, 0xe7, 0x76, 0x19, 0x38, 0xbf, 0x96, 0x81, 0xf3, 0xe9, 0x55, 0xce, 0xf5, 0x78, 0x9a,
+ 0x61, 0x0a, 0x05, 0xb1, 0xab, 0xc9, 0x33, 0xfa, 0x22, 0x07, 0x52, 0xc0, 0xe5, 0x74, 0xc2, 0x94,
+ 0xf9, 0x0f, 0x1a, 0xfb, 0xaf, 0xaf, 0x4a, 0xa6, 0xb2, 0x4e, 0xb5, 0x8b, 0x2f, 0xff, 0x06, 0x00,
+ 0x00, 0xff, 0xff, 0xc1, 0x18, 0x20, 0x80, 0x29, 0x03, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
@@ -190,7 +192,7 @@ func NewMsgClient(cc grpc1.ClientConn) MsgClient {
func (c *msgClient) Transfer(ctx context.Context, in *MsgTransfer, opts ...grpc.CallOption) (*MsgTransferResponse, error) {
out := new(MsgTransferResponse)
- err := c.cc.Invoke(ctx, "/ibc.apps.transfer.v1.Msg/Transfer", in, out, opts...)
+ err := c.cc.Invoke(ctx, "/ibc.applications.transfer.v1.Msg/Transfer", in, out, opts...)
if err != nil {
return nil, err
}
@@ -225,7 +227,7 @@ func _Msg_Transfer_Handler(srv interface{}, ctx context.Context, dec func(interf
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/ibc.apps.transfer.v1.Msg/Transfer",
+ FullMethod: "/ibc.applications.transfer.v1.Msg/Transfer",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(MsgServer).Transfer(ctx, req.(*MsgTransfer))
@@ -234,7 +236,7 @@ func _Msg_Transfer_Handler(srv interface{}, ctx context.Context, dec func(interf
}
var _Msg_serviceDesc = grpc.ServiceDesc{
- ServiceName: "ibc.apps.transfer.v1.Msg",
+ ServiceName: "ibc.applications.transfer.v1.Msg",
HandlerType: (*MsgServer)(nil),
Methods: []grpc.MethodDesc{
{
@@ -243,7 +245,7 @@ var _Msg_serviceDesc = grpc.ServiceDesc{
},
},
Streams: []grpc.StreamDesc{},
- Metadata: "ibc/apps/transfer/v1/tx.proto",
+ Metadata: "ibc/applications/transfer/v1/tx.proto",
}
func (m *MsgTransfer) Marshal() (dAtA []byte, err error) {
diff --git a/proto/ibc/apps/transfer/v1/genesis.proto b/proto/ibc/applications/transfer/v1/genesis.proto
similarity index 85%
rename from proto/ibc/apps/transfer/v1/genesis.proto
rename to proto/ibc/applications/transfer/v1/genesis.proto
index 84798eb4..9c6b78ac 100644
--- a/proto/ibc/apps/transfer/v1/genesis.proto
+++ b/proto/ibc/applications/transfer/v1/genesis.proto
@@ -1,10 +1,10 @@
syntax = "proto3";
-package ibc.apps.transfer.v1;
+package ibc.applications.transfer.v1;
option go_package = "github.com/cosmos/ibc-go/modules/apps/transfer/types";
-import "ibc/apps/transfer/v1/transfer.proto";
+import "ibc/applications/transfer/v1/transfer.proto";
import "gogoproto/gogo.proto";
// GenesisState defines the ibc-transfer genesis state
diff --git a/proto/ibc/apps/transfer/v1/query.proto b/proto/ibc/applications/transfer/v1/query.proto
similarity index 96%
rename from proto/ibc/apps/transfer/v1/query.proto
rename to proto/ibc/applications/transfer/v1/query.proto
index 09e5b458..cd428413 100644
--- a/proto/ibc/apps/transfer/v1/query.proto
+++ b/proto/ibc/applications/transfer/v1/query.proto
@@ -1,10 +1,10 @@
syntax = "proto3";
-package ibc.apps.transfer.v1;
+package ibc.applications.transfer.v1;
import "gogoproto/gogo.proto";
import "cosmos/base/query/v1beta1/pagination.proto";
-import "ibc/apps/transfer/v1/transfer.proto";
+import "ibc/applications/transfer/v1/transfer.proto";
import "google/api/annotations.proto";
option go_package = "github.com/cosmos/ibc-go/modules/apps/transfer/types";
diff --git a/proto/ibc/apps/transfer/v1/transfer.proto b/proto/ibc/applications/transfer/v1/transfer.proto
similarity index 97%
rename from proto/ibc/apps/transfer/v1/transfer.proto
rename to proto/ibc/applications/transfer/v1/transfer.proto
index 0aa0224a..b1c41f6a 100644
--- a/proto/ibc/apps/transfer/v1/transfer.proto
+++ b/proto/ibc/applications/transfer/v1/transfer.proto
@@ -1,6 +1,6 @@
syntax = "proto3";
-package ibc.apps.transfer.v1;
+package ibc.applications.transfer.v1;
option go_package = "github.com/cosmos/ibc-go/modules/apps/transfer/types";
diff --git a/proto/ibc/apps/transfer/v1/tx.proto b/proto/ibc/applications/transfer/v1/tx.proto
similarity index 97%
rename from proto/ibc/apps/transfer/v1/tx.proto
rename to proto/ibc/applications/transfer/v1/tx.proto
index b7464310..eb56b470 100644
--- a/proto/ibc/apps/transfer/v1/tx.proto
+++ b/proto/ibc/applications/transfer/v1/tx.proto
@@ -1,6 +1,6 @@
syntax = "proto3";
-package ibc.apps.transfer.v1;
+package ibc.applications.transfer.v1;
option go_package = "github.com/cosmos/ibc-go/modules/apps/transfer/types";
From 2d3132f29c3a580272758dcb5c64055dfd0f29f5 Mon Sep 17 00:00:00 2001
From: Aditya
Date: Mon, 10 May 2021 07:11:02 -0400
Subject: [PATCH 050/393] Time Monotonicity Enforcement (#141)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
* implement update client fix and start changing tests
* fix bug and write identical case test
* write misbehaviour detection tests
* add misbehaviour events to UpdateClient
* fix client keeper and write tests
* add Freeze to ClientState interface
* add cache context and fix events
* Update modules/light-clients/07-tendermint/types/update.go
Co-authored-by: Zarko Milosevic
* address colin comments
* freeze entire client on misbehaviour
* add time misbehaviour and tests
* enforce trusted height less than current height in header.ValidateBasic
* cleanup and tests
* fix print statement
* enforce monotonicity in update
* add docs and remove unnecessary interface function
* first round of review comments
* CHANGELOG
* update updateclient test
* bump tendermint to 0.34.10
* remove caching and specific frozen height
* document in go code
* DRY FrozenHeight
* fix build
Co-authored-by: colin axnér <25233464+colin-axner@users.noreply.github.com>
Co-authored-by: Zarko Milosevic
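In essence, this patch makes UpdateClient treat a header that breaks time monotonicity as misbehaviour and freeze the client rather than accept it. The rule: a submitted header's timestamp must lie strictly between the timestamps of the consensus states stored at the closest heights below and above it (when such neighbours exist). A minimal, illustrative sketch of that check follows; the names below are not the ibc-go API:

    package monotonicity // illustrative sketch only, not part of ibc-go

    import "time"

    // checkTimeMonotonicity sketches the rule this patch enforces in the
    // 07-tendermint light client. prevTime and nextTime are the timestamps of
    // the consensus states stored at the closest heights below and above the
    // submitted header's height (assumed here to both exist).
    func checkTimeMonotonicity(headerTime, prevTime, nextTime time.Time) bool {
    	// the header must be strictly newer than the earlier consensus state
    	// and strictly older than the later one; anything else is misbehaviour
    	return headerTime.After(prevTime) && headerTime.Before(nextTime)
    }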
---
CHANGELOG.md | 4 +
modules/core/02-client/keeper/client.go | 92 +++++---
modules/core/02-client/keeper/client_test.go | 204 ++++++++++++++----
.../07-tendermint/types/header.go | 7 +-
.../07-tendermint/types/header_test.go | 4 +-
.../07-tendermint/types/misbehaviour.go | 16 +-
.../types/misbehaviour_handle.go | 31 ++-
.../types/misbehaviour_handle_test.go | 130 +++++++----
.../07-tendermint/types/misbehaviour_test.go | 26 +--
.../07-tendermint/types/store.go | 17 +-
.../07-tendermint/types/tendermint_test.go | 15 +-
.../07-tendermint/types/update.go | 49 ++++-
.../07-tendermint/types/update_test.go | 147 ++++++++++---
13 files changed, 564 insertions(+), 178 deletions(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 458dc36a..4bd8c426 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -52,6 +52,8 @@ Ref: https://keepachangelog.com/en/1.0.0/
### State Machine Breaking
* (modules/light-clients/07-tendermint) [\#99](https://github.com/cosmos/ibc-go/pull/99) Enforce maximum chain-id length for tendermint client.
+* (modules/light-clients/07-tendermint) [\#141](https://github.com/cosmos/ibc-go/pull/141) Allow a new form of misbehaviour that proves the counterparty chain breaks time monotonicity; UpdateClient now automatically enforces monotonicity and freezes the client if it is broken.
+* (modules/light-clients/07-tendermint) [\#141](https://github.com/cosmos/ibc-go/pull/141) Freeze the client if there's a conflicting header submitted for an existing consensus state.
* (modules/core/02-client) [\#8405](https://github.com/cosmos/cosmos-sdk/pull/8405) Refactor IBC client update governance proposals to use a substitute client to update a frozen or expired client.
* (modules/core/02-client) [\#8673](https://github.com/cosmos/cosmos-sdk/pull/8673) IBC upgrade logic moved to 02-client and an IBC UpgradeProposal is added.
@@ -60,6 +62,8 @@ Ref: https://keepachangelog.com/en/1.0.0/
* (modules/core/04-channel) [\#7949](https://github.com/cosmos/cosmos-sdk/issues/7949) Standardized channel `Acknowledgement` moved to its own file. Codec registration redundancy removed.
* (modules/core/04-channel) [\#144](https://github.com/cosmos/ibc-go/pull/144) Introduced a `packet_data_hex` attribute to emit the hex-encoded packet data in events. This allows for raw binary (proto-encoded message) to be sent over events and decoded correctly on relayer. Original `packet_data` is DEPRECATED. All relayers and IBC event consumers are encouraged to switch to `packet_data_hex` as soon as possible.
* (modules/light-clients/07-tendermint) [\#125](https://github.com/cosmos/ibc-go/pull/125) Implement efficient iteration of consensus states and pruning of earliest expired consensus state on UpdateClient.
+* (modules/light-clients/07-tendermint) [\#141](https://github.com/cosmos/ibc-go/pull/141) Return early on a duplicate update call to save gas.
+
## IBC in the Cosmos SDK Repository
diff --git a/modules/core/02-client/keeper/client.go b/modules/core/02-client/keeper/client.go
index c5416b9e..ddb9aab9 100644
--- a/modules/core/02-client/keeper/client.go
+++ b/modules/core/02-client/keeper/client.go
@@ -67,58 +67,86 @@ func (k Keeper) UpdateClient(ctx sdk.Context, clientID string, header exported.H
return sdkerrors.Wrapf(types.ErrClientNotActive, "cannot update client (%s) with status %s", clientID, status)
}
- clientState, consensusState, err := clientState.CheckHeaderAndUpdateState(ctx, k.cdc, clientStore, header)
+ eventType := types.EventTypeUpdateClient
+
+ // Any writes made in CheckHeaderAndUpdateState are persisted on both valid updates and misbehaviour updates.
+ // Light client implementations are responsible for writing the correct metadata (if any) in either case.
+ newClientState, newConsensusState, err := clientState.CheckHeaderAndUpdateState(ctx, k.cdc, clientStore, header)
if err != nil {
return sdkerrors.Wrapf(err, "cannot update client with ID %s", clientID)
}
- k.SetClientState(ctx, clientID, clientState)
-
- var consensusHeight exported.Height
-
- // we don't set consensus state for localhost client
- if header != nil && clientID != exported.Localhost {
- k.SetClientConsensusState(ctx, clientID, header.GetHeight(), consensusState)
- consensusHeight = header.GetHeight()
- } else {
- consensusHeight = types.GetSelfHeight(ctx)
- }
-
- k.Logger(ctx).Info("client state updated", "client-id", clientID, "height", consensusHeight.String())
-
- defer func() {
- telemetry.IncrCounterWithLabels(
- []string{"ibc", "client", "update"},
- 1,
- []metrics.Label{
- telemetry.NewLabel("client-type", clientState.ClientType()),
- telemetry.NewLabel("client-id", clientID),
- telemetry.NewLabel("update-type", "msg"),
- },
- )
- }()
-
// emit the full header in events
- var headerStr string
+ var (
+ headerStr string
+ consensusHeight exported.Height
+ )
if header != nil {
// Marshal the Header as an Any and encode the resulting bytes to hex.
// This prevents the event value from containing invalid UTF-8 characters
// which may cause data to be lost when JSON encoding/decoding.
headerStr = hex.EncodeToString(types.MustMarshalHeader(k.cdc, header))
+ // set default consensus height with header height
+ consensusHeight = header.GetHeight()
}
+ // set the new client state regardless of whether the update is a valid update or misbehaviour
+ k.SetClientState(ctx, clientID, newClientState)
+ // If the client state is not frozen after CheckHeaderAndUpdateState, the update was valid:
+ // write the update state changes and set the new consensus state.
+ // Otherwise the update was proof of misbehaviour and we must emit the appropriate misbehaviour events.
+ if status := newClientState.Status(ctx, clientStore, k.cdc); status != exported.Frozen {
+ // if update is not misbehaviour then update the consensus state
+ // we don't set consensus state for localhost client
+ if header != nil && clientID != exported.Localhost {
+ k.SetClientConsensusState(ctx, clientID, header.GetHeight(), newConsensusState)
+ } else {
+ consensusHeight = types.GetSelfHeight(ctx)
+ }
+
+ k.Logger(ctx).Info("client state updated", "client-id", clientID, "height", consensusHeight.String())
+
+ defer func() {
+ telemetry.IncrCounterWithLabels(
+ []string{"ibc", "client", "update"},
+ 1,
+ []metrics.Label{
+ telemetry.NewLabel("client-type", clientState.ClientType()),
+ telemetry.NewLabel("client-id", clientID),
+ telemetry.NewLabel("update-type", "msg"),
+ },
+ )
+ }()
+ } else {
+ // set eventType to SubmitMisbehaviour
+ eventType = types.EventTypeSubmitMisbehaviour
+
+ k.Logger(ctx).Info("client frozen due to misbehaviour", "client-id", clientID, "height", header.GetHeight().String())
+
+ defer func() {
+ telemetry.IncrCounterWithLabels(
+ []string{"ibc", "client", "misbehaviour"},
+ 1,
+ []metrics.Label{
+ telemetry.NewLabel("client-type", clientState.ClientType()),
+ telemetry.NewLabel("client-id", clientID),
+ telemetry.NewLabel("msg-type", "update"),
+ },
+ )
+ }()
+ }
+
// emitting events in the keeper emits for both begin block and handler client updates
ctx.EventManager().EmitEvent(
sdk.NewEvent(
- types.EventTypeUpdateClient,
+ eventType,
sdk.NewAttribute(types.AttributeKeyClientID, clientID),
sdk.NewAttribute(types.AttributeKeyClientType, clientState.ClientType()),
sdk.NewAttribute(types.AttributeKeyConsensusHeight, consensusHeight.String()),
sdk.NewAttribute(types.AttributeKeyHeader, headerStr),
),
)
-
return nil
}
@@ -186,6 +214,10 @@ func (k Keeper) CheckMisbehaviourAndUpdateState(ctx sdk.Context, misbehaviour ex
return sdkerrors.Wrapf(types.ErrClientNotActive, "cannot process misbehaviour for client (%s) with status %s", misbehaviour.GetClientID(), status)
}
+ if err := misbehaviour.ValidateBasic(); err != nil {
+ return err
+ }
+
clientState, err := clientState.CheckMisbehaviourAndUpdateState(ctx, k.cdc, clientStore, misbehaviour)
if err != nil {
return err
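With this change an UpdateClient call that carries proof of misbehaviour no longer returns an error: the client state is written back frozen and a submit_misbehaviour event is emitted instead of update_client. A hedged caller-side sketch of how the two outcomes can be told apart afterwards, using only calls that appear in this patch (it mirrors the assertions in the test diff below):

    // sketch: distinguish a normal update from a misbehaviour freeze
    if err := k.UpdateClient(ctx, clientID, header); err != nil {
    	return err // the header failed validation or verification outright
    }
    clientState, found := k.GetClientState(ctx, clientID)
    if cs, ok := clientState.(*ibctmtypes.ClientState); found && ok && !cs.FrozenHeight.IsZero() {
    	// the header was evidence of misbehaviour; the client is now frozen
    }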
diff --git a/modules/core/02-client/keeper/client_test.go b/modules/core/02-client/keeper/client_test.go
index 1e1f393d..96aed366 100644
--- a/modules/core/02-client/keeper/client_test.go
+++ b/modules/core/02-client/keeper/client_test.go
@@ -5,9 +5,9 @@ import (
"fmt"
"time"
- upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
tmtypes "github.com/tendermint/tendermint/types"
+ upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
"github.com/cosmos/ibc-go/modules/core/02-client/types"
clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
commitmenttypes "github.com/cosmos/ibc-go/modules/core/23-commitment/types"
@@ -53,7 +53,6 @@ func (suite *KeeperTestSuite) TestUpdateClientTendermint() {
suite.Require().NoError(err)
return header
}
-
createPastUpdateFn := func(fillHeight, trustedHeight clienttypes.Height) *ibctmtypes.Header {
consState, found := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetClientConsensusState(suite.chainA.GetContext(), path.EndpointA.ClientID, trustedHeight)
suite.Require().True(found)
@@ -61,10 +60,12 @@ func (suite *KeeperTestSuite) TestUpdateClientTendermint() {
return suite.chainB.CreateTMClientHeader(suite.chainB.ChainID, int64(fillHeight.RevisionHeight), trustedHeight, consState.(*ibctmtypes.ConsensusState).Timestamp.Add(time.Second*5),
suite.chainB.Vals, suite.chainB.Vals, suite.chainB.Signers)
}
+
cases := []struct {
- name string
- malleate func()
- expPass bool
+ name string
+ malleate func()
+ expPass bool
+ expFreeze bool
}{
{"valid update", func() {
clientState := path.EndpointA.GetClientState().(*ibctmtypes.ClientState)
@@ -74,7 +75,7 @@ func (suite *KeeperTestSuite) TestUpdateClientTendermint() {
path.EndpointA.UpdateClient()
updateHeader = createFutureUpdateFn(trustHeight)
- }, true},
+ }, true, false},
{"valid past update", func() {
clientState := path.EndpointA.GetClientState()
trustedHeight := clientState.GetLatestHeight().(types.Height)
@@ -95,12 +96,89 @@ func (suite *KeeperTestSuite) TestUpdateClientTendermint() {
// updateHeader will fill in consensus state between prevConsState and suite.consState
// clientState should not be updated
updateHeader = createPastUpdateFn(fillHeight, trustedHeight)
- }, true},
+ }, true, false},
+ {"valid duplicate update", func() {
+ clientID := path.EndpointA.ClientID
+
+ height1 := types.NewHeight(0, 1)
+
+ // store previous consensus state
+ prevConsState := &ibctmtypes.ConsensusState{
+ Timestamp: suite.past,
+ NextValidatorsHash: suite.chainB.Vals.Hash(),
+ }
+ suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(suite.chainA.GetContext(), clientID, height1, prevConsState)
+
+ height5 := types.NewHeight(0, 5)
+ // store next consensus state to check that trustedHeight does not need to be the highest consensus state before header height
+ nextConsState := &ibctmtypes.ConsensusState{
+ Timestamp: suite.past.Add(time.Minute),
+ NextValidatorsHash: suite.chainB.Vals.Hash(),
+ }
+ suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(suite.chainA.GetContext(), clientID, height5, nextConsState)
+
+ height3 := types.NewHeight(0, 3)
+ // updateHeader will fill in consensus state between prevConsState and suite.consState
+ // clientState should not be updated
+ updateHeader = createPastUpdateFn(height3, height1)
+ // set updateHeader's consensus state in store to create duplicate UpdateClient scenario
+ suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(suite.chainA.GetContext(), clientID, updateHeader.GetHeight(), updateHeader.ConsensusState())
+ }, true, false},
+ {"misbehaviour detection: conflicting header", func() {
+ clientID := path.EndpointA.ClientID
+
+ height1 := types.NewHeight(0, 1)
+ // store previous consensus state
+ prevConsState := &ibctmtypes.ConsensusState{
+ Timestamp: suite.past,
+ NextValidatorsHash: suite.chainB.Vals.Hash(),
+ }
+ suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(suite.chainA.GetContext(), clientID, height1, prevConsState)
+
+ height5 := types.NewHeight(0, 5)
+ // store next consensus state to check that trustedHeight does not need to be the highest consensus state before header height
+ nextConsState := &ibctmtypes.ConsensusState{
+ Timestamp: suite.past.Add(time.Minute),
+ NextValidatorsHash: suite.chainB.Vals.Hash(),
+ }
+ suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(suite.chainA.GetContext(), clientID, height5, nextConsState)
+
+ height3 := types.NewHeight(0, 3)
+ // updateHeader will fill in consensus state between prevConsState and suite.consState
+ // clientState should not be updated
+ updateHeader = createPastUpdateFn(height3, height1)
+ // set conflicting consensus state in store to create misbehaviour scenario
+ conflictConsState := updateHeader.ConsensusState()
+ conflictConsState.Root = commitmenttypes.NewMerkleRoot([]byte("conflicting apphash"))
+ suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(suite.chainA.GetContext(), clientID, updateHeader.GetHeight(), conflictConsState)
+ }, true, true},
+ {"misbehaviour detection: monotonic time violation", func() {
+ clientState := path.EndpointA.GetClientState().(*ibctmtypes.ClientState)
+ clientID := path.EndpointA.ClientID
+ trustedHeight := clientState.GetLatestHeight().(types.Height)
+
+ // store intermediate consensus state at a time greater than updateHeader time
+ // this will break time monotonicity
+ incrementedClientHeight := clientState.GetLatestHeight().Increment().(types.Height)
+ intermediateConsState := &ibctmtypes.ConsensusState{
+ Timestamp: suite.coordinator.CurrentTime.Add(2 * time.Hour),
+ NextValidatorsHash: suite.chainB.Vals.Hash(),
+ }
+ suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(suite.chainA.GetContext(), clientID, incrementedClientHeight, intermediateConsState)
+ // set iteration key
+ clientStore := suite.keeper.ClientStore(suite.ctx, clientID)
+ ibctmtypes.SetIterationKey(clientStore, incrementedClientHeight)
+
+ clientState.LatestHeight = incrementedClientHeight
+ suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(suite.chainA.GetContext(), clientID, clientState)
+
+ updateHeader = createFutureUpdateFn(trustedHeight)
+ }, true, true},
{"client state not found", func() {
updateHeader = createFutureUpdateFn(path.EndpointA.GetClientState().GetLatestHeight().(types.Height))
path.EndpointA.ClientID = ibctesting.InvalidID
- }, false},
+ }, false, false},
{"consensus state not found", func() {
clientState := path.EndpointA.GetClientState()
tmClient, ok := clientState.(*ibctmtypes.ClientState)
@@ -109,17 +187,17 @@ func (suite *KeeperTestSuite) TestUpdateClientTendermint() {
suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(suite.chainA.GetContext(), path.EndpointA.ClientID, clientState)
updateHeader = createFutureUpdateFn(clientState.GetLatestHeight().(types.Height))
- }, false},
+ }, false, false},
{"client is not active", func() {
clientState := path.EndpointA.GetClientState().(*ibctmtypes.ClientState)
clientState.FrozenHeight = types.NewHeight(0, 1)
suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(suite.chainA.GetContext(), path.EndpointA.ClientID, clientState)
updateHeader = createFutureUpdateFn(clientState.GetLatestHeight().(types.Height))
- }, false},
+ }, false, false},
{"invalid header", func() {
updateHeader = createFutureUpdateFn(path.EndpointA.GetClientState().GetLatestHeight().(types.Height))
updateHeader.TrustedHeight = updateHeader.TrustedHeight.Increment().(types.Height)
- }, false},
+ }, false, false},
}
for _, tc := range cases {
@@ -141,28 +219,32 @@ func (suite *KeeperTestSuite) TestUpdateClientTendermint() {
if tc.expPass {
suite.Require().NoError(err, err)
- expConsensusState := &ibctmtypes.ConsensusState{
- Timestamp: updateHeader.GetTime(),
- Root: commitmenttypes.NewMerkleRoot(updateHeader.Header.GetAppHash()),
- NextValidatorsHash: updateHeader.Header.NextValidatorsHash,
- }
-
newClientState := path.EndpointA.GetClientState()
- consensusState, found := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetClientConsensusState(suite.chainA.GetContext(), path.EndpointA.ClientID, updateHeader.GetHeight())
- suite.Require().True(found)
-
- // Determine if clientState should be updated or not
- if updateHeader.GetHeight().GT(clientState.GetLatestHeight()) {
- // Header Height is greater than clientState latest Height, clientState should be updated with header.GetHeight()
- suite.Require().Equal(updateHeader.GetHeight(), newClientState.GetLatestHeight(), "clientstate height did not update")
+ if tc.expFreeze {
+ suite.Require().True(!newClientState.(*ibctmtypes.ClientState).FrozenHeight.IsZero(), "client did not freeze after conflicting header was submitted to UpdateClient")
} else {
- // Update will add past consensus state, clientState should not be updated at all
- suite.Require().Equal(clientState.GetLatestHeight(), newClientState.GetLatestHeight(), "client state height updated for past header")
+ expConsensusState := &ibctmtypes.ConsensusState{
+ Timestamp: updateHeader.GetTime(),
+ Root: commitmenttypes.NewMerkleRoot(updateHeader.Header.GetAppHash()),
+ NextValidatorsHash: updateHeader.Header.NextValidatorsHash,
+ }
+
+ consensusState, found := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetClientConsensusState(suite.chainA.GetContext(), path.EndpointA.ClientID, updateHeader.GetHeight())
+ suite.Require().True(found)
+
+ // Determine if clientState should be updated or not
+ if updateHeader.GetHeight().GT(clientState.GetLatestHeight()) {
+ // Header Height is greater than clientState latest Height, clientState should be updated with header.GetHeight()
+ suite.Require().Equal(updateHeader.GetHeight(), newClientState.GetLatestHeight(), "clientstate height did not update")
+ } else {
+ // Update will add past consensus state, clientState should not be updated at all
+ suite.Require().Equal(clientState.GetLatestHeight(), newClientState.GetLatestHeight(), "client state height updated for past header")
+ }
+
+ suite.Require().NoError(err)
+ suite.Require().Equal(expConsensusState, consensusState, "consensus state should have been updated on case %s", tc.name)
}
-
- suite.Require().NoError(err)
- suite.Require().Equal(expConsensusState, consensusState, "consensus state should have been updated on case %s", tc.name)
} else {
suite.Require().Error(err)
}
@@ -381,8 +463,24 @@ func (suite *KeeperTestSuite) TestCheckMisbehaviourAndUpdateState() {
{
"trusting period misbehavior should pass",
&ibctmtypes.Misbehaviour{
- Header1: suite.chainA.CreateTMClientHeader(testChainID, int64(testClientHeight.RevisionHeight), testClientHeight, altTime, bothValSet, bothValSet, bothSigners),
- Header2: suite.chainA.CreateTMClientHeader(testChainID, int64(testClientHeight.RevisionHeight), testClientHeight, suite.ctx.BlockTime(), bothValSet, bothValSet, bothSigners),
+ Header1: suite.chainA.CreateTMClientHeader(testChainID, int64(testClientHeight.RevisionHeight+1), testClientHeight, altTime, bothValSet, bothValSet, bothSigners),
+ Header2: suite.chainA.CreateTMClientHeader(testChainID, int64(testClientHeight.RevisionHeight+1), testClientHeight, suite.ctx.BlockTime(), bothValSet, bothValSet, bothSigners),
+ ClientId: clientID,
+ },
+ func() error {
+ suite.consensusState.NextValidatorsHash = bothValsHash
+ clientState := ibctmtypes.NewClientState(testChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, testClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false)
+ clientID, err = suite.keeper.CreateClient(suite.ctx, clientState, suite.consensusState)
+
+ return err
+ },
+ true,
+ },
+ {
+ "time misbehavior should pass",
+ &ibctmtypes.Misbehaviour{
+ Header1: suite.chainA.CreateTMClientHeader(testChainID, int64(testClientHeight.RevisionHeight+5), testClientHeight, suite.ctx.BlockTime(), bothValSet, bothValSet, bothSigners),
+ Header2: suite.chainA.CreateTMClientHeader(testChainID, int64(testClientHeight.RevisionHeight+1), testClientHeight, altTime, bothValSet, bothValSet, bothSigners),
ClientId: clientID,
},
func() error {
@@ -397,8 +495,8 @@ func (suite *KeeperTestSuite) TestCheckMisbehaviourAndUpdateState() {
{
"misbehavior at later height should pass",
&ibctmtypes.Misbehaviour{
- Header1: suite.chainA.CreateTMClientHeader(testChainID, int64(heightPlus5.RevisionHeight), testClientHeight, altTime, bothValSet, valSet, bothSigners),
- Header2: suite.chainA.CreateTMClientHeader(testChainID, int64(heightPlus5.RevisionHeight), testClientHeight, suite.ctx.BlockTime(), bothValSet, valSet, bothSigners),
+ Header1: suite.chainA.CreateTMClientHeader(testChainID, int64(heightPlus5.RevisionHeight+1), testClientHeight, altTime, bothValSet, valSet, bothSigners),
+ Header2: suite.chainA.CreateTMClientHeader(testChainID, int64(heightPlus5.RevisionHeight+1), testClientHeight, suite.ctx.BlockTime(), bothValSet, valSet, bothSigners),
ClientId: clientID,
},
func() error {
@@ -409,7 +507,7 @@ func (suite *KeeperTestSuite) TestCheckMisbehaviourAndUpdateState() {
// store intermediate consensus state to check that trustedHeight does not need to be highest consensus state before header height
intermediateConsState := &ibctmtypes.ConsensusState{
Timestamp: suite.now.Add(time.Minute),
- NextValidatorsHash: suite.valSetHash,
+ NextValidatorsHash: suite.chainB.Vals.Hash(),
}
suite.keeper.SetClientConsensusState(suite.ctx, clientID, heightPlus3, intermediateConsState)
@@ -423,8 +521,8 @@ func (suite *KeeperTestSuite) TestCheckMisbehaviourAndUpdateState() {
{
"misbehavior at later height with different trusted heights should pass",
&ibctmtypes.Misbehaviour{
- Header1: suite.chainA.CreateTMClientHeader(testChainID, int64(heightPlus5.RevisionHeight), testClientHeight, altTime, bothValSet, valSet, bothSigners),
- Header2: suite.chainA.CreateTMClientHeader(testChainID, int64(heightPlus5.RevisionHeight), heightPlus3, suite.ctx.BlockTime(), bothValSet, bothValSet, bothSigners),
+ Header1: suite.chainA.CreateTMClientHeader(testChainID, int64(heightPlus5.RevisionHeight+1), testClientHeight, altTime, bothValSet, valSet, bothSigners),
+ Header2: suite.chainA.CreateTMClientHeader(testChainID, int64(heightPlus5.RevisionHeight+1), heightPlus3, suite.ctx.BlockTime(), bothValSet, bothValSet, bothSigners),
ClientId: clientID,
},
func() error {
@@ -446,11 +544,27 @@ func (suite *KeeperTestSuite) TestCheckMisbehaviourAndUpdateState() {
},
true,
},
+ {
+ "misbehavior ValidateBasic fails: misbehaviour height is at same height as trusted height",
+ &ibctmtypes.Misbehaviour{
+ Header1: suite.chainA.CreateTMClientHeader(testChainID, int64(testClientHeight.RevisionHeight), testClientHeight, altTime, bothValSet, bothValSet, bothSigners),
+ Header2: suite.chainA.CreateTMClientHeader(testChainID, int64(testClientHeight.RevisionHeight), testClientHeight, suite.ctx.BlockTime(), bothValSet, bothValSet, bothSigners),
+ ClientId: clientID,
+ },
+ func() error {
+ suite.consensusState.NextValidatorsHash = bothValsHash
+ clientState := ibctmtypes.NewClientState(testChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, testClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false)
+ clientID, err = suite.keeper.CreateClient(suite.ctx, clientState, suite.consensusState)
+
+ return err
+ },
+ false,
+ },
{
"trusted ConsensusState1 not found",
&ibctmtypes.Misbehaviour{
- Header1: suite.chainA.CreateTMClientHeader(testChainID, int64(heightPlus5.RevisionHeight), heightPlus3, altTime, bothValSet, bothValSet, bothSigners),
- Header2: suite.chainA.CreateTMClientHeader(testChainID, int64(heightPlus5.RevisionHeight), testClientHeight, suite.ctx.BlockTime(), bothValSet, valSet, bothSigners),
+ Header1: suite.chainA.CreateTMClientHeader(testChainID, int64(heightPlus5.RevisionHeight+1), heightPlus3, altTime, bothValSet, bothValSet, bothSigners),
+ Header2: suite.chainA.CreateTMClientHeader(testChainID, int64(heightPlus5.RevisionHeight+1), testClientHeight, suite.ctx.BlockTime(), bothValSet, valSet, bothSigners),
ClientId: clientID,
},
func() error {
@@ -465,8 +579,8 @@ func (suite *KeeperTestSuite) TestCheckMisbehaviourAndUpdateState() {
{
"trusted ConsensusState2 not found",
&ibctmtypes.Misbehaviour{
- Header1: suite.chainA.CreateTMClientHeader(testChainID, int64(heightPlus5.RevisionHeight), testClientHeight, altTime, bothValSet, valSet, bothSigners),
- Header2: suite.chainA.CreateTMClientHeader(testChainID, int64(heightPlus5.RevisionHeight), heightPlus3, suite.ctx.BlockTime(), bothValSet, bothValSet, bothSigners),
+ Header1: suite.chainA.CreateTMClientHeader(testChainID, int64(heightPlus5.RevisionHeight+1), testClientHeight, altTime, bothValSet, valSet, bothSigners),
+ Header2: suite.chainA.CreateTMClientHeader(testChainID, int64(heightPlus5.RevisionHeight+1), heightPlus3, suite.ctx.BlockTime(), bothValSet, bothValSet, bothSigners),
ClientId: clientID,
},
func() error {
@@ -487,8 +601,8 @@ func (suite *KeeperTestSuite) TestCheckMisbehaviourAndUpdateState() {
{
"client already is not active - client is frozen",
&ibctmtypes.Misbehaviour{
- Header1: suite.chainA.CreateTMClientHeader(testChainID, int64(testClientHeight.RevisionHeight), testClientHeight, altTime, bothValSet, bothValSet, bothSigners),
- Header2: suite.chainA.CreateTMClientHeader(testChainID, int64(testClientHeight.RevisionHeight), testClientHeight, suite.ctx.BlockTime(), bothValSet, bothValSet, bothSigners),
+ Header1: suite.chainA.CreateTMClientHeader(testChainID, int64(testClientHeight.RevisionHeight+1), testClientHeight, altTime, bothValSet, bothValSet, bothSigners),
+ Header2: suite.chainA.CreateTMClientHeader(testChainID, int64(testClientHeight.RevisionHeight+1), testClientHeight, suite.ctx.BlockTime(), bothValSet, bothValSet, bothSigners),
ClientId: clientID,
},
func() error {
@@ -506,8 +620,8 @@ func (suite *KeeperTestSuite) TestCheckMisbehaviourAndUpdateState() {
{
"misbehaviour check failed",
&ibctmtypes.Misbehaviour{
- Header1: suite.chainA.CreateTMClientHeader(testChainID, int64(testClientHeight.RevisionHeight), testClientHeight, altTime, bothValSet, bothValSet, bothSigners),
- Header2: suite.chainA.CreateTMClientHeader(testChainID, int64(testClientHeight.RevisionHeight), testClientHeight, suite.ctx.BlockTime(), altValSet, bothValSet, altSigners),
+ Header1: suite.chainA.CreateTMClientHeader(testChainID, int64(testClientHeight.RevisionHeight+1), testClientHeight, altTime, bothValSet, bothValSet, bothSigners),
+ Header2: suite.chainA.CreateTMClientHeader(testChainID, int64(testClientHeight.RevisionHeight+1), testClientHeight, suite.ctx.BlockTime(), altValSet, bothValSet, altSigners),
ClientId: clientID,
},
func() error {
@@ -544,8 +658,6 @@ func (suite *KeeperTestSuite) TestCheckMisbehaviourAndUpdateState() {
clientState, found := suite.keeper.GetClientState(suite.ctx, clientID)
suite.Require().True(found, "valid test case %d failed: %s", i, tc.name)
suite.Require().True(!clientState.(*ibctmtypes.ClientState).FrozenHeight.IsZero(), "valid test case %d failed: %s", i, tc.name)
- suite.Require().Equal(tc.misbehaviour.GetHeight(), clientState.GetFrozenHeight(),
- "valid test case %d failed: %s. Expected FrozenHeight %s got %s", tc.misbehaviour.GetHeight(), clientState.GetFrozenHeight())
} else {
suite.Require().Error(err, "invalid test case %d passed: %s", i, tc.name)
}
diff --git a/modules/light-clients/07-tendermint/types/header.go b/modules/light-clients/07-tendermint/types/header.go
index 9bd59708..d3662bb7 100644
--- a/modules/light-clients/07-tendermint/types/header.go
+++ b/modules/light-clients/07-tendermint/types/header.go
@@ -62,10 +62,9 @@ func (h Header) ValidateBasic() error {
return sdkerrors.Wrap(err, "header failed basic validation")
}
- // TrustedHeight is less than Header for updates
- // and less than or equal to Header for misbehaviour
- if h.TrustedHeight.GT(h.GetHeight()) {
- return sdkerrors.Wrapf(ErrInvalidHeaderHeight, "TrustedHeight %d must be less than or equal to header height %d",
+ // TrustedHeight is less than Header for updates and misbehaviour
+ if h.TrustedHeight.GTE(h.GetHeight()) {
+ return sdkerrors.Wrapf(ErrInvalidHeaderHeight, "TrustedHeight %d must be less than header height %d",
h.TrustedHeight, h.GetHeight())
}
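
The hunk above tightens header validation so that TrustedHeight must be strictly below the header's own height for updates and misbehaviour submissions alike. A minimal standalone sketch of that rule, using a simplified height type rather than the real clienttypes.Height (the GTE semantics below are an assumption mirroring revision-number-then-revision-height comparison):

package main

import "fmt"

// height is a simplified stand-in for clienttypes.Height; illustrative only.
type height struct {
	revisionNumber uint64
	revisionHeight uint64
}

// gte assumes the same ordering as clienttypes.Height.GTE: compare revision
// numbers first, then revision heights.
func (h height) gte(o height) bool {
	if h.revisionNumber != o.revisionNumber {
		return h.revisionNumber > o.revisionNumber
	}
	return h.revisionHeight >= o.revisionHeight
}

// validateTrustedHeight applies the tightened rule: the trusted height must be
// strictly less than the header height for updates and misbehaviour alike.
func validateTrustedHeight(trusted, hdr height) error {
	if trusted.gte(hdr) {
		return fmt.Errorf("TrustedHeight %v must be less than header height %v", trusted, hdr)
	}
	return nil
}

func main() {
	fmt.Println(validateTrustedHeight(height{0, 10}, height{0, 10})) // rejected after this change
	fmt.Println(validateTrustedHeight(height{0, 9}, height{0, 10}))  // accepted: prints <nil>
}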
diff --git a/modules/light-clients/07-tendermint/types/header_test.go b/modules/light-clients/07-tendermint/types/header_test.go
index 487b2794..37a7082d 100644
--- a/modules/light-clients/07-tendermint/types/header_test.go
+++ b/modules/light-clients/07-tendermint/types/header_test.go
@@ -43,8 +43,8 @@ func (suite *TendermintTestSuite) TestHeaderValidateBasic() {
header = suite.chainA.LastHeader
header.SignedHeader.Commit = nil
}, false},
- {"trusted height is greater than header height", func() {
- header.TrustedHeight = header.GetHeight().(clienttypes.Height).Increment().(clienttypes.Height)
+ {"trusted height is equal to header height", func() {
+ header.TrustedHeight = header.GetHeight().(clienttypes.Height)
}, false},
{"validator set nil", func() {
header.ValidatorSet = nil
diff --git a/modules/light-clients/07-tendermint/types/misbehaviour.go b/modules/light-clients/07-tendermint/types/misbehaviour.go
index 51e59612..9a5ef74c 100644
--- a/modules/light-clients/07-tendermint/types/misbehaviour.go
+++ b/modules/light-clients/07-tendermint/types/misbehaviour.go
@@ -1,7 +1,6 @@
package types
import (
- "bytes"
"time"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
@@ -15,6 +14,9 @@ import (
var _ exported.Misbehaviour = &Misbehaviour{}
+// Use the same FrozenHeight for all misbehaviour
+var FrozenHeight = clienttypes.NewHeight(0, 1)
+
// NewMisbehaviour creates a new Misbehaviour instance.
func NewMisbehaviour(clientID string, header1, header2 *Header) *Misbehaviour {
return &Misbehaviour{
@@ -35,8 +37,6 @@ func (misbehaviour Misbehaviour) GetClientID() string {
}
// GetHeight returns the height at which misbehaviour occurred
-//
-// NOTE: assumes that misbehaviour headers have the same height
func (misbehaviour Misbehaviour) GetHeight() exported.Height {
return misbehaviour.Header1.GetHeight()
}
@@ -93,9 +93,9 @@ func (misbehaviour Misbehaviour) ValidateBasic() error {
sdkerrors.Wrap(err, "header 2 failed validation").Error(),
)
}
- // Ensure that Heights are the same
- if misbehaviour.Header1.GetHeight() != misbehaviour.Header2.GetHeight() {
- return sdkerrors.Wrapf(clienttypes.ErrInvalidMisbehaviour, "headers in misbehaviour are on different heights (%d ≠ %d)", misbehaviour.Header1.GetHeight(), misbehaviour.Header2.GetHeight())
+ // Ensure that Height1 is greater than or equal to Height2
+ if misbehaviour.Header1.GetHeight().LT(misbehaviour.Header2.GetHeight()) {
+ return sdkerrors.Wrapf(clienttypes.ErrInvalidMisbehaviour, "Header1 height is less than Header2 height (%s < %s)", misbehaviour.Header1.GetHeight(), misbehaviour.Header2.GetHeight())
}
blockID1, err := tmtypes.BlockIDFromProto(&misbehaviour.Header1.SignedHeader.Commit.BlockID)
@@ -107,10 +107,6 @@ func (misbehaviour Misbehaviour) ValidateBasic() error {
return sdkerrors.Wrap(err, "invalid block ID from header 2 in misbehaviour")
}
- // Ensure that Commit Hashes are different
- if bytes.Equal(blockID1.Hash, blockID2.Hash) {
- return sdkerrors.Wrap(clienttypes.ErrInvalidMisbehaviour, "headers block hashes are equal")
- }
if err := validCommit(misbehaviour.Header1.Header.ChainID, *blockID1,
misbehaviour.Header1.Commit, misbehaviour.Header1.ValidatorSet); err != nil {
return err
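
With both fork and time misbehaviour accepted, ValidateBasic above only enforces an ordering between the two headers (Header1 at a height greater than or equal to Header2) and defers the equal-block-hash check to the stateful handler. A minimal sketch of that relaxed stateless check, with a plain integer standing in for clienttypes.Height:

package main

import "fmt"

// validateBasicOrdering mirrors the relaxed stateless check: Header1's height
// must be greater than or equal to Header2's height. Equal heights (potential
// fork) and strictly greater heights (potential time violation) both pass.
func validateBasicOrdering(header1Height, header2Height uint64) error {
	if header1Height < header2Height {
		return fmt.Errorf("Header1 height is less than Header2 height (%d < %d)", header1Height, header2Height)
	}
	return nil
}

func main() {
	fmt.Println(validateBasicOrdering(12, 12)) // same height: allowed
	fmt.Println(validateBasicOrdering(15, 12)) // Header1 above Header2: allowed
	fmt.Println(validateBasicOrdering(10, 12)) // rejected
}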
diff --git a/modules/light-clients/07-tendermint/types/misbehaviour_handle.go b/modules/light-clients/07-tendermint/types/misbehaviour_handle.go
index fa00dad9..28697749 100644
--- a/modules/light-clients/07-tendermint/types/misbehaviour_handle.go
+++ b/modules/light-clients/07-tendermint/types/misbehaviour_handle.go
@@ -1,6 +1,7 @@
package types
import (
+ "bytes"
"time"
tmtypes "github.com/tendermint/tendermint/types"
@@ -19,6 +20,7 @@ import (
// of misbehaviour.Header1
// Similarly, consensusState2 is the trusted consensus state that corresponds
// to misbehaviour.Header2
+// Misbehaviour sets frozen height to {0, 1} since it is only used as a boolean value (zero or non-zero).
func (cs ClientState) CheckMisbehaviourAndUpdateState(
ctx sdk.Context,
cdc codec.BinaryCodec,
@@ -32,6 +34,32 @@ func (cs ClientState) CheckMisbehaviourAndUpdateState(
// The status of the client is checked in 02-client
+ // if the heights are equal, check that this is valid fork misbehaviour
+ // otherwise, if the heights are unequal, check that this is valid misbehaviour of a BFT time violation
+ if tmMisbehaviour.Header1.GetHeight().EQ(tmMisbehaviour.Header2.GetHeight()) {
+ blockID1, err := tmtypes.BlockIDFromProto(&tmMisbehaviour.Header1.SignedHeader.Commit.BlockID)
+ if err != nil {
+ return nil, sdkerrors.Wrap(err, "invalid block ID from header 1 in misbehaviour")
+ }
+ blockID2, err := tmtypes.BlockIDFromProto(&tmMisbehaviour.Header2.SignedHeader.Commit.BlockID)
+ if err != nil {
+ return nil, sdkerrors.Wrap(err, "invalid block ID from header 2 in misbehaviour")
+ }
+
+ // Ensure that Commit Hashes are different
+ if bytes.Equal(blockID1.Hash, blockID2.Hash) {
+ return nil, sdkerrors.Wrap(clienttypes.ErrInvalidMisbehaviour, "headers block hashes are equal")
+ }
+ } else {
+ // Header1 is at greater height than Header2, therefore Header1 time must be less than or equal to
+ // Header2 time in order to be valid misbehaviour (violation of monotonic time).
+ if tmMisbehaviour.Header1.SignedHeader.Header.Time.After(tmMisbehaviour.Header2.SignedHeader.Header.Time) {
+ return nil, sdkerrors.Wrap(clienttypes.ErrInvalidMisbehaviour, "headers are not at the same height and their times are monotonically increasing")
+ }
+ }
+
+ // Regardless of the type of misbehaviour, ensure that both headers are valid and would have been accepted by light-client
+
// Retrieve trusted consensus states for each Header in misbehaviour
// and unmarshal from clientStore
@@ -63,7 +91,8 @@ func (cs ClientState) CheckMisbehaviourAndUpdateState(
return nil, sdkerrors.Wrap(err, "verifying Header2 in Misbehaviour failed")
}
- cs.FrozenHeight = tmMisbehaviour.GetHeight().(clienttypes.Height)
+ cs.FrozenHeight = FrozenHeight
+
return &cs, nil
}
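
Taken together, the new checks above split misbehaviour into two shapes: a fork (equal heights, different commit hashes) and a time violation (Header1 higher than Header2 but with a time that is not after Header2's). In both cases the client is frozen at the fixed sentinel height {0, 1} rather than at the misbehaviour height. A standalone sketch of that dispatch, using simplified header fields instead of the tendermint SignedHeader (an assumption for illustration):

package main

import (
	"errors"
	"fmt"
	"time"
)

// header is a simplified stand-in for the tendermint client header; illustrative only.
type header struct {
	height    uint64
	blockHash string
	time      time.Time
}

// classifyMisbehaviour assumes h1.height >= h2.height (guaranteed by ValidateBasic)
// and mirrors the dispatch above: equal heights must show conflicting hashes (fork),
// unequal heights must show a non-increasing time (time violation).
func classifyMisbehaviour(h1, h2 header) (string, error) {
	if h1.height == h2.height {
		if h1.blockHash == h2.blockHash {
			return "", errors.New("headers block hashes are equal")
		}
		return "fork", nil
	}
	if h1.time.After(h2.time) {
		return "", errors.New("time is monotonically increasing, not misbehaviour")
	}
	return "time violation", nil
}

func main() {
	now := time.Now()
	fmt.Println(classifyMisbehaviour(header{10, "aaa", now}, header{10, "bbb", now.Add(time.Minute)})) // fork
	fmt.Println(classifyMisbehaviour(header{12, "aaa", now}, header{10, "bbb", now}))                  // time violation
	fmt.Println(classifyMisbehaviour(header{12, "aaa", now.Add(time.Hour)}, header{10, "bbb", now}))   // rejected
}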
diff --git a/modules/light-clients/07-tendermint/types/misbehaviour_handle_test.go b/modules/light-clients/07-tendermint/types/misbehaviour_handle_test.go
index 1ce0b154..a257a4b8 100644
--- a/modules/light-clients/07-tendermint/types/misbehaviour_handle_test.go
+++ b/modules/light-clients/07-tendermint/types/misbehaviour_handle_test.go
@@ -50,15 +50,45 @@ func (suite *TendermintTestSuite) TestCheckMisbehaviourAndUpdateState() {
expPass bool
}{
{
- "valid misbehavior misbehaviour",
+ "valid fork misbehaviour",
types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false),
types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash),
height,
types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash),
height,
&types.Misbehaviour{
- Header1: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), height, suite.now, bothValSet, bothValSet, bothSigners),
- Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), height, suite.now.Add(time.Minute), bothValSet, bothValSet, bothSigners),
+ Header1: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight+1), height, suite.now, bothValSet, bothValSet, bothSigners),
+ Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight+1), height, suite.now.Add(time.Minute), bothValSet, bothValSet, bothSigners),
+ ClientId: chainID,
+ },
+ suite.now,
+ true,
+ },
+ {
+ "valid time misbehaviour",
+ types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false),
+ types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash),
+ height,
+ types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash),
+ height,
+ &types.Misbehaviour{
+ Header1: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight+3), height, suite.now, bothValSet, bothValSet, bothSigners),
+ Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight+1), height, suite.now, bothValSet, bothValSet, bothSigners),
+ ClientId: chainID,
+ },
+ suite.now,
+ true,
+ },
+ {
+ "valid time misbehaviour header 1 stricly less than header 2",
+ types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false),
+ types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash),
+ height,
+ types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash),
+ height,
+ &types.Misbehaviour{
+ Header1: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight+3), height, suite.now, bothValSet, bothValSet, bothSigners),
+ Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight+1), height, suite.now.Add(time.Hour), bothValSet, bothValSet, bothSigners),
ClientId: chainID,
},
suite.now,
@@ -72,8 +102,8 @@ func (suite *TendermintTestSuite) TestCheckMisbehaviourAndUpdateState() {
types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash),
heightMinus1,
&types.Misbehaviour{
- Header1: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), heightMinus1, suite.now, bothValSet, bothValSet, bothSigners),
- Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), heightMinus1, suite.now.Add(time.Minute), bothValSet, bothValSet, bothSigners),
+ Header1: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight+1), heightMinus1, suite.now, bothValSet, bothValSet, bothSigners),
+ Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight+1), heightMinus1, suite.now.Add(time.Minute), bothValSet, bothValSet, bothSigners),
ClientId: chainID,
},
suite.now,
@@ -87,8 +117,8 @@ func (suite *TendermintTestSuite) TestCheckMisbehaviourAndUpdateState() {
types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), suite.valsHash),
heightMinus3,
&types.Misbehaviour{
- Header1: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), heightMinus1, suite.now, bothValSet, bothValSet, bothSigners),
- Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), heightMinus3, suite.now.Add(time.Minute), bothValSet, suite.valSet, bothSigners),
+ Header1: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight+1), heightMinus1, suite.now, bothValSet, bothValSet, bothSigners),
+ Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight+1), heightMinus3, suite.now.Add(time.Minute), bothValSet, suite.valSet, bothSigners),
ClientId: chainID,
},
suite.now,
@@ -102,8 +132,8 @@ func (suite *TendermintTestSuite) TestCheckMisbehaviourAndUpdateState() {
types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), suite.valsHash),
heightMinus3,
&types.Misbehaviour{
- Header1: suite.chainA.CreateTMClientHeader(chainIDRevision0, int64(height.RevisionHeight), heightMinus1, suite.now, bothValSet, bothValSet, bothSigners),
- Header2: suite.chainA.CreateTMClientHeader(chainIDRevision0, int64(height.RevisionHeight), heightMinus3, suite.now.Add(time.Minute), bothValSet, suite.valSet, bothSigners),
+ Header1: suite.chainA.CreateTMClientHeader(chainIDRevision0, int64(height.RevisionHeight+1), heightMinus1, suite.now, bothValSet, bothValSet, bothSigners),
+ Header2: suite.chainA.CreateTMClientHeader(chainIDRevision0, int64(height.RevisionHeight+1), heightMinus3, suite.now.Add(time.Minute), bothValSet, suite.valSet, bothSigners),
ClientId: chainID,
},
suite.now,
@@ -147,13 +177,43 @@ func (suite *TendermintTestSuite) TestCheckMisbehaviourAndUpdateState() {
types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), suite.valsHash),
height,
&types.Misbehaviour{
- Header1: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), height, suite.now, bothValSet, suite.valSet, bothSigners),
- Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), height, suite.now.Add(time.Minute), bothValSet, suite.valSet, bothSigners),
+ Header1: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight+1), height, suite.now, bothValSet, suite.valSet, bothSigners),
+ Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight+1), height, suite.now.Add(time.Minute), bothValSet, suite.valSet, bothSigners),
ClientId: chainID,
},
suite.now,
true,
},
+ {
+ "invalid fork misbehaviour: identical headers",
+ types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false),
+ types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash),
+ height,
+ types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash),
+ height,
+ &types.Misbehaviour{
+ Header1: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight+1), height, suite.now, bothValSet, bothValSet, bothSigners),
+ Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight+1), height, suite.now, bothValSet, bothValSet, bothSigners),
+ ClientId: chainID,
+ },
+ suite.now,
+ false,
+ },
+ {
+ "invalid time misbehaviour: monotonically increasing time",
+ types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false),
+ types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash),
+ height,
+ types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash),
+ height,
+ &types.Misbehaviour{
+ Header1: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight+3), height, suite.now.Add(time.Minute), bothValSet, bothValSet, bothSigners),
+ Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight+1), height, suite.now, bothValSet, bothValSet, bothSigners),
+ ClientId: chainID,
+ },
+ suite.now,
+ false,
+ },
{
"invalid misbehavior misbehaviour from different chain",
types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false),
@@ -162,8 +222,8 @@ func (suite *TendermintTestSuite) TestCheckMisbehaviourAndUpdateState() {
types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash),
height,
&types.Misbehaviour{
- Header1: suite.chainA.CreateTMClientHeader("ethermint", int64(height.RevisionHeight), height, suite.now, bothValSet, bothValSet, bothSigners),
- Header2: suite.chainA.CreateTMClientHeader("ethermint", int64(height.RevisionHeight), height, suite.now.Add(time.Minute), bothValSet, bothValSet, bothSigners),
+ Header1: suite.chainA.CreateTMClientHeader("ethermint", int64(height.RevisionHeight+1), height, suite.now, bothValSet, bothValSet, bothSigners),
+ Header2: suite.chainA.CreateTMClientHeader("ethermint", int64(height.RevisionHeight+1), height, suite.now.Add(time.Minute), bothValSet, bothValSet, bothSigners),
ClientId: chainID,
},
suite.now,
@@ -177,8 +237,8 @@ func (suite *TendermintTestSuite) TestCheckMisbehaviourAndUpdateState() {
types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), suite.valsHash),
heightMinus3,
&types.Misbehaviour{
- Header1: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), heightMinus1, suite.now, bothValSet, bothValSet, bothSigners),
- Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), height, suite.now.Add(time.Minute), bothValSet, suite.valSet, bothSigners),
+ Header1: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight+1), heightMinus1, suite.now, bothValSet, bothValSet, bothSigners),
+ Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight+1), height, suite.now.Add(time.Minute), bothValSet, suite.valSet, bothSigners),
ClientId: chainID,
},
suite.now,
@@ -192,8 +252,8 @@ func (suite *TendermintTestSuite) TestCheckMisbehaviourAndUpdateState() {
types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), suite.valsHash),
heightMinus3,
&types.Misbehaviour{
- Header1: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), heightMinus1, suite.now, bothValSet, bothValSet, bothSigners),
- Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), heightMinus3, suite.now.Add(time.Minute), bothValSet, bothValSet, bothSigners),
+ Header1: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight+1), heightMinus1, suite.now, bothValSet, bothValSet, bothSigners),
+ Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight+1), heightMinus3, suite.now.Add(time.Minute), bothValSet, bothValSet, bothSigners),
ClientId: chainID,
},
suite.now,
@@ -207,8 +267,8 @@ func (suite *TendermintTestSuite) TestCheckMisbehaviourAndUpdateState() {
types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash),
height,
&types.Misbehaviour{
- Header1: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), height, suite.now, bothValSet, bothValSet, bothSigners),
- Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), height, suite.now.Add(time.Minute), bothValSet, bothValSet, bothSigners),
+ Header1: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight+1), height, suite.now, bothValSet, bothValSet, bothSigners),
+ Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight+1), height, suite.now.Add(time.Minute), bothValSet, bothValSet, bothSigners),
ClientId: chainID,
},
suite.now,
@@ -222,8 +282,8 @@ func (suite *TendermintTestSuite) TestCheckMisbehaviourAndUpdateState() {
types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash),
height,
&types.Misbehaviour{
- Header1: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), heightMinus1, suite.now, bothValSet, bothValSet, bothSigners),
- Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), height, suite.now.Add(time.Minute), bothValSet, bothValSet, bothSigners),
+ Header1: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight+1), heightMinus1, suite.now, bothValSet, bothValSet, bothSigners),
+ Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight+1), height, suite.now.Add(time.Minute), bothValSet, bothValSet, bothSigners),
ClientId: chainID,
},
suite.now,
@@ -248,8 +308,8 @@ func (suite *TendermintTestSuite) TestCheckMisbehaviourAndUpdateState() {
types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash),
height,
&types.Misbehaviour{
- Header1: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), heightMinus1, suite.now, bothValSet, bothValSet, bothSigners),
- Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), heightMinus1, suite.now.Add(time.Minute), bothValSet, bothValSet, bothSigners),
+ Header1: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight+1), heightMinus1, suite.now, bothValSet, bothValSet, bothSigners),
+ Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight+1), heightMinus1, suite.now.Add(time.Minute), bothValSet, bothValSet, bothSigners),
ClientId: chainID,
},
suite.now,
@@ -263,8 +323,8 @@ func (suite *TendermintTestSuite) TestCheckMisbehaviourAndUpdateState() {
types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash),
height,
&types.Misbehaviour{
- Header1: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), heightMinus1, suite.now, bothValSet, bothValSet, bothSigners),
- Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), height, suite.now.Add(time.Minute), bothValSet, bothValSet, bothSigners),
+ Header1: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight+1), heightMinus1, suite.now, bothValSet, bothValSet, bothSigners),
+ Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight+1), height, suite.now.Add(time.Minute), bothValSet, bothValSet, bothSigners),
ClientId: chainID,
},
suite.now.Add(trustingPeriod),
@@ -278,8 +338,8 @@ func (suite *TendermintTestSuite) TestCheckMisbehaviourAndUpdateState() {
types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash),
height,
&types.Misbehaviour{
- Header1: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), height, suite.now, bothValSet, suite.valSet, bothSigners),
- Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), height, suite.now.Add(time.Minute), bothValSet, suite.valSet, bothSigners),
+ Header1: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight+1), height, suite.now, bothValSet, suite.valSet, bothSigners),
+ Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight+1), height, suite.now.Add(time.Minute), bothValSet, suite.valSet, bothSigners),
ClientId: chainID,
},
suite.now,
@@ -293,8 +353,8 @@ func (suite *TendermintTestSuite) TestCheckMisbehaviourAndUpdateState() {
types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash),
height,
&types.Misbehaviour{
- Header1: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), height, suite.now, altValSet, bothValSet, altSigners),
- Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), height, suite.now.Add(time.Minute), bothValSet, bothValSet, bothSigners),
+ Header1: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight+1), height, suite.now, altValSet, bothValSet, altSigners),
+ Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight+1), height, suite.now.Add(time.Minute), bothValSet, bothValSet, bothSigners),
ClientId: chainID,
},
suite.now,
@@ -308,8 +368,8 @@ func (suite *TendermintTestSuite) TestCheckMisbehaviourAndUpdateState() {
types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash),
height,
&types.Misbehaviour{
- Header1: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), height, suite.now, bothValSet, bothValSet, bothSigners),
- Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), height, suite.now.Add(time.Minute), altValSet, bothValSet, altSigners),
+ Header1: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight+1), height, suite.now, bothValSet, bothValSet, bothSigners),
+ Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight+1), height, suite.now.Add(time.Minute), altValSet, bothValSet, altSigners),
ClientId: chainID,
},
suite.now,
@@ -323,8 +383,8 @@ func (suite *TendermintTestSuite) TestCheckMisbehaviourAndUpdateState() {
types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash),
height,
&types.Misbehaviour{
- Header1: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), height, suite.now, altValSet, bothValSet, altSigners),
- Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), height, suite.now.Add(time.Minute), altValSet, bothValSet, altSigners),
+ Header1: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight+1), height, suite.now, altValSet, bothValSet, altSigners),
+ Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight+1), height, suite.now.Add(time.Minute), altValSet, bothValSet, altSigners),
ClientId: chainID,
},
suite.now,
@@ -352,7 +412,7 @@ func (suite *TendermintTestSuite) TestCheckMisbehaviourAndUpdateState() {
clientState, err := tc.clientState.CheckMisbehaviourAndUpdateState(
ctx,
- suite.cdc,
+ suite.chainA.App.AppCodec(),
suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(ctx, clientID), // pass in clientID prefixed clientStore
tc.misbehaviour,
)
@@ -361,8 +421,6 @@ func (suite *TendermintTestSuite) TestCheckMisbehaviourAndUpdateState() {
suite.Require().NoError(err, "valid test case %d failed: %s", i, tc.name)
suite.Require().NotNil(clientState, "valid test case %d failed: %s", i, tc.name)
suite.Require().True(!clientState.(*types.ClientState).FrozenHeight.IsZero(), "valid test case %d failed: %s", i, tc.name)
- suite.Require().Equal(tc.misbehaviour.GetHeight(), clientState.GetFrozenHeight(),
- "valid test case %d failed: %s. Expected FrozenHeight %s got %s", tc.misbehaviour.GetHeight(), clientState.GetFrozenHeight())
} else {
suite.Require().Error(err, "invalid test case %d passed: %s", i, tc.name)
suite.Require().Nil(clientState, "invalid test case %d passed: %s", i, tc.name)
diff --git a/modules/light-clients/07-tendermint/types/misbehaviour_test.go b/modules/light-clients/07-tendermint/types/misbehaviour_test.go
index 1b67b729..30deec2c 100644
--- a/modules/light-clients/07-tendermint/types/misbehaviour_test.go
+++ b/modules/light-clients/07-tendermint/types/misbehaviour_test.go
@@ -60,7 +60,7 @@ func (suite *TendermintTestSuite) TestMisbehaviourValidateBasic() {
expPass bool
}{
{
- "valid misbehaviour",
+ "valid fork misbehaviour, two headers at same height have different time",
&types.Misbehaviour{
Header1: suite.header,
Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), heightMinus1, suite.now.Add(time.Minute), suite.valSet, suite.valSet, signers),
@@ -69,6 +69,16 @@ func (suite *TendermintTestSuite) TestMisbehaviourValidateBasic() {
func(misbehaviour *types.Misbehaviour) error { return nil },
true,
},
+ {
+ "valid time misbehaviour, both headers at different heights are at same time",
+ &types.Misbehaviour{
+ Header1: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight+5), heightMinus1, suite.now, suite.valSet, suite.valSet, signers),
+ Header2: suite.header,
+ ClientId: clientID,
+ },
+ func(misbehaviour *types.Misbehaviour) error { return nil },
+ true,
+ },
{
"misbehaviour Header1 is nil",
types.NewMisbehaviour(clientID, nil, suite.header),
@@ -152,20 +162,10 @@ func (suite *TendermintTestSuite) TestMisbehaviourValidateBasic() {
false,
},
{
- "mismatched heights",
+ "header2 height is greater",
&types.Misbehaviour{
Header1: suite.header,
- Header2: suite.chainA.CreateTMClientHeader(chainID, 6, clienttypes.NewHeight(0, 4), suite.now, suite.valSet, suite.valSet, signers),
- ClientId: clientID,
- },
- func(misbehaviour *types.Misbehaviour) error { return nil },
- false,
- },
- {
- "same block id",
- &types.Misbehaviour{
- Header1: suite.header,
- Header2: suite.header,
+ Header2: suite.chainA.CreateTMClientHeader(chainID, 6, clienttypes.NewHeight(0, height.RevisionHeight+1), suite.now, suite.valSet, suite.valSet, signers),
ClientId: clientID,
},
func(misbehaviour *types.Misbehaviour) error { return nil },
diff --git a/modules/light-clients/07-tendermint/types/store.go b/modules/light-clients/07-tendermint/types/store.go
index 68793621..f261f4dc 100644
--- a/modules/light-clients/07-tendermint/types/store.go
+++ b/modules/light-clients/07-tendermint/types/store.go
@@ -1,6 +1,7 @@
package types
import (
+ "bytes"
"encoding/binary"
"strings"
@@ -170,6 +171,8 @@ func GetHeightFromIterationKey(iterKey []byte) exported.Height {
return clienttypes.NewHeight(revision, height)
}
+// IterateConsensusStateAscending iterates through the consensus states in ascending order. It calls the provided
+// callback on each height, until stop=true is returned.
func IterateConsensusStateAscending(clientStore sdk.KVStore, cb func(height exported.Height) (stop bool)) error {
iterator := sdk.KVStorePrefixIterator(clientStore, []byte(KeyIterateConsensusStatePrefix))
defer iterator.Close()
@@ -186,17 +189,25 @@ func IterateConsensusStateAscending(clientStore sdk.KVStore, cb func(height expo
// GetNextConsensusState returns the lowest consensus state that is larger than the given height.
// The Iterator returns a storetypes.Iterator which iterates from start (inclusive) to end (exclusive).
-// Thus, to get the next consensus state, we must first call iterator.Next() and then get the value.
+// If the starting height exists in the store, we need to call iterator.Next() to get the next consensus state.
+// Otherwise, the iterator is already at the next consensus state so we can call iterator.Value() immediately.
func GetNextConsensusState(clientStore sdk.KVStore, cdc codec.BinaryCodec, height exported.Height) (*ConsensusState, bool) {
iterateStore := prefix.NewStore(clientStore, []byte(KeyIterateConsensusStatePrefix))
iterator := iterateStore.Iterator(bigEndianHeightBytes(height), nil)
defer iterator.Close()
- // ignore the consensus state at current height and get next height
- iterator.Next()
if !iterator.Valid() {
return nil, false
}
+ // if iterator is at current height, ignore the consensus state at current height and get next height
+ // if iterator value is not at current height, it is already at next height.
+ if bytes.Equal(iterator.Value(), host.ConsensusStateKey(height)) {
+ iterator.Next()
+ if !iterator.Valid() {
+ return nil, false
+ }
+ }
+
csKey := iterator.Value()
return getTmConsensusState(clientStore, cdc, csKey)
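
The corrected GetNextConsensusState above only advances the iterator when the queried height itself exists in the store; otherwise the iterator is already positioned at the next consensus state. The same boundary logic over a plain sorted slice, as a hedged sketch (not the SDK KVStore iterator API):

package main

import (
	"fmt"
	"sort"
)

// nextHeight returns the lowest stored height strictly greater than the query height.
// It mirrors the corrected iterator logic: start at the first height >= query, and only
// skip one entry when the query height itself exists in the store.
func nextHeight(stored []uint64, query uint64) (uint64, bool) {
	sort.Slice(stored, func(i, j int) bool { return stored[i] < stored[j] })
	// Find the first stored height >= query (the iterator's starting position).
	i := sort.Search(len(stored), func(i int) bool { return stored[i] >= query })
	if i == len(stored) {
		return 0, false
	}
	// If the starting height exists in the store, step past it to reach the next one.
	if stored[i] == query {
		i++
		if i == len(stored) {
			return 0, false
		}
	}
	return stored[i], true
}

func main() {
	heights := []uint64{5, 10, 20}
	fmt.Println(nextHeight(heights, 10)) // 20 true: query exists, skip it
	fmt.Println(nextHeight(heights, 12)) // 20 true: query absent, iterator already at next
	fmt.Println(nextHeight(heights, 20)) // 0 false: nothing above the query
}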
diff --git a/modules/light-clients/07-tendermint/types/tendermint_test.go b/modules/light-clients/07-tendermint/types/tendermint_test.go
index 7bb8977c..071f6f6c 100644
--- a/modules/light-clients/07-tendermint/types/tendermint_test.go
+++ b/modules/light-clients/07-tendermint/types/tendermint_test.go
@@ -10,12 +10,12 @@ import (
tmtypes "github.com/tendermint/tendermint/types"
"github.com/cosmos/cosmos-sdk/codec"
- "github.com/cosmos/ibc-go/testing/simapp"
sdk "github.com/cosmos/cosmos-sdk/types"
clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
ibctmtypes "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types"
ibctesting "github.com/cosmos/ibc-go/testing"
ibctestingmock "github.com/cosmos/ibc-go/testing/mock"
+ "github.com/cosmos/ibc-go/testing/simapp"
)
const (
@@ -90,6 +90,19 @@ func (suite *TendermintTestSuite) SetupTest() {
suite.ctx = app.BaseApp.NewContext(checkTx, tmproto.Header{Height: 1, Time: suite.now})
}
+func getSuiteSigners(suite *TendermintTestSuite) []tmtypes.PrivValidator {
+ return []tmtypes.PrivValidator{suite.privVal}
+}
+
+func getBothSigners(suite *TendermintTestSuite, altVal *tmtypes.Validator, altPrivVal tmtypes.PrivValidator) (*tmtypes.ValidatorSet, []tmtypes.PrivValidator) {
+ // Create bothValSet with both suite validator and altVal. Would be valid update
+ bothValSet := tmtypes.NewValidatorSet(append(suite.valSet.Validators, altVal))
+ // Create signer array and ensure it is in same order as bothValSet
+ _, suiteVal := suite.valSet.GetByIndex(0)
+ bothSigners := ibctesting.CreateSortedSignerArray(altPrivVal, suite.privVal, altVal, suiteVal)
+ return bothValSet, bothSigners
+}
+
func TestTendermintTestSuite(t *testing.T) {
suite.Run(t, new(TendermintTestSuite))
}
diff --git a/modules/light-clients/07-tendermint/types/update.go b/modules/light-clients/07-tendermint/types/update.go
index bfabb324..e1104e8d 100644
--- a/modules/light-clients/07-tendermint/types/update.go
+++ b/modules/light-clients/07-tendermint/types/update.go
@@ -2,6 +2,7 @@ package types
import (
"bytes"
+ "reflect"
"time"
"github.com/tendermint/tendermint/light"
@@ -38,6 +39,12 @@ import (
// Tendermint client validity checking uses the bisection algorithm described
// in the [Tendermint spec](https://github.com/tendermint/spec/blob/master/spec/consensus/light-client.md).
//
+// Misbehaviour Detection:
+// UpdateClient will detect implicit misbehaviour by enforcing certain invariants on any new update call and will return a frozen client.
+// 1. Any valid update that creates a different consensus state for an already existing height is evidence of misbehaviour and will freeze the client.
+// 2. Any valid update that breaks time monotonicity with respect to its neighboring consensus states is evidence of misbehaviour and will freeze the client.
+// Misbehaviour sets frozen height to {0, 1} since it is only used as a boolean value (zero or non-zero).
+//
// Pruning:
// UpdateClient will additionally retrieve the earliest consensus state for this clientID and check if it is expired. If it is,
// that consensus state will be pruned from store along with all associated metadata. This will prevent the client store from
@@ -53,18 +60,56 @@ func (cs ClientState) CheckHeaderAndUpdateState(
)
}
+ // Check if the Client store already has a consensus state for the header's height
+ // If the consensus state exists, and it matches the header then we return early
+ // since header has already been submitted in a previous UpdateClient.
+ var conflictingHeader bool
+ prevConsState, _ := GetConsensusState(clientStore, cdc, header.GetHeight())
+ if prevConsState != nil {
+ // This header has already been submitted and the necessary state is already stored
+ // in client store, thus we can return early without further validation.
+ if reflect.DeepEqual(prevConsState, tmHeader.ConsensusState()) {
+ return &cs, prevConsState, nil
+ }
+ // A consensus state already exists for this height, but it does not match the provided header.
+ // Thus, we must check that this header is valid, and if so we will freeze the client.
+ conflictingHeader = true
+ }
+
// get consensus state from clientStore
- tmConsState, err := GetConsensusState(clientStore, cdc, tmHeader.TrustedHeight)
+ trustedConsState, err := GetConsensusState(clientStore, cdc, tmHeader.TrustedHeight)
if err != nil {
return nil, nil, sdkerrors.Wrapf(
err, "could not get consensus state from clientstore at TrustedHeight: %s", tmHeader.TrustedHeight,
)
}
- if err := checkValidity(&cs, tmConsState, tmHeader, ctx.BlockTime()); err != nil {
+ if err := checkValidity(&cs, trustedConsState, tmHeader, ctx.BlockTime()); err != nil {
return nil, nil, err
}
+ consState := tmHeader.ConsensusState()
+ // Header is different from existing consensus state and also valid, so freeze the client and return
+ if conflictingHeader {
+ cs.FrozenHeight = FrozenHeight
+ return &cs, consState, nil
+ }
+ // Check that consensus state timestamps are monotonic
+ prevCons, prevOk := GetPreviousConsensusState(clientStore, cdc, header.GetHeight())
+ nextCons, nextOk := GetNextConsensusState(clientStore, cdc, header.GetHeight())
+ // if previous consensus state exists, check consensus state time is greater than previous consensus state time
+ // if previous consensus state is not before current consensus state, freeze the client and return.
+ if prevOk && !prevCons.Timestamp.Before(consState.Timestamp) {
+ cs.FrozenHeight = FrozenHeight
+ return &cs, consState, nil
+ }
+ // if next consensus state exists, check consensus state time is less than next consensus state time
+ // if next consensus state is not after current consensus state, freeze the client and return.
+ if nextOk && !nextCons.Timestamp.After(consState.Timestamp) {
+ cs.FrozenHeight = FrozenHeight
+ return &cs, consState, nil
+ }
+
// Check the earliest consensus state to see if it is expired, if so then set the prune height
// so that we can delete consensus state and all associated metadata.
var (
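
The update path above now performs implicit misbehaviour detection: resubmitting an identical header is a no-op, a valid header that conflicts with an already stored consensus state freezes the client, and a valid header whose time is not strictly between its neighbouring consensus states also freezes it, always at the {0, 1} sentinel. A simplified sketch of that decision flow (plain structs standing in for the stored consensus states, not the ibc-go types):

package main

import (
	"fmt"
	"time"
)

// consState is a simplified stand-in for a stored consensus state; illustrative only.
type consState struct {
	root string
	time time.Time
}

// updateOutcome mirrors the flow above: an identical resubmission is a no-op, a
// conflicting state at an existing height freezes the client, and a header whose
// time is not strictly between its neighbouring consensus states also freezes it.
func updateOutcome(existing, prev, next *consState, incoming consState) string {
	if existing != nil {
		if *existing == incoming {
			return "no-op (duplicate update)"
		}
		return "frozen at {0, 1} (conflicting header at existing height)"
	}
	if prev != nil && !prev.time.Before(incoming.time) {
		return "frozen at {0, 1} (header time not after previous consensus state)"
	}
	if next != nil && !next.time.After(incoming.time) {
		return "frozen at {0, 1} (header time not before next consensus state)"
	}
	return "updated"
}

func main() {
	now := time.Now()
	prev := &consState{"a", now}
	next := &consState{"b", now.Add(2 * time.Hour)}
	fmt.Println(updateOutcome(nil, prev, next, consState{"c", now.Add(time.Hour)})) // updated
	fmt.Println(updateOutcome(nil, prev, next, consState{"c", now}))                // frozen: not after previous
	fmt.Println(updateOutcome(&consState{"x", now}, nil, nil, consState{"y", now})) // frozen: conflicting header
	fmt.Println(updateOutcome(&consState{"x", now}, nil, nil, consState{"x", now})) // no-op: duplicate
}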
diff --git a/modules/light-clients/07-tendermint/types/update_test.go b/modules/light-clients/07-tendermint/types/update_test.go
index 14c34645..95a159ef 100644
--- a/modules/light-clients/07-tendermint/types/update_test.go
+++ b/modules/light-clients/07-tendermint/types/update_test.go
@@ -1,6 +1,7 @@
package types_test
import (
+ "fmt"
"time"
tmtypes "github.com/tendermint/tendermint/types"
@@ -40,13 +41,13 @@ func (suite *TendermintTestSuite) TestCheckHeaderAndUpdateState() {
altVal := tmtypes.NewValidator(altPubKey, revisionHeight)
// Create alternative validator set with only altVal, invalid update (too much change in valSet)
altValSet := tmtypes.NewValidatorSet([]*tmtypes.Validator{altVal})
-
altSigners := []tmtypes.PrivValidator{altPrivVal}
testCases := []struct {
- name string
- setup func(suite *TendermintTestSuite)
- expPass bool
+ name string
+ setup func(*TendermintTestSuite)
+ expFrozen bool
+ expPass bool
}{
{
name: "successful update with next height and same validator set",
@@ -56,7 +57,8 @@ func (suite *TendermintTestSuite) TestCheckHeaderAndUpdateState() {
newHeader = suite.chainA.CreateTMClientHeader(chainID, int64(heightPlus1.RevisionHeight), height, suite.headerTime, suite.valSet, suite.valSet, signers)
currentTime = suite.now
},
- expPass: true,
+ expFrozen: false,
+ expPass: true,
},
{
name: "successful update with future height and different validator set",
@@ -66,7 +68,8 @@ func (suite *TendermintTestSuite) TestCheckHeaderAndUpdateState() {
newHeader = suite.chainA.CreateTMClientHeader(chainID, int64(heightPlus5.RevisionHeight), height, suite.headerTime, bothValSet, suite.valSet, bothSigners)
currentTime = suite.now
},
- expPass: true,
+ expFrozen: false,
+ expPass: true,
},
{
name: "successful update with next height and different validator set",
@@ -76,7 +79,8 @@ func (suite *TendermintTestSuite) TestCheckHeaderAndUpdateState() {
newHeader = suite.chainA.CreateTMClientHeader(chainID, int64(heightPlus1.RevisionHeight), height, suite.headerTime, bothValSet, bothValSet, bothSigners)
currentTime = suite.now
},
- expPass: true,
+ expFrozen: false,
+ expPass: true,
},
{
name: "successful update for a previous height",
@@ -87,7 +91,8 @@ func (suite *TendermintTestSuite) TestCheckHeaderAndUpdateState() {
newHeader = suite.chainA.CreateTMClientHeader(chainID, int64(heightMinus1.RevisionHeight), heightMinus3, suite.headerTime, bothValSet, suite.valSet, bothSigners)
currentTime = suite.now
},
- expPass: true,
+ expFrozen: false,
+ expPass: true,
},
{
name: "successful update for a previous revision",
@@ -100,6 +105,72 @@ func (suite *TendermintTestSuite) TestCheckHeaderAndUpdateState() {
},
expPass: true,
},
+ {
+ name: "successful update with identical header to a previous update",
+ setup: func(suite *TendermintTestSuite) {
+ clientState = types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, heightPlus1, commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
+ consensusState = types.NewConsensusState(suite.clientTime, commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), suite.valsHash)
+ newHeader = suite.chainA.CreateTMClientHeader(chainID, int64(heightPlus1.RevisionHeight), height, suite.headerTime, suite.valSet, suite.valSet, signers)
+ currentTime = suite.now
+ ctx := suite.chainA.GetContext().WithBlockTime(currentTime)
+ // Store the header's consensus state in client store before UpdateClient call
+ suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(ctx, clientID, heightPlus1, newHeader.ConsensusState())
+ },
+ expFrozen: false,
+ expPass: true,
+ },
+ {
+ name: "misbehaviour detection: header conflicts with existing consensus state",
+ setup: func(suite *TendermintTestSuite) {
+ clientState = types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, heightPlus1, commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
+ consensusState = types.NewConsensusState(suite.clientTime, commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), suite.valsHash)
+ newHeader = suite.chainA.CreateTMClientHeader(chainID, int64(heightPlus1.RevisionHeight), height, suite.headerTime, suite.valSet, suite.valSet, signers)
+ currentTime = suite.now
+ ctx := suite.chainA.GetContext().WithBlockTime(currentTime)
+ // Change the consensus state of header and store in client store to create a conflict
+ conflictConsState := newHeader.ConsensusState()
+ conflictConsState.Root = commitmenttypes.NewMerkleRoot([]byte("conflicting apphash"))
+ suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(ctx, clientID, heightPlus1, conflictConsState)
+ },
+ expFrozen: true,
+ expPass: true,
+ },
+ {
+ name: "misbehaviour detection: previous consensus state time is not before header time. time monotonicity violation",
+ setup: func(suite *TendermintTestSuite) {
+ clientState = types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
+ // create an intermediate consensus state with the same time as the newHeader to create a time violation.
+ // header time is after client time
+ consensusState = types.NewConsensusState(suite.clientTime, commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), suite.valsHash)
+ newHeader = suite.chainA.CreateTMClientHeader(chainID, int64(heightPlus5.RevisionHeight), height, suite.headerTime, suite.valSet, suite.valSet, signers)
+ currentTime = suite.now
+ prevConsensusState := types.NewConsensusState(suite.headerTime, commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), suite.valsHash)
+ ctx := suite.chainA.GetContext().WithBlockTime(currentTime)
+ suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(ctx, clientID, heightPlus1, prevConsensusState)
+ clientStore := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(ctx, clientID)
+ types.SetIterationKey(clientStore, heightPlus1)
+ },
+ expFrozen: true,
+ expPass: true,
+ },
+ {
+ name: "misbehaviour detection: next consensus state time is not after header time. time monotonicity violation",
+ setup: func(suite *TendermintTestSuite) {
+ clientState = types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
+ // create the next consensus state with the same time as the intermediate newHeader to create a time violation.
+ // header time is after clientTime
+ consensusState = types.NewConsensusState(suite.clientTime, commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), suite.valsHash)
+ newHeader = suite.chainA.CreateTMClientHeader(chainID, int64(heightPlus1.RevisionHeight), height, suite.headerTime, suite.valSet, suite.valSet, signers)
+ currentTime = suite.now
+ nextConsensusState := types.NewConsensusState(suite.headerTime, commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), suite.valsHash)
+ ctx := suite.chainA.GetContext().WithBlockTime(currentTime)
+ suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(ctx, clientID, heightPlus5, nextConsensusState)
+ clientStore := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(ctx, clientID)
+ types.SetIterationKey(clientStore, heightPlus5)
+ },
+ expFrozen: true,
+ expPass: true,
+ },
{
name: "unsuccessful update with incorrect header chain-id",
setup: func(suite *TendermintTestSuite) {
@@ -108,7 +179,8 @@ func (suite *TendermintTestSuite) TestCheckHeaderAndUpdateState() {
newHeader = suite.chainA.CreateTMClientHeader("ethermint", int64(heightPlus1.RevisionHeight), height, suite.headerTime, suite.valSet, suite.valSet, signers)
currentTime = suite.now
},
- expPass: false,
+ expFrozen: false,
+ expPass: false,
},
{
name: "unsuccessful update to a future revision",
@@ -128,7 +200,8 @@ func (suite *TendermintTestSuite) TestCheckHeaderAndUpdateState() {
newHeader = suite.chainA.CreateTMClientHeader(chainIDRevision1, 3, height, suite.headerTime, suite.valSet, suite.valSet, signers)
currentTime = suite.now
},
- expPass: false,
+ expFrozen: false,
+ expPass: false,
},
{
name: "unsuccessful update with next height: update header mismatches nextValSetHash",
@@ -138,7 +211,8 @@ func (suite *TendermintTestSuite) TestCheckHeaderAndUpdateState() {
newHeader = suite.chainA.CreateTMClientHeader(chainID, int64(heightPlus1.RevisionHeight), height, suite.headerTime, bothValSet, suite.valSet, bothSigners)
currentTime = suite.now
},
- expPass: false,
+ expFrozen: false,
+ expPass: false,
},
{
name: "unsuccessful update with next height: update header mismatches different nextValSetHash",
@@ -148,7 +222,8 @@ func (suite *TendermintTestSuite) TestCheckHeaderAndUpdateState() {
newHeader = suite.chainA.CreateTMClientHeader(chainID, int64(heightPlus1.RevisionHeight), height, suite.headerTime, suite.valSet, bothValSet, signers)
currentTime = suite.now
},
- expPass: false,
+ expFrozen: false,
+ expPass: false,
},
{
name: "unsuccessful update with future height: too much change in validator set",
@@ -158,7 +233,8 @@ func (suite *TendermintTestSuite) TestCheckHeaderAndUpdateState() {
newHeader = suite.chainA.CreateTMClientHeader(chainID, int64(heightPlus5.RevisionHeight), height, suite.headerTime, altValSet, suite.valSet, altSigners)
currentTime = suite.now
},
- expPass: false,
+ expFrozen: false,
+ expPass: false,
},
{
name: "unsuccessful updates, passed in incorrect trusted validators for given consensus state",
@@ -168,7 +244,8 @@ func (suite *TendermintTestSuite) TestCheckHeaderAndUpdateState() {
newHeader = suite.chainA.CreateTMClientHeader(chainID, int64(heightPlus5.RevisionHeight), height, suite.headerTime, bothValSet, bothValSet, bothSigners)
currentTime = suite.now
},
- expPass: false,
+ expFrozen: false,
+ expPass: false,
},
{
name: "unsuccessful update: trusting period has passed since last client timestamp",
@@ -179,7 +256,8 @@ func (suite *TendermintTestSuite) TestCheckHeaderAndUpdateState() {
// make current time pass trusting period from last timestamp on clientstate
currentTime = suite.now.Add(trustingPeriod)
},
- expPass: false,
+ expFrozen: false,
+ expPass: false,
},
{
name: "unsuccessful update: header timestamp is past current timestamp",
@@ -189,7 +267,8 @@ func (suite *TendermintTestSuite) TestCheckHeaderAndUpdateState() {
newHeader = suite.chainA.CreateTMClientHeader(chainID, int64(heightPlus1.RevisionHeight), height, suite.now.Add(time.Minute), suite.valSet, suite.valSet, signers)
currentTime = suite.now
},
- expPass: false,
+ expFrozen: false,
+ expPass: false,
},
{
name: "unsuccessful update: header timestamp is not past last client timestamp",
@@ -199,7 +278,8 @@ func (suite *TendermintTestSuite) TestCheckHeaderAndUpdateState() {
newHeader = suite.chainA.CreateTMClientHeader(chainID, int64(heightPlus1.RevisionHeight), height, suite.clientTime, suite.valSet, suite.valSet, signers)
currentTime = suite.now
},
- expPass: false,
+ expFrozen: false,
+ expPass: false,
},
{
name: "header basic validation failed",
@@ -211,7 +291,8 @@ func (suite *TendermintTestSuite) TestCheckHeaderAndUpdateState() {
newHeader.SignedHeader.Commit.Height = revisionHeight - 1
currentTime = suite.now
},
- expPass: false,
+ expFrozen: false,
+ expPass: false,
},
{
name: "header height < consensus height",
@@ -222,13 +303,14 @@ func (suite *TendermintTestSuite) TestCheckHeaderAndUpdateState() {
newHeader = suite.chainA.CreateTMClientHeader(chainID, int64(heightMinus1.RevisionHeight), height, suite.headerTime, suite.valSet, suite.valSet, signers)
currentTime = suite.now
},
- expPass: false,
+ expFrozen: false,
+ expPass: false,
},
}
for i, tc := range testCases {
tc := tc
- suite.Run(tc.name, func() {
+ suite.Run(fmt.Sprintf("Case: %s", tc.name), func() {
suite.SetupTest() // reset metadata writes
// Create bothValSet with both suite validator and altVal. Would be valid update
bothValSet = tmtypes.NewValidatorSet(append(suite.valSet.Validators, altVal))
@@ -265,17 +347,22 @@ func (suite *TendermintTestSuite) TestCheckHeaderAndUpdateState() {
if tc.expPass {
suite.Require().NoError(err, "valid test case %d failed: %s", i, tc.name)
- // Determine if clientState should be updated or not
- // TODO: check the entire Height struct once GetLatestHeight returns clienttypes.Height
- if height.GT(clientState.LatestHeight) {
- // Header Height is greater than clientState latest Height, clientState should be updated with header.GetHeight()
- suite.Require().Equal(height, newClientState.GetLatestHeight(), "clientstate height did not update")
- } else {
- // Update will add past consensus state, clientState should not be updated at all
- suite.Require().Equal(clientState.LatestHeight, newClientState.GetLatestHeight(), "client state height updated for past header")
- }
+ suite.Require().Equal(tc.expFrozen, !newClientState.(*types.ClientState).FrozenHeight.IsZero(), "client state status is unexpected after update")
+
+ // further writes only happen if update is not misbehaviour
+ if !tc.expFrozen {
+ // Determine if clientState should be updated or not
+ // TODO: check the entire Height struct once GetLatestHeight returns clienttypes.Height
+ if height.GT(clientState.LatestHeight) {
+ // Header Height is greater than clientState latest Height, clientState should be updated with header.GetHeight()
+ suite.Require().Equal(height, newClientState.GetLatestHeight(), "clientstate height did not update")
+ } else {
+ // Update will add past consensus state, clientState should not be updated at all
+ suite.Require().Equal(clientState.LatestHeight, newClientState.GetLatestHeight(), "client state height updated for past header")
+ }
- suite.Require().Equal(expectedConsensus, consensusState, "valid test case %d failed: %s", i, tc.name)
+ suite.Require().Equal(expectedConsensus, consensusState, "valid test case %d failed: %s", i, tc.name)
+ }
} else {
suite.Require().Error(err, "invalid test case %d passed: %s", i, tc.name)
suite.Require().Nil(newClientState, "invalid test case %d passed: %s", i, tc.name)
From 07d8747e516c12aba3157d2ffda45bf0620546c1 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?colin=20axn=C3=A9r?=
<25233464+colin-axner@users.noreply.github.com>
Date: Mon, 10 May 2021 17:53:18 +0200
Subject: [PATCH 051/393] remove Type(), Route(), GetSignBytes() (removed from
sdk.Msg interface) (#161)
* remove deprecated interface functions
* changelog
Co-authored-by: Aditya
---
CHANGELOG.md | 1 +
modules/core/02-client/types/msgs.go | 62 --------
modules/core/03-connection/types/events.go | 8 +-
modules/core/03-connection/types/msgs.go | 64 ---------
modules/core/04-channel/types/events.go | 23 +--
modules/core/04-channel/types/msgs.go | 160 ---------------------
modules/core/04-channel/types/msgs_test.go | 6 -
modules/core/keeper/msg_server.go | 4 +-
8 files changed, 19 insertions(+), 309 deletions(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 4bd8c426..3c219578 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -44,6 +44,7 @@ Ref: https://keepachangelog.com/en/1.0.0/
### API Breaking
+* (modules/core) [\#161](https://github.com/cosmos/ibc-go/pull/161) Remove Type(), Route(), GetSignBytes() from 02-client, 03-connection, and 04-channel messages.
* (modules) [\#140](https://github.com/cosmos/ibc-go/pull/140) IsFrozen() client state interface changed to Status(). gRPC `ClientStatus` route added.
* (modules/core) [\#109](https://github.com/cosmos/ibc-go/pull/109) Remove connection and channel handshake CLI commands.
* (modules) [\#107](https://github.com/cosmos/ibc-go/pull/107) Modify OnRecvPacket callback to return an acknowledgement which indicates if it is successful or not. Callback state changes are discarded for unsuccessful acknowledgements only.
diff --git a/modules/core/02-client/types/msgs.go b/modules/core/02-client/types/msgs.go
index 46538c95..e2ff1ca0 100644
--- a/modules/core/02-client/types/msgs.go
+++ b/modules/core/02-client/types/msgs.go
@@ -51,16 +51,6 @@ func NewMsgCreateClient(
}, nil
}
-// Route implements sdk.Msg
-func (msg MsgCreateClient) Route() string {
- return host.RouterKey
-}
-
-// Type implements sdk.Msg
-func (msg MsgCreateClient) Type() string {
- return TypeMsgCreateClient
-}
-
// ValidateBasic implements sdk.Msg
func (msg MsgCreateClient) ValidateBasic() error {
_, err := sdk.AccAddressFromBech32(msg.Signer)
@@ -90,12 +80,6 @@ func (msg MsgCreateClient) ValidateBasic() error {
return consensusState.ValidateBasic()
}
-// GetSignBytes implements sdk.Msg. The function will panic since it is used
-// for amino transaction verification which IBC does not support.
-func (msg MsgCreateClient) GetSignBytes() []byte {
- panic("IBC messages do not support amino")
-}
-
// GetSigners implements sdk.Msg
func (msg MsgCreateClient) GetSigners() []sdk.AccAddress {
accAddr, err := sdk.AccAddressFromBech32(msg.Signer)
@@ -132,16 +116,6 @@ func NewMsgUpdateClient(id string, header exported.Header, signer string) (*MsgU
}, nil
}
-// Route implements sdk.Msg
-func (msg MsgUpdateClient) Route() string {
- return host.RouterKey
-}
-
-// Type implements sdk.Msg
-func (msg MsgUpdateClient) Type() string {
- return TypeMsgUpdateClient
-}
-
// ValidateBasic implements sdk.Msg
func (msg MsgUpdateClient) ValidateBasic() error {
_, err := sdk.AccAddressFromBech32(msg.Signer)
@@ -161,12 +135,6 @@ func (msg MsgUpdateClient) ValidateBasic() error {
return host.ClientIdentifierValidator(msg.ClientId)
}
-// GetSignBytes implements sdk.Msg. The function will panic since it is used
-// for amino transaction verification which IBC does not support.
-func (msg MsgUpdateClient) GetSignBytes() []byte {
- panic("IBC messages do not support amino")
-}
-
// GetSigners implements sdk.Msg
func (msg MsgUpdateClient) GetSigners() []sdk.AccAddress {
accAddr, err := sdk.AccAddressFromBech32(msg.Signer)
@@ -205,16 +173,6 @@ func NewMsgUpgradeClient(clientID string, clientState exported.ClientState, cons
}, nil
}
-// Route implements sdk.Msg
-func (msg MsgUpgradeClient) Route() string {
- return host.RouterKey
-}
-
-// Type implements sdk.Msg
-func (msg MsgUpgradeClient) Type() string {
- return TypeMsgUpgradeClient
-}
-
// ValidateBasic implements sdk.Msg
func (msg MsgUpgradeClient) ValidateBasic() error {
// will not validate client state as committed client may not form a valid client state.
@@ -247,12 +205,6 @@ func (msg MsgUpgradeClient) ValidateBasic() error {
return host.ClientIdentifierValidator(msg.ClientId)
}
-// GetSignBytes implements sdk.Msg. The function will panic since it is used
-// for amino transaction verification which IBC does not support.
-func (msg MsgUpgradeClient) GetSignBytes() []byte {
- panic("IBC messages do not support amino")
-}
-
// GetSigners implements sdk.Msg
func (msg MsgUpgradeClient) GetSigners() []sdk.AccAddress {
accAddr, err := sdk.AccAddressFromBech32(msg.Signer)
@@ -289,14 +241,6 @@ func NewMsgSubmitMisbehaviour(clientID string, misbehaviour exported.Misbehaviou
}, nil
}
-// Route returns the MsgSubmitClientMisbehaviour's route.
-func (msg MsgSubmitMisbehaviour) Route() string { return host.RouterKey }
-
-// Type returns the MsgSubmitMisbehaviour's type.
-func (msg MsgSubmitMisbehaviour) Type() string {
- return TypeMsgSubmitMisbehaviour
-}
-
// ValidateBasic performs basic (non-state-dependant) validation on a MsgSubmitMisbehaviour.
func (msg MsgSubmitMisbehaviour) ValidateBasic() error {
_, err := sdk.AccAddressFromBech32(msg.Signer)
@@ -321,12 +265,6 @@ func (msg MsgSubmitMisbehaviour) ValidateBasic() error {
return host.ClientIdentifierValidator(msg.ClientId)
}
-// GetSignBytes implements sdk.Msg. The function will panic since it is used
-// for amino transaction verification which IBC does not support.
-func (msg MsgSubmitMisbehaviour) GetSignBytes() []byte {
- panic("IBC messages do not support amino")
-}
-
// GetSigners returns the single expected signer for a MsgSubmitMisbehaviour.
func (msg MsgSubmitMisbehaviour) GetSigners() []sdk.AccAddress {
accAddr, err := sdk.AccAddressFromBech32(msg.Signer)
diff --git a/modules/core/03-connection/types/events.go b/modules/core/03-connection/types/events.go
index 37973ed5..94615c43 100644
--- a/modules/core/03-connection/types/events.go
+++ b/modules/core/03-connection/types/events.go
@@ -16,10 +16,10 @@ const (
// IBC connection events vars
var (
- EventTypeConnectionOpenInit = MsgConnectionOpenInit{}.Type()
- EventTypeConnectionOpenTry = MsgConnectionOpenTry{}.Type()
- EventTypeConnectionOpenAck = MsgConnectionOpenAck{}.Type()
- EventTypeConnectionOpenConfirm = MsgConnectionOpenConfirm{}.Type()
+ EventTypeConnectionOpenInit = "connection_open_init"
+ EventTypeConnectionOpenTry = "connection_open_try"
+ EventTypeConnectionOpenAck = "connection_open_ack"
+ EventTypeConnectionOpenConfirm = "connection_open_confirm"
AttributeValueCategory = fmt.Sprintf("%s_%s", host.ModuleName, SubModuleName)
)
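With the Type() methods gone, the event type variables above carry plain string literals, but event emission itself is unchanged. A small sketch (the connection ID value is illustrative):

```go
// Emitting a handshake event with the string-backed constant; behaviour is
// identical to before, only the source of the event type string changed.
ctx.EventManager().EmitEvent(
	sdk.NewEvent(
		types.EventTypeConnectionOpenInit,
		sdk.NewAttribute(types.AttributeKeyConnectionID, "connection-0"),
	),
)
```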
diff --git a/modules/core/03-connection/types/msgs.go b/modules/core/03-connection/types/msgs.go
index 0ef6f06b..7d6f88fa 100644
--- a/modules/core/03-connection/types/msgs.go
+++ b/modules/core/03-connection/types/msgs.go
@@ -39,16 +39,6 @@ func NewMsgConnectionOpenInit(
}
}
-// Route implements sdk.Msg
-func (msg MsgConnectionOpenInit) Route() string {
- return host.RouterKey
-}
-
-// Type implements sdk.Msg
-func (msg MsgConnectionOpenInit) Type() string {
- return "connection_open_init"
-}
-
// ValidateBasic implements sdk.Msg.
func (msg MsgConnectionOpenInit) ValidateBasic() error {
if err := host.ClientIdentifierValidator(msg.ClientId); err != nil {
@@ -71,12 +61,6 @@ func (msg MsgConnectionOpenInit) ValidateBasic() error {
return msg.Counterparty.ValidateBasic()
}
-// GetSignBytes implements sdk.Msg. The function will panic since it is used
-// for amino transaction verification which IBC does not support.
-func (msg MsgConnectionOpenInit) GetSignBytes() []byte {
- panic("IBC messages do not support amino")
-}
-
// GetSigners implements sdk.Msg
func (msg MsgConnectionOpenInit) GetSigners() []sdk.AccAddress {
accAddr, err := sdk.AccAddressFromBech32(msg.Signer)
@@ -114,16 +98,6 @@ func NewMsgConnectionOpenTry(
}
}
-// Route implements sdk.Msg
-func (msg MsgConnectionOpenTry) Route() string {
- return host.RouterKey
-}
-
-// Type implements sdk.Msg
-func (msg MsgConnectionOpenTry) Type() string {
- return "connection_open_try"
-}
-
// ValidateBasic implements sdk.Msg
func (msg MsgConnectionOpenTry) ValidateBasic() error {
// an empty connection identifier indicates that a connection identifier should be generated
@@ -184,12 +158,6 @@ func (msg MsgConnectionOpenTry) UnpackInterfaces(unpacker codectypes.AnyUnpacker
return unpacker.UnpackAny(msg.ClientState, new(exported.ClientState))
}
-// GetSignBytes implements sdk.Msg. The function will panic since it is used
-// for amino transaction verification which IBC does not support.
-func (msg MsgConnectionOpenTry) GetSignBytes() []byte {
- panic("IBC messages do not support amino")
-}
-
// GetSigners implements sdk.Msg
func (msg MsgConnectionOpenTry) GetSigners() []sdk.AccAddress {
accAddr, err := sdk.AccAddressFromBech32(msg.Signer)
@@ -228,16 +196,6 @@ func (msg MsgConnectionOpenAck) UnpackInterfaces(unpacker codectypes.AnyUnpacker
return unpacker.UnpackAny(msg.ClientState, new(exported.ClientState))
}
-// Route implements sdk.Msg
-func (msg MsgConnectionOpenAck) Route() string {
- return host.RouterKey
-}
-
-// Type implements sdk.Msg
-func (msg MsgConnectionOpenAck) Type() string {
- return "connection_open_ack"
-}
-
// ValidateBasic implements sdk.Msg
func (msg MsgConnectionOpenAck) ValidateBasic() error {
if !IsValidConnectionID(msg.ConnectionId) {
@@ -281,12 +239,6 @@ func (msg MsgConnectionOpenAck) ValidateBasic() error {
return nil
}
-// GetSignBytes implements sdk.Msg. The function will panic since it is used
-// for amino transaction verification which IBC does not support.
-func (msg MsgConnectionOpenAck) GetSignBytes() []byte {
- panic("IBC messages do not support amino")
-}
-
// GetSigners implements sdk.Msg
func (msg MsgConnectionOpenAck) GetSigners() []sdk.AccAddress {
accAddr, err := sdk.AccAddressFromBech32(msg.Signer)
@@ -310,16 +262,6 @@ func NewMsgConnectionOpenConfirm(
}
}
-// Route implements sdk.Msg
-func (msg MsgConnectionOpenConfirm) Route() string {
- return host.RouterKey
-}
-
-// Type implements sdk.Msg
-func (msg MsgConnectionOpenConfirm) Type() string {
- return "connection_open_confirm"
-}
-
// ValidateBasic implements sdk.Msg
func (msg MsgConnectionOpenConfirm) ValidateBasic() error {
if !IsValidConnectionID(msg.ConnectionId) {
@@ -338,12 +280,6 @@ func (msg MsgConnectionOpenConfirm) ValidateBasic() error {
return nil
}
-// GetSignBytes implements sdk.Msg. The function will panic since it is used
-// for amino transaction verification which IBC does not support.
-func (msg MsgConnectionOpenConfirm) GetSignBytes() []byte {
- panic("IBC messages do not support amino")
-}
-
// GetSigners implements sdk.Msg
func (msg MsgConnectionOpenConfirm) GetSigners() []sdk.AccAddress {
accAddr, err := sdk.AccAddressFromBech32(msg.Signer)
diff --git a/modules/core/04-channel/types/events.go b/modules/core/04-channel/types/events.go
index 6229ebaa..1ef14346 100644
--- a/modules/core/04-channel/types/events.go
+++ b/modules/core/04-channel/types/events.go
@@ -14,11 +14,12 @@ const (
AttributeCounterpartyPortID = "counterparty_port_id"
AttributeCounterpartyChannelID = "counterparty_channel_id"
- EventTypeSendPacket = "send_packet"
- EventTypeRecvPacket = "recv_packet"
- EventTypeWriteAck = "write_acknowledgement"
- EventTypeAcknowledgePacket = "acknowledge_packet"
- EventTypeTimeoutPacket = "timeout_packet"
+ EventTypeSendPacket = "send_packet"
+ EventTypeRecvPacket = "recv_packet"
+ EventTypeWriteAck = "write_acknowledgement"
+ EventTypeAcknowledgePacket = "acknowledge_packet"
+ EventTypeTimeoutPacket = "timeout_packet"
+ EventTypeTimeoutPacketOnClose = "timeout_on_close_packet"
// NOTE: DEPRECATED in favor of AttributeKeyDataHex
AttributeKeyData = "packet_data"
@@ -38,12 +39,12 @@ const (
// IBC channel events vars
var (
- EventTypeChannelOpenInit = MsgChannelOpenInit{}.Type()
- EventTypeChannelOpenTry = MsgChannelOpenTry{}.Type()
- EventTypeChannelOpenAck = MsgChannelOpenAck{}.Type()
- EventTypeChannelOpenConfirm = MsgChannelOpenConfirm{}.Type()
- EventTypeChannelCloseInit = MsgChannelCloseInit{}.Type()
- EventTypeChannelCloseConfirm = MsgChannelCloseConfirm{}.Type()
+ EventTypeChannelOpenInit = "channel_open_init"
+ EventTypeChannelOpenTry = "channel_open_try"
+ EventTypeChannelOpenAck = "channel_open_ack"
+ EventTypeChannelOpenConfirm = "channel_open_confirm"
+ EventTypeChannelCloseInit = "channel_close_init"
+ EventTypeChannelCloseConfirm = "channel_close_confirm"
AttributeValueCategory = fmt.Sprintf("%s_%s", host.ModuleName, SubModuleName)
)
diff --git a/modules/core/04-channel/types/msgs.go b/modules/core/04-channel/types/msgs.go
index 151b5582..784aa6f9 100644
--- a/modules/core/04-channel/types/msgs.go
+++ b/modules/core/04-channel/types/msgs.go
@@ -28,16 +28,6 @@ func NewMsgChannelOpenInit(
}
}
-// Route implements sdk.Msg
-func (msg MsgChannelOpenInit) Route() string {
- return host.RouterKey
-}
-
-// Type implements sdk.Msg
-func (msg MsgChannelOpenInit) Type() string {
- return "channel_open_init"
-}
-
// ValidateBasic implements sdk.Msg
func (msg MsgChannelOpenInit) ValidateBasic() error {
if err := host.PortIdentifierValidator(msg.PortId); err != nil {
@@ -59,12 +49,6 @@ func (msg MsgChannelOpenInit) ValidateBasic() error {
return msg.Channel.ValidateBasic()
}
-// GetSignBytes implements sdk.Msg. The function will panic since it is used
-// for amino transaction verification which IBC does not support.
-func (msg MsgChannelOpenInit) GetSignBytes() []byte {
- panic("IBC messages do not support amino")
-}
-
// GetSigners implements sdk.Msg
func (msg MsgChannelOpenInit) GetSigners() []sdk.AccAddress {
signer, err := sdk.AccAddressFromBech32(msg.Signer)
@@ -96,16 +80,6 @@ func NewMsgChannelOpenTry(
}
}
-// Route implements sdk.Msg
-func (msg MsgChannelOpenTry) Route() string {
- return host.RouterKey
-}
-
-// Type implements sdk.Msg
-func (msg MsgChannelOpenTry) Type() string {
- return "channel_open_try"
-}
-
// ValidateBasic implements sdk.Msg
func (msg MsgChannelOpenTry) ValidateBasic() error {
if err := host.PortIdentifierValidator(msg.PortId); err != nil {
@@ -140,12 +114,6 @@ func (msg MsgChannelOpenTry) ValidateBasic() error {
return msg.Channel.ValidateBasic()
}
-// GetSignBytes implements sdk.Msg. The function will panic since it is used
-// for amino transaction verification which IBC does not support.
-func (msg MsgChannelOpenTry) GetSignBytes() []byte {
- panic("IBC messages do not support amino")
-}
-
// GetSigners implements sdk.Msg
func (msg MsgChannelOpenTry) GetSigners() []sdk.AccAddress {
signer, err := sdk.AccAddressFromBech32(msg.Signer)
@@ -174,16 +142,6 @@ func NewMsgChannelOpenAck(
}
}
-// Route implements sdk.Msg
-func (msg MsgChannelOpenAck) Route() string {
- return host.RouterKey
-}
-
-// Type implements sdk.Msg
-func (msg MsgChannelOpenAck) Type() string {
- return "channel_open_ack"
-}
-
// ValidateBasic implements sdk.Msg
func (msg MsgChannelOpenAck) ValidateBasic() error {
if err := host.PortIdentifierValidator(msg.PortId); err != nil {
@@ -208,12 +166,6 @@ func (msg MsgChannelOpenAck) ValidateBasic() error {
return nil
}
-// GetSignBytes implements sdk.Msg. The function will panic since it is used
-// for amino transaction verification which IBC does not support.
-func (msg MsgChannelOpenAck) GetSignBytes() []byte {
- panic("IBC messages do not support amino")
-}
-
// GetSigners implements sdk.Msg
func (msg MsgChannelOpenAck) GetSigners() []sdk.AccAddress {
signer, err := sdk.AccAddressFromBech32(msg.Signer)
@@ -240,16 +192,6 @@ func NewMsgChannelOpenConfirm(
}
}
-// Route implements sdk.Msg
-func (msg MsgChannelOpenConfirm) Route() string {
- return host.RouterKey
-}
-
-// Type implements sdk.Msg
-func (msg MsgChannelOpenConfirm) Type() string {
- return "channel_open_confirm"
-}
-
// ValidateBasic implements sdk.Msg
func (msg MsgChannelOpenConfirm) ValidateBasic() error {
if err := host.PortIdentifierValidator(msg.PortId); err != nil {
@@ -271,12 +213,6 @@ func (msg MsgChannelOpenConfirm) ValidateBasic() error {
return nil
}
-// GetSignBytes implements sdk.Msg. The function will panic since it is used
-// for amino transaction verification which IBC does not support.
-func (msg MsgChannelOpenConfirm) GetSignBytes() []byte {
- panic("IBC messages do not support amino")
-}
-
// GetSigners implements sdk.Msg
func (msg MsgChannelOpenConfirm) GetSigners() []sdk.AccAddress {
signer, err := sdk.AccAddressFromBech32(msg.Signer)
@@ -300,16 +236,6 @@ func NewMsgChannelCloseInit(
}
}
-// Route implements sdk.Msg
-func (msg MsgChannelCloseInit) Route() string {
- return host.RouterKey
-}
-
-// Type implements sdk.Msg
-func (msg MsgChannelCloseInit) Type() string {
- return "channel_close_init"
-}
-
// ValidateBasic implements sdk.Msg
func (msg MsgChannelCloseInit) ValidateBasic() error {
if err := host.PortIdentifierValidator(msg.PortId); err != nil {
@@ -325,12 +251,6 @@ func (msg MsgChannelCloseInit) ValidateBasic() error {
return nil
}
-// GetSignBytes implements sdk.Msg. The function will panic since it is used
-// for amino transaction verification which IBC does not support.
-func (msg MsgChannelCloseInit) GetSignBytes() []byte {
- panic("IBC messages do not support amino")
-}
-
// GetSigners implements sdk.Msg
func (msg MsgChannelCloseInit) GetSigners() []sdk.AccAddress {
signer, err := sdk.AccAddressFromBech32(msg.Signer)
@@ -357,16 +277,6 @@ func NewMsgChannelCloseConfirm(
}
}
-// Route implements sdk.Msg
-func (msg MsgChannelCloseConfirm) Route() string {
- return host.RouterKey
-}
-
-// Type implements sdk.Msg
-func (msg MsgChannelCloseConfirm) Type() string {
- return "channel_close_confirm"
-}
-
// ValidateBasic implements sdk.Msg
func (msg MsgChannelCloseConfirm) ValidateBasic() error {
if err := host.PortIdentifierValidator(msg.PortId); err != nil {
@@ -388,12 +298,6 @@ func (msg MsgChannelCloseConfirm) ValidateBasic() error {
return nil
}
-// GetSignBytes implements sdk.Msg. The function will panic since it is used
-// for amino transaction verification which IBC does not support.
-func (msg MsgChannelCloseConfirm) GetSignBytes() []byte {
- panic("IBC messages do not support amino")
-}
-
// GetSigners implements sdk.Msg
func (msg MsgChannelCloseConfirm) GetSigners() []sdk.AccAddress {
signer, err := sdk.AccAddressFromBech32(msg.Signer)
@@ -419,11 +323,6 @@ func NewMsgRecvPacket(
}
}
-// Route implements sdk.Msg
-func (msg MsgRecvPacket) Route() string {
- return host.RouterKey
-}
-
// ValidateBasic implements sdk.Msg
func (msg MsgRecvPacket) ValidateBasic() error {
if len(msg.ProofCommitment) == 0 {
@@ -439,12 +338,6 @@ func (msg MsgRecvPacket) ValidateBasic() error {
return msg.Packet.ValidateBasic()
}
-// GetSignBytes implements sdk.Msg. The function will panic since it is used
-// for amino transaction verification which IBC does not support.
-func (msg MsgRecvPacket) GetSignBytes() []byte {
- panic("IBC messages do not support amino")
-}
-
// GetDataSignBytes returns the base64-encoded bytes used for the
// data field when signing the packet.
func (msg MsgRecvPacket) GetDataSignBytes() []byte {
@@ -461,11 +354,6 @@ func (msg MsgRecvPacket) GetSigners() []sdk.AccAddress {
return []sdk.AccAddress{signer}
}
-// Type implements sdk.Msg
-func (msg MsgRecvPacket) Type() string {
- return "recv_packet"
-}
-
var _ sdk.Msg = &MsgTimeout{}
// NewMsgTimeout constructs new MsgTimeout
@@ -483,11 +371,6 @@ func NewMsgTimeout(
}
}
-// Route implements sdk.Msg
-func (msg MsgTimeout) Route() string {
- return host.RouterKey
-}
-
// ValidateBasic implements sdk.Msg
func (msg MsgTimeout) ValidateBasic() error {
if len(msg.ProofUnreceived) == 0 {
@@ -506,12 +389,6 @@ func (msg MsgTimeout) ValidateBasic() error {
return msg.Packet.ValidateBasic()
}
-// GetSignBytes implements sdk.Msg. The function will panic since it is used
-// for amino transaction verification which IBC does not support.
-func (msg MsgTimeout) GetSignBytes() []byte {
- panic("IBC messages do not support amino")
-}
-
// GetSigners implements sdk.Msg
func (msg MsgTimeout) GetSigners() []sdk.AccAddress {
signer, err := sdk.AccAddressFromBech32(msg.Signer)
@@ -521,11 +398,6 @@ func (msg MsgTimeout) GetSigners() []sdk.AccAddress {
return []sdk.AccAddress{signer}
}
-// Type implements sdk.Msg
-func (msg MsgTimeout) Type() string {
- return "timeout_packet"
-}
-
// NewMsgTimeoutOnClose constructs new MsgTimeoutOnClose
// nolint:interfacer
func NewMsgTimeoutOnClose(
@@ -543,11 +415,6 @@ func NewMsgTimeoutOnClose(
}
}
-// Route implements sdk.Msg
-func (msg MsgTimeoutOnClose) Route() string {
- return host.RouterKey
-}
-
// ValidateBasic implements sdk.Msg
func (msg MsgTimeoutOnClose) ValidateBasic() error {
if msg.NextSequenceRecv == 0 {
@@ -569,12 +436,6 @@ func (msg MsgTimeoutOnClose) ValidateBasic() error {
return msg.Packet.ValidateBasic()
}
-// GetSignBytes implements sdk.Msg. The function will panic since it is used
-// for amino transaction verification which IBC does not support.
-func (msg MsgTimeoutOnClose) GetSignBytes() []byte {
- panic("IBC messages do not support amino")
-}
-
// GetSigners implements sdk.Msg
func (msg MsgTimeoutOnClose) GetSigners() []sdk.AccAddress {
signer, err := sdk.AccAddressFromBech32(msg.Signer)
@@ -584,11 +445,6 @@ func (msg MsgTimeoutOnClose) GetSigners() []sdk.AccAddress {
return []sdk.AccAddress{signer}
}
-// Type implements sdk.Msg
-func (msg MsgTimeoutOnClose) Type() string {
- return "timeout_on_close_packet"
-}
-
var _ sdk.Msg = &MsgAcknowledgement{}
// NewMsgAcknowledgement constructs a new MsgAcknowledgement
@@ -608,11 +464,6 @@ func NewMsgAcknowledgement(
}
}
-// Route implements sdk.Msg
-func (msg MsgAcknowledgement) Route() string {
- return host.RouterKey
-}
-
// ValidateBasic implements sdk.Msg
func (msg MsgAcknowledgement) ValidateBasic() error {
if len(msg.ProofAcked) == 0 {
@@ -631,12 +482,6 @@ func (msg MsgAcknowledgement) ValidateBasic() error {
return msg.Packet.ValidateBasic()
}
-// GetSignBytes implements sdk.Msg. The function will panic since it is used
-// for amino transaction verification which IBC does not support.
-func (msg MsgAcknowledgement) GetSignBytes() []byte {
- panic("IBC messages do not support amino")
-}
-
// GetSigners implements sdk.Msg
func (msg MsgAcknowledgement) GetSigners() []sdk.AccAddress {
signer, err := sdk.AccAddressFromBech32(msg.Signer)
@@ -645,8 +490,3 @@ func (msg MsgAcknowledgement) GetSigners() []sdk.AccAddress {
}
return []sdk.AccAddress{signer}
}
-
-// Type implements sdk.Msg
-func (msg MsgAcknowledgement) Type() string {
- return "acknowledge_packet"
-}
diff --git a/modules/core/04-channel/types/msgs_test.go b/modules/core/04-channel/types/msgs_test.go
index a296520e..10401ebc 100644
--- a/modules/core/04-channel/types/msgs_test.go
+++ b/modules/core/04-channel/types/msgs_test.go
@@ -315,12 +315,6 @@ func (suite *TypesTestSuite) TestMsgChannelCloseConfirmValidateBasic() {
}
}
-func (suite *TypesTestSuite) TestMsgRecvPacketType() {
- msg := types.NewMsgRecvPacket(packet, suite.proof, height, addr)
-
- suite.Equal("recv_packet", msg.Type())
-}
-
func (suite *TypesTestSuite) TestMsgRecvPacketValidateBasic() {
testCases := []struct {
name string
diff --git a/modules/core/keeper/msg_server.go b/modules/core/keeper/msg_server.go
index a64cb2ec..8c3d879a 100644
--- a/modules/core/keeper/msg_server.go
+++ b/modules/core/keeper/msg_server.go
@@ -462,7 +462,7 @@ func (k Keeper) RecvPacket(goCtx context.Context, msg *channeltypes.MsgRecvPacke
defer func() {
telemetry.IncrCounterWithLabels(
- []string{"tx", "msg", "ibc", msg.Type()},
+ []string{"tx", "msg", "ibc", channeltypes.EventTypeRecvPacket},
1,
[]metrics.Label{
telemetry.NewLabel("source-port", msg.Packet.SourcePort),
@@ -604,7 +604,7 @@ func (k Keeper) Acknowledgement(goCtx context.Context, msg *channeltypes.MsgAckn
defer func() {
telemetry.IncrCounterWithLabels(
- []string{"tx", "msg", "ibc", msg.Type()},
+ []string{"tx", "msg", "ibc", channeltypes.EventTypeAcknowledgePacket},
1,
[]metrics.Label{
telemetry.NewLabel("source-port", msg.Packet.SourcePort),
From 67bcf560a4b3a3cddaffa5a579cc0106b4ae0027 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?colin=20axn=C3=A9r?=
<25233464+colin-axner@users.noreply.github.com>
Date: Mon, 10 May 2021 18:01:15 +0200
Subject: [PATCH 052/393] remove Handler types (#162)
* remove unused handler
* remove transfer handler
* changelog
---
CHANGELOG.md | 1 +
modules/apps/transfer/handler.go | 23 -----
modules/apps/transfer/module.go | 2 +-
.../{handler_test.go => transfer_test.go} | 0
modules/core/handler.go | 98 -------------------
modules/core/module.go | 2 +-
6 files changed, 3 insertions(+), 123 deletions(-)
delete mode 100644 modules/apps/transfer/handler.go
rename modules/apps/transfer/{handler_test.go => transfer_test.go} (100%)
delete mode 100644 modules/core/handler.go
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 3c219578..0b2866e6 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -44,6 +44,7 @@ Ref: https://keepachangelog.com/en/1.0.0/
### API Breaking
+* (modules) [\#162](https://github.com/cosmos/ibc-go/pull/162) Remove deprecated Handler types in core IBC and the ICS 20 transfer module.
* (modules/core) [\#161](https://github.com/cosmos/ibc-go/pull/161) Remove Type(), Route(), GetSignBytes() from 02-client, 03-connection, and 04-channel messages.
* (modules) [\#140](https://github.com/cosmos/ibc-go/pull/140) IsFrozen() client state interface changed to Status(). gRPC `ClientStatus` route added.
* (modules/core) [\#109](https://github.com/cosmos/ibc-go/pull/109) Remove connection and channel handshake CLI commands.
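Removing the handlers works because messages are already dispatched through the Msg service router, so the legacy Route()-based path was redundant. A minimal sketch of the registration that takes over, mirroring the SDK module pattern of the time (helper names may differ across SDK versions):

```go
// RegisterServices wires the module's Msg and Query services into the
// configurator; BaseApp's MsgServiceRouter then dispatches transactions,
// so no sdk.Handler is required.
func (am AppModule) RegisterServices(cfg module.Configurator) {
	types.RegisterMsgServer(cfg.MsgServer(), am.keeper)
	types.RegisterQueryServer(cfg.QueryServer(), am.keeper)
}
```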
diff --git a/modules/apps/transfer/handler.go b/modules/apps/transfer/handler.go
deleted file mode 100644
index f6a0ace3..00000000
--- a/modules/apps/transfer/handler.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package transfer
-
-import (
- sdk "github.com/cosmos/cosmos-sdk/types"
- sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- "github.com/cosmos/ibc-go/modules/apps/transfer/types"
-)
-
-// NewHandler returns sdk.Handler for IBC token transfer module messages
-func NewHandler(k types.MsgServer) sdk.Handler {
- return func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) {
- ctx = ctx.WithEventManager(sdk.NewEventManager())
-
- switch msg := msg.(type) {
- case *types.MsgTransfer:
- res, err := k.Transfer(sdk.WrapSDKContext(ctx), msg)
- return sdk.WrapServiceResult(ctx, res, err)
-
- default:
- return nil, sdkerrors.Wrapf(sdkerrors.ErrUnknownRequest, "unrecognized ICS-20 transfer message type: %T", msg)
- }
- }
-}
diff --git a/modules/apps/transfer/module.go b/modules/apps/transfer/module.go
index c19a740b..b0addfaf 100644
--- a/modules/apps/transfer/module.go
+++ b/modules/apps/transfer/module.go
@@ -109,7 +109,7 @@ func (AppModule) RegisterInvariants(ir sdk.InvariantRegistry) {
// Route implements the AppModule interface
func (am AppModule) Route() sdk.Route {
- return sdk.NewRoute(types.RouterKey, NewHandler(am.keeper))
+ return sdk.Route{}
}
// QuerierRoute implements the AppModule interface
diff --git a/modules/apps/transfer/handler_test.go b/modules/apps/transfer/transfer_test.go
similarity index 100%
rename from modules/apps/transfer/handler_test.go
rename to modules/apps/transfer/transfer_test.go
diff --git a/modules/core/handler.go b/modules/core/handler.go
deleted file mode 100644
index 3384bbcf..00000000
--- a/modules/core/handler.go
+++ /dev/null
@@ -1,98 +0,0 @@
-package ibc
-
-import (
- sdk "github.com/cosmos/cosmos-sdk/types"
- sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
- connectiontypes "github.com/cosmos/ibc-go/modules/core/03-connection/types"
- channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
- "github.com/cosmos/ibc-go/modules/core/keeper"
-)
-
-// NewHandler defines the IBC handler
-func NewHandler(k keeper.Keeper) sdk.Handler {
- return func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) {
- ctx = ctx.WithEventManager(sdk.NewEventManager())
-
- switch msg := msg.(type) {
- // IBC client msg interface types
- case *clienttypes.MsgCreateClient:
- res, err := k.CreateClient(sdk.WrapSDKContext(ctx), msg)
- return sdk.WrapServiceResult(ctx, res, err)
-
- case *clienttypes.MsgUpdateClient:
- res, err := k.UpdateClient(sdk.WrapSDKContext(ctx), msg)
- return sdk.WrapServiceResult(ctx, res, err)
-
- case *clienttypes.MsgUpgradeClient:
- res, err := k.UpgradeClient(sdk.WrapSDKContext(ctx), msg)
- return sdk.WrapServiceResult(ctx, res, err)
-
- case *clienttypes.MsgSubmitMisbehaviour:
- res, err := k.SubmitMisbehaviour(sdk.WrapSDKContext(ctx), msg)
- return sdk.WrapServiceResult(ctx, res, err)
-
- // IBC connection msgs
- case *connectiontypes.MsgConnectionOpenInit:
- res, err := k.ConnectionOpenInit(sdk.WrapSDKContext(ctx), msg)
- return sdk.WrapServiceResult(ctx, res, err)
-
- case *connectiontypes.MsgConnectionOpenTry:
- res, err := k.ConnectionOpenTry(sdk.WrapSDKContext(ctx), msg)
- return sdk.WrapServiceResult(ctx, res, err)
-
- case *connectiontypes.MsgConnectionOpenAck:
- res, err := k.ConnectionOpenAck(sdk.WrapSDKContext(ctx), msg)
- return sdk.WrapServiceResult(ctx, res, err)
-
- case *connectiontypes.MsgConnectionOpenConfirm:
- res, err := k.ConnectionOpenConfirm(sdk.WrapSDKContext(ctx), msg)
- return sdk.WrapServiceResult(ctx, res, err)
-
- // IBC channel msgs
- case *channeltypes.MsgChannelOpenInit:
- res, err := k.ChannelOpenInit(sdk.WrapSDKContext(ctx), msg)
- return sdk.WrapServiceResult(ctx, res, err)
-
- case *channeltypes.MsgChannelOpenTry:
- res, err := k.ChannelOpenTry(sdk.WrapSDKContext(ctx), msg)
- return sdk.WrapServiceResult(ctx, res, err)
-
- case *channeltypes.MsgChannelOpenAck:
- res, err := k.ChannelOpenAck(sdk.WrapSDKContext(ctx), msg)
- return sdk.WrapServiceResult(ctx, res, err)
-
- case *channeltypes.MsgChannelOpenConfirm:
- res, err := k.ChannelOpenConfirm(sdk.WrapSDKContext(ctx), msg)
- return sdk.WrapServiceResult(ctx, res, err)
-
- case *channeltypes.MsgChannelCloseInit:
- res, err := k.ChannelCloseInit(sdk.WrapSDKContext(ctx), msg)
- return sdk.WrapServiceResult(ctx, res, err)
-
- case *channeltypes.MsgChannelCloseConfirm:
- res, err := k.ChannelCloseConfirm(sdk.WrapSDKContext(ctx), msg)
- return sdk.WrapServiceResult(ctx, res, err)
-
- // IBC packet msgs get routed to the appropriate module callback
- case *channeltypes.MsgRecvPacket:
- res, err := k.RecvPacket(sdk.WrapSDKContext(ctx), msg)
- return sdk.WrapServiceResult(ctx, res, err)
-
- case *channeltypes.MsgAcknowledgement:
- res, err := k.Acknowledgement(sdk.WrapSDKContext(ctx), msg)
- return sdk.WrapServiceResult(ctx, res, err)
-
- case *channeltypes.MsgTimeout:
- res, err := k.Timeout(sdk.WrapSDKContext(ctx), msg)
- return sdk.WrapServiceResult(ctx, res, err)
-
- case *channeltypes.MsgTimeoutOnClose:
- res, err := k.TimeoutOnClose(sdk.WrapSDKContext(ctx), msg)
- return sdk.WrapServiceResult(ctx, res, err)
-
- default:
- return nil, sdkerrors.Wrapf(sdkerrors.ErrUnknownRequest, "unrecognized IBC message type: %T", msg)
- }
- }
-}
diff --git a/modules/core/module.go b/modules/core/module.go
index c00d6448..1d338dcb 100644
--- a/modules/core/module.go
+++ b/modules/core/module.go
@@ -117,7 +117,7 @@ func (am AppModule) RegisterInvariants(ir sdk.InvariantRegistry) {
// Route returns the message routing key for the ibc module.
func (am AppModule) Route() sdk.Route {
- return sdk.NewRoute(host.RouterKey, NewHandler(*am.keeper))
+ return sdk.Route{}
}
// QuerierRoute returns the ibc module's querier route name.
From 5991e8630dbff5da25e4f2ce22c4cc4a2af93967 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?colin=20axn=C3=A9r?=
<25233464+colin-axner@users.noreply.github.com>
Date: Mon, 10 May 2021 18:51:56 +0200
Subject: [PATCH 053/393] fix solo machine as per ics spec changes (#153)
* fix solo machine as per ics spec changes
* add changelog
---
CHANGELOG.md | 1 +
.../light-clients/06-solomachine/types/client_state.go | 6 ++++++
.../06-solomachine/types/client_state_test.go | 10 ++++++++--
3 files changed, 15 insertions(+), 2 deletions(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 0b2866e6..c5cf8818 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -38,6 +38,7 @@ Ref: https://keepachangelog.com/en/1.0.0/
### Bug Fixes
+* (modules/light-clients/06-solomachine) [\#153](https://github.com/cosmos/ibc-go/pull/153) Fix solo machine proof height sequence mismatch bug.
* (modules/light-clients/06-solomachine) [\#122](https://github.com/cosmos/ibc-go/pull/122) Fix solo machine merkle prefix casting bug.
* (modules/light-clients/06-solomachine) [\#120](https://github.com/cosmos/ibc-go/pull/120) Fix solo machine handshake verification bug.
diff --git a/modules/light-clients/06-solomachine/types/client_state.go b/modules/light-clients/06-solomachine/types/client_state.go
index 7ce5fa1f..8a757494 100644
--- a/modules/light-clients/06-solomachine/types/client_state.go
+++ b/modules/light-clients/06-solomachine/types/client_state.go
@@ -121,6 +121,9 @@ func (cs *ClientState) VerifyClientState(
proof []byte,
clientState exported.ClientState,
) error {
+ // NOTE: the proof height sequence is incremented by one due to the connection handshake verification ordering
+ height = clienttypes.NewHeight(height.GetRevisionNumber(), height.GetRevisionHeight()+1)
+
publicKey, sigData, timestamp, sequence, err := produceVerificationArgs(cdc, cs, height, prefix, proof)
if err != nil {
return err
@@ -159,6 +162,9 @@ func (cs *ClientState) VerifyClientConsensusState(
proof []byte,
consensusState exported.ConsensusState,
) error {
+ // NOTE: the proof height sequence is incremented by two due to the connection handshake verification ordering
+ height = clienttypes.NewHeight(height.GetRevisionNumber(), height.GetRevisionHeight()+2)
+
publicKey, sigData, timestamp, sequence, err := produceVerificationArgs(cdc, cs, height, prefix, proof)
if err != nil {
return err
diff --git a/modules/light-clients/06-solomachine/types/client_state_test.go b/modules/light-clients/06-solomachine/types/client_state_test.go
index a3e6b703..c2907c0d 100644
--- a/modules/light-clients/06-solomachine/types/client_state_test.go
+++ b/modules/light-clients/06-solomachine/types/client_state_test.go
@@ -256,8 +256,11 @@ func (suite *SoloMachineTestSuite) TestVerifyClientState() {
expSeq = tc.clientState.Sequence + 1
}
+ // NOTE: to replicate the ordering of connection handshake, we must decrement proof height by 1
+ height := clienttypes.NewHeight(solomachine.GetHeight().GetRevisionNumber(), solomachine.GetHeight().GetRevisionHeight()-1)
+
err := tc.clientState.VerifyClientState(
- suite.store, suite.chainA.Codec, solomachine.GetHeight(), tc.prefix, counterpartyClientIdentifier, tc.proof, clientState,
+ suite.store, suite.chainA.Codec, height, tc.prefix, counterpartyClientIdentifier, tc.proof, clientState,
)
if tc.expPass {
@@ -386,8 +389,11 @@ func (suite *SoloMachineTestSuite) TestVerifyClientConsensusState() {
expSeq = tc.clientState.Sequence + 1
}
+ // NOTE: to replicate the ordering of connection handshake, we must decrement proof height by 2
+ height := clienttypes.NewHeight(solomachine.GetHeight().GetRevisionNumber(), solomachine.GetHeight().GetRevisionHeight()-2)
+
err := tc.clientState.VerifyClientConsensusState(
- suite.store, suite.chainA.Codec, solomachine.GetHeight(), counterpartyClientIdentifier, consensusHeight, tc.prefix, tc.proof, consensusState,
+ suite.store, suite.chainA.Codec, height, counterpartyClientIdentifier, consensusHeight, tc.prefix, tc.proof, consensusState,
)
if tc.expPass {
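To summarize the fix: VerifyClientState bumps the proof height sequence by one and VerifyClientConsensusState by two, so callers (and the tests above) supply a height that is lower by the same amount. A small illustrative sketch, assuming a proof generated at solo machine sequence `s` (revision number is always 0 for a solo machine):

```go
var s uint64 = 10 // example solo machine sequence at proof creation

clientStateProofHeight := clienttypes.NewHeight(0, s-1)    // +1 inside VerifyClientState -> s
consensusStateProofHeight := clienttypes.NewHeight(0, s-2) // +2 inside VerifyClientConsensusState -> s

_, _ = clientStateProofHeight, consensusStateProofHeight
```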
From 4007cfa3d636ef37bc3bcfaa8ff651ddbff961d2 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?colin=20axn=C3=A9r?=
<25233464+colin-axner@users.noreply.github.com>
Date: Tue, 11 May 2021 17:40:17 +0200
Subject: [PATCH 054/393] remove misbehaviour GetHeight (#166)
* remove misbehaviour GetHeight
* add changelog
* remove unused misbehaviour event attribute
---
CHANGELOG.md | 1 +
docs/migrations/ibc-migration-043.md | 2 ++
modules/core/02-client/keeper/client.go | 4 ++--
modules/core/exported/client.go | 3 ---
modules/core/keeper/msg_server.go | 1 -
modules/light-clients/06-solomachine/types/misbehaviour.go | 7 -------
.../06-solomachine/types/misbehaviour_test.go | 2 --
modules/light-clients/07-tendermint/types/misbehaviour.go | 5 -----
.../light-clients/07-tendermint/types/misbehaviour_test.go | 1 -
9 files changed, 5 insertions(+), 21 deletions(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index c5cf8818..0394dbfd 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -45,6 +45,7 @@ Ref: https://keepachangelog.com/en/1.0.0/
### API Breaking
+* (modules) [\#166](https://github.com/cosmos/ibc-go/pull/166) Remove GetHeight from the misbehaviour interface. The `consensus_height` attribute has been removed from Misbehaviour events.
* (modules) [\#162](https://github.com/cosmos/ibc-go/pull/162) Remove deprecated Handler types in core IBC and the ICS 20 transfer module.
* (modules/core) [\#161](https://github.com/cosmos/ibc-go/pull/161) Remove Type(), Route(), GetSignBytes() from 02-client, 03-connection, and 04-channel messages.
* (modules) [\#140](https://github.com/cosmos/ibc-go/pull/140) IsFrozen() client state interface changed to Status(). gRPC `ClientStatus` route added.
diff --git a/docs/migrations/ibc-migration-043.md b/docs/migrations/ibc-migration-043.md
index f2d20966..7ac0c292 100644
--- a/docs/migrations/ibc-migration-043.md
+++ b/docs/migrations/ibc-migration-043.md
@@ -108,6 +108,8 @@ The `OnRecvPacket` callback has been modified to only return the acknowledgement
The `packet_data` attribute has been deprecated in favor of `packet_data_hex`, in order to provide standardized encoding/decoding of packet data in events. While the `packet_data` event still exists, all relayers and IBC Event consumers are strongly encouraged to switch over to using `packet_data_hex` as soon as possible.
+The `consensus_height` attribute has been removed from the emitted Misbehaviour event. IBC clients no longer have a frozen height, and misbehaviour does not necessarily have an associated height.
+
## Relevant SDK changes
* (codec) [\#9226](https://github.com/cosmos/cosmos-sdk/pull/9226) Rename codec interfaces and methods, to follow a general Go interfaces:
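The `packet_data_hex` recommendation above is straightforward to act on from an event consumer's side. A hedged sketch of decoding the attribute (the example value is a made-up ICS-20-style JSON payload; real packet data depends on the application module):

```go
package main

import (
	"encoding/hex"
	"encoding/json"
	"fmt"
)

// decodePacketData converts a packet_data_hex attribute value back into the
// packet data; ICS-20 transfer packets encode their data as JSON.
func decodePacketData(attrValue string) (map[string]interface{}, error) {
	raw, err := hex.DecodeString(attrValue) // packet_data_hex is hex-encoded bytes
	if err != nil {
		return nil, err
	}
	var data map[string]interface{}
	if err := json.Unmarshal(raw, &data); err != nil {
		return nil, err
	}
	return data, nil
}

func main() {
	// hypothetical attribute value taken from a send_packet event: {"amount":"1000"}
	data, err := decodePacketData("7b22616d6f756e74223a2231303030227d")
	if err != nil {
		panic(err)
	}
	fmt.Println(data)
}
```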
diff --git a/modules/core/02-client/keeper/client.go b/modules/core/02-client/keeper/client.go
index ddb9aab9..1ebccead 100644
--- a/modules/core/02-client/keeper/client.go
+++ b/modules/core/02-client/keeper/client.go
@@ -122,7 +122,7 @@ func (k Keeper) UpdateClient(ctx sdk.Context, clientID string, header exported.H
// set eventType to SubmitMisbehaviour
eventType = types.EventTypeSubmitMisbehaviour
- k.Logger(ctx).Info("client frozen due to misbehaviour", "client-id", clientID, "height", header.GetHeight().String())
+ k.Logger(ctx).Info("client frozen due to misbehaviour", "client-id", clientID)
defer func() {
telemetry.IncrCounterWithLabels(
@@ -224,7 +224,7 @@ func (k Keeper) CheckMisbehaviourAndUpdateState(ctx sdk.Context, misbehaviour ex
}
k.SetClientState(ctx, misbehaviour.GetClientID(), clientState)
- k.Logger(ctx).Info("client frozen due to misbehaviour", "client-id", misbehaviour.GetClientID(), "height", misbehaviour.GetHeight().String())
+ k.Logger(ctx).Info("client frozen due to misbehaviour", "client-id", misbehaviour.GetClientID())
defer func() {
telemetry.IncrCounterWithLabels(
diff --git a/modules/core/exported/client.go b/modules/core/exported/client.go
index b79106c1..5879bc9f 100644
--- a/modules/core/exported/client.go
+++ b/modules/core/exported/client.go
@@ -201,9 +201,6 @@ type Misbehaviour interface {
ClientType() string
GetClientID() string
ValidateBasic() error
-
- // Height at which the infraction occurred
- GetHeight() Height
}
// Header is the consensus state update information
diff --git a/modules/core/keeper/msg_server.go b/modules/core/keeper/msg_server.go
index 8c3d879a..2edcdd15 100644
--- a/modules/core/keeper/msg_server.go
+++ b/modules/core/keeper/msg_server.go
@@ -123,7 +123,6 @@ func (k Keeper) SubmitMisbehaviour(goCtx context.Context, msg *clienttypes.MsgSu
clienttypes.EventTypeSubmitMisbehaviour,
sdk.NewAttribute(clienttypes.AttributeKeyClientID, msg.ClientId),
sdk.NewAttribute(clienttypes.AttributeKeyClientType, misbehaviour.ClientType()),
- sdk.NewAttribute(clienttypes.AttributeKeyConsensusHeight, misbehaviour.GetHeight().String()),
),
)
diff --git a/modules/light-clients/06-solomachine/types/misbehaviour.go b/modules/light-clients/06-solomachine/types/misbehaviour.go
index 7a870ad4..9fea2fb1 100644
--- a/modules/light-clients/06-solomachine/types/misbehaviour.go
+++ b/modules/light-clients/06-solomachine/types/misbehaviour.go
@@ -26,13 +26,6 @@ func (misbehaviour Misbehaviour) Type() string {
return exported.TypeClientMisbehaviour
}
-// GetHeight returns the sequence at which misbehaviour occurred.
-// Return exported.Height to satisfy interface
-// Revision number is always 0 for a solo-machine
-func (misbehaviour Misbehaviour) GetHeight() exported.Height {
- return clienttypes.NewHeight(0, misbehaviour.Sequence)
-}
-
// ValidateBasic implements Evidence interface.
func (misbehaviour Misbehaviour) ValidateBasic() error {
if err := host.ClientIdentifierValidator(misbehaviour.ClientId); err != nil {
diff --git a/modules/light-clients/06-solomachine/types/misbehaviour_test.go b/modules/light-clients/06-solomachine/types/misbehaviour_test.go
index 00f97219..77acd2c2 100644
--- a/modules/light-clients/06-solomachine/types/misbehaviour_test.go
+++ b/modules/light-clients/06-solomachine/types/misbehaviour_test.go
@@ -11,8 +11,6 @@ func (suite *SoloMachineTestSuite) TestMisbehaviour() {
suite.Require().Equal(exported.Solomachine, misbehaviour.ClientType())
suite.Require().Equal(suite.solomachine.ClientID, misbehaviour.GetClientID())
- suite.Require().Equal(uint64(0), misbehaviour.GetHeight().GetRevisionNumber())
- suite.Require().Equal(suite.solomachine.Sequence, misbehaviour.GetHeight().GetRevisionHeight())
}
func (suite *SoloMachineTestSuite) TestMisbehaviourValidateBasic() {
diff --git a/modules/light-clients/07-tendermint/types/misbehaviour.go b/modules/light-clients/07-tendermint/types/misbehaviour.go
index 9a5ef74c..6c82d9af 100644
--- a/modules/light-clients/07-tendermint/types/misbehaviour.go
+++ b/modules/light-clients/07-tendermint/types/misbehaviour.go
@@ -36,11 +36,6 @@ func (misbehaviour Misbehaviour) GetClientID() string {
return misbehaviour.ClientId
}
-// GetHeight returns the height at which misbehaviour occurred
-func (misbehaviour Misbehaviour) GetHeight() exported.Height {
- return misbehaviour.Header1.GetHeight()
-}
-
// GetTime returns the timestamp at which misbehaviour occurred. It uses the
// maximum value from both headers to prevent producing an invalid header outside
// of the misbehaviour age range.
diff --git a/modules/light-clients/07-tendermint/types/misbehaviour_test.go b/modules/light-clients/07-tendermint/types/misbehaviour_test.go
index 30deec2c..d3bb7bc6 100644
--- a/modules/light-clients/07-tendermint/types/misbehaviour_test.go
+++ b/modules/light-clients/07-tendermint/types/misbehaviour_test.go
@@ -26,7 +26,6 @@ func (suite *TendermintTestSuite) TestMisbehaviour() {
suite.Require().Equal(exported.Tendermint, misbehaviour.ClientType())
suite.Require().Equal(clientID, misbehaviour.GetClientID())
- suite.Require().Equal(height, misbehaviour.GetHeight())
}
func (suite *TendermintTestSuite) TestMisbehaviourValidateBasic() {
From 744199ada60fb8312ca4de68a8a7e958a4d7c301 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?colin=20axn=C3=A9r?=
<25233464+colin-axner@users.noreply.github.com>
Date: Tue, 11 May 2021 18:07:21 +0200
Subject: [PATCH 055/393] remove GetFrozenHeight from ClientState interface
(#165)
* remove GetFrozenHeight from ClientState interface
* add changelog
---
CHANGELOG.md | 1 +
modules/core/exported/client.go | 1 -
modules/light-clients/06-solomachine/types/client_state.go | 7 -------
modules/light-clients/07-tendermint/types/client_state.go | 6 ------
.../07-tendermint/types/proposal_handle_test.go | 2 +-
modules/light-clients/09-localhost/types/client_state.go | 5 -----
6 files changed, 2 insertions(+), 20 deletions(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 0394dbfd..ff34fa8d 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -45,6 +45,7 @@ Ref: https://keepachangelog.com/en/1.0.0/
### API Breaking
+* (modules/core/02-client) [\#165](https://github.com/cosmos/ibc-go/pull/165) Remove GetFrozenHeight from the ClientState interface.
* (modules) [\#166](https://github.com/cosmos/ibc-go/pull/166) Remove GetHeight from the misbehaviour interface. The `consensus_height` attribute has been removed from Misbehaviour events.
* (modules) [\#162](https://github.com/cosmos/ibc-go/pull/162) Remove deprecated Handler types in core IBC and the ICS 20 transfer module.
* (modules/core) [\#161](https://github.com/cosmos/ibc-go/pull/161) Remove Type(), Route(), GetSignBytes() from 02-client, 03-connection, and 04-channel messages.
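Callers that previously relied on GetFrozenHeight() now either consult Status(), added in #140, or type-assert to the concrete client state, as the updated test below does. A sketch combining both checks, assuming `ibctmtypes` aliases the 07-tendermint types package (the surrounding wiring is hypothetical):

```go
// isFrozen sketches the two ways to detect a frozen client after the
// interface method was removed.
func isFrozen(ctx sdk.Context, store sdk.KVStore, cdc codec.BinaryCodec, cs exported.ClientState) bool {
	// interface-level check via the Status query
	if cs.Status(ctx, store, cdc) == exported.Frozen {
		return true
	}
	// concrete-type access for clients that still expose FrozenHeight
	if tmClientState, ok := cs.(*ibctmtypes.ClientState); ok {
		return !tmClientState.FrozenHeight.IsZero()
	}
	return false
}
```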
diff --git a/modules/core/exported/client.go b/modules/core/exported/client.go
index 5879bc9f..77268b1b 100644
--- a/modules/core/exported/client.go
+++ b/modules/core/exported/client.go
@@ -44,7 +44,6 @@ type ClientState interface {
ClientType() string
GetLatestHeight() Height
- GetFrozenHeight() Height
Validate() error
GetProofSpecs() []*ics23.ProofSpec
diff --git a/modules/light-clients/06-solomachine/types/client_state.go b/modules/light-clients/06-solomachine/types/client_state.go
index 8a757494..fc6b69ff 100644
--- a/modules/light-clients/06-solomachine/types/client_state.go
+++ b/modules/light-clients/06-solomachine/types/client_state.go
@@ -57,13 +57,6 @@ func (cs ClientState) IsFrozen() bool {
return cs.FrozenSequence != 0
}
-// GetFrozenHeight returns the frozen sequence of the client.
-// Return exported.Height to satisfy interface
-// Revision number is always 0 for a solo-machine
-func (cs ClientState) GetFrozenHeight() exported.Height {
- return clienttypes.NewHeight(0, cs.FrozenSequence)
-}
-
// GetProofSpecs returns nil proof specs since client state verification uses signatures.
func (cs ClientState) GetProofSpecs() []*ics23.ProofSpec {
return nil
diff --git a/modules/light-clients/07-tendermint/types/client_state.go b/modules/light-clients/07-tendermint/types/client_state.go
index 00d919ae..4928a681 100644
--- a/modules/light-clients/07-tendermint/types/client_state.go
+++ b/modules/light-clients/07-tendermint/types/client_state.go
@@ -88,12 +88,6 @@ func (cs ClientState) Status(
return exported.Active
}
-// GetFrozenHeight returns the height at which client is frozen
-// NOTE: FrozenHeight is zero if client is unfrozen
-func (cs ClientState) GetFrozenHeight() exported.Height {
- return cs.FrozenHeight
-}
-
// IsExpired returns whether or not the client has passed the trusting period since the last
// update (in which case no headers are considered valid).
func (cs ClientState) IsExpired(latestTimestamp, now time.Time) bool {
diff --git a/modules/light-clients/07-tendermint/types/proposal_handle_test.go b/modules/light-clients/07-tendermint/types/proposal_handle_test.go
index 6208aec3..ce099178 100644
--- a/modules/light-clients/07-tendermint/types/proposal_handle_test.go
+++ b/modules/light-clients/07-tendermint/types/proposal_handle_test.go
@@ -319,7 +319,7 @@ func (suite *TendermintTestSuite) TestCheckSubstituteAndUpdateState() {
if tc.expPass {
suite.Require().NoError(err)
- suite.Require().Equal(clienttypes.ZeroHeight(), updatedClient.GetFrozenHeight())
+ suite.Require().Equal(clienttypes.ZeroHeight(), updatedClient.(*types.ClientState).FrozenHeight)
} else {
suite.Require().Error(err)
suite.Require().Nil(updatedClient)
diff --git a/modules/light-clients/09-localhost/types/client_state.go b/modules/light-clients/09-localhost/types/client_state.go
index 67da74d9..294ee4d9 100644
--- a/modules/light-clients/09-localhost/types/client_state.go
+++ b/modules/light-clients/09-localhost/types/client_state.go
@@ -49,11 +49,6 @@ func (cs ClientState) Status(_ sdk.Context, _ sdk.KVStore, _ codec.BinaryCodec,
return exported.Active
}
-// GetFrozenHeight returns an uninitialized IBC Height.
-func (cs ClientState) GetFrozenHeight() exported.Height {
- return clienttypes.ZeroHeight()
-}
-
// Validate performs a basic validation of the client state fields.
func (cs ClientState) Validate() error {
if strings.TrimSpace(cs.ChainId) == "" {
From 31166f28da275e3993688540a52ed1b27dc586df Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Wed, 12 May 2021 11:59:09 +0200
Subject: [PATCH 056/393] Bump technote-space/get-diff-action from 4 to 4.1.1
(#172)
Bumps [technote-space/get-diff-action](https://github.com/technote-space/get-diff-action) from 4 to 4.1.1.
- [Release notes](https://github.com/technote-space/get-diff-action/releases)
- [Changelog](https://github.com/technote-space/get-diff-action/blob/master/.releasegarc)
- [Commits](https://github.com/technote-space/get-diff-action/compare/v4...v4.1.1)
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
.github/workflows/test.yml | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index b2057a57..901f7a8a 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -41,7 +41,7 @@ jobs:
- uses: actions/setup-go@v2.1.3
with:
go-version: 1.15
- - uses: technote-space/get-diff-action@v4
+ - uses: technote-space/get-diff-action@v4.1.1
id: git_diff
with:
PATTERNS: |
@@ -89,7 +89,7 @@ jobs:
- uses: actions/setup-go@v2.1.3
with:
go-version: 1.15
- - uses: technote-space/get-diff-action@v4
+ - uses: technote-space/get-diff-action@v4.1.1
with:
PATTERNS: |
**/**.go
@@ -113,7 +113,7 @@ jobs:
needs: tests
steps:
- uses: actions/checkout@v2
- - uses: technote-space/get-diff-action@v4
+ - uses: technote-space/get-diff-action@v4.1.1
with:
PATTERNS: |
**/**.go
From d70f49c8f612d60f1b7e2f1d1f160f28988962e1 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?colin=20axn=C3=A9r?=
<25233464+colin-axner@users.noreply.github.com>
Date: Wed, 12 May 2021 12:24:37 +0200
Subject: [PATCH 057/393] update to SDK beta release (#167)
* update SDK commit
* bump to SDK v0.43.0-beta1 tag
---
go.mod | 2 +-
go.sum | 12 ++++----
modules/apps/transfer/client/cli/tx.go | 10 +------
modules/core/02-client/client/cli/tx.go | 37 +++----------------------
testing/simapp/app.go | 30 ++++++++++----------
testing/simapp/sim_test.go | 4 +--
6 files changed, 29 insertions(+), 66 deletions(-)
diff --git a/go.mod b/go.mod
index f1e91d76..f4100fcb 100644
--- a/go.mod
+++ b/go.mod
@@ -7,7 +7,7 @@ replace github.com/gogo/protobuf => github.com/regen-network/protobuf v1.3.3-alp
require (
github.com/armon/go-metrics v0.3.8
github.com/confio/ics23/go v0.6.6
- github.com/cosmos/cosmos-sdk v0.43.0-alpha1.0.20210504090054-e3e89f52607b
+ github.com/cosmos/cosmos-sdk v0.43.0-beta1
github.com/gogo/protobuf v1.3.3
github.com/golang/protobuf v1.5.2
github.com/gorilla/mux v1.8.0
diff --git a/go.sum b/go.sum
index 016cc726..3f9f99e4 100644
--- a/go.sum
+++ b/go.sum
@@ -62,7 +62,6 @@ github.com/aristanetworks/goarista v0.0.0-20170210015632-ea17b1a17847/go.mod h1:
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
-github.com/armon/go-metrics v0.3.7/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc=
github.com/armon/go-metrics v0.3.8 h1:oOxq3KPj0WhCuy50EhzwiyMyG2ovRQZpZLXQuOh2a/M=
github.com/armon/go-metrics v0.3.8/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
@@ -129,15 +128,16 @@ github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-github.com/cosmos/cosmos-sdk v0.43.0-alpha1.0.20210504090054-e3e89f52607b h1:5AOuxO9jaK+89wdCMj2e75GC1qQRXeYGYOUfqdN/Kb0=
-github.com/cosmos/cosmos-sdk v0.43.0-alpha1.0.20210504090054-e3e89f52607b/go.mod h1:u/SsFuAiyrBlVafgDQF/hbLzeEaA7w9XmKw3QiJm3G4=
+github.com/cosmos/cosmos-sdk v0.43.0-beta1 h1:cfRZY+opamo+zF+MuEbvriZwoSzfCuEh1fqUM8fFHbg=
+github.com/cosmos/cosmos-sdk v0.43.0-beta1/go.mod h1:rpCPaC3MnityU4Io4CDZqZB4GMtPqNeYXxPk8iRqmYM=
github.com/cosmos/go-bip39 v0.0.0-20180819234021-555e2067c45d/go.mod h1:tSxLoYXyBmiFeKpvmq4dzayMdCjCnu8uqmCysIGBT2Y=
github.com/cosmos/go-bip39 v1.0.0 h1:pcomnQdrdH22njcAatO0yWojsUnCO3y2tNoV1cb6hHY=
github.com/cosmos/go-bip39 v1.0.0/go.mod h1:RNJv0H/pOIVgxw6KS7QeX2a0Uo0aKUlfhZ4xuwvCdJw=
github.com/cosmos/iavl v0.15.0-rc3.0.20201009144442-230e9bdf52cd/go.mod h1:3xOIaNNX19p0QrX0VqWa6voPRoJRGGYtny+DH8NEPvE=
github.com/cosmos/iavl v0.15.0-rc5/go.mod h1:WqoPL9yPTQ85QBMT45OOUzPxG/U/JcJoN7uMjgxke/I=
-github.com/cosmos/iavl v0.15.3 h1:xE9r6HW8GeKeoYJN4zefpljZ1oukVScP/7M8oj6SUts=
github.com/cosmos/iavl v0.15.3/go.mod h1:OLjQiAQ4fGD2KDZooyJG9yz+p2ao2IAYSbke8mVvSA4=
+github.com/cosmos/iavl v0.16.0 h1:ICIOB8xysirTX27GmVAaoeSpeozzgSu9d49w36xkVJA=
+github.com/cosmos/iavl v0.16.0/go.mod h1:2A8O/Jz9YwtjqXMO0CjnnbTYEEaovE8jWcwrakH3PoE=
github.com/cosmos/ledger-cosmos-go v0.11.1 h1:9JIYsGnXP613pb2vPjFeMMjBI5lEDsEaF6oYorTy6J4=
github.com/cosmos/ledger-cosmos-go v0.11.1/go.mod h1:J8//BsAGTo3OC/vDLjMRFLW6q0WAaXvHnVc7ZmE8iUY=
github.com/cosmos/ledger-go v0.9.2 h1:Nnao/dLwaVTk1Q5U9THldpUMMXU94BOTWPddSmVB6pI=
@@ -525,8 +525,8 @@ github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxS
github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
-github.com/otiai10/copy v1.5.1 h1:a/cs2E1/1V0az8K5nblbl+ymEa4E11AfaOLMar8V34w=
-github.com/otiai10/copy v1.5.1/go.mod h1:XWfuS3CrI0R6IE0FbgHsEazaXO8G0LpMp9o8tos0x4E=
+github.com/otiai10/copy v1.6.0 h1:IinKAryFFuPONZ7cm6T6E2QX/vcJwSnlaA5lfoaXIiQ=
+github.com/otiai10/copy v1.6.0/go.mod h1:XWfuS3CrI0R6IE0FbgHsEazaXO8G0LpMp9o8tos0x4E=
github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE=
github.com/otiai10/curr v1.0.0 h1:TJIWdbX0B+kpNagQrjgq8bCMrbhiuX73M2XwgtDMoOI=
github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs=
diff --git a/modules/apps/transfer/client/cli/tx.go b/modules/apps/transfer/client/cli/tx.go
index c1cbc9f9..a524fceb 100644
--- a/modules/apps/transfer/client/cli/tx.go
+++ b/modules/apps/transfer/client/cli/tx.go
@@ -10,7 +10,6 @@ import (
"github.com/cosmos/cosmos-sdk/client/flags"
"github.com/cosmos/cosmos-sdk/client/tx"
sdk "github.com/cosmos/cosmos-sdk/types"
- "github.com/cosmos/cosmos-sdk/types/msgservice"
"github.com/cosmos/cosmos-sdk/version"
"github.com/cosmos/ibc-go/modules/apps/transfer/types"
clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
@@ -97,14 +96,7 @@ to the counterparty channel. Any timeout set to 0 is disabled.`),
msg := types.NewMsgTransfer(
srcPort, srcChannel, coin, sender, receiver, timeoutHeight, timeoutTimestamp,
)
- svcMsgClientConn := &msgservice.ServiceMsgClientConn{}
- msgClient := types.NewMsgClient(svcMsgClientConn)
- _, err = msgClient.Transfer(cmd.Context(), msg)
- if err != nil {
- return err
- }
-
- return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), svcMsgClientConn.GetMsgs()...)
+ return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), msg)
},
}
diff --git a/modules/core/02-client/client/cli/tx.go b/modules/core/02-client/client/cli/tx.go
index 64d7b97f..f195f0fa 100644
--- a/modules/core/02-client/client/cli/tx.go
+++ b/modules/core/02-client/client/cli/tx.go
@@ -13,7 +13,6 @@ import (
"github.com/cosmos/cosmos-sdk/client/tx"
"github.com/cosmos/cosmos-sdk/codec"
sdk "github.com/cosmos/cosmos-sdk/types"
- "github.com/cosmos/cosmos-sdk/types/msgservice"
"github.com/cosmos/cosmos-sdk/version"
govcli "github.com/cosmos/cosmos-sdk/x/gov/client/cli"
govtypes "github.com/cosmos/cosmos-sdk/x/gov/types"
@@ -76,14 +75,7 @@ func NewCreateClientCmd() *cobra.Command {
return err
}
- svcMsgClientConn := &msgservice.ServiceMsgClientConn{}
- msgClient := types.NewMsgClient(svcMsgClientConn)
- _, err = msgClient.CreateClient(cmd.Context(), msg)
- if err != nil {
- return err
- }
-
- return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), svcMsgClientConn.GetMsgs()...)
+ return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), msg)
},
}
@@ -129,14 +121,7 @@ func NewUpdateClientCmd() *cobra.Command {
return err
}
- svcMsgClientConn := &msgservice.ServiceMsgClientConn{}
- msgClient := types.NewMsgClient(svcMsgClientConn)
- _, err = msgClient.UpdateClient(cmd.Context(), msg)
- if err != nil {
- return err
- }
-
- return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), svcMsgClientConn.GetMsgs()...)
+ return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), msg)
},
}
}
@@ -177,14 +162,7 @@ func NewSubmitMisbehaviourCmd() *cobra.Command {
return err
}
- svcMsgClientConn := &msgservice.ServiceMsgClientConn{}
- msgClient := types.NewMsgClient(svcMsgClientConn)
- _, err = msgClient.SubmitMisbehaviour(cmd.Context(), msg)
- if err != nil {
- return err
- }
-
- return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), svcMsgClientConn.GetMsgs()...)
+ return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), msg)
},
}
}
@@ -247,14 +225,7 @@ func NewUpgradeClientCmd() *cobra.Command {
return err
}
- svcMsgClientConn := &msgservice.ServiceMsgClientConn{}
- msgClient := types.NewMsgClient(svcMsgClientConn)
- _, err = msgClient.UpgradeClient(cmd.Context(), msg)
- if err != nil {
- return err
- }
-
- return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), svcMsgClientConn.GetMsgs()...)
+ return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), msg)
},
}
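The CLI changes above all follow the same shape: build the message and hand it directly to the generate-or-broadcast helper, with no ServiceMsgClientConn detour. A condensed sketch of the resulting command body (flag parsing and message construction are elided; the variable names are placeholders taken from the transfer command):

```go
// RunE body after the SDK v0.43-beta1 simplification: the message goes
// straight to the broadcast helper instead of through a service msg client.
RunE: func(cmd *cobra.Command, args []string) error {
	clientCtx, err := client.GetClientTxContext(cmd)
	if err != nil {
		return err
	}
	msg := types.NewMsgTransfer(srcPort, srcChannel, coin, sender, receiver, timeoutHeight, timeoutTimestamp)
	return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), msg)
},
```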
diff --git a/testing/simapp/app.go b/testing/simapp/app.go
index 7eb60307..817996c4 100644
--- a/testing/simapp/app.go
+++ b/testing/simapp/app.go
@@ -55,9 +55,9 @@ import (
"github.com/cosmos/cosmos-sdk/x/evidence"
evidencekeeper "github.com/cosmos/cosmos-sdk/x/evidence/keeper"
evidencetypes "github.com/cosmos/cosmos-sdk/x/evidence/types"
- feegrant "github.com/cosmos/cosmos-sdk/x/feegrant"
+ "github.com/cosmos/cosmos-sdk/x/feegrant"
feegrantkeeper "github.com/cosmos/cosmos-sdk/x/feegrant/keeper"
- feegranttypes "github.com/cosmos/cosmos-sdk/x/feegrant/types"
+ feegrantmodule "github.com/cosmos/cosmos-sdk/x/feegrant/module"
"github.com/cosmos/cosmos-sdk/x/genutil"
genutiltypes "github.com/cosmos/cosmos-sdk/x/genutil/types"
"github.com/cosmos/cosmos-sdk/x/gov"
@@ -95,7 +95,7 @@ import (
authz "github.com/cosmos/cosmos-sdk/x/authz"
authzkeeper "github.com/cosmos/cosmos-sdk/x/authz/keeper"
- authztypes "github.com/cosmos/cosmos-sdk/x/authz/types"
+ authzmodule "github.com/cosmos/cosmos-sdk/x/authz/module"
// unnamed import of statik for swagger UI support
_ "github.com/cosmos/cosmos-sdk/client/docs/statik"
@@ -126,12 +126,12 @@ var (
crisis.AppModuleBasic{},
slashing.AppModuleBasic{},
ibc.AppModuleBasic{},
- feegrant.AppModuleBasic{},
+ feegrantmodule.AppModuleBasic{},
upgrade.AppModuleBasic{},
evidence.AppModuleBasic{},
transfer.AppModuleBasic{},
ibcmock.AppModuleBasic{},
- authz.AppModuleBasic{},
+ authzmodule.AppModuleBasic{},
vesting.AppModuleBasic{},
)
@@ -229,9 +229,9 @@ func NewSimApp(
keys := sdk.NewKVStoreKeys(
authtypes.StoreKey, banktypes.StoreKey, stakingtypes.StoreKey,
minttypes.StoreKey, distrtypes.StoreKey, slashingtypes.StoreKey,
- govtypes.StoreKey, paramstypes.StoreKey, ibchost.StoreKey, upgradetypes.StoreKey, feegranttypes.StoreKey,
+ govtypes.StoreKey, paramstypes.StoreKey, ibchost.StoreKey, upgradetypes.StoreKey, feegrant.StoreKey,
evidencetypes.StoreKey, ibctransfertypes.StoreKey, capabilitytypes.StoreKey,
- authztypes.StoreKey,
+ authzkeeper.StoreKey,
)
tkeys := sdk.NewTransientStoreKeys(paramstypes.TStoreKey)
memKeys := sdk.NewMemoryStoreKeys(capabilitytypes.MemStoreKey)
@@ -285,7 +285,7 @@ func NewSimApp(
app.GetSubspace(crisistypes.ModuleName), invCheckPeriod, app.BankKeeper, authtypes.FeeCollectorName,
)
- app.FeeGrantKeeper = feegrantkeeper.NewKeeper(appCodec, keys[feegranttypes.StoreKey], app.AccountKeeper)
+ app.FeeGrantKeeper = feegrantkeeper.NewKeeper(appCodec, keys[feegrant.StoreKey], app.AccountKeeper)
app.UpgradeKeeper = upgradekeeper.NewKeeper(skipUpgradeHeights, keys[upgradetypes.StoreKey], appCodec, homePath, app.BaseApp)
// register the staking hooks
@@ -299,7 +299,7 @@ func NewSimApp(
appCodec, keys[ibchost.StoreKey], app.GetSubspace(ibchost.ModuleName), app.StakingKeeper, app.UpgradeKeeper, scopedIBCKeeper,
)
- app.AuthzKeeper = authzkeeper.NewKeeper(keys[authztypes.StoreKey], appCodec, app.BaseApp.MsgServiceRouter())
+ app.AuthzKeeper = authzkeeper.NewKeeper(keys[authzkeeper.StoreKey], appCodec, app.BaseApp.MsgServiceRouter())
// register the proposal types
govRouter := govtypes.NewRouter()
@@ -356,7 +356,7 @@ func NewSimApp(
bank.NewAppModule(appCodec, app.BankKeeper, app.AccountKeeper),
capability.NewAppModule(appCodec, *app.CapabilityKeeper),
crisis.NewAppModule(&app.CrisisKeeper, skipGenesisInvariants),
- feegrant.NewAppModule(appCodec, app.AccountKeeper, app.BankKeeper, app.FeeGrantKeeper, app.interfaceRegistry),
+ feegrantmodule.NewAppModule(appCodec, app.AccountKeeper, app.BankKeeper, app.FeeGrantKeeper, app.interfaceRegistry),
gov.NewAppModule(appCodec, app.GovKeeper, app.AccountKeeper, app.BankKeeper),
mint.NewAppModule(appCodec, app.MintKeeper, app.AccountKeeper),
slashing.NewAppModule(appCodec, app.SlashingKeeper, app.AccountKeeper, app.BankKeeper, app.StakingKeeper),
@@ -366,7 +366,7 @@ func NewSimApp(
evidence.NewAppModule(app.EvidenceKeeper),
ibc.NewAppModule(app.IBCKeeper),
params.NewAppModule(app.ParamsKeeper),
- authz.NewAppModule(appCodec, app.AuthzKeeper, app.AccountKeeper, app.BankKeeper, app.interfaceRegistry),
+ authzmodule.NewAppModule(appCodec, app.AuthzKeeper, app.AccountKeeper, app.BankKeeper, app.interfaceRegistry),
transferModule,
mockModule,
)
@@ -389,8 +389,8 @@ func NewSimApp(
app.mm.SetOrderInitGenesis(
capabilitytypes.ModuleName, authtypes.ModuleName, banktypes.ModuleName, distrtypes.ModuleName, stakingtypes.ModuleName,
slashingtypes.ModuleName, govtypes.ModuleName, minttypes.ModuleName, crisistypes.ModuleName,
- ibchost.ModuleName, genutiltypes.ModuleName, evidencetypes.ModuleName, authztypes.ModuleName, ibctransfertypes.ModuleName,
- ibcmock.ModuleName, feegranttypes.ModuleName,
+ ibchost.ModuleName, genutiltypes.ModuleName, evidencetypes.ModuleName, authz.ModuleName, ibctransfertypes.ModuleName,
+ ibcmock.ModuleName, feegrant.ModuleName,
)
app.mm.RegisterInvariants(&app.CrisisKeeper)
@@ -409,7 +409,7 @@ func NewSimApp(
auth.NewAppModule(appCodec, app.AccountKeeper, authsims.RandomGenesisAccounts),
bank.NewAppModule(appCodec, app.BankKeeper, app.AccountKeeper),
capability.NewAppModule(appCodec, *app.CapabilityKeeper),
- feegrant.NewAppModule(appCodec, app.AccountKeeper, app.BankKeeper, app.FeeGrantKeeper, app.interfaceRegistry),
+ feegrantmodule.NewAppModule(appCodec, app.AccountKeeper, app.BankKeeper, app.FeeGrantKeeper, app.interfaceRegistry),
gov.NewAppModule(appCodec, app.GovKeeper, app.AccountKeeper, app.BankKeeper),
mint.NewAppModule(appCodec, app.MintKeeper, app.AccountKeeper),
staking.NewAppModule(appCodec, app.StakingKeeper, app.AccountKeeper, app.BankKeeper),
@@ -417,7 +417,7 @@ func NewSimApp(
slashing.NewAppModule(appCodec, app.SlashingKeeper, app.AccountKeeper, app.BankKeeper, app.StakingKeeper),
params.NewAppModule(app.ParamsKeeper),
evidence.NewAppModule(app.EvidenceKeeper),
- authz.NewAppModule(appCodec, app.AuthzKeeper, app.AccountKeeper, app.BankKeeper, app.interfaceRegistry),
+ authzmodule.NewAppModule(appCodec, app.AuthzKeeper, app.AccountKeeper, app.BankKeeper, app.interfaceRegistry),
ibc.NewAppModule(app.IBCKeeper),
transferModule,
)
diff --git a/testing/simapp/sim_test.go b/testing/simapp/sim_test.go
index 6a297c11..f37dffad 100644
--- a/testing/simapp/sim_test.go
+++ b/testing/simapp/sim_test.go
@@ -18,7 +18,7 @@ import (
sdk "github.com/cosmos/cosmos-sdk/types"
simtypes "github.com/cosmos/cosmos-sdk/types/simulation"
authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
- authztypes "github.com/cosmos/cosmos-sdk/x/authz/types"
+ authzkeeper "github.com/cosmos/cosmos-sdk/x/authz/keeper"
banktypes "github.com/cosmos/cosmos-sdk/x/bank/types"
capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"
distrtypes "github.com/cosmos/cosmos-sdk/x/distribution/types"
@@ -178,7 +178,7 @@ func TestAppImportExport(t *testing.T) {
{app.keys[capabilitytypes.StoreKey], newApp.keys[capabilitytypes.StoreKey], [][]byte{}},
{app.keys[ibchost.StoreKey], newApp.keys[ibchost.StoreKey], [][]byte{}},
{app.keys[ibctransfertypes.StoreKey], newApp.keys[ibctransfertypes.StoreKey], [][]byte{}},
- {app.keys[authztypes.StoreKey], newApp.keys[authztypes.StoreKey], [][]byte{}},
+ {app.keys[authzkeeper.StoreKey], newApp.keys[authzkeeper.StoreKey], [][]byte{}},
}
for _, skp := range storeKeysPrefixes {
From 9c16e45bc94330d4913e1143c7871d52d05ac4dd Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Wed, 12 May 2021 10:54:16 +0000
Subject: [PATCH 058/393] Bump actions/checkout from 2 to 2.3.4 (#173)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Bumps [actions/checkout](https://github.com/actions/checkout) from 2 to 2.3.4.
- [Release notes](https://github.com/actions/checkout/releases)
- [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md)
- [Commits](https://github.com/actions/checkout/compare/v2...v2.3.4)
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: colin axnér <25233464+colin-axner@users.noreply.github.com>
---
.github/workflows/test.yml | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 901f7a8a..bc79999c 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -37,7 +37,7 @@ jobs:
matrix:
go-arch: ["amd64", "arm", "arm64"]
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v2.3.4
- uses: actions/setup-go@v2.1.3
with:
go-version: 1.15
@@ -54,7 +54,7 @@ jobs:
split-test-files:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v2.3.4
- name: Create a file with all the pkgs
run: go list ./... > pkgs.txt
- name: Split pkgs into 4 files
@@ -85,7 +85,7 @@ jobs:
matrix:
part: ["00", "01", "02", "03"]
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v2.3.4
- uses: actions/setup-go@v2.1.3
with:
go-version: 1.15
@@ -112,7 +112,7 @@ jobs:
runs-on: ubuntu-latest
needs: tests
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v2.3.4
- uses: technote-space/get-diff-action@v4.1.1
with:
PATTERNS: |
From 6dbf32f38ee39b5948bb99362871e72eb9faef50 Mon Sep 17 00:00:00 2001
From: Parth
Date: Wed, 12 May 2021 15:44:43 +0400
Subject: [PATCH 059/393] ADR 027: Add support for WASM based light client
(#163)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
* add adr 027
* add to old readme
* Apply suggestions from code review
Co-authored-by: Aditya
* update README
Co-authored-by: Federico Kunze <31522760+fedekunze@users.noreply.github.com>
Co-authored-by: Aditya
Co-authored-by: colin axnér <25233464+colin-axner@users.noreply.github.com>
---
docs/OLD_README.md | 1 +
docs/architecture/README.md | 2 +
docs/architecture/adr-027-ibc-wasm.md | 147 ++++++++++++++++++++++++++
3 files changed, 150 insertions(+)
create mode 100644 docs/architecture/adr-027-ibc-wasm.md
diff --git a/docs/OLD_README.md b/docs/OLD_README.md
index a699c10a..7a19ba34 100644
--- a/docs/OLD_README.md
+++ b/docs/OLD_README.md
@@ -68,6 +68,7 @@ consensus states in order to verify their membership in the counterparty clients
* [ADR 020 - Protocol Buffer Transaction Encoding](./../../docs/architecture/adr-020-protobuf-transaction-encoding.md): Client side migration to Protobuf.
* [ADR 021 - Protocol Buffer Query Encoding](../../../docs/architecture/adr-020-protobuf-query-encoding.md): Queries migration to Protobuf.
* [ADR 026 - IBC Client Recovery Mechanisms](../../../docs/architecture/adr-026-ibc-client-recovery-mechanisms.md): Allows IBC Clients to be recovered after freezing or expiry.
+* [ADR 027 - IBC WASM Client](../../../docs/architecture/adr-027-ibc-wasm.md): Support for Wasm based light clients.
### SDK Modules
diff --git a/docs/architecture/README.md b/docs/architecture/README.md
index 0cc93bae..b38cab5e 100644
--- a/docs/architecture/README.md
+++ b/docs/architecture/README.md
@@ -31,4 +31,6 @@ To suggest an ADR, please make use of the [ADR template](./adr-template.md) prov
| [015](./adr-015-ibc-packet-receiver.md) | IBC Packet Routing | Accepted |
| [025](./adr-025-ibc-passive-channels.md) | IBC passive channels | Deprecated |
| [026](./adr-026-ibc-client-recovery-mechanisms.md) | IBC client recovery mechansisms | Accepted |
+| [027](./adr-027-ibc-wasm.md) | Wasm based light clients | Accepted |
+
diff --git a/docs/architecture/adr-027-ibc-wasm.md b/docs/architecture/adr-027-ibc-wasm.md
new file mode 100644
index 00000000..d105c985
--- /dev/null
+++ b/docs/architecture/adr-027-ibc-wasm.md
@@ -0,0 +1,147 @@
+# ADR 27: Add support for Wasm based light client
+
+## Changelog
+
+- 26/11/2020: Initial Draft
+
+## Status
+
+*Draft*
+
+## Abstract
+
+In the Cosmos SDK, light clients are currently hardcoded in Go. This makes upgrading existing IBC light clients or adding
+support for new light clients a multi-step process involving on-chain governance, which is time-consuming.
+
+To remedy this, we are proposing a WASM VM to host light client bytecode, which allows easier upgrading of
+existing IBC light clients as well as adding support for new IBC light clients without requiring a code release and a corresponding
+hard-fork event.
+
+## Context
+Currently in the SDK, light clients are defined as part of the codebase and are implemented as submodules under
+`ibc-go/core/modules/light-clients/`.
+
+Adding support for a new light client, or updating an existing light client in the event of a security
+issue or consensus update, is a multi-step process that is both time-consuming and error-prone:
+
+1. To add support for a new light client or update an existing light client in the
+   event of a security issue or consensus update, the codebase must be modified and the change integrated in numerous places.
+
+2. Governance voting: Adding new light client implementations requires governance support and is expensive. This is
+   not ideal, as chain governance becomes the gatekeeper for new light client implementations. If a small community
+   wants support for light client X, it may not be able to convince governance to support it.
+
+3. Validator upgrade: After the governance vote succeeds, validators need to upgrade their nodes in order to enable the new
+   IBC light client implementation.
+
+Another problem stemming from the above process is that if a chain wants to upgrade its own consensus, it will need to convince every chain
+or hub connected to it to upgrade its light client in order to stay connected. Because upgrading a light client is so time-consuming,
+a chain with many connections needs to stay disconnected for quite some time after upgrading
+its consensus, which can be very expensive in terms of time and effort.
+
+We are proposing to simplify this workflow by integrating a WASM light client module which makes adding support for
+a new light client a simple transaction. The light client bytecode, written in Wasm-compilable Rust, runs inside a WASM
+VM. The Wasm light client submodule exposes a proxy light client interface that routes incoming messages to the
+appropriate handler function inside the Wasm VM for execution.
+
+With the WASM light client module, anybody can add a new IBC light client in the form of WASM bytecode (provided they are able to pay the requisite gas fee for the transaction),
+as well as instantiate clients using any created client type. This allows any chain to update its own light client in other chains
+without going through the steps outlined above.
+
+
+## Decision
+
+We have decided to use the WASM light client module as a light client proxy that interfaces with the actual light client
+uploaded as WASM bytecode. This requires changing the client selection method to allow any client whose client type
+carries the `wasm/` prefix.
+
+```go
+// IsAllowedClient checks if the given client type is registered on the allowlist.
+func (p Params) IsAllowedClient(clientType string) bool {
+ if p.AreWASMClientsAllowed && isWASMClient(clientType) {
+ return true
+ }
+
+ for _, allowedClient := range p.AllowedClients {
+ if allowedClient == clientType {
+ return true
+ }
+ }
+
+ return false
+}
+```
+
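+The `isWASMClient` helper is not defined in this ADR; a minimal sketch of the prefix check described above
+(the helper name is an assumption taken from the snippet) could look like:
+
+```go
+// isWASMClient reports whether the client type identifies a Wasm-hosted light
+// client, i.e. whether it carries the "wasm/" prefix.
+func isWASMClient(clientType string) bool {
+	return strings.HasPrefix(clientType, "wasm/")
+}
+```
+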
+To upload a new light client, a user creates a transaction containing the Wasm bytecode, which will be
+processed by the IBC Wasm module.
+
+```go
+// UploadLightClient stores new light client bytecode (pseudocode sketch).
+func (k Keeper) UploadLightClient(wasmCode []byte, description string) {
+	wasmRegistry := getWASMRegistry()
+	sum := sha256.Sum256(wasmCode)
+	id := hex.EncodeToString(sum[:])
+ assert(!wasmRegistry.Exists(id))
+ assert(wasmRegistry.ValidateAndStoreCode(id, description, wasmCode, false))
+}
+```
+
+As the name implies, the Wasm registry stores the set of uploaded Wasm client code, indexed by its hash, and allows
+the latest uploaded code to be retrieved.
+
+`ValidateAndStoreCode` checks that the uploaded Wasm bytecode is valid and conforms to the VM interface; a sketch of such a registry interface follows.
+
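+The registry interface itself is not spelled out in this ADR. A minimal sketch, assuming the id/description/bytecode
+parameters used by `UploadLightClient` above (names and signatures are illustrative only), might be:
+
+```go
+// WasmRegistry is a hypothetical interface over the store of uploaded client
+// code, keyed by the hex-encoded SHA-256 hash of the bytecode.
+type WasmRegistry interface {
+	// Exists reports whether code with the given id has already been uploaded.
+	Exists(id string) bool
+	// ValidateAndStoreCode validates the bytecode against the VM interface and,
+	// if valid, persists it together with its description; the final argument
+	// indicates whether existing code may be overwritten.
+	ValidateAndStoreCode(id, description string, wasmCode []byte, overwrite bool) bool
+	// GetLatestCode returns the most recently uploaded bytecode for the given id.
+	GetLatestCode(id string) []byte
+}
+```
+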
+### How does the light client proxy work?
+
+Behind the scenes, the light client proxy calls a CosmWasm smart contract instance with the incoming arguments in
+JSON-serialized form, together with the appropriate environment information. The data returned by the smart contract is
+deserialized and returned to the caller.
+
+Consider the `CheckProposedHeaderAndUpdateState` function of the `ClientState` interface as an example. The incoming arguments are
+packaged inside a payload which is JSON-serialized and passed to `callContract`, which calls `vm.Execute` and returns the
+array of bytes produced by the smart contract. This data is deserialized and passed back as the return values.
+
+```go
+func (c *ClientState) CheckProposedHeaderAndUpdateState(context sdk.Context, marshaler codec.BinaryMarshaler, store sdk.KVStore, header exported.Header) (exported.ClientState, exported.ConsensusState, error) {
+ // get consensus state corresponding to client state to check if the client is expired
+ consensusState, err := GetConsensusState(store, marshaler, c.LatestHeight)
+ if err != nil {
+ return nil, nil, sdkerrors.Wrapf(
+ err, "could not get consensus state from clientstore at height: %d", c.LatestHeight,
+ )
+ }
+
+ payload := make(map[string]map[string]interface{})
+ payload[CheckProposedHeaderAndUpdateState] = make(map[string]interface{})
+ inner := payload[CheckProposedHeaderAndUpdateState]
+ inner["me"] = c
+ inner["header"] = header
+ inner["consensus_state"] = consensusState
+
+ encodedData, err := json.Marshal(payload)
+ if err != nil {
+ return nil, nil, sdkerrors.Wrapf(ErrUnableToMarshalPayload, fmt.Sprintf("underlying error: %s", err.Error()))
+ }
+ out, err := callContract(c.CodeId, context, store, encodedData)
+ if err != nil {
+ return nil, nil, sdkerrors.Wrapf(ErrUnableToCall, fmt.Sprintf("underlying error: %s", err.Error()))
+ }
+ output := clientStateCallResponse{}
+ if err := json.Unmarshal(out.Data, &output); err != nil {
+ return nil, nil, sdkerrors.Wrapf(ErrUnableToUnmarshalPayload, fmt.Sprintf("underlying error: %s", err.Error()))
+ }
+ if !output.Result.IsValid {
+		return nil, nil, fmt.Errorf("%s error occurred while updating client state", output.Result.ErrorMsg)
+ }
+ output.resetImmutables(c)
+ return output.NewClientState, output.NewConsensusState, nil
+}
+```
+
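+`callContract` itself is not shown in this ADR. A rough sketch of its shape, assuming a hypothetical `vm.Execute`
+entry point and response type (the real Wasm VM API may differ), is:
+
+```go
+// wasmResponse mirrors the shape the example above relies on: out.Data carries
+// the JSON-encoded result that the caller unmarshals (hypothetical type).
+type wasmResponse struct {
+	Data []byte `json:"data"`
+}
+
+// callContract hands the JSON-encoded payload to the Wasm VM instance
+// identified by codeID and returns its raw response (hypothetical sketch;
+// vm and buildEnv stand in for the module-level VM handle and environment setup).
+func callContract(codeID []byte, ctx sdk.Context, store sdk.KVStore, msg []byte) (*wasmResponse, error) {
+	env := buildEnv(ctx, store) // assumed helper exposing block info and the client store to the VM
+	return vm.Execute(codeID, env, msg)
+}
+```
+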
+## Consequences
+
+### Positive
+- Adding support for a new light client or upgrading an existing one is much easier than before and only requires a single transaction.
+- Improves the maintainability of the Cosmos SDK, since no change to the codebase is required to support a new client or upgrade an existing one.
+
+### Negative
+- Light clients need to be written in a subset of Rust that can compile to Wasm.
+- Introspecting light client code is difficult, as only the compiled bytecode exists on the blockchain.
From f4123347754d814759c9208273fe6772880063cb Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?colin=20axn=C3=A9r?=
<25233464+colin-axner@users.noreply.github.com>
Date: Wed, 12 May 2021 18:18:30 +0200
Subject: [PATCH 060/393] change FrozenSequence to boolean in solo machine
(#169)
* change frozen sequence to is frozen in solo machine
* add changelog
* solo machine protobuf v1 -> v2
* update docs
---
CHANGELOG.md | 1 +
docs/ibc/proto-docs.md | 86 +++---
docs/migrations/ibc-migration-043.md | 4 +-
.../06-solomachine/types/client_state.go | 13 +-
.../06-solomachine/types/client_state_test.go | 98 +-----
.../types/misbehaviour_handle.go | 6 +-
.../types/misbehaviour_handle_test.go | 12 +-
.../06-solomachine/types/proposal_handle.go | 2 +-
.../types/proposal_handle_test.go | 2 +-
.../06-solomachine/types/solomachine.pb.go | 278 +++++++++---------
.../06-solomachine/types/update_test.go | 2 +-
.../solomachine/{v1 => v2}/solomachine.proto | 4 +-
testing/sdk_test.go | 6 +-
13 files changed, 202 insertions(+), 312 deletions(-)
rename proto/ibc/lightclients/solomachine/{v1 => v2}/solomachine.proto (98%)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index ff34fa8d..8119e101 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -45,6 +45,7 @@ Ref: https://keepachangelog.com/en/1.0.0/
### API Breaking
+* (06-solomachine) [\#169](https://github.com/cosmos/ibc-go/pull/169) Change FrozenSequence to boolean in solomachine ClientState. The solo machine proto package has been bumped from `v1` to `v2`.
* (module/core/02-client) [\#165](https://github.com/cosmos/ibc-go/pull/165) Remove GetFrozenHeight from the ClientState interface.
* (modules) [\#166](https://github.com/cosmos/ibc-go/pull/166) Remove GetHeight from the misbehaviour interface. The `consensus_height` attribute has been removed from Misbehaviour events.
* (modules) [\#162](https://github.com/cosmos/ibc-go/pull/162) Remove deprecated Handler types in core IBC and the ICS 20 transfer module.
diff --git a/docs/ibc/proto-docs.md b/docs/ibc/proto-docs.md
index ecacd9f4..67c1741e 100644
--- a/docs/ibc/proto-docs.md
+++ b/docs/ibc/proto-docs.md
@@ -194,25 +194,25 @@
- [ibc/lightclients/localhost/v1/localhost.proto](#ibc/lightclients/localhost/v1/localhost.proto)
- [ClientState](#ibc.lightclients.localhost.v1.ClientState)
-- [ibc/lightclients/solomachine/v1/solomachine.proto](#ibc/lightclients/solomachine/v1/solomachine.proto)
- - [ChannelStateData](#ibc.lightclients.solomachine.v1.ChannelStateData)
- - [ClientState](#ibc.lightclients.solomachine.v1.ClientState)
- - [ClientStateData](#ibc.lightclients.solomachine.v1.ClientStateData)
- - [ConnectionStateData](#ibc.lightclients.solomachine.v1.ConnectionStateData)
- - [ConsensusState](#ibc.lightclients.solomachine.v1.ConsensusState)
- - [ConsensusStateData](#ibc.lightclients.solomachine.v1.ConsensusStateData)
- - [Header](#ibc.lightclients.solomachine.v1.Header)
- - [HeaderData](#ibc.lightclients.solomachine.v1.HeaderData)
- - [Misbehaviour](#ibc.lightclients.solomachine.v1.Misbehaviour)
- - [NextSequenceRecvData](#ibc.lightclients.solomachine.v1.NextSequenceRecvData)
- - [PacketAcknowledgementData](#ibc.lightclients.solomachine.v1.PacketAcknowledgementData)
- - [PacketCommitmentData](#ibc.lightclients.solomachine.v1.PacketCommitmentData)
- - [PacketReceiptAbsenceData](#ibc.lightclients.solomachine.v1.PacketReceiptAbsenceData)
- - [SignBytes](#ibc.lightclients.solomachine.v1.SignBytes)
- - [SignatureAndData](#ibc.lightclients.solomachine.v1.SignatureAndData)
- - [TimestampedSignatureData](#ibc.lightclients.solomachine.v1.TimestampedSignatureData)
+- [ibc/lightclients/solomachine/v2/solomachine.proto](#ibc/lightclients/solomachine/v2/solomachine.proto)
+ - [ChannelStateData](#ibc.lightclients.solomachine.v2.ChannelStateData)
+ - [ClientState](#ibc.lightclients.solomachine.v2.ClientState)
+ - [ClientStateData](#ibc.lightclients.solomachine.v2.ClientStateData)
+ - [ConnectionStateData](#ibc.lightclients.solomachine.v2.ConnectionStateData)
+ - [ConsensusState](#ibc.lightclients.solomachine.v2.ConsensusState)
+ - [ConsensusStateData](#ibc.lightclients.solomachine.v2.ConsensusStateData)
+ - [Header](#ibc.lightclients.solomachine.v2.Header)
+ - [HeaderData](#ibc.lightclients.solomachine.v2.HeaderData)
+ - [Misbehaviour](#ibc.lightclients.solomachine.v2.Misbehaviour)
+ - [NextSequenceRecvData](#ibc.lightclients.solomachine.v2.NextSequenceRecvData)
+ - [PacketAcknowledgementData](#ibc.lightclients.solomachine.v2.PacketAcknowledgementData)
+ - [PacketCommitmentData](#ibc.lightclients.solomachine.v2.PacketCommitmentData)
+ - [PacketReceiptAbsenceData](#ibc.lightclients.solomachine.v2.PacketReceiptAbsenceData)
+ - [SignBytes](#ibc.lightclients.solomachine.v2.SignBytes)
+ - [SignatureAndData](#ibc.lightclients.solomachine.v2.SignatureAndData)
+ - [TimestampedSignatureData](#ibc.lightclients.solomachine.v2.TimestampedSignatureData)
- - [DataType](#ibc.lightclients.solomachine.v1.DataType)
+ - [DataType](#ibc.lightclients.solomachine.v2.DataType)
- [ibc/lightclients/tendermint/v1/tendermint.proto](#ibc/lightclients/tendermint/v1/tendermint.proto)
- [ClientState](#ibc.lightclients.tendermint.v1.ClientState)
@@ -2905,14 +2905,14 @@ access to keys outside the client prefix.
-
+
Top
-## ibc/lightclients/solomachine/v1/solomachine.proto
+## ibc/lightclients/solomachine/v2/solomachine.proto
-
+
### ChannelStateData
ChannelStateData returns the SignBytes data for channel state
@@ -2929,7 +2929,7 @@ verification.
-
+
### ClientState
ClientState defines a solo machine client that tracks the current consensus
@@ -2939,8 +2939,8 @@ state and if the client is frozen.
| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `sequence` | [uint64](#uint64) | | latest sequence of the client state |
-| `frozen_sequence` | [uint64](#uint64) | | frozen sequence of the solo machine |
-| `consensus_state` | [ConsensusState](#ibc.lightclients.solomachine.v1.ConsensusState) | | |
+| `is_frozen` | [bool](#bool) | | frozen sequence of the solo machine |
+| `consensus_state` | [ConsensusState](#ibc.lightclients.solomachine.v2.ConsensusState) | | |
| `allow_update_after_proposal` | [bool](#bool) | | when set to true, will allow governance to update a solo machine client. The client will be unfrozen if it is frozen. |
@@ -2948,7 +2948,7 @@ state and if the client is frozen.
-
+
### ClientStateData
ClientStateData returns the SignBytes data for client state verification.
@@ -2964,7 +2964,7 @@ ClientStateData returns the SignBytes data for client state verification.
-
+
### ConnectionStateData
ConnectionStateData returns the SignBytes data for connection state
@@ -2981,7 +2981,7 @@ verification.
-
+
### ConsensusState
ConsensusState defines a solo machine consensus state. The sequence of a
@@ -3000,7 +3000,7 @@ consensus state.
-
+
### ConsensusStateData
ConsensusStateData returns the SignBytes data for consensus state
@@ -3017,7 +3017,7 @@ verification.
-
+
### Header
Header defines a solo machine consensus header
@@ -3036,7 +3036,7 @@ Header defines a solo machine consensus header
-
+
### HeaderData
HeaderData returns the SignBytes data for update verification.
@@ -3052,7 +3052,7 @@ HeaderData returns the SignBytes data for update verification.
-
+
### Misbehaviour
Misbehaviour defines misbehaviour for a solo machine which consists
@@ -3063,15 +3063,15 @@ of a sequence and two signatures over different messages at that sequence.
| ----- | ---- | ----- | ----------- |
| `client_id` | [string](#string) | | |
| `sequence` | [uint64](#uint64) | | |
-| `signature_one` | [SignatureAndData](#ibc.lightclients.solomachine.v1.SignatureAndData) | | |
-| `signature_two` | [SignatureAndData](#ibc.lightclients.solomachine.v1.SignatureAndData) | | |
+| `signature_one` | [SignatureAndData](#ibc.lightclients.solomachine.v2.SignatureAndData) | | |
+| `signature_two` | [SignatureAndData](#ibc.lightclients.solomachine.v2.SignatureAndData) | | |
-
+
### NextSequenceRecvData
NextSequenceRecvData returns the SignBytes data for verification of the next
@@ -3088,7 +3088,7 @@ sequence to be received.
-
+
### PacketAcknowledgementData
PacketAcknowledgementData returns the SignBytes data for acknowledgement
@@ -3105,7 +3105,7 @@ verification.
-
+
### PacketCommitmentData
PacketCommitmentData returns the SignBytes data for packet commitment
@@ -3122,7 +3122,7 @@ verification.
-
+
### PacketReceiptAbsenceData
PacketReceiptAbsenceData returns the SignBytes data for
@@ -3138,7 +3138,7 @@ packet receipt absence verification.
-
+
### SignBytes
SignBytes defines the signed bytes used for signature verification.
@@ -3149,7 +3149,7 @@ SignBytes defines the signed bytes used for signature verification.
| `sequence` | [uint64](#uint64) | | |
| `timestamp` | [uint64](#uint64) | | |
| `diversifier` | [string](#string) | | |
-| `data_type` | [DataType](#ibc.lightclients.solomachine.v1.DataType) | | type of the data used |
+| `data_type` | [DataType](#ibc.lightclients.solomachine.v2.DataType) | | type of the data used |
| `data` | [bytes](#bytes) | | marshaled data |
@@ -3157,7 +3157,7 @@ SignBytes defines the signed bytes used for signature verification.
-
+
### SignatureAndData
SignatureAndData contains a signature and the data signed over to create that
@@ -3167,7 +3167,7 @@ signature.
| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `signature` | [bytes](#bytes) | | |
-| `data_type` | [DataType](#ibc.lightclients.solomachine.v1.DataType) | | |
+| `data_type` | [DataType](#ibc.lightclients.solomachine.v2.DataType) | | |
| `data` | [bytes](#bytes) | | |
| `timestamp` | [uint64](#uint64) | | |
@@ -3176,7 +3176,7 @@ signature.
-
+
### TimestampedSignatureData
TimestampedSignatureData contains the signature data and the timestamp of the
@@ -3195,7 +3195,7 @@ signature.
-
+
### DataType
DataType defines the type of solo machine proof being created. This is done
diff --git a/docs/migrations/ibc-migration-043.md b/docs/migrations/ibc-migration-043.md
index 7ac0c292..194cca4c 100644
--- a/docs/migrations/ibc-migration-043.md
+++ b/docs/migrations/ibc-migration-043.md
@@ -94,7 +94,9 @@ REST routes are not supported for these proposals.
## Proto file changes
-The gRPC querier service endpoints have changed slightly. The previous files used `v1beta1`, this has been updated to `v1`.
+The gRPC querier service endpoints have changed slightly. The previous files used the `v1beta1` gRPC route; this has been updated to `v1`.
+
+The solo machine has replaced the `FrozenSequence` uint64 field with an `IsFrozen` boolean field. The package has been bumped from `v1` to `v2`.
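+
+For illustration, the relevant solo machine `ClientState` fields change roughly as follows (other fields elided):
+
+```go
+// v1 (old): a non-zero FrozenSequence meant the client was frozen
+FrozenSequence uint64
+
+// v2 (new): a simple boolean flag
+IsFrozen bool
+```
+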
## IBC callback changes
diff --git a/modules/light-clients/06-solomachine/types/client_state.go b/modules/light-clients/06-solomachine/types/client_state.go
index fc6b69ff..dd07273e 100644
--- a/modules/light-clients/06-solomachine/types/client_state.go
+++ b/modules/light-clients/06-solomachine/types/client_state.go
@@ -22,7 +22,7 @@ var _ exported.ClientState = (*ClientState)(nil)
func NewClientState(latestSequence uint64, consensusState *ConsensusState, allowUpdateAfterProposal bool) *ClientState {
return &ClientState{
Sequence: latestSequence,
- FrozenSequence: 0,
+ IsFrozen: false,
ConsensusState: consensusState,
AllowUpdateAfterProposal: allowUpdateAfterProposal,
}
@@ -45,18 +45,13 @@ func (cs ClientState) GetLatestHeight() exported.Height {
// - Active: if frozen sequence is 0
// - Frozen: otherwise solo machine is frozen
func (cs ClientState) Status(_ sdk.Context, _ sdk.KVStore, _ codec.BinaryCodec) exported.Status {
- if cs.FrozenSequence != 0 {
+ if cs.IsFrozen {
return exported.Frozen
}
return exported.Active
}
-// IsFrozen returns true if the client is frozen.
-func (cs ClientState) IsFrozen() bool {
- return cs.FrozenSequence != 0
-}
-
// GetProofSpecs returns nil proof specs since client state verification uses signatures.
func (cs ClientState) GetProofSpecs() []*ics23.ProofSpec {
return nil
@@ -438,10 +433,6 @@ func produceVerificationArgs(
}
// sequence is encoded in the revision height of height struct
sequence := height.GetRevisionHeight()
- if cs.IsFrozen() {
- return nil, nil, 0, 0, clienttypes.ErrClientFrozen
- }
-
if prefix == nil {
return nil, nil, 0, 0, sdkerrors.Wrap(commitmenttypes.ErrInvalidPrefix, "prefix cannot be empty")
}
diff --git a/modules/light-clients/06-solomachine/types/client_state_test.go b/modules/light-clients/06-solomachine/types/client_state_test.go
index c2907c0d..82b9ac57 100644
--- a/modules/light-clients/06-solomachine/types/client_state_test.go
+++ b/modules/light-clients/06-solomachine/types/client_state_test.go
@@ -32,7 +32,7 @@ func (suite *SoloMachineTestSuite) TestStatus() {
suite.Require().Equal(exported.Active, status)
// freeze solo machine
- clientState.FrozenSequence = 1
+ clientState.IsFrozen = true
status = clientState.Status(suite.chainA.GetContext(), nil, nil)
suite.Require().Equal(exported.Frozen, status)
}
@@ -188,18 +188,6 @@ func (suite *SoloMachineTestSuite) TestVerifyClientState() {
proof,
false,
},
- {
- "client is frozen",
- &types.ClientState{
- Sequence: 1,
- FrozenSequence: 1,
- ConsensusState: solomachine.ConsensusState(),
- AllowUpdateAfterProposal: false,
- },
- prefix,
- proof,
- false,
- },
{
"consensus state in client state is nil",
types.NewClientState(1, nil, false),
@@ -321,18 +309,6 @@ func (suite *SoloMachineTestSuite) TestVerifyClientConsensusState() {
proof,
false,
},
- {
- "client is frozen",
- &types.ClientState{
- Sequence: 1,
- FrozenSequence: 1,
- ConsensusState: solomachine.ConsensusState(),
- AllowUpdateAfterProposal: false,
- },
- prefix,
- proof,
- false,
- },
{
"consensus state in client state is nil",
types.NewClientState(1, nil, false),
@@ -450,18 +426,6 @@ func (suite *SoloMachineTestSuite) TestVerifyConnectionState() {
proof,
false,
},
- {
- "client is frozen",
- &types.ClientState{
- Sequence: 1,
- FrozenSequence: 1,
- ConsensusState: solomachine.ConsensusState(),
- AllowUpdateAfterProposal: false,
- },
- prefix,
- proof,
- false,
- },
{
"proof is nil",
solomachine.ClientState(),
@@ -540,18 +504,6 @@ func (suite *SoloMachineTestSuite) TestVerifyChannelState() {
proof,
false,
},
- {
- "client is frozen",
- &types.ClientState{
- Sequence: 1,
- FrozenSequence: 1,
- ConsensusState: solomachine.ConsensusState(),
- AllowUpdateAfterProposal: false,
- },
- prefix,
- proof,
- false,
- },
{
"proof is nil",
solomachine.ClientState(),
@@ -629,18 +581,6 @@ func (suite *SoloMachineTestSuite) TestVerifyPacketCommitment() {
proof,
false,
},
- {
- "client is frozen",
- &types.ClientState{
- Sequence: 1,
- FrozenSequence: 1,
- ConsensusState: solomachine.ConsensusState(),
- AllowUpdateAfterProposal: false,
- },
- prefix,
- proof,
- false,
- },
{
"proof is nil",
solomachine.ClientState(),
@@ -716,18 +656,6 @@ func (suite *SoloMachineTestSuite) TestVerifyPacketAcknowledgement() {
proof,
false,
},
- {
- "client is frozen",
- &types.ClientState{
- Sequence: 1,
- FrozenSequence: 1,
- ConsensusState: solomachine.ConsensusState(),
- AllowUpdateAfterProposal: false,
- },
- prefix,
- proof,
- false,
- },
{
"proof is nil",
solomachine.ClientState(),
@@ -803,18 +731,6 @@ func (suite *SoloMachineTestSuite) TestVerifyPacketReceiptAbsence() {
proof,
false,
},
- {
- "client is frozen",
- &types.ClientState{
- Sequence: 1,
- FrozenSequence: 1,
- ConsensusState: solomachine.ConsensusState(),
- AllowUpdateAfterProposal: false,
- },
- prefix,
- proof,
- false,
- },
{
"proof is nil",
solomachine.ClientState(),
@@ -890,18 +806,6 @@ func (suite *SoloMachineTestSuite) TestVerifyNextSeqRecv() {
proof,
false,
},
- {
- "client is frozen",
- &types.ClientState{
- Sequence: 1,
- FrozenSequence: 1,
- ConsensusState: solomachine.ConsensusState(),
- AllowUpdateAfterProposal: false,
- },
- prefix,
- proof,
- false,
- },
{
"proof is nil",
solomachine.ClientState(),
diff --git a/modules/light-clients/06-solomachine/types/misbehaviour_handle.go b/modules/light-clients/06-solomachine/types/misbehaviour_handle.go
index b50c3883..dd0188d3 100644
--- a/modules/light-clients/06-solomachine/types/misbehaviour_handle.go
+++ b/modules/light-clients/06-solomachine/types/misbehaviour_handle.go
@@ -29,10 +29,6 @@ func (cs ClientState) CheckMisbehaviourAndUpdateState(
)
}
- if cs.IsFrozen() {
- return nil, sdkerrors.Wrapf(clienttypes.ErrClientFrozen, "client is already frozen")
- }
-
// NOTE: a check that the misbehaviour message data are not equal is done by
// misbehaviour.ValidateBasic which is called by the 02-client keeper.
@@ -46,7 +42,7 @@ func (cs ClientState) CheckMisbehaviourAndUpdateState(
return nil, sdkerrors.Wrap(err, "failed to verify signature two")
}
- cs.FrozenSequence = soloMisbehaviour.Sequence
+ cs.IsFrozen = true
return &cs, nil
}
diff --git a/modules/light-clients/06-solomachine/types/misbehaviour_handle_test.go b/modules/light-clients/06-solomachine/types/misbehaviour_handle_test.go
index cb13f4a7..b5945656 100644
--- a/modules/light-clients/06-solomachine/types/misbehaviour_handle_test.go
+++ b/modules/light-clients/06-solomachine/types/misbehaviour_handle_test.go
@@ -37,16 +37,6 @@ func (suite *SoloMachineTestSuite) TestCheckMisbehaviourAndUpdateState() {
misbehaviour = solomachine.CreateMisbehaviour()
}, true,
},
- {
- "client is frozen",
- func() {
- cs := solomachine.ClientState()
- cs.FrozenSequence = 1
- clientState = cs
- misbehaviour = solomachine.CreateMisbehaviour()
- },
- false,
- },
{
"wrong client state type",
func() {
@@ -264,7 +254,7 @@ func (suite *SoloMachineTestSuite) TestCheckMisbehaviourAndUpdateState() {
if tc.expPass {
suite.Require().NoError(err)
- suite.Require().True(clientState.(*types.ClientState).FrozenSequence != 0, "client not frozen")
+ suite.Require().True(clientState.(*types.ClientState).IsFrozen, "client not frozen")
} else {
suite.Require().Error(err)
suite.Require().Nil(clientState)
diff --git a/modules/light-clients/06-solomachine/types/proposal_handle.go b/modules/light-clients/06-solomachine/types/proposal_handle.go
index 342ab2e6..a4a89006 100644
--- a/modules/light-clients/06-solomachine/types/proposal_handle.go
+++ b/modules/light-clients/06-solomachine/types/proposal_handle.go
@@ -58,7 +58,7 @@ func (cs ClientState) CheckSubstituteAndUpdateState(
// update to substitute parameters
clientState.Sequence = substituteClientState.Sequence
clientState.ConsensusState = substituteClientState.ConsensusState
- clientState.FrozenSequence = 0
+ clientState.IsFrozen = false
return clientState, nil
}
diff --git a/modules/light-clients/06-solomachine/types/proposal_handle_test.go b/modules/light-clients/06-solomachine/types/proposal_handle_test.go
index db99bbe2..bc8e69ac 100644
--- a/modules/light-clients/06-solomachine/types/proposal_handle_test.go
+++ b/modules/light-clients/06-solomachine/types/proposal_handle_test.go
@@ -77,7 +77,7 @@ func (suite *SoloMachineTestSuite) TestCheckSubstituteAndUpdateState() {
suite.Require().Equal(substituteClientState.(*types.ClientState).ConsensusState, updatedClient.(*types.ClientState).ConsensusState)
suite.Require().Equal(substituteClientState.(*types.ClientState).Sequence, updatedClient.(*types.ClientState).Sequence)
- suite.Require().Equal(uint64(0), updatedClient.(*types.ClientState).FrozenSequence)
+ suite.Require().Equal(false, updatedClient.(*types.ClientState).IsFrozen)
} else {
suite.Require().Error(err)
suite.Require().Nil(updatedClient)
diff --git a/modules/light-clients/06-solomachine/types/solomachine.pb.go b/modules/light-clients/06-solomachine/types/solomachine.pb.go
index 1bdca7b8..aa050302 100644
--- a/modules/light-clients/06-solomachine/types/solomachine.pb.go
+++ b/modules/light-clients/06-solomachine/types/solomachine.pb.go
@@ -1,5 +1,5 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: ibc/lightclients/solomachine/v1/solomachine.proto
+// source: ibc/lightclients/solomachine/v2/solomachine.proto
package types
@@ -84,7 +84,7 @@ func (x DataType) String() string {
}
func (DataType) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_6cc2ee18f7f86d4e, []int{0}
+ return fileDescriptor_141333b361aae010, []int{0}
}
// ClientState defines a solo machine client that tracks the current consensus
@@ -93,7 +93,7 @@ type ClientState struct {
// latest sequence of the client state
Sequence uint64 `protobuf:"varint,1,opt,name=sequence,proto3" json:"sequence,omitempty"`
// frozen sequence of the solo machine
- FrozenSequence uint64 `protobuf:"varint,2,opt,name=frozen_sequence,json=frozenSequence,proto3" json:"frozen_sequence,omitempty" yaml:"frozen_sequence"`
+ IsFrozen bool `protobuf:"varint,2,opt,name=is_frozen,json=isFrozen,proto3" json:"is_frozen,omitempty" yaml:"is_frozen"`
ConsensusState *ConsensusState `protobuf:"bytes,3,opt,name=consensus_state,json=consensusState,proto3" json:"consensus_state,omitempty" yaml:"consensus_state"`
// when set to true, will allow governance to update a solo machine client.
// The client will be unfrozen if it is frozen.
@@ -104,7 +104,7 @@ func (m *ClientState) Reset() { *m = ClientState{} }
func (m *ClientState) String() string { return proto.CompactTextString(m) }
func (*ClientState) ProtoMessage() {}
func (*ClientState) Descriptor() ([]byte, []int) {
- return fileDescriptor_6cc2ee18f7f86d4e, []int{0}
+ return fileDescriptor_141333b361aae010, []int{0}
}
func (m *ClientState) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -150,7 +150,7 @@ func (m *ConsensusState) Reset() { *m = ConsensusState{} }
func (m *ConsensusState) String() string { return proto.CompactTextString(m) }
func (*ConsensusState) ProtoMessage() {}
func (*ConsensusState) Descriptor() ([]byte, []int) {
- return fileDescriptor_6cc2ee18f7f86d4e, []int{1}
+ return fileDescriptor_141333b361aae010, []int{1}
}
func (m *ConsensusState) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -193,7 +193,7 @@ func (m *Header) Reset() { *m = Header{} }
func (m *Header) String() string { return proto.CompactTextString(m) }
func (*Header) ProtoMessage() {}
func (*Header) Descriptor() ([]byte, []int) {
- return fileDescriptor_6cc2ee18f7f86d4e, []int{2}
+ return fileDescriptor_141333b361aae010, []int{2}
}
func (m *Header) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -235,7 +235,7 @@ func (m *Misbehaviour) Reset() { *m = Misbehaviour{} }
func (m *Misbehaviour) String() string { return proto.CompactTextString(m) }
func (*Misbehaviour) ProtoMessage() {}
func (*Misbehaviour) Descriptor() ([]byte, []int) {
- return fileDescriptor_6cc2ee18f7f86d4e, []int{3}
+ return fileDescriptor_141333b361aae010, []int{3}
}
func (m *Misbehaviour) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -268,7 +268,7 @@ var xxx_messageInfo_Misbehaviour proto.InternalMessageInfo
// signature.
type SignatureAndData struct {
Signature []byte `protobuf:"bytes,1,opt,name=signature,proto3" json:"signature,omitempty"`
- DataType DataType `protobuf:"varint,2,opt,name=data_type,json=dataType,proto3,enum=ibc.lightclients.solomachine.v1.DataType" json:"data_type,omitempty" yaml:"data_type"`
+ DataType DataType `protobuf:"varint,2,opt,name=data_type,json=dataType,proto3,enum=ibc.lightclients.solomachine.v2.DataType" json:"data_type,omitempty" yaml:"data_type"`
Data []byte `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"`
Timestamp uint64 `protobuf:"varint,4,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
}
@@ -277,7 +277,7 @@ func (m *SignatureAndData) Reset() { *m = SignatureAndData{} }
func (m *SignatureAndData) String() string { return proto.CompactTextString(m) }
func (*SignatureAndData) ProtoMessage() {}
func (*SignatureAndData) Descriptor() ([]byte, []int) {
- return fileDescriptor_6cc2ee18f7f86d4e, []int{4}
+ return fileDescriptor_141333b361aae010, []int{4}
}
func (m *SignatureAndData) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -317,7 +317,7 @@ func (m *TimestampedSignatureData) Reset() { *m = TimestampedSignatureDa
func (m *TimestampedSignatureData) String() string { return proto.CompactTextString(m) }
func (*TimestampedSignatureData) ProtoMessage() {}
func (*TimestampedSignatureData) Descriptor() ([]byte, []int) {
- return fileDescriptor_6cc2ee18f7f86d4e, []int{5}
+ return fileDescriptor_141333b361aae010, []int{5}
}
func (m *TimestampedSignatureData) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -352,7 +352,7 @@ type SignBytes struct {
Timestamp uint64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
Diversifier string `protobuf:"bytes,3,opt,name=diversifier,proto3" json:"diversifier,omitempty"`
// type of the data used
- DataType DataType `protobuf:"varint,4,opt,name=data_type,json=dataType,proto3,enum=ibc.lightclients.solomachine.v1.DataType" json:"data_type,omitempty" yaml:"data_type"`
+ DataType DataType `protobuf:"varint,4,opt,name=data_type,json=dataType,proto3,enum=ibc.lightclients.solomachine.v2.DataType" json:"data_type,omitempty" yaml:"data_type"`
// marshaled data
Data []byte `protobuf:"bytes,5,opt,name=data,proto3" json:"data,omitempty"`
}
@@ -361,7 +361,7 @@ func (m *SignBytes) Reset() { *m = SignBytes{} }
func (m *SignBytes) String() string { return proto.CompactTextString(m) }
func (*SignBytes) ProtoMessage() {}
func (*SignBytes) Descriptor() ([]byte, []int) {
- return fileDescriptor_6cc2ee18f7f86d4e, []int{6}
+ return fileDescriptor_141333b361aae010, []int{6}
}
func (m *SignBytes) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -402,7 +402,7 @@ func (m *HeaderData) Reset() { *m = HeaderData{} }
func (m *HeaderData) String() string { return proto.CompactTextString(m) }
func (*HeaderData) ProtoMessage() {}
func (*HeaderData) Descriptor() ([]byte, []int) {
- return fileDescriptor_6cc2ee18f7f86d4e, []int{7}
+ return fileDescriptor_141333b361aae010, []int{7}
}
func (m *HeaderData) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -441,7 +441,7 @@ func (m *ClientStateData) Reset() { *m = ClientStateData{} }
func (m *ClientStateData) String() string { return proto.CompactTextString(m) }
func (*ClientStateData) ProtoMessage() {}
func (*ClientStateData) Descriptor() ([]byte, []int) {
- return fileDescriptor_6cc2ee18f7f86d4e, []int{8}
+ return fileDescriptor_141333b361aae010, []int{8}
}
func (m *ClientStateData) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -481,7 +481,7 @@ func (m *ConsensusStateData) Reset() { *m = ConsensusStateData{} }
func (m *ConsensusStateData) String() string { return proto.CompactTextString(m) }
func (*ConsensusStateData) ProtoMessage() {}
func (*ConsensusStateData) Descriptor() ([]byte, []int) {
- return fileDescriptor_6cc2ee18f7f86d4e, []int{9}
+ return fileDescriptor_141333b361aae010, []int{9}
}
func (m *ConsensusStateData) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -521,7 +521,7 @@ func (m *ConnectionStateData) Reset() { *m = ConnectionStateData{} }
func (m *ConnectionStateData) String() string { return proto.CompactTextString(m) }
func (*ConnectionStateData) ProtoMessage() {}
func (*ConnectionStateData) Descriptor() ([]byte, []int) {
- return fileDescriptor_6cc2ee18f7f86d4e, []int{10}
+ return fileDescriptor_141333b361aae010, []int{10}
}
func (m *ConnectionStateData) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -561,7 +561,7 @@ func (m *ChannelStateData) Reset() { *m = ChannelStateData{} }
func (m *ChannelStateData) String() string { return proto.CompactTextString(m) }
func (*ChannelStateData) ProtoMessage() {}
func (*ChannelStateData) Descriptor() ([]byte, []int) {
- return fileDescriptor_6cc2ee18f7f86d4e, []int{11}
+ return fileDescriptor_141333b361aae010, []int{11}
}
func (m *ChannelStateData) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -601,7 +601,7 @@ func (m *PacketCommitmentData) Reset() { *m = PacketCommitmentData{} }
func (m *PacketCommitmentData) String() string { return proto.CompactTextString(m) }
func (*PacketCommitmentData) ProtoMessage() {}
func (*PacketCommitmentData) Descriptor() ([]byte, []int) {
- return fileDescriptor_6cc2ee18f7f86d4e, []int{12}
+ return fileDescriptor_141333b361aae010, []int{12}
}
func (m *PacketCommitmentData) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -655,7 +655,7 @@ func (m *PacketAcknowledgementData) Reset() { *m = PacketAcknowledgement
func (m *PacketAcknowledgementData) String() string { return proto.CompactTextString(m) }
func (*PacketAcknowledgementData) ProtoMessage() {}
func (*PacketAcknowledgementData) Descriptor() ([]byte, []int) {
- return fileDescriptor_6cc2ee18f7f86d4e, []int{13}
+ return fileDescriptor_141333b361aae010, []int{13}
}
func (m *PacketAcknowledgementData) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -708,7 +708,7 @@ func (m *PacketReceiptAbsenceData) Reset() { *m = PacketReceiptAbsenceDa
func (m *PacketReceiptAbsenceData) String() string { return proto.CompactTextString(m) }
func (*PacketReceiptAbsenceData) ProtoMessage() {}
func (*PacketReceiptAbsenceData) Descriptor() ([]byte, []int) {
- return fileDescriptor_6cc2ee18f7f86d4e, []int{14}
+ return fileDescriptor_141333b361aae010, []int{14}
}
func (m *PacketReceiptAbsenceData) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -755,7 +755,7 @@ func (m *NextSequenceRecvData) Reset() { *m = NextSequenceRecvData{} }
func (m *NextSequenceRecvData) String() string { return proto.CompactTextString(m) }
func (*NextSequenceRecvData) ProtoMessage() {}
func (*NextSequenceRecvData) Descriptor() ([]byte, []int) {
- return fileDescriptor_6cc2ee18f7f86d4e, []int{15}
+ return fileDescriptor_141333b361aae010, []int{15}
}
func (m *NextSequenceRecvData) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -799,117 +799,117 @@ func (m *NextSequenceRecvData) GetNextSeqRecv() uint64 {
}
func init() {
- proto.RegisterEnum("ibc.lightclients.solomachine.v1.DataType", DataType_name, DataType_value)
- proto.RegisterType((*ClientState)(nil), "ibc.lightclients.solomachine.v1.ClientState")
- proto.RegisterType((*ConsensusState)(nil), "ibc.lightclients.solomachine.v1.ConsensusState")
- proto.RegisterType((*Header)(nil), "ibc.lightclients.solomachine.v1.Header")
- proto.RegisterType((*Misbehaviour)(nil), "ibc.lightclients.solomachine.v1.Misbehaviour")
- proto.RegisterType((*SignatureAndData)(nil), "ibc.lightclients.solomachine.v1.SignatureAndData")
- proto.RegisterType((*TimestampedSignatureData)(nil), "ibc.lightclients.solomachine.v1.TimestampedSignatureData")
- proto.RegisterType((*SignBytes)(nil), "ibc.lightclients.solomachine.v1.SignBytes")
- proto.RegisterType((*HeaderData)(nil), "ibc.lightclients.solomachine.v1.HeaderData")
- proto.RegisterType((*ClientStateData)(nil), "ibc.lightclients.solomachine.v1.ClientStateData")
- proto.RegisterType((*ConsensusStateData)(nil), "ibc.lightclients.solomachine.v1.ConsensusStateData")
- proto.RegisterType((*ConnectionStateData)(nil), "ibc.lightclients.solomachine.v1.ConnectionStateData")
- proto.RegisterType((*ChannelStateData)(nil), "ibc.lightclients.solomachine.v1.ChannelStateData")
- proto.RegisterType((*PacketCommitmentData)(nil), "ibc.lightclients.solomachine.v1.PacketCommitmentData")
- proto.RegisterType((*PacketAcknowledgementData)(nil), "ibc.lightclients.solomachine.v1.PacketAcknowledgementData")
- proto.RegisterType((*PacketReceiptAbsenceData)(nil), "ibc.lightclients.solomachine.v1.PacketReceiptAbsenceData")
- proto.RegisterType((*NextSequenceRecvData)(nil), "ibc.lightclients.solomachine.v1.NextSequenceRecvData")
+ proto.RegisterEnum("ibc.lightclients.solomachine.v2.DataType", DataType_name, DataType_value)
+ proto.RegisterType((*ClientState)(nil), "ibc.lightclients.solomachine.v2.ClientState")
+ proto.RegisterType((*ConsensusState)(nil), "ibc.lightclients.solomachine.v2.ConsensusState")
+ proto.RegisterType((*Header)(nil), "ibc.lightclients.solomachine.v2.Header")
+ proto.RegisterType((*Misbehaviour)(nil), "ibc.lightclients.solomachine.v2.Misbehaviour")
+ proto.RegisterType((*SignatureAndData)(nil), "ibc.lightclients.solomachine.v2.SignatureAndData")
+ proto.RegisterType((*TimestampedSignatureData)(nil), "ibc.lightclients.solomachine.v2.TimestampedSignatureData")
+ proto.RegisterType((*SignBytes)(nil), "ibc.lightclients.solomachine.v2.SignBytes")
+ proto.RegisterType((*HeaderData)(nil), "ibc.lightclients.solomachine.v2.HeaderData")
+ proto.RegisterType((*ClientStateData)(nil), "ibc.lightclients.solomachine.v2.ClientStateData")
+ proto.RegisterType((*ConsensusStateData)(nil), "ibc.lightclients.solomachine.v2.ConsensusStateData")
+ proto.RegisterType((*ConnectionStateData)(nil), "ibc.lightclients.solomachine.v2.ConnectionStateData")
+ proto.RegisterType((*ChannelStateData)(nil), "ibc.lightclients.solomachine.v2.ChannelStateData")
+ proto.RegisterType((*PacketCommitmentData)(nil), "ibc.lightclients.solomachine.v2.PacketCommitmentData")
+ proto.RegisterType((*PacketAcknowledgementData)(nil), "ibc.lightclients.solomachine.v2.PacketAcknowledgementData")
+ proto.RegisterType((*PacketReceiptAbsenceData)(nil), "ibc.lightclients.solomachine.v2.PacketReceiptAbsenceData")
+ proto.RegisterType((*NextSequenceRecvData)(nil), "ibc.lightclients.solomachine.v2.NextSequenceRecvData")
}
func init() {
- proto.RegisterFile("ibc/lightclients/solomachine/v1/solomachine.proto", fileDescriptor_6cc2ee18f7f86d4e)
-}
-
-var fileDescriptor_6cc2ee18f7f86d4e = []byte{
- // 1364 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x57, 0xdf, 0x8e, 0xdb, 0x54,
- 0x13, 0x5f, 0xa7, 0xe9, 0x76, 0x33, 0xd9, 0x66, 0xf3, 0xb9, 0x69, 0x9b, 0x75, 0xab, 0xc4, 0x9f,
- 0x11, 0x65, 0x41, 0x34, 0x61, 0x8b, 0xa8, 0x50, 0x85, 0x00, 0xc7, 0x31, 0x34, 0xed, 0xae, 0x37,
- 0x38, 0x5e, 0xa0, 0x15, 0xc8, 0x72, 0x9c, 0xb3, 0x89, 0xd5, 0xc4, 0x27, 0xc4, 0x4e, 0xd2, 0x20,
- 0x21, 0x21, 0xae, 0x4a, 0xc4, 0x05, 0x2f, 0x10, 0x09, 0x81, 0x78, 0x15, 0xe0, 0xb2, 0xdc, 0x71,
- 0x15, 0x50, 0xfb, 0x06, 0x79, 0x02, 0x64, 0x9f, 0x93, 0xd8, 0xce, 0xee, 0x66, 0xc5, 0xbf, 0xbb,
- 0x73, 0x66, 0x7e, 0xf3, 0x9b, 0x39, 0x33, 0xe3, 0x39, 0xc7, 0xb0, 0x6b, 0xd5, 0xcd, 0x62, 0xdb,
- 0x6a, 0xb6, 0x5c, 0xb3, 0x6d, 0x21, 0xdb, 0x75, 0x8a, 0x0e, 0x6e, 0xe3, 0x8e, 0x61, 0xb6, 0x2c,
- 0x1b, 0x15, 0x07, 0xbb, 0xe1, 0x6d, 0xa1, 0xdb, 0xc3, 0x2e, 0x66, 0xf3, 0x56, 0xdd, 0x2c, 0x84,
- 0x4d, 0x0a, 0x61, 0xcc, 0x60, 0x97, 0x7b, 0xc9, 0xe3, 0x34, 0x71, 0x0f, 0x15, 0x4d, 0x6c, 0xdb,
- 0xc8, 0x74, 0x2d, 0x6c, 0x7b, 0x54, 0xc1, 0x8e, 0x30, 0x71, 0xff, 0x0f, 0x80, 0x2d, 0xc3, 0xb6,
- 0x51, 0xdb, 0x47, 0x91, 0x25, 0x85, 0x64, 0x9a, 0xb8, 0x89, 0xfd, 0x65, 0xd1, 0x5b, 0x51, 0xe9,
- 0x76, 0x13, 0xe3, 0x66, 0x1b, 0x15, 0xfd, 0x5d, 0xbd, 0x7f, 0x54, 0x34, 0xec, 0x11, 0x51, 0x09,
- 0xbf, 0xc6, 0x20, 0x29, 0xf9, 0x71, 0xd5, 0x5c, 0xc3, 0x45, 0x2c, 0x07, 0x1b, 0x0e, 0xfa, 0xac,
- 0x8f, 0x6c, 0x13, 0x65, 0x19, 0x9e, 0xd9, 0x89, 0xab, 0x8b, 0x3d, 0x2b, 0xc1, 0xd6, 0x51, 0x0f,
- 0x7f, 0x8e, 0x6c, 0x7d, 0x01, 0x89, 0x79, 0x90, 0x12, 0x37, 0x9b, 0xe6, 0xaf, 0x8c, 0x8c, 0x4e,
- 0xfb, 0x8e, 0xb0, 0x04, 0x10, 0xd4, 0x14, 0x91, 0xd4, 0xe6, 0x24, 0x2e, 0x6c, 0x99, 0xd8, 0x76,
- 0x90, 0xed, 0xf4, 0x1d, 0xdd, 0xf1, 0x7c, 0x66, 0xcf, 0xf1, 0xcc, 0x4e, 0xf2, 0x56, 0xb1, 0x70,
- 0x46, 0xa2, 0x0a, 0xd2, 0xdc, 0xce, 0x0f, 0x35, 0xec, 0x75, 0x89, 0x51, 0x50, 0x53, 0x66, 0x04,
- 0xcb, 0x22, 0xb8, 0x66, 0xb4, 0xdb, 0x78, 0xa8, 0xf7, 0xbb, 0x0d, 0xc3, 0x45, 0xba, 0x71, 0xe4,
- 0xa2, 0x9e, 0xde, 0xed, 0xe1, 0x2e, 0x76, 0x8c, 0x76, 0x36, 0xce, 0x33, 0x3b, 0x1b, 0xa5, 0x1b,
- 0xb3, 0x69, 0x5e, 0x20, 0x84, 0x2b, 0xc0, 0x82, 0x9a, 0xf5, 0xb5, 0x87, 0xbe, 0x52, 0xf4, 0x74,
- 0x55, 0xaa, 0xba, 0x13, 0x7f, 0xf2, 0x5d, 0x7e, 0x4d, 0xf8, 0x9e, 0x81, 0x54, 0x34, 0x56, 0xf6,
- 0x1e, 0x40, 0xb7, 0x5f, 0x6f, 0x5b, 0xa6, 0xfe, 0x08, 0x8d, 0xfc, 0xc4, 0x26, 0x6f, 0x65, 0x0a,
- 0xa4, 0x2c, 0x85, 0x79, 0x59, 0x0a, 0xa2, 0x3d, 0x2a, 0x5d, 0x9e, 0x4d, 0xf3, 0xff, 0x23, 0x41,
- 0x04, 0x16, 0x82, 0x9a, 0x20, 0x9b, 0xfb, 0x68, 0xc4, 0xf2, 0x90, 0x6c, 0x58, 0x03, 0xd4, 0x73,
- 0xac, 0x23, 0x0b, 0xf5, 0xfc, 0x12, 0x24, 0xd4, 0xb0, 0x88, 0xbd, 0x0e, 0x09, 0xd7, 0xea, 0x20,
- 0xc7, 0x35, 0x3a, 0x5d, 0x3f, 0xbb, 0x71, 0x35, 0x10, 0xd0, 0x20, 0xbf, 0x8a, 0xc1, 0xfa, 0x5d,
- 0x64, 0x34, 0x50, 0x6f, 0x65, 0xcd, 0x23, 0x54, 0xb1, 0x25, 0x2a, 0x4f, 0xeb, 0x58, 0x4d, 0xdb,
- 0x70, 0xfb, 0x3d, 0x52, 0xc6, 0x4d, 0x35, 0x10, 0xb0, 0x87, 0x90, 0xb2, 0xd1, 0x50, 0x0f, 0x1d,
- 0x3c, 0xbe, 0xe2, 0xe0, 0xdb, 0xb3, 0x69, 0xfe, 0x32, 0x39, 0x78, 0xd4, 0x4a, 0x50, 0x37, 0x6d,
- 0x34, 0xac, 0x2e, 0xce, 0x2f, 0xc1, 0x96, 0x07, 0x08, 0xe7, 0xe0, 0xbc, 0x97, 0x83, 0x70, 0x43,
- 0x2c, 0x01, 0x04, 0xd5, 0x8b, 0xa4, 0x1c, 0x08, 0x68, 0x12, 0x7e, 0x8e, 0xc1, 0xe6, 0xbe, 0xe5,
- 0xd4, 0x51, 0xcb, 0x18, 0x58, 0xb8, 0xdf, 0x63, 0x77, 0x21, 0x41, 0x9a, 0x4f, 0xb7, 0x1a, 0x7e,
- 0x2e, 0x12, 0xa5, 0xcc, 0x6c, 0x9a, 0x4f, 0xd3, 0x36, 0x9b, 0xab, 0x04, 0x75, 0x83, 0xac, 0x2b,
- 0x8d, 0x48, 0xf6, 0x62, 0x4b, 0xd9, 0xeb, 0xc2, 0xc5, 0x45, 0x3a, 0x74, 0x6c, 0xcf, 0x5b, 0x7d,
- 0xf7, 0xcc, 0x56, 0xaf, 0xcd, 0xad, 0x44, 0xbb, 0x51, 0x36, 0x5c, 0xa3, 0x94, 0x9d, 0x4d, 0xf3,
- 0x19, 0x12, 0x45, 0x84, 0x51, 0x50, 0x37, 0x17, 0xfb, 0x03, 0x7b, 0xc9, 0xa3, 0x3b, 0xc4, 0x34,
- 0xe5, 0xff, 0x96, 0x47, 0x77, 0x88, 0xc3, 0x1e, 0xb5, 0x21, 0xa6, 0x99, 0xfc, 0x89, 0x81, 0xf4,
- 0x32, 0x45, 0xb4, 0x3d, 0x98, 0xe5, 0xf6, 0xf8, 0x04, 0x12, 0x0d, 0xc3, 0x35, 0x74, 0x77, 0xd4,
- 0x25, 0x99, 0x4b, 0xdd, 0x7a, 0xf9, 0xcc, 0x30, 0x3d, 0x5e, 0x6d, 0xd4, 0x45, 0xe1, 0xb2, 0x2c,
- 0x58, 0x04, 0x75, 0xa3, 0x41, 0xf5, 0x2c, 0x0b, 0x71, 0x6f, 0x4d, 0xbb, 0xd2, 0x5f, 0x47, 0x9b,
- 0x39, 0x7e, 0xf2, 0x77, 0xf1, 0x25, 0x03, 0x59, 0x6d, 0x2e, 0x43, 0x8d, 0xc5, 0x99, 0xfc, 0x03,
- 0xbd, 0x0b, 0xa9, 0x20, 0x17, 0x3e, 0xbd, 0x7f, 0xaa, 0x70, 0xef, 0x46, 0xf5, 0x82, 0x1a, 0x94,
- 0xa3, 0x7c, 0x2c, 0x84, 0xd8, 0xc9, 0x21, 0xfc, 0xce, 0x40, 0xc2, 0xf3, 0x5b, 0x1a, 0xb9, 0xc8,
- 0xf9, 0x07, 0x5f, 0xe7, 0xd2, 0xa0, 0x38, 0x77, 0x7c, 0x50, 0x44, 0x4a, 0x10, 0xff, 0xaf, 0x4a,
- 0x70, 0x3e, 0x28, 0x01, 0x3d, 0xe1, 0x8f, 0x0c, 0x00, 0x19, 0x3e, 0x7e, 0x52, 0xf6, 0x20, 0x49,
- 0x3f, 0xf9, 0x33, 0xc7, 0xe3, 0x95, 0xd9, 0x34, 0xcf, 0x46, 0xa6, 0x04, 0x9d, 0x8f, 0x64, 0x44,
- 0x9c, 0x32, 0x1f, 0x62, 0x7f, 0x73, 0x3e, 0x7c, 0x01, 0x5b, 0xa1, 0xcb, 0xd1, 0x8f, 0x95, 0x85,
- 0x78, 0xd7, 0x70, 0x5b, 0xb4, 0x9d, 0xfd, 0x35, 0x5b, 0x85, 0x4d, 0x3a, 0x1a, 0xc8, 0x85, 0x16,
- 0x5b, 0x71, 0x80, 0xab, 0xb3, 0x69, 0xfe, 0x52, 0x64, 0x9c, 0xd0, 0x2b, 0x2b, 0x69, 0x06, 0x9e,
- 0xa8, 0xfb, 0xaf, 0x19, 0x60, 0xa3, 0x17, 0xc9, 0xa9, 0x21, 0x3c, 0x38, 0x7e, 0xad, 0xae, 0x8a,
- 0xe2, 0x2f, 0xdc, 0x9d, 0x34, 0x96, 0x01, 0x5c, 0x92, 0x16, 0x0f, 0x92, 0xd5, 0xb1, 0xc8, 0x00,
- 0xc1, 0xdb, 0x85, 0x86, 0xf1, 0xa2, 0xdf, 0x56, 0xde, 0xe3, 0xa5, 0x10, 0x7a, 0xd7, 0x90, 0x4b,
- 0x9d, 0xee, 0x64, 0xbb, 0xa1, 0x86, 0x0c, 0xa9, 0xdf, 0x06, 0xa4, 0x25, 0xf2, 0xc4, 0x59, 0xed,
- 0xf4, 0x36, 0x5c, 0xa0, 0x4f, 0x21, 0xea, 0xf1, 0x7a, 0xc8, 0x23, 0x7d, 0x23, 0x79, 0xee, 0xc8,
- 0x52, 0x9d, 0x83, 0xa9, 0x97, 0x7b, 0x90, 0xa9, 0x1a, 0xe6, 0x23, 0xe4, 0x4a, 0xb8, 0xd3, 0xb1,
- 0xdc, 0x0e, 0xb2, 0xdd, 0x53, 0x3d, 0xe5, 0xbc, 0xe3, 0xcd, 0x51, 0xbe, 0xb3, 0x4d, 0x35, 0x24,
- 0x11, 0x1e, 0xc0, 0x36, 0xe1, 0x12, 0xcd, 0x47, 0x36, 0x1e, 0xb6, 0x51, 0xa3, 0x89, 0x56, 0x12,
- 0xee, 0xc0, 0x96, 0x11, 0x85, 0x52, 0xd6, 0x65, 0xb1, 0x50, 0x80, 0x2c, 0xa1, 0x56, 0x91, 0x89,
- 0xac, 0xae, 0x2b, 0xd6, 0x1d, 0x6f, 0x0e, 0x9c, 0xc6, 0x2c, 0xb4, 0x20, 0xa3, 0xa0, 0xc7, 0xee,
- 0xfc, 0xf1, 0xa5, 0x22, 0x73, 0x70, 0x6a, 0x14, 0x6f, 0xc1, 0x45, 0x1b, 0x3d, 0x76, 0xbd, 0xa7,
- 0x9b, 0xde, 0x43, 0xe6, 0x80, 0xbe, 0xed, 0x42, 0xd7, 0x40, 0x44, 0x2d, 0xa8, 0x49, 0x9b, 0x50,
- 0x7b, 0xac, 0xaf, 0x7c, 0x13, 0x87, 0x8d, 0xf9, 0x60, 0x60, 0xdf, 0x84, 0x17, 0xca, 0xa2, 0x26,
- 0xea, 0xda, 0x83, 0xaa, 0xac, 0x1f, 0x2a, 0x15, 0xa5, 0xa2, 0x55, 0xc4, 0xbd, 0xca, 0x43, 0xb9,
- 0xac, 0x1f, 0x2a, 0xb5, 0xaa, 0x2c, 0x55, 0xde, 0xab, 0xc8, 0xe5, 0xf4, 0x1a, 0xb7, 0x35, 0x9e,
- 0xf0, 0xc9, 0x90, 0x88, 0xbd, 0x01, 0x57, 0x02, 0x4b, 0x69, 0xaf, 0x22, 0x2b, 0x9a, 0x5e, 0xd3,
- 0x44, 0x4d, 0x4e, 0x33, 0x1c, 0x8c, 0x27, 0xfc, 0x3a, 0x91, 0xb1, 0xaf, 0xc2, 0x76, 0x08, 0x77,
- 0xa0, 0xd4, 0x64, 0xa5, 0x76, 0x58, 0xa3, 0xd0, 0x18, 0x77, 0x71, 0x3c, 0xe1, 0x13, 0x0b, 0x31,
- 0x5b, 0x00, 0x2e, 0x82, 0x56, 0x64, 0x49, 0xab, 0x1c, 0x28, 0x14, 0x7e, 0x8e, 0x4b, 0x8d, 0x27,
- 0x3c, 0x04, 0x72, 0x76, 0x07, 0xae, 0x86, 0xf0, 0x77, 0x45, 0x45, 0x91, 0xf7, 0x28, 0x38, 0xce,
- 0x25, 0xc7, 0x13, 0xfe, 0x02, 0x15, 0xb2, 0x6f, 0xc0, 0xb5, 0x00, 0x59, 0x15, 0xa5, 0xfb, 0xb2,
- 0xa6, 0x4b, 0x07, 0xfb, 0xfb, 0x15, 0x6d, 0x5f, 0x56, 0xb4, 0xf4, 0x79, 0x2e, 0x33, 0x9e, 0xf0,
- 0x69, 0xa2, 0x08, 0xe4, 0xec, 0x3b, 0xc0, 0x1f, 0x33, 0x13, 0xa5, 0xfb, 0xca, 0xc1, 0x47, 0x7b,
- 0x72, 0xf9, 0x7d, 0xd9, 0xb7, 0x5d, 0xe7, 0xb6, 0xc7, 0x13, 0xfe, 0x32, 0xd1, 0x2e, 0x29, 0xd9,
- 0xb7, 0x4f, 0x20, 0x50, 0x65, 0x49, 0xae, 0x54, 0x35, 0x5d, 0x2c, 0xd5, 0x64, 0x45, 0x92, 0xd3,
- 0x17, 0xb8, 0xec, 0x78, 0xc2, 0x67, 0x88, 0x96, 0x2a, 0xa9, 0x8e, 0xbd, 0x0d, 0xd7, 0x03, 0x7b,
- 0x45, 0xfe, 0x58, 0xd3, 0x6b, 0xf2, 0x07, 0x87, 0x9e, 0xca, 0xa3, 0xf9, 0x30, 0xbd, 0x41, 0x02,
- 0xf7, 0x34, 0x73, 0x85, 0x27, 0x67, 0x79, 0x48, 0x07, 0x76, 0x77, 0x65, 0xb1, 0x2c, 0xab, 0xe9,
- 0x04, 0xa9, 0x0c, 0xd9, 0x71, 0xf1, 0x27, 0x3f, 0xe4, 0xd6, 0x4a, 0x9f, 0xfe, 0xf2, 0x2c, 0xc7,
- 0x3c, 0x7d, 0x96, 0x63, 0xfe, 0x78, 0x96, 0x63, 0xbe, 0x7d, 0x9e, 0x5b, 0x7b, 0xfa, 0x3c, 0xb7,
- 0xf6, 0xdb, 0xf3, 0xdc, 0xda, 0x43, 0xa9, 0x69, 0xb9, 0xad, 0x7e, 0xbd, 0x60, 0xe2, 0x4e, 0xd1,
- 0xc4, 0x4e, 0x07, 0x3b, 0x45, 0xab, 0x6e, 0xde, 0x6c, 0xe2, 0x62, 0x07, 0x37, 0xfa, 0x6d, 0xe4,
- 0x90, 0xdf, 0xab, 0x9b, 0xf3, 0xff, 0xab, 0xd7, 0x6e, 0xdf, 0x0c, 0xff, 0x62, 0x79, 0x77, 0x8c,
- 0x53, 0x5f, 0xf7, 0x87, 0xd9, 0xeb, 0x7f, 0x06, 0x00, 0x00, 0xff, 0xff, 0xb3, 0xbe, 0x52, 0x62,
- 0x8f, 0x0d, 0x00, 0x00,
+ proto.RegisterFile("ibc/lightclients/solomachine/v2/solomachine.proto", fileDescriptor_141333b361aae010)
+}
+
+var fileDescriptor_141333b361aae010 = []byte{
+ // 1370 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x57, 0x5f, 0x8f, 0xdb, 0xd4,
+ 0x12, 0x5f, 0xa7, 0xe9, 0x36, 0x99, 0x6c, 0x77, 0x73, 0xdd, 0xb4, 0xcd, 0xba, 0x55, 0xe2, 0xeb,
+ 0xab, 0xdb, 0xbb, 0x17, 0xd1, 0x84, 0x5d, 0x44, 0x85, 0x2a, 0x04, 0x38, 0x8e, 0x4b, 0xd3, 0xee,
+ 0x7a, 0x83, 0xe3, 0x05, 0x5a, 0x81, 0x2c, 0xc7, 0x39, 0x9b, 0x58, 0x4d, 0x7c, 0xd2, 0xd8, 0x49,
+ 0x1a, 0x24, 0x24, 0xc4, 0x53, 0x89, 0x78, 0xe0, 0x0b, 0x44, 0x42, 0x20, 0x3e, 0x07, 0x6f, 0xc0,
+ 0x63, 0x1f, 0x79, 0x0a, 0xa8, 0xfd, 0x06, 0xf9, 0x04, 0xc8, 0x3e, 0x27, 0xb1, 0x9d, 0xed, 0x66,
+ 0xc5, 0xbf, 0xb7, 0x73, 0xe6, 0x37, 0xf3, 0x9b, 0x39, 0x33, 0xe3, 0x39, 0xc7, 0xb0, 0x6b, 0xd5,
+ 0xcd, 0x62, 0xdb, 0x6a, 0xb6, 0x5c, 0xb3, 0x6d, 0x21, 0xdb, 0x75, 0x8a, 0x0e, 0x6e, 0xe3, 0x8e,
+ 0x61, 0xb6, 0x2c, 0x1b, 0x15, 0x07, 0x7b, 0xe1, 0x6d, 0xa1, 0xdb, 0xc3, 0x2e, 0x66, 0xf3, 0x56,
+ 0xdd, 0x2c, 0x84, 0x4d, 0x0a, 0x61, 0x9d, 0xc1, 0x1e, 0xf7, 0x3f, 0x8f, 0xd3, 0xc4, 0x3d, 0x54,
+ 0x34, 0xb1, 0x6d, 0x23, 0xd3, 0xb5, 0xb0, 0x5d, 0x1c, 0xec, 0x86, 0x76, 0x84, 0x89, 0xfb, 0x77,
+ 0xa0, 0xd8, 0x32, 0x6c, 0x1b, 0xb5, 0x7d, 0x2d, 0xb2, 0xa4, 0x2a, 0x99, 0x26, 0x6e, 0x62, 0x7f,
+ 0x59, 0xf4, 0x56, 0x54, 0xba, 0xdd, 0xc4, 0xb8, 0xd9, 0x46, 0x45, 0x7f, 0x57, 0xef, 0x1f, 0x17,
+ 0x0d, 0x7b, 0x44, 0x20, 0xe1, 0x87, 0x18, 0xa4, 0x24, 0x3f, 0xae, 0x9a, 0x6b, 0xb8, 0x88, 0xe5,
+ 0x20, 0xe1, 0xa0, 0xc7, 0x7d, 0x64, 0x9b, 0x28, 0xcb, 0xf0, 0xcc, 0x4e, 0x5c, 0x5d, 0xec, 0xd9,
+ 0x5d, 0x48, 0x5a, 0x8e, 0x7e, 0xdc, 0xc3, 0x9f, 0x22, 0x3b, 0x1b, 0xe3, 0x99, 0x9d, 0x44, 0x29,
+ 0x33, 0x9b, 0xe6, 0xd3, 0x23, 0xa3, 0xd3, 0xbe, 0x2d, 0x2c, 0x20, 0x41, 0x4d, 0x58, 0xce, 0x1d,
+ 0x7f, 0xc9, 0xba, 0xb0, 0x65, 0x62, 0xdb, 0x41, 0xb6, 0xd3, 0x77, 0x74, 0xc7, 0xf3, 0x90, 0x3d,
+ 0xc7, 0x33, 0x3b, 0xa9, 0xbd, 0x62, 0xe1, 0x8c, 0xb4, 0x14, 0xa4, 0xb9, 0x9d, 0x1f, 0x58, 0x89,
+ 0x9b, 0x4d, 0xf3, 0x57, 0x88, 0xa7, 0x25, 0x46, 0x41, 0xdd, 0x34, 0x23, 0xba, 0x2c, 0x82, 0x6b,
+ 0x46, 0xbb, 0x8d, 0x87, 0x7a, 0xbf, 0xdb, 0x30, 0x5c, 0xa4, 0x1b, 0xc7, 0x2e, 0xea, 0xe9, 0xdd,
+ 0x1e, 0xee, 0x62, 0xc7, 0x68, 0x67, 0xe3, 0x7e, 0xe8, 0x37, 0x66, 0xd3, 0xbc, 0x40, 0x08, 0x57,
+ 0x28, 0x0b, 0x6a, 0xd6, 0x47, 0x8f, 0x7c, 0x50, 0xf4, 0xb0, 0x2a, 0x85, 0x6e, 0xc7, 0x9f, 0x7e,
+ 0x93, 0x5f, 0x13, 0xbe, 0x65, 0x60, 0x33, 0x1a, 0x2b, 0x7b, 0x0f, 0xa0, 0xdb, 0xaf, 0xb7, 0x2d,
+ 0x53, 0x7f, 0x84, 0x46, 0x7e, 0x1a, 0x53, 0x7b, 0x99, 0x02, 0x29, 0x42, 0x61, 0x5e, 0x84, 0x82,
+ 0x68, 0x8f, 0x4a, 0x97, 0x67, 0xd3, 0xfc, 0xbf, 0x48, 0x10, 0x81, 0x85, 0xa0, 0x26, 0xc9, 0xe6,
+ 0x3e, 0x1a, 0xb1, 0x3c, 0xa4, 0x1a, 0xd6, 0x00, 0xf5, 0x1c, 0xeb, 0xd8, 0x42, 0x3d, 0x3f, 0xed,
+ 0x49, 0x35, 0x2c, 0x62, 0xaf, 0x43, 0xd2, 0xb5, 0x3a, 0xc8, 0x71, 0x8d, 0x4e, 0xd7, 0xcf, 0x6e,
+ 0x5c, 0x0d, 0x04, 0x34, 0xc8, 0x2f, 0x62, 0xb0, 0x7e, 0x17, 0x19, 0x0d, 0xd4, 0x5b, 0x59, 0xe1,
+ 0x08, 0x55, 0x6c, 0x89, 0xca, 0x43, 0x1d, 0xab, 0x69, 0x1b, 0x6e, 0xbf, 0x47, 0xca, 0xb8, 0xa1,
+ 0x06, 0x02, 0xf6, 0x08, 0x36, 0x6d, 0x34, 0xd4, 0x43, 0x07, 0x8f, 0xaf, 0x38, 0xf8, 0xf6, 0x6c,
+ 0x9a, 0xbf, 0x4c, 0x0e, 0x1e, 0xb5, 0x12, 0xd4, 0x0d, 0x1b, 0x0d, 0xab, 0x8b, 0xf3, 0x4b, 0xb0,
+ 0xe5, 0x29, 0x84, 0x73, 0x70, 0xde, 0xcb, 0x41, 0xb8, 0x21, 0x96, 0x14, 0x04, 0xd5, 0x8b, 0xa4,
+ 0x1c, 0x08, 0x68, 0x12, 0x7e, 0x8a, 0xc1, 0xc6, 0x81, 0xe5, 0xd4, 0x51, 0xcb, 0x18, 0x58, 0xb8,
+ 0xdf, 0xf3, 0x1a, 0x9a, 0x34, 0x9f, 0x6e, 0x35, 0xfc, 0x5c, 0x24, 0xc3, 0x0d, 0xbd, 0x80, 0x04,
+ 0x35, 0x41, 0xd6, 0x95, 0x46, 0x24, 0x7b, 0xb1, 0xa5, 0xec, 0x75, 0xe1, 0xe2, 0x22, 0x1d, 0x3a,
+ 0xb6, 0xe7, 0xad, 0xbe, 0x7b, 0x66, 0xab, 0xd7, 0xe6, 0x56, 0xa2, 0xdd, 0x28, 0x1b, 0xae, 0x51,
+ 0xca, 0xce, 0xa6, 0xf9, 0x0c, 0x89, 0x22, 0xc2, 0x28, 0xa8, 0x1b, 0x8b, 0xfd, 0xa1, 0xbd, 0xe4,
+ 0xd1, 0x1d, 0x62, 0x9a, 0xf2, 0xbf, 0xcb, 0xa3, 0x3b, 0xc4, 0x61, 0x8f, 0xda, 0x10, 0xd3, 0x4c,
+ 0xfe, 0xc8, 0x40, 0x7a, 0x99, 0x22, 0xda, 0x1e, 0xcc, 0x72, 0x7b, 0x7c, 0x0c, 0xc9, 0x86, 0xe1,
+ 0x1a, 0xba, 0x3b, 0xea, 0x92, 0xcc, 0x6d, 0xee, 0xfd, 0xff, 0xcc, 0x30, 0x3d, 0x5e, 0x6d, 0xd4,
+ 0x45, 0xe1, 0xb2, 0x2c, 0x58, 0x04, 0x35, 0xd1, 0xa0, 0x38, 0xcb, 0x42, 0xdc, 0x5b, 0xd3, 0xae,
+ 0xf4, 0xd7, 0xd1, 0x66, 0x8e, 0xbf, 0xfc, 0xbb, 0xf8, 0x9c, 0x81, 0xac, 0x36, 0x97, 0xa1, 0xc6,
+ 0xe2, 0x4c, 0xfe, 0x81, 0xde, 0x85, 0xcd, 0x20, 0x17, 0x3e, 0xbd, 0x7f, 0xaa, 0x70, 0xef, 0x46,
+ 0x71, 0x41, 0x0d, 0xca, 0x51, 0x3e, 0x11, 0x42, 0xec, 0xe5, 0x21, 0xfc, 0xca, 0x40, 0xd2, 0xf3,
+ 0x5b, 0x1a, 0xb9, 0xc8, 0xf9, 0x0b, 0x5f, 0xe7, 0xd2, 0xa0, 0x38, 0x77, 0x72, 0x50, 0x44, 0x4a,
+ 0x10, 0xff, 0xa7, 0x4a, 0x70, 0x3e, 0x28, 0x01, 0x3d, 0xe1, 0xf7, 0x0c, 0x00, 0x19, 0x3e, 0x7e,
+ 0x52, 0xf6, 0x21, 0x45, 0x3f, 0xf9, 0x33, 0xc7, 0xe3, 0x95, 0xd9, 0x34, 0xcf, 0x46, 0xa6, 0x04,
+ 0x9d, 0x8f, 0x64, 0x44, 0x9c, 0x32, 0x1f, 0x62, 0x7f, 0x72, 0x3e, 0x7c, 0x06, 0x5b, 0xa1, 0xab,
+ 0xd0, 0x8f, 0x95, 0x85, 0x78, 0xd7, 0x70, 0x5b, 0xb4, 0x9d, 0xfd, 0x35, 0x5b, 0x85, 0x0d, 0x3a,
+ 0x1a, 0xc8, 0x85, 0x16, 0x5b, 0x71, 0x80, 0xab, 0xb3, 0x69, 0xfe, 0x52, 0x64, 0x9c, 0xd0, 0x2b,
+ 0x2b, 0x65, 0x06, 0x9e, 0xa8, 0xfb, 0x2f, 0x19, 0x60, 0xa3, 0x17, 0xc9, 0xa9, 0x21, 0x3c, 0x38,
+ 0x79, 0xad, 0xae, 0x8a, 0xe2, 0x0f, 0xdc, 0x9d, 0x34, 0x96, 0x01, 0x5c, 0x92, 0x16, 0xcf, 0x8f,
+ 0xd5, 0xb1, 0xc8, 0x00, 0xc1, 0x4b, 0x85, 0x86, 0xf1, 0x5f, 0xbf, 0xad, 0xbc, 0xa7, 0x4a, 0x21,
+ 0xf4, 0x8a, 0x19, 0xec, 0x16, 0x02, 0x52, 0xd9, 0x6e, 0xa8, 0x21, 0x43, 0xea, 0xb7, 0x01, 0x69,
+ 0x89, 0x3c, 0x68, 0x56, 0x3b, 0xbd, 0x05, 0x17, 0xe8, 0xc3, 0x87, 0x7a, 0xbc, 0x1e, 0xf2, 0x48,
+ 0x5f, 0x44, 0x9e, 0x3b, 0xb2, 0x54, 0xe7, 0xca, 0xd4, 0xcb, 0x3d, 0xc8, 0x54, 0x0d, 0xf3, 0x11,
+ 0x72, 0x25, 0xdc, 0xe9, 0x58, 0x6e, 0x07, 0xd9, 0xee, 0xa9, 0x9e, 0x72, 0xde, 0xf1, 0xe6, 0x5a,
+ 0xbe, 0xb3, 0x0d, 0x35, 0x24, 0x11, 0x1e, 0xc0, 0x36, 0xe1, 0x12, 0xcd, 0x47, 0x36, 0x1e, 0xb6,
+ 0x51, 0xa3, 0x89, 0x56, 0x12, 0xee, 0xc0, 0x96, 0x11, 0x55, 0xa5, 0xac, 0xcb, 0x62, 0xa1, 0x00,
+ 0x59, 0x42, 0xad, 0x22, 0x13, 0x59, 0x5d, 0x57, 0xac, 0x3b, 0xde, 0x1c, 0x38, 0x8d, 0x59, 0x68,
+ 0x41, 0x46, 0x41, 0x4f, 0xdc, 0x1a, 0x9d, 0x17, 0x2a, 0x32, 0x07, 0xa7, 0x46, 0xf1, 0x16, 0x5c,
+ 0xb4, 0xd1, 0x13, 0x57, 0x77, 0xd0, 0x63, 0xbd, 0x87, 0xcc, 0x01, 0x99, 0x27, 0xe1, 0x6b, 0x20,
+ 0x02, 0x0b, 0x6a, 0xca, 0x26, 0xd4, 0x1e, 0xeb, 0x2b, 0x5f, 0xc5, 0x21, 0x31, 0x1f, 0x0c, 0xec,
+ 0x9b, 0xf0, 0x9f, 0xb2, 0xa8, 0x89, 0xba, 0xf6, 0xa0, 0x2a, 0xeb, 0x47, 0x4a, 0x45, 0xa9, 0x68,
+ 0x15, 0x71, 0xbf, 0xf2, 0x50, 0x2e, 0xeb, 0x47, 0x4a, 0xad, 0x2a, 0x4b, 0x95, 0x3b, 0x15, 0xb9,
+ 0x9c, 0x5e, 0xe3, 0xb6, 0xc6, 0x13, 0x3e, 0x15, 0x12, 0xb1, 0x37, 0xe0, 0x4a, 0x60, 0x29, 0xed,
+ 0x57, 0x64, 0x45, 0xd3, 0x6b, 0x9a, 0xa8, 0xc9, 0x69, 0x86, 0x83, 0xf1, 0x84, 0x5f, 0x27, 0x32,
+ 0xf6, 0x55, 0xd8, 0x0e, 0xe9, 0x1d, 0x2a, 0x35, 0x59, 0xa9, 0x1d, 0xd5, 0xa8, 0x6a, 0x8c, 0xbb,
+ 0x38, 0x9e, 0xf0, 0xc9, 0x85, 0x98, 0x2d, 0x00, 0x17, 0xd1, 0x56, 0x64, 0x49, 0xab, 0x1c, 0x2a,
+ 0x54, 0xfd, 0x1c, 0xb7, 0x39, 0x9e, 0xf0, 0x10, 0xc8, 0xd9, 0x1d, 0xb8, 0x1a, 0xd2, 0xbf, 0x2b,
+ 0x2a, 0x8a, 0xbc, 0x4f, 0x95, 0xe3, 0x5c, 0x6a, 0x3c, 0xe1, 0x2f, 0x50, 0x21, 0xfb, 0x06, 0x5c,
+ 0x0b, 0x34, 0xab, 0xa2, 0x74, 0x5f, 0xd6, 0x74, 0xe9, 0xf0, 0xe0, 0xa0, 0xa2, 0x1d, 0xc8, 0x8a,
+ 0x96, 0x3e, 0xcf, 0x65, 0xc6, 0x13, 0x3e, 0x4d, 0x80, 0x40, 0xce, 0xbe, 0x03, 0xfc, 0x09, 0x33,
+ 0x51, 0xba, 0xaf, 0x1c, 0x7e, 0xb8, 0x2f, 0x97, 0xdf, 0x93, 0x7d, 0xdb, 0x75, 0x6e, 0x7b, 0x3c,
+ 0xe1, 0x2f, 0x13, 0x74, 0x09, 0x64, 0xdf, 0x7e, 0x09, 0x81, 0x2a, 0x4b, 0x72, 0xa5, 0xaa, 0xe9,
+ 0x62, 0xa9, 0x26, 0x2b, 0x92, 0x9c, 0xbe, 0xc0, 0x65, 0xc7, 0x13, 0x3e, 0x43, 0x50, 0x0a, 0x52,
+ 0x8c, 0xbd, 0x05, 0xd7, 0x03, 0x7b, 0x45, 0xfe, 0x48, 0xd3, 0x6b, 0xf2, 0xfb, 0x47, 0x1e, 0xe4,
+ 0xd1, 0x7c, 0x90, 0x4e, 0x90, 0xc0, 0x3d, 0x64, 0x0e, 0x78, 0x72, 0x96, 0x87, 0x74, 0x60, 0x77,
+ 0x57, 0x16, 0xcb, 0xb2, 0x9a, 0x4e, 0x92, 0xca, 0x90, 0x1d, 0x17, 0x7f, 0xfa, 0x5d, 0x6e, 0xad,
+ 0xf4, 0xc9, 0xcf, 0xcf, 0x73, 0xcc, 0xb3, 0xe7, 0x39, 0xe6, 0xb7, 0xe7, 0x39, 0xe6, 0xeb, 0x17,
+ 0xb9, 0xb5, 0x67, 0x2f, 0x72, 0x6b, 0xbf, 0xbc, 0xc8, 0xad, 0x3d, 0x94, 0x9a, 0x96, 0xdb, 0xea,
+ 0xd7, 0x0b, 0x26, 0xee, 0x14, 0x4d, 0xec, 0x74, 0xb0, 0x53, 0xb4, 0xea, 0xe6, 0xcd, 0x26, 0x2e,
+ 0x76, 0x70, 0xa3, 0xdf, 0x46, 0x0e, 0xf9, 0x99, 0xba, 0x39, 0xff, 0x9b, 0x7a, 0xed, 0xd6, 0xcd,
+ 0xf0, 0x0f, 0x95, 0x77, 0xc7, 0x38, 0xf5, 0x75, 0x7f, 0x98, 0xbd, 0xfe, 0x7b, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0x44, 0x0b, 0xc7, 0x37, 0x7d, 0x0d, 0x00, 0x00,
}
func (m *ClientState) Marshal() (dAtA []byte, err error) {
@@ -954,8 +954,13 @@ func (m *ClientState) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i--
dAtA[i] = 0x1a
}
- if m.FrozenSequence != 0 {
- i = encodeVarintSolomachine(dAtA, i, uint64(m.FrozenSequence))
+ if m.IsFrozen {
+ i--
+ if m.IsFrozen {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
i--
dAtA[i] = 0x10
}
@@ -1635,8 +1640,8 @@ func (m *ClientState) Size() (n int) {
if m.Sequence != 0 {
n += 1 + sovSolomachine(uint64(m.Sequence))
}
- if m.FrozenSequence != 0 {
- n += 1 + sovSolomachine(uint64(m.FrozenSequence))
+ if m.IsFrozen {
+ n += 2
}
if m.ConsensusState != nil {
l = m.ConsensusState.Size()
@@ -1988,9 +1993,9 @@ func (m *ClientState) Unmarshal(dAtA []byte) error {
}
case 2:
if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field FrozenSequence", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field IsFrozen", wireType)
}
- m.FrozenSequence = 0
+ var v int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowSolomachine
@@ -2000,11 +2005,12 @@ func (m *ClientState) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.FrozenSequence |= uint64(b&0x7F) << shift
+ v |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
+ m.IsFrozen = bool(v != 0)
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ConsensusState", wireType)
diff --git a/modules/light-clients/06-solomachine/types/update_test.go b/modules/light-clients/06-solomachine/types/update_test.go
index 5df5cb72..ba605d15 100644
--- a/modules/light-clients/06-solomachine/types/update_test.go
+++ b/modules/light-clients/06-solomachine/types/update_test.go
@@ -167,7 +167,7 @@ func (suite *SoloMachineTestSuite) TestCheckHeaderAndUpdateState() {
if tc.expPass {
suite.Require().NoError(err)
suite.Require().Equal(header.(*types.Header).NewPublicKey, clientState.(*types.ClientState).ConsensusState.PublicKey)
- suite.Require().Equal(uint64(0), clientState.(*types.ClientState).FrozenSequence)
+ suite.Require().Equal(false, clientState.(*types.ClientState).IsFrozen)
suite.Require().Equal(header.(*types.Header).Sequence+1, clientState.(*types.ClientState).Sequence)
suite.Require().Equal(consensusState, clientState.(*types.ClientState).ConsensusState)
} else {
diff --git a/proto/ibc/lightclients/solomachine/v1/solomachine.proto b/proto/ibc/lightclients/solomachine/v2/solomachine.proto
similarity index 98%
rename from proto/ibc/lightclients/solomachine/v1/solomachine.proto
rename to proto/ibc/lightclients/solomachine/v2/solomachine.proto
index 68f0c1ea..fdb659f6 100644
--- a/proto/ibc/lightclients/solomachine/v1/solomachine.proto
+++ b/proto/ibc/lightclients/solomachine/v2/solomachine.proto
@@ -1,6 +1,6 @@
syntax = "proto3";
-package ibc.lightclients.solomachine.v1;
+package ibc.lightclients.solomachine.v2;
option go_package = "github.com/cosmos/ibc-go/modules/light-clients/06-solomachine/types";
@@ -16,7 +16,7 @@ message ClientState {
// latest sequence of the client state
uint64 sequence = 1;
// frozen sequence of the solo machine
- uint64 frozen_sequence = 2 [(gogoproto.moretags) = "yaml:\"frozen_sequence\""];
+ bool is_frozen = 2 [(gogoproto.moretags) = "yaml:\"is_frozen\""];
ConsensusState consensus_state = 3 [(gogoproto.moretags) = "yaml:\"consensus_state\""];
// when set to true, will allow governance to update a solo machine client.
// The client will be unfrozen if it is frozen.
diff --git a/testing/sdk_test.go b/testing/sdk_test.go
index caef571c..b7b63b1a 100644
--- a/testing/sdk_test.go
+++ b/testing/sdk_test.go
@@ -198,12 +198,12 @@ func (s *IntegrationTestSuite) TestLegacyRestErrMessages() {
// a solo machine client state
clientStateJSON := testutil.WriteToNewTempFile(
s.T(),
- `{"@type":"/ibc.lightclients.solomachine.v1.ClientState","sequence":"1","frozen_sequence":"0","consensus_state":{"public_key":{"@type":"/cosmos.crypto.secp256k1.PubKey","key":"AtK50+5pJOoaa04qqAqrnyAqsYrwrR/INnA6UPIaYZlp"},"diversifier":"testing","timestamp":"10"},"allow_update_after_proposal":false}`,
+ `{"@type":"/ibc.lightclients.solomachine.v2.ClientState","sequence":"1","is_frozen":false,"consensus_state":{"public_key":{"@type":"/cosmos.crypto.secp256k1.PubKey","key":"AtK50+5pJOoaa04qqAqrnyAqsYrwrR/INnA6UPIaYZlp"},"diversifier":"testing","timestamp":"10"},"allow_update_after_proposal":false}`,
)
badClientStateJSON := testutil.WriteToNewTempFile(
s.T(),
- `{"@type":"/ibc.lightclients.solomachine.v1.ClientState","sequence":"1","frozen_sequence":"0","consensus_state":{"public_key":{"@type":"/cosmos.crypto.secp256k1.PubKey","key":"AtK50+5pJOoaa04qqAqrnyAqsYrwrR/INnA6UPIaYZlp"},"diversifier":"DIFFERENT","timestamp":"10"},"allow_update_after_proposal":false}`,
+ `{"@type":"/ibc.lightclients.solomachine.v2.ClientState","sequence":"1","is_frozen":false,"consensus_state":{"public_key":{"@type":"/cosmos.crypto.secp256k1.PubKey","key":"AtK50+5pJOoaa04qqAqrnyAqsYrwrR/INnA6UPIaYZlp"},"diversifier":"DIFFERENT","timestamp":"10"},"allow_update_after_proposal":false}`,
)
// Write consensus json to temp file, used for an IBC message.
@@ -211,7 +211,7 @@ func (s *IntegrationTestSuite) TestLegacyRestErrMessages() {
// a solo machine consensus state
consensusJSON := testutil.WriteToNewTempFile(
s.T(),
- `{"@type":"/ibc.lightclients.solomachine.v1.ConsensusState","public_key":{"@type":"/cosmos.crypto.secp256k1.PubKey","key":"AtK50+5pJOoaa04qqAqrnyAqsYrwrR/INnA6UPIaYZlp"},"diversifier":"testing","timestamp":"10"}`,
+ `{"@type":"/ibc.lightclients.solomachine.v2.ConsensusState","public_key":{"@type":"/cosmos.crypto.secp256k1.PubKey","key":"AtK50+5pJOoaa04qqAqrnyAqsYrwrR/INnA6UPIaYZlp"},"diversifier":"testing","timestamp":"10"}`,
)
testCases := []struct {
From 2523fdbfd8fc14554d81dc92ce58b3a590964647 Mon Sep 17 00:00:00 2001
From: Aditya
Date: Fri, 21 May 2021 17:01:21 -0400
Subject: [PATCH 061/393] Block Delay Parameter (#171)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
* progress on blockdelay
* complete block delay param
* address most comments
* address the rest
* Update modules/light-clients/07-tendermint/types/client_state.go
Co-authored-by: colin axnér <25233464+colin-axner@users.noreply.github.com>
* fix tm tests
* fix lightclient tests
* add connection tests
* DRY consensus metadata setting
* fix genesis and add safety check
* switch param name and default
* set metadata on init and fix tests
* change param name
* CHANGELOG
* add param documentation

Co-authored-by: colin axnér <25233464+colin-axner@users.noreply.github.com>
---
CHANGELOG.md | 1 +
docs/ibc/proto-docs.md | 17 ++
go.mod | 1 -
.../core/02-client/keeper/grpc_query_test.go | 3 +-
modules/core/02-client/keeper/params.go | 6 +-
modules/core/02-client/types/params.go | 8 +-
modules/core/03-connection/genesis.go | 1 +
modules/core/03-connection/keeper/keeper.go | 10 +-
modules/core/03-connection/keeper/params.go | 23 ++
.../core/03-connection/keeper/params_test.go | 17 ++
modules/core/03-connection/keeper/verify.go | 49 +++-
.../core/03-connection/keeper/verify_test.go | 128 ++++++---
.../core/03-connection/types/connection.pb.go | 247 +++++++++++++++---
modules/core/03-connection/types/genesis.go | 8 +-
.../core/03-connection/types/genesis.pb.go | 98 +++++--
.../core/03-connection/types/genesis_test.go | 20 ++
modules/core/03-connection/types/params.go | 54 ++++
.../core/03-connection/types/params_test.go | 29 ++
modules/core/exported/client.go | 20 +-
modules/core/genesis_test.go | 3 +
modules/core/keeper/keeper.go | 13 +-
.../06-solomachine/types/client_state.go | 4 +
.../06-solomachine/types/client_state_test.go | 12 +-
.../07-tendermint/types/client_state.go | 59 +++--
.../07-tendermint/types/client_state_test.go | 172 ++++++++----
.../07-tendermint/types/errors.go | 25 +-
.../07-tendermint/types/proposal_handle.go | 8 +-
.../07-tendermint/types/store.go | 63 ++++-
.../07-tendermint/types/tendermint.pb.go | 4 +-
.../07-tendermint/types/update.go | 11 +-
.../07-tendermint/types/update_test.go | 19 +-
.../07-tendermint/types/upgrade.go | 5 +-
.../09-localhost/types/client_state.go | 4 +
.../09-localhost/types/client_state_test.go | 10 +-
proto/ibc/core/connection/v1/connection.proto | 8 +
proto/ibc/core/connection/v1/genesis.proto | 1 +
36 files changed, 916 insertions(+), 245 deletions(-)
create mode 100644 modules/core/03-connection/keeper/params.go
create mode 100644 modules/core/03-connection/keeper/params_test.go
create mode 100644 modules/core/03-connection/types/params.go
create mode 100644 modules/core/03-connection/types/params_test.go
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 8119e101..e05d119b 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -62,6 +62,7 @@ Ref: https://keepachangelog.com/en/1.0.0/
* (modules/light-clients/07-tendermint) [\#141](https://github.com/cosmos/ibc-go/pull/141) Freeze the client if there's a conflicting header submitted for an existing consensus state.
* (modules/core/02-client) [\#8405](https://github.com/cosmos/cosmos-sdk/pull/8405) Refactor IBC client update governance proposals to use a substitute client to update a frozen or expired client.
* (modules/core/02-client) [\#8673](https://github.com/cosmos/cosmos-sdk/pull/8673) IBC upgrade logic moved to 02-client and an IBC UpgradeProposal is added.
+* (modules/core/03-connection) [\#171](https://github.com/cosmos/ibc-go/pull/171) Introduces a new parameter `MaxExpectedTimePerBlock` to allow connections to calculate and enforce a block delay that is proportional to the time delay set by the connection.
### Improvements
diff --git a/docs/ibc/proto-docs.md b/docs/ibc/proto-docs.md
index 67c1741e..b098c732 100644
--- a/docs/ibc/proto-docs.md
+++ b/docs/ibc/proto-docs.md
@@ -155,6 +155,7 @@
- [ConnectionPaths](#ibc.core.connection.v1.ConnectionPaths)
- [Counterparty](#ibc.core.connection.v1.Counterparty)
- [IdentifiedConnection](#ibc.core.connection.v1.IdentifiedConnection)
+ - [Params](#ibc.core.connection.v1.Params)
- [Version](#ibc.core.connection.v1.Version)
- [State](#ibc.core.connection.v1.State)
@@ -2399,6 +2400,21 @@ identifier field.
+
+
+### Params
+Params defines the set of Connection parameters.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `max_expected_time_per_block` | [uint64](#uint64) | | maximum expected time per block, used to enforce block delay. This parameter should reflect the largest amount of time that the chain might reasonably take to produce the next block under normal operating conditions. A safe choice is 3-5x the expected time per block. |
+
+
+
+
+
+
### Version
@@ -2458,6 +2474,7 @@ GenesisState defines the ibc connection submodule's genesis state.
| `connections` | [IdentifiedConnection](#ibc.core.connection.v1.IdentifiedConnection) | repeated | |
| `client_connection_paths` | [ConnectionPaths](#ibc.core.connection.v1.ConnectionPaths) | repeated | |
| `next_connection_sequence` | [uint64](#uint64) | | the sequence for the next generated connection identifier |
+| `params` | [Params](#ibc.core.connection.v1.Params) | | |
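[Editor's note] To make the sizing guidance above concrete, here is a small standalone sketch with illustrative numbers only (not taken from the patch). The surrounding tests treat both the connection delay period and `max_expected_time_per_block` as nanoseconds, so a chain with roughly 6-second blocks and a 5x safety margin would set the parameter to 30 seconds expressed in nanoseconds.

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Illustrative numbers only: ~6s observed block time, 5x safety margin.
	observedBlockTime := 6 * time.Second
	maxExpectedTimePerBlock := uint64((5 * observedBlockTime).Nanoseconds())

	fmt.Println(maxExpectedTimePerBlock) // 30000000000 (30s in nanoseconds)
}
```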
diff --git a/go.mod b/go.mod
index f4100fcb..233f26b7 100644
--- a/go.mod
+++ b/go.mod
@@ -22,5 +22,4 @@ require (
github.com/tendermint/tm-db v0.6.4
google.golang.org/genproto v0.0.0-20210114201628-6edceaf6022f
google.golang.org/grpc v1.37.0
- google.golang.org/protobuf v1.26.0
)
diff --git a/modules/core/02-client/keeper/grpc_query_test.go b/modules/core/02-client/keeper/grpc_query_test.go
index a2531c4d..f066b05b 100644
--- a/modules/core/02-client/keeper/grpc_query_test.go
+++ b/modules/core/02-client/keeper/grpc_query_test.go
@@ -144,7 +144,7 @@ func (suite *KeeperTestSuite) TestQueryClientStates() {
expClientStates = types.IdentifiedClientStates{idcs, idcs2}.Sort()
req = &types.QueryClientStatesRequest{
Pagination: &query.PageRequest{
- Limit: 7,
+ Limit: 20,
CountTotal: true,
},
}
@@ -159,7 +159,6 @@ func (suite *KeeperTestSuite) TestQueryClientStates() {
expClientStates = nil
tc.malleate()
-
// always add localhost which is created by default in init genesis
localhostClientState := suite.chainA.GetClientState(exported.Localhost)
identifiedLocalhost := types.NewIdentifiedClientState(exported.Localhost, localhostClientState)
diff --git a/modules/core/02-client/keeper/params.go b/modules/core/02-client/keeper/params.go
index c9b88acd..2addf95d 100644
--- a/modules/core/02-client/keeper/params.go
+++ b/modules/core/02-client/keeper/params.go
@@ -5,19 +5,19 @@ import (
"github.com/cosmos/ibc-go/modules/core/02-client/types"
)
-// GetAllowedClients retrieves the receive enabled boolean from the paramstore
+// GetAllowedClients retrieves the allowed clients from the paramstore
func (k Keeper) GetAllowedClients(ctx sdk.Context) []string {
var res []string
k.paramSpace.Get(ctx, types.KeyAllowedClients, &res)
return res
}
-// GetParams returns the total set of ibc-transfer parameters.
+// GetParams returns the total set of ibc-client parameters.
func (k Keeper) GetParams(ctx sdk.Context) types.Params {
return types.NewParams(k.GetAllowedClients(ctx)...)
}
-// SetParams sets the total set of ibc-transfer parameters.
+// SetParams sets the total set of ibc-client parameters.
func (k Keeper) SetParams(ctx sdk.Context, params types.Params) {
k.paramSpace.SetParamSet(ctx, &params)
}
diff --git a/modules/core/02-client/types/params.go b/modules/core/02-client/types/params.go
index f9c50f15..6460a3fc 100644
--- a/modules/core/02-client/types/params.go
+++ b/modules/core/02-client/types/params.go
@@ -4,8 +4,8 @@ import (
"fmt"
"strings"
- "github.com/cosmos/ibc-go/modules/core/exported"
paramtypes "github.com/cosmos/cosmos-sdk/x/params/types"
+ "github.com/cosmos/ibc-go/modules/core/exported"
)
var (
@@ -21,19 +21,19 @@ func ParamKeyTable() paramtypes.KeyTable {
return paramtypes.NewKeyTable().RegisterParamSet(&Params{})
}
-// NewParams creates a new parameter configuration for the ibc transfer module
+// NewParams creates a new parameter configuration for the ibc client module
func NewParams(allowedClients ...string) Params {
return Params{
AllowedClients: allowedClients,
}
}
-// DefaultParams is the default parameter configuration for the ibc-transfer module
+// DefaultParams is the default parameter configuration for the ibc-client module
func DefaultParams() Params {
return NewParams(DefaultAllowedClients...)
}
-// Validate all ibc-transfer module parameters
+// Validate all ibc-client module parameters
func (p Params) Validate() error {
return validateClients(p.AllowedClients)
}
diff --git a/modules/core/03-connection/genesis.go b/modules/core/03-connection/genesis.go
index ca2d9e7e..af46c8ba 100644
--- a/modules/core/03-connection/genesis.go
+++ b/modules/core/03-connection/genesis.go
@@ -17,6 +17,7 @@ func InitGenesis(ctx sdk.Context, k keeper.Keeper, gs types.GenesisState) {
k.SetClientConnectionPaths(ctx, connPaths.ClientId, connPaths.Paths)
}
k.SetNextConnectionSequence(ctx, gs.NextConnectionSequence)
+ k.SetParams(ctx, gs.Params)
}
// ExportGenesis returns the ibc connection submodule's exported genesis.
diff --git a/modules/core/03-connection/keeper/keeper.go b/modules/core/03-connection/keeper/keeper.go
index 49747b56..3ce02916 100644
--- a/modules/core/03-connection/keeper/keeper.go
+++ b/modules/core/03-connection/keeper/keeper.go
@@ -6,6 +6,7 @@ import (
"github.com/cosmos/cosmos-sdk/codec"
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ paramtypes "github.com/cosmos/cosmos-sdk/x/params/types"
clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
"github.com/cosmos/ibc-go/modules/core/03-connection/types"
commitmenttypes "github.com/cosmos/ibc-go/modules/core/23-commitment/types"
@@ -19,15 +20,22 @@ type Keeper struct {
types.QueryServer
storeKey sdk.StoreKey
+ paramSpace paramtypes.Subspace
cdc codec.BinaryCodec
clientKeeper types.ClientKeeper
}
// NewKeeper creates a new IBC connection Keeper instance
-func NewKeeper(cdc codec.BinaryCodec, key sdk.StoreKey, ck types.ClientKeeper) Keeper {
+func NewKeeper(cdc codec.BinaryCodec, key sdk.StoreKey, paramSpace paramtypes.Subspace, ck types.ClientKeeper) Keeper {
+ // set KeyTable if it has not already been set
+ if !paramSpace.HasKeyTable() {
+ paramSpace = paramSpace.WithKeyTable(types.ParamKeyTable())
+ }
+
return Keeper{
storeKey: key,
cdc: cdc,
+ paramSpace: paramSpace,
clientKeeper: ck,
}
}
diff --git a/modules/core/03-connection/keeper/params.go b/modules/core/03-connection/keeper/params.go
new file mode 100644
index 00000000..df057f9d
--- /dev/null
+++ b/modules/core/03-connection/keeper/params.go
@@ -0,0 +1,23 @@
+package keeper
+
+import (
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ "github.com/cosmos/ibc-go/modules/core/03-connection/types"
+)
+
+// GetMaxExpectedTimePerBlock retrieves the maximum expected time per block from the paramstore
+func (k Keeper) GetMaxExpectedTimePerBlock(ctx sdk.Context) uint64 {
+ var res uint64
+ k.paramSpace.Get(ctx, types.KeyMaxExpectedTimePerBlock, &res)
+ return res
+}
+
+// GetParams returns the total set of ibc-connection parameters.
+func (k Keeper) GetParams(ctx sdk.Context) types.Params {
+ return types.NewParams(k.GetMaxExpectedTimePerBlock(ctx))
+}
+
+// SetParams sets the total set of ibc-connection parameters.
+func (k Keeper) SetParams(ctx sdk.Context, params types.Params) {
+ k.paramSpace.SetParamSet(ctx, &params)
+}
diff --git a/modules/core/03-connection/keeper/params_test.go b/modules/core/03-connection/keeper/params_test.go
new file mode 100644
index 00000000..53ab160c
--- /dev/null
+++ b/modules/core/03-connection/keeper/params_test.go
@@ -0,0 +1,17 @@
+package keeper_test
+
+import (
+ "github.com/cosmos/ibc-go/modules/core/03-connection/types"
+)
+
+func (suite *KeeperTestSuite) TestParams() {
+ expParams := types.DefaultParams()
+
+ params := suite.chainA.App.GetIBCKeeper().ConnectionKeeper.GetParams(suite.chainA.GetContext())
+ suite.Require().Equal(expParams, params)
+
+ expParams.MaxExpectedTimePerBlock = 10
+ suite.chainA.App.GetIBCKeeper().ConnectionKeeper.SetParams(suite.chainA.GetContext(), expParams)
+ params = suite.chainA.App.GetIBCKeeper().ConnectionKeeper.GetParams(suite.chainA.GetContext())
+ suite.Require().Equal(uint64(10), params.MaxExpectedTimePerBlock)
+}
diff --git a/modules/core/03-connection/keeper/verify.go b/modules/core/03-connection/keeper/verify.go
index 38722f98..51003974 100644
--- a/modules/core/03-connection/keeper/verify.go
+++ b/modules/core/03-connection/keeper/verify.go
@@ -1,6 +1,8 @@
package keeper
import (
+ "math"
+
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
@@ -159,9 +161,13 @@ func (k Keeper) VerifyPacketCommitment(
return sdkerrors.Wrapf(clienttypes.ErrClientNotActive, "client (%s) status is %s", clientID, status)
}
+ // get time and block delays
+ timeDelay := connection.GetDelayPeriod()
+ blockDelay := k.getBlockDelay(ctx, connection)
+
if err := clientState.VerifyPacketCommitment(
- clientStore, k.cdc, height,
- uint64(ctx.BlockTime().UnixNano()), connection.GetDelayPeriod(),
+ ctx, clientStore, k.cdc, height,
+ timeDelay, blockDelay,
connection.GetCounterparty().GetPrefix(), proof, portID, channelID,
sequence, commitmentBytes,
); err != nil {
@@ -195,9 +201,13 @@ func (k Keeper) VerifyPacketAcknowledgement(
return sdkerrors.Wrapf(clienttypes.ErrClientNotActive, "client (%s) status is %s", clientID, status)
}
+ // get time and block delays
+ timeDelay := connection.GetDelayPeriod()
+ blockDelay := k.getBlockDelay(ctx, connection)
+
if err := clientState.VerifyPacketAcknowledgement(
- clientStore, k.cdc, height,
- uint64(ctx.BlockTime().UnixNano()), connection.GetDelayPeriod(),
+ ctx, clientStore, k.cdc, height,
+ timeDelay, blockDelay,
connection.GetCounterparty().GetPrefix(), proof, portID, channelID,
sequence, acknowledgement,
); err != nil {
@@ -231,9 +241,13 @@ func (k Keeper) VerifyPacketReceiptAbsence(
return sdkerrors.Wrapf(clienttypes.ErrClientNotActive, "client (%s) status is %s", clientID, status)
}
+ // get time and block delays
+ timeDelay := connection.GetDelayPeriod()
+ blockDelay := k.getBlockDelay(ctx, connection)
+
if err := clientState.VerifyPacketReceiptAbsence(
- clientStore, k.cdc, height,
- uint64(ctx.BlockTime().UnixNano()), connection.GetDelayPeriod(),
+ ctx, clientStore, k.cdc, height,
+ timeDelay, blockDelay,
connection.GetCounterparty().GetPrefix(), proof, portID, channelID,
sequence,
); err != nil {
@@ -266,9 +280,13 @@ func (k Keeper) VerifyNextSequenceRecv(
return sdkerrors.Wrapf(clienttypes.ErrClientNotActive, "client (%s) status is %s", clientID, status)
}
+ // get time and block delays
+ timeDelay := connection.GetDelayPeriod()
+ blockDelay := k.getBlockDelay(ctx, connection)
+
if err := clientState.VerifyNextSequenceRecv(
- clientStore, k.cdc, height,
- uint64(ctx.BlockTime().UnixNano()), connection.GetDelayPeriod(),
+ ctx, clientStore, k.cdc, height,
+ timeDelay, blockDelay,
connection.GetCounterparty().GetPrefix(), proof, portID, channelID,
nextSequenceRecv,
); err != nil {
@@ -277,3 +295,18 @@ func (k Keeper) VerifyNextSequenceRecv(
return nil
}
+
+// getBlockDelay calculates the block delay period from the time delay of the connection
+// and the maximum expected time per block.
+func (k Keeper) getBlockDelay(ctx sdk.Context, connection exported.ConnectionI) uint64 {
+ // expectedTimePerBlock should never be zero; however, if it is, return a block delay of 0 for safety,
+ // as the expectedTimePerBlock parameter was not set.
+ expectedTimePerBlock := k.GetMaxExpectedTimePerBlock(ctx)
+ if expectedTimePerBlock == 0 {
+ return 0
+ }
+ // calculate minimum block delay by dividing time delay period
+ // by the expected time per block. Round up the block delay.
+ timeDelay := connection.GetDelayPeriod()
+ return uint64(math.Ceil(float64(timeDelay) / float64(expectedTimePerBlock)))
+}
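[Editor's note] As a sanity check of the formula in getBlockDelay above, here is a self-contained sketch with illustrative values that are not from the patch: a connection configured with a 10-minute delay period on a chain whose `MaxExpectedTimePerBlock` parameter is 30 seconds must additionally wait ceil(600s / 30s) = 20 blocks before proofs are accepted.

```go
package main

import (
	"fmt"
	"math"
	"time"
)

// Mirrors getBlockDelay above: ceil(time delay / expected time per block),
// with a zero parameter treated as "no block delay" for safety.
func blockDelay(timeDelay, expectedTimePerBlock uint64) uint64 {
	if expectedTimePerBlock == 0 {
		return 0
	}
	return uint64(math.Ceil(float64(timeDelay) / float64(expectedTimePerBlock)))
}

func main() {
	delayPeriod := uint64((10 * time.Minute).Nanoseconds())              // connection's time delay
	maxExpectedTimePerBlock := uint64((30 * time.Second).Nanoseconds()) // illustrative param value

	fmt.Println(blockDelay(delayPeriod, maxExpectedTimePerBlock)) // 20
}
```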
diff --git a/modules/core/03-connection/keeper/verify_test.go b/modules/core/03-connection/keeper/verify_test.go
index f57953fe..620f4425 100644
--- a/modules/core/03-connection/keeper/verify_test.go
+++ b/modules/core/03-connection/keeper/verify_test.go
@@ -290,10 +290,11 @@ func (suite *KeeperTestSuite) TestVerifyChannelState() {
// packet is sent from chainA to chainB, but has not been received.
func (suite *KeeperTestSuite) TestVerifyPacketCommitment() {
var (
- path *ibctesting.Path
- packet channeltypes.Packet
- heightDiff uint64
- delayPeriod uint64
+ path *ibctesting.Path
+ packet channeltypes.Packet
+ heightDiff uint64
+ delayTimePeriod uint64
+ timePerBlock uint64
)
cases := []struct {
name string
@@ -302,10 +303,16 @@ func (suite *KeeperTestSuite) TestVerifyPacketCommitment() {
}{
{"verification success", func() {}, true},
{"verification success: delay period passed", func() {
- delayPeriod = uint64(1 * time.Second.Nanoseconds())
+ delayTimePeriod = uint64(1 * time.Second.Nanoseconds())
}, true},
- {"delay period has not passed", func() {
- delayPeriod = uint64(1 * time.Hour.Nanoseconds())
+ {"delay time period has not passed", func() {
+ delayTimePeriod = uint64(1 * time.Hour.Nanoseconds())
+ }, false},
+ {"delay block period has not passed", func() {
+ // make timePerBlock 1 nanosecond so that block delay is not passed.
+ // must also set a non-zero time delay to ensure block delay is enforced.
+ delayTimePeriod = uint64(1 * time.Second.Nanoseconds())
+ timePerBlock = 1
}, false},
{"client state not found- changed client ID", func() {
connection := path.EndpointB.GetConnection()
@@ -338,13 +345,22 @@ func (suite *KeeperTestSuite) TestVerifyPacketCommitment() {
err := path.EndpointA.SendPacket(packet)
suite.Require().NoError(err)
+ // reset variables
+ heightDiff = 0
+ delayTimePeriod = 0
+ timePerBlock = 0
tc.malleate()
connection := path.EndpointB.GetConnection()
- connection.DelayPeriod = delayPeriod
+ connection.DelayPeriod = delayTimePeriod
commitmentKey := host.PacketCommitmentKey(packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence())
proof, proofHeight := suite.chainA.QueryProof(commitmentKey)
+ // set time per block param
+ if timePerBlock != 0 {
+ suite.chainB.App.GetIBCKeeper().ConnectionKeeper.SetParams(suite.chainB.GetContext(), types.NewParams(timePerBlock))
+ }
+
commitment := channeltypes.CommitPacket(suite.chainB.App.GetIBCKeeper().Codec(), packet)
err = suite.chainB.App.GetIBCKeeper().ConnectionKeeper.VerifyPacketCommitment(
suite.chainB.GetContext(), connection, malleateHeight(proofHeight, heightDiff), proof,
@@ -365,10 +381,11 @@ func (suite *KeeperTestSuite) TestVerifyPacketCommitment() {
// is sent from chainA to chainB and received.
func (suite *KeeperTestSuite) TestVerifyPacketAcknowledgement() {
var (
- path *ibctesting.Path
- ack exported.Acknowledgement
- heightDiff uint64
- delayPeriod uint64
+ path *ibctesting.Path
+ ack exported.Acknowledgement
+ heightDiff uint64
+ delayTimePeriod uint64
+ timePerBlock uint64
)
cases := []struct {
@@ -378,10 +395,16 @@ func (suite *KeeperTestSuite) TestVerifyPacketAcknowledgement() {
}{
{"verification success", func() {}, true},
{"verification success: delay period passed", func() {
- delayPeriod = uint64(1 * time.Second.Nanoseconds())
+ delayTimePeriod = uint64(1 * time.Second.Nanoseconds())
}, true},
- {"delay period has not passed", func() {
- delayPeriod = uint64(1 * time.Hour.Nanoseconds())
+ {"delay time period has not passed", func() {
+ delayTimePeriod = uint64(1 * time.Hour.Nanoseconds())
+ }, false},
+ {"delay block period has not passed", func() {
+ // make timePerBlock 1 nanosecond so that block delay is not passed.
+ // must also set a non-zero time delay to ensure block delay is enforced.
+ delayTimePeriod = uint64(1 * time.Second.Nanoseconds())
+ timePerBlock = 1
}, false},
{"client state not found- changed client ID", func() {
connection := path.EndpointA.GetConnection()
@@ -426,10 +449,19 @@ func (suite *KeeperTestSuite) TestVerifyPacketAcknowledgement() {
packetAckKey := host.PacketAcknowledgementKey(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence())
proof, proofHeight := suite.chainB.QueryProof(packetAckKey)
+ // reset variables
+ heightDiff = 0
+ delayTimePeriod = 0
+ timePerBlock = 0
tc.malleate()
connection := path.EndpointA.GetConnection()
- connection.DelayPeriod = delayPeriod
+ connection.DelayPeriod = delayTimePeriod
+
+ // set time per block param
+ if timePerBlock != 0 {
+ suite.chainA.App.GetIBCKeeper().ConnectionKeeper.SetParams(suite.chainA.GetContext(), types.NewParams(timePerBlock))
+ }
err = suite.chainA.App.GetIBCKeeper().ConnectionKeeper.VerifyPacketAcknowledgement(
suite.chainA.GetContext(), connection, malleateHeight(proofHeight, heightDiff), proof,
@@ -450,10 +482,11 @@ func (suite *KeeperTestSuite) TestVerifyPacketAcknowledgement() {
// a packet is sent from chainA to chainB and not received.
func (suite *KeeperTestSuite) TestVerifyPacketReceiptAbsence() {
var (
- path *ibctesting.Path
- packet channeltypes.Packet
- heightDiff uint64
- delayPeriod uint64
+ path *ibctesting.Path
+ packet channeltypes.Packet
+ heightDiff uint64
+ delayTimePeriod uint64
+ timePerBlock uint64
)
cases := []struct {
@@ -463,10 +496,16 @@ func (suite *KeeperTestSuite) TestVerifyPacketReceiptAbsence() {
}{
{"verification success", func() {}, true},
{"verification success: delay period passed", func() {
- delayPeriod = uint64(1 * time.Second.Nanoseconds())
+ delayTimePeriod = uint64(1 * time.Second.Nanoseconds())
}, true},
- {"delay period has not passed", func() {
- delayPeriod = uint64(1 * time.Hour.Nanoseconds())
+ {"delay time period has not passed", func() {
+ delayTimePeriod = uint64(1 * time.Hour.Nanoseconds())
+ }, false},
+ {"delay block period has not passed", func() {
+ // make timePerBlock 1 nanosecond so that block delay is not passed.
+ // must also set a non-zero time delay to ensure block delay is enforced.
+ delayTimePeriod = uint64(1 * time.Second.Nanoseconds())
+ timePerBlock = 1
}, false},
{"client state not found - changed client ID", func() {
connection := path.EndpointA.GetConnection()
@@ -505,10 +544,14 @@ func (suite *KeeperTestSuite) TestVerifyPacketReceiptAbsence() {
err := path.EndpointA.SendPacket(packet)
suite.Require().NoError(err)
+ // reset variables
+ heightDiff = 0
+ delayTimePeriod = 0
+ timePerBlock = 0
tc.malleate()
connection := path.EndpointA.GetConnection()
- connection.DelayPeriod = delayPeriod
+ connection.DelayPeriod = delayTimePeriod
clientState := path.EndpointA.GetClientState().(*ibctmtypes.ClientState)
if clientState.FrozenHeight.IsZero() {
@@ -520,6 +563,11 @@ func (suite *KeeperTestSuite) TestVerifyPacketReceiptAbsence() {
packetReceiptKey := host.PacketReceiptKey(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence())
proof, proofHeight := suite.chainB.QueryProof(packetReceiptKey)
+ // set time per block param
+ if timePerBlock != 0 {
+ suite.chainA.App.GetIBCKeeper().ConnectionKeeper.SetParams(suite.chainA.GetContext(), types.NewParams(timePerBlock))
+ }
+
err = suite.chainA.App.GetIBCKeeper().ConnectionKeeper.VerifyPacketReceiptAbsence(
suite.chainA.GetContext(), connection, malleateHeight(proofHeight, heightDiff), proof,
packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence(),
@@ -539,10 +587,11 @@ func (suite *KeeperTestSuite) TestVerifyPacketReceiptAbsence() {
// is sent from chainA to chainB and received.
func (suite *KeeperTestSuite) TestVerifyNextSequenceRecv() {
var (
- path *ibctesting.Path
- heightDiff uint64
- delayPeriod uint64
- offsetSeq uint64
+ path *ibctesting.Path
+ heightDiff uint64
+ delayTimePeriod uint64
+ timePerBlock uint64
+ offsetSeq uint64
)
cases := []struct {
@@ -552,10 +601,16 @@ func (suite *KeeperTestSuite) TestVerifyNextSequenceRecv() {
}{
{"verification success", func() {}, true},
{"verification success: delay period passed", func() {
- delayPeriod = uint64(1 * time.Second.Nanoseconds())
+ delayTimePeriod = uint64(1 * time.Second.Nanoseconds())
}, true},
- {"delay period has not passed", func() {
- delayPeriod = uint64(1 * time.Hour.Nanoseconds())
+ {"delay time period has not passed", func() {
+ delayTimePeriod = uint64(1 * time.Hour.Nanoseconds())
+ }, false},
+ {"delay block period has not passed", func() {
+ // make timePerBlock 1 nanosecond so that block delay is not passed.
+ // must also set a non-zero time delay to ensure block delay is enforced.
+ delayTimePeriod = uint64(1 * time.Second.Nanoseconds())
+ timePerBlock = 1
}, false},
{"client state not found- changed client ID", func() {
connection := path.EndpointA.GetConnection()
@@ -599,10 +654,19 @@ func (suite *KeeperTestSuite) TestVerifyNextSequenceRecv() {
nextSeqRecvKey := host.NextSequenceRecvKey(packet.GetDestPort(), packet.GetDestChannel())
proof, proofHeight := suite.chainB.QueryProof(nextSeqRecvKey)
+ // reset variables
+ heightDiff = 0
+ delayTimePeriod = 0
+ timePerBlock = 0
tc.malleate()
+ // set time per block param
+ if timePerBlock != 0 {
+ suite.chainA.App.GetIBCKeeper().ConnectionKeeper.SetParams(suite.chainA.GetContext(), types.NewParams(timePerBlock))
+ }
+
connection := path.EndpointA.GetConnection()
- connection.DelayPeriod = delayPeriod
+ connection.DelayPeriod = delayTimePeriod
err = suite.chainA.App.GetIBCKeeper().ConnectionKeeper.VerifyNextSequenceRecv(
suite.chainA.GetContext(), connection, malleateHeight(proofHeight, heightDiff), proof,
packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence()+offsetSeq,
diff --git a/modules/core/03-connection/types/connection.pb.go b/modules/core/03-connection/types/connection.pb.go
index 16389778..07577489 100644
--- a/modules/core/03-connection/types/connection.pb.go
+++ b/modules/core/03-connection/types/connection.pb.go
@@ -354,6 +354,54 @@ func (m *Version) XXX_DiscardUnknown() {
var xxx_messageInfo_Version proto.InternalMessageInfo
+// Params defines the set of Connection parameters.
+type Params struct {
+ // maximum expected time per block, used to enforce block delay. This parameter should reflect the largest amount of
+ // time that the chain might reasonably take to produce the next block under normal operating conditions. A safe
+ // choice is 3-5x the expected time per block.
+ MaxExpectedTimePerBlock uint64 `protobuf:"varint,1,opt,name=max_expected_time_per_block,json=maxExpectedTimePerBlock,proto3" json:"max_expected_time_per_block,omitempty" yaml:"max_expected_time_per_block"`
+}
+
+func (m *Params) Reset() { *m = Params{} }
+func (m *Params) String() string { return proto.CompactTextString(m) }
+func (*Params) ProtoMessage() {}
+func (*Params) Descriptor() ([]byte, []int) {
+ return fileDescriptor_90572467c054e43a, []int{6}
+}
+func (m *Params) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Params) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_Params.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *Params) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Params.Merge(m, src)
+}
+func (m *Params) XXX_Size() int {
+ return m.Size()
+}
+func (m *Params) XXX_DiscardUnknown() {
+ xxx_messageInfo_Params.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Params proto.InternalMessageInfo
+
+func (m *Params) GetMaxExpectedTimePerBlock() uint64 {
+ if m != nil {
+ return m.MaxExpectedTimePerBlock
+ }
+ return 0
+}
+
func init() {
proto.RegisterEnum("ibc.core.connection.v1.State", State_name, State_value)
proto.RegisterType((*ConnectionEnd)(nil), "ibc.core.connection.v1.ConnectionEnd")
@@ -362,6 +410,7 @@ func init() {
proto.RegisterType((*ClientPaths)(nil), "ibc.core.connection.v1.ClientPaths")
proto.RegisterType((*ConnectionPaths)(nil), "ibc.core.connection.v1.ConnectionPaths")
proto.RegisterType((*Version)(nil), "ibc.core.connection.v1.Version")
+ proto.RegisterType((*Params)(nil), "ibc.core.connection.v1.Params")
}
func init() {
@@ -369,49 +418,52 @@ func init() {
}
var fileDescriptor_90572467c054e43a = []byte{
- // 658 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0x41, 0x6b, 0xdb, 0x4c,
- 0x14, 0x94, 0x64, 0x39, 0xb1, 0xd7, 0xf1, 0xf7, 0xb9, 0x5b, 0xd3, 0x0a, 0x41, 0x24, 0xa1, 0x16,
- 0x6a, 0x0a, 0xb1, 0xea, 0x04, 0x0a, 0x4d, 0xe9, 0x21, 0x76, 0x5c, 0x10, 0x6d, 0x5d, 0x23, 0x3b,
- 0x85, 0xe6, 0x62, 0x6c, 0x69, 0xe3, 0x2c, 0xb5, 0xb5, 0x46, 0x5a, 0x9b, 0xfa, 0x1f, 0x84, 0x9c,
- 0x7a, 0xed, 0x21, 0x50, 0xe8, 0x7f, 0x29, 0xa1, 0xa7, 0x1c, 0x7b, 0x32, 0x25, 0xb9, 0xf6, 0xe4,
- 0x5f, 0x50, 0xa4, 0x95, 0x65, 0x25, 0x34, 0x87, 0xa4, 0xbd, 0xbd, 0xd9, 0x37, 0x33, 0xde, 0x37,
- 0x7e, 0x5a, 0xf0, 0x08, 0xf7, 0x6c, 0xc3, 0x26, 0x1e, 0x32, 0x6c, 0xe2, 0xba, 0xc8, 0xa6, 0x98,
- 0xb8, 0xc6, 0xa4, 0x92, 0x40, 0xe5, 0x91, 0x47, 0x28, 0x81, 0xf7, 0x70, 0xcf, 0x2e, 0x07, 0xc4,
- 0x72, 0xa2, 0x35, 0xa9, 0xc8, 0xc5, 0x3e, 0xe9, 0x93, 0x90, 0x62, 0x04, 0x15, 0x63, 0xcb, 0x49,
- 0xdb, 0xe1, 0x10, 0xd3, 0x21, 0x72, 0x29, 0xb3, 0x5d, 0x20, 0x46, 0xd4, 0xbf, 0x09, 0x20, 0x5f,
- 0x8b, 0x0d, 0xeb, 0xae, 0x03, 0x2b, 0x20, 0x6b, 0x0f, 0x30, 0x72, 0x69, 0x07, 0x3b, 0x12, 0xaf,
- 0xf1, 0xa5, 0x6c, 0xb5, 0x38, 0x9f, 0xa9, 0x85, 0x69, 0x77, 0x38, 0xd8, 0xd6, 0xe3, 0x96, 0x6e,
- 0x65, 0x58, 0x6d, 0x3a, 0xf0, 0x39, 0xc8, 0x4c, 0x90, 0xe7, 0x63, 0xe2, 0xfa, 0x92, 0xa0, 0xa5,
- 0x4a, 0xb9, 0x4d, 0xb5, 0xfc, 0xe7, 0xeb, 0x96, 0xdf, 0x31, 0x9e, 0x15, 0x0b, 0xe0, 0x16, 0x48,
- 0xfb, 0xb4, 0x4b, 0x91, 0x94, 0xd2, 0xf8, 0xd2, 0x7f, 0x9b, 0xeb, 0xd7, 0x29, 0x5b, 0x01, 0xc9,
- 0x62, 0x5c, 0xd8, 0x00, 0x6b, 0x36, 0x19, 0xbb, 0x14, 0x79, 0xa3, 0xae, 0x47, 0xa7, 0x92, 0xa8,
- 0xf1, 0xa5, 0xdc, 0xe6, 0xc3, 0xeb, 0xb4, 0xb5, 0x04, 0xb7, 0x2a, 0x9e, 0xce, 0x54, 0xce, 0xba,
- 0xa4, 0x87, 0xdb, 0x60, 0xcd, 0x41, 0x83, 0xee, 0xb4, 0x33, 0x42, 0x1e, 0x26, 0x8e, 0x94, 0xd6,
- 0xf8, 0x92, 0x58, 0xbd, 0x3f, 0x9f, 0xa9, 0x77, 0xd9, 0xdc, 0xc9, 0xae, 0x6e, 0xe5, 0x42, 0xd8,
- 0x0c, 0xd1, 0xb6, 0x78, 0xf4, 0x45, 0xe5, 0xf4, 0x5f, 0x02, 0x28, 0x9a, 0x0e, 0x72, 0x29, 0x3e,
- 0xc0, 0xc8, 0x59, 0x46, 0x0a, 0xd7, 0x81, 0x10, 0x07, 0x99, 0x9f, 0xcf, 0xd4, 0x2c, 0x33, 0x0c,
- 0x12, 0x14, 0xf0, 0x95, 0xb8, 0x85, 0x1b, 0xc7, 0x9d, 0xba, 0x75, 0xdc, 0xe2, 0x5f, 0xc4, 0x9d,
- 0xfe, 0xc7, 0x71, 0xaf, 0xdc, 0x38, 0xee, 0xef, 0x3c, 0x58, 0x4b, 0xfe, 0xcc, 0x6d, 0xd6, 0xf6,
- 0x05, 0xc8, 0x2f, 0xef, 0xbd, 0x8c, 0x5f, 0x9a, 0xcf, 0xd4, 0x62, 0x24, 0x4b, 0xb6, 0xf5, 0x60,
- 0x88, 0x05, 0x36, 0x1d, 0x58, 0x05, 0x2b, 0x23, 0x0f, 0x1d, 0xe0, 0x8f, 0xe1, 0xe6, 0x5e, 0x89,
- 0x23, 0xfe, 0xcc, 0x26, 0x95, 0xf2, 0x1b, 0xe4, 0x7d, 0x18, 0xa0, 0x66, 0xc8, 0x8d, 0xe2, 0x88,
- 0x94, 0xd1, 0x30, 0x0f, 0x40, 0xae, 0x16, 0x5e, 0xaa, 0xd9, 0xa5, 0x87, 0x3e, 0x2c, 0x82, 0xf4,
- 0x28, 0x28, 0x24, 0x5e, 0x4b, 0x95, 0xb2, 0x16, 0x03, 0xfa, 0x3e, 0xf8, 0x7f, 0xb9, 0x55, 0x8c,
- 0x78, 0x8b, 0x99, 0x63, 0x6f, 0x21, 0xe9, 0xfd, 0x0a, 0xac, 0x46, 0x9b, 0x02, 0x15, 0x00, 0xf0,
- 0x62, 0x8d, 0x3d, 0x66, 0x6a, 0x25, 0x4e, 0xa0, 0x0c, 0x32, 0x07, 0xa8, 0x4b, 0xc7, 0x1e, 0x5a,
- 0x78, 0xc4, 0x98, 0x4d, 0xf3, 0xf8, 0x33, 0x0f, 0xd2, 0xe1, 0xf6, 0xc0, 0xa7, 0x40, 0x6d, 0xb5,
- 0x77, 0xda, 0xf5, 0xce, 0x5e, 0xc3, 0x6c, 0x98, 0x6d, 0x73, 0xe7, 0xb5, 0xb9, 0x5f, 0xdf, 0xed,
- 0xec, 0x35, 0x5a, 0xcd, 0x7a, 0xcd, 0x7c, 0x69, 0xd6, 0x77, 0x0b, 0x9c, 0x7c, 0xe7, 0xf8, 0x44,
- 0xcb, 0x5f, 0x22, 0x40, 0x09, 0x00, 0xa6, 0x0b, 0x0e, 0x0b, 0xbc, 0x9c, 0x39, 0x3e, 0xd1, 0xc4,
- 0xa0, 0x86, 0x0a, 0xc8, 0xb3, 0x4e, 0xdb, 0x7a, 0xff, 0xb6, 0x59, 0x6f, 0x14, 0x04, 0x39, 0x77,
- 0x7c, 0xa2, 0xad, 0x46, 0x70, 0xa9, 0x0c, 0x9b, 0x29, 0xa6, 0x0c, 0x6a, 0x59, 0x3c, 0xfa, 0xaa,
- 0x70, 0xd5, 0xd6, 0xe9, 0xb9, 0xc2, 0x9f, 0x9d, 0x2b, 0xfc, 0xcf, 0x73, 0x85, 0xff, 0x74, 0xa1,
- 0x70, 0x67, 0x17, 0x0a, 0xf7, 0xe3, 0x42, 0xe1, 0xf6, 0x9f, 0xf5, 0x31, 0x3d, 0x1c, 0xf7, 0x82,
- 0xbf, 0xce, 0xb0, 0x89, 0x3f, 0x24, 0xbe, 0x81, 0x7b, 0xf6, 0x46, 0x9f, 0x18, 0x43, 0xe2, 0x8c,
- 0x07, 0xc8, 0x67, 0xcf, 0xe9, 0x93, 0xad, 0x8d, 0xc4, 0x43, 0x4d, 0xa7, 0x23, 0xe4, 0xf7, 0x56,
- 0xc2, 0xa7, 0x74, 0xeb, 0x77, 0x00, 0x00, 0x00, 0xff, 0xff, 0x7b, 0x1f, 0x87, 0x8e, 0xcc, 0x05,
- 0x00, 0x00,
+ // 717 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x55, 0x5f, 0x6b, 0xda, 0x5c,
+ 0x18, 0x37, 0x31, 0x5a, 0x3d, 0xd6, 0xf7, 0xf5, 0x3d, 0xaf, 0xbc, 0x0d, 0xbe, 0x34, 0x09, 0xd9,
+ 0xd8, 0x64, 0x50, 0x9d, 0x2d, 0x0c, 0xd6, 0xb1, 0x8b, 0x6a, 0x1d, 0x84, 0x6d, 0x4e, 0xa2, 0x1d,
+ 0xac, 0x37, 0x12, 0x93, 0x53, 0x7b, 0xa8, 0xc9, 0x91, 0xe4, 0x28, 0xfa, 0x0d, 0x4a, 0xaf, 0x76,
+ 0xbb, 0x8b, 0xc2, 0x60, 0xdf, 0x65, 0x94, 0x5d, 0xf5, 0x72, 0x57, 0x32, 0xda, 0xdb, 0x5d, 0xf9,
+ 0x09, 0x46, 0x72, 0xa2, 0xa6, 0x65, 0x1d, 0xb4, 0xdb, 0xdd, 0xf3, 0xe4, 0xf7, 0xc7, 0xe7, 0xf9,
+ 0xf9, 0x18, 0xc1, 0x43, 0xdc, 0x35, 0xcb, 0x26, 0x71, 0x51, 0xd9, 0x24, 0x8e, 0x83, 0x4c, 0x8a,
+ 0x89, 0x53, 0x1e, 0x55, 0x22, 0x5d, 0x69, 0xe0, 0x12, 0x4a, 0xe0, 0x7f, 0xb8, 0x6b, 0x96, 0x7c,
+ 0x62, 0x29, 0x02, 0x8d, 0x2a, 0x85, 0x7c, 0x8f, 0xf4, 0x48, 0x40, 0x29, 0xfb, 0x15, 0x63, 0x17,
+ 0xa2, 0xb6, 0xb6, 0x8d, 0xa9, 0x8d, 0x1c, 0xca, 0x6c, 0xe7, 0x1d, 0x23, 0xaa, 0x9f, 0x79, 0x90,
+ 0xad, 0x2d, 0x0c, 0xeb, 0x8e, 0x05, 0x2b, 0x20, 0x6d, 0xf6, 0x31, 0x72, 0x68, 0x07, 0x5b, 0x22,
+ 0xa7, 0x70, 0xc5, 0x74, 0x35, 0x3f, 0x9b, 0xca, 0xb9, 0x89, 0x61, 0xf7, 0xb7, 0xd5, 0x05, 0xa4,
+ 0xea, 0x29, 0x56, 0x6b, 0x16, 0x7c, 0x06, 0x52, 0x23, 0xe4, 0x7a, 0x98, 0x38, 0x9e, 0xc8, 0x2b,
+ 0xf1, 0x62, 0x66, 0x53, 0x2e, 0xfd, 0x7c, 0xdc, 0xd2, 0x5b, 0xc6, 0xd3, 0x17, 0x02, 0xb8, 0x05,
+ 0x12, 0x1e, 0x35, 0x28, 0x12, 0xe3, 0x0a, 0x57, 0xfc, 0x6b, 0x73, 0xfd, 0x26, 0x65, 0xcb, 0x27,
+ 0xe9, 0x8c, 0x0b, 0x1b, 0x60, 0xd5, 0x24, 0x43, 0x87, 0x22, 0x77, 0x60, 0xb8, 0x74, 0x22, 0x0a,
+ 0x0a, 0x57, 0xcc, 0x6c, 0xde, 0xbf, 0x49, 0x5b, 0x8b, 0x70, 0xab, 0xc2, 0xd9, 0x54, 0x8e, 0xe9,
+ 0x57, 0xf4, 0x70, 0x1b, 0xac, 0x5a, 0xa8, 0x6f, 0x4c, 0x3a, 0x03, 0xe4, 0x62, 0x62, 0x89, 0x09,
+ 0x85, 0x2b, 0x0a, 0xd5, 0xb5, 0xd9, 0x54, 0xfe, 0x97, 0xed, 0x1d, 0x45, 0x55, 0x3d, 0x13, 0xb4,
+ 0xcd, 0xa0, 0xdb, 0x16, 0x8e, 0x3f, 0xca, 0x31, 0xf5, 0x3b, 0x0f, 0xf2, 0x9a, 0x85, 0x1c, 0x8a,
+ 0x0f, 0x30, 0xb2, 0x96, 0x91, 0xc2, 0x75, 0xc0, 0x2f, 0x82, 0xcc, 0xce, 0xa6, 0x72, 0x9a, 0x19,
+ 0xfa, 0x09, 0xf2, 0xf8, 0x5a, 0xdc, 0xfc, 0xad, 0xe3, 0x8e, 0xdf, 0x39, 0x6e, 0xe1, 0x37, 0xe2,
+ 0x4e, 0xfc, 0xe1, 0xb8, 0x93, 0xb7, 0x8e, 0xfb, 0x0b, 0x07, 0x56, 0xa3, 0x1f, 0x73, 0x97, 0xb3,
+ 0x7d, 0x0e, 0xb2, 0xcb, 0xb9, 0x97, 0xf1, 0x8b, 0xb3, 0xa9, 0x9c, 0x0f, 0x65, 0x51, 0x58, 0xf5,
+ 0x97, 0x98, 0xf7, 0x9a, 0x05, 0xab, 0x20, 0x39, 0x70, 0xd1, 0x01, 0x1e, 0x07, 0x97, 0x7b, 0x2d,
+ 0x8e, 0xc5, 0xcf, 0x6c, 0x54, 0x29, 0xbd, 0x46, 0xee, 0x51, 0x1f, 0x35, 0x03, 0x6e, 0x18, 0x47,
+ 0xa8, 0x0c, 0x97, 0xb9, 0x07, 0x32, 0xb5, 0x60, 0xa8, 0xa6, 0x41, 0x0f, 0x3d, 0x98, 0x07, 0x89,
+ 0x81, 0x5f, 0x88, 0x9c, 0x12, 0x2f, 0xa6, 0x75, 0xd6, 0xa8, 0xfb, 0xe0, 0xef, 0xe5, 0x55, 0x31,
+ 0xe2, 0x1d, 0x76, 0x5e, 0x78, 0xf3, 0x51, 0xef, 0x97, 0x60, 0x25, 0xbc, 0x14, 0x28, 0x01, 0x80,
+ 0xe7, 0x67, 0xec, 0x32, 0x53, 0x3d, 0xf2, 0x04, 0x16, 0x40, 0xea, 0x00, 0x19, 0x74, 0xe8, 0xa2,
+ 0xb9, 0xc7, 0xa2, 0x0f, 0xb7, 0x71, 0x40, 0xb2, 0x69, 0xb8, 0x86, 0xed, 0x41, 0x0b, 0xfc, 0x6f,
+ 0x1b, 0xe3, 0x0e, 0x1a, 0x0f, 0x90, 0x49, 0x91, 0xd5, 0xa1, 0xd8, 0x46, 0xfe, 0x97, 0xda, 0xe9,
+ 0xf6, 0x89, 0x79, 0x14, 0x98, 0x0b, 0xd5, 0x07, 0xb3, 0xa9, 0xac, 0xb2, 0x89, 0x7f, 0x41, 0x56,
+ 0xf5, 0x35, 0xdb, 0x18, 0xd7, 0x43, 0xb0, 0x8d, 0x6d, 0xd4, 0x44, 0x6e, 0xd5, 0x47, 0x1e, 0x7d,
+ 0xe0, 0x40, 0x22, 0xb8, 0x56, 0xf8, 0x04, 0xc8, 0xad, 0xf6, 0x4e, 0xbb, 0xde, 0xd9, 0x6b, 0x68,
+ 0x0d, 0xad, 0xad, 0xed, 0xbc, 0xd2, 0xf6, 0xeb, 0xbb, 0x9d, 0xbd, 0x46, 0xab, 0x59, 0xaf, 0x69,
+ 0x2f, 0xb4, 0xfa, 0x6e, 0x2e, 0x56, 0xf8, 0xe7, 0xe4, 0x54, 0xc9, 0x5e, 0x21, 0x40, 0x11, 0x00,
+ 0xa6, 0xf3, 0x1f, 0xe6, 0xb8, 0x42, 0xea, 0xe4, 0x54, 0x11, 0xfc, 0x1a, 0x4a, 0x20, 0xcb, 0x90,
+ 0xb6, 0xfe, 0xee, 0x4d, 0xb3, 0xde, 0xc8, 0xf1, 0x85, 0xcc, 0xc9, 0xa9, 0xb2, 0x12, 0xb6, 0x4b,
+ 0x65, 0x00, 0xc6, 0x99, 0xd2, 0xaf, 0x0b, 0xc2, 0xf1, 0x27, 0x29, 0x56, 0x6d, 0x9d, 0x5d, 0x48,
+ 0xdc, 0xf9, 0x85, 0xc4, 0x7d, 0xbb, 0x90, 0xb8, 0xf7, 0x97, 0x52, 0xec, 0xfc, 0x52, 0x8a, 0x7d,
+ 0xbd, 0x94, 0x62, 0xfb, 0x4f, 0x7b, 0x98, 0x1e, 0x0e, 0xbb, 0xfe, 0xa9, 0x94, 0x4d, 0xe2, 0xd9,
+ 0xc4, 0x2b, 0xe3, 0xae, 0xb9, 0xd1, 0x23, 0x65, 0x9b, 0x58, 0xc3, 0x3e, 0xf2, 0xd8, 0xeb, 0xfb,
+ 0xf1, 0xd6, 0x46, 0xe4, 0x8f, 0x81, 0x4e, 0x06, 0xc8, 0xeb, 0x26, 0x83, 0x57, 0xf7, 0xd6, 0x8f,
+ 0x00, 0x00, 0x00, 0xff, 0xff, 0xb4, 0x83, 0x45, 0x2b, 0x3c, 0x06, 0x00, 0x00,
}
func (m *ConnectionEnd) Marshal() (dAtA []byte, err error) {
@@ -706,6 +758,34 @@ func (m *Version) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
+func (m *Params) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Params) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Params) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.MaxExpectedTimePerBlock != 0 {
+ i = encodeVarintConnection(dAtA, i, uint64(m.MaxExpectedTimePerBlock))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
func encodeVarintConnection(dAtA []byte, offset int, v uint64) int {
offset -= sovConnection(v)
base := offset
@@ -847,6 +927,18 @@ func (m *Version) Size() (n int) {
return n
}
+func (m *Params) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.MaxExpectedTimePerBlock != 0 {
+ n += 1 + sovConnection(uint64(m.MaxExpectedTimePerBlock))
+ }
+ return n
+}
+
func sovConnection(x uint64) (n int) {
return (math_bits.Len64(x|1) + 6) / 7
}
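[Editor's note] A quick worked example of the sovConnection sizing formula above, as a standalone sketch with an illustrative value only: a `MaxExpectedTimePerBlock` of 30 seconds in nanoseconds needs 35 bits, which varint-encodes into 5 bytes, so `Params.Size` adds 1 tag byte plus 5 payload bytes.

```go
package main

import (
	"fmt"
	"math/bits"
)

// Same formula as sovConnection above: number of bytes needed to
// varint-encode x (each byte carries 7 payload bits).
func sov(x uint64) int { return (bits.Len64(x|1) + 6) / 7 }

func main() {
	// Illustrative value: 30s expressed in nanoseconds.
	const maxExpectedTimePerBlock = 30_000_000_000

	fmt.Println(sov(maxExpectedTimePerBlock))     // 5 payload bytes
	fmt.Println(1 + sov(maxExpectedTimePerBlock)) // 6 = tag byte + payload, as in Params.Size
}
```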
@@ -1716,6 +1808,75 @@ func (m *Version) Unmarshal(dAtA []byte) error {
}
return nil
}
+func (m *Params) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowConnection
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Params: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Params: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MaxExpectedTimePerBlock", wireType)
+ }
+ m.MaxExpectedTimePerBlock = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowConnection
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.MaxExpectedTimePerBlock |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipConnection(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthConnection
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
func skipConnection(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
diff --git a/modules/core/03-connection/types/genesis.go b/modules/core/03-connection/types/genesis.go
index 8eb441c5..31378218 100644
--- a/modules/core/03-connection/types/genesis.go
+++ b/modules/core/03-connection/types/genesis.go
@@ -17,12 +17,13 @@ func NewConnectionPaths(id string, paths []string) ConnectionPaths {
// NewGenesisState creates a GenesisState instance.
func NewGenesisState(
connections []IdentifiedConnection, connPaths []ConnectionPaths,
- nextConnectionSequence uint64,
+ nextConnectionSequence uint64, params Params,
) GenesisState {
return GenesisState{
Connections: connections,
ClientConnectionPaths: connPaths,
NextConnectionSequence: nextConnectionSequence,
+ Params: params,
}
}
@@ -32,6 +33,7 @@ func DefaultGenesisState() GenesisState {
Connections: []IdentifiedConnection{},
ClientConnectionPaths: []ConnectionPaths{},
NextConnectionSequence: 0,
+ Params: DefaultParams(),
}
}
@@ -72,5 +74,9 @@ func (gs GenesisState) Validate() error {
return fmt.Errorf("next connection sequence %d must be greater than maximum sequence used in connection identifier %d", gs.NextConnectionSequence, maxSequence)
}
+ if err := gs.Params.Validate(); err != nil {
+ return err
+ }
+
return nil
}
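[Editor's note] To show how the new genesis field is meant to be used, here is a hedged, standalone sketch; the values are illustrative and the import path is the one used elsewhere in this patch. It builds a connection genesis state via the updated NewGenesisState signature, overriding the default block-delay parameter, and runs the Validate check added above.

```go
package main

import (
	"fmt"
	"time"

	"github.com/cosmos/ibc-go/modules/core/03-connection/types"
)

func main() {
	// Illustrative only: no connections yet, next sequence 0, and a
	// MaxExpectedTimePerBlock of 45s (in nanoseconds) instead of the default.
	gs := types.NewGenesisState(
		[]types.IdentifiedConnection{},
		[]types.ConnectionPaths{},
		0,
		types.NewParams(uint64((45*time.Second).Nanoseconds())),
	)

	// Validate now also checks the embedded Params, per the change above.
	fmt.Println(gs.Validate())
}
```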
diff --git a/modules/core/03-connection/types/genesis.pb.go b/modules/core/03-connection/types/genesis.pb.go
index c538d3e3..2efa4395 100644
--- a/modules/core/03-connection/types/genesis.pb.go
+++ b/modules/core/03-connection/types/genesis.pb.go
@@ -29,6 +29,7 @@ type GenesisState struct {
ClientConnectionPaths []ConnectionPaths `protobuf:"bytes,2,rep,name=client_connection_paths,json=clientConnectionPaths,proto3" json:"client_connection_paths" yaml:"client_connection_paths"`
// the sequence for the next generated connection identifier
NextConnectionSequence uint64 `protobuf:"varint,3,opt,name=next_connection_sequence,json=nextConnectionSequence,proto3" json:"next_connection_sequence,omitempty" yaml:"next_connection_sequence"`
+ Params Params `protobuf:"bytes,4,opt,name=params,proto3" json:"params"`
}
func (m *GenesisState) Reset() { *m = GenesisState{} }
@@ -85,6 +86,13 @@ func (m *GenesisState) GetNextConnectionSequence() uint64 {
return 0
}
+func (m *GenesisState) GetParams() Params {
+ if m != nil {
+ return m.Params
+ }
+ return Params{}
+}
+
func init() {
proto.RegisterType((*GenesisState)(nil), "ibc.core.connection.v1.GenesisState")
}
@@ -94,28 +102,29 @@ func init() {
}
var fileDescriptor_1879d34bc6ac3cd7 = []byte{
- // 327 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0x31, 0x4f, 0xc2, 0x40,
- 0x18, 0x86, 0x5b, 0x20, 0x0e, 0xc5, 0xa9, 0x51, 0x6c, 0x18, 0xae, 0xa4, 0x1a, 0x61, 0x90, 0x3b,
- 0x91, 0x49, 0xc7, 0x3a, 0x18, 0x37, 0x03, 0x4e, 0x26, 0x86, 0xd0, 0xe3, 0xb3, 0x5c, 0x42, 0xef,
- 0x43, 0xee, 0x20, 0xf2, 0x0b, 0x5c, 0xfd, 0x59, 0x2c, 0x26, 0x8c, 0x4e, 0xc4, 0xc0, 0x3f, 0xe0,
- 0x17, 0x98, 0xb6, 0xc6, 0xa2, 0xb1, 0xdb, 0xe5, 0xbe, 0xe7, 0x7d, 0xde, 0xe1, 0xb5, 0x4e, 0x44,
- 0xc0, 0x19, 0xc7, 0x09, 0x30, 0x8e, 0x52, 0x02, 0xd7, 0x02, 0x25, 0x9b, 0xb5, 0x58, 0x08, 0x12,
- 0x94, 0x50, 0x74, 0x3c, 0x41, 0x8d, 0x76, 0x45, 0x04, 0x9c, 0xc6, 0x14, 0xcd, 0x28, 0x3a, 0x6b,
- 0x55, 0x0f, 0x42, 0x0c, 0x31, 0x41, 0x58, 0xfc, 0x4a, 0xe9, 0x6a, 0x3d, 0xc7, 0xb9, 0x93, 0x4d,
- 0x40, 0xef, 0xbd, 0x60, 0xed, 0xdf, 0xa4, 0x45, 0x5d, 0xdd, 0xd7, 0x60, 0xdf, 0x5b, 0xe5, 0x0c,
- 0x52, 0x8e, 0x59, 0x2b, 0x36, 0xca, 0x17, 0x67, 0xf4, 0xff, 0x76, 0x7a, 0x3b, 0x00, 0xa9, 0xc5,
- 0x93, 0x80, 0xc1, 0xf5, 0xcf, 0xbf, 0x5f, 0x5a, 0xac, 0x5c, 0xa3, 0xb3, 0xab, 0xb1, 0x5f, 0x4d,
- 0xeb, 0x88, 0x8f, 0x04, 0x48, 0xdd, 0xcb, 0xbe, 0x7b, 0xe3, 0xbe, 0x1e, 0x2a, 0xa7, 0x90, 0x54,
- 0xd4, 0xf3, 0x2a, 0x32, 0xf1, 0x5d, 0x8c, 0xfb, 0xa7, 0xb1, 0x7d, 0xbb, 0x72, 0xc9, 0xbc, 0x1f,
- 0x8d, 0xae, 0xbc, 0x1c, 0xab, 0xd7, 0x39, 0x4c, 0x2f, 0x7f, 0xe2, 0xf6, 0xa3, 0xe5, 0x48, 0x78,
- 0xf9, 0x15, 0x50, 0xf0, 0x3c, 0x05, 0xc9, 0xc1, 0x29, 0xd6, 0xcc, 0x46, 0xc9, 0x3f, 0xde, 0xae,
- 0x5c, 0x37, 0x95, 0xe7, 0x91, 0x5e, 0xa7, 0x12, 0x9f, 0x32, 0x77, 0xf7, 0xfb, 0xe0, 0x77, 0x17,
- 0x6b, 0x62, 0x2e, 0xd7, 0xc4, 0xfc, 0x5c, 0x13, 0xf3, 0x6d, 0x43, 0x8c, 0xe5, 0x86, 0x18, 0x1f,
- 0x1b, 0x62, 0x3c, 0x5c, 0x86, 0x42, 0x0f, 0xa7, 0x01, 0xe5, 0x18, 0x31, 0x8e, 0x2a, 0x42, 0xc5,
- 0x44, 0xc0, 0x9b, 0x21, 0xb2, 0x08, 0x07, 0xd3, 0x11, 0xa8, 0x74, 0xaf, 0xf3, 0x76, 0x73, 0x67,
- 0x32, 0x3d, 0x1f, 0x83, 0x0a, 0xf6, 0x92, 0xad, 0xda, 0x5f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x85,
- 0x66, 0x6f, 0x29, 0x2a, 0x02, 0x00, 0x00,
+ // 352 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0xb1, 0x4e, 0xc2, 0x40,
+ 0x18, 0xc7, 0x5b, 0x21, 0x0c, 0xc5, 0xa9, 0x51, 0x6c, 0x18, 0xae, 0xa4, 0x1a, 0x61, 0x90, 0x3b,
+ 0x81, 0x49, 0xe3, 0x54, 0x07, 0xe3, 0x46, 0xc0, 0xc9, 0xc4, 0x90, 0xf6, 0xf8, 0x2c, 0x97, 0xd0,
+ 0xbb, 0xca, 0x1d, 0x44, 0x9e, 0xc0, 0xc1, 0xc5, 0xc7, 0x62, 0x64, 0x74, 0x22, 0x06, 0xde, 0x80,
+ 0x27, 0x30, 0x6d, 0x89, 0x45, 0x63, 0xb7, 0xe6, 0xfb, 0x7e, 0xff, 0xdf, 0x3f, 0xbd, 0xcf, 0x38,
+ 0x63, 0x3e, 0x25, 0x54, 0x4c, 0x80, 0x50, 0xc1, 0x39, 0x50, 0xc5, 0x04, 0x27, 0xb3, 0x16, 0x09,
+ 0x80, 0x83, 0x64, 0x12, 0x47, 0x13, 0xa1, 0x84, 0x59, 0x61, 0x3e, 0xc5, 0x31, 0x85, 0x33, 0x0a,
+ 0xcf, 0x5a, 0xd5, 0xa3, 0x40, 0x04, 0x22, 0x41, 0x48, 0xfc, 0x95, 0xd2, 0xd5, 0x7a, 0x8e, 0x73,
+ 0x2f, 0x9b, 0x80, 0xce, 0x7b, 0xc1, 0x38, 0xbc, 0x4b, 0x8b, 0xfa, 0xca, 0x53, 0x60, 0x3e, 0x18,
+ 0xe5, 0x0c, 0x92, 0x96, 0x5e, 0x2b, 0x34, 0xca, 0xed, 0x0b, 0xfc, 0x7f, 0x3b, 0xbe, 0x1f, 0x02,
+ 0x57, 0xec, 0x99, 0xc1, 0xf0, 0xf6, 0x67, 0xee, 0x16, 0x17, 0x2b, 0x5b, 0xeb, 0xed, 0x6b, 0xcc,
+ 0x37, 0xdd, 0x38, 0xa1, 0x63, 0x06, 0x5c, 0x0d, 0xb2, 0xf1, 0x20, 0xf2, 0xd4, 0x48, 0x5a, 0x07,
+ 0x49, 0x45, 0x3d, 0xaf, 0x22, 0x13, 0x77, 0x63, 0xdc, 0x3d, 0x8f, 0xed, 0xdb, 0x95, 0x8d, 0xe6,
+ 0x5e, 0x38, 0xbe, 0x76, 0x72, 0xac, 0x4e, 0xef, 0x38, 0xdd, 0xfc, 0x89, 0x9b, 0x4f, 0x86, 0xc5,
+ 0xe1, 0xf5, 0x57, 0x40, 0xc2, 0xcb, 0x14, 0x38, 0x05, 0xab, 0x50, 0xd3, 0x1b, 0x45, 0xf7, 0x74,
+ 0xbb, 0xb2, 0xed, 0x54, 0x9e, 0x47, 0x3a, 0xbd, 0x4a, 0xbc, 0xca, 0xdc, 0xfd, 0xdd, 0xc2, 0xbc,
+ 0x31, 0x4a, 0x91, 0x37, 0xf1, 0x42, 0x69, 0x15, 0x6b, 0x7a, 0xa3, 0xdc, 0x46, 0x79, 0xbf, 0xd5,
+ 0x4d, 0xa8, 0xdd, 0x5b, 0xed, 0x32, 0x6e, 0x7f, 0xb1, 0x46, 0xfa, 0x72, 0x8d, 0xf4, 0xaf, 0x35,
+ 0xd2, 0x3f, 0x36, 0x48, 0x5b, 0x6e, 0x90, 0xf6, 0xb9, 0x41, 0xda, 0xe3, 0x55, 0xc0, 0xd4, 0x68,
+ 0xea, 0x63, 0x2a, 0x42, 0x42, 0x85, 0x0c, 0x85, 0x24, 0xcc, 0xa7, 0xcd, 0x40, 0x90, 0x50, 0x0c,
+ 0xa7, 0x63, 0x90, 0xe9, 0xb5, 0x2f, 0x3b, 0xcd, 0xbd, 0x83, 0xab, 0x79, 0x04, 0xd2, 0x2f, 0x25,
+ 0x97, 0xee, 0x7c, 0x07, 0x00, 0x00, 0xff, 0xff, 0x5a, 0x52, 0xa6, 0xc4, 0x68, 0x02, 0x00, 0x00,
}
func (m *GenesisState) Marshal() (dAtA []byte, err error) {
@@ -138,6 +147,16 @@ func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
+ {
+ size, err := m.Params.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenesis(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
if m.NextConnectionSequence != 0 {
i = encodeVarintGenesis(dAtA, i, uint64(m.NextConnectionSequence))
i--
@@ -206,6 +225,8 @@ func (m *GenesisState) Size() (n int) {
if m.NextConnectionSequence != 0 {
n += 1 + sovGenesis(uint64(m.NextConnectionSequence))
}
+ l = m.Params.Size()
+ n += 1 + l + sovGenesis(uint64(l))
return n
}
@@ -331,6 +352,39 @@ func (m *GenesisState) Unmarshal(dAtA []byte) error {
break
}
}
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenesis
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenesis
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenesis
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenesis(dAtA[iNdEx:])
diff --git a/modules/core/03-connection/types/genesis_test.go b/modules/core/03-connection/types/genesis_test.go
index 562890dd..9a64c86b 100644
--- a/modules/core/03-connection/types/genesis_test.go
+++ b/modules/core/03-connection/types/genesis_test.go
@@ -32,6 +32,7 @@ func TestValidateGenesis(t *testing.T) {
{clientID, []string{connectionID}},
},
0,
+ types.DefaultParams(),
),
expPass: true,
},
@@ -45,6 +46,7 @@ func TestValidateGenesis(t *testing.T) {
{clientID, []string{connectionID}},
},
0,
+ types.DefaultParams(),
),
expPass: false,
},
@@ -58,6 +60,7 @@ func TestValidateGenesis(t *testing.T) {
{"(CLIENTIDONE)", []string{connectionID}},
},
0,
+ types.DefaultParams(),
),
expPass: false,
},
@@ -71,6 +74,7 @@ func TestValidateGenesis(t *testing.T) {
{clientID, []string{invalidConnectionID}},
},
0,
+ types.DefaultParams(),
),
expPass: false,
},
@@ -84,6 +88,7 @@ func TestValidateGenesis(t *testing.T) {
{clientID, []string{connectionID}},
},
0,
+ types.DefaultParams(),
),
expPass: false,
},
@@ -97,6 +102,21 @@ func TestValidateGenesis(t *testing.T) {
{clientID, []string{connectionID}},
},
0,
+ types.DefaultParams(),
+ ),
+ expPass: false,
+ },
+ {
+ name: "invalid params",
+ genState: types.NewGenesisState(
+ []types.IdentifiedConnection{
+ types.NewIdentifiedConnection(connectionID, types.NewConnectionEnd(types.INIT, clientID, types.Counterparty{clientID2, connectionID2, commitmenttypes.NewMerklePrefix([]byte("prefix"))}, []*types.Version{ibctesting.ConnectionVersion}, 500)),
+ },
+ []types.ConnectionPaths{
+ {clientID, []string{connectionID}},
+ },
+ 0,
+ types.Params{},
),
expPass: false,
},
diff --git a/modules/core/03-connection/types/params.go b/modules/core/03-connection/types/params.go
new file mode 100644
index 00000000..904bde60
--- /dev/null
+++ b/modules/core/03-connection/types/params.go
@@ -0,0 +1,54 @@
+package types
+
+import (
+ "fmt"
+ "time"
+
+ paramtypes "github.com/cosmos/cosmos-sdk/x/params/types"
+)
+
+// DefaultTimePerBlock is the default value for maximum expected time per block.
+const DefaultTimePerBlock = 30 * time.Second
+
+// KeyMaxExpectedTimePerBlock is the store key for the MaxExpectedTimePerBlock parameter
+var KeyMaxExpectedTimePerBlock = []byte("MaxExpectedTimePerBlock")
+
+// ParamKeyTable returns the parameter key table for the ibc connection module
+func ParamKeyTable() paramtypes.KeyTable {
+ return paramtypes.NewKeyTable().RegisterParamSet(&Params{})
+}
+
+// NewParams creates a new parameter configuration for the ibc connection module
+func NewParams(timePerBlock uint64) Params {
+ return Params{
+ MaxExpectedTimePerBlock: timePerBlock,
+ }
+}
+
+// DefaultParams is the default parameter configuration for the ibc connection module
+func DefaultParams() Params {
+ return NewParams(uint64(DefaultTimePerBlock))
+}
+
+// Validate ensures MaxExpectedTimePerBlock is non-zero
+func (p Params) Validate() error {
+ if p.MaxExpectedTimePerBlock == 0 {
+ return fmt.Errorf("MaxExpectedTimePerBlock cannot be zero")
+ }
+ return nil
+}
+
+// ParamSetPairs implements params.ParamSet
+func (p *Params) ParamSetPairs() paramtypes.ParamSetPairs {
+ return paramtypes.ParamSetPairs{
+ paramtypes.NewParamSetPair(KeyMaxExpectedTimePerBlock, p.MaxExpectedTimePerBlock, validateParams),
+ }
+}
+
+func validateParams(i interface{}) error {
+ _, ok := i.(uint64)
+ if !ok {
+ return fmt.Errorf("invalid parameter. expected %T, got type: %T", uint64(1), i)
+ }
+ return nil
+}
diff --git a/modules/core/03-connection/types/params_test.go b/modules/core/03-connection/types/params_test.go
new file mode 100644
index 00000000..0a04ed34
--- /dev/null
+++ b/modules/core/03-connection/types/params_test.go
@@ -0,0 +1,29 @@
+package types_test
+
+import (
+ "testing"
+
+ "github.com/cosmos/ibc-go/modules/core/03-connection/types"
+ "github.com/stretchr/testify/require"
+)
+
+func TestValidateParams(t *testing.T) {
+ testCases := []struct {
+ name string
+ params types.Params
+ expPass bool
+ }{
+ {"default params", types.DefaultParams(), true},
+ {"custom params", types.NewParams(10), true},
+ {"blank client", types.NewParams(0), false},
+ }
+
+ for _, tc := range testCases {
+ err := tc.params.Validate()
+ if tc.expPass {
+ require.NoError(t, err, tc.name)
+ } else {
+ require.Error(t, err, tc.name)
+ }
+ }
+}
diff --git a/modules/core/exported/client.go b/modules/core/exported/client.go
index 77268b1b..8de7976c 100644
--- a/modules/core/exported/client.go
+++ b/modules/core/exported/client.go
@@ -126,11 +126,12 @@ type ClientState interface {
channel ChannelI,
) error
VerifyPacketCommitment(
+ ctx sdk.Context,
store sdk.KVStore,
cdc codec.BinaryCodec,
height Height,
- currentTimestamp uint64,
- delayPeriod uint64,
+ delayTimePeriod uint64,
+ delayBlockPeriod uint64,
prefix Prefix,
proof []byte,
portID,
@@ -139,11 +140,12 @@ type ClientState interface {
commitmentBytes []byte,
) error
VerifyPacketAcknowledgement(
+ ctx sdk.Context,
store sdk.KVStore,
cdc codec.BinaryCodec,
height Height,
- currentTimestamp uint64,
- delayPeriod uint64,
+ delayTimePeriod uint64,
+ delayBlockPeriod uint64,
prefix Prefix,
proof []byte,
portID,
@@ -152,11 +154,12 @@ type ClientState interface {
acknowledgement []byte,
) error
VerifyPacketReceiptAbsence(
+ ctx sdk.Context,
store sdk.KVStore,
cdc codec.BinaryCodec,
height Height,
- currentTimestamp uint64,
- delayPeriod uint64,
+ delayTimePeriod uint64,
+ delayBlockPeriod uint64,
prefix Prefix,
proof []byte,
portID,
@@ -164,11 +167,12 @@ type ClientState interface {
sequence uint64,
) error
VerifyNextSequenceRecv(
+ ctx sdk.Context,
store sdk.KVStore,
cdc codec.BinaryCodec,
height Height,
- currentTimestamp uint64,
- delayPeriod uint64,
+ delayTimePeriod uint64,
+ delayBlockPeriod uint64,
prefix Prefix,
proof []byte,
portID,
diff --git a/modules/core/genesis_test.go b/modules/core/genesis_test.go
index 924fb42e..5873440d 100644
--- a/modules/core/genesis_test.go
+++ b/modules/core/genesis_test.go
@@ -117,6 +117,7 @@ func (suite *IBCTestSuite) TestValidateGenesis() {
connectiontypes.NewConnectionPaths(clientID, []string{connectionID}),
},
0,
+ connectiontypes.NewParams(10),
),
ChannelGenesis: channeltypes.NewGenesisState(
[]channeltypes.IdentifiedChannel{
@@ -192,6 +193,7 @@ func (suite *IBCTestSuite) TestValidateGenesis() {
connectiontypes.NewConnectionPaths(clientID, []string{connectionID}),
},
0,
+ connectiontypes.Params{},
),
},
expPass: false,
@@ -279,6 +281,7 @@ func (suite *IBCTestSuite) TestInitGenesis() {
connectiontypes.NewConnectionPaths(clientID, []string{connectionID}),
},
0,
+ connectiontypes.NewParams(10),
),
ChannelGenesis: channeltypes.NewGenesisState(
[]channeltypes.IdentifiedChannel{
diff --git a/modules/core/keeper/keeper.go b/modules/core/keeper/keeper.go
index 0320bb46..164fecfc 100644
--- a/modules/core/keeper/keeper.go
+++ b/modules/core/keeper/keeper.go
@@ -4,14 +4,15 @@ import (
"github.com/cosmos/cosmos-sdk/codec"
sdk "github.com/cosmos/cosmos-sdk/types"
capabilitykeeper "github.com/cosmos/cosmos-sdk/x/capability/keeper"
+ paramtypes "github.com/cosmos/cosmos-sdk/x/params/types"
clientkeeper "github.com/cosmos/ibc-go/modules/core/02-client/keeper"
clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
connectionkeeper "github.com/cosmos/ibc-go/modules/core/03-connection/keeper"
+ connectiontypes "github.com/cosmos/ibc-go/modules/core/03-connection/types"
channelkeeper "github.com/cosmos/ibc-go/modules/core/04-channel/keeper"
portkeeper "github.com/cosmos/ibc-go/modules/core/05-port/keeper"
porttypes "github.com/cosmos/ibc-go/modules/core/05-port/types"
"github.com/cosmos/ibc-go/modules/core/types"
- paramtypes "github.com/cosmos/cosmos-sdk/x/params/types"
)
var _ types.QueryServer = (*Keeper)(nil)
@@ -36,8 +37,16 @@ func NewKeeper(
stakingKeeper clienttypes.StakingKeeper, upgradeKeeper clienttypes.UpgradeKeeper,
scopedKeeper capabilitykeeper.ScopedKeeper,
) *Keeper {
+ // register paramSpace at top level keeper
+ // set KeyTable if it has not already been set
+ if !paramSpace.HasKeyTable() {
+ keyTable := clienttypes.ParamKeyTable()
+ keyTable.RegisterParamSet(&connectiontypes.Params{})
+ paramSpace = paramSpace.WithKeyTable(keyTable)
+ }
+
clientKeeper := clientkeeper.NewKeeper(cdc, key, paramSpace, stakingKeeper, upgradeKeeper)
- connectionKeeper := connectionkeeper.NewKeeper(cdc, key, clientKeeper)
+ connectionKeeper := connectionkeeper.NewKeeper(cdc, key, paramSpace, clientKeeper)
portKeeper := portkeeper.NewKeeper(scopedKeeper)
channelKeeper := channelkeeper.NewKeeper(cdc, key, clientKeeper, connectionKeeper, portKeeper, scopedKeeper)
diff --git a/modules/light-clients/06-solomachine/types/client_state.go b/modules/light-clients/06-solomachine/types/client_state.go
index dd07273e..b4fcf710 100644
--- a/modules/light-clients/06-solomachine/types/client_state.go
+++ b/modules/light-clients/06-solomachine/types/client_state.go
@@ -257,6 +257,7 @@ func (cs *ClientState) VerifyChannelState(
// VerifyPacketCommitment verifies a proof of an outgoing packet commitment at
// the specified port, specified channel, and specified sequence.
func (cs *ClientState) VerifyPacketCommitment(
+ ctx sdk.Context,
store sdk.KVStore,
cdc codec.BinaryCodec,
height exported.Height,
@@ -298,6 +299,7 @@ func (cs *ClientState) VerifyPacketCommitment(
// VerifyPacketAcknowledgement verifies a proof of an incoming packet
// acknowledgement at the specified port, specified channel, and specified sequence.
func (cs *ClientState) VerifyPacketAcknowledgement(
+ ctx sdk.Context,
store sdk.KVStore,
cdc codec.BinaryCodec,
height exported.Height,
@@ -340,6 +342,7 @@ func (cs *ClientState) VerifyPacketAcknowledgement(
// incoming packet receipt at the specified port, specified channel, and
// specified sequence.
func (cs *ClientState) VerifyPacketReceiptAbsence(
+ ctx sdk.Context,
store sdk.KVStore,
cdc codec.BinaryCodec,
height exported.Height,
@@ -380,6 +383,7 @@ func (cs *ClientState) VerifyPacketReceiptAbsence(
// VerifyNextSequenceRecv verifies a proof of the next sequence number to be
// received of the specified channel at the specified port.
func (cs *ClientState) VerifyNextSequenceRecv(
+ ctx sdk.Context,
store sdk.KVStore,
cdc codec.BinaryCodec,
height exported.Height,
diff --git a/modules/light-clients/06-solomachine/types/client_state_test.go b/modules/light-clients/06-solomachine/types/client_state_test.go
index 82b9ac57..34344622 100644
--- a/modules/light-clients/06-solomachine/types/client_state_test.go
+++ b/modules/light-clients/06-solomachine/types/client_state_test.go
@@ -601,9 +601,10 @@ func (suite *SoloMachineTestSuite) TestVerifyPacketCommitment() {
tc := tc
expSeq := tc.clientState.Sequence + 1
+ ctx := suite.chainA.GetContext()
err := tc.clientState.VerifyPacketCommitment(
- suite.store, suite.chainA.Codec, solomachine.GetHeight(), 0, 0, tc.prefix, tc.proof, testPortID, testChannelID, solomachine.Sequence, commitmentBytes,
+ ctx, suite.store, suite.chainA.Codec, solomachine.GetHeight(), 0, 0, tc.prefix, tc.proof, testPortID, testChannelID, solomachine.Sequence, commitmentBytes,
)
if tc.expPass {
@@ -676,9 +677,10 @@ func (suite *SoloMachineTestSuite) TestVerifyPacketAcknowledgement() {
tc := tc
expSeq := tc.clientState.Sequence + 1
+ ctx := suite.chainA.GetContext()
err := tc.clientState.VerifyPacketAcknowledgement(
- suite.store, suite.chainA.Codec, solomachine.GetHeight(), 0, 0, tc.prefix, tc.proof, testPortID, testChannelID, solomachine.Sequence, ack,
+ ctx, suite.store, suite.chainA.Codec, solomachine.GetHeight(), 0, 0, tc.prefix, tc.proof, testPortID, testChannelID, solomachine.Sequence, ack,
)
if tc.expPass {
@@ -751,9 +753,10 @@ func (suite *SoloMachineTestSuite) TestVerifyPacketReceiptAbsence() {
tc := tc
expSeq := tc.clientState.Sequence + 1
+ ctx := suite.chainA.GetContext()
err := tc.clientState.VerifyPacketReceiptAbsence(
- suite.store, suite.chainA.Codec, solomachine.GetHeight(), 0, 0, tc.prefix, tc.proof, testPortID, testChannelID, solomachine.Sequence,
+ ctx, suite.store, suite.chainA.Codec, solomachine.GetHeight(), 0, 0, tc.prefix, tc.proof, testPortID, testChannelID, solomachine.Sequence,
)
if tc.expPass {
@@ -826,9 +829,10 @@ func (suite *SoloMachineTestSuite) TestVerifyNextSeqRecv() {
tc := tc
expSeq := tc.clientState.Sequence + 1
+ ctx := suite.chainA.GetContext()
err := tc.clientState.VerifyNextSequenceRecv(
- suite.store, suite.chainA.Codec, solomachine.GetHeight(), 0, 0, tc.prefix, tc.proof, testPortID, testChannelID, nextSeqRecv,
+ ctx, suite.store, suite.chainA.Codec, solomachine.GetHeight(), 0, 0, tc.prefix, tc.proof, testPortID, testChannelID, nextSeqRecv,
)
if tc.expPass {
diff --git a/modules/light-clients/07-tendermint/types/client_state.go b/modules/light-clients/07-tendermint/types/client_state.go
index 4928a681..bf93cdbf 100644
--- a/modules/light-clients/07-tendermint/types/client_state.go
+++ b/modules/light-clients/07-tendermint/types/client_state.go
@@ -171,8 +171,8 @@ func (cs ClientState) Initialize(ctx sdk.Context, _ codec.BinaryCodec, clientSto
return sdkerrors.Wrapf(clienttypes.ErrInvalidConsensus, "invalid initial consensus state. expected type: %T, got: %T",
&ConsensusState{}, consState)
}
- // set processed time with initial consensus state height equal to initial client state's latest height
- SetProcessedTime(clientStore, cs.GetLatestHeight(), uint64(ctx.BlockTime().UnixNano()))
+ // set metadata for initial consensus state.
+ setConsensusMetadata(ctx, clientStore, cs.GetLatestHeight())
return nil
}
@@ -341,11 +341,12 @@ func (cs ClientState) VerifyChannelState(
// VerifyPacketCommitment verifies a proof of an outgoing packet commitment at
// the specified port, specified channel, and specified sequence.
func (cs ClientState) VerifyPacketCommitment(
+ ctx sdk.Context,
store sdk.KVStore,
cdc codec.BinaryCodec,
height exported.Height,
- currentTimestamp uint64,
- delayPeriod uint64,
+ delayTimePeriod uint64,
+ delayBlockPeriod uint64,
prefix exported.Prefix,
proof []byte,
portID,
@@ -359,7 +360,7 @@ func (cs ClientState) VerifyPacketCommitment(
}
// check delay period has passed
- if err := verifyDelayPeriodPassed(store, height, currentTimestamp, delayPeriod); err != nil {
+ if err := verifyDelayPeriodPassed(ctx, store, height, delayTimePeriod, delayBlockPeriod); err != nil {
return err
}
@@ -379,11 +380,12 @@ func (cs ClientState) VerifyPacketCommitment(
// VerifyPacketAcknowledgement verifies a proof of an incoming packet
// acknowledgement at the specified port, specified channel, and specified sequence.
func (cs ClientState) VerifyPacketAcknowledgement(
+ ctx sdk.Context,
store sdk.KVStore,
cdc codec.BinaryCodec,
height exported.Height,
- currentTimestamp uint64,
- delayPeriod uint64,
+ delayTimePeriod uint64,
+ delayBlockPeriod uint64,
prefix exported.Prefix,
proof []byte,
portID,
@@ -397,7 +399,7 @@ func (cs ClientState) VerifyPacketAcknowledgement(
}
// check delay period has passed
- if err := verifyDelayPeriodPassed(store, height, currentTimestamp, delayPeriod); err != nil {
+ if err := verifyDelayPeriodPassed(ctx, store, height, delayTimePeriod, delayBlockPeriod); err != nil {
return err
}
@@ -418,11 +420,12 @@ func (cs ClientState) VerifyPacketAcknowledgement(
// incoming packet receipt at the specified port, specified channel, and
// specified sequence.
func (cs ClientState) VerifyPacketReceiptAbsence(
+ ctx sdk.Context,
store sdk.KVStore,
cdc codec.BinaryCodec,
height exported.Height,
- currentTimestamp uint64,
- delayPeriod uint64,
+ delayTimePeriod uint64,
+ delayBlockPeriod uint64,
prefix exported.Prefix,
proof []byte,
portID,
@@ -435,7 +438,7 @@ func (cs ClientState) VerifyPacketReceiptAbsence(
}
// check delay period has passed
- if err := verifyDelayPeriodPassed(store, height, currentTimestamp, delayPeriod); err != nil {
+ if err := verifyDelayPeriodPassed(ctx, store, height, delayTimePeriod, delayBlockPeriod); err != nil {
return err
}
@@ -455,11 +458,12 @@ func (cs ClientState) VerifyPacketReceiptAbsence(
// VerifyNextSequenceRecv verifies a proof of the next sequence number to be
// received of the specified channel at the specified port.
func (cs ClientState) VerifyNextSequenceRecv(
+ ctx sdk.Context,
store sdk.KVStore,
cdc codec.BinaryCodec,
height exported.Height,
- currentTimestamp uint64,
- delayPeriod uint64,
+ delayTimePeriod uint64,
+ delayBlockPeriod uint64,
prefix exported.Prefix,
proof []byte,
portID,
@@ -472,7 +476,7 @@ func (cs ClientState) VerifyNextSequenceRecv(
}
// check delay period has passed
- if err := verifyDelayPeriodPassed(store, height, currentTimestamp, delayPeriod); err != nil {
+ if err := verifyDelayPeriodPassed(ctx, store, height, delayTimePeriod, delayBlockPeriod); err != nil {
return err
}
@@ -491,20 +495,33 @@ func (cs ClientState) VerifyNextSequenceRecv(
return nil
}
-// verifyDelayPeriodPassed will ensure that at least delayPeriod amount of time has passed since consensus state was submitted
-// before allowing verification to continue.
-func verifyDelayPeriodPassed(store sdk.KVStore, proofHeight exported.Height, currentTimestamp, delayPeriod uint64) error {
- // check that executing chain's timestamp has passed consensusState's processed time + delay period
+// verifyDelayPeriodPassed will ensure that at least delayTimePeriod amount of time and delayBlockPeriod number of blocks have passed
+// since consensus state was submitted before allowing verification to continue.
+func verifyDelayPeriodPassed(ctx sdk.Context, store sdk.KVStore, proofHeight exported.Height, delayTimePeriod, delayBlockPeriod uint64) error {
+ // check that executing chain's timestamp has passed consensusState's processed time + delay time period
processedTime, ok := GetProcessedTime(store, proofHeight)
if !ok {
return sdkerrors.Wrapf(ErrProcessedTimeNotFound, "processed time not found for height: %s", proofHeight)
}
- validTime := processedTime + delayPeriod
- // NOTE: delay period is inclusive, so if currentTimestamp is validTime, then we return no error
- if validTime > currentTimestamp {
+ currentTimestamp := uint64(ctx.BlockTime().UnixNano())
+ validTime := processedTime + delayTimePeriod
+ // NOTE: delay time period is inclusive, so if currentTimestamp is validTime, then we return no error
+ if currentTimestamp < validTime {
return sdkerrors.Wrapf(ErrDelayPeriodNotPassed, "cannot verify packet until time: %d, current time: %d",
validTime, currentTimestamp)
}
+ // check that executing chain's height has passed consensusState's processed height + delay block period
+ processedHeight, ok := GetProcessedHeight(store, proofHeight)
+ if !ok {
+ return sdkerrors.Wrapf(ErrProcessedHeightNotFound, "processed height not found for height: %s", proofHeight)
+ }
+ currentHeight := clienttypes.GetSelfHeight(ctx)
+ validHeight := clienttypes.NewHeight(processedHeight.GetRevisionNumber(), processedHeight.GetRevisionHeight()+delayBlockPeriod)
+ // NOTE: delay block period is inclusive, so if currentHeight is validHeight, then we return no error
+ if currentHeight.LT(validHeight) {
+ return sdkerrors.Wrapf(ErrDelayPeriodNotPassed, "cannot verify packet until height: %s, current height: %s",
+ validHeight, currentHeight)
+ }
return nil
}
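The check in verifyDelayPeriodPassed above requires both the time delay and the block delay to have elapsed, and both bounds are inclusive. The following is a minimal, self-contained sketch of that semantics only — plain uint64 heights and illustrative values stand in for the revision-aware Height type; it is not the ibc-go implementation.

package main

import "fmt"

// delayPassed reports whether a proof may be verified: both the time delay and
// the block delay must have elapsed (inclusively) since the consensus state was processed.
func delayPassed(processedTimeNs, currentTimeNs, delayTimeNs, processedHeight, currentHeight, delayBlocks uint64) bool {
	validTime := processedTimeNs + delayTimeNs   // inclusive bound
	validHeight := processedHeight + delayBlocks // inclusive bound
	return currentTimeNs >= validTime && currentHeight >= validHeight
}

func main() {
	// consensus state processed at t=1000ns, height 10; packet requires 500ns and 2 blocks of delay
	fmt.Println(delayPassed(1000, 1500, 500, 10, 12, 2)) // true: both inclusive bounds met
	fmt.Println(delayPassed(1000, 1499, 500, 10, 12, 2)) // false: time delay not yet passed
	fmt.Println(delayPassed(1000, 1500, 500, 10, 11, 2)) // false: block delay not yet passed
}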
diff --git a/modules/light-clients/07-tendermint/types/client_state_test.go b/modules/light-clients/07-tendermint/types/client_state_test.go
index e6b62ddc..93125c04 100644
--- a/modules/light-clients/07-tendermint/types/client_state_test.go
+++ b/modules/light-clients/07-tendermint/types/client_state_test.go
@@ -400,11 +400,12 @@ func (suite *TendermintTestSuite) TestVerifyChannelState() {
// in the light client on chainA. A send from chainB to chainA is simulated.
func (suite *TendermintTestSuite) TestVerifyPacketCommitment() {
var (
- clientState *types.ClientState
- proof []byte
- delayPeriod uint64
- proofHeight exported.Height
- prefix commitmenttypes.MerklePrefix
+ clientState *types.ClientState
+ proof []byte
+ delayTimePeriod uint64
+ delayBlockPeriod uint64
+ proofHeight exported.Height
+ prefix commitmenttypes.MerklePrefix
)
testCases := []struct {
@@ -416,19 +417,34 @@ func (suite *TendermintTestSuite) TestVerifyPacketCommitment() {
"successful verification", func() {}, true,
},
{
- name: "delay period has passed",
+ name: "delay time period has passed",
+ malleate: func() {
+ delayTimePeriod = uint64(time.Second.Nanoseconds())
+ },
+ expPass: true,
+ },
+ {
+ name: "delay time period has not passed",
+ malleate: func() {
+ delayTimePeriod = uint64(time.Hour.Nanoseconds())
+ },
+ expPass: false,
+ },
+ {
+ name: "delay block period has passed",
malleate: func() {
- delayPeriod = uint64(time.Second.Nanoseconds())
+ delayBlockPeriod = 1
},
expPass: true,
},
{
- name: "delay period has not passed",
+ name: "delay block period has not passed",
malleate: func() {
- delayPeriod = uint64(time.Hour.Nanoseconds())
+ delayBlockPeriod = 1000
},
expPass: false,
},
+
{
"ApplyPrefix failed", func() {
prefix = commitmenttypes.MerklePrefix{}
@@ -470,14 +486,17 @@ func (suite *TendermintTestSuite) TestVerifyPacketCommitment() {
packetKey := host.PacketCommitmentKey(packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence())
proof, proofHeight = path.EndpointB.QueryProof(packetKey)
+ // reset time and block delays to 0, malleate may change to a specific non-zero value.
+ delayTimePeriod = 0
+ delayBlockPeriod = 0
tc.malleate() // make changes as necessary
- store := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), path.EndpointA.ClientID)
+ ctx := suite.chainA.GetContext()
+ store := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(ctx, path.EndpointA.ClientID)
- currentTime := uint64(suite.chainA.GetContext().BlockTime().UnixNano())
commitment := channeltypes.CommitPacket(suite.chainA.App.GetIBCKeeper().Codec(), packet)
err = clientState.VerifyPacketCommitment(
- store, suite.chainA.Codec, proofHeight, currentTime, delayPeriod, &prefix, proof,
+ ctx, store, suite.chainA.Codec, proofHeight, delayTimePeriod, delayBlockPeriod, &prefix, proof,
packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence(), commitment,
)
@@ -495,11 +514,12 @@ func (suite *TendermintTestSuite) TestVerifyPacketCommitment() {
// is simulated.
func (suite *TendermintTestSuite) TestVerifyPacketAcknowledgement() {
var (
- clientState *types.ClientState
- proof []byte
- delayPeriod uint64
- proofHeight exported.Height
- prefix commitmenttypes.MerklePrefix
+ clientState *types.ClientState
+ proof []byte
+ delayTimePeriod uint64
+ delayBlockPeriod uint64
+ proofHeight exported.Height
+ prefix commitmenttypes.MerklePrefix
)
testCases := []struct {
@@ -511,19 +531,34 @@ func (suite *TendermintTestSuite) TestVerifyPacketAcknowledgement() {
"successful verification", func() {}, true,
},
{
- name: "delay period has passed",
+ name: "delay time period has passed",
+ malleate: func() {
+ delayTimePeriod = uint64(time.Second.Nanoseconds())
+ },
+ expPass: true,
+ },
+ {
+ name: "delay time period has not passed",
+ malleate: func() {
+ delayTimePeriod = uint64(time.Hour.Nanoseconds())
+ },
+ expPass: false,
+ },
+ {
+ name: "delay block period has passed",
malleate: func() {
- delayPeriod = uint64(time.Second.Nanoseconds())
+ delayBlockPeriod = 1
},
expPass: true,
},
{
- name: "delay period has not passed",
+ name: "delay block period has not passed",
malleate: func() {
- delayPeriod = uint64(time.Hour.Nanoseconds())
+ delayBlockPeriod = 10
},
expPass: false,
},
+
{
"ApplyPrefix failed", func() {
prefix = commitmenttypes.MerklePrefix{}
@@ -571,13 +606,16 @@ func (suite *TendermintTestSuite) TestVerifyPacketAcknowledgement() {
acknowledgementKey := host.PacketAcknowledgementKey(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence())
proof, proofHeight = suite.chainB.QueryProof(acknowledgementKey)
+ // reset time and block delays to 0, malleate may change to a specific non-zero value.
+ delayTimePeriod = 0
+ delayBlockPeriod = 0
tc.malleate() // make changes as necessary
- store := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), path.EndpointA.ClientID)
+ ctx := suite.chainA.GetContext()
+ store := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(ctx, path.EndpointA.ClientID)
- currentTime := uint64(suite.chainA.GetContext().BlockTime().UnixNano())
err = clientState.VerifyPacketAcknowledgement(
- store, suite.chainA.Codec, proofHeight, currentTime, delayPeriod, &prefix, proof,
+ ctx, store, suite.chainA.Codec, proofHeight, delayTimePeriod, delayBlockPeriod, &prefix, proof,
packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence(), ibcmock.MockAcknowledgement.Acknowledgement(),
)
@@ -595,11 +633,12 @@ func (suite *TendermintTestSuite) TestVerifyPacketAcknowledgement() {
// no receive.
func (suite *TendermintTestSuite) TestVerifyPacketReceiptAbsence() {
var (
- clientState *types.ClientState
- proof []byte
- delayPeriod uint64
- proofHeight exported.Height
- prefix commitmenttypes.MerklePrefix
+ clientState *types.ClientState
+ proof []byte
+ delayTimePeriod uint64
+ delayBlockPeriod uint64
+ proofHeight exported.Height
+ prefix commitmenttypes.MerklePrefix
)
testCases := []struct {
@@ -611,19 +650,34 @@ func (suite *TendermintTestSuite) TestVerifyPacketReceiptAbsence() {
"successful verification", func() {}, true,
},
{
- name: "delay period has passed",
+ name: "delay time period has passed",
+ malleate: func() {
+ delayTimePeriod = uint64(time.Second.Nanoseconds())
+ },
+ expPass: true,
+ },
+ {
+ name: "delay time period has not passed",
+ malleate: func() {
+ delayTimePeriod = uint64(time.Hour.Nanoseconds())
+ },
+ expPass: false,
+ },
+ {
+ name: "delay block period has passed",
malleate: func() {
- delayPeriod = uint64(time.Second.Nanoseconds())
+ delayBlockPeriod = 1
},
expPass: true,
},
{
- name: "delay period has not passed",
+ name: "delay block period has not passed",
malleate: func() {
- delayPeriod = uint64(time.Hour.Nanoseconds())
+ delayBlockPeriod = 10
},
expPass: false,
},
+
{
"ApplyPrefix failed", func() {
prefix = commitmenttypes.MerklePrefix{}
@@ -667,13 +721,16 @@ func (suite *TendermintTestSuite) TestVerifyPacketReceiptAbsence() {
receiptKey := host.PacketReceiptKey(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence())
proof, proofHeight = path.EndpointB.QueryProof(receiptKey)
+ // reset time and block delays to 0, malleate may change to a specific non-zero value.
+ delayTimePeriod = 0
+ delayBlockPeriod = 0
tc.malleate() // make changes as necessary
- store := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), path.EndpointA.ClientID)
+ ctx := suite.chainA.GetContext()
+ store := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(ctx, path.EndpointA.ClientID)
- currentTime := uint64(suite.chainA.GetContext().BlockTime().UnixNano())
err = clientState.VerifyPacketReceiptAbsence(
- store, suite.chainA.Codec, proofHeight, currentTime, delayPeriod, &prefix, proof,
+ ctx, store, suite.chainA.Codec, proofHeight, delayTimePeriod, delayBlockPeriod, &prefix, proof,
packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence(),
)
@@ -691,11 +748,12 @@ func (suite *TendermintTestSuite) TestVerifyPacketReceiptAbsence() {
// simulated.
func (suite *TendermintTestSuite) TestVerifyNextSeqRecv() {
var (
- clientState *types.ClientState
- proof []byte
- delayPeriod uint64
- proofHeight exported.Height
- prefix commitmenttypes.MerklePrefix
+ clientState *types.ClientState
+ proof []byte
+ delayTimePeriod uint64
+ delayBlockPeriod uint64
+ proofHeight exported.Height
+ prefix commitmenttypes.MerklePrefix
)
testCases := []struct {
@@ -707,19 +765,34 @@ func (suite *TendermintTestSuite) TestVerifyNextSeqRecv() {
"successful verification", func() {}, true,
},
{
- name: "delay period has passed",
+ name: "delay time period has passed",
+ malleate: func() {
+ delayTimePeriod = uint64(time.Second.Nanoseconds())
+ },
+ expPass: true,
+ },
+ {
+ name: "delay time period has not passed",
+ malleate: func() {
+ delayTimePeriod = uint64(time.Hour.Nanoseconds())
+ },
+ expPass: false,
+ },
+ {
+ name: "delay block period has passed",
malleate: func() {
- delayPeriod = uint64(time.Second.Nanoseconds())
+ delayBlockPeriod = 1
},
expPass: true,
},
{
- name: "delay period has not passed",
+ name: "delay block period has not passed",
malleate: func() {
- delayPeriod = uint64(time.Hour.Nanoseconds())
+ delayBlockPeriod = 10
},
expPass: false,
},
+
{
"ApplyPrefix failed", func() {
prefix = commitmenttypes.MerklePrefix{}
@@ -768,13 +841,16 @@ func (suite *TendermintTestSuite) TestVerifyNextSeqRecv() {
nextSeqRecvKey := host.NextSequenceRecvKey(packet.GetDestPort(), packet.GetDestChannel())
proof, proofHeight = suite.chainB.QueryProof(nextSeqRecvKey)
+ // reset time and block delays to 0, malleate may change to a specific non-zero value.
+ delayTimePeriod = 0
+ delayBlockPeriod = 0
tc.malleate() // make changes as necessary
- store := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), path.EndpointA.ClientID)
+ ctx := suite.chainA.GetContext()
+ store := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(ctx, path.EndpointA.ClientID)
- currentTime := uint64(suite.chainA.GetContext().BlockTime().UnixNano())
err = clientState.VerifyNextSequenceRecv(
- store, suite.chainA.Codec, proofHeight, currentTime, delayPeriod, &prefix, proof,
+ ctx, store, suite.chainA.Codec, proofHeight, delayTimePeriod, delayBlockPeriod, &prefix, proof,
packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence()+1,
)
diff --git a/modules/light-clients/07-tendermint/types/errors.go b/modules/light-clients/07-tendermint/types/errors.go
index 276c225b..7b087e41 100644
--- a/modules/light-clients/07-tendermint/types/errors.go
+++ b/modules/light-clients/07-tendermint/types/errors.go
@@ -10,16 +10,17 @@ const (
// IBC tendermint client sentinel errors
var (
- ErrInvalidChainID = sdkerrors.Register(SubModuleName, 2, "invalid chain-id")
- ErrInvalidTrustingPeriod = sdkerrors.Register(SubModuleName, 3, "invalid trusting period")
- ErrInvalidUnbondingPeriod = sdkerrors.Register(SubModuleName, 4, "invalid unbonding period")
- ErrInvalidHeaderHeight = sdkerrors.Register(SubModuleName, 5, "invalid header height")
- ErrInvalidHeader = sdkerrors.Register(SubModuleName, 6, "invalid header")
- ErrInvalidMaxClockDrift = sdkerrors.Register(SubModuleName, 7, "invalid max clock drift")
- ErrProcessedTimeNotFound = sdkerrors.Register(SubModuleName, 8, "processed time not found")
- ErrDelayPeriodNotPassed = sdkerrors.Register(SubModuleName, 9, "packet-specified delay period has not been reached")
- ErrTrustingPeriodExpired = sdkerrors.Register(SubModuleName, 10, "time since latest trusted state has passed the trusting period")
- ErrUnbondingPeriodExpired = sdkerrors.Register(SubModuleName, 11, "time since latest trusted state has passed the unbonding period")
- ErrInvalidProofSpecs = sdkerrors.Register(SubModuleName, 12, "invalid proof specs")
- ErrInvalidValidatorSet = sdkerrors.Register(SubModuleName, 13, "invalid validator set")
+ ErrInvalidChainID = sdkerrors.Register(SubModuleName, 2, "invalid chain-id")
+ ErrInvalidTrustingPeriod = sdkerrors.Register(SubModuleName, 3, "invalid trusting period")
+ ErrInvalidUnbondingPeriod = sdkerrors.Register(SubModuleName, 4, "invalid unbonding period")
+ ErrInvalidHeaderHeight = sdkerrors.Register(SubModuleName, 5, "invalid header height")
+ ErrInvalidHeader = sdkerrors.Register(SubModuleName, 6, "invalid header")
+ ErrInvalidMaxClockDrift = sdkerrors.Register(SubModuleName, 7, "invalid max clock drift")
+ ErrProcessedTimeNotFound = sdkerrors.Register(SubModuleName, 8, "processed time not found")
+ ErrProcessedHeightNotFound = sdkerrors.Register(SubModuleName, 9, "processed height not found")
+ ErrDelayPeriodNotPassed = sdkerrors.Register(SubModuleName, 10, "packet-specified delay period has not been reached")
+ ErrTrustingPeriodExpired = sdkerrors.Register(SubModuleName, 11, "time since latest trusted state has passed the trusting period")
+ ErrUnbondingPeriodExpired = sdkerrors.Register(SubModuleName, 12, "time since latest trusted state has passed the unbonding period")
+ ErrInvalidProofSpecs = sdkerrors.Register(SubModuleName, 13, "invalid proof specs")
+ ErrInvalidValidatorSet = sdkerrors.Register(SubModuleName, 14, "invalid validator set")
)
diff --git a/modules/light-clients/07-tendermint/types/proposal_handle.go b/modules/light-clients/07-tendermint/types/proposal_handle.go
index 55c00d0f..780ffdf9 100644
--- a/modules/light-clients/07-tendermint/types/proposal_handle.go
+++ b/modules/light-clients/07-tendermint/types/proposal_handle.go
@@ -90,12 +90,8 @@ func (cs ClientState) CheckSubstituteAndUpdateState(
}
SetConsensusState(subjectClientStore, cdc, consensusState, height)
- processedTime, found := GetProcessedTime(substituteClientStore, height)
- if !found {
- continue
- }
- SetProcessedTime(subjectClientStore, height, processedTime)
-
+ // set metadata for this consensus state
+ setConsensusMetadata(ctx, subjectClientStore, height)
}
cs.LatestHeight = substituteClientState.LatestHeight
diff --git a/modules/light-clients/07-tendermint/types/store.go b/modules/light-clients/07-tendermint/types/store.go
index f261f4dc..ba21e81b 100644
--- a/modules/light-clients/07-tendermint/types/store.go
+++ b/modules/light-clients/07-tendermint/types/store.go
@@ -36,7 +36,10 @@ const KeyIterateConsensusStatePrefix = "iterateConsensusStates"
var (
// KeyProcessedTime is appended to consensus state key to store the processed time
KeyProcessedTime = []byte("/processedTime")
- KeyIteration = []byte("/iterationKey")
+ // KeyProcessedHeight is appended to consensus state key to store the processed height
+ KeyProcessedHeight = []byte("/processedHeight")
+ // KeyIteration stores the key mapping to consensus state key for efficient iteration
+ KeyIteration = []byte("/iterationKey")
)
// SetConsensusState stores the consensus state at the given height.
@@ -99,15 +102,13 @@ func IterateProcessedTime(store sdk.KVStore, cb func(key, val []byte) bool) {
}
}
-// ProcessedTime Store code
-
// ProcessedTimeKey returns the key under which the processed time will be stored in the client store.
func ProcessedTimeKey(height exported.Height) []byte {
return append(host.ConsensusStateKey(height), KeyProcessedTime...)
}
// SetProcessedTime stores the time at which a header was processed and the corresponding consensus state was created.
-// This is useful when validating whether a packet has reached the specified delay period in the tendermint client's
+// This is useful when validating whether a packet has reached the specified time delay period in the tendermint client's
// verification functions
func SetProcessedTime(clientStore sdk.KVStore, height exported.Height, timeNs uint64) {
key := ProcessedTimeKey(height)
@@ -116,7 +117,7 @@ func SetProcessedTime(clientStore sdk.KVStore, height exported.Height, timeNs ui
}
// GetProcessedTime gets the time (in nanoseconds) at which this chain received and processed a tendermint header.
-// This is used to validate that a received packet has passed the delay period.
+// This is used to validate that a received packet has passed the time delay period.
func GetProcessedTime(clientStore sdk.KVStore, height exported.Height) (uint64, bool) {
key := ProcessedTimeKey(height)
bz := clientStore.Get(key)
@@ -132,7 +133,40 @@ func deleteProcessedTime(clientStore sdk.KVStore, height exported.Height) {
clientStore.Delete(key)
}
-// Iteration Code
+// ProcessedHeightKey returns the key under which the processed height will be stored in the client store.
+func ProcessedHeightKey(height exported.Height) []byte {
+ return append(host.ConsensusStateKey(height), KeyProcessedHeight...)
+}
+
+// SetProcessedHeight stores the height at which a header was processed and the corresponding consensus state was created.
+// This is useful when validating whether a packet has reached the specified block delay period in the tendermint client's
+// verification functions
+func SetProcessedHeight(clientStore sdk.KVStore, consHeight, processedHeight exported.Height) {
+ key := ProcessedHeightKey(consHeight)
+ val := []byte(processedHeight.String())
+ clientStore.Set(key, val)
+}
+
+// GetProcessedHeight gets the height at which this chain received and processed a tendermint header.
+// This is used to validate that a received packet has passed the block delay period.
+func GetProcessedHeight(clientStore sdk.KVStore, height exported.Height) (exported.Height, bool) {
+ key := ProcessedHeightKey(height)
+ bz := clientStore.Get(key)
+ if bz == nil {
+ return nil, false
+ }
+ processedHeight, err := clienttypes.ParseHeight(string(bz))
+ if err != nil {
+ return nil, false
+ }
+ return processedHeight, true
+}
+
+// deleteProcessedHeight deletes the processedHeight for a given height
+func deleteProcessedHeight(clientStore sdk.KVStore, height exported.Height) {
+ key := ProcessedHeightKey(height)
+ clientStore.Delete(key)
+}
// IterationKey returns the key under which the consensus state key will be stored.
// The iteration key is a BigEndian representation of the consensus state key to support efficient iteration.
@@ -255,3 +289,20 @@ func bigEndianHeightBytes(height exported.Height) []byte {
binary.BigEndian.PutUint64(heightBytes[8:], height.GetRevisionHeight())
return heightBytes
}
+
+// setConsensusMetadata sets the context block time as the processed time and the context block height as the
+// processed height, as this metadata is internal to tendermint light client logic.
+// The client state and consensus state themselves are set by the client keeper.
+// The iteration key is also set to enable efficient ordered iteration of consensus states.
+func setConsensusMetadata(ctx sdk.Context, clientStore sdk.KVStore, height exported.Height) {
+ SetProcessedTime(clientStore, height, uint64(ctx.BlockTime().UnixNano()))
+ SetProcessedHeight(clientStore, height, clienttypes.GetSelfHeight(ctx))
+ SetIterationKey(clientStore, height)
+}
+
+// deleteConsensusMetadata deletes the metadata stored for a particular consensus state.
+func deleteConsensusMetadata(clientStore sdk.KVStore, height exported.Height) {
+ deleteProcessedTime(clientStore, height)
+ deleteProcessedHeight(clientStore, height)
+ deleteIterationKey(clientStore, height)
+}
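SetProcessedHeight stores the height as its string form and GetProcessedHeight parses it back. A simplified round-trip sketch is shown below; it assumes heights serialize as "<revision>-<height>" (as suggested by the String/ParseHeight usage above), and the parser here is a stand-in for illustration, not clienttypes.ParseHeight.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseHeight splits a "<revision>-<height>" string into its two components.
func parseHeight(s string) (revision, height uint64, err error) {
	parts := strings.Split(s, "-")
	if len(parts) != 2 {
		return 0, 0, fmt.Errorf("expected '<revision>-<height>', got %q", s)
	}
	if revision, err = strconv.ParseUint(parts[0], 10, 64); err != nil {
		return 0, 0, err
	}
	height, err = strconv.ParseUint(parts[1], 10, 64)
	return revision, height, err
}

func main() {
	stored := "1-150" // value as written by SetProcessedHeight via processedHeight.String()
	rev, h, err := parseHeight(stored)
	fmt.Println(rev, h, err) // 1 150 <nil>
}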
diff --git a/modules/light-clients/07-tendermint/types/tendermint.pb.go b/modules/light-clients/07-tendermint/types/tendermint.pb.go
index 9a0645a4..84a79b66 100644
--- a/modules/light-clients/07-tendermint/types/tendermint.pb.go
+++ b/modules/light-clients/07-tendermint/types/tendermint.pb.go
@@ -11,10 +11,10 @@ import (
_ "github.com/gogo/protobuf/gogoproto"
proto "github.com/gogo/protobuf/proto"
github_com_gogo_protobuf_types "github.com/gogo/protobuf/types"
+ _ "github.com/golang/protobuf/ptypes/duration"
+ _ "github.com/golang/protobuf/ptypes/timestamp"
github_com_tendermint_tendermint_libs_bytes "github.com/tendermint/tendermint/libs/bytes"
types2 "github.com/tendermint/tendermint/proto/tendermint/types"
- _ "google.golang.org/protobuf/types/known/durationpb"
- _ "google.golang.org/protobuf/types/known/timestamppb"
io "io"
math "math"
math_bits "math/bits"
diff --git a/modules/light-clients/07-tendermint/types/update.go b/modules/light-clients/07-tendermint/types/update.go
index e1104e8d..c70746b4 100644
--- a/modules/light-clients/07-tendermint/types/update.go
+++ b/modules/light-clients/07-tendermint/types/update.go
@@ -134,9 +134,9 @@ func (cs ClientState) CheckHeaderAndUpdateState(
}
// if pruneHeight is set, delete consensus state and metadata
if pruneHeight != nil {
+
deleteConsensusState(clientStore, pruneHeight)
- deleteProcessedTime(clientStore, pruneHeight)
- deleteIterationKey(clientStore, pruneHeight)
+ deleteConsensusMetadata(clientStore, pruneHeight)
}
newClientState, consensusState := update(ctx, clientStore, &cs, tmHeader)
@@ -257,11 +257,8 @@ func update(ctx sdk.Context, clientStore sdk.KVStore, clientState *ClientState,
NextValidatorsHash: header.Header.NextValidatorsHash,
}
- // set context time as processed time as this is state internal to tendermint client logic.
- // client state and consensus state will be set by client keeper
- // set iteration key to provide ability for efficient ordered iteration of consensus states.
- SetProcessedTime(clientStore, header.GetHeight(), uint64(ctx.BlockTime().UnixNano()))
- SetIterationKey(clientStore, header.GetHeight())
+ // set metadata for this consensus state
+ setConsensusMetadata(ctx, clientStore, header.GetHeight())
return clientState, consensusState
}
diff --git a/modules/light-clients/07-tendermint/types/update_test.go b/modules/light-clients/07-tendermint/types/update_test.go
index 95a159ef..b93168b5 100644
--- a/modules/light-clients/07-tendermint/types/update_test.go
+++ b/modules/light-clients/07-tendermint/types/update_test.go
@@ -8,6 +8,7 @@ import (
clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
commitmenttypes "github.com/cosmos/ibc-go/modules/core/23-commitment/types"
+ "github.com/cosmos/ibc-go/modules/core/exported"
types "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types"
ibctesting "github.com/cosmos/ibc-go/testing"
ibctestingmock "github.com/cosmos/ibc-go/testing/mock"
@@ -377,10 +378,16 @@ func (suite *TendermintTestSuite) TestPruneConsensusState() {
path := ibctesting.NewPath(suite.chainA, suite.chainB)
suite.coordinator.SetupClients(path)
- // call update client twice. When pruning occurs, only first consensus state should be pruned.
- // this height will be pruned
- path.EndpointA.UpdateClient()
- pruneHeight := path.EndpointA.GetClientState().GetLatestHeight()
+ // get the first height as it will be pruned first.
+ var pruneHeight exported.Height
+ getFirstHeightCb := func(height exported.Height) bool {
+ pruneHeight = height
+ return true
+ }
+ ctx := path.EndpointA.Chain.GetContext()
+ clientStore := path.EndpointA.Chain.App.GetIBCKeeper().ClientKeeper.ClientStore(ctx, path.EndpointA.ClientID)
+ err := types.IterateConsensusStateAscending(clientStore, getFirstHeightCb)
+ suite.Require().Nil(err)
// this height will be expired but not pruned
path.EndpointA.UpdateClient()
@@ -389,8 +396,8 @@ func (suite *TendermintTestSuite) TestPruneConsensusState() {
// expected values that must still remain in store after pruning
expectedConsState, ok := path.EndpointA.Chain.GetConsensusState(path.EndpointA.ClientID, expiredHeight)
suite.Require().True(ok)
- ctx := path.EndpointA.Chain.GetContext()
- clientStore := path.EndpointA.Chain.App.GetIBCKeeper().ClientKeeper.ClientStore(ctx, path.EndpointA.ClientID)
+ ctx = path.EndpointA.Chain.GetContext()
+ clientStore = path.EndpointA.Chain.App.GetIBCKeeper().ClientKeeper.ClientStore(ctx, path.EndpointA.ClientID)
expectedProcessTime, ok := types.GetProcessedTime(clientStore, expiredHeight)
suite.Require().True(ok)
expectedConsKey := types.GetIterationKey(clientStore, expiredHeight)
diff --git a/modules/light-clients/07-tendermint/types/upgrade.go b/modules/light-clients/07-tendermint/types/upgrade.go
index ab5ebcf4..b70cbd48 100644
--- a/modules/light-clients/07-tendermint/types/upgrade.go
+++ b/modules/light-clients/07-tendermint/types/upgrade.go
@@ -6,10 +6,10 @@ import (
"github.com/cosmos/cosmos-sdk/codec"
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+ upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
commitmenttypes "github.com/cosmos/ibc-go/modules/core/23-commitment/types"
"github.com/cosmos/ibc-go/modules/core/exported"
- upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
)
// VerifyUpgradeAndUpdateState checks if the upgraded client has been committed by the current client
@@ -122,6 +122,9 @@ func (cs ClientState) VerifyUpgradeAndUpdateState(
tmUpgradeConsState.Timestamp, commitmenttypes.MerkleRoot{}, tmUpgradeConsState.NextValidatorsHash,
)
+ // set metadata for this consensus state
+ setConsensusMetadata(ctx, clientStore, tmUpgradeClient.LatestHeight)
+
return newClientState, newConsState, nil
}
diff --git a/modules/light-clients/09-localhost/types/client_state.go b/modules/light-clients/09-localhost/types/client_state.go
index 294ee4d9..a0615b8f 100644
--- a/modules/light-clients/09-localhost/types/client_state.go
+++ b/modules/light-clients/09-localhost/types/client_state.go
@@ -223,6 +223,7 @@ func (cs ClientState) VerifyChannelState(
// VerifyPacketCommitment verifies a proof of an outgoing packet commitment at
// the specified port, specified channel, and specified sequence.
func (cs ClientState) VerifyPacketCommitment(
+ ctx sdk.Context,
store sdk.KVStore,
_ codec.BinaryCodec,
_ exported.Height,
@@ -255,6 +256,7 @@ func (cs ClientState) VerifyPacketCommitment(
// VerifyPacketAcknowledgement verifies a proof of an incoming packet
// acknowledgement at the specified port, specified channel, and specified sequence.
func (cs ClientState) VerifyPacketAcknowledgement(
+ ctx sdk.Context,
store sdk.KVStore,
_ codec.BinaryCodec,
_ exported.Height,
@@ -288,6 +290,7 @@ func (cs ClientState) VerifyPacketAcknowledgement(
// incoming packet receipt at the specified port, specified channel, and
// specified sequence.
func (cs ClientState) VerifyPacketReceiptAbsence(
+ ctx sdk.Context,
store sdk.KVStore,
_ codec.BinaryCodec,
_ exported.Height,
@@ -312,6 +315,7 @@ func (cs ClientState) VerifyPacketReceiptAbsence(
// VerifyNextSequenceRecv verifies a proof of the next sequence number to be
// received of the specified channel at the specified port.
func (cs ClientState) VerifyNextSequenceRecv(
+ ctx sdk.Context,
store sdk.KVStore,
_ codec.BinaryCodec,
_ exported.Height,
diff --git a/modules/light-clients/09-localhost/types/client_state_test.go b/modules/light-clients/09-localhost/types/client_state_test.go
index 358ac294..e2dbe89b 100644
--- a/modules/light-clients/09-localhost/types/client_state_test.go
+++ b/modules/light-clients/09-localhost/types/client_state_test.go
@@ -376,7 +376,7 @@ func (suite *LocalhostTestSuite) TestVerifyPacketCommitment() {
tc.malleate()
err := tc.clientState.VerifyPacketCommitment(
- suite.store, suite.cdc, clientHeight, 0, 0, nil, []byte{}, testPortID, testChannelID, testSequence, tc.commitment,
+ suite.ctx, suite.store, suite.cdc, clientHeight, 0, 0, nil, []byte{}, testPortID, testChannelID, testSequence, tc.commitment,
)
if tc.expPass {
@@ -435,7 +435,7 @@ func (suite *LocalhostTestSuite) TestVerifyPacketAcknowledgement() {
tc.malleate()
err := tc.clientState.VerifyPacketAcknowledgement(
- suite.store, suite.cdc, clientHeight, 0, 0, nil, []byte{}, testPortID, testChannelID, testSequence, tc.ack,
+ suite.ctx, suite.store, suite.cdc, clientHeight, 0, 0, nil, []byte{}, testPortID, testChannelID, testSequence, tc.ack,
)
if tc.expPass {
@@ -451,7 +451,7 @@ func (suite *LocalhostTestSuite) TestVerifyPacketReceiptAbsence() {
clientState := types.NewClientState("chainID", clientHeight)
err := clientState.VerifyPacketReceiptAbsence(
- suite.store, suite.cdc, clientHeight, 0, 0, nil, nil, testPortID, testChannelID, testSequence,
+ suite.ctx, suite.store, suite.cdc, clientHeight, 0, 0, nil, nil, testPortID, testChannelID, testSequence,
)
suite.Require().NoError(err, "receipt absence failed")
@@ -459,7 +459,7 @@ func (suite *LocalhostTestSuite) TestVerifyPacketReceiptAbsence() {
suite.store.Set(host.PacketReceiptKey(testPortID, testChannelID, testSequence), []byte("receipt"))
err = clientState.VerifyPacketReceiptAbsence(
- suite.store, suite.cdc, clientHeight, 0, 0, nil, nil, testPortID, testChannelID, testSequence,
+ suite.ctx, suite.store, suite.cdc, clientHeight, 0, 0, nil, nil, testPortID, testChannelID, testSequence,
)
suite.Require().Error(err, "receipt exists in store")
}
@@ -515,7 +515,7 @@ func (suite *LocalhostTestSuite) TestVerifyNextSeqRecv() {
tc.malleate()
err := tc.clientState.VerifyNextSequenceRecv(
- suite.store, suite.cdc, clientHeight, 0, 0, nil, []byte{}, testPortID, testChannelID, nextSeqRecv,
+ suite.ctx, suite.store, suite.cdc, clientHeight, 0, 0, nil, []byte{}, testPortID, testChannelID, nextSeqRecv,
)
if tc.expPass {
diff --git a/proto/ibc/core/connection/v1/connection.proto b/proto/ibc/core/connection/v1/connection.proto
index 5b4e32bf..e09f1529 100644
--- a/proto/ibc/core/connection/v1/connection.proto
+++ b/proto/ibc/core/connection/v1/connection.proto
@@ -104,3 +104,11 @@ message Version {
// list of features compatible with the specified identifier
repeated string features = 2;
}
+
+// Params defines the set of Connection parameters.
+message Params {
+ // maximum expected time per block, used to enforce block delay. This parameter should reflect the largest amount of
+ // time that the chain might reasonably take to produce the next block under normal operating conditions. A safe
+ // choice is 3-5x the expected time per block.
+ uint64 max_expected_time_per_block = 1 [(gogoproto.moretags) = "yaml:\"max_expected_time_per_block\""];
+}
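The comment on max_expected_time_per_block says the parameter is used to enforce a block delay: a packet's time-based delay can be translated into an equivalent number of blocks by dividing by the maximum expected block time and rounding up. The sketch below illustrates that derivation; the helper name and the exact ceil-division rule are illustrative assumptions, not code from this patch.

package main

import (
	"fmt"
	"math"
	"time"
)

// blockDelay returns how many blocks to wait so that, even at the maximum
// expected time per block, at least delayTimePeriod has elapsed (rounded up).
func blockDelay(delayTimePeriod, maxExpectedTimePerBlock uint64) uint64 {
	if maxExpectedTimePerBlock == 0 {
		return 0
	}
	return uint64(math.Ceil(float64(delayTimePeriod) / float64(maxExpectedTimePerBlock)))
}

func main() {
	// With the 30s default and a 90s packet time delay, wait at least 3 blocks.
	fmt.Println(blockDelay(uint64(90*time.Second), uint64(30*time.Second))) // 3
}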
diff --git a/proto/ibc/core/connection/v1/genesis.proto b/proto/ibc/core/connection/v1/genesis.proto
index 62296e1e..64f2e026 100644
--- a/proto/ibc/core/connection/v1/genesis.proto
+++ b/proto/ibc/core/connection/v1/genesis.proto
@@ -14,4 +14,5 @@ message GenesisState {
[(gogoproto.nullable) = false, (gogoproto.moretags) = "yaml:\"client_connection_paths\""];
// the sequence for the next generated connection identifier
uint64 next_connection_sequence = 3 [(gogoproto.moretags) = "yaml:\"next_connection_sequence\""];
+ Params params = 4 [(gogoproto.nullable) = false];
}
From 0d01128fdc610627085e591e510524ac8a6a4a4e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?colin=20axn=C3=A9r?=
<25233464+colin-axner@users.noreply.github.com>
Date: Mon, 24 May 2021 16:05:51 +0200
Subject: [PATCH 062/393] remove duplicate checks in upgrade logic (#182)
* remove duplicate checks
* changelog
---
CHANGELOG.md | 1 +
modules/core/02-client/types/proposal.go | 4 ----
modules/light-clients/07-tendermint/types/upgrade.go | 4 ----
3 files changed, 1 insertion(+), 8 deletions(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index e05d119b..5ad829f0 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -66,6 +66,7 @@ Ref: https://keepachangelog.com/en/1.0.0/
### Improvements
+* (07-tendermint) [\#182](https://github.com/cosmos/ibc-go/pull/182) Remove duplicate checks in upgrade logic.
* (modules/core/04-channel) [\#7949](https://github.com/cosmos/cosmos-sdk/issues/7949) Standardized channel `Acknowledgement` moved to its own file. Codec registration redundancy removed.
* (modules/core/04-channel) [\#144](https://github.com/cosmos/ibc-go/pull/144) Introduced a `packet_data_hex` attribute to emit the hex-encoded packet data in events. This allows for raw binary (proto-encoded message) to be sent over events and decoded correctly on relayer. Original `packet_data` is DEPRECATED. All relayers and IBC event consumers are encouraged to switch to `packet_data_hex` as soon as possible.
* (modules/light-clients/07-tendermint) [\#125](https://github.com/cosmos/ibc-go/pull/125) Implement efficient iteration of consensus states and pruning of earliest expired consensus state on UpdateClient.
diff --git a/modules/core/02-client/types/proposal.go b/modules/core/02-client/types/proposal.go
index 3d10a925..61234228 100644
--- a/modules/core/02-client/types/proposal.go
+++ b/modules/core/02-client/types/proposal.go
@@ -111,10 +111,6 @@ func (up *UpgradeProposal) ValidateBasic() error {
return err
}
- if up.Plan.Height <= 0 {
- return sdkerrors.Wrap(ErrInvalidUpgradeProposal, "IBC chain upgrades must set a positive height")
- }
-
if up.UpgradedClientState == nil {
return sdkerrors.Wrap(ErrInvalidUpgradeProposal, "upgraded client state cannot be nil")
}
diff --git a/modules/light-clients/07-tendermint/types/upgrade.go b/modules/light-clients/07-tendermint/types/upgrade.go
index b70cbd48..a4dead6f 100644
--- a/modules/light-clients/07-tendermint/types/upgrade.go
+++ b/modules/light-clients/07-tendermint/types/upgrade.go
@@ -71,10 +71,6 @@ func (cs ClientState) VerifyUpgradeAndUpdateState(
return nil, nil, sdkerrors.Wrap(err, "could not retrieve consensus state for lastHeight")
}
- if cs.IsExpired(consState.Timestamp, ctx.BlockTime()) {
- return nil, nil, sdkerrors.Wrap(clienttypes.ErrInvalidClient, "cannot upgrade an expired client")
- }
-
// Verify client proof
bz, err := cdc.MarshalInterface(upgradedClient)
if err != nil {
From c24f60cde1c0aa28a5fdf2121ed3d15431d28ca6 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?colin=20axn=C3=A9r?=
<25233464+colin-axner@users.noreply.github.com>
Date: Mon, 24 May 2021 17:48:54 +0200
Subject: [PATCH 063/393] add notes for chain-id validation (#180)
---
.../07-tendermint/types/client_state.go | 6 ++++++
.../07-tendermint/types/client_state_test.go | 20 ++++++++++++++++---
2 files changed, 23 insertions(+), 3 deletions(-)
diff --git a/modules/light-clients/07-tendermint/types/client_state.go b/modules/light-clients/07-tendermint/types/client_state.go
index bf93cdbf..06bbb902 100644
--- a/modules/light-clients/07-tendermint/types/client_state.go
+++ b/modules/light-clients/07-tendermint/types/client_state.go
@@ -100,6 +100,12 @@ func (cs ClientState) Validate() error {
if strings.TrimSpace(cs.ChainId) == "" {
return sdkerrors.Wrap(ErrInvalidChainID, "chain id cannot be empty string")
}
+
+ // NOTE: the value of tmtypes.MaxChainIDLen may change in the future.
+ // If this occurs, the code here must account for potential difference
+ // between the tendermint version being run by the counterparty chain
+ // and the tendermint version used by this light client.
+ // https://github.com/cosmos/ibc-go/issues/177
if len(cs.ChainId) > tmtypes.MaxChainIDLen {
return sdkerrors.Wrapf(ErrInvalidChainID, "chainID is too long; got: %d, max: %d", len(cs.ChainId), tmtypes.MaxChainIDLen)
}
diff --git a/modules/light-clients/07-tendermint/types/client_state_test.go b/modules/light-clients/07-tendermint/types/client_state_test.go
index 93125c04..b6235113 100644
--- a/modules/light-clients/07-tendermint/types/client_state_test.go
+++ b/modules/light-clients/07-tendermint/types/client_state_test.go
@@ -21,7 +21,10 @@ const (
testPortID = "testportid"
testChannelID = "testchannelid"
testSequence = 1
- longChainID = "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum."
+
+ // Do not change the length of these variables
+ fiftyCharChainID = "12345678901234567890123456789012345678901234567890"
+ fiftyOneCharChainID = "123456789012345678901234567890123456789012345678901"
)
var (
@@ -90,8 +93,19 @@ func (suite *TendermintTestSuite) TestValidate() {
expPass: false,
},
{
- name: "invalid chainID - chainID is above maximum character length",
- clientState: types.NewClientState(longChainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false),
+ // NOTE: if this test fails, the code must account for the change in chainID length across tendermint versions!
+ // Do not only fix the test, fix the code!
+ // https://github.com/cosmos/ibc-go/issues/177
+ name: "valid chainID - chainID validation failed for chainID of length 50! ",
+ clientState: types.NewClientState(fiftyCharChainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false),
+ expPass: true,
+ },
+ {
+ // NOTE: if this test fails, the code must account for the change in chainID length across tendermint versions!
+ // Do not only fix the test, fix the code!
+ // https://github.com/cosmos/ibc-go/issues/177
+ name: "invalid chainID - chainID validation did not fail for chainID of length 51! ",
+ clientState: types.NewClientState(fiftyOneCharChainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false),
expPass: false,
},
{
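
For reference, the 50/51-character boundary exercised by the two new test cases can be reproduced in isolation. This sketch hard-codes the limit of 50 (the value of tmtypes.MaxChainIDLen at the time of this patch) only to stay self-contained:

```go
package main

import (
	"fmt"
	"strings"
)

// maxChainIDLen mirrors tmtypes.MaxChainIDLen (50 at the time of this patch);
// it is hard-coded here only to keep the sketch self-contained.
const maxChainIDLen = 50

// validateChainID restates the length check referenced above.
func validateChainID(chainID string) error {
	if strings.TrimSpace(chainID) == "" {
		return fmt.Errorf("chain id cannot be empty string")
	}
	if len(chainID) > maxChainIDLen {
		return fmt.Errorf("chainID is too long; got: %d, max: %d", len(chainID), maxChainIDLen)
	}
	return nil
}

func main() {
	fmt.Println(validateChainID(strings.Repeat("a", 50))) // <nil>
	fmt.Println(validateChainID(strings.Repeat("a", 51))) // chainID is too long; got: 51, max: 50
}
```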
From 457095517b7832c42ecf13571fee1e550fec02d0 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?colin=20axn=C3=A9r?=
<25233464+colin-axner@users.noreply.github.com>
Date: Mon, 24 May 2021 17:57:08 +0200
Subject: [PATCH 064/393] Fix query header and node-state cli cmds (#192)
* fix cli header cmd and node-state
* fix bug
* changelog
---
CHANGELOG.md | 3 ++-
modules/core/02-client/client/utils/utils.go | 16 +++++++++++++---
2 files changed, 15 insertions(+), 4 deletions(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 5ad829f0..8e7c5fa7 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -38,7 +38,8 @@ Ref: https://keepachangelog.com/en/1.0.0/
### Bug Fixes
-* (modules/light-clients/06-solomachine) [\153](https://github.com/cosmos/ibc-go/pull/153) Fix solo machine proof height sequence mismatch bug.
+* (02-client) [\#192](https://github.com/cosmos/ibc-go/pull/192) Fix IBC `query ibc client header` cli command. Support historical queries for query header/node-state commands.
+* (modules/light-clients/06-solomachine) [\#153](https://github.com/cosmos/ibc-go/pull/153) Fix solo machine proof height sequence mismatch bug.
* (modules/light-clients/06-solomachine) [\#122](https://github.com/cosmos/ibc-go/pull/122) Fix solo machine merkle prefix casting bug.
* (modules/light-clients/06-solomachine) [\#120](https://github.com/cosmos/ibc-go/pull/120) Fix solo machine handshake verification bug.
diff --git a/modules/core/02-client/client/utils/utils.go b/modules/core/02-client/client/utils/utils.go
index b7614146..dfbbefcb 100644
--- a/modules/core/02-client/client/utils/utils.go
+++ b/modules/core/02-client/client/utils/utils.go
@@ -131,14 +131,19 @@ func QueryTendermintHeader(clientCtx client.Context) (ibctmtypes.Header, int64,
return ibctmtypes.Header{}, 0, err
}
- height := info.Response.LastBlockHeight
+ var height int64
+ if clientCtx.Height != 0 {
+ height = clientCtx.Height
+ } else {
+ height = info.Response.LastBlockHeight
+ }
commit, err := node.Commit(context.Background(), &height)
if err != nil {
return ibctmtypes.Header{}, 0, err
}
- page := 0
+ page := 1
count := 10_000
validators, err := node.Validators(context.Background(), &height, &page, &count)
@@ -173,7 +178,12 @@ func QueryNodeConsensusState(clientCtx client.Context) (*ibctmtypes.ConsensusSta
return &ibctmtypes.ConsensusState{}, 0, err
}
- height := info.Response.LastBlockHeight
+ var height int64
+ if clientCtx.Height != 0 {
+ height = clientCtx.Height
+ } else {
+ height = info.Response.LastBlockHeight
+ }
commit, err := node.Commit(context.Background(), &height)
if err != nil {
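
The same fallback appears in both query helpers: an explicitly requested height (for example via the SDK's standard `--height` query flag, which populates `clientCtx.Height`) takes precedence over the node's latest block height, and validator pagination now starts at 1 because Tendermint RPC pages are 1-indexed. A minimal sketch of the height fallback on its own:

```go
package main

import "fmt"

// queryHeight restates the fallback used in both helpers: prefer the
// caller-supplied height when set, otherwise use the node's latest height.
func queryHeight(requested, latest int64) int64 {
	if requested != 0 {
		return requested
	}
	return latest
}

func main() {
	fmt.Println(queryHeight(0, 1500))   // 1500: no explicit height, query latest
	fmt.Println(queryHeight(100, 1500)) // 100: historical query pinned to height 100
}
```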
From 2f97f0ba70523c44326c97feb99c321038321b9a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?colin=20axn=C3=A9r?=
<25233464+colin-axner@users.noreply.github.com>
Date: Wed, 26 May 2021 21:28:14 +0200
Subject: [PATCH 065/393] emit hex encoded ack attribute in events (#197)
* emit hex encoded ack in events
* add changelog and update migration docs
---
CHANGELOG.md | 1 +
docs/migrations/ibc-migration-043.md | 2 ++
modules/core/04-channel/keeper/packet.go | 1 +
modules/core/04-channel/types/events.go | 4 +++-
4 files changed, 7 insertions(+), 1 deletion(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 8e7c5fa7..edf71106 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -70,6 +70,7 @@ Ref: https://keepachangelog.com/en/1.0.0/
* (07-tendermint) [\#182](https://github.com/cosmos/ibc-go/pull/182) Remove duplicate checks in upgrade logic.
* (modules/core/04-channel) [\#7949](https://github.com/cosmos/cosmos-sdk/issues/7949) Standardized channel `Acknowledgement` moved to its own file. Codec registration redundancy removed.
* (modules/core/04-channel) [\#144](https://github.com/cosmos/ibc-go/pull/144) Introduced a `packet_data_hex` attribute to emit the hex-encoded packet data in events. This allows for raw binary (proto-encoded message) to be sent over events and decoded correctly on relayer. Original `packet_data` is DEPRECATED. All relayers and IBC event consumers are encouraged to switch to `packet_data_hex` as soon as possible.
+* (core/04-channel) [\#197](https://github.com/cosmos/ibc-go/pull/197) Introduced a `packet_ack_hex` attribute to emit the hex-encoded acknowledgement in events. This allows for raw binary (proto-encoded message) to be sent over events and decoded correctly on relayer. Original `packet_ack` is DEPRECATED. All relayers and IBC event consumers are encouraged to switch to `packet_ack_hex` as soon as possible.
* (modules/light-clients/07-tendermint) [\#125](https://github.com/cosmos/ibc-go/pull/125) Implement efficient iteration of consensus states and pruning of earliest expired consensus state on UpdateClient.
* (modules/light-clients/07-tendermint) [\#141](https://github.com/cosmos/ibc-go/pull/141) Return early in case there's a duplicate update call to save Gas.
diff --git a/docs/migrations/ibc-migration-043.md b/docs/migrations/ibc-migration-043.md
index 194cca4c..177bb3c7 100644
--- a/docs/migrations/ibc-migration-043.md
+++ b/docs/migrations/ibc-migration-043.md
@@ -110,6 +110,8 @@ The `OnRecvPacket` callback has been modified to only return the acknowledgement
The `packet_data` attribute has been deprecated in favor of `packet_data_hex`, in order to provide standardized encoding/decoding of packet data in events. While the `packet_data` event still exists, all relayers and IBC Event consumers are strongly encouraged to switch over to using `packet_data_hex` as soon as possible.
+The `packet_ack` attribute has also been deprecated in favor of `packet_ack_hex` for the same reason stated above. All relayers and IBC Event consumers are strongly encouraged to switch over to using `packet_ack_hex` as soon as possible.
+
The `consensus_height` attribute has been removed in the Misbehaviour event emitted. IBC clients no longer have a frozen height and misbehaviour does not necessarily have an associated height.
## Relevant SDK changes
diff --git a/modules/core/04-channel/keeper/packet.go b/modules/core/04-channel/keeper/packet.go
index 72a1ff5b..78755d10 100644
--- a/modules/core/04-channel/keeper/packet.go
+++ b/modules/core/04-channel/keeper/packet.go
@@ -381,6 +381,7 @@ func (k Keeper) WriteAcknowledgement(
sdk.NewAttribute(types.AttributeKeyDstPort, packet.GetDestPort()),
sdk.NewAttribute(types.AttributeKeyDstChannel, packet.GetDestChannel()),
sdk.NewAttribute(types.AttributeKeyAck, string(acknowledgement)),
+ sdk.NewAttribute(types.AttributeKeyAckHex, hex.EncodeToString(acknowledgement)),
// we only support 1-hop packets now, and that is the most important hop for a relayer
// (is it going to a chain I am connected to)
sdk.NewAttribute(types.AttributeKeyConnection, channel.ConnectionHops[0]),
diff --git a/modules/core/04-channel/types/events.go b/modules/core/04-channel/types/events.go
index 1ef14346..201fd26a 100644
--- a/modules/core/04-channel/types/events.go
+++ b/modules/core/04-channel/types/events.go
@@ -23,9 +23,11 @@ const (
// NOTE: DEPRECATED in favor of AttributeKeyDataHex
AttributeKeyData = "packet_data"
+ // NOTE: DEPRECATED in favor of AttributeKeyAckHex
+ AttributeKeyAck = "packet_ack"
AttributeKeyDataHex = "packet_data_hex"
- AttributeKeyAck = "packet_ack"
+ AttributeKeyAckHex = "packet_ack_hex"
AttributeKeyTimeoutHeight = "packet_timeout_height"
AttributeKeyTimeoutTimestamp = "packet_timeout_timestamp"
AttributeKeySequence = "packet_sequence"
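
For event consumers, the new attribute is simply the acknowledgement bytes hex-encoded. A minimal sketch of decoding it (the attribute value shown is an example JSON acknowledgement, and reading it from the ABCI event is omitted):

```go
package main

import (
	"encoding/hex"
	"fmt"
)

func main() {
	// value as it would appear under the new packet_ack_hex attribute,
	// i.e. hex.EncodeToString(acknowledgement) for an example JSON ack
	attr := "7b22726573756c74223a2241513d3d227d"

	ack, err := hex.DecodeString(attr)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(ack)) // {"result":"AQ=="}
}
```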
From ddeb58ecbc101dd667583dfd7588a7c102d013b2 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?colin=20axn=C3=A9r?=
<25233464+colin-axner@users.noreply.github.com>
Date: Wed, 26 May 2021 21:47:07 +0200
Subject: [PATCH 066/393] Reference commit hash IBC was removed from the SDK in
the changelog (#194)
* update CHANGELOG
* fix wording
Co-authored-by: Aditya
---
CHANGELOG.md | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index edf71106..5aa5bcc8 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -82,3 +82,7 @@ Please see the [Release Notes](https://github.com/cosmos/cosmos-sdk/blob/v0.40.0
The IBC module is also contained in the releases for [v0.41.x](https://github.com/cosmos/cosmos-sdk/releases/tag/v0.41.0) and [v0.42.x](https://github.com/cosmos/cosmos-sdk/releases/tag/v0.42.0).
Please see the Release Notes for [v0.41.x](https://github.com/cosmos/cosmos-sdk/blob/release/v0.41.x/RELEASE_NOTES.md) and [v0.42.x](https://github.com/cosmos/cosmos-sdk/blob/release/v0.42.x/RELEASE_NOTES.md).
+
+The IBC module was removed from the SDK in commit [da064e13d56add466548135739c5860a9f7ed842](https://github.com/cosmos/cosmos-sdk/commit/da064e13d56add466548135739c5860a9f7ed842). The release for SDK v0.43.0 will be the first release without the IBC module.
+
+Backports should be made to the [release/v0.42.x](https://github.com/cosmos/cosmos-sdk/tree/release/v0.42.x) branch on the SDK.
From 269164c229b2a85b8fbf0c9f7520018eec84002b Mon Sep 17 00:00:00 2001
From: Leo Pang <34628052+allthatjazzleo@users.noreply.github.com>
Date: Thu, 27 May 2021 18:06:33 +0800
Subject: [PATCH 067/393] Missing NextClientSequence, NextConnectionSequence
and NextChannelSequence in ExportGenesis (#200)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
* add missing GenesisState field for ibc client, connection and channel
* add changelog
Co-authored-by: colin axnér <25233464+colin-axner@users.noreply.github.com>
---
CHANGELOG.md | 1 +
modules/core/02-client/genesis.go | 11 ++++++-----
modules/core/03-connection/genesis.go | 5 +++--
modules/core/04-channel/genesis.go | 15 ++++++++-------
4 files changed, 18 insertions(+), 14 deletions(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 5aa5bcc8..1ad0001a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -38,6 +38,7 @@ Ref: https://keepachangelog.com/en/1.0.0/
### Bug Fixes
+* (core) [\#200](https://github.com/cosmos/ibc-go/pull/200) Fixes incorrect export of IBC identifier sequences. Previously, the next identifier sequence for clients/connections/channels was not set during genesis export, so identifiers generated on the new chain reused old identifiers (the sequences began again from 0).
* (02-client) [\#192](https://github.com/cosmos/ibc-go/pull/192) Fix IBC `query ibc client header` cli command. Support historical queries for query header/node-state commands.
* (modules/light-clients/06-solomachine) [\#153](https://github.com/cosmos/ibc-go/pull/153) Fix solo machine proof height sequence mismatch bug.
* (modules/light-clients/06-solomachine) [\#122](https://github.com/cosmos/ibc-go/pull/122) Fix solo machine merkle prefix casting bug.
diff --git a/modules/core/02-client/genesis.go b/modules/core/02-client/genesis.go
index d6e0a217..df7db36a 100644
--- a/modules/core/02-client/genesis.go
+++ b/modules/core/02-client/genesis.go
@@ -60,10 +60,11 @@ func ExportGenesis(ctx sdk.Context, k keeper.Keeper) types.GenesisState {
panic(err)
}
return types.GenesisState{
- Clients: genClients,
- ClientsMetadata: clientsMetadata,
- ClientsConsensus: k.GetAllConsensusStates(ctx),
- Params: k.GetParams(ctx),
- CreateLocalhost: false,
+ Clients: genClients,
+ ClientsMetadata: clientsMetadata,
+ ClientsConsensus: k.GetAllConsensusStates(ctx),
+ Params: k.GetParams(ctx),
+ CreateLocalhost: false,
+ NextClientSequence: k.GetNextClientSequence(ctx),
}
}
diff --git a/modules/core/03-connection/genesis.go b/modules/core/03-connection/genesis.go
index af46c8ba..4b139c93 100644
--- a/modules/core/03-connection/genesis.go
+++ b/modules/core/03-connection/genesis.go
@@ -23,7 +23,8 @@ func InitGenesis(ctx sdk.Context, k keeper.Keeper, gs types.GenesisState) {
// ExportGenesis returns the ibc connection submodule's exported genesis.
func ExportGenesis(ctx sdk.Context, k keeper.Keeper) types.GenesisState {
return types.GenesisState{
- Connections: k.GetAllConnections(ctx),
- ClientConnectionPaths: k.GetAllClientConnectionPaths(ctx),
+ Connections: k.GetAllConnections(ctx),
+ ClientConnectionPaths: k.GetAllClientConnectionPaths(ctx),
+ NextConnectionSequence: k.GetNextConnectionSequence(ctx),
}
}
diff --git a/modules/core/04-channel/genesis.go b/modules/core/04-channel/genesis.go
index b86a2112..63d354ee 100644
--- a/modules/core/04-channel/genesis.go
+++ b/modules/core/04-channel/genesis.go
@@ -37,12 +37,13 @@ func InitGenesis(ctx sdk.Context, k keeper.Keeper, gs types.GenesisState) {
// ExportGenesis returns the ibc channel submodule's exported genesis.
func ExportGenesis(ctx sdk.Context, k keeper.Keeper) types.GenesisState {
return types.GenesisState{
- Channels: k.GetAllChannels(ctx),
- Acknowledgements: k.GetAllPacketAcks(ctx),
- Commitments: k.GetAllPacketCommitments(ctx),
- Receipts: k.GetAllPacketReceipts(ctx),
- SendSequences: k.GetAllPacketSendSeqs(ctx),
- RecvSequences: k.GetAllPacketRecvSeqs(ctx),
- AckSequences: k.GetAllPacketAckSeqs(ctx),
+ Channels: k.GetAllChannels(ctx),
+ Acknowledgements: k.GetAllPacketAcks(ctx),
+ Commitments: k.GetAllPacketCommitments(ctx),
+ Receipts: k.GetAllPacketReceipts(ctx),
+ SendSequences: k.GetAllPacketSendSeqs(ctx),
+ RecvSequences: k.GetAllPacketRecvSeqs(ctx),
+ AckSequences: k.GetAllPacketAckSeqs(ctx),
+ NextChannelSequence: k.GetNextChannelSequence(ctx),
}
}
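
The bug fixed here is easiest to see from how identifiers are minted: each new client/connection/channel ID is formatted from a monotonically increasing sequence, so if the sequence is not exported the new chain restarts counting at 0 and regenerates identifiers that already exist. A self-contained sketch with a simplified stand-in for the real formatting helper:

```go
package main

import "fmt"

// nextConnectionID mimics how IBC identifiers are minted from a counter:
// format the current sequence into an identifier, then increment the sequence.
func nextConnectionID(sequence *uint64) string {
	id := fmt.Sprintf("connection-%d", *sequence)
	*sequence++
	return id
}

func main() {
	// with the sequence exported, the new chain continues where the old one stopped
	seq := uint64(3) // connection-0..connection-2 already exist
	fmt.Println(nextConnectionID(&seq)) // connection-3

	// before this fix the sequence was effectively reset on import,
	// so the new chain would mint connection-0 again and collide
	reset := uint64(0)
	fmt.Println(nextConnectionID(&reset)) // connection-0 (duplicate)
}
```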
From 4ecfe16a8dec94a1ed2fcd266f171fb3c9a041ba Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?colin=20axn=C3=A9r?=
<25233464+colin-axner@users.noreply.github.com>
Date: Thu, 27 May 2021 12:37:06 +0200
Subject: [PATCH 068/393] improve error messages, indicate already relayed
packets (#184)
* improve error messages
* changelog
* update recvpacket errors and test
* add acknowledge packet tests
* update timeout tests
Co-authored-by: Aditya
---
CHANGELOG.md | 1 +
modules/core/04-channel/keeper/packet.go | 22 +-
modules/core/04-channel/keeper/packet_test.go | 188 ++++++++++++++++--
modules/core/04-channel/keeper/timeout.go | 6 +-
.../core/04-channel/keeper/timeout_test.go | 50 +++++
modules/core/04-channel/types/errors.go | 13 +-
modules/core/23-commitment/types/merkle.go | 4 +-
.../07-tendermint/types/client_state.go | 2 +-
testing/endpoint.go | 28 ++-
9 files changed, 287 insertions(+), 27 deletions(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 1ad0001a..99b12c62 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -68,6 +68,7 @@ Ref: https://keepachangelog.com/en/1.0.0/
### Improvements
+* (modules/core) [\#184](https://github.com/cosmos/ibc-go/pull/184) Improve error messages. Uses unique error codes to indicate already relayed packets.
* (07-tendermint) [\#182](https://github.com/cosmos/ibc-go/pull/182) Remove duplicate checks in upgrade logic.
* (modules/core/04-channel) [\#7949](https://github.com/cosmos/cosmos-sdk/issues/7949) Standardized channel `Acknowledgement` moved to its own file. Codec registration redundancy removed.
* (modules/core/04-channel) [\#144](https://github.com/cosmos/ibc-go/pull/144) Introduced a `packet_data_hex` attribute to emit the hex-encoded packet data in events. This allows for raw binary (proto-encoded message) to be sent over events and decoded correctly on relayer. Original `packet_data` is DEPRECATED. All relayers and IBC event consumers are encouraged to switch to `packet_data_hex` as soon as possible.
diff --git a/modules/core/04-channel/keeper/packet.go b/modules/core/04-channel/keeper/packet.go
index 78755d10..f917353a 100644
--- a/modules/core/04-channel/keeper/packet.go
+++ b/modules/core/04-channel/keeper/packet.go
@@ -241,8 +241,8 @@ func (k Keeper) RecvPacket(
_, found := k.GetPacketReceipt(ctx, packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence())
if found {
return sdkerrors.Wrapf(
- types.ErrInvalidPacket,
- "packet sequence (%d) already has been received", packet.GetSequence(),
+ types.ErrPacketReceived,
+ "packet sequence (%d)", packet.GetSequence(),
)
}
@@ -262,9 +262,17 @@ func (k Keeper) RecvPacket(
)
}
+ // helpful error message for relayers
+ if packet.GetSequence() < nextSequenceRecv {
+ return sdkerrors.Wrapf(
+ types.ErrPacketReceived,
+ "packet sequence (%d), next sequence receive (%d)", packet.GetSequence(), nextSequenceRecv,
+ )
+ }
+
if packet.GetSequence() != nextSequenceRecv {
return sdkerrors.Wrapf(
- types.ErrInvalidPacket,
+ types.ErrPacketSequenceOutOfOrder,
"packet sequence ≠ next receive sequence (%d ≠ %d)", packet.GetSequence(), nextSequenceRecv,
)
}
@@ -462,6 +470,10 @@ func (k Keeper) AcknowledgePacket(
commitment := k.GetPacketCommitment(ctx, packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence())
+ if len(commitment) == 0 {
+ return sdkerrors.Wrapf(types.ErrPacketCommitmentNotFound, "packet with sequence (%d) has been acknowledged, or timed out. In rare cases the packet was never sent or the packet sequence is incorrect", packet.GetSequence())
+ }
+
packetCommitment := types.CommitPacket(k.cdc, packet)
// verify we sent the packet and haven't cleared it out yet
@@ -473,7 +485,7 @@ func (k Keeper) AcknowledgePacket(
ctx, connectionEnd, proofHeight, proof, packet.GetDestPort(), packet.GetDestChannel(),
packet.GetSequence(), acknowledgement,
); err != nil {
- return sdkerrors.Wrap(err, "packet acknowledgement verification failed")
+ return err
}
// assert packets acknowledged in order
@@ -488,7 +500,7 @@ func (k Keeper) AcknowledgePacket(
if packet.GetSequence() != nextSequenceAck {
return sdkerrors.Wrapf(
- sdkerrors.ErrInvalidSequence,
+ types.ErrPacketSequenceOutOfOrder,
"packet sequence ≠ next ack sequence (%d ≠ %d)", packet.GetSequence(), nextSequenceAck,
)
}
diff --git a/modules/core/04-channel/keeper/packet_test.go b/modules/core/04-channel/keeper/packet_test.go
index 4916ae9d..91ddc7d1 100644
--- a/modules/core/04-channel/keeper/packet_test.go
+++ b/modules/core/04-channel/keeper/packet_test.go
@@ -1,10 +1,14 @@
package keeper_test
import (
+ "errors"
"fmt"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"
+
clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ connectiontypes "github.com/cosmos/ibc-go/modules/core/03-connection/types"
"github.com/cosmos/ibc-go/modules/core/04-channel/types"
host "github.com/cosmos/ibc-go/modules/core/24-host"
"github.com/cosmos/ibc-go/modules/core/exported"
@@ -200,6 +204,7 @@ func (suite *KeeperTestSuite) TestRecvPacket() {
path *ibctesting.Path
packet exported.PacketI
channelCap *capabilitytypes.Capability
+ expError *sdkerrors.Error
)
testCases := []testCase{
@@ -235,7 +240,36 @@ func (suite *KeeperTestSuite) TestRecvPacket() {
// attempts to receive packet 2 without receiving packet 1
channelCap = suite.chainB.GetChannelCapability(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID)
}, true},
+ {"packet already relayed ORDERED channel", func() {
+ expError = types.ErrPacketReceived
+
+ path.SetChannelOrdered()
+ suite.coordinator.Setup(path)
+
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
+ err := path.EndpointA.SendPacket(packet)
+ suite.Require().NoError(err)
+ channelCap = suite.chainB.GetChannelCapability(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID)
+
+ err = path.EndpointB.RecvPacket(packet.(types.Packet))
+ suite.Require().NoError(err)
+ }, false},
+ {"packet already relayed UNORDERED channel", func() {
+ expError = types.ErrPacketReceived
+
+ // setup uses an UNORDERED channel
+ suite.coordinator.Setup(path)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
+ err := path.EndpointA.SendPacket(packet)
+ suite.Require().NoError(err)
+ channelCap = suite.chainB.GetChannelCapability(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID)
+
+ err = path.EndpointB.RecvPacket(packet.(types.Packet))
+ suite.Require().NoError(err)
+ }, false},
{"out of order packet failure with ORDERED channel", func() {
+ expError = types.ErrPacketSequenceOutOfOrder
+
path.SetChannelOrdered()
suite.coordinator.Setup(path)
packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
@@ -251,12 +285,16 @@ func (suite *KeeperTestSuite) TestRecvPacket() {
channelCap = suite.chainB.GetChannelCapability(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID)
}, false},
{"channel not found", func() {
+ expError = types.ErrChannelNotFound
+
// use wrong channel naming
suite.coordinator.Setup(path)
packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, ibctesting.InvalidID, ibctesting.InvalidID, timeoutHeight, disabledTimeoutTimestamp)
channelCap = suite.chainB.GetChannelCapability(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID)
}, false},
{"channel not open", func() {
+ expError = types.ErrInvalidChannelState
+
suite.coordinator.Setup(path)
packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
@@ -265,26 +303,36 @@ func (suite *KeeperTestSuite) TestRecvPacket() {
channelCap = suite.chainB.GetChannelCapability(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID)
}, false},
{"capability cannot authenticate ORDERED", func() {
+ expError = types.ErrInvalidChannelCapability
+
path.SetChannelOrdered()
suite.coordinator.Setup(path)
+
packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
err := path.EndpointA.SendPacket(packet)
suite.Require().NoError(err)
channelCap = capabilitytypes.NewCapability(3)
}, false},
{"packet source port ≠ channel counterparty port", func() {
+ expError = types.ErrInvalidPacket
suite.coordinator.Setup(path)
+
// use wrong source port
packet = types.NewPacket(ibctesting.MockPacketData, 1, ibctesting.InvalidID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
channelCap = suite.chainB.GetChannelCapability(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID)
}, false},
{"packet source channel ID ≠ channel counterparty channel ID", func() {
+ expError = types.ErrInvalidPacket
suite.coordinator.Setup(path)
+
// use wrong source channel
packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, ibctesting.InvalidID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
channelCap = suite.chainB.GetChannelCapability(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID)
}, false},
{"connection not found", func() {
+ expError = connectiontypes.ErrConnectionNotFound
+ suite.coordinator.Setup(path)
+
// pass channel check
suite.chainB.App.GetIBCKeeper().ChannelKeeper.SetChannel(
suite.chainB.GetContext(),
@@ -296,6 +344,7 @@ func (suite *KeeperTestSuite) TestRecvPacket() {
channelCap = suite.chainB.GetChannelCapability(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID)
}, false},
{"connection not OPEN", func() {
+ expError = connectiontypes.ErrInvalidConnectionState
suite.coordinator.SetupClients(path)
// connection on chainB is in INIT
@@ -313,19 +362,26 @@ func (suite *KeeperTestSuite) TestRecvPacket() {
channelCap = suite.chainB.GetChannelCapability(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID)
}, false},
{"timeout height passed", func() {
+ expError = types.ErrPacketTimeout
suite.coordinator.Setup(path)
+
packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), disabledTimeoutTimestamp)
channelCap = suite.chainB.GetChannelCapability(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID)
}, false},
{"timeout timestamp passed", func() {
+ expError = types.ErrPacketTimeout
suite.coordinator.Setup(path)
+
packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, disabledTimeoutHeight, uint64(suite.chainB.GetContext().BlockTime().UnixNano()))
channelCap = suite.chainB.GetChannelCapability(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID)
}, false},
{"next receive sequence is not found", func() {
- path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ expError = types.ErrSequenceReceiveNotFound
suite.coordinator.SetupConnections(path)
+ path.EndpointA.ChannelID = ibctesting.FirstChannelID
+ path.EndpointB.ChannelID = ibctesting.FirstChannelID
+
// manually creating channel prevents next recv sequence from being set
suite.chainB.App.GetIBCKeeper().ChannelKeeper.SetChannel(
suite.chainB.GetContext(),
@@ -336,19 +392,26 @@ func (suite *KeeperTestSuite) TestRecvPacket() {
packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
// manually set packet commitment
- suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetPacketCommitment(suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, packet.GetSequence(), ibctesting.MockPacketData)
+ suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetPacketCommitment(suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, packet.GetSequence(), types.CommitPacket(suite.chainA.App.AppCodec(), packet))
suite.chainB.CreateChannelCapability(suite.chainB.GetSimApp().ScopedIBCMockKeeper, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID)
channelCap = suite.chainB.GetChannelCapability(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID)
+
+ path.EndpointA.UpdateClient()
+ path.EndpointB.UpdateClient()
}, false},
{"receipt already stored", func() {
+ expError = types.ErrPacketReceived
suite.coordinator.Setup(path)
+
packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
path.EndpointA.SendPacket(packet)
suite.chainB.App.GetIBCKeeper().ChannelKeeper.SetPacketReceipt(suite.chainB.GetContext(), path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, 1)
channelCap = suite.chainB.GetChannelCapability(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID)
}, false},
{"validation failed", func() {
+ // skip error code check, downstream error code is used from light-client implementations
+
// packet commitment not set resulting in invalid proof
suite.coordinator.Setup(path)
packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
@@ -360,13 +423,14 @@ func (suite *KeeperTestSuite) TestRecvPacket() {
tc := tc
suite.Run(fmt.Sprintf("Case %s, %d/%d tests", tc.msg, i, len(testCases)), func() {
suite.SetupTest() // reset
+ expError = nil // must be explicitly set for failed cases
path = ibctesting.NewPath(suite.chainA, suite.chainB)
tc.malleate()
// get proof of packet commitment from chainA
packetKey := host.PacketCommitmentKey(packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence())
- proof, proofHeight := suite.chainA.QueryProof(packetKey)
+ proof, proofHeight := path.EndpointA.QueryProof(packetKey)
err := suite.chainB.App.GetIBCKeeper().ChannelKeeper.RecvPacket(suite.chainB.GetContext(), channelCap, packet, proof, proofHeight)
@@ -388,6 +452,11 @@ func (suite *KeeperTestSuite) TestRecvPacket() {
}
} else {
suite.Require().Error(err)
+
+ // only check if expError is set, since not all error codes can be known
+ if expError != nil {
+ suite.Require().True(errors.Is(err, expError))
+ }
}
})
}
@@ -488,6 +557,7 @@ func (suite *KeeperTestSuite) TestAcknowledgePacket() {
ack = ibcmock.MockAcknowledgement
channelCap *capabilitytypes.Capability
+ expError *sdkerrors.Error
)
testCases := []testCase{
@@ -520,12 +590,55 @@ func (suite *KeeperTestSuite) TestAcknowledgePacket() {
channelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
}, true},
+ {"packet already acknowledged ordered channel", func() {
+ expError = types.ErrPacketCommitmentNotFound
+
+ path.SetChannelOrdered()
+ suite.coordinator.Setup(path)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
+ // create packet commitment
+ err := path.EndpointA.SendPacket(packet)
+ suite.Require().NoError(err)
+
+ // create packet receipt and acknowledgement
+ err = path.EndpointB.RecvPacket(packet)
+ suite.Require().NoError(err)
+
+ channelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
+
+ err = path.EndpointA.AcknowledgePacket(packet, ack.Acknowledgement())
+ suite.Require().NoError(err)
+ }, false},
+ {"packet already acknowledged unordered channel", func() {
+ expError = types.ErrPacketCommitmentNotFound
+
+ // setup uses an UNORDERED channel
+ suite.coordinator.Setup(path)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
+
+ // create packet commitment
+ err := path.EndpointA.SendPacket(packet)
+ suite.Require().NoError(err)
+
+ // create packet receipt and acknowledgement
+ err = path.EndpointB.RecvPacket(packet)
+ suite.Require().NoError(err)
+
+ channelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
+
+ err = path.EndpointA.AcknowledgePacket(packet, ack.Acknowledgement())
+ suite.Require().NoError(err)
+ }, false},
{"channel not found", func() {
+ expError = types.ErrChannelNotFound
+
// use wrong channel naming
suite.coordinator.Setup(path)
packet = types.NewPacket(ibctesting.MockPacketData, 1, ibctesting.InvalidID, ibctesting.InvalidID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
}, false},
{"channel not open", func() {
+ expError = types.ErrInvalidChannelState
+
suite.coordinator.Setup(path)
packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
@@ -534,8 +647,11 @@ func (suite *KeeperTestSuite) TestAcknowledgePacket() {
channelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
}, false},
{"capability authentication failed ORDERED", func() {
+ expError = types.ErrInvalidChannelCapability
+
path.SetChannelOrdered()
suite.coordinator.Setup(path)
+
packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
// create packet commitment
err := path.EndpointA.SendPacket(packet)
@@ -548,29 +664,37 @@ func (suite *KeeperTestSuite) TestAcknowledgePacket() {
channelCap = capabilitytypes.NewCapability(3)
}, false},
{"packet destination port ≠ channel counterparty port", func() {
+ expError = types.ErrInvalidPacket
suite.coordinator.Setup(path)
+
// use wrong port for dest
packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, ibctesting.InvalidID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
channelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
}, false},
{"packet destination channel ID ≠ channel counterparty channel ID", func() {
+ expError = types.ErrInvalidPacket
suite.coordinator.Setup(path)
+
// use wrong channel for dest
packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, ibctesting.InvalidID, timeoutHeight, disabledTimeoutTimestamp)
channelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
}, false},
{"connection not found", func() {
+ expError = connectiontypes.ErrConnectionNotFound
+ suite.coordinator.Setup(path)
+
// pass channel check
- suite.chainB.App.GetIBCKeeper().ChannelKeeper.SetChannel(
- suite.chainB.GetContext(),
- path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID,
- types.NewChannel(types.OPEN, types.ORDERED, types.NewCounterparty(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID), []string{connIDB}, path.EndpointB.ChannelConfig.Version),
+ suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetChannel(
+ suite.chainA.GetContext(),
+ path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID,
+ types.NewChannel(types.OPEN, types.ORDERED, types.NewCounterparty(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID), []string{"connection-1000"}, path.EndpointA.ChannelConfig.Version),
)
packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
suite.chainA.CreateChannelCapability(suite.chainA.GetSimApp().ScopedIBCMockKeeper, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
channelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
}, false},
{"connection not OPEN", func() {
+ expError = connectiontypes.ErrInvalidConnectionState
suite.coordinator.SetupClients(path)
// connection on chainA is in INIT
err := path.EndpointA.ConnOpenInit()
@@ -587,12 +711,16 @@ func (suite *KeeperTestSuite) TestAcknowledgePacket() {
channelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
}, false},
{"packet hasn't been sent", func() {
+ expError = types.ErrPacketCommitmentNotFound
+
// packet commitment never written
suite.coordinator.Setup(path)
packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
channelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
}, false},
{"packet ack verification failed", func() {
+ // skip error code check since error occurs in light-clients
+
// ack never written
suite.coordinator.Setup(path)
packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
@@ -601,25 +729,56 @@ func (suite *KeeperTestSuite) TestAcknowledgePacket() {
path.EndpointA.SendPacket(packet)
channelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
}, false},
+ {"packet commitment bytes do not match", func() {
+ expError = types.ErrInvalidPacket
+
+ // setup uses an UNORDERED channel
+ suite.coordinator.Setup(path)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
+
+ // create packet commitment
+ err := path.EndpointA.SendPacket(packet)
+ suite.Require().NoError(err)
+
+ // create packet receipt and acknowledgement
+ err = path.EndpointB.RecvPacket(packet)
+ suite.Require().NoError(err)
+
+ channelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
+
+ packet.Data = []byte("invalid packet commitment")
+ }, false},
{"next ack sequence not found", func() {
- path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ expError = types.ErrSequenceAckNotFound
suite.coordinator.SetupConnections(path)
- packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
+
+ path.EndpointA.ChannelID = ibctesting.FirstChannelID
+ path.EndpointB.ChannelID = ibctesting.FirstChannelID
+
// manually creating channel prevents next sequence acknowledgement from being set
suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetChannel(
suite.chainA.GetContext(),
path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID,
types.NewChannel(types.OPEN, types.ORDERED, types.NewCounterparty(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID), []string{path.EndpointA.ConnectionID}, path.EndpointA.ChannelConfig.Version),
)
+
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
// manually set packet commitment
- suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetPacketCommitment(suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, packet.GetSequence(), ibctesting.MockPacketData)
+ suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetPacketCommitment(suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, packet.GetSequence(), types.CommitPacket(suite.chainA.App.AppCodec(), packet))
// manually set packet acknowledgement and capability
- suite.chainB.App.GetIBCKeeper().ChannelKeeper.SetPacketAcknowledgement(suite.chainB.GetContext(), path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, packet.GetSequence(), ibctesting.MockAcknowledgement)
+ suite.chainB.App.GetIBCKeeper().ChannelKeeper.SetPacketAcknowledgement(suite.chainB.GetContext(), path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, packet.GetSequence(), types.CommitAcknowledgement(ack.Acknowledgement()))
+
suite.chainA.CreateChannelCapability(suite.chainA.GetSimApp().ScopedIBCMockKeeper, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
channelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
+
+ suite.coordinator.CommitBlock(path.EndpointA.Chain, path.EndpointB.Chain)
+
+ path.EndpointA.UpdateClient()
+ path.EndpointB.UpdateClient()
}, false},
{"next ack sequence mismatch ORDERED", func() {
+ expError = types.ErrPacketSequenceOutOfOrder
path.SetChannelOrdered()
suite.coordinator.Setup(path)
packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
@@ -641,12 +800,13 @@ func (suite *KeeperTestSuite) TestAcknowledgePacket() {
tc := tc
suite.Run(fmt.Sprintf("Case %s, %d/%d tests", tc.msg, i, len(testCases)), func() {
suite.SetupTest() // reset
+ expError = nil // must explicitly set error for failed cases
path = ibctesting.NewPath(suite.chainA, suite.chainB)
tc.malleate()
packetKey := host.PacketAcknowledgementKey(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence())
- proof, proofHeight := suite.chainB.QueryProof(packetKey)
+ proof, proofHeight := path.EndpointB.QueryProof(packetKey)
err := suite.chainA.App.GetIBCKeeper().ChannelKeeper.AcknowledgePacket(suite.chainA.GetContext(), channelCap, packet, ack.Acknowledgement(), proof, proofHeight)
pc := suite.chainA.App.GetIBCKeeper().ChannelKeeper.GetPacketCommitment(suite.chainA.GetContext(), packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence())
@@ -665,6 +825,10 @@ func (suite *KeeperTestSuite) TestAcknowledgePacket() {
}
} else {
suite.Error(err)
+ // only check if expError is set, since not all error codes can be known
+ if expError != nil {
+ suite.Require().True(errors.Is(err, expError))
+ }
}
})
}
diff --git a/modules/core/04-channel/keeper/timeout.go b/modules/core/04-channel/keeper/timeout.go
index 1f6357d3..08d10f83 100644
--- a/modules/core/04-channel/keeper/timeout.go
+++ b/modules/core/04-channel/keeper/timeout.go
@@ -80,6 +80,10 @@ func (k Keeper) TimeoutPacket(
commitment := k.GetPacketCommitment(ctx, packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence())
+ if len(commitment) == 0 {
+ return sdkerrors.Wrapf(types.ErrPacketCommitmentNotFound, "packet with sequence (%d) has been acknowledged or timed out. In rare cases the packet was never sent or the packet sequence is incorrect", packet.GetSequence())
+ }
+
packetCommitment := types.CommitPacket(k.cdc, packet)
// verify we sent the packet and haven't cleared it out yet
@@ -92,7 +96,7 @@ func (k Keeper) TimeoutPacket(
// check that packet has not been received
if nextSequenceRecv > packet.GetSequence() {
return sdkerrors.Wrapf(
- types.ErrInvalidPacket,
+ types.ErrPacketReceived,
"packet already received, next sequence receive > packet sequence (%d > %d)", nextSequenceRecv, packet.GetSequence(),
)
}
diff --git a/modules/core/04-channel/keeper/timeout_test.go b/modules/core/04-channel/keeper/timeout_test.go
index ab6c4e49..460e6097 100644
--- a/modules/core/04-channel/keeper/timeout_test.go
+++ b/modules/core/04-channel/keeper/timeout_test.go
@@ -1,10 +1,14 @@
package keeper_test
import (
+ "errors"
"fmt"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"
+
clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ connectiontypes "github.com/cosmos/ibc-go/modules/core/03-connection/types"
"github.com/cosmos/ibc-go/modules/core/04-channel/types"
host "github.com/cosmos/ibc-go/modules/core/24-host"
"github.com/cosmos/ibc-go/modules/core/exported"
@@ -20,6 +24,7 @@ func (suite *KeeperTestSuite) TestTimeoutPacket() {
packet types.Packet
nextSeqRecv uint64
ordered bool
+ expError *sdkerrors.Error
)
testCases := []testCase{
@@ -42,12 +47,41 @@ func (suite *KeeperTestSuite) TestTimeoutPacket() {
// need to update chainA's client representing chainB to prove missing ack
path.EndpointA.UpdateClient()
}, true},
+ {"packet already timed out: ORDERED", func() {
+ expError = types.ErrInvalidChannelState
+ ordered = true
+ path.SetChannelOrdered()
+
+ suite.coordinator.Setup(path)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), uint64(suite.chainB.GetContext().BlockTime().UnixNano()))
+ path.EndpointA.SendPacket(packet)
+ // need to update chainA's client representing chainB to prove missing ack
+ path.EndpointA.UpdateClient()
+
+ err := path.EndpointA.TimeoutPacket(packet)
+ suite.Require().NoError(err)
+ }, false},
+ {"packet already timed out: UNORDERED", func() {
+ expError = types.ErrPacketCommitmentNotFound
+ ordered = false
+
+ suite.coordinator.Setup(path)
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), disabledTimeoutTimestamp)
+ path.EndpointA.SendPacket(packet)
+ // need to update chainA's client representing chainB to prove missing ack
+ path.EndpointA.UpdateClient()
+
+ err := path.EndpointA.TimeoutPacket(packet)
+ suite.Require().NoError(err)
+ }, false},
{"channel not found", func() {
+ expError = types.ErrChannelNotFound
// use wrong channel naming
suite.coordinator.Setup(path)
packet = types.NewPacket(ibctesting.MockPacketData, 1, ibctesting.InvalidID, ibctesting.InvalidID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
}, false},
{"channel not open", func() {
+ expError = types.ErrInvalidChannelState
suite.coordinator.Setup(path)
packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
@@ -55,16 +89,19 @@ func (suite *KeeperTestSuite) TestTimeoutPacket() {
suite.Require().NoError(err)
}, false},
{"packet destination port ≠ channel counterparty port", func() {
+ expError = types.ErrInvalidPacket
suite.coordinator.Setup(path)
// use wrong port for dest
packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, ibctesting.InvalidID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
}, false},
{"packet destination channel ID ≠ channel counterparty channel ID", func() {
+ expError = types.ErrInvalidPacket
suite.coordinator.Setup(path)
// use wrong channel for dest
packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, ibctesting.InvalidID, timeoutHeight, disabledTimeoutTimestamp)
}, false},
{"connection not found", func() {
+ expError = connectiontypes.ErrConnectionNotFound
// pass channel check
suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetChannel(
suite.chainA.GetContext(),
@@ -74,12 +111,14 @@ func (suite *KeeperTestSuite) TestTimeoutPacket() {
packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
}, false},
{"timeout", func() {
+ expError = types.ErrPacketTimeout
suite.coordinator.Setup(path)
packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
path.EndpointA.SendPacket(packet)
path.EndpointA.UpdateClient()
}, false},
{"packet already received ", func() {
+ expError = types.ErrPacketReceived
ordered = true
path.SetChannelOrdered()
@@ -91,6 +130,7 @@ func (suite *KeeperTestSuite) TestTimeoutPacket() {
path.EndpointA.UpdateClient()
}, false},
{"packet hasn't been sent", func() {
+ expError = types.ErrPacketCommitmentNotFound
ordered = true
path.SetChannelOrdered()
@@ -99,6 +139,8 @@ func (suite *KeeperTestSuite) TestTimeoutPacket() {
path.EndpointA.UpdateClient()
}, false},
{"next seq receive verification failed", func() {
+ // skip error check, error occurs in light-clients
+
// set ordered to false resulting in wrong proof provided
ordered = false
@@ -110,6 +152,8 @@ func (suite *KeeperTestSuite) TestTimeoutPacket() {
path.EndpointA.UpdateClient()
}, false},
{"packet ack verification failed", func() {
+ // skip error check, error occurs in light-clients
+
// set ordered to true resulting in wrong proof provided
ordered = true
@@ -129,6 +173,7 @@ func (suite *KeeperTestSuite) TestTimeoutPacket() {
)
suite.SetupTest() // reset
+ expError = nil // must be explicitly changed by failed cases
nextSeqRecv = 1 // must be explicitly changed
path = ibctesting.NewPath(suite.chainA, suite.chainB)
@@ -149,6 +194,11 @@ func (suite *KeeperTestSuite) TestTimeoutPacket() {
suite.Require().NoError(err)
} else {
suite.Require().Error(err)
+ // only check if expError is set, since not all error codes can be known
+ if expError != nil {
+ suite.Require().True(errors.Is(err, expError))
+ }
+
}
})
}
diff --git a/modules/core/04-channel/types/errors.go b/modules/core/04-channel/types/errors.go
index 82cf7730..30293edd 100644
--- a/modules/core/04-channel/types/errors.go
+++ b/modules/core/04-channel/types/errors.go
@@ -21,8 +21,13 @@ var (
ErrPacketTimeout = sdkerrors.Register(SubModuleName, 14, "packet timeout")
ErrTooManyConnectionHops = sdkerrors.Register(SubModuleName, 15, "too many connection hops")
ErrInvalidAcknowledgement = sdkerrors.Register(SubModuleName, 16, "invalid acknowledgement")
- ErrPacketCommitmentNotFound = sdkerrors.Register(SubModuleName, 17, "packet commitment not found")
- ErrPacketReceived = sdkerrors.Register(SubModuleName, 18, "packet already received")
- ErrAcknowledgementExists = sdkerrors.Register(SubModuleName, 19, "acknowledgement for packet already exists")
- ErrInvalidChannelIdentifier = sdkerrors.Register(SubModuleName, 20, "invalid channel identifier")
+ ErrAcknowledgementExists = sdkerrors.Register(SubModuleName, 17, "acknowledgement for packet already exists")
+ ErrInvalidChannelIdentifier = sdkerrors.Register(SubModuleName, 18, "invalid channel identifier")
+
+ // packets already relayed errors
+ ErrPacketReceived = sdkerrors.Register(SubModuleName, 19, "packet already received")
+ ErrPacketCommitmentNotFound = sdkerrors.Register(SubModuleName, 20, "packet commitment not found") // may occur for already received acknowledgements or timeouts and in rare cases for packets never sent
+
+ // ORDERED channel error
+ ErrPacketSequenceOutOfOrder = sdkerrors.Register(SubModuleName, 21, "packet sequence is out of order")
)
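
Because the new codes are registered errors, callers (in particular relayers driving RecvPacket, AcknowledgePacket, and TimeoutPacket) can branch on them with errors.Is instead of parsing message strings, just as the updated tests do. A hedged sketch of such a classifier, using the module import path as it appears elsewhere in this patch:

```go
package main

import (
	"errors"
	"fmt"

	channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
)

// classify is a hypothetical relayer-side helper mapping the newly registered
// error codes to actions.
func classify(err error) string {
	switch {
	case err == nil:
		return "relayed"
	case errors.Is(err, channeltypes.ErrPacketReceived):
		return "already received, skip"
	case errors.Is(err, channeltypes.ErrPacketCommitmentNotFound):
		return "already acknowledged or timed out (or never sent), skip"
	case errors.Is(err, channeltypes.ErrPacketSequenceOutOfOrder):
		return "ordered channel, submit earlier sequences first"
	default:
		return fmt.Sprintf("unhandled: %v", err)
	}
}

func main() {
	fmt.Println(classify(channeltypes.ErrPacketReceived))
}
```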
diff --git a/modules/core/23-commitment/types/merkle.go b/modules/core/23-commitment/types/merkle.go
index 597a1ac9..72b3de47 100644
--- a/modules/core/23-commitment/types/merkle.go
+++ b/modules/core/23-commitment/types/merkle.go
@@ -253,8 +253,8 @@ func verifyChainedMembershipProof(root []byte, specs []*ics23.ProofSpec, proofs
value = subroot
case *ics23.CommitmentProof_Nonexist:
return sdkerrors.Wrapf(ErrInvalidProof,
- "chained membership proof contains nonexistence proof at index %d. If this is unexpected, please ensure that proof was queried from the height that contained the value in store and was queried with the correct key.",
- i)
+ "chained membership proof contains nonexistence proof at index %d. If this is unexpected, please ensure that proof was queried from a height that contained the value in store and was queried with the correct key. The key used: %s",
+ i, keys)
default:
return sdkerrors.Wrapf(ErrInvalidProof,
"expected proof type: %T, got: %T", &ics23.CommitmentProof_Exist{}, proofs[i].Proof)
diff --git a/modules/light-clients/07-tendermint/types/client_state.go b/modules/light-clients/07-tendermint/types/client_state.go
index 06bbb902..4884c679 100644
--- a/modules/light-clients/07-tendermint/types/client_state.go
+++ b/modules/light-clients/07-tendermint/types/client_state.go
@@ -545,7 +545,7 @@ func produceVerificationArgs(
if cs.GetLatestHeight().LT(height) {
return commitmenttypes.MerkleProof{}, nil, sdkerrors.Wrapf(
sdkerrors.ErrInvalidHeight,
- "client state height < proof height (%d < %d)", cs.GetLatestHeight(), height,
+ "client state height < proof height (%d < %d), please ensure the client has been updated", cs.GetLatestHeight(), height,
)
}
diff --git a/testing/endpoint.go b/testing/endpoint.go
index e32d0cdf..bc03c548 100644
--- a/testing/endpoint.go
+++ b/testing/endpoint.go
@@ -403,8 +403,6 @@ func (endpoint *Endpoint) WriteAcknowledgement(ack exported.Acknowledgement, pac
}
// AcknowledgePacket sends a MsgAcknowledgement to the channel associated with the endpoint.
-// TODO: add a query for the acknowledgement by events
-// - https://github.com/cosmos/cosmos-sdk/issues/6509
func (endpoint *Endpoint) AcknowledgePacket(packet channeltypes.Packet, ack []byte) error {
// get proof of acknowledgement on counterparty
packetKey := host.PacketAcknowledgementKey(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence())
@@ -415,6 +413,32 @@ func (endpoint *Endpoint) AcknowledgePacket(packet channeltypes.Packet, ack []by
return endpoint.Chain.sendMsgs(ackMsg)
}
+// TimeoutPacket sends a MsgTimeout to the channel associated with the endpoint.
+func (endpoint *Endpoint) TimeoutPacket(packet channeltypes.Packet) error {
+ // get proof for timeout based on channel order
+ var packetKey []byte
+
+ switch endpoint.ChannelConfig.Order {
+ case channeltypes.ORDERED:
+ packetKey = host.NextSequenceRecvKey(packet.GetDestPort(), packet.GetDestChannel())
+ case channeltypes.UNORDERED:
+ packetKey = host.PacketReceiptKey(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence())
+ default:
+ return fmt.Errorf("unsupported order type %s", endpoint.ChannelConfig.Order)
+ }
+
+ proof, proofHeight := endpoint.Counterparty.QueryProof(packetKey)
+ nextSeqRecv, found := endpoint.Counterparty.Chain.App.GetIBCKeeper().ChannelKeeper.GetNextSequenceRecv(endpoint.Counterparty.Chain.GetContext(), endpoint.ChannelConfig.PortID, endpoint.ChannelID)
+ require.True(endpoint.Chain.t, found)
+
+ timeoutMsg := channeltypes.NewMsgTimeout(
+ packet, nextSeqRecv,
+ proof, proofHeight, endpoint.Chain.SenderAccount.GetAddress().String(),
+ )
+
+ return endpoint.Chain.sendMsgs(timeoutMsg)
+}
+
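A brief usage sketch for the new TimeoutPacket helper above; it is not part of the patch, and it assumes a test where `path` and `packet` have already been set up and the packet's timeout height has passed on the counterparty chain.

// hypothetical ibctesting test flow (sketch only): refresh chain A's client of
// chain B so the absence proof can be verified, then relay the timeout to A.
suite.Require().NoError(path.EndpointA.UpdateClient())
suite.Require().NoError(path.EndpointA.TimeoutPacket(packet))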
// SetChannelClosed sets a channel state to CLOSED.
func (endpoint *Endpoint) SetChannelClosed() error {
channel := endpoint.GetChannel()
From c3f482086ad3591a12f113d78446fdf73cfc4a5c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?colin=20axn=C3=A9r?=
<25233464+colin-axner@users.noreply.github.com>
Date: Thu, 27 May 2021 12:50:57 +0200
Subject: [PATCH 069/393] simplify UpdateClient proposal (#181)
* simplify UpdateClient proposal to copy over latest consensus state
* fix tests and update godoc
* update docs
* code coverage
* changelog
* remove unused code
* set subject chain-id using substitute
* fix tests
* Update modules/core/02-client/keeper/proposal.go
Co-authored-by: Aditya
---
CHANGELOG.md | 1 +
.../adr-026-ibc-client-recovery-mechanisms.md | 6 +-
docs/ibc/proposals.md | 15 +-
docs/ibc/proto-docs.md | 9 +-
modules/core/02-client/client/cli/tx.go | 11 +-
modules/core/02-client/keeper/proposal.go | 23 ++-
.../core/02-client/keeper/proposal_test.go | 43 +++--
.../core/02-client/proposal_handler_test.go | 3 +-
modules/core/02-client/types/client.pb.go | 152 ++++++------------
modules/core/02-client/types/proposal.go | 7 +-
modules/core/02-client/types/proposal_test.go | 20 +--
modules/core/exported/client.go | 2 +-
.../06-solomachine/types/proposal_handle.go | 1 -
.../types/proposal_handle_test.go | 2 +-
.../07-tendermint/types/proposal_handle.go | 73 +++------
.../types/proposal_handle_test.go | 85 ++++------
.../07-tendermint/types/store.go | 13 +-
.../09-localhost/types/client_state.go | 2 +-
.../09-localhost/types/client_state_test.go | 2 +-
proto/ibc/core/client/v1/client.proto | 13 +-
20 files changed, 198 insertions(+), 285 deletions(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 99b12c62..5f7d0054 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -47,6 +47,7 @@ Ref: https://keepachangelog.com/en/1.0.0/
### API Breaking
+* (02-client) [\#181](https://github.com/cosmos/ibc-go/pull/181) Remove 'InitialHeight' from UpdateClient Proposal. Only copy over latest consensus state from substitute client.
* (06-solomachine) [\#169](https://github.com/cosmos/ibc-go/pull/169) Change FrozenSequence to boolean in solomachine ClientState. The solo machine proto package has been bumped from `v1` to `v2`.
* (module/core/02-client) [\#165](https://github.com/cosmos/ibc-go/pull/165) Remove GetFrozenHeight from the ClientState interface.
* (modules) [\#166](https://github.com/cosmos/ibc-go/pull/166) Remove GetHeight from the misbehaviour interface. The `consensus_height` attribute has been removed from Misbehaviour events.
diff --git a/docs/architecture/adr-026-ibc-client-recovery-mechanisms.md b/docs/architecture/adr-026-ibc-client-recovery-mechanisms.md
index 2e33bf58..c40e3b08 100644
--- a/docs/architecture/adr-026-ibc-client-recovery-mechanisms.md
+++ b/docs/architecture/adr-026-ibc-client-recovery-mechanisms.md
@@ -5,6 +5,7 @@
- 2020/06/23: Initial version
- 2020/08/06: Revisions per review & to reference version
- 2021/01/15: Revision to support substitute clients for unfreezing
+- 2021/05/20: Revision to simplify consensus state copying, remove initial height
## Status
@@ -42,11 +43,10 @@ We elect not to deal with chains which have actually halted, which is necessaril
1. Require Tendermint light clients (ICS 07) to expose the following additional state mutation functions
1. `Unfreeze()`, which unfreezes a light client after misbehaviour and clears any frozen height previously set
1. Add a new governance proposal type, `ClientUpdateProposal`, in the `x/ibc` module
- 1. Extend the base `Proposal` with two client identifiers (`string`) and an initial height ('exported.Height').
+ 1. Extend the base `Proposal` with two client identifiers (`string`).
1. The first client identifier is the proposed client to be updated. This client must be either frozen or expired.
1. The second client is a substitute client. It carries all the state for the client which may be updated. It must have identical client and chain parameters to the client which may be updated (except for latest height, frozen height, and chain-id). It should be continually updated during the voting period.
- 1. The initial height represents the starting height consensus states which will be copied from the substitute client to the frozen/expired client.
- 1. If this governance proposal passes, the client on trial will be updated with all the state of the substitute, if and only if:
+ 1. If this governance proposal passes, the client on trial will be updated to the latest state of the substitute, if and only if:
1. `allow_governance_override_after_expiry` is true and the client has expired (`Expired()` returns true)
1. `allow_governance_override_after_misbehaviour` is true and the client has been frozen (`Frozen()` returns true)
1. In this case, additionally, the client is unfrozen by calling `Unfreeze()`
diff --git a/docs/ibc/proposals.md b/docs/ibc/proposals.md
index 6bdf9f70..b3ce139c 100644
--- a/docs/ibc/proposals.md
+++ b/docs/ibc/proposals.md
@@ -32,11 +32,10 @@ ultimately lead the on-chain light client to become expired.
In the case that a highly valued light client is frozen, expired, or rendered non-updateable, a
governance proposal may be submitted to update this client, known as the subject client. The
-proposal includes the client identifier for the subject, the client identifier for a substitute
-client, and an initial height to reference the substitute client from. Light client implementations
-may implement custom updating logic, but in most cases, the subject will be updated with information
-from the substitute client, if the proposal passes. The substitute client is used as a "stand in"
-while the subject is on trial. It is best practice to create a substitute client *after* the subject
-has become frozen to avoid the substitute from also becoming frozen. An active substitute client
-allows headers to be submitted during the voting period to prevent accidental expiry once the proposal
-passes.
+proposal includes the client identifier for the subject and the client identifier for a substitute
+client. Light client implementations may implement custom updating logic, but in most cases,
+the subject will be updated to the latest consensus state of the substitute client, if the proposal passes.
+The substitute client is used as a "stand in" while the subject is on trial. It is best practice to create
+a substitute client *after* the subject has become frozen to prevent the substitute from also becoming frozen.
+An active substitute client allows headers to be submitted during the voting period to prevent accidental expiry
+once the proposal passes.
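For readers following the documentation change above, a short sketch of constructing the simplified proposal content in Go. The client identifiers and titles are placeholders, not taken from this patch, and the snippet assumes it runs inside a function returning an error.

// build the governance content with only the subject and substitute identifiers;
// the initial height argument no longer exists after this change.
content := clienttypes.NewClientUpdateProposal(
	"update expired client",                // title
	"substitute in a fresh, active client", // description
	"07-tendermint-0",                      // subject: the frozen or expired client
	"07-tendermint-1",                      // substitute: an active client of the same chain
)
if err := content.ValidateBasic(); err != nil {
	return err
}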
diff --git a/docs/ibc/proto-docs.md b/docs/ibc/proto-docs.md
index b098c732..701d4684 100644
--- a/docs/ibc/proto-docs.md
+++ b/docs/ibc/proto-docs.md
@@ -477,11 +477,9 @@ client.
### ClientUpdateProposal
ClientUpdateProposal is a governance proposal. If it passes, the substitute
-client's consensus states starting from the 'initial height' are copied over
-to the subjects client state. The proposal handler may fail if the subject
-and the substitute do not match in client and chain parameters (with
-exception to latest height, frozen height, and chain-id). The updated client
-must also be valid (cannot be expired).
+client's latest consensus state is copied over to the subject client. The proposal
+handler may fail if the subject and the substitute do not match in client and
+chain parameters (with exception to latest height, frozen height, and chain-id).
| Field | Type | Label | Description |
@@ -490,7 +488,6 @@ must also be valid (cannot be expired).
| `description` | [string](#string) | | the description of the proposal |
| `subject_client_id` | [string](#string) | | the client identifier for the client to be updated if the proposal passes |
| `substitute_client_id` | [string](#string) | | the substitute client identifier for the client standing in for the subject client |
-| `initial_height` | [Height](#ibc.core.client.v1.Height) | | the intital height to copy consensus states from the substitute to the subject |
diff --git a/modules/core/02-client/client/cli/tx.go b/modules/core/02-client/client/cli/tx.go
index f195f0fa..1e6c2145 100644
--- a/modules/core/02-client/client/cli/tx.go
+++ b/modules/core/02-client/client/cli/tx.go
@@ -237,12 +237,12 @@ func NewUpgradeClientCmd() *cobra.Command {
// NewCmdSubmitUpdateClientProposal implements a command handler for submitting an update IBC client proposal transaction.
func NewCmdSubmitUpdateClientProposal() *cobra.Command {
cmd := &cobra.Command{
- Use: "update-client [subject-client-id] [substitute-client-id] [initial-height] [flags]",
+ Use: "update-client [subject-client-id] [substitute-client-id] [flags]",
- Args: cobra.ExactArgs(3),
+ Args: cobra.ExactArgs(2),
Short: "Submit an update IBC client proposal",
Long: "Submit an update IBC client proposal along with an initial deposit.\n" +
"Please specify a subject client identifier you want to update..\n" +
- "Please specify the substitute client the subject client will use and the initial height to reference the substitute client's state.",
+ "Please specify the substitute client the subject client will be updated to.",
RunE: func(cmd *cobra.Command, args []string) error {
clientCtx, err := client.GetClientTxContext(cmd)
if err != nil {
@@ -262,12 +262,7 @@ func NewCmdSubmitUpdateClientProposal() *cobra.Command {
subjectClientID := args[0]
substituteClientID := args[1]
- initialHeight, err := types.ParseHeight(args[2])
- if err != nil {
- return err
- }
-
- content := types.NewClientUpdateProposal(title, description, subjectClientID, substituteClientID, initialHeight)
+ content := types.NewClientUpdateProposal(title, description, subjectClientID, substituteClientID)
from := clientCtx.GetFromAddress()
diff --git a/modules/core/02-client/keeper/proposal.go b/modules/core/02-client/keeper/proposal.go
index b381b26e..1880e2cd 100644
--- a/modules/core/02-client/keeper/proposal.go
+++ b/modules/core/02-client/keeper/proposal.go
@@ -11,14 +11,13 @@ import (
)
// ClientUpdateProposal will retrieve the subject and substitute client.
-// The initial height must be greater than the latest height of the subject
-// client. A callback will occur to the subject client state with the client
+// A callback will occur to the subject client state with the client
// prefixed store being provided for both the subject and the substitute client.
// The localhost client is not allowed to be modified with a proposal. The IBC
// client implementations are responsible for validating the parameters of the
// substitute (ensuring they match the subject's parameters) as well as copying
// the necessary consensus states from the substitute to the subject client
-// store.
+// store. The substitute must be Active and the subject must not be Active.
func (k Keeper) ClientUpdateProposal(ctx sdk.Context, p *types.ClientUpdateProposal) error {
if p.SubjectClientId == exported.Localhost || p.SubstituteClientId == exported.Localhost {
return sdkerrors.Wrap(types.ErrInvalidUpdateClientProposal, "cannot update localhost client with proposal")
@@ -29,8 +28,10 @@ func (k Keeper) ClientUpdateProposal(ctx sdk.Context, p *types.ClientUpdatePropo
return sdkerrors.Wrapf(types.ErrClientNotFound, "subject client with ID %s", p.SubjectClientId)
}
- if subjectClientState.GetLatestHeight().GTE(p.InitialHeight) {
- return sdkerrors.Wrapf(types.ErrInvalidHeight, "subject client state latest height is greater or equal to initial height (%s >= %s)", subjectClientState.GetLatestHeight(), p.InitialHeight)
+ subjectClientStore := k.ClientStore(ctx, p.SubjectClientId)
+
+ if status := subjectClientState.Status(ctx, subjectClientStore, k.cdc); status == exported.Active {
+ return sdkerrors.Wrap(types.ErrInvalidUpdateClientProposal, "cannot update Active subject client")
}
substituteClientState, found := k.GetClientState(ctx, p.SubstituteClientId)
@@ -38,7 +39,17 @@ func (k Keeper) ClientUpdateProposal(ctx sdk.Context, p *types.ClientUpdatePropo
return sdkerrors.Wrapf(types.ErrClientNotFound, "substitute client with ID %s", p.SubstituteClientId)
}
- clientState, err := subjectClientState.CheckSubstituteAndUpdateState(ctx, k.cdc, k.ClientStore(ctx, p.SubjectClientId), k.ClientStore(ctx, p.SubstituteClientId), substituteClientState, p.InitialHeight)
+ if subjectClientState.GetLatestHeight().GTE(substituteClientState.GetLatestHeight()) {
+ return sdkerrors.Wrapf(types.ErrInvalidHeight, "subject client state latest height is greater or equal to substitute client state latest height (%s >= %s)", subjectClientState.GetLatestHeight(), substituteClientState.GetLatestHeight())
+ }
+
+ substituteClientStore := k.ClientStore(ctx, p.SubstituteClientId)
+
+ if status := substituteClientState.Status(ctx, substituteClientStore, k.cdc); status != exported.Active {
+ return sdkerrors.Wrapf(types.ErrClientNotActive, "substitute client is not Active, status is %s", status)
+ }
+
+ clientState, err := subjectClientState.CheckSubstituteAndUpdateState(ctx, k.cdc, subjectClientStore, substituteClientStore, substituteClientState)
if err != nil {
return err
}
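The keeper method above is what ultimately executes when such a proposal passes. A hedged sketch of the x/gov handler glue follows; the function name and surrounding wiring are assumptions for illustration and are not quoted from this patch.

// on proposal passage, x/gov routes the content to the 02-client keeper
func handleClientUpdateProposal(ctx sdk.Context, k keeper.Keeper, content govtypes.Content) error {
	p, ok := content.(*types.ClientUpdateProposal)
	if !ok {
		return sdkerrors.Wrapf(sdkerrors.ErrUnknownRequest, "unrecognized client proposal content type: %T", content)
	}
	// performs the localhost, status, and height checks shown above, then
	// calls CheckSubstituteAndUpdateState on the subject client state
	return k.ClientUpdateProposal(ctx, p)
}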
diff --git a/modules/core/02-client/keeper/proposal_test.go b/modules/core/02-client/keeper/proposal_test.go
index cee39c3c..8a427065 100644
--- a/modules/core/02-client/keeper/proposal_test.go
+++ b/modules/core/02-client/keeper/proposal_test.go
@@ -13,7 +13,6 @@ func (suite *KeeperTestSuite) TestClientUpdateProposal() {
var (
subject, substitute string
subjectClientState, substituteClientState exported.ClientState
- initialHeight types.Height
content govtypes.Content
err error
)
@@ -25,7 +24,7 @@ func (suite *KeeperTestSuite) TestClientUpdateProposal() {
}{
{
"valid update client proposal", func() {
- content = types.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, subject, substitute, initialHeight)
+ content = types.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, subject, substitute)
}, true,
},
{
@@ -37,31 +36,43 @@ func (suite *KeeperTestSuite) TestClientUpdateProposal() {
newRevisionNumber := tmClientState.GetLatestHeight().GetRevisionNumber() + 1
tmClientState.LatestHeight = types.NewHeight(newRevisionNumber, tmClientState.GetLatestHeight().GetRevisionHeight())
- initialHeight = types.NewHeight(newRevisionNumber, initialHeight.GetRevisionHeight())
+
suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(suite.chainA.GetContext(), substitute, tmClientState.LatestHeight, consState)
+ clientStore := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), substitute)
+ ibctmtypes.SetProcessedTime(clientStore, tmClientState.LatestHeight, 100)
+ ibctmtypes.SetProcessedHeight(clientStore, tmClientState.LatestHeight, types.NewHeight(0, 1))
suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(suite.chainA.GetContext(), substitute, tmClientState)
- content = types.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, subject, substitute, initialHeight)
+ content = types.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, subject, substitute)
}, true,
},
{
"cannot use localhost as subject", func() {
- content = types.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, exported.Localhost, substitute, initialHeight)
+ content = types.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, exported.Localhost, substitute)
}, false,
},
{
"cannot use localhost as substitute", func() {
- content = types.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, subject, exported.Localhost, initialHeight)
+ content = types.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, subject, exported.Localhost)
+ }, false,
+ },
+ {
+ "cannot use solomachine as substitute for tendermint client", func() {
+ solomachine := ibctesting.NewSolomachine(suite.T(), suite.cdc, "solo machine", "", 1)
+ solomachine.Sequence = subjectClientState.GetLatestHeight().GetRevisionHeight() + 1
+ substituteClientState = solomachine.ClientState()
+ suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(suite.chainA.GetContext(), substitute, substituteClientState)
+ content = types.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, subject, substitute)
}, false,
},
{
"subject client does not exist", func() {
- content = types.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, ibctesting.InvalidID, substitute, initialHeight)
+ content = types.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, ibctesting.InvalidID, substitute)
}, false,
},
{
"substitute client does not exist", func() {
- content = types.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, subject, ibctesting.InvalidID, initialHeight)
+ content = types.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, subject, ibctesting.InvalidID)
}, false,
},
{
@@ -71,7 +82,7 @@ func (suite *KeeperTestSuite) TestClientUpdateProposal() {
tmClientState.LatestHeight = substituteClientState.GetLatestHeight().(types.Height)
suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(suite.chainA.GetContext(), subject, tmClientState)
- content = types.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, subject, substitute, initialHeight)
+ content = types.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, subject, substitute)
}, false,
},
{
@@ -81,7 +92,17 @@ func (suite *KeeperTestSuite) TestClientUpdateProposal() {
tmClientState.FrozenHeight = types.ZeroHeight()
suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(suite.chainA.GetContext(), subject, tmClientState)
- content = types.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, subject, substitute, initialHeight)
+ content = types.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, subject, substitute)
+ }, false,
+ },
+ {
+ "substitute is frozen", func() {
+ tmClientState, ok := substituteClientState.(*ibctmtypes.ClientState)
+ suite.Require().True(ok)
+ tmClientState.FrozenHeight = types.NewHeight(0, 1)
+ suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(suite.chainA.GetContext(), substitute, tmClientState)
+
+ content = types.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, subject, substitute)
}, false,
},
}
@@ -100,7 +121,6 @@ func (suite *KeeperTestSuite) TestClientUpdateProposal() {
substitutePath := ibctesting.NewPath(suite.chainA, suite.chainB)
suite.coordinator.SetupClients(substitutePath)
substitute = substitutePath.EndpointA.ClientID
- initialHeight = types.NewHeight(subjectClientState.GetLatestHeight().GetRevisionNumber(), subjectClientState.GetLatestHeight().GetRevisionHeight()+1)
// update substitute twice
substitutePath.EndpointA.UpdateClient()
@@ -118,7 +138,6 @@ func (suite *KeeperTestSuite) TestClientUpdateProposal() {
suite.Require().True(ok)
tmClientState.AllowUpdateAfterMisbehaviour = true
tmClientState.AllowUpdateAfterExpiry = true
- tmClientState.FrozenHeight = tmClientState.LatestHeight
suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(suite.chainA.GetContext(), substitute, tmClientState)
tc.malleate()
diff --git a/modules/core/02-client/proposal_handler_test.go b/modules/core/02-client/proposal_handler_test.go
index 2c83b95b..ea4eb31b 100644
--- a/modules/core/02-client/proposal_handler_test.go
+++ b/modules/core/02-client/proposal_handler_test.go
@@ -29,7 +29,6 @@ func (suite *ClientTestSuite) TestNewClientUpdateProposalHandler() {
substitutePath := ibctesting.NewPath(suite.chainA, suite.chainB)
suite.coordinator.SetupClients(substitutePath)
- initialHeight := clienttypes.NewHeight(subjectClientState.GetLatestHeight().GetRevisionNumber(), subjectClientState.GetLatestHeight().GetRevisionHeight()+1)
// update substitute twice
err = substitutePath.EndpointA.UpdateClient()
@@ -50,7 +49,7 @@ func (suite *ClientTestSuite) TestNewClientUpdateProposalHandler() {
tmClientState.AllowUpdateAfterMisbehaviour = true
suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(suite.chainA.GetContext(), substitutePath.EndpointA.ClientID, tmClientState)
- content = clienttypes.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, subjectPath.EndpointA.ClientID, substitutePath.EndpointA.ClientID, initialHeight)
+ content = clienttypes.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, subjectPath.EndpointA.ClientID, substitutePath.EndpointA.ClientID)
}, true,
},
{
diff --git a/modules/core/02-client/types/client.pb.go b/modules/core/02-client/types/client.pb.go
index 99f5fe17..bb251b6e 100644
--- a/modules/core/02-client/types/client.pb.go
+++ b/modules/core/02-client/types/client.pb.go
@@ -194,11 +194,9 @@ func (m *ClientConsensusStates) GetConsensusStates() []ConsensusStateWithHeight
}
// ClientUpdateProposal is a governance proposal. If it passes, the substitute
-// client's consensus states starting from the 'initial height' are copied over
-// to the subjects client state. The proposal handler may fail if the subject
-// and the substitute do not match in client and chain parameters (with
-// exception to latest height, frozen height, and chain-id). The updated client
-// must also be valid (cannot be expired).
+// client's latest consensus state is copied over to the subject client. The proposal
+// handler may fail if the subject and the substitute do not match in client and
+// chain parameters (with exception to latest height, frozen height, and chain-id).
type ClientUpdateProposal struct {
// the title of the update proposal
Title string `protobuf:"bytes,1,opt,name=title,proto3" json:"title,omitempty"`
@@ -208,10 +206,7 @@ type ClientUpdateProposal struct {
SubjectClientId string `protobuf:"bytes,3,opt,name=subject_client_id,json=subjectClientId,proto3" json:"subject_client_id,omitempty" yaml:"subject_client_id"`
// the substitute client identifier for the client standing in for the subject
// client
- SubstituteClientId string `protobuf:"bytes,4,opt,name=substitute_client_id,json=substituteClientId,proto3" json:"substitute_client_id,omitempty" yaml:"susbtitute_client_id"`
- // the intital height to copy consensus states from the substitute to the
- // subject
- InitialHeight Height `protobuf:"bytes,5,opt,name=initial_height,json=initialHeight,proto3" json:"initial_height" yaml:"initial_height"`
+ SubstituteClientId string `protobuf:"bytes,4,opt,name=substitute_client_id,json=substituteClientId,proto3" json:"substitute_client_id,omitempty" yaml:"substitute_client_id"`
}
func (m *ClientUpdateProposal) Reset() { *m = ClientUpdateProposal{} }
@@ -402,54 +397,52 @@ func init() {
func init() { proto.RegisterFile("ibc/core/client/v1/client.proto", fileDescriptor_b6bc4c8185546947) }
var fileDescriptor_b6bc4c8185546947 = []byte{
- // 744 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0x3d, 0x73, 0xd3, 0x4a,
- 0x14, 0xb5, 0x1c, 0xc7, 0x13, 0xaf, 0xf3, 0xec, 0x3c, 0xc5, 0x7e, 0x71, 0xfc, 0xf2, 0x2c, 0xcf,
- 0xce, 0x2b, 0x5c, 0x10, 0x09, 0x9b, 0x81, 0x61, 0xd2, 0x61, 0x37, 0x49, 0x01, 0x18, 0x31, 0x19,
- 0x18, 0x1a, 0xa3, 0x8f, 0x8d, 0xbc, 0x19, 0x59, 0xeb, 0xd1, 0xae, 0x0c, 0xfe, 0x07, 0x94, 0x94,
- 0x14, 0x14, 0xf9, 0x05, 0xfc, 0x0a, 0x8a, 0x94, 0x99, 0xa1, 0xa1, 0xd2, 0x30, 0x49, 0x43, 0xad,
- 0x96, 0x86, 0x91, 0x76, 0xe5, 0xd8, 0x4e, 0x02, 0x0c, 0x74, 0xab, 0xbb, 0xe7, 0x9e, 0x7b, 0xcf,
- 0xd9, 0xbd, 0x2b, 0xa0, 0x60, 0xd3, 0xd2, 0x2c, 0xe2, 0x23, 0xcd, 0x72, 0x31, 0xf2, 0x98, 0x36,
- 0x69, 0x8b, 0x95, 0x3a, 0xf6, 0x09, 0x23, 0xb2, 0x8c, 0x4d, 0x4b, 0x8d, 0x01, 0xaa, 0x08, 0x4f,
- 0xda, 0xf5, 0x8a, 0x43, 0x1c, 0x92, 0x6c, 0x6b, 0xf1, 0x8a, 0x23, 0xeb, 0xdb, 0x0e, 0x21, 0x8e,
- 0x8b, 0xb4, 0xe4, 0xcb, 0x0c, 0x8e, 0x34, 0xc3, 0x9b, 0x8a, 0xad, 0xff, 0x2d, 0x42, 0x47, 0x84,
- 0x6a, 0xc1, 0xd8, 0xf1, 0x0d, 0x1b, 0x69, 0x93, 0xb6, 0x89, 0x98, 0xd1, 0x4e, 0xbf, 0x39, 0x0a,
- 0xbe, 0x97, 0x40, 0xf5, 0xc0, 0x46, 0x1e, 0xc3, 0x47, 0x18, 0xd9, 0xbd, 0xa4, 0xdc, 0x53, 0x66,
- 0x30, 0x24, 0xb7, 0x41, 0x81, 0x57, 0x1f, 0x60, 0xbb, 0x26, 0x35, 0xa5, 0x56, 0xa1, 0x5b, 0x89,
- 0x42, 0x65, 0x63, 0x6a, 0x8c, 0xdc, 0x3d, 0x38, 0xdb, 0x82, 0xfa, 0x1a, 0x5f, 0x1f, 0xd8, 0x72,
- 0x1f, 0xac, 0x8b, 0x38, 0x8d, 0x29, 0x6a, 0xd9, 0xa6, 0xd4, 0x2a, 0x76, 0x2a, 0x2a, 0x6f, 0x52,
- 0x4d, 0x9b, 0x54, 0x1f, 0x78, 0xd3, 0xee, 0x56, 0x14, 0x2a, 0x9b, 0x0b, 0x5c, 0x49, 0x0e, 0xd4,
- 0x8b, 0xd6, 0x65, 0x13, 0xf0, 0x83, 0x04, 0x6a, 0x3d, 0xe2, 0x51, 0xe4, 0xd1, 0x80, 0x26, 0xa1,
- 0x67, 0x98, 0x0d, 0xf7, 0x11, 0x76, 0x86, 0x4c, 0xbe, 0x0f, 0xf2, 0xc3, 0x64, 0x95, 0xb4, 0x57,
- 0xec, 0xd4, 0xd5, 0xab, 0xbe, 0xa9, 0x1c, 0xdb, 0xcd, 0x9d, 0x86, 0x4a, 0x46, 0x17, 0x78, 0xf9,
- 0x39, 0x28, 0x5b, 0x29, 0xeb, 0x2f, 0xf4, 0xba, 0x1d, 0x85, 0x4a, 0x35, 0xee, 0x15, 0x2e, 0x65,
- 0x41, 0xbd, 0x64, 0x2d, 0x74, 0x07, 0x3f, 0x4a, 0xa0, 0xca, 0x5d, 0x5c, 0x6c, 0x9b, 0xfe, 0x8e,
- 0x9f, 0xaf, 0xc1, 0xc6, 0x52, 0x41, 0x5a, 0xcb, 0x36, 0x57, 0x5a, 0xc5, 0xce, 0xad, 0xeb, 0xa4,
- 0xde, 0x64, 0x54, 0x57, 0x89, 0xc5, 0x47, 0xa1, 0xb2, 0x25, 0x6a, 0x2d, 0x71, 0x42, 0xbd, 0xbc,
- 0xa8, 0x82, 0xc2, 0x4f, 0x59, 0x50, 0xe1, 0x32, 0x0e, 0xc7, 0xb6, 0xc1, 0x50, 0xdf, 0x27, 0x63,
- 0x42, 0x0d, 0x57, 0xae, 0x80, 0x55, 0x86, 0x99, 0x8b, 0xb8, 0x02, 0x9d, 0x7f, 0xc8, 0x4d, 0x50,
- 0xb4, 0x11, 0xb5, 0x7c, 0x3c, 0x66, 0x98, 0x78, 0x89, 0x97, 0x05, 0x7d, 0x3e, 0x24, 0xef, 0x83,
- 0xbf, 0x69, 0x60, 0x1e, 0x23, 0x8b, 0x0d, 0x2e, 0x5d, 0x58, 0x49, 0x5c, 0xd8, 0x89, 0x42, 0xa5,
- 0xc6, 0x3b, 0xbb, 0x02, 0x81, 0x7a, 0x59, 0xc4, 0x7a, 0xa9, 0x29, 0x4f, 0x40, 0x85, 0x06, 0x26,
- 0x65, 0x98, 0x05, 0x0c, 0xcd, 0x91, 0xe5, 0x12, 0x32, 0x25, 0x0a, 0x95, 0x7f, 0x53, 0x32, 0x6a,
- 0x2e, 0xa3, 0xa0, 0x2e, 0x5f, 0x26, 0xcf, 0x28, 0x5f, 0x82, 0x12, 0xf6, 0x30, 0xc3, 0x86, 0x3b,
- 0x10, 0x17, 0x6a, 0xf5, 0xa7, 0x17, 0xea, 0x3f, 0xe1, 0x69, 0x95, 0x17, 0x5b, 0xcc, 0x87, 0xfa,
- 0x5f, 0x22, 0xc0, 0xd1, 0x7b, 0xb9, 0x37, 0x27, 0x4a, 0x06, 0x7e, 0x93, 0x40, 0xf9, 0x90, 0x8f,
- 0xdf, 0x1f, 0x1b, 0x7a, 0x0f, 0xe4, 0xc6, 0xae, 0xe1, 0x25, 0x1e, 0x16, 0x3b, 0x3b, 0x2a, 0x9f,
- 0x76, 0x35, 0x9d, 0x6e, 0x31, 0xed, 0x6a, 0xdf, 0x35, 0x3c, 0x71, 0xf9, 0x13, 0xbc, 0x7c, 0x0c,
- 0xaa, 0x02, 0x63, 0x0f, 0x16, 0x86, 0x35, 0xf7, 0x83, 0x01, 0x68, 0x46, 0xa1, 0xb2, 0xc3, 0x85,
- 0x5e, 0x9b, 0x0c, 0xf5, 0xcd, 0x34, 0x3e, 0xf7, 0x84, 0xec, 0xad, 0xc7, 0xaa, 0xdf, 0x9d, 0x28,
- 0x99, 0xaf, 0x27, 0x8a, 0x14, 0x3f, 0x35, 0x79, 0x31, 0xb9, 0x3d, 0x50, 0xf6, 0xd1, 0x04, 0x53,
- 0x4c, 0xbc, 0x81, 0x17, 0x8c, 0x4c, 0xe4, 0x27, 0xf2, 0x73, 0xdd, 0x7a, 0x14, 0x2a, 0xff, 0xf0,
- 0x42, 0x4b, 0x00, 0xa8, 0x97, 0xd2, 0xc8, 0xa3, 0x24, 0xb0, 0x40, 0x22, 0x8e, 0x2d, 0x7b, 0x23,
- 0x49, 0x7a, 0x2e, 0x33, 0x12, 0x71, 0x30, 0x6b, 0x69, 0x8b, 0xf0, 0x21, 0xc8, 0xf7, 0x0d, 0xdf,
- 0x18, 0xd1, 0x98, 0xd8, 0x70, 0x5d, 0xf2, 0x6a, 0x26, 0x92, 0xd6, 0xa4, 0xe6, 0x4a, 0xab, 0x30,
- 0x4f, 0xbc, 0x04, 0x80, 0x7a, 0x49, 0x44, 0xb8, 0x7e, 0xda, 0x7d, 0x7c, 0x7a, 0xde, 0x90, 0xce,
- 0xce, 0x1b, 0xd2, 0x97, 0xf3, 0x86, 0xf4, 0xf6, 0xa2, 0x91, 0x39, 0xbb, 0x68, 0x64, 0x3e, 0x5f,
- 0x34, 0x32, 0x2f, 0xee, 0x3a, 0x98, 0x0d, 0x03, 0x53, 0xb5, 0xc8, 0x48, 0x13, 0x6f, 0x34, 0x36,
- 0xad, 0x5d, 0x87, 0x68, 0x23, 0x62, 0x07, 0x2e, 0xa2, 0xfc, 0xdf, 0x70, 0xbb, 0xb3, 0x2b, 0x7e,
- 0x0f, 0x6c, 0x3a, 0x46, 0xd4, 0xcc, 0x27, 0x27, 0x72, 0xe7, 0x7b, 0x00, 0x00, 0x00, 0xff, 0xff,
- 0xca, 0x6e, 0xea, 0xc6, 0x3e, 0x06, 0x00, 0x00,
+ // 710 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0x3f, 0x6f, 0xd3, 0x4c,
+ 0x1c, 0x8e, 0xdb, 0xbc, 0x51, 0x73, 0xa9, 0x9a, 0xbe, 0x6e, 0xf2, 0x36, 0xcd, 0x5b, 0xc5, 0xd1,
+ 0x89, 0x21, 0x03, 0xb5, 0x49, 0x10, 0x08, 0x65, 0x23, 0x59, 0xda, 0x01, 0x08, 0x46, 0x15, 0x88,
+ 0x25, 0xf2, 0x9f, 0xab, 0x73, 0x95, 0xe3, 0x8b, 0x7c, 0xe7, 0x40, 0xbe, 0x01, 0x23, 0x23, 0x03,
+ 0x43, 0x3f, 0x01, 0x9f, 0x82, 0xa1, 0x63, 0x47, 0x26, 0x0b, 0xb5, 0x0b, 0x2b, 0x5e, 0x59, 0x90,
+ 0xef, 0xce, 0x6d, 0x92, 0xb6, 0x08, 0xc1, 0x76, 0xf7, 0xdc, 0x73, 0xcf, 0xef, 0x79, 0x7e, 0xf6,
+ 0xef, 0x80, 0x86, 0x6d, 0xc7, 0x70, 0x48, 0x88, 0x0c, 0xc7, 0xc7, 0x28, 0x60, 0xc6, 0xb4, 0x2d,
+ 0x57, 0xfa, 0x24, 0x24, 0x8c, 0xa8, 0x2a, 0xb6, 0x1d, 0x3d, 0x25, 0xe8, 0x12, 0x9e, 0xb6, 0xeb,
+ 0x15, 0x8f, 0x78, 0x84, 0x1f, 0x1b, 0xe9, 0x4a, 0x30, 0xeb, 0x3b, 0x1e, 0x21, 0x9e, 0x8f, 0x0c,
+ 0xbe, 0xb3, 0xa3, 0x23, 0xc3, 0x0a, 0x66, 0xf2, 0xe8, 0x8e, 0x43, 0xe8, 0x98, 0x50, 0x23, 0x9a,
+ 0x78, 0xa1, 0xe5, 0x22, 0x63, 0xda, 0xb6, 0x11, 0xb3, 0xda, 0xd9, 0x5e, 0xb0, 0xe0, 0x47, 0x05,
+ 0x54, 0x0f, 0x5c, 0x14, 0x30, 0x7c, 0x84, 0x91, 0xdb, 0xe7, 0xe5, 0x5e, 0x30, 0x8b, 0x21, 0xb5,
+ 0x0d, 0x8a, 0xa2, 0xfa, 0x10, 0xbb, 0x35, 0xa5, 0xa9, 0xb4, 0x8a, 0xbd, 0x4a, 0x12, 0x6b, 0x9b,
+ 0x33, 0x6b, 0xec, 0x77, 0xe1, 0xe5, 0x11, 0x34, 0xd7, 0xc4, 0xfa, 0xc0, 0x55, 0x07, 0x60, 0x5d,
+ 0xe2, 0x34, 0x95, 0xa8, 0xad, 0x34, 0x95, 0x56, 0xa9, 0x53, 0xd1, 0x85, 0x49, 0x3d, 0x33, 0xa9,
+ 0x3f, 0x0e, 0x66, 0xbd, 0xed, 0x24, 0xd6, 0xb6, 0x16, 0xb4, 0xf8, 0x1d, 0x68, 0x96, 0x9c, 0x2b,
+ 0x13, 0xf0, 0x93, 0x02, 0x6a, 0x7d, 0x12, 0x50, 0x14, 0xd0, 0x88, 0x72, 0xe8, 0x25, 0x66, 0xa3,
+ 0x7d, 0x84, 0xbd, 0x11, 0x53, 0x1f, 0x81, 0xc2, 0x88, 0xaf, 0xb8, 0xbd, 0x52, 0xa7, 0xae, 0x5f,
+ 0xef, 0x9b, 0x2e, 0xb8, 0xbd, 0xfc, 0x69, 0xac, 0xe5, 0x4c, 0xc9, 0x57, 0x5f, 0x81, 0xb2, 0x93,
+ 0xa9, 0xfe, 0x86, 0xd7, 0x9d, 0x24, 0xd6, 0xaa, 0xa9, 0x57, 0xb8, 0x74, 0x0b, 0x9a, 0x1b, 0xce,
+ 0x82, 0x3b, 0xf8, 0x59, 0x01, 0x55, 0xd1, 0xc5, 0x45, 0xdb, 0xf4, 0x4f, 0xfa, 0xf9, 0x16, 0x6c,
+ 0x2e, 0x15, 0xa4, 0xb5, 0x95, 0xe6, 0x6a, 0xab, 0xd4, 0xb9, 0x7b, 0x53, 0xd4, 0xdb, 0x1a, 0xd5,
+ 0xd3, 0xd2, 0xf0, 0x49, 0xac, 0x6d, 0xcb, 0x5a, 0x4b, 0x9a, 0xd0, 0x2c, 0x2f, 0xa6, 0xa0, 0xf0,
+ 0xbb, 0x02, 0x2a, 0x22, 0xc6, 0xe1, 0xc4, 0xb5, 0x18, 0x1a, 0x84, 0x64, 0x42, 0xa8, 0xe5, 0xab,
+ 0x15, 0xf0, 0x0f, 0xc3, 0xcc, 0x47, 0x22, 0x81, 0x29, 0x36, 0x6a, 0x13, 0x94, 0x5c, 0x44, 0x9d,
+ 0x10, 0x4f, 0x18, 0x26, 0x01, 0xef, 0x65, 0xd1, 0x9c, 0x87, 0xd4, 0x7d, 0xf0, 0x2f, 0x8d, 0xec,
+ 0x63, 0xe4, 0xb0, 0xe1, 0x55, 0x17, 0x56, 0x79, 0x17, 0x76, 0x93, 0x58, 0xab, 0x09, 0x67, 0xd7,
+ 0x28, 0xd0, 0x2c, 0x4b, 0xac, 0x9f, 0x35, 0xe5, 0x39, 0xa8, 0xd0, 0xc8, 0xa6, 0x0c, 0xb3, 0x88,
+ 0xa1, 0x39, 0xb1, 0x3c, 0x17, 0xd3, 0x92, 0x58, 0xfb, 0xff, 0x52, 0xec, 0x1a, 0x0b, 0x9a, 0xea,
+ 0x15, 0x9c, 0x49, 0x76, 0xf3, 0xef, 0x4e, 0xb4, 0x1c, 0xfc, 0xa1, 0x80, 0xf2, 0xa1, 0x18, 0x8e,
+ 0xbf, 0x8e, 0xfb, 0x10, 0xe4, 0x27, 0xbe, 0x15, 0xf0, 0x84, 0xa5, 0xce, 0xae, 0x2e, 0x66, 0x51,
+ 0xcf, 0x66, 0x4f, 0xce, 0xa2, 0x3e, 0xf0, 0xad, 0x40, 0xfe, 0x9a, 0x9c, 0xaf, 0x1e, 0x83, 0xaa,
+ 0xe4, 0xb8, 0xc3, 0x85, 0x51, 0xca, 0xff, 0xe2, 0xf7, 0x6c, 0x26, 0xb1, 0xb6, 0x2b, 0x32, 0xdf,
+ 0x78, 0x19, 0x9a, 0x5b, 0x19, 0x3e, 0x37, 0xe0, 0xdd, 0xf5, 0x34, 0xf5, 0x87, 0x13, 0x2d, 0xf7,
+ 0xed, 0x44, 0x53, 0xd2, 0x87, 0xa0, 0x20, 0xe7, 0xaa, 0x0f, 0xca, 0x21, 0x9a, 0x62, 0x8a, 0x49,
+ 0x30, 0x0c, 0xa2, 0xb1, 0x8d, 0x42, 0x1e, 0x3f, 0xdf, 0xab, 0x27, 0xb1, 0xf6, 0x9f, 0x28, 0xb4,
+ 0x44, 0x80, 0xe6, 0x46, 0x86, 0x3c, 0xe5, 0xc0, 0x82, 0x88, 0x9c, 0xd2, 0x95, 0x5b, 0x45, 0x04,
+ 0x61, 0x4e, 0x44, 0x38, 0xe9, 0xae, 0x65, 0x16, 0xe1, 0x13, 0x50, 0x18, 0x58, 0xa1, 0x35, 0xa6,
+ 0xa9, 0xb0, 0xe5, 0xfb, 0xe4, 0xcd, 0x65, 0x48, 0x5a, 0x53, 0x9a, 0xab, 0xad, 0xe2, 0xbc, 0xf0,
+ 0x12, 0x01, 0x9a, 0x1b, 0x12, 0x11, 0xf9, 0x69, 0xef, 0xd9, 0xe9, 0x79, 0x43, 0x39, 0x3b, 0x6f,
+ 0x28, 0x5f, 0xcf, 0x1b, 0xca, 0xfb, 0x8b, 0x46, 0xee, 0xec, 0xa2, 0x91, 0xfb, 0x72, 0xd1, 0xc8,
+ 0xbd, 0x7e, 0xe0, 0x61, 0x36, 0x8a, 0x6c, 0xdd, 0x21, 0x63, 0x43, 0xbe, 0xa0, 0xd8, 0x76, 0xf6,
+ 0x3c, 0x62, 0x8c, 0x89, 0x1b, 0xf9, 0x88, 0x8a, 0x97, 0xfb, 0x5e, 0x67, 0x4f, 0x3e, 0xde, 0x6c,
+ 0x36, 0x41, 0xd4, 0x2e, 0xf0, 0x2f, 0x72, 0xff, 0x67, 0x00, 0x00, 0x00, 0xff, 0xff, 0x67, 0xcd,
+ 0xb5, 0xb7, 0xdc, 0x05, 0x00, 0x00,
}
func (this *UpgradeProposal) Equal(that interface{}) bool {
@@ -636,16 +629,6 @@ func (m *ClientUpdateProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
- {
- size, err := m.InitialHeight.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintClient(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x2a
if len(m.SubstituteClientId) > 0 {
i -= len(m.SubstituteClientId)
copy(dAtA[i:], m.SubstituteClientId)
@@ -885,8 +868,6 @@ func (m *ClientUpdateProposal) Size() (n int) {
if l > 0 {
n += 1 + l + sovClient(uint64(l))
}
- l = m.InitialHeight.Size()
- n += 1 + l + sovClient(uint64(l))
return n
}
@@ -1459,39 +1440,6 @@ func (m *ClientUpdateProposal) Unmarshal(dAtA []byte) error {
}
m.SubstituteClientId = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
- case 5:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field InitialHeight", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowClient
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthClient
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthClient
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.InitialHeight.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipClient(dAtA[iNdEx:])
diff --git a/modules/core/02-client/types/proposal.go b/modules/core/02-client/types/proposal.go
index 61234228..bf5ac73e 100644
--- a/modules/core/02-client/types/proposal.go
+++ b/modules/core/02-client/types/proposal.go
@@ -28,13 +28,12 @@ func init() {
}
// NewClientUpdateProposal creates a new client update proposal.
-func NewClientUpdateProposal(title, description, subjectClientID, substituteClientID string, initialHeight Height) govtypes.Content {
+func NewClientUpdateProposal(title, description, subjectClientID, substituteClientID string) govtypes.Content {
return &ClientUpdateProposal{
Title: title,
Description: description,
SubjectClientId: subjectClientID,
SubstituteClientId: substituteClientID,
- InitialHeight: initialHeight,
}
}
@@ -67,10 +66,6 @@ func (cup *ClientUpdateProposal) ValidateBasic() error {
return err
}
- if cup.InitialHeight.IsZero() {
- return sdkerrors.Wrap(ErrInvalidHeight, "initial height cannot be zero height")
- }
-
return nil
}
diff --git a/modules/core/02-client/types/proposal_test.go b/modules/core/02-client/types/proposal_test.go
index 56d6103c..32521d8c 100644
--- a/modules/core/02-client/types/proposal_test.go
+++ b/modules/core/02-client/types/proposal_test.go
@@ -17,14 +17,11 @@ func (suite *TypesTestSuite) TestValidateBasic() {
subjectPath := ibctesting.NewPath(suite.chainA, suite.chainB)
suite.coordinator.SetupClients(subjectPath)
subject := subjectPath.EndpointA.ClientID
- subjectClientState := suite.chainA.GetClientState(subject)
substitutePath := ibctesting.NewPath(suite.chainA, suite.chainB)
suite.coordinator.SetupClients(substitutePath)
substitute := substitutePath.EndpointA.ClientID
- initialHeight := types.NewHeight(subjectClientState.GetLatestHeight().GetRevisionNumber(), subjectClientState.GetLatestHeight().GetRevisionHeight()+1)
-
testCases := []struct {
name string
proposal govtypes.Content
@@ -32,32 +29,27 @@ func (suite *TypesTestSuite) TestValidateBasic() {
}{
{
"success",
- types.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, subject, substitute, initialHeight),
+ types.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, subject, substitute),
true,
},
{
"fails validate abstract - empty title",
- types.NewClientUpdateProposal("", ibctesting.Description, subject, substitute, initialHeight),
+ types.NewClientUpdateProposal("", ibctesting.Description, subject, substitute),
false,
},
{
"subject and substitute use the same identifier",
- types.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, subject, subject, initialHeight),
+ types.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, subject, subject),
false,
},
{
"invalid subject clientID",
- types.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, ibctesting.InvalidID, substitute, initialHeight),
+ types.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, ibctesting.InvalidID, substitute),
false,
},
{
"invalid substitute clientID",
- types.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, subject, ibctesting.InvalidID, initialHeight),
- false,
- },
- {
- "initial height is zero",
- types.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, subject, substitute, types.ZeroHeight()),
+ types.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, subject, ibctesting.InvalidID),
false,
},
}
@@ -77,7 +69,7 @@ func (suite *TypesTestSuite) TestValidateBasic() {
// tests a client update proposal can be marshaled and unmarshaled
func (suite *TypesTestSuite) TestMarshalClientUpdateProposalProposal() {
// create proposal
- proposal := types.NewClientUpdateProposal("update IBC client", "description", "subject", "substitute", types.NewHeight(1, 0))
+ proposal := types.NewClientUpdateProposal("update IBC client", "description", "subject", "substitute")
// create codec
ir := codectypes.NewInterfaceRegistry()
diff --git a/modules/core/exported/client.go b/modules/core/exported/client.go
index 8de7976c..1578900a 100644
--- a/modules/core/exported/client.go
+++ b/modules/core/exported/client.go
@@ -63,7 +63,7 @@ type ClientState interface {
CheckHeaderAndUpdateState(sdk.Context, codec.BinaryCodec, sdk.KVStore, Header) (ClientState, ConsensusState, error)
CheckMisbehaviourAndUpdateState(sdk.Context, codec.BinaryCodec, sdk.KVStore, Misbehaviour) (ClientState, error)
- CheckSubstituteAndUpdateState(ctx sdk.Context, cdc codec.BinaryCodec, subjectClientStore, substituteClientStore sdk.KVStore, substituteClient ClientState, height Height) (ClientState, error)
+ CheckSubstituteAndUpdateState(ctx sdk.Context, cdc codec.BinaryCodec, subjectClientStore, substituteClientStore sdk.KVStore, substituteClient ClientState) (ClientState, error)
// Upgrade functions
// NOTE: proof heights are not included as upgrade to a new revision is expected to pass only on the last
diff --git a/modules/light-clients/06-solomachine/types/proposal_handle.go b/modules/light-clients/06-solomachine/types/proposal_handle.go
index a4a89006..b4dab1e6 100644
--- a/modules/light-clients/06-solomachine/types/proposal_handle.go
+++ b/modules/light-clients/06-solomachine/types/proposal_handle.go
@@ -20,7 +20,6 @@ import (
func (cs ClientState) CheckSubstituteAndUpdateState(
ctx sdk.Context, cdc codec.BinaryCodec, subjectClientStore,
_ sdk.KVStore, substituteClient exported.ClientState,
- _ exported.Height,
) (exported.ClientState, error) {
if !cs.AllowUpdateAfterProposal {
diff --git a/modules/light-clients/06-solomachine/types/proposal_handle_test.go b/modules/light-clients/06-solomachine/types/proposal_handle_test.go
index bc8e69ac..822a1c10 100644
--- a/modules/light-clients/06-solomachine/types/proposal_handle_test.go
+++ b/modules/light-clients/06-solomachine/types/proposal_handle_test.go
@@ -70,7 +70,7 @@ func (suite *SoloMachineTestSuite) TestCheckSubstituteAndUpdateState() {
subjectClientStore := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), solomachine.ClientID)
substituteClientStore := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), substitute.ClientID)
- updatedClient, err := subjectClientState.CheckSubstituteAndUpdateState(suite.chainA.GetContext(), suite.chainA.App.AppCodec(), subjectClientStore, substituteClientStore, substituteClientState, nil)
+ updatedClient, err := subjectClientState.CheckSubstituteAndUpdateState(suite.chainA.GetContext(), suite.chainA.App.AppCodec(), subjectClientStore, substituteClientStore, substituteClientState)
if tc.expPass {
suite.Require().NoError(err)
diff --git a/modules/light-clients/07-tendermint/types/proposal_handle.go b/modules/light-clients/07-tendermint/types/proposal_handle.go
index 780ffdf9..6364aa8f 100644
--- a/modules/light-clients/07-tendermint/types/proposal_handle.go
+++ b/modules/light-clients/07-tendermint/types/proposal_handle.go
@@ -13,8 +13,8 @@ import (
// CheckSubstituteAndUpdateState will try to update the client with the state of the
// substitute if and only if the proposal passes and one of the following conditions are
// satisfied:
-// 1) AllowUpdateAfterMisbehaviour and IsFrozen() = true
-// 2) AllowUpdateAfterExpiry=true and Expire(ctx.BlockTime) = true
+// 1) AllowUpdateAfterMisbehaviour and Status() == Frozen
+// 2) AllowUpdateAfterExpiry=true and Status() == Expired
//
// The following must always be true:
// - The substitute client is the same type as the subject client
@@ -23,12 +23,9 @@ import (
// In case 1) before updating the client, the client will be unfrozen by resetting
// the FrozenHeight to the zero Height. If a client is frozen and AllowUpdateAfterMisbehaviour
// is set to true, the client will be unexpired even if AllowUpdateAfterExpiry is set to false.
-// Note, that even if the subject is updated to the state of the substitute, an error may be
-// returned if the updated client state is invalid or the client is expired.
func (cs ClientState) CheckSubstituteAndUpdateState(
ctx sdk.Context, cdc codec.BinaryCodec, subjectClientStore,
substituteClientStore sdk.KVStore, substituteClient exported.ClientState,
- initialHeight exported.Height,
) (exported.ClientState, error) {
substituteClientState, ok := substituteClient.(*ClientState)
if !ok {
@@ -37,31 +34,13 @@ func (cs ClientState) CheckSubstituteAndUpdateState(
)
}
- // substitute clients are not allowed to be upgraded during the voting period
- // If an upgrade passes before the subject client has been updated, a new proposal must be created
- // with an initial height that contains the new revision number.
- if substituteClientState.GetLatestHeight().GetRevisionNumber() != initialHeight.GetRevisionNumber() {
- return nil, sdkerrors.Wrapf(
- clienttypes.ErrInvalidHeight, "substitute client revision number must equal initial height revision number (%d != %d)",
- substituteClientState.GetLatestHeight().GetRevisionNumber(), initialHeight.GetRevisionNumber(),
- )
- }
-
if !IsMatchingClientState(cs, *substituteClientState) {
return nil, sdkerrors.Wrap(clienttypes.ErrInvalidSubstitute, "subject client state does not match substitute client state")
}
- // get consensus state corresponding to client state to check if the client is expired
- consensusState, err := GetConsensusState(subjectClientStore, cdc, cs.GetLatestHeight())
- if err != nil {
- return nil, sdkerrors.Wrapf(
- err, "unexpected error: could not get consensus state from clientstore at height: %d", cs.GetLatestHeight(),
- )
- }
-
- switch {
+ switch cs.Status(ctx, subjectClientStore, cdc) {
- case !cs.FrozenHeight.IsZero():
+ case exported.Frozen:
if !cs.AllowUpdateAfterMisbehaviour {
return nil, sdkerrors.Wrap(clienttypes.ErrUpdateClientFailed, "client is not allowed to be unfrozen")
}
@@ -69,7 +48,7 @@ func (cs ClientState) CheckSubstituteAndUpdateState(
// unfreeze the client
cs.FrozenHeight = clienttypes.ZeroHeight()
- case cs.IsExpired(consensusState.Timestamp, ctx.BlockTime()):
+ case exported.Expired:
if !cs.AllowUpdateAfterExpiry {
return nil, sdkerrors.Wrap(clienttypes.ErrUpdateClientFailed, "client is not allowed to be unexpired")
}
@@ -80,37 +59,33 @@ func (cs ClientState) CheckSubstituteAndUpdateState(
// copy consensus states and processed time from substitute to subject
// starting from initial height and ending on the latest height (inclusive)
- for i := initialHeight.GetRevisionHeight(); i <= substituteClientState.GetLatestHeight().GetRevisionHeight(); i++ {
- height := clienttypes.NewHeight(substituteClientState.GetLatestHeight().GetRevisionNumber(), i)
-
- consensusState, err := GetConsensusState(substituteClientStore, cdc, height)
- if err != nil {
- // not all consensus states will be filled in
- continue
- }
- SetConsensusState(subjectClientStore, cdc, consensusState, height)
+ height := substituteClientState.GetLatestHeight()
- // set metadata for this consensus state
- setConsensusMetadata(ctx, subjectClientStore, height)
+ consensusState, err := GetConsensusState(substituteClientStore, cdc, height)
+ if err != nil {
+ return nil, sdkerrors.Wrap(err, "unable to retrieve latest consensus state for substitute client")
}
- cs.LatestHeight = substituteClientState.LatestHeight
+ SetConsensusState(subjectClientStore, cdc, consensusState, height)
- // validate the updated client and ensure it isn't expired
- if err := cs.Validate(); err != nil {
- return nil, sdkerrors.Wrap(err, "unexpected error: updated subject client state is invalid")
+ // set metadata stored for the substitute consensus state
+ processedHeight, found := GetProcessedHeight(substituteClientStore, height)
+ if !found {
+ return nil, sdkerrors.Wrap(clienttypes.ErrUpdateClientFailed, "unable to retrieve processed height for substitute client latest height")
}
- latestConsensusState, err := GetConsensusState(subjectClientStore, cdc, cs.GetLatestHeight())
- if err != nil {
- return nil, sdkerrors.Wrapf(
- err, "unexpected error: could not get consensus state for updated subject client from clientstore at height: %d", cs.GetLatestHeight(),
- )
+ processedTime, found := GetProcessedTime(substituteClientStore, height)
+ if !found {
+ return nil, sdkerrors.Wrap(clienttypes.ErrUpdateClientFailed, "unable to retrieve processed time for substitute client latest height")
}
- if cs.IsExpired(latestConsensusState.Timestamp, ctx.BlockTime()) {
- return nil, sdkerrors.Wrap(clienttypes.ErrInvalidClient, "updated subject client is expired")
- }
+ setConsensusMetadataWithValues(subjectClientStore, height, processedHeight, processedTime)
+
+ cs.LatestHeight = substituteClientState.LatestHeight
+ cs.ChainId = substituteClientState.ChainId
+
+ // no validation is necessary since the substitute is verified to be Active
+ // in 02-client.
return &cs, nil
}
diff --git a/modules/light-clients/07-tendermint/types/proposal_handle_test.go b/modules/light-clients/07-tendermint/types/proposal_handle_test.go
index ce099178..e4236424 100644
--- a/modules/light-clients/07-tendermint/types/proposal_handle_test.go
+++ b/modules/light-clients/07-tendermint/types/proposal_handle_test.go
@@ -16,7 +16,6 @@ var (
func (suite *TendermintTestSuite) TestCheckSubstituteUpdateStateBasic() {
var (
substituteClientState exported.ClientState
- initialHeight clienttypes.Height
substitutePath *ibctesting.Path
)
testCases := []struct {
@@ -28,11 +27,6 @@ func (suite *TendermintTestSuite) TestCheckSubstituteUpdateStateBasic() {
substituteClientState = ibctesting.NewSolomachine(suite.T(), suite.cdc, "solo machine", "", 1).ClientState()
},
},
- {
- "initial height and substitute revision numbers do not match", func() {
- initialHeight = clienttypes.NewHeight(substituteClientState.GetLatestHeight().GetRevisionNumber()+1, 1)
- },
- },
{
"non-matching substitute", func() {
suite.coordinator.SetupClients(substitutePath)
@@ -43,49 +37,6 @@ func (suite *TendermintTestSuite) TestCheckSubstituteUpdateStateBasic() {
tmClientState.ChainId = tmClientState.ChainId + "different chain"
},
},
- {
- "updated client is invalid - revision height is zero", func() {
- suite.coordinator.SetupClients(substitutePath)
- substituteClientState = suite.chainA.GetClientState(substitutePath.EndpointA.ClientID).(*types.ClientState)
- tmClientState, ok := substituteClientState.(*types.ClientState)
- suite.Require().True(ok)
- // match subject
- tmClientState.AllowUpdateAfterMisbehaviour = true
- tmClientState.AllowUpdateAfterExpiry = true
-
- // will occur. This case should never occur (caught by upstream checks)
- initialHeight = clienttypes.NewHeight(5, 0)
- tmClientState.LatestHeight = clienttypes.NewHeight(5, 0)
- },
- },
- {
- "updated client is expired", func() {
- suite.coordinator.SetupClients(substitutePath)
- substituteClientState = suite.chainA.GetClientState(substitutePath.EndpointA.ClientID).(*types.ClientState)
- tmClientState, ok := substituteClientState.(*types.ClientState)
- suite.Require().True(ok)
- initialHeight = tmClientState.LatestHeight
-
- // match subject
- tmClientState.AllowUpdateAfterMisbehaviour = true
- tmClientState.AllowUpdateAfterExpiry = true
- suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(suite.chainA.GetContext(), substitutePath.EndpointA.ClientID, tmClientState)
-
- // update substitute a few times
- err := substitutePath.EndpointA.UpdateClient()
- suite.Require().NoError(err)
- substituteClientState = suite.chainA.GetClientState(substitutePath.EndpointA.ClientID)
-
- err = substitutePath.EndpointA.UpdateClient()
- suite.Require().NoError(err)
-
- // expire client
- suite.coordinator.IncrementTimeBy(tmClientState.TrustingPeriod)
- suite.coordinator.CommitBlock(suite.chainA, suite.chainB)
-
- substituteClientState = suite.chainA.GetClientState(substitutePath.EndpointA.ClientID)
- },
- },
}
for _, tc := range testCases {
@@ -111,7 +62,7 @@ func (suite *TendermintTestSuite) TestCheckSubstituteUpdateStateBasic() {
subjectClientStore := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), subjectPath.EndpointA.ClientID)
substituteClientStore := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), substitutePath.EndpointA.ClientID)
- updatedClient, err := subjectClientState.CheckSubstituteAndUpdateState(suite.chainA.GetContext(), suite.chainA.App.AppCodec(), subjectClientStore, substituteClientStore, substituteClientState, initialHeight)
+ updatedClient, err := subjectClientState.CheckSubstituteAndUpdateState(suite.chainA.GetContext(), suite.chainA.App.AppCodec(), subjectClientStore, substituteClientStore, substituteClientState)
suite.Require().Error(err)
suite.Require().Nil(updatedClient)
})
@@ -300,8 +251,6 @@ func (suite *TendermintTestSuite) TestCheckSubstituteAndUpdateState() {
substituteClientState.AllowUpdateAfterMisbehaviour = tc.AllowUpdateAfterMisbehaviour
suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(suite.chainA.GetContext(), substitutePath.EndpointA.ClientID, substituteClientState)
- initialHeight := substituteClientState.GetLatestHeight()
-
// update substitute a few times
for i := 0; i < 3; i++ {
err := substitutePath.EndpointA.UpdateClient()
@@ -313,13 +262,43 @@ func (suite *TendermintTestSuite) TestCheckSubstituteAndUpdateState() {
// get updated substitute
substituteClientState = suite.chainA.GetClientState(substitutePath.EndpointA.ClientID).(*types.ClientState)
+ // test that subject gets updated chain-id
+ newChainID := "new-chain-id"
+ substituteClientState.ChainId = newChainID
+
subjectClientStore := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), subjectPath.EndpointA.ClientID)
substituteClientStore := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), substitutePath.EndpointA.ClientID)
- updatedClient, err := subjectClientState.CheckSubstituteAndUpdateState(suite.chainA.GetContext(), suite.chainA.App.AppCodec(), subjectClientStore, substituteClientStore, substituteClientState, initialHeight)
+
+ expectedConsState := substitutePath.EndpointA.GetConsensusState(substituteClientState.GetLatestHeight())
+ expectedProcessedTime, found := types.GetProcessedTime(substituteClientStore, substituteClientState.GetLatestHeight())
+ suite.Require().True(found)
+ expectedProcessedHeight, found := types.GetProcessedHeight(substituteClientStore, substituteClientState.GetLatestHeight())
+ suite.Require().True(found)
+ expectedIterationKey := types.GetIterationKey(substituteClientStore, substituteClientState.GetLatestHeight())
+
+ updatedClient, err := subjectClientState.CheckSubstituteAndUpdateState(suite.chainA.GetContext(), suite.chainA.App.AppCodec(), subjectClientStore, substituteClientStore, substituteClientState)
if tc.expPass {
suite.Require().NoError(err)
suite.Require().Equal(clienttypes.ZeroHeight(), updatedClient.(*types.ClientState).FrozenHeight)
+
+ subjectClientStore := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), subjectPath.EndpointA.ClientID)
+
+ // check that the correct consensus state was copied over
+ suite.Require().Equal(substituteClientState.GetLatestHeight(), updatedClient.GetLatestHeight())
+ subjectConsState := subjectPath.EndpointA.GetConsensusState(updatedClient.GetLatestHeight())
+ subjectProcessedTime, found := types.GetProcessedTime(subjectClientStore, updatedClient.GetLatestHeight())
+ suite.Require().True(found)
+ subjectProcessedHeight, found := types.GetProcessedHeight(subjectClientStore, updatedClient.GetLatestHeight())
+ suite.Require().True(found)
+ subjectIterationKey := types.GetIterationKey(subjectClientStore, updatedClient.GetLatestHeight())
+
+ suite.Require().Equal(expectedConsState, subjectConsState)
+ suite.Require().Equal(expectedProcessedTime, subjectProcessedTime)
+ suite.Require().Equal(expectedProcessedHeight, subjectProcessedHeight)
+ suite.Require().Equal(expectedIterationKey, subjectIterationKey)
+
+ suite.Require().Equal(newChainID, updatedClient.(*types.ClientState).ChainId)
} else {
suite.Require().Error(err)
suite.Require().Nil(updatedClient)
diff --git a/modules/light-clients/07-tendermint/types/store.go b/modules/light-clients/07-tendermint/types/store.go
index ba21e81b..e86c8144 100644
--- a/modules/light-clients/07-tendermint/types/store.go
+++ b/modules/light-clients/07-tendermint/types/store.go
@@ -295,8 +295,17 @@ func bigEndianHeightBytes(height exported.Height) []byte {
// client state and consensus state will be set by client keeper
// set iteration key to provide ability for efficient ordered iteration of consensus states.
func setConsensusMetadata(ctx sdk.Context, clientStore sdk.KVStore, height exported.Height) {
- SetProcessedTime(clientStore, height, uint64(ctx.BlockTime().UnixNano()))
- SetProcessedHeight(clientStore, height, clienttypes.GetSelfHeight(ctx))
+ setConsensusMetadataWithValues(clientStore, height, clienttypes.GetSelfHeight(ctx), uint64(ctx.BlockTime().UnixNano()))
+}
+
+// setConsensusMetadataWithValues sets the consensus metadata with the provided values
+func setConsensusMetadataWithValues(
+ clientStore sdk.KVStore, height,
+ processedHeight exported.Height,
+ processedTime uint64,
+) {
+ SetProcessedTime(clientStore, height, processedTime)
+ SetProcessedHeight(clientStore, height, processedHeight)
SetIterationKey(clientStore, height)
}
diff --git a/modules/light-clients/09-localhost/types/client_state.go b/modules/light-clients/09-localhost/types/client_state.go
index a0615b8f..4fe00390 100644
--- a/modules/light-clients/09-localhost/types/client_state.go
+++ b/modules/light-clients/09-localhost/types/client_state.go
@@ -107,7 +107,7 @@ func (cs ClientState) CheckMisbehaviourAndUpdateState(
// proposals.
func (cs ClientState) CheckSubstituteAndUpdateState(
ctx sdk.Context, _ codec.BinaryCodec, _, _ sdk.KVStore,
- _ exported.ClientState, _ exported.Height,
+ _ exported.ClientState,
) (exported.ClientState, error) {
return nil, sdkerrors.Wrap(clienttypes.ErrUpdateClientFailed, "cannot update localhost client with a proposal")
}
diff --git a/modules/light-clients/09-localhost/types/client_state_test.go b/modules/light-clients/09-localhost/types/client_state_test.go
index e2dbe89b..46691a52 100644
--- a/modules/light-clients/09-localhost/types/client_state_test.go
+++ b/modules/light-clients/09-localhost/types/client_state_test.go
@@ -178,7 +178,7 @@ func (suite *LocalhostTestSuite) TestMisbehaviourAndUpdateState() {
func (suite *LocalhostTestSuite) TestProposedHeaderAndUpdateState() {
clientState := types.NewClientState("chainID", clientHeight)
- cs, err := clientState.CheckSubstituteAndUpdateState(suite.ctx, nil, nil, nil, nil, nil)
+ cs, err := clientState.CheckSubstituteAndUpdateState(suite.ctx, nil, nil, nil, nil)
suite.Require().Error(err)
suite.Require().Nil(cs)
}
diff --git a/proto/ibc/core/client/v1/client.proto b/proto/ibc/core/client/v1/client.proto
index a4a2cc85..88a6c343 100644
--- a/proto/ibc/core/client/v1/client.proto
+++ b/proto/ibc/core/client/v1/client.proto
@@ -37,11 +37,9 @@ message ClientConsensusStates {
}
// ClientUpdateProposal is a governance proposal. If it passes, the substitute
-// client's consensus states starting from the 'initial height' are copied over
-// to the subjects client state. The proposal handler may fail if the subject
-// and the substitute do not match in client and chain parameters (with
-// exception to latest height, frozen height, and chain-id). The updated client
-// must also be valid (cannot be expired).
+// client's latest consensus state is copied over to the subject client. The proposal
+// handler may fail if the subject and the substitute do not match in client and
+// chain parameters (with exception to latest height, frozen height, and chain-id).
message ClientUpdateProposal {
option (gogoproto.goproto_getters) = false;
// the title of the update proposal
@@ -52,10 +50,7 @@ message ClientUpdateProposal {
string subject_client_id = 3 [(gogoproto.moretags) = "yaml:\"subject_client_id\""];
// the substitute client identifier for the client standing in for the subject
// client
- string substitute_client_id = 4 [(gogoproto.moretags) = "yaml:\"susbtitute_client_id\""];
- // the intital height to copy consensus states from the substitute to the
- // subject
- Height initial_height = 5 [(gogoproto.moretags) = "yaml:\"initial_height\"", (gogoproto.nullable) = false];
+ string substitute_client_id = 4 [(gogoproto.moretags) = "yaml:\"substitute_client_id\""];
}
// UpgradeProposal is a gov Content type for initiating an IBC breaking
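For illustration, a proposal against the updated message now carries only the four fields shown above. Below is a minimal construction sketch in Go; the struct field names assume standard gogoproto generation and the governance submission wiring is omitted, so treat this as a sketch rather than the module's documented API.

package example

import (
	clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
)

// newUpdateProposal builds a ClientUpdateProposal after the removal of
// initial_height: the handler copies the substitute's latest consensus state
// over to the subject client, so only the two client identifiers are needed.
func newUpdateProposal() *clienttypes.ClientUpdateProposal {
	return &clienttypes.ClientUpdateProposal{
		Title:              "update expired client",
		Description:        "substitute the expired tendermint client",
		SubjectClientId:    "07-tendermint-0",
		SubstituteClientId: "07-tendermint-1",
	}
}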
From 665287d12f46c030851d5acb2f788ac1c3519e93 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?colin=20axn=C3=A9r?=
<25233464+colin-axner@users.noreply.github.com>
Date: Thu, 27 May 2021 13:04:04 +0200
Subject: [PATCH 070/393] rename node-state to self-consensus-state (#196)
---
CHANGELOG.md | 3 +++
modules/core/02-client/client/cli/cli.go | 2 +-
modules/core/02-client/client/cli/query.go | 15 +++++++--------
modules/core/02-client/client/utils/utils.go | 4 ++--
4 files changed, 13 insertions(+), 11 deletions(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 5f7d0054..ff768d80 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -77,6 +77,9 @@ Ref: https://keepachangelog.com/en/1.0.0/
* (modules/light-clients/07-tendermint) [\#125](https://github.com/cosmos/ibc-go/pull/125) Implement efficient iteration of consensus states and pruning of earliest expired consensus state on UpdateClient.
* (modules/light-clients/07-tendermint) [\#141](https://github.com/cosmos/ibc-go/pull/141) Return early in case there's a duplicate update call to save Gas.
+### Client Breaking Changes
+
+* (02-client/cli) [\#196](https://github.com/cosmos/ibc-go/pull/196) Rename `node-state` cli command to `self-consensus-state`.
## IBC in the Cosmos SDK Repository
diff --git a/modules/core/02-client/client/cli/cli.go b/modules/core/02-client/client/cli/cli.go
index 9146e598..eade59ba 100644
--- a/modules/core/02-client/client/cli/cli.go
+++ b/modules/core/02-client/client/cli/cli.go
@@ -23,7 +23,7 @@ func GetQueryCmd() *cobra.Command {
GetCmdQueryConsensusStates(),
GetCmdQueryConsensusState(),
GetCmdQueryHeader(),
- GetCmdNodeConsensusState(),
+ GetCmdSelfConsensusState(),
GetCmdParams(),
)
diff --git a/modules/core/02-client/client/cli/query.go b/modules/core/02-client/client/cli/query.go
index 9f32383d..5ed7c049 100644
--- a/modules/core/02-client/client/cli/query.go
+++ b/modules/core/02-client/client/cli/query.go
@@ -206,21 +206,20 @@ func GetCmdQueryHeader() *cobra.Command {
return cmd
}
-// GetCmdNodeConsensusState defines the command to query the latest consensus state of a node
-// The result is feed to client creation
-func GetCmdNodeConsensusState() *cobra.Command {
+// GetCmdSelfConsensusState defines the command to query the self consensus state of a chain
+func GetCmdSelfConsensusState() *cobra.Command {
cmd := &cobra.Command{
- Use: "node-state",
- Short: "Query a node consensus state",
- Long: "Query a node consensus state. This result is feed to the client creation transaction.",
- Example: fmt.Sprintf("%s query %s %s node-state", version.AppName, host.ModuleName, types.SubModuleName),
+ Use: "self-consensus-state",
+ Short: "Query the self consensus state for this chain",
+ Long: "Query the self consensus state for this chain. This result may be used for verifying IBC clients representing this chain which are hosted on counterparty chains.",
+ Example: fmt.Sprintf("%s query %s %s self-consensus-state", version.AppName, host.ModuleName, types.SubModuleName),
Args: cobra.NoArgs,
RunE: func(cmd *cobra.Command, _ []string) error {
clientCtx, err := client.GetClientQueryContext(cmd)
if err != nil {
return err
}
- state, _, err := utils.QueryNodeConsensusState(clientCtx)
+ state, _, err := utils.QuerySelfConsensusState(clientCtx)
if err != nil {
return err
}
diff --git a/modules/core/02-client/client/utils/utils.go b/modules/core/02-client/client/utils/utils.go
index dfbbefcb..7f68db68 100644
--- a/modules/core/02-client/client/utils/utils.go
+++ b/modules/core/02-client/client/utils/utils.go
@@ -165,9 +165,9 @@ func QueryTendermintHeader(clientCtx client.Context) (ibctmtypes.Header, int64,
return header, height, nil
}
-// QueryNodeConsensusState takes a client context and returns the appropriate
+// QuerySelfConsensusState takes a client context and returns the appropriate
// tendermint consensus state
-func QueryNodeConsensusState(clientCtx client.Context) (*ibctmtypes.ConsensusState, int64, error) {
+func QuerySelfConsensusState(clientCtx client.Context) (*ibctmtypes.ConsensusState, int64, error) {
node, err := clientCtx.GetNode()
if err != nil {
return &ibctmtypes.ConsensusState{}, 0, err
From 79659be7bf5221e4202d9c2a2fd3f2fe8ab8cee6 Mon Sep 17 00:00:00 2001
From: Ethan Buchman
Date: Thu, 27 May 2021 07:43:32 -0400
Subject: [PATCH 071/393] New cmd: query ibc-transfer escrow-address (#198)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
* New cmd: query ibc-transfer escrow-address
* Update modules/apps/transfer/client/cli/query.go
* fix build, add changelog
Co-authored-by: Aditya
Co-authored-by: Colin Axnér <25233464+colin-axner@users.noreply.github.com>
---
CHANGELOG.md | 4 ++++
modules/apps/transfer/client/cli/cli.go | 1 +
modules/apps/transfer/client/cli/query.go | 25 +++++++++++++++++++++++
3 files changed, 30 insertions(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index ff768d80..88da6d05 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -77,6 +77,10 @@ Ref: https://keepachangelog.com/en/1.0.0/
* (modules/light-clients/07-tendermint) [\#125](https://github.com/cosmos/ibc-go/pull/125) Implement efficient iteration of consensus states and pruning of earliest expired consensus state on UpdateClient.
* (modules/light-clients/07-tendermint) [\#141](https://github.com/cosmos/ibc-go/pull/141) Return early in case there's a duplicate update call to save Gas.
+### Features
+
+* [\#198](https://github.com/cosmos/ibc-go/pull/198) New CLI command `query ibc-transfer escrow-address [port] [channel-id]` to get the escrow address for a channel; can be used to then query the balance of escrowed tokens
+
### Client Breaking Changes
* (02-client/cli) [\#196](https://github.com/cosmos/ibc-go/pull/196) Rename `node-state` cli command to `self-consensus-state`.
diff --git a/modules/apps/transfer/client/cli/cli.go b/modules/apps/transfer/client/cli/cli.go
index d3ca8341..643af504 100644
--- a/modules/apps/transfer/client/cli/cli.go
+++ b/modules/apps/transfer/client/cli/cli.go
@@ -19,6 +19,7 @@ func GetQueryCmd() *cobra.Command {
GetCmdQueryDenomTrace(),
GetCmdQueryDenomTraces(),
GetCmdParams(),
+ GetCmdQueryEscrowAddress(),
)
return queryCmd
diff --git a/modules/apps/transfer/client/cli/query.go b/modules/apps/transfer/client/cli/query.go
index 6dd2e6cf..3bf09c56 100644
--- a/modules/apps/transfer/client/cli/query.go
+++ b/modules/apps/transfer/client/cli/query.go
@@ -106,3 +106,28 @@ func GetCmdParams() *cobra.Command {
return cmd
}
+
+// GetCmdQueryEscrowAddress returns the command handler for querying the escrow address of a transfer channel.
+func GetCmdQueryEscrowAddress() *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "escrow-address [port] [channel-id]",
+ Short: "Get the escrow address for a channel",
+ Long: "Get the escrow address for a channel",
+ Args: cobra.ExactArgs(2),
+ Example: fmt.Sprintf("%s query ibc-transfer escrow-address [port] [channel-id]", version.AppName),
+ RunE: func(cmd *cobra.Command, args []string) error {
+ clientCtx, err := client.GetClientQueryContext(cmd)
+ if err != nil {
+ return err
+ }
+ port := args[0]
+ channel := args[1]
+ addr := types.GetEscrowAddress(port, channel)
+ return clientCtx.PrintString(fmt.Sprintf("%s\n", addr.String()))
+ },
+ }
+
+ flags.AddQueryFlagsToCmd(cmd)
+
+ return cmd
+}
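As a usage sketch of the helper behind this command (only GetEscrowAddress appears in the diff above; reading the balance through the x/bank keeper is an assumed pattern, not part of this patch), module code could inspect the escrowed funds like so:

package example

import (
	sdk "github.com/cosmos/cosmos-sdk/types"
	bankkeeper "github.com/cosmos/cosmos-sdk/x/bank/keeper"

	"github.com/cosmos/ibc-go/modules/apps/transfer/types"
)

// escrowedBalance returns the coins currently held by the escrow account of a
// transfer channel, using the same GetEscrowAddress helper as the CLI command.
func escrowedBalance(ctx sdk.Context, bk bankkeeper.Keeper, portID, channelID string) sdk.Coins {
	return bk.GetAllBalances(ctx, types.GetEscrowAddress(portID, channelID))
}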
From 4bb1960fe4a1fdbf150cc572e4ca081ba0b85a42 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Fri, 28 May 2021 11:28:42 +0200
Subject: [PATCH 072/393] Bump actions/cache from 2.1.5 to 2.1.6 (#201)
Bumps [actions/cache](https://github.com/actions/cache) from 2.1.5 to 2.1.6.
- [Release notes](https://github.com/actions/cache/releases)
- [Commits](https://github.com/actions/cache/compare/v2.1.5...v2.1.6)
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
.github/workflows/test.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index bc79999c..5418889c 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -26,7 +26,7 @@ jobs:
- name: install tparse
run: |
export GO111MODULE="on" && go get github.com/mfridman/tparse@v0.8.3
- - uses: actions/cache@v2.1.5
+ - uses: actions/cache@v2.1.6
with:
path: ~/go/bin
key: ${{ runner.os }}-go-tparse-binary
From 4db5fd11cc83c0324261e4e7a6ba63dab4261ea7 Mon Sep 17 00:00:00 2001
From: Ethan Frey
Date: Mon, 31 May 2021 17:28:14 +0200
Subject: [PATCH 073/393] Run go fmt ./... (#207)
---
modules/apps/transfer/keeper/keeper.go | 2 +-
modules/apps/transfer/simulation/decoder_test.go | 2 +-
modules/core/02-client/simulation/decoder_test.go | 2 +-
modules/core/03-connection/simulation/decoder_test.go | 2 +-
modules/core/04-channel/simulation/decoder_test.go | 2 +-
modules/core/05-port/keeper/keeper_test.go | 2 +-
modules/core/simulation/decoder_test.go | 2 +-
modules/light-clients/09-localhost/types/localhost_test.go | 2 +-
testing/simapp/simd/cmd/cmd_test.go | 2 +-
9 files changed, 9 insertions(+), 9 deletions(-)
diff --git a/modules/apps/transfer/keeper/keeper.go b/modules/apps/transfer/keeper/keeper.go
index 2ec1b68f..08c75a26 100644
--- a/modules/apps/transfer/keeper/keeper.go
+++ b/modules/apps/transfer/keeper/keeper.go
@@ -11,10 +11,10 @@ import (
authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
capabilitykeeper "github.com/cosmos/cosmos-sdk/x/capability/keeper"
capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"
+ paramtypes "github.com/cosmos/cosmos-sdk/x/params/types"
"github.com/cosmos/ibc-go/modules/apps/transfer/types"
channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
host "github.com/cosmos/ibc-go/modules/core/24-host"
- paramtypes "github.com/cosmos/cosmos-sdk/x/params/types"
)
// Keeper defines the IBC fungible transfer keeper
diff --git a/modules/apps/transfer/simulation/decoder_test.go b/modules/apps/transfer/simulation/decoder_test.go
index b4198136..08885a34 100644
--- a/modules/apps/transfer/simulation/decoder_test.go
+++ b/modules/apps/transfer/simulation/decoder_test.go
@@ -6,10 +6,10 @@ import (
"github.com/stretchr/testify/require"
- "github.com/cosmos/ibc-go/testing/simapp"
"github.com/cosmos/cosmos-sdk/types/kv"
"github.com/cosmos/ibc-go/modules/apps/transfer/simulation"
"github.com/cosmos/ibc-go/modules/apps/transfer/types"
+ "github.com/cosmos/ibc-go/testing/simapp"
)
func TestDecodeStore(t *testing.T) {
diff --git a/modules/core/02-client/simulation/decoder_test.go b/modules/core/02-client/simulation/decoder_test.go
index 4903fefe..f1c0ca51 100644
--- a/modules/core/02-client/simulation/decoder_test.go
+++ b/modules/core/02-client/simulation/decoder_test.go
@@ -7,12 +7,12 @@ import (
"github.com/stretchr/testify/require"
- "github.com/cosmos/ibc-go/testing/simapp"
"github.com/cosmos/cosmos-sdk/types/kv"
"github.com/cosmos/ibc-go/modules/core/02-client/simulation"
"github.com/cosmos/ibc-go/modules/core/02-client/types"
host "github.com/cosmos/ibc-go/modules/core/24-host"
ibctmtypes "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types"
+ "github.com/cosmos/ibc-go/testing/simapp"
)
func TestDecodeStore(t *testing.T) {
diff --git a/modules/core/03-connection/simulation/decoder_test.go b/modules/core/03-connection/simulation/decoder_test.go
index 981da400..177f43a9 100644
--- a/modules/core/03-connection/simulation/decoder_test.go
+++ b/modules/core/03-connection/simulation/decoder_test.go
@@ -6,11 +6,11 @@ import (
"github.com/stretchr/testify/require"
- "github.com/cosmos/ibc-go/testing/simapp"
"github.com/cosmos/cosmos-sdk/types/kv"
"github.com/cosmos/ibc-go/modules/core/03-connection/simulation"
"github.com/cosmos/ibc-go/modules/core/03-connection/types"
host "github.com/cosmos/ibc-go/modules/core/24-host"
+ "github.com/cosmos/ibc-go/testing/simapp"
)
func TestDecodeStore(t *testing.T) {
diff --git a/modules/core/04-channel/simulation/decoder_test.go b/modules/core/04-channel/simulation/decoder_test.go
index 0f6f83a9..4beeb40a 100644
--- a/modules/core/04-channel/simulation/decoder_test.go
+++ b/modules/core/04-channel/simulation/decoder_test.go
@@ -6,12 +6,12 @@ import (
"github.com/stretchr/testify/require"
- "github.com/cosmos/ibc-go/testing/simapp"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/cosmos/cosmos-sdk/types/kv"
"github.com/cosmos/ibc-go/modules/core/04-channel/simulation"
"github.com/cosmos/ibc-go/modules/core/04-channel/types"
host "github.com/cosmos/ibc-go/modules/core/24-host"
+ "github.com/cosmos/ibc-go/testing/simapp"
)
func TestDecodeStore(t *testing.T) {
diff --git a/modules/core/05-port/keeper/keeper_test.go b/modules/core/05-port/keeper/keeper_test.go
index 75d1064f..bdca5bef 100644
--- a/modules/core/05-port/keeper/keeper_test.go
+++ b/modules/core/05-port/keeper/keeper_test.go
@@ -7,9 +7,9 @@ import (
"github.com/stretchr/testify/suite"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
- "github.com/cosmos/ibc-go/testing/simapp"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/cosmos/ibc-go/modules/core/05-port/keeper"
+ "github.com/cosmos/ibc-go/testing/simapp"
)
var (
diff --git a/modules/core/simulation/decoder_test.go b/modules/core/simulation/decoder_test.go
index af023fed..4a24a05c 100644
--- a/modules/core/simulation/decoder_test.go
+++ b/modules/core/simulation/decoder_test.go
@@ -6,7 +6,6 @@ import (
"github.com/stretchr/testify/require"
- "github.com/cosmos/ibc-go/testing/simapp"
"github.com/cosmos/cosmos-sdk/types/kv"
clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
connectiontypes "github.com/cosmos/ibc-go/modules/core/03-connection/types"
@@ -14,6 +13,7 @@ import (
host "github.com/cosmos/ibc-go/modules/core/24-host"
"github.com/cosmos/ibc-go/modules/core/simulation"
ibctmtypes "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types"
+ "github.com/cosmos/ibc-go/testing/simapp"
)
func TestDecodeStore(t *testing.T) {
diff --git a/modules/light-clients/09-localhost/types/localhost_test.go b/modules/light-clients/09-localhost/types/localhost_test.go
index 46c29daa..4baa8f20 100644
--- a/modules/light-clients/09-localhost/types/localhost_test.go
+++ b/modules/light-clients/09-localhost/types/localhost_test.go
@@ -7,10 +7,10 @@ import (
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
"github.com/cosmos/cosmos-sdk/codec"
- "github.com/cosmos/ibc-go/testing/simapp"
sdk "github.com/cosmos/cosmos-sdk/types"
clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
"github.com/cosmos/ibc-go/modules/core/exported"
+ "github.com/cosmos/ibc-go/testing/simapp"
)
const (
diff --git a/testing/simapp/simd/cmd/cmd_test.go b/testing/simapp/simd/cmd/cmd_test.go
index 363d06d8..1ae137a6 100644
--- a/testing/simapp/simd/cmd/cmd_test.go
+++ b/testing/simapp/simd/cmd/cmd_test.go
@@ -7,9 +7,9 @@ import (
"github.com/stretchr/testify/require"
svrcmd "github.com/cosmos/cosmos-sdk/server/cmd"
+ "github.com/cosmos/cosmos-sdk/x/genutil/client/cli"
"github.com/cosmos/ibc-go/testing/simapp"
"github.com/cosmos/ibc-go/testing/simapp/simd/cmd"
- "github.com/cosmos/cosmos-sdk/x/genutil/client/cli"
)
func TestInitCmd(t *testing.T) {
From 832044782f10fbd4ec720da4f4950095c52a48f9 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?colin=20axn=C3=A9r?=
<25233464+colin-axner@users.noreply.github.com>
Date: Thu, 3 Jun 2021 08:36:24 +0200
Subject: [PATCH 074/393] improve error messages (#203)
* improve error messages
* Update modules/light-clients/07-tendermint/types/client_state.go
Co-authored-by: Aditya
Co-authored-by: Aditya
---
modules/core/04-channel/keeper/packet.go | 2 +-
modules/core/04-channel/keeper/timeout.go | 2 +-
modules/light-clients/07-tendermint/types/client_state.go | 2 +-
3 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/modules/core/04-channel/keeper/packet.go b/modules/core/04-channel/keeper/packet.go
index f917353a..908a0809 100644
--- a/modules/core/04-channel/keeper/packet.go
+++ b/modules/core/04-channel/keeper/packet.go
@@ -471,7 +471,7 @@ func (k Keeper) AcknowledgePacket(
commitment := k.GetPacketCommitment(ctx, packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence())
if len(commitment) == 0 {
- return sdkerrors.Wrapf(types.ErrPacketCommitmentNotFound, "packet with sequence (%d) has been acknowledged, or timed out. In rare cases the packet was never sent or the packet sequence is incorrect", packet.GetSequence())
+ return sdkerrors.Wrapf(types.ErrPacketCommitmentNotFound, "packet with sequence (%d) has been acknowledged, or timed out. In rare cases, the packet referenced was never sent, likely due to the relayer being misconfigured", packet.GetSequence())
}
packetCommitment := types.CommitPacket(k.cdc, packet)
diff --git a/modules/core/04-channel/keeper/timeout.go b/modules/core/04-channel/keeper/timeout.go
index 08d10f83..caf4e037 100644
--- a/modules/core/04-channel/keeper/timeout.go
+++ b/modules/core/04-channel/keeper/timeout.go
@@ -81,7 +81,7 @@ func (k Keeper) TimeoutPacket(
commitment := k.GetPacketCommitment(ctx, packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence())
if len(commitment) == 0 {
- return sdkerrors.Wrapf(types.ErrPacketCommitmentNotFound, "packet with sequence (%d) has been acknowledged or timed out. In rare cases the packet was never sent or the packet sequence is incorrect", packet.GetSequence())
+ return sdkerrors.Wrapf(types.ErrPacketCommitmentNotFound, "packet with sequence (%d) has been acknowledged or timed out. In rare cases, the packet referenced was never sent, likely due to the relayer being misconfigured", packet.GetSequence())
}
packetCommitment := types.CommitPacket(k.cdc, packet)
diff --git a/modules/light-clients/07-tendermint/types/client_state.go b/modules/light-clients/07-tendermint/types/client_state.go
index 4884c679..996a2f3b 100644
--- a/modules/light-clients/07-tendermint/types/client_state.go
+++ b/modules/light-clients/07-tendermint/types/client_state.go
@@ -568,7 +568,7 @@ func produceVerificationArgs(
consensusState, err = GetConsensusState(store, cdc, height)
if err != nil {
- return commitmenttypes.MerkleProof{}, nil, err
+ return commitmenttypes.MerkleProof{}, nil, sdkerrors.Wrap(err, "please ensure the proof was constructed against a height that exists on the client")
}
return merkleProof, consensusState, nil
From 7a44bf728adfeb30aebce99b2fc17c668a7f7052 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?colin=20axn=C3=A9r?=
<25233464+colin-axner@users.noreply.github.com>
Date: Tue, 8 Jun 2021 13:25:46 +0200
Subject: [PATCH 075/393] fix genesis exporting of tendermint metadata (#210)
* fix genesis exporting of metadata
* changelog
* Update CHANGELOG.md
---
CHANGELOG.md | 1 +
.../07-tendermint/types/genesis.go | 4 +-
.../07-tendermint/types/genesis_test.go | 93 ++++++++++++++-----
.../07-tendermint/types/store.go | 23 ++++-
4 files changed, 95 insertions(+), 26 deletions(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 88da6d05..a559bed4 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -38,6 +38,7 @@ Ref: https://keepachangelog.com/en/1.0.0/
### Bug Fixes
+* (07-tendermint) [\#210](https://github.com/cosmos/ibc-go/pull/210) Export all consensus metadata on genesis restarts for tendermint clients.
* (core) [\#200](https://github.com/cosmos/ibc-go/pull/200) Fixes incorrect export of IBC identifier sequences. Previously, the next identifier sequence for clients/connections/channels was not set during genesis export. This resulted in the next identifiers being generated on the new chain to reuse old identifiers (the sequences began again from 0).
* (02-client) [\#192](https://github.com/cosmos/ibc-go/pull/192) Fix IBC `query ibc client header` cli command. Support historical queries for query header/node-state commands.
* (modules/light-clients/06-solomachine) [\#153](https://github.com/cosmos/ibc-go/pull/153) Fix solo machine proof height sequence mismatch bug.
diff --git a/modules/light-clients/07-tendermint/types/genesis.go b/modules/light-clients/07-tendermint/types/genesis.go
index 9661b53e..64429673 100644
--- a/modules/light-clients/07-tendermint/types/genesis.go
+++ b/modules/light-clients/07-tendermint/types/genesis.go
@@ -6,11 +6,11 @@ import (
"github.com/cosmos/ibc-go/modules/core/exported"
)
-// ExportMetadata exports all the processed times in the client store so they can be included in clients genesis
+// ExportMetadata exports all the consensus metadata in the client store so they can be included in clients genesis
// and imported by a ClientKeeper
func (cs ClientState) ExportMetadata(store sdk.KVStore) []exported.GenesisMetadata {
gm := make([]exported.GenesisMetadata, 0)
- IterateProcessedTime(store, func(key, val []byte) bool {
+ IterateConsensusMetadata(store, func(key, val []byte) bool {
gm = append(gm, clienttypes.NewGenesisMetadata(key, val))
return false
})
diff --git a/modules/light-clients/07-tendermint/types/genesis_test.go b/modules/light-clients/07-tendermint/types/genesis_test.go
index 72b876e0..3a0038c0 100644
--- a/modules/light-clients/07-tendermint/types/genesis_test.go
+++ b/modules/light-clients/07-tendermint/types/genesis_test.go
@@ -1,38 +1,89 @@
package types_test
import (
- "time"
-
sdk "github.com/cosmos/cosmos-sdk/types"
+
clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
- commitmenttypes "github.com/cosmos/ibc-go/modules/core/23-commitment/types"
"github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types"
+ ibctesting "github.com/cosmos/ibc-go/testing"
)
+// expected export ordering:
+// processed height and processed time per height
+// then all iteration keys
func (suite *TendermintTestSuite) TestExportMetadata() {
- clientState := types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
- suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(suite.chainA.GetContext(), "clientA", clientState)
+ // test initializing client and exporting metadata
+ path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.SetupClients(path)
+ clientStore := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), path.EndpointA.ClientID)
+ clientState := path.EndpointA.GetClientState()
+ height := clientState.GetLatestHeight()
+
+ initIteration := types.GetIterationKey(clientStore, height)
+ suite.Require().NotEqual(0, len(initIteration))
+ initProcessedTime, found := types.GetProcessedTime(clientStore, height)
+ suite.Require().True(found)
+ initProcessedHeight, found := types.GetProcessedHeight(clientStore, height)
+ suite.Require().True(found)
+
+ gm := clientState.ExportMetadata(suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), path.EndpointA.ClientID))
+ suite.Require().NotNil(gm, "client with metadata returned nil exported metadata")
+ suite.Require().Len(gm, 3, "exported metadata has unexpected length")
+
+ suite.Require().Equal(types.ProcessedHeightKey(height), gm[0].GetKey(), "metadata has unexpected key")
+ actualProcessedHeight, err := clienttypes.ParseHeight(string(gm[0].GetValue()))
+ suite.Require().NoError(err)
+ suite.Require().Equal(initProcessedHeight, actualProcessedHeight, "metadata has unexpected value")
+
+ suite.Require().Equal(types.ProcessedTimeKey(height), gm[1].GetKey(), "metadata has unexpected key")
+ suite.Require().Equal(initProcessedTime, sdk.BigEndianToUint64(gm[1].GetValue()), "metadata has unexpected value")
- gm := clientState.ExportMetadata(suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), "clientA"))
- suite.Require().Nil(gm, "client with no metadata returned non-nil exported metadata")
+ suite.Require().Equal(types.IterationKey(height), gm[2].GetKey(), "metadata has unexpected key")
+ suite.Require().Equal(initIteration, gm[2].GetValue(), "metadata has unexpected value")
- clientStore := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), "clientA")
+ // test updating client and exporting metadata
+ err = path.EndpointA.UpdateClient()
+ suite.Require().NoError(err)
- // set some processed times
- timestamp1 := uint64(time.Now().UnixNano())
- timestamp2 := uint64(time.Now().Add(time.Minute).UnixNano())
- timestampBz1 := sdk.Uint64ToBigEndian(timestamp1)
- timestampBz2 := sdk.Uint64ToBigEndian(timestamp2)
- types.SetProcessedTime(clientStore, clienttypes.NewHeight(0, 1), timestamp1)
- types.SetProcessedTime(clientStore, clienttypes.NewHeight(0, 2), timestamp2)
+ clientState = path.EndpointA.GetClientState()
+ updateHeight := clientState.GetLatestHeight()
- gm = clientState.ExportMetadata(suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), "clientA"))
+ iteration := types.GetIterationKey(clientStore, updateHeight)
+ suite.Require().NotEqual(0, len(iteration))
+ processedTime, found := types.GetProcessedTime(clientStore, updateHeight)
+ suite.Require().True(found)
+ processedHeight, found := types.GetProcessedHeight(clientStore, updateHeight)
+ suite.Require().True(found)
+
+ gm = clientState.ExportMetadata(suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), path.EndpointA.ClientID))
suite.Require().NotNil(gm, "client with metadata returned nil exported metadata")
- suite.Require().Len(gm, 2, "exported metadata has unexpected length")
+ suite.Require().Len(gm, 6, "exported metadata has unexpected length")
+
+ // expected ordering:
+ // initProcessedHeight, initProcessedTime, processedHeight, processedTime, initIteration, iteration
+
+ // check init processed height and time
+ suite.Require().Equal(types.ProcessedHeightKey(height), gm[0].GetKey(), "metadata has unexpected key")
+ actualProcessedHeight, err = clienttypes.ParseHeight(string(gm[0].GetValue()))
+ suite.Require().NoError(err)
+ suite.Require().Equal(initProcessedHeight, actualProcessedHeight, "metadata has unexpected value")
+
+ suite.Require().Equal(types.ProcessedTimeKey(height), gm[1].GetKey(), "metadata has unexpected key")
+ suite.Require().Equal(initProcessedTime, sdk.BigEndianToUint64(gm[1].GetValue()), "metadata has unexpected value")
+
+ // check processed height and time after update
+ suite.Require().Equal(types.ProcessedHeightKey(updateHeight), gm[2].GetKey(), "metadata has unexpected key")
+ actualProcessedHeight, err = clienttypes.ParseHeight(string(gm[2].GetValue()))
+ suite.Require().NoError(err)
+ suite.Require().Equal(processedHeight, actualProcessedHeight, "metadata has unexpected value")
+
+ suite.Require().Equal(types.ProcessedTimeKey(updateHeight), gm[3].GetKey(), "metadata has unexpected key")
+ suite.Require().Equal(processedTime, sdk.BigEndianToUint64(gm[3].GetValue()), "metadata has unexpected value")
- suite.Require().Equal(types.ProcessedTimeKey(clienttypes.NewHeight(0, 1)), gm[0].GetKey(), "metadata has unexpected key")
- suite.Require().Equal(timestampBz1, gm[0].GetValue(), "metadata has unexpected value")
+ // check iteration keys
+ suite.Require().Equal(types.IterationKey(height), gm[4].GetKey(), "metadata has unexpected key")
+ suite.Require().Equal(initIteration, gm[4].GetValue(), "metadata has unexpected value")
- suite.Require().Equal(types.ProcessedTimeKey(clienttypes.NewHeight(0, 2)), gm[1].GetKey(), "metadata has unexpected key")
- suite.Require().Equal(timestampBz2, gm[1].GetValue(), "metadata has unexpected value")
+ suite.Require().Equal(types.IterationKey(updateHeight), gm[5].GetKey(), "metadata has unexpected key")
+ suite.Require().Equal(iteration, gm[5].GetValue(), "metadata has unexpected value")
}
diff --git a/modules/light-clients/07-tendermint/types/store.go b/modules/light-clients/07-tendermint/types/store.go
index e86c8144..6e1d63ec 100644
--- a/modules/light-clients/07-tendermint/types/store.go
+++ b/modules/light-clients/07-tendermint/types/store.go
@@ -82,20 +82,37 @@ func deleteConsensusState(clientStore sdk.KVStore, height exported.Height) {
clientStore.Delete(key)
}
-// IterateProcessedTime iterates through the prefix store and applies the callback.
+// IterateConsensusMetadata iterates through the prefix store and applies the callback.
// If the cb returns true, then iterator will close and stop.
-func IterateProcessedTime(store sdk.KVStore, cb func(key, val []byte) bool) {
+func IterateConsensusMetadata(store sdk.KVStore, cb func(key, val []byte) bool) {
iterator := sdk.KVStorePrefixIterator(store, []byte(host.KeyConsensusStatePrefix))
+ // iterate over processed time and processed height
defer iterator.Close()
for ; iterator.Valid(); iterator.Next() {
keySplit := strings.Split(string(iterator.Key()), "/")
// processed time key in prefix store has format: "consensusState/<height>/processedTime"
- if len(keySplit) != 3 || keySplit[2] != "processedTime" {
+ if len(keySplit) != 3 {
// ignore all consensus state keys
continue
+
+ }
+
+ if keySplit[2] != "processedTime" && keySplit[2] != "processedHeight" {
+ // only perform callback on consensus metadata
+ continue
+ }
+
+ if cb(iterator.Key(), iterator.Value()) {
+ break
}
+ }
+ // iterate over iteration keys
+ iterator = sdk.KVStorePrefixIterator(store, []byte(KeyIterateConsensusStatePrefix))
+
+ defer iterator.Close()
+ for ; iterator.Valid(); iterator.Next() {
if cb(iterator.Key(), iterator.Value()) {
break
}
From 605a865c5ccb8d6c52b86d07b5c4908947c23935 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?colin=20axn=C3=A9r?=
<25233464+colin-axner@users.noreply.github.com>
Date: Wed, 9 Jun 2021 10:58:16 +0200
Subject: [PATCH 076/393] disable defensive timestamp check for solo machines
in SendPacket (#214)
* disable defensive timestamp check for solo machines
* add changelog
---
CHANGELOG.md | 1 +
modules/core/04-channel/keeper/packet.go | 23 +++++++++++-----
modules/core/04-channel/keeper/packet_test.go | 27 +++++++++++++++++++
3 files changed, 44 insertions(+), 7 deletions(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index a559bed4..129fabf0 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -38,6 +38,7 @@ Ref: https://keepachangelog.com/en/1.0.0/
### Bug Fixes
+* (06-solomachine) [\#214](https://github.com/cosmos/ibc-go/pull/214) Disable defensive timestamp check in SendPacket for solo machine clients.
* (07-tendermint) [\#210](https://github.com/cosmos/ibc-go/pull/210) Export all consensus metadata on genesis restarts for tendermint clients.
* (core) [\#200](https://github.com/cosmos/ibc-go/pull/200) Fixes incorrect export of IBC identifier sequences. Previously, the next identifier sequence for clients/connections/channels was not set during genesis export. This resulted in the next identifiers being generated on the new chain to reuse old identifiers (the sequences began again from 0).
* (02-client) [\#192](https://github.com/cosmos/ibc-go/pull/192) Fix IBC `query ibc client header` cli command. Support historical queries for query header/node-state commands.
diff --git a/modules/core/04-channel/keeper/packet.go b/modules/core/04-channel/keeper/packet.go
index 908a0809..571b7ae3 100644
--- a/modules/core/04-channel/keeper/packet.go
+++ b/modules/core/04-channel/keeper/packet.go
@@ -74,7 +74,7 @@ func (k Keeper) SendPacket(
return sdkerrors.Wrapf(clienttypes.ErrClientNotActive, "cannot send packet using client (%s) with status %s", connectionEnd.GetClientID(), status)
}
- // check if packet timeouted on the receiving chain
+ // check if packet is timed out on the receiving chain
latestHeight := clientState.GetLatestHeight()
timeoutHeight := packet.GetTimeoutHeight()
if !timeoutHeight.IsZero() && latestHeight.GTE(timeoutHeight) {
@@ -84,16 +84,25 @@ func (k Keeper) SendPacket(
)
}
- latestTimestamp, err := k.connectionKeeper.GetTimestampAtHeight(ctx, connectionEnd, latestHeight)
+ clientType, _, err := clienttypes.ParseClientIdentifier(connectionEnd.GetClientID())
if err != nil {
return err
}
- if packet.GetTimeoutTimestamp() != 0 && latestTimestamp >= packet.GetTimeoutTimestamp() {
- return sdkerrors.Wrapf(
- types.ErrPacketTimeout,
- "receiving chain block timestamp >= packet timeout timestamp (%s >= %s)", time.Unix(0, int64(latestTimestamp)), time.Unix(0, int64(packet.GetTimeoutTimestamp())),
- )
+ // NOTE: this is a temporary fix. Solo machine does not support usage of 'GetTimestampAtHeight'
+ // A future change should move this function to be a ClientState callback.
+ if clientType != exported.Solomachine {
+ latestTimestamp, err := k.connectionKeeper.GetTimestampAtHeight(ctx, connectionEnd, latestHeight)
+ if err != nil {
+ return err
+ }
+
+ if packet.GetTimeoutTimestamp() != 0 && latestTimestamp >= packet.GetTimeoutTimestamp() {
+ return sdkerrors.Wrapf(
+ types.ErrPacketTimeout,
+ "receiving chain block timestamp >= packet timeout timestamp (%s >= %s)", time.Unix(0, int64(latestTimestamp)), time.Unix(0, int64(packet.GetTimeoutTimestamp())),
+ )
+ }
}
nextSequenceSend, found := k.GetNextSequenceSend(ctx, packet.GetSourcePort(), packet.GetSourceChannel())
diff --git a/modules/core/04-channel/keeper/packet_test.go b/modules/core/04-channel/keeper/packet_test.go
index 91ddc7d1..d1cb1137 100644
--- a/modules/core/04-channel/keeper/packet_test.go
+++ b/modules/core/04-channel/keeper/packet_test.go
@@ -52,6 +52,33 @@ func (suite *KeeperTestSuite) TestSendPacket() {
packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
channelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
}, true},
+ {"success with solomachine: UNORDERED channel", func() {
+ suite.coordinator.Setup(path)
+ // swap client with solo machine
+ solomachine := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "solomachinesingle", "testing", 1)
+ path.EndpointA.ClientID = clienttypes.FormatClientIdentifier(exported.Solomachine, 10)
+ path.EndpointA.SetClientState(solomachine.ClientState())
+ connection := path.EndpointA.GetConnection()
+ connection.ClientId = path.EndpointA.ClientID
+ path.EndpointA.SetConnection(connection)
+
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
+ channelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
+ }, true},
+ {"success with solomachine: ORDERED channel", func() {
+ path.SetChannelOrdered()
+ suite.coordinator.Setup(path)
+ // swap client with solomachine
+ solomachine := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "solomachinesingle", "testing", 1)
+ path.EndpointA.ClientID = clienttypes.FormatClientIdentifier(exported.Solomachine, 10)
+ path.EndpointA.SetClientState(solomachine.ClientState())
+ connection := path.EndpointA.GetConnection()
+ connection.ClientId = path.EndpointA.ClientID
+ path.EndpointA.SetConnection(connection)
+
+ packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp)
+ channelCap = suite.chainA.GetChannelCapability(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)
+ }, true},
{"sending packet out of order on UNORDERED channel", func() {
// setup creates an unordered channel
suite.coordinator.Setup(path)
From d9531ba850b6f23762b434ad44b4a25f314a1a66 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Wed, 9 Jun 2021 09:00:42 +0000
Subject: [PATCH 077/393] Bump codecov/codecov-action from 1.5.0 to 1.5.2
(#216)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Bumps [codecov/codecov-action](https://github.com/codecov/codecov-action) from 1.5.0 to 1.5.2.
- [Release notes](https://github.com/codecov/codecov-action/releases)
- [Changelog](https://github.com/codecov/codecov-action/blob/master/CHANGELOG.md)
- [Commits](https://github.com/codecov/codecov-action/compare/v1.5.0...v1.5.2)
---
updated-dependencies:
- dependency-name: codecov/codecov-action
dependency-type: direct:production
update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: colin axnér <25233464+colin-axner@users.noreply.github.com>
---
.github/workflows/test.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 5418889c..941c697d 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -150,7 +150,7 @@ jobs:
sed -i.bak "/$(echo $filename | sed 's/\//\\\//g')/d" coverage.txt
done
if: env.GIT_DIFF
- - uses: codecov/codecov-action@v1.5.0
+ - uses: codecov/codecov-action@v1.5.2
with:
file: ./coverage.txt
if: env.GIT_DIFF
From 2548ab5f52d3dff51bb2e7075b4bb0d3b79949eb Mon Sep 17 00:00:00 2001
From: Ethan Frey
Date: Wed, 9 Jun 2021 18:42:50 +0200
Subject: [PATCH 078/393] Add relayer address to module callbacks (#206)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
* Add relayer address to module callbacks
* Update CHANGELOG
* update migration docs
Co-authored-by: Colin Axnér <25233464+colin-axner@users.noreply.github.com>
---
CHANGELOG.md | 1 +
docs/migrations/ibc-migration-043.md | 2 ++
modules/apps/transfer/module.go | 3 +++
modules/core/05-port/types/module.go | 3 +++
modules/core/keeper/msg_server.go | 29 ++++++++++++++++++++++++----
testing/mock/mock.go | 9 ++++++---
6 files changed, 40 insertions(+), 7 deletions(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 129fabf0..b8bb91dd 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -49,6 +49,7 @@ Ref: https://keepachangelog.com/en/1.0.0/
### API Breaking
+* (modules) [\#206](https://github.com/cosmos/ibc-go/pull/206) Expose `relayer sdk.AccAddress` on `OnRecvPacket`, `OnAcknowledgementPacket`, `OnTimeoutPacket` module callbacks to enable incentivization.
* (02-client) [\#181](https://github.com/cosmos/ibc-go/pull/181) Remove 'InitialHeight' from UpdateClient Proposal. Only copy over latest consensus state from substitute client.
* (06-solomachine) [\#169](https://github.com/cosmos/ibc-go/pull/169) Change FrozenSequence to boolean in solomachine ClientState. The solo machine proto package has been bumped from `v1` to `v2`.
* (module/core/02-client) [\#165](https://github.com/cosmos/ibc-go/pull/165) Remove GetFrozenHeight from the ClientState interface.
diff --git a/docs/migrations/ibc-migration-043.md b/docs/migrations/ibc-migration-043.md
index 177bb3c7..239900c1 100644
--- a/docs/migrations/ibc-migration-043.md
+++ b/docs/migrations/ibc-migration-043.md
@@ -106,6 +106,8 @@ Application developers need to update their `OnRecvPacket` callback logic.
The `OnRecvPacket` callback has been modified to only return the acknowledgement. The acknowledgement returned must implement the `Acknowledgement` interface. The acknowledgement should indicate if it represents a successful processing of a packet by returning true on `Success()` and false in all other cases. A return value of false on `Success()` will result in all state changes which occurred in the callback being discarded. More information can be found in the [documentation](https://github.com/cosmos/ibc-go/blob/main/docs/custom.md#receiving-packets).
+The `OnRecvPacket`, `OnAcknowledgementPacket`, and `OnTimeoutPacket` callbacks are now passed the `sdk.AccAddress` of the relayer who relayed the IBC packet. Applications may use or ignore this information.
+
## IBC Event changes
The `packet_data` attribute has been deprecated in favor of `packet_data_hex`, in order to provide standardized encoding/decoding of packet data in events. While the `packet_data` event still exists, all relayers and IBC Event consumers are strongly encouraged to switch over to using `packet_data_hex` as soon as possible.
diff --git a/modules/apps/transfer/module.go b/modules/apps/transfer/module.go
index b0addfaf..1c9afbe9 100644
--- a/modules/apps/transfer/module.go
+++ b/modules/apps/transfer/module.go
@@ -323,6 +323,7 @@ func (am AppModule) OnChanCloseConfirm(
func (am AppModule) OnRecvPacket(
ctx sdk.Context,
packet channeltypes.Packet,
+ relayer sdk.AccAddress,
) ibcexported.Acknowledgement {
ack := channeltypes.NewResultAcknowledgement([]byte{byte(1)})
@@ -360,6 +361,7 @@ func (am AppModule) OnAcknowledgementPacket(
ctx sdk.Context,
packet channeltypes.Packet,
acknowledgement []byte,
+ relayer sdk.AccAddress,
) (*sdk.Result, error) {
var ack channeltypes.Acknowledgement
if err := types.ModuleCdc.UnmarshalJSON(acknowledgement, &ack); err != nil {
@@ -411,6 +413,7 @@ func (am AppModule) OnAcknowledgementPacket(
func (am AppModule) OnTimeoutPacket(
ctx sdk.Context,
packet channeltypes.Packet,
+ relayer sdk.AccAddress,
) (*sdk.Result, error) {
var data types.FungibleTokenPacketData
if err := types.ModuleCdc.UnmarshalJSON(packet.GetData(), &data); err != nil {
diff --git a/modules/core/05-port/types/module.go b/modules/core/05-port/types/module.go
index 10a756bb..4a1c2596 100644
--- a/modules/core/05-port/types/module.go
+++ b/modules/core/05-port/types/module.go
@@ -67,16 +67,19 @@ type IBCModule interface {
OnRecvPacket(
ctx sdk.Context,
packet channeltypes.Packet,
+ relayer sdk.AccAddress,
) exported.Acknowledgement
OnAcknowledgementPacket(
ctx sdk.Context,
packet channeltypes.Packet,
acknowledgement []byte,
+ relayer sdk.AccAddress,
) (*sdk.Result, error)
OnTimeoutPacket(
ctx sdk.Context,
packet channeltypes.Packet,
+ relayer sdk.AccAddress,
) (*sdk.Result, error)
}
diff --git a/modules/core/keeper/msg_server.go b/modules/core/keeper/msg_server.go
index 2edcdd15..e1f20810 100644
--- a/modules/core/keeper/msg_server.go
+++ b/modules/core/keeper/msg_server.go
@@ -424,6 +424,11 @@ func (k Keeper) ChannelCloseConfirm(goCtx context.Context, msg *channeltypes.Msg
func (k Keeper) RecvPacket(goCtx context.Context, msg *channeltypes.MsgRecvPacket) (*channeltypes.MsgRecvPacketResponse, error) {
ctx := sdk.UnwrapSDKContext(goCtx)
+ relayer, err := sdk.AccAddressFromBech32(msg.Signer)
+ if err != nil {
+ return nil, sdkerrors.Wrap(err, "Invalid address for msg Signer")
+ }
+
// Lookup module by channel capability
module, cap, err := k.ChannelKeeper.LookupModuleByChannel(ctx, msg.Packet.DestinationPort, msg.Packet.DestinationChannel)
if err != nil {
@@ -444,7 +449,7 @@ func (k Keeper) RecvPacket(goCtx context.Context, msg *channeltypes.MsgRecvPacke
// Perform application logic callback
// Cache context so that we may discard state changes from callback if the acknowledgement is unsuccessful.
cacheCtx, writeFn := ctx.CacheContext()
- ack := cbs.OnRecvPacket(cacheCtx, msg.Packet)
+ ack := cbs.OnRecvPacket(cacheCtx, msg.Packet, relayer)
if ack == nil || ack.Success() {
// write application state changes for asynchronous and successful acknowledgements
writeFn()
@@ -478,6 +483,12 @@ func (k Keeper) RecvPacket(goCtx context.Context, msg *channeltypes.MsgRecvPacke
// Timeout defines a rpc handler method for MsgTimeout.
func (k Keeper) Timeout(goCtx context.Context, msg *channeltypes.MsgTimeout) (*channeltypes.MsgTimeoutResponse, error) {
ctx := sdk.UnwrapSDKContext(goCtx)
+
+ relayer, err := sdk.AccAddressFromBech32(msg.Signer)
+ if err != nil {
+ return nil, sdkerrors.Wrap(err, "Invalid address for msg Signer")
+ }
+
// Lookup module by channel capability
module, cap, err := k.ChannelKeeper.LookupModuleByChannel(ctx, msg.Packet.SourcePort, msg.Packet.SourceChannel)
if err != nil {
@@ -496,7 +507,7 @@ func (k Keeper) Timeout(goCtx context.Context, msg *channeltypes.MsgTimeout) (*c
}
// Perform application logic callback
- _, err = cbs.OnTimeoutPacket(ctx, msg.Packet)
+ _, err = cbs.OnTimeoutPacket(ctx, msg.Packet, relayer)
if err != nil {
return nil, sdkerrors.Wrap(err, "timeout packet callback failed")
}
@@ -527,6 +538,11 @@ func (k Keeper) Timeout(goCtx context.Context, msg *channeltypes.MsgTimeout) (*c
func (k Keeper) TimeoutOnClose(goCtx context.Context, msg *channeltypes.MsgTimeoutOnClose) (*channeltypes.MsgTimeoutOnCloseResponse, error) {
ctx := sdk.UnwrapSDKContext(goCtx)
+ relayer, err := sdk.AccAddressFromBech32(msg.Signer)
+ if err != nil {
+ return nil, sdkerrors.Wrap(err, "Invalid address for msg Signer")
+ }
+
// Lookup module by channel capability
module, cap, err := k.ChannelKeeper.LookupModuleByChannel(ctx, msg.Packet.SourcePort, msg.Packet.SourceChannel)
if err != nil {
@@ -547,7 +563,7 @@ func (k Keeper) TimeoutOnClose(goCtx context.Context, msg *channeltypes.MsgTimeo
// Perform application logic callback
// NOTE: MsgTimeout and MsgTimeoutOnClose use the same "OnTimeoutPacket"
// application logic callback.
- _, err = cbs.OnTimeoutPacket(ctx, msg.Packet)
+ _, err = cbs.OnTimeoutPacket(ctx, msg.Packet, relayer)
if err != nil {
return nil, sdkerrors.Wrap(err, "timeout packet callback failed")
}
@@ -578,6 +594,11 @@ func (k Keeper) TimeoutOnClose(goCtx context.Context, msg *channeltypes.MsgTimeo
func (k Keeper) Acknowledgement(goCtx context.Context, msg *channeltypes.MsgAcknowledgement) (*channeltypes.MsgAcknowledgementResponse, error) {
ctx := sdk.UnwrapSDKContext(goCtx)
+ relayer, err := sdk.AccAddressFromBech32(msg.Signer)
+ if err != nil {
+ return nil, sdkerrors.Wrap(err, "Invalid address for msg Signer")
+ }
+
// Lookup module by channel capability
module, cap, err := k.ChannelKeeper.LookupModuleByChannel(ctx, msg.Packet.SourcePort, msg.Packet.SourceChannel)
if err != nil {
@@ -596,7 +617,7 @@ func (k Keeper) Acknowledgement(goCtx context.Context, msg *channeltypes.MsgAckn
}
// Perform application logic callback
- _, err = cbs.OnAcknowledgementPacket(ctx, msg.Packet, msg.Acknowledgement)
+ _, err = cbs.OnAcknowledgementPacket(ctx, msg.Packet, msg.Acknowledgement, relayer)
if err != nil {
return nil, sdkerrors.Wrap(err, "acknowledge packet callback failed")
}
diff --git a/testing/mock/mock.go b/testing/mock/mock.go
index d6673d6d..e2062dda 100644
--- a/testing/mock/mock.go
+++ b/testing/mock/mock.go
@@ -17,6 +17,7 @@ import (
abci "github.com/tendermint/tendermint/abci/types"
channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
+ porttypes "github.com/cosmos/ibc-go/modules/core/05-port/types"
host "github.com/cosmos/ibc-go/modules/core/24-host"
"github.com/cosmos/ibc-go/modules/core/exported"
)
@@ -34,6 +35,8 @@ var (
MockCanaryCapabilityName = "mock canary capability name"
)
+var _ porttypes.IBCModule = AppModule{}
+
// Expected Interface
// PortKeeper defines the expected IBC port keeper
type PortKeeper interface {
@@ -189,7 +192,7 @@ func (am AppModule) OnChanCloseConfirm(sdk.Context, string, string) error {
}
// OnRecvPacket implements the IBCModule interface.
-func (am AppModule) OnRecvPacket(ctx sdk.Context, packet channeltypes.Packet) exported.Acknowledgement {
+func (am AppModule) OnRecvPacket(ctx sdk.Context, packet channeltypes.Packet, relayer sdk.AccAddress) exported.Acknowledgement {
// set state by claiming capability to check if revert happens return
am.scopedKeeper.NewCapability(ctx, MockCanaryCapabilityName)
if bytes.Equal(MockPacketData, packet.GetData()) {
@@ -202,11 +205,11 @@ func (am AppModule) OnRecvPacket(ctx sdk.Context, packet channeltypes.Packet) ex
}
// OnAcknowledgementPacket implements the IBCModule interface.
-func (am AppModule) OnAcknowledgementPacket(sdk.Context, channeltypes.Packet, []byte) (*sdk.Result, error) {
+func (am AppModule) OnAcknowledgementPacket(sdk.Context, channeltypes.Packet, []byte, sdk.AccAddress) (*sdk.Result, error) {
return nil, nil
}
// OnTimeoutPacket implements the IBCModule interface.
-func (am AppModule) OnTimeoutPacket(sdk.Context, channeltypes.Packet) (*sdk.Result, error) {
+func (am AppModule) OnTimeoutPacket(sdk.Context, channeltypes.Packet, sdk.AccAddress) (*sdk.Result, error) {
return nil, nil
}
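To make the interface change concrete, here is a minimal sketch of an application-side OnRecvPacket under the new signature; the relayer address may be used for incentivization bookkeeping or simply ignored, and the AppModule stub below is illustrative only:

package example

import (
	sdk "github.com/cosmos/cosmos-sdk/types"

	channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
	"github.com/cosmos/ibc-go/modules/core/exported"
)

// AppModule is a stub standing in for an application's IBC module.
type AppModule struct{}

// OnRecvPacket implements the updated IBCModule callback. The relayer address
// is now passed in; this sketch simply ignores it and returns a successful
// acknowledgement, as the mock module in this patch does.
func (am AppModule) OnRecvPacket(
	ctx sdk.Context,
	packet channeltypes.Packet,
	relayer sdk.AccAddress,
) exported.Acknowledgement {
	_ = relayer // an application may record this address for incentivization

	return channeltypes.NewResultAcknowledgement([]byte{byte(1)})
}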
From 2e95805e2d474b85c837ec433e64947782039047 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?colin=20axn=C3=A9r?=
<25233464+colin-axner@users.noreply.github.com>
Date: Thu, 17 Jun 2021 13:43:56 +0200
Subject: [PATCH 079/393] Add in-place and genesis migrations (#205)
* add in-place migrations
Prunes solomachines and expired tendermint consensus states via an x/upgrade
* update migrations
fix iteration bug
remove solo machine connections
remove solo machine channels
* migrate solomachine from v1 to v2 during in place migration
Regenerate v1 solo machine definition in 02-client legacy
Migrate from v1 to v2 solo machine client state
Prune v1 solo machine consensus states
* fix build
* add genesis migration
* code cleanup
* add store migration test for expired tendermint consensus states
* finish adding in place migration store tests
* add genesis test for solo machines
* fix genesis migration bug, add tendermint tests
* test fix, changelog, migration docs
* Apply suggestions from code review
* Update docs/migrations/ibc-migration-043.md
* apply Aditya's review suggestions
* fix tests
* add genesis json unmarshal test
Test that the legacy solo machines can be successfully unmarshalled.
This requires registering an implementation for the legacy solo machine. An implementation which panics has been added.
This implementation should only be registered against a clientCtx during a migrate cli cmd. The implementation is only briefly used in order to decode the previous solo machine set in genesis.
* add migration support for max expected time per block
* fix docs
* fix bug found by Aditya
The genesis client metadata was being set independently for each unexpired height. It needed to be moved outside the unexpired for loop
* remove unnecessary code
* apply Aditya review suggestions, fix bug
There was a bug in adding consensus metadata since it relied on the iteration key, which was not yet set.
This is fixed by using traditional iteration over the consensus state keys, setting metadata for all consensus states, and then pruning expired consensus states. The store test has been updated to create two tendermint clients
Co-authored-by: Aditya
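The last bullet describes a two-phase pattern: write consensus metadata for every stored consensus state first, then prune the expired ones, so that pruning never depends on iteration keys that do not exist yet. A simplified sketch follows; every helper passed in is a hypothetical stand-in, and the real logic lives in modules/core/02-client/legacy/v100/store.go.

package example

import (
	sdk "github.com/cosmos/cosmos-sdk/types"

	"github.com/cosmos/ibc-go/modules/core/exported"
)

// migrateConsensusMetadata sets metadata for all stored consensus states and
// only afterwards prunes the expired ones. The heights slice and the three
// callbacks are hypothetical stand-ins for the legacy store helpers.
func migrateConsensusMetadata(
	clientStore sdk.KVStore,
	heights []exported.Height,
	isExpired func(exported.Height) bool,
	setMetadata func(sdk.KVStore, exported.Height),
	prune func(sdk.KVStore, exported.Height),
) {
	// phase 1: metadata for every consensus state
	for _, height := range heights {
		setMetadata(clientStore, height)
	}
	// phase 2: prune expired consensus states and their metadata
	for _, height := range heights {
		if isExpired(height) {
			prune(clientStore, height)
		}
	}
}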
---
CHANGELOG.md | 1 +
docs/ibc/proto-docs.md | 340 +-
docs/migrations/ibc-migration-043.md | 60 +
go.mod | 1 +
modules/core/02-client/keeper/migrations.go | 27 +
modules/core/02-client/legacy/v100/genesis.go | 153 +
.../02-client/legacy/v100/genesis_test.go | 311 ++
.../core/02-client/legacy/v100/solomachine.go | 208 +
.../02-client/legacy/v100/solomachine.pb.go | 4121 +++++++++++++++++
modules/core/02-client/legacy/v100/store.go | 180 +
.../core/02-client/legacy/v100/store_test.go | 231 +
.../core/03-connection/types/connection.pb.go | 6 +-
modules/core/03-connection/types/params.go | 2 +-
modules/core/exported/client.go | 3 +-
modules/core/keeper/migrations.go | 32 +
modules/core/legacy/v100/genesis.go | 54 +
modules/core/legacy/v100/genesis_test.go | 178 +
modules/core/module.go | 6 +-
.../07-tendermint/types/store.go | 36 +
.../07-tendermint/types/tendermint.pb.go | 4 +-
.../07-tendermint/types/update.go | 1 -
.../07-tendermint/types/update_test.go | 12 +
proto/ibc/core/connection/v1/connection.proto | 6 +-
.../solomachine/v1/solomachine.proto | 189 +
24 files changed, 6148 insertions(+), 14 deletions(-)
create mode 100644 modules/core/02-client/keeper/migrations.go
create mode 100644 modules/core/02-client/legacy/v100/genesis.go
create mode 100644 modules/core/02-client/legacy/v100/genesis_test.go
create mode 100644 modules/core/02-client/legacy/v100/solomachine.go
create mode 100644 modules/core/02-client/legacy/v100/solomachine.pb.go
create mode 100644 modules/core/02-client/legacy/v100/store.go
create mode 100644 modules/core/02-client/legacy/v100/store_test.go
create mode 100644 modules/core/keeper/migrations.go
create mode 100644 modules/core/legacy/v100/genesis.go
create mode 100644 modules/core/legacy/v100/genesis_test.go
create mode 100644 proto/ibc/lightclients/solomachine/v1/solomachine.proto
diff --git a/CHANGELOG.md b/CHANGELOG.md
index b8bb91dd..c4420fb8 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -72,6 +72,7 @@ Ref: https://keepachangelog.com/en/1.0.0/
### Improvements
+* (core/02-client) [\#205](https://github.com/cosmos/ibc-go/pull/205) Add in-place and genesis migrations from SDK v0.42.0 to ibc-go v1.0.0. Solo machine protobuf definitions are migrated from v1 to v2. All solo machine consensus states are pruned. All expired tendermint consensus states are pruned.
* (modules/core) [\#184](https://github.com/cosmos/ibc-go/pull/184) Improve error messages. Uses unique error codes to indicate already relayed packets.
* (07-tendermint) [\#182](https://github.com/cosmos/ibc-go/pull/182) Remove duplicate checks in upgrade logic.
* (modules/core/04-channel) [\#7949](https://github.com/cosmos/cosmos-sdk/issues/7949) Standardized channel `Acknowledgement` moved to its own file. Codec registration redundancy removed.
diff --git a/docs/ibc/proto-docs.md b/docs/ibc/proto-docs.md
index 701d4684..cae068cf 100644
--- a/docs/ibc/proto-docs.md
+++ b/docs/ibc/proto-docs.md
@@ -195,6 +195,26 @@
- [ibc/lightclients/localhost/v1/localhost.proto](#ibc/lightclients/localhost/v1/localhost.proto)
- [ClientState](#ibc.lightclients.localhost.v1.ClientState)
+- [ibc/lightclients/solomachine/v1/solomachine.proto](#ibc/lightclients/solomachine/v1/solomachine.proto)
+ - [ChannelStateData](#ibc.lightclients.solomachine.v1.ChannelStateData)
+ - [ClientState](#ibc.lightclients.solomachine.v1.ClientState)
+ - [ClientStateData](#ibc.lightclients.solomachine.v1.ClientStateData)
+ - [ConnectionStateData](#ibc.lightclients.solomachine.v1.ConnectionStateData)
+ - [ConsensusState](#ibc.lightclients.solomachine.v1.ConsensusState)
+ - [ConsensusStateData](#ibc.lightclients.solomachine.v1.ConsensusStateData)
+ - [Header](#ibc.lightclients.solomachine.v1.Header)
+ - [HeaderData](#ibc.lightclients.solomachine.v1.HeaderData)
+ - [Misbehaviour](#ibc.lightclients.solomachine.v1.Misbehaviour)
+ - [NextSequenceRecvData](#ibc.lightclients.solomachine.v1.NextSequenceRecvData)
+ - [PacketAcknowledgementData](#ibc.lightclients.solomachine.v1.PacketAcknowledgementData)
+ - [PacketCommitmentData](#ibc.lightclients.solomachine.v1.PacketCommitmentData)
+ - [PacketReceiptAbsenceData](#ibc.lightclients.solomachine.v1.PacketReceiptAbsenceData)
+ - [SignBytes](#ibc.lightclients.solomachine.v1.SignBytes)
+ - [SignatureAndData](#ibc.lightclients.solomachine.v1.SignatureAndData)
+ - [TimestampedSignatureData](#ibc.lightclients.solomachine.v1.TimestampedSignatureData)
+
+ - [DataType](#ibc.lightclients.solomachine.v1.DataType)
+
- [ibc/lightclients/solomachine/v2/solomachine.proto](#ibc/lightclients/solomachine/v2/solomachine.proto)
- [ChannelStateData](#ibc.lightclients.solomachine.v2.ChannelStateData)
- [ClientState](#ibc.lightclients.solomachine.v2.ClientState)
@@ -2405,7 +2425,7 @@ Params defines the set of Connection parameters.
| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
-| `max_expected_time_per_block` | [uint64](#uint64) | | maximum expected time per block, used to enforce block delay. This parameter should reflect the largest amount of time that the chain might reasonably take to produce the next block under normal operating conditions. A safe choice is 3-5x the expected time per block. |
+| `max_expected_time_per_block` | [uint64](#uint64) | | maximum expected time per block (in nanoseconds), used to enforce block delay. This parameter should reflect the largest amount of time that the chain might reasonably take to produce the next block under normal operating conditions. A safe choice is 3-5x the expected time per block. |
@@ -2919,6 +2939,324 @@ access to keys outside the client prefix.
+
+Top
+
+## ibc/lightclients/solomachine/v1/solomachine.proto
+
+
+
+
+
+### ChannelStateData
+ChannelStateData returns the SignBytes data for channel state
+verification.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `path` | [bytes](#bytes) | | |
+| `channel` | [ibc.core.channel.v1.Channel](#ibc.core.channel.v1.Channel) | | |
+
+
+
+
+
+
+
+
+### ClientState
+ClientState defines a solo machine client that tracks the current consensus
+state and if the client is frozen.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `sequence` | [uint64](#uint64) | | latest sequence of the client state |
+| `frozen_sequence` | [uint64](#uint64) | | frozen sequence of the solo machine |
+| `consensus_state` | [ConsensusState](#ibc.lightclients.solomachine.v1.ConsensusState) | | |
+| `allow_update_after_proposal` | [bool](#bool) | | when set to true, will allow governance to update a solo machine client. The client will be unfrozen if it is frozen. |
+
+
+
+
+
+
+
+
+### ClientStateData
+ClientStateData returns the SignBytes data for client state verification.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `path` | [bytes](#bytes) | | |
+| `client_state` | [google.protobuf.Any](#google.protobuf.Any) | | |
+
+
+
+
+
+
+
+
+### ConnectionStateData
+ConnectionStateData returns the SignBytes data for connection state
+verification.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `path` | [bytes](#bytes) | | |
+| `connection` | [ibc.core.connection.v1.ConnectionEnd](#ibc.core.connection.v1.ConnectionEnd) | | |
+
+
+
+
+
+
+
+
+### ConsensusState
+ConsensusState defines a solo machine consensus state. The sequence of a
+consensus state is contained in the "height" key used in storing the
+consensus state.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `public_key` | [google.protobuf.Any](#google.protobuf.Any) | | public key of the solo machine |
+| `diversifier` | [string](#string) | | diversifier allows the same public key to be re-used across different solo machine clients (potentially on different chains) without being considered misbehaviour. |
+| `timestamp` | [uint64](#uint64) | | |
+
+
+
+
+
+
+
+
+### ConsensusStateData
+ConsensusStateData returns the SignBytes data for consensus state
+verification.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `path` | [bytes](#bytes) | | |
+| `consensus_state` | [google.protobuf.Any](#google.protobuf.Any) | | |
+
+
+
+
+
+
+
+
+### Header
+Header defines a solo machine consensus header
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `sequence` | [uint64](#uint64) | | sequence to update solo machine public key at |
+| `timestamp` | [uint64](#uint64) | | |
+| `signature` | [bytes](#bytes) | | |
+| `new_public_key` | [google.protobuf.Any](#google.protobuf.Any) | | |
+| `new_diversifier` | [string](#string) | | |
+
+
+
+
+
+
+
+
+### HeaderData
+HeaderData returns the SignBytes data for update verification.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `new_pub_key` | [google.protobuf.Any](#google.protobuf.Any) | | header public key |
+| `new_diversifier` | [string](#string) | | header diversifier |
+
+
+
+
+
+
+
+
+### Misbehaviour
+Misbehaviour defines misbehaviour for a solo machine which consists
+of a sequence and two signatures over different messages at that sequence.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `client_id` | [string](#string) | | |
+| `sequence` | [uint64](#uint64) | | |
+| `signature_one` | [SignatureAndData](#ibc.lightclients.solomachine.v1.SignatureAndData) | | |
+| `signature_two` | [SignatureAndData](#ibc.lightclients.solomachine.v1.SignatureAndData) | | |
+
+
+
+
+
+
+
+
+### NextSequenceRecvData
+NextSequenceRecvData returns the SignBytes data for verification of the next
+sequence to be received.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `path` | [bytes](#bytes) | | |
+| `next_seq_recv` | [uint64](#uint64) | | |
+
+
+
+
+
+
+
+
+### PacketAcknowledgementData
+PacketAcknowledgementData returns the SignBytes data for acknowledgement
+verification.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `path` | [bytes](#bytes) | | |
+| `acknowledgement` | [bytes](#bytes) | | |
+
+
+
+
+
+
+
+
+### PacketCommitmentData
+PacketCommitmentData returns the SignBytes data for packet commitment
+verification.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `path` | [bytes](#bytes) | | |
+| `commitment` | [bytes](#bytes) | | |
+
+
+
+
+
+
+
+
+### PacketReceiptAbsenceData
+PacketReceiptAbsenceData returns the SignBytes data for
+packet receipt absence verification.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `path` | [bytes](#bytes) | | |
+
+
+
+
+
+
+
+
+### SignBytes
+SignBytes defines the signed bytes used for signature verification.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `sequence` | [uint64](#uint64) | | |
+| `timestamp` | [uint64](#uint64) | | |
+| `diversifier` | [string](#string) | | |
+| `data_type` | [DataType](#ibc.lightclients.solomachine.v1.DataType) | | type of the data used |
+| `data` | [bytes](#bytes) | | marshaled data |
+
+
+
+
+
+
+
+
+### SignatureAndData
+SignatureAndData contains a signature and the data signed over to create that
+signature.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `signature` | [bytes](#bytes) | | |
+| `data_type` | [DataType](#ibc.lightclients.solomachine.v1.DataType) | | |
+| `data` | [bytes](#bytes) | | |
+| `timestamp` | [uint64](#uint64) | | |
+
+
+
+
+
+
+
+
+### TimestampedSignatureData
+TimestampedSignatureData contains the signature data and the timestamp of the
+signature.
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| `signature_data` | [bytes](#bytes) | | |
+| `timestamp` | [uint64](#uint64) | | |
+
+
+
+
+
+
+
+
+
+
+### DataType
+DataType defines the type of solo machine proof being created. This is done
+to preserve uniqueness of different data sign byte encodings.
+
+| Name | Number | Description |
+| ---- | ------ | ----------- |
+| DATA_TYPE_UNINITIALIZED_UNSPECIFIED | 0 | Default State |
+| DATA_TYPE_CLIENT_STATE | 1 | Data type for client state verification |
+| DATA_TYPE_CONSENSUS_STATE | 2 | Data type for consensus state verification |
+| DATA_TYPE_CONNECTION_STATE | 3 | Data type for connection state verification |
+| DATA_TYPE_CHANNEL_STATE | 4 | Data type for channel state verification |
+| DATA_TYPE_PACKET_COMMITMENT | 5 | Data type for packet commitment verification |
+| DATA_TYPE_PACKET_ACKNOWLEDGEMENT | 6 | Data type for packet acknowledgement verification |
+| DATA_TYPE_PACKET_RECEIPT_ABSENCE | 7 | Data type for packet receipt absence verification |
+| DATA_TYPE_NEXT_SEQUENCE_RECV | 8 | Data type for next sequence recv verification |
+| DATA_TYPE_HEADER | 9 | Data type for header verification |
+
+
+
+
+
+
+
+
+
+
Top
diff --git a/docs/migrations/ibc-migration-043.md b/docs/migrations/ibc-migration-043.md
index 239900c1..82154645 100644
--- a/docs/migrations/ibc-migration-043.md
+++ b/docs/migrations/ibc-migration-043.md
@@ -27,6 +27,66 @@ Feel free to use your own method for modifying import names.
NOTE: Updating to the `v0.43.0` SDK release and then running `go mod tidy` will cause a downgrade to `v0.42.0` in order to support the old IBC import paths.
Update the import paths before running `go mod tidy`.
+## Chain Upgrades
+
+Chains may choose to upgrade via an upgrade proposal or genesis upgrades. Both in-place store migrations and genesis migrations are supported.
+
+**WARNING**: Please read at least the quick guide for [IBC client upgrades](../ibc/upgrades/README.md) before upgrading your chain. It is highly recommended you do not change the chain-ID during an upgrade, otherwise you must follow the IBC client upgrade instructions.
+
+Both in-place store migrations and genesis migrations will:
+- migrate the solo machine client state from v1 to v2 protobuf definitions
+- prune all solo machine consensus states
+- prune all expired tendermint consensus states
+
+Chains must set a new connection parameter during either in-place store migrations or genesis migrations. The new parameter, the maximum expected time per block, is used to enforce packet-processing delays on the receiving end of an IBC packet flow. Check out the [docs](https://github.com/cosmos/ibc-go/blob/release/v1.0.x/docs/ibc/proto-docs.md#params-2) for more information.
+
+### In-Place Store Migrations
+
+The new chain binary will need to run migrations in the upgrade handler. The fromVM (previous module version) for the IBC module should be 1. This allows migrations to be run for IBC, updating its version from 1 to 2.
+
+Ex:
+```go
+app.UpgradeKeeper.SetUpgradeHandler("my-upgrade-proposal",
+ func(ctx sdk.Context, _ upgradetypes.Plan, _ module.VersionMap) (module.VersionMap, error) {
+ // set max expected block time parameter. Replace the default with your expected value
+ // https://github.com/cosmos/ibc-go/blob/release/v1.0.x/docs/ibc/proto-docs.md#params-2
+ app.IBCKeeper.ConnectionKeeper.SetParams(ctx, ibcconnectiontypes.DefaultParams())
+
+ fromVM := map[string]uint64{
+ ... // other modules
+ "ibc": 1,
+ ...
+ }
+ return app.mm.RunMigrations(ctx, app.configurator, fromVM)
+ })
+
+```
+
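+If your chain's expected block time differs meaningfully from the default, you can set a custom value instead of `DefaultParams()`. A minimal sketch, assuming `ibcconnectiontypes.NewParams` takes the value in nanoseconds; the 10-second figure is purely illustrative:
+
+```go
+// illustrative only: choose roughly 3-5x your chain's expected block time
+expectedTimePerBlock := uint64(10 * time.Second) // value is in nanoseconds
+app.IBCKeeper.ConnectionKeeper.SetParams(ctx, ibcconnectiontypes.NewParams(expectedTimePerBlock))
+```
+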
+### Genesis Migrations
+
+To perform genesis migrations, the following code must be added to your existing migration code.
+
+```go
+// add imports as necessary
+import (
+ ibcv100 "github.com/cosmos/ibc-go/modules/core/legacy/v100"
+ ibchost "github.com/cosmos/ibc-go/modules/core/24-host"
+)
+
+...
+
+// add inside the migrate cmd function
+// expectedTimePerBlock is a new connection parameter
+// https://github.com/cosmos/ibc-go/blob/release/v1.0.x/docs/ibc/proto-docs.md#params-2
+newGenState, err = ibcv100.MigrateGenesis(newGenState, clientCtx, *genDoc, expectedTimePerBlock)
+if err != nil {
+ return err
+}
+```
+
+**NOTE:** The genesis chain-id, time and height MUST be updated before migrating IBC, otherwise expired tendermint consensus states will not be pruned.
+
+
## IBC Keeper Changes
The IBC Keeper now takes in the Upgrade Keeper. Please add the chains' Upgrade Keeper after the Staking Keeper:
diff --git a/go.mod b/go.mod
index 233f26b7..f4100fcb 100644
--- a/go.mod
+++ b/go.mod
@@ -22,4 +22,5 @@ require (
github.com/tendermint/tm-db v0.6.4
google.golang.org/genproto v0.0.0-20210114201628-6edceaf6022f
google.golang.org/grpc v1.37.0
+ google.golang.org/protobuf v1.26.0
)
diff --git a/modules/core/02-client/keeper/migrations.go b/modules/core/02-client/keeper/migrations.go
new file mode 100644
index 00000000..5f2088d6
--- /dev/null
+++ b/modules/core/02-client/keeper/migrations.go
@@ -0,0 +1,27 @@
+package keeper
+
+import (
+ sdk "github.com/cosmos/cosmos-sdk/types"
+
+ v100 "github.com/cosmos/ibc-go/modules/core/02-client/legacy/v100"
+)
+
+// Migrator is a struct for handling in-place store migrations.
+type Migrator struct {
+ keeper Keeper
+}
+
+// NewMigrator returns a new Migrator.
+func NewMigrator(keeper Keeper) Migrator {
+ return Migrator{keeper: keeper}
+}
+
+// Migrate1to2 migrates from version 1 to 2.
+// This migration
+// - migrates solo machine client states from v1 to v2 protobuf definition
+// - prunes solo machine consensus states
+// - prunes expired tendermint consensus states
+// - adds iteration and processed height keys for unexpired tendermint consensus states
+func (m Migrator) Migrate1to2(ctx sdk.Context) error {
+ return v100.MigrateStore(ctx, m.keeper.storeKey, m.keeper.cdc)
+}
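For orientation, here is a minimal sketch of how this `Migrate1to2` handler could be registered with the SDK module configurator so that `RunMigrations` picks it up. The `AppModule`/`am.keeper` wiring below is an assumption for illustration; the actual registration lives in `modules/core/module.go`, which this patch also touches.

```go
import (
	"github.com/cosmos/cosmos-sdk/types/module"

	clientkeeper "github.com/cosmos/ibc-go/modules/core/02-client/keeper"
	host "github.com/cosmos/ibc-go/modules/core/24-host"
)

// RegisterServices registers the 1->2 in-place store migration for the IBC module.
// When the upgrade handler passes fromVM["ibc"] = 1, RunMigrations invokes Migrate1to2.
func (am AppModule) RegisterServices(cfg module.Configurator) {
	clientMigrator := clientkeeper.NewMigrator(am.keeper.ClientKeeper)
	if err := cfg.RegisterMigration(host.ModuleName, 1, clientMigrator.Migrate1to2); err != nil {
		panic(err)
	}
}
```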
diff --git a/modules/core/02-client/legacy/v100/genesis.go b/modules/core/02-client/legacy/v100/genesis.go
new file mode 100644
index 00000000..65aa4210
--- /dev/null
+++ b/modules/core/02-client/legacy/v100/genesis.go
@@ -0,0 +1,153 @@
+package v100
+
+import (
+ "bytes"
+ "time"
+
+ "github.com/cosmos/cosmos-sdk/codec"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+
+ "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
+ "github.com/cosmos/ibc-go/modules/core/exported"
+ ibctmtypes "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types"
+)
+
+// MigrateGenesis accepts an exported v1.0.0 IBC client genesis file and migrates it to:
+//
+// - Update solo machine client state protobuf definition (v1 to v2)
+// - Remove all solo machine consensus states
+// - Remove all expired tendermint consensus states
+// - Add ProcessedHeight and Iteration keys for unexpired tendermint consensus states
+func MigrateGenesis(cdc codec.BinaryCodec, clientGenState *types.GenesisState, genesisBlockTime time.Time, selfHeight exported.Height) (*types.GenesisState, error) {
+ // To prune the consensus states, we will create new clientsConsensus
+ // and clientsMetadata. These slices will be filled up with consensus states
+ // which should not be pruned. No solo machine consensus states should be added
+ // and only unexpired consensus states for tendermint clients will be added.
+ // The metadata keys for unexpired consensus states will be added to clientsMetadata
+ var (
+ clientsConsensus []types.ClientConsensusStates
+ clientsMetadata []types.IdentifiedGenesisMetadata
+ )
+
+ for i, client := range clientGenState.Clients {
+ clientType, _, err := types.ParseClientIdentifier(client.ClientId)
+ if err != nil {
+ return nil, err
+ }
+
+ // update solo machine client state definitions
+ if clientType == exported.Solomachine {
+ clientState := &ClientState{}
+ if err := cdc.Unmarshal(client.ClientState.Value, clientState); err != nil {
+ return nil, sdkerrors.Wrap(err, "failed to unmarshal client state bytes into solo machine client state")
+ }
+
+ updatedClientState := migrateSolomachine(clientState)
+
+ any, err := types.PackClientState(updatedClientState)
+ if err != nil {
+ return nil, err
+ }
+
+ clientGenState.Clients[i] = types.IdentifiedClientState{
+ ClientId: client.ClientId,
+ ClientState: any,
+ }
+ }
+
+ // iterate consensus states by client
+ for _, clientConsensusStates := range clientGenState.ClientsConsensus {
+ // look for consensus states for the current client
+ if clientConsensusStates.ClientId == client.ClientId {
+ switch clientType {
+ case exported.Solomachine:
+ // remove all consensus states for the solo machine
+ // do not add to new clientsConsensus
+
+ case exported.Tendermint:
+ // only add non expired consensus states to new clientsConsensus
+ tmClientState, ok := client.ClientState.GetCachedValue().(*ibctmtypes.ClientState)
+ if !ok {
+ return nil, types.ErrInvalidClient
+ }
+
+ // collect unexpired consensus states
+ var unexpiredConsensusStates []types.ConsensusStateWithHeight
+ for _, consState := range clientConsensusStates.ConsensusStates {
+ tmConsState := consState.ConsensusState.GetCachedValue().(*ibctmtypes.ConsensusState)
+ if !tmClientState.IsExpired(tmConsState.Timestamp, genesisBlockTime) {
+ unexpiredConsensusStates = append(unexpiredConsensusStates, consState)
+ }
+ }
+
+ // if we found at least one unexpired consensus state, create a clientConsensusState
+ // and add it to clientsConsensus
+ if len(unexpiredConsensusStates) != 0 {
+ clientsConsensus = append(clientsConsensus, types.ClientConsensusStates{
+ ClientId: client.ClientId,
+ ConsensusStates: unexpiredConsensusStates,
+ })
+ }
+
+ // collect metadata for unexpired consensus states
+ var clientMetadata []types.GenesisMetadata
+
+ // remove all expired tendermint consensus state metadata by adding only
+ // unexpired consensus state metadata
+ for _, consState := range unexpiredConsensusStates {
+ for _, identifiedGenMetadata := range clientGenState.ClientsMetadata {
+ // look for metadata for current client
+ if identifiedGenMetadata.ClientId == client.ClientId {
+
+ // obtain the height of the unexpired consensus state
+ height := consState.Height
+
+ // iterate through metadata and find metadata for current unexpired height
+ // only unexpired consensus state metadata should be added
+ for _, metadata := range identifiedGenMetadata.ClientMetadata {
+ // the previous version of IBC only contained the processed time metadata
+ // if we find the processed time metadata for an unexpired height, add the
+ // iteration key and processed height keys.
+ if bytes.Equal(metadata.Key, ibctmtypes.ProcessedTimeKey(height)) {
+ clientMetadata = append(clientMetadata,
+ // set the processed height using the current self height
+ // this is safe, although it may cause delays in packet processing if there
+ // is a non-zero connection delay time
+ types.GenesisMetadata{
+ Key: ibctmtypes.ProcessedHeightKey(height),
+ Value: []byte(selfHeight.String()),
+ },
+ metadata, // processed time
+ types.GenesisMetadata{
+ Key: ibctmtypes.IterationKey(height),
+ Value: host.ConsensusStateKey(height),
+ })
+
+ }
+ }
+
+ }
+ }
+
+ }
+
+ // if we have metadata for unexpired consensus states, add it to clientsMetadata
+ if len(clientMetadata) != 0 {
+ clientsMetadata = append(clientsMetadata, types.IdentifiedGenesisMetadata{
+ ClientId: client.ClientId,
+ ClientMetadata: clientMetadata,
+ })
+ }
+
+ default:
+ break
+ }
+ }
+ }
+ }
+
+ clientGenState.ClientsConsensus = clientsConsensus
+ clientGenState.ClientsMetadata = clientsMetadata
+ return clientGenState, nil
+}
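A rough sketch of how a chain's migrate command might call this function directly (the inputs `cdc`, `clientGenState` and `genDoc` are assumptions about the surrounding command; most chains will instead go through the `modules/core/legacy/v100` wrapper shown in the migration docs above):

```go
// Sketch only: prune and migrate an exported 02-client genesis state in a migrate command.
// The legacy v1 solo machine types must already be registered on the codec's interface registry.
selfHeight := clienttypes.NewHeight(clienttypes.ParseChainID(genDoc.ChainID), uint64(genDoc.InitialHeight))

migrated, err := v100.MigrateGenesis(cdc, &clientGenState, genDoc.GenesisTime, selfHeight)
if err != nil {
	return err
}
_ = migrated // marshal the migrated state back into the exported app genesis
```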
diff --git a/modules/core/02-client/legacy/v100/genesis_test.go b/modules/core/02-client/legacy/v100/genesis_test.go
new file mode 100644
index 00000000..0c3235c6
--- /dev/null
+++ b/modules/core/02-client/legacy/v100/genesis_test.go
@@ -0,0 +1,311 @@
+package v100_test
+
+import (
+ "bytes"
+ "encoding/json"
+ "time"
+
+ "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/codec"
+ codectypes "github.com/cosmos/cosmos-sdk/codec/types"
+
+ ibcclient "github.com/cosmos/ibc-go/modules/core/02-client"
+ v100 "github.com/cosmos/ibc-go/modules/core/02-client/legacy/v100"
+ "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
+ "github.com/cosmos/ibc-go/modules/core/exported"
+ ibctmtypes "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types"
+ ibctesting "github.com/cosmos/ibc-go/testing"
+ "github.com/cosmos/ibc-go/testing/simapp"
+)
+
+func (suite *LegacyTestSuite) TestMigrateGenesisSolomachine() {
+ path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ encodingConfig := simapp.MakeTestEncodingConfig()
+ clientCtx := client.Context{}.
+ WithInterfaceRegistry(encodingConfig.InterfaceRegistry).
+ WithTxConfig(encodingConfig.TxConfig).
+ WithJSONCodec(encodingConfig.Marshaler)
+
+ // create multiple legacy solo machine clients
+ solomachine := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "06-solomachine-0", "testing", 1)
+ solomachineMulti := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "06-solomachine-1", "testing", 4)
+
+ // create tendermint clients
+ suite.coordinator.SetupClients(path)
+ err := path.EndpointA.UpdateClient()
+ suite.Require().NoError(err)
+ clientGenState := ibcclient.ExportGenesis(path.EndpointA.Chain.GetContext(), path.EndpointA.Chain.App.GetIBCKeeper().ClientKeeper)
+
+ // manually generate old protobuf definitions and set them in genesis
+ // NOTE: we cannot use 'ExportGenesis' for the solo machines since we are
+ // using client states and consensus states which do not implement the exported.ClientState
+ // and exported.ConsensusState interface
+ var clients []types.IdentifiedClientState
+ for _, sm := range []*ibctesting.Solomachine{solomachine, solomachineMulti} {
+ clientState := sm.ClientState()
+
+ var seq uint64
+ if clientState.IsFrozen {
+ seq = 1
+ }
+
+ // generate old client state proto definition
+ legacyClientState := &v100.ClientState{
+ Sequence: clientState.Sequence,
+ FrozenSequence: seq,
+ ConsensusState: &v100.ConsensusState{
+ PublicKey: clientState.ConsensusState.PublicKey,
+ Diversifier: clientState.ConsensusState.Diversifier,
+ Timestamp: clientState.ConsensusState.Timestamp,
+ },
+ AllowUpdateAfterProposal: clientState.AllowUpdateAfterProposal,
+ }
+
+ // set client state
+ any, err := codectypes.NewAnyWithValue(legacyClientState)
+ suite.Require().NoError(err)
+ suite.Require().NotNil(any)
+ client := types.IdentifiedClientState{
+ ClientId: sm.ClientID,
+ ClientState: any,
+ }
+ clients = append(clients, client)
+
+ // set in store for ease of determining expected genesis
+ clientStore := path.EndpointA.Chain.App.GetIBCKeeper().ClientKeeper.ClientStore(path.EndpointA.Chain.GetContext(), sm.ClientID)
+ bz, err := path.EndpointA.Chain.App.AppCodec().MarshalInterface(legacyClientState)
+ suite.Require().NoError(err)
+ clientStore.Set(host.ClientStateKey(), bz)
+
+ // set some consensus states
+ height1 := types.NewHeight(0, 1)
+ height2 := types.NewHeight(1, 2)
+ height3 := types.NewHeight(0, 123)
+
+ any, err = codectypes.NewAnyWithValue(legacyClientState.ConsensusState)
+ suite.Require().NoError(err)
+ suite.Require().NotNil(any)
+ consensusState1 := types.ConsensusStateWithHeight{
+ Height: height1,
+ ConsensusState: any,
+ }
+ consensusState2 := types.ConsensusStateWithHeight{
+ Height: height2,
+ ConsensusState: any,
+ }
+ consensusState3 := types.ConsensusStateWithHeight{
+ Height: height3,
+ ConsensusState: any,
+ }
+
+ clientConsensusState := types.ClientConsensusStates{
+ ClientId: sm.ClientID,
+ ConsensusStates: []types.ConsensusStateWithHeight{consensusState1, consensusState2, consensusState3},
+ }
+
+ clientGenState.ClientsConsensus = append(clientGenState.ClientsConsensus, clientConsensusState)
+
+ // set in store for ease of determining expected genesis
+ bz, err = path.EndpointA.Chain.App.AppCodec().MarshalInterface(legacyClientState.ConsensusState)
+ suite.Require().NoError(err)
+ clientStore.Set(host.ConsensusStateKey(height1), bz)
+ clientStore.Set(host.ConsensusStateKey(height2), bz)
+ clientStore.Set(host.ConsensusStateKey(height3), bz)
+ }
+ // solo machine clients must come before tendermint clients in the expected genesis
+ clientGenState.Clients = append(clients, clientGenState.Clients...)
+
+ // migrate the store to get the expected genesis
+ // store migration and genesis migration should produce identical results
+ err = v100.MigrateStore(path.EndpointA.Chain.GetContext(), path.EndpointA.Chain.GetSimApp().GetKey(host.StoreKey), path.EndpointA.Chain.App.AppCodec())
+ suite.Require().NoError(err)
+ expectedClientGenState := ibcclient.ExportGenesis(path.EndpointA.Chain.GetContext(), path.EndpointA.Chain.App.GetIBCKeeper().ClientKeeper)
+
+ // NOTE: genesis time isn't updated since we aren't testing for tendermint consensus state pruning
+ migrated, err := v100.MigrateGenesis(codec.NewProtoCodec(clientCtx.InterfaceRegistry), &clientGenState, suite.coordinator.CurrentTime, types.GetSelfHeight(suite.chainA.GetContext()))
+ suite.Require().NoError(err)
+
+ // 'ExportGenesis' orders metadata keys by processed height and processed time for all heights, then appends all iteration keys
+ // In order to match the genesis migration with export genesis (from store migrations) we must reorder the iteration keys to be last
+ // This isn't ideal, but it is better than modifying the genesis migration from a previous version to match the export genesis of a new version
+ // which provides no benefit except nicer testing
+ for i, clientMetadata := range migrated.ClientsMetadata {
+ var updatedMetadata []types.GenesisMetadata
+ var iterationKeys []types.GenesisMetadata
+ for _, metadata := range clientMetadata.ClientMetadata {
+ if bytes.HasPrefix(metadata.Key, []byte(ibctmtypes.KeyIterateConsensusStatePrefix)) {
+ iterationKeys = append(iterationKeys, metadata)
+ } else {
+ updatedMetadata = append(updatedMetadata, metadata)
+ }
+ }
+ updatedMetadata = append(updatedMetadata, iterationKeys...)
+ migrated.ClientsMetadata[i] = types.IdentifiedGenesisMetadata{
+ ClientId: clientMetadata.ClientId,
+ ClientMetadata: updatedMetadata,
+ }
+ }
+
+ bz, err := clientCtx.JSONCodec.MarshalJSON(&expectedClientGenState)
+ suite.Require().NoError(err)
+
+ // Indent the JSON bz correctly.
+ var jsonObj map[string]interface{}
+ err = json.Unmarshal(bz, &jsonObj)
+ suite.Require().NoError(err)
+ expectedIndentedBz, err := json.MarshalIndent(jsonObj, "", "\t")
+ suite.Require().NoError(err)
+
+ bz, err = clientCtx.JSONCodec.MarshalJSON(migrated)
+ suite.Require().NoError(err)
+
+ // Indent the JSON bz correctly.
+ err = json.Unmarshal(bz, &jsonObj)
+ suite.Require().NoError(err)
+ indentedBz, err := json.MarshalIndent(jsonObj, "", "\t")
+ suite.Require().NoError(err)
+
+ suite.Require().Equal(string(expectedIndentedBz), string(indentedBz))
+}
+
+func (suite *LegacyTestSuite) TestMigrateGenesisTendermint() {
+ // create two paths and setup clients
+ path1 := ibctesting.NewPath(suite.chainA, suite.chainB)
+ path2 := ibctesting.NewPath(suite.chainA, suite.chainB)
+ encodingConfig := simapp.MakeTestEncodingConfig()
+ clientCtx := client.Context{}.
+ WithInterfaceRegistry(encodingConfig.InterfaceRegistry).
+ WithTxConfig(encodingConfig.TxConfig).
+ WithJSONCodec(encodingConfig.Marshaler)
+
+ suite.coordinator.SetupClients(path1)
+ suite.coordinator.SetupClients(path2)
+
+ // collect all heights expected to be pruned
+ var path1PruneHeights, path2PruneHeights []exported.Height
+ path1PruneHeights = append(path1PruneHeights, path1.EndpointA.GetClientState().GetLatestHeight())
+ path2PruneHeights = append(path2PruneHeights, path2.EndpointA.GetClientState().GetLatestHeight())
+
+ // these heights will be expired and also pruned
+ for i := 0; i < 3; i++ {
+ path1.EndpointA.UpdateClient()
+ path1PruneHeights = append(path1PruneHeights, path1.EndpointA.GetClientState().GetLatestHeight())
+ }
+ for i := 0; i < 3; i++ {
+ path2.EndpointA.UpdateClient()
+ path2PruneHeights = append(path2PruneHeights, path2.EndpointA.GetClientState().GetLatestHeight())
+ }
+
+ // Increment the time by a week
+ suite.coordinator.IncrementTimeBy(7 * 24 * time.Hour)
+
+ // create the consensus state that can be used as trusted height for next update
+ path1.EndpointA.UpdateClient()
+ path1.EndpointA.UpdateClient()
+ path2.EndpointA.UpdateClient()
+ path2.EndpointA.UpdateClient()
+
+ clientGenState := ibcclient.ExportGenesis(suite.chainA.GetContext(), suite.chainA.App.GetIBCKeeper().ClientKeeper)
+ suite.Require().NotNil(clientGenState.Clients)
+ suite.Require().NotNil(clientGenState.ClientsConsensus)
+ suite.Require().NotNil(clientGenState.ClientsMetadata)
+
+ // Increment the time by another week, then update the client.
+ // This will cause the consensus states created before the first time increment
+ // to be expired
+ suite.coordinator.IncrementTimeBy(7 * 24 * time.Hour)
+
+ // migrate the store to get the expected genesis
+ // store migration and genesis migration should produce identical results
+ err := v100.MigrateStore(path1.EndpointA.Chain.GetContext(), path1.EndpointA.Chain.GetSimApp().GetKey(host.StoreKey), path1.EndpointA.Chain.App.AppCodec())
+ suite.Require().NoError(err)
+ expectedClientGenState := ibcclient.ExportGenesis(path1.EndpointA.Chain.GetContext(), path1.EndpointA.Chain.App.GetIBCKeeper().ClientKeeper)
+
+ migrated, err := v100.MigrateGenesis(codec.NewProtoCodec(clientCtx.InterfaceRegistry), &clientGenState, suite.coordinator.CurrentTime, types.GetSelfHeight(suite.chainA.GetContext()))
+ suite.Require().NoError(err)
+
+ // 'ExportGenesis' orders metadata keys by processed height and processed time for all heights, then appends all iteration keys
+ // In order to match the genesis migration with export genesis we must reorder the iteration keys to be last
+ // This isn't ideal, but it is better than modifying the genesis migration from a previous version to match the export genesis of a new version
+ // which provides no benefit except nicer testing
+ for i, clientMetadata := range migrated.ClientsMetadata {
+ var updatedMetadata []types.GenesisMetadata
+ var iterationKeys []types.GenesisMetadata
+ for _, metadata := range clientMetadata.ClientMetadata {
+ if bytes.HasPrefix(metadata.Key, []byte(ibctmtypes.KeyIterateConsensusStatePrefix)) {
+ iterationKeys = append(iterationKeys, metadata)
+ } else {
+ updatedMetadata = append(updatedMetadata, metadata)
+ }
+ }
+ updatedMetadata = append(updatedMetadata, iterationKeys...)
+ migrated.ClientsMetadata[i] = types.IdentifiedGenesisMetadata{
+ ClientId: clientMetadata.ClientId,
+ ClientMetadata: updatedMetadata,
+ }
+ }
+
+ // check path 1 client pruning
+ for _, height := range path1PruneHeights {
+ for _, client := range migrated.ClientsConsensus {
+ if client.ClientId == path1.EndpointA.ClientID {
+ for _, consensusState := range client.ConsensusStates {
+ suite.Require().NotEqual(height, consensusState.Height)
+ }
+ }
+
+ }
+ for _, client := range migrated.ClientsMetadata {
+ if client.ClientId == path1.EndpointA.ClientID {
+ for _, metadata := range client.ClientMetadata {
+ suite.Require().NotEqual(ibctmtypes.ProcessedTimeKey(height), metadata.Key)
+ suite.Require().NotEqual(ibctmtypes.ProcessedHeightKey(height), metadata.Key)
+ suite.Require().NotEqual(ibctmtypes.IterationKey(height), metadata.Key)
+ }
+ }
+ }
+ }
+
+ // check path 2 client pruning
+ for _, height := range path2PruneHeights {
+ for _, client := range migrated.ClientsConsensus {
+ if client.ClientId == path2.EndpointA.ClientID {
+ for _, consensusState := range client.ConsensusStates {
+ suite.Require().NotEqual(height, consensusState.Height)
+ }
+ }
+
+ }
+ for _, client := range migrated.ClientsMetadata {
+ if client.ClientId == path2.EndpointA.ClientID {
+ for _, metadata := range client.ClientMetadata {
+ suite.Require().NotEqual(ibctmtypes.ProcessedTimeKey(height), metadata.Key)
+ suite.Require().NotEqual(ibctmtypes.ProcessedHeightKey(height), metadata.Key)
+ suite.Require().NotEqual(ibctmtypes.IterationKey(height), metadata.Key)
+ }
+ }
+
+ }
+ }
+ bz, err := clientCtx.JSONCodec.MarshalJSON(&expectedClientGenState)
+ suite.Require().NoError(err)
+
+ // Indent the JSON bz correctly.
+ var jsonObj map[string]interface{}
+ err = json.Unmarshal(bz, &jsonObj)
+ suite.Require().NoError(err)
+ expectedIndentedBz, err := json.MarshalIndent(jsonObj, "", "\t")
+ suite.Require().NoError(err)
+
+ bz, err = clientCtx.JSONCodec.MarshalJSON(migrated)
+ suite.Require().NoError(err)
+
+ // Indent the JSON bz correctly.
+ err = json.Unmarshal(bz, &jsonObj)
+ suite.Require().NoError(err)
+ indentedBz, err := json.MarshalIndent(jsonObj, "", "\t")
+ suite.Require().NoError(err)
+
+ suite.Require().Equal(string(expectedIndentedBz), string(indentedBz))
+}
diff --git a/modules/core/02-client/legacy/v100/solomachine.go b/modules/core/02-client/legacy/v100/solomachine.go
new file mode 100644
index 00000000..80b062fa
--- /dev/null
+++ b/modules/core/02-client/legacy/v100/solomachine.go
@@ -0,0 +1,208 @@
+package v100
+
+import (
+ ics23 "github.com/confio/ics23/go"
+ "github.com/cosmos/cosmos-sdk/codec"
+ codectypes "github.com/cosmos/cosmos-sdk/codec/types"
+ cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+
+ "github.com/cosmos/ibc-go/modules/core/exported"
+)
+
+// NOTE: this is a mock implementation of exported.ClientState. This implementation
+// should only be registered on the InterfaceRegistry during cli command genesis migration.
+// This implementation is only used to successfully unmarshal the previous solo machine
+// client state and consensus state and migrate them to the new implementations. When the proto
+// codec unmarshals, it calls UnpackInterfaces() to create a cached value of the any. The
+// UnpackInterfaces function for IdentifiedClientState will attempt to unpack the any to
+// exported.ClientState. If the solo machine v1 type is not registered against exported.ClientState,
+// the unmarshal will fail. This implementation will panic on every interface function.
+// The same is done for the ConsensusState.
+
+// Interface implementation checks.
+var (
+ _, _ codectypes.UnpackInterfacesMessage = &ClientState{}, &ConsensusState{}
+ _ exported.ClientState = (*ClientState)(nil)
+ _ exported.ConsensusState = &ConsensusState{}
+)
+
+func RegisterInterfaces(registry codectypes.InterfaceRegistry) {
+ registry.RegisterImplementations(
+ (*exported.ClientState)(nil),
+ &ClientState{},
+ )
+ registry.RegisterImplementations(
+ (*exported.ConsensusState)(nil),
+ &ConsensusState{},
+ )
+}
+
+// UnpackInterfaces implements the UnpackInterfaceMessages.UnpackInterfaces method
+func (cs ClientState) UnpackInterfaces(unpacker codectypes.AnyUnpacker) error {
+ return cs.ConsensusState.UnpackInterfaces(unpacker)
+}
+
+// UnpackInterfaces implements the UnpackInterfaceMessages.UnpackInterfaces method
+func (cs ConsensusState) UnpackInterfaces(unpacker codectypes.AnyUnpacker) error {
+ return unpacker.UnpackAny(cs.PublicKey, new(cryptotypes.PubKey))
+}
+
+// ClientType panics!
+func (cs ClientState) ClientType() string {
+ panic("legacy solo machine is deprecated!")
+}
+
+// GetLatestHeight panics!
+func (cs ClientState) GetLatestHeight() exported.Height {
+ panic("legacy solo machine is deprecated!")
+}
+
+// Status panics!
+func (cs ClientState) Status(_ sdk.Context, _ sdk.KVStore, _ codec.BinaryCodec) exported.Status {
+ panic("legacy solo machine is deprecated!")
+}
+
+// Validate panics!
+func (cs ClientState) Validate() error {
+ panic("legacy solo machine is deprecated!")
+}
+
+// GetProofSpecs panics!
+func (cs ClientState) GetProofSpecs() []*ics23.ProofSpec {
+ panic("legacy solo machine is deprecated!")
+}
+
+// ZeroCustomFields panics!
+func (cs ClientState) ZeroCustomFields() exported.ClientState {
+ panic("legacy solo machine is deprecated!")
+}
+
+// Initialize panics!
+func (cs ClientState) Initialize(_ sdk.Context, _ codec.BinaryCodec, _ sdk.KVStore, consState exported.ConsensusState) error {
+ panic("legacy solo machine is deprecated!")
+}
+
+// ExportMetadata panics!
+func (cs ClientState) ExportMetadata(_ sdk.KVStore) []exported.GenesisMetadata {
+ panic("legacy solo machine is deprecated!")
+}
+
+// CheckHeaderAndUpdateState panics!
+func (cs *ClientState) CheckHeaderAndUpdateState(
+ _ sdk.Context, _ codec.BinaryCodec, _ sdk.KVStore, _ exported.Header,
+) (exported.ClientState, exported.ConsensusState, error) {
+ panic("legacy solo machine is deprecated!")
+}
+
+// CheckMisbehaviourAndUpdateState panics!
+func (cs ClientState) CheckMisbehaviourAndUpdateState(
+ _ sdk.Context, _ codec.BinaryCodec, _ sdk.KVStore, _ exported.Misbehaviour,
+) (exported.ClientState, error) {
+ panic("legacy solo machine is deprecated!")
+}
+
+// CheckSubstituteAndUpdateState panics!
+func (cs ClientState) CheckSubstituteAndUpdateState(
+ ctx sdk.Context, _ codec.BinaryCodec, _, _ sdk.KVStore,
+ _ exported.ClientState,
+) (exported.ClientState, error) {
+ panic("legacy solo machine is deprecated!")
+}
+
+// VerifyUpgradeAndUpdateState panics!
+func (cs ClientState) VerifyUpgradeAndUpdateState(
+ _ sdk.Context, _ codec.BinaryCodec, _ sdk.KVStore,
+ _ exported.ClientState, _ exported.ConsensusState, _, _ []byte,
+) (exported.ClientState, exported.ConsensusState, error) {
+ panic("legacy solo machine is deprecated!")
+}
+
+// VerifyClientState panics!
+func (cs ClientState) VerifyClientState(
+ store sdk.KVStore, cdc codec.BinaryCodec,
+ _ exported.Height, _ exported.Prefix, _ string, _ []byte, clientState exported.ClientState,
+) error {
+ panic("legacy solo machine is deprecated!")
+}
+
+// VerifyClientConsensusState panics!
+func (cs ClientState) VerifyClientConsensusState(
+ sdk.KVStore, codec.BinaryCodec,
+ exported.Height, string, exported.Height, exported.Prefix,
+ []byte, exported.ConsensusState,
+) error {
+ panic("legacy solo machine is deprecated!")
+}
+
+// VerifyConnectionState panics!
+func (cs ClientState) VerifyConnectionState(
+ sdk.KVStore, codec.BinaryCodec, exported.Height,
+ exported.Prefix, []byte, string, exported.ConnectionI,
+) error {
+ panic("legacy solo machine is deprecated!")
+}
+
+// VerifyChannelState panics!
+func (cs ClientState) VerifyChannelState(
+ sdk.KVStore, codec.BinaryCodec, exported.Height, exported.Prefix,
+ []byte, string, string, exported.ChannelI,
+) error {
+ panic("legacy solo machine is deprecated!")
+}
+
+// VerifyPacketCommitment panics!
+func (cs ClientState) VerifyPacketCommitment(
+ sdk.Context, sdk.KVStore, codec.BinaryCodec, exported.Height,
+ uint64, uint64, exported.Prefix, []byte,
+ string, string, uint64, []byte,
+) error {
+ panic("legacy solo machine is deprecated!")
+}
+
+// VerifyPacketAcknowledgement panics!
+func (cs ClientState) VerifyPacketAcknowledgement(
+ sdk.Context, sdk.KVStore, codec.BinaryCodec, exported.Height,
+ uint64, uint64, exported.Prefix, []byte,
+ string, string, uint64, []byte,
+) error {
+ panic("legacy solo machine is deprecated!")
+}
+
+// VerifyPacketReceiptAbsence panics!
+func (cs ClientState) VerifyPacketReceiptAbsence(
+ sdk.Context, sdk.KVStore, codec.BinaryCodec, exported.Height,
+ uint64, uint64, exported.Prefix, []byte,
+ string, string, uint64,
+) error {
+ panic("legacy solo machine is deprecated!")
+}
+
+// VerifyNextSequenceRecv panics!
+func (cs ClientState) VerifyNextSequenceRecv(
+ sdk.Context, sdk.KVStore, codec.BinaryCodec, exported.Height,
+ uint64, uint64, exported.Prefix, []byte,
+ string, string, uint64,
+) error {
+ panic("legacy solo machine is deprecated!")
+}
+
+// ClientType panics!
+func (ConsensusState) ClientType() string {
+ panic("legacy solo machine is deprecated!")
+}
+
+// GetTimestamp panics!
+func (cs ConsensusState) GetTimestamp() uint64 {
+ panic("legacy solo machine is deprecated!")
+}
+
+// GetRoot panics!
+func (cs ConsensusState) GetRoot() exported.Root {
+ panic("legacy solo machine is deprecated!")
+}
+
+// ValidateBasic panics!
+func (cs ConsensusState) ValidateBasic() error {
+ panic("legacy solo machine is deprecated!")
+}
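As the NOTE at the top of this file stresses, these panic-only implementations exist solely so the proto codec can unpack the old v1 solo machine `Any` values while migrating genesis. A minimal sketch of that registration step in a migrate command; the helper name is hypothetical, only `v100.RegisterInterfaces` comes from this patch:

```go
import (
	codectypes "github.com/cosmos/cosmos-sdk/codec/types"

	v100 "github.com/cosmos/ibc-go/modules/core/02-client/legacy/v100"
)

// registerLegacySolomachine is a hypothetical helper: only the migrate command should
// register the deprecated v1 types, so the running app never resolves them.
func registerLegacySolomachine(registry codectypes.InterfaceRegistry) {
	v100.RegisterInterfaces(registry)
}
```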
diff --git a/modules/core/02-client/legacy/v100/solomachine.pb.go b/modules/core/02-client/legacy/v100/solomachine.pb.go
new file mode 100644
index 00000000..c35edaf8
--- /dev/null
+++ b/modules/core/02-client/legacy/v100/solomachine.pb.go
@@ -0,0 +1,4121 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: ibc/lightclients/solomachine/v1/solomachine.proto
+
+package v100
+
+import (
+ fmt "fmt"
+ types "github.com/cosmos/cosmos-sdk/codec/types"
+ types1 "github.com/cosmos/ibc-go/modules/core/03-connection/types"
+ types2 "github.com/cosmos/ibc-go/modules/core/04-channel/types"
+ _ "github.com/gogo/protobuf/gogoproto"
+ proto "github.com/gogo/protobuf/proto"
+ io "io"
+ math "math"
+ math_bits "math/bits"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+// DataType defines the type of solo machine proof being created. This is done
+// to preserve uniqueness of different data sign byte encodings.
+type DataType int32
+
+const (
+ // Default State
+ UNSPECIFIED DataType = 0
+ // Data type for client state verification
+ CLIENT DataType = 1
+ // Data type for consensus state verification
+ CONSENSUS DataType = 2
+ // Data type for connection state verification
+ CONNECTION DataType = 3
+ // Data type for channel state verification
+ CHANNEL DataType = 4
+ // Data type for packet commitment verification
+ PACKETCOMMITMENT DataType = 5
+ // Data type for packet acknowledgement verification
+ PACKETACKNOWLEDGEMENT DataType = 6
+ // Data type for packet receipt absence verification
+ PACKETRECEIPTABSENCE DataType = 7
+ // Data type for next sequence recv verification
+ NEXTSEQUENCERECV DataType = 8
+ // Data type for header verification
+ HEADER DataType = 9
+)
+
+var DataType_name = map[int32]string{
+ 0: "DATA_TYPE_UNINITIALIZED_UNSPECIFIED",
+ 1: "DATA_TYPE_CLIENT_STATE",
+ 2: "DATA_TYPE_CONSENSUS_STATE",
+ 3: "DATA_TYPE_CONNECTION_STATE",
+ 4: "DATA_TYPE_CHANNEL_STATE",
+ 5: "DATA_TYPE_PACKET_COMMITMENT",
+ 6: "DATA_TYPE_PACKET_ACKNOWLEDGEMENT",
+ 7: "DATA_TYPE_PACKET_RECEIPT_ABSENCE",
+ 8: "DATA_TYPE_NEXT_SEQUENCE_RECV",
+ 9: "DATA_TYPE_HEADER",
+}
+
+var DataType_value = map[string]int32{
+ "DATA_TYPE_UNINITIALIZED_UNSPECIFIED": 0,
+ "DATA_TYPE_CLIENT_STATE": 1,
+ "DATA_TYPE_CONSENSUS_STATE": 2,
+ "DATA_TYPE_CONNECTION_STATE": 3,
+ "DATA_TYPE_CHANNEL_STATE": 4,
+ "DATA_TYPE_PACKET_COMMITMENT": 5,
+ "DATA_TYPE_PACKET_ACKNOWLEDGEMENT": 6,
+ "DATA_TYPE_PACKET_RECEIPT_ABSENCE": 7,
+ "DATA_TYPE_NEXT_SEQUENCE_RECV": 8,
+ "DATA_TYPE_HEADER": 9,
+}
+
+func (x DataType) String() string {
+ return proto.EnumName(DataType_name, int32(x))
+}
+
+func (DataType) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_6cc2ee18f7f86d4e, []int{0}
+}
+
+// ClientState defines a solo machine client that tracks the current consensus
+// state and if the client is frozen.
+type ClientState struct {
+ // latest sequence of the client state
+ Sequence uint64 `protobuf:"varint,1,opt,name=sequence,proto3" json:"sequence,omitempty"`
+ // frozen sequence of the solo machine
+ FrozenSequence uint64 `protobuf:"varint,2,opt,name=frozen_sequence,json=frozenSequence,proto3" json:"frozen_sequence,omitempty" yaml:"frozen_sequence"`
+ ConsensusState *ConsensusState `protobuf:"bytes,3,opt,name=consensus_state,json=consensusState,proto3" json:"consensus_state,omitempty" yaml:"consensus_state"`
+ // when set to true, will allow governance to update a solo machine client.
+ // The client will be unfrozen if it is frozen.
+ AllowUpdateAfterProposal bool `protobuf:"varint,4,opt,name=allow_update_after_proposal,json=allowUpdateAfterProposal,proto3" json:"allow_update_after_proposal,omitempty" yaml:"allow_update_after_proposal"`
+}
+
+func (m *ClientState) Reset() { *m = ClientState{} }
+func (m *ClientState) String() string { return proto.CompactTextString(m) }
+func (*ClientState) ProtoMessage() {}
+func (*ClientState) Descriptor() ([]byte, []int) {
+ return fileDescriptor_6cc2ee18f7f86d4e, []int{0}
+}
+func (m *ClientState) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ClientState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_ClientState.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *ClientState) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ClientState.Merge(m, src)
+}
+func (m *ClientState) XXX_Size() int {
+ return m.Size()
+}
+func (m *ClientState) XXX_DiscardUnknown() {
+ xxx_messageInfo_ClientState.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ClientState proto.InternalMessageInfo
+
+// ConsensusState defines a solo machine consensus state. The sequence of a
+// consensus state is contained in the "height" key used in storing the
+// consensus state.
+type ConsensusState struct {
+ // public key of the solo machine
+ PublicKey *types.Any `protobuf:"bytes,1,opt,name=public_key,json=publicKey,proto3" json:"public_key,omitempty" yaml:"public_key"`
+ // diversifier allows the same public key to be re-used across different solo
+ // machine clients (potentially on different chains) without being considered
+ // misbehaviour.
+ Diversifier string `protobuf:"bytes,2,opt,name=diversifier,proto3" json:"diversifier,omitempty"`
+ Timestamp uint64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
+}
+
+func (m *ConsensusState) Reset() { *m = ConsensusState{} }
+func (m *ConsensusState) String() string { return proto.CompactTextString(m) }
+func (*ConsensusState) ProtoMessage() {}
+func (*ConsensusState) Descriptor() ([]byte, []int) {
+ return fileDescriptor_6cc2ee18f7f86d4e, []int{1}
+}
+func (m *ConsensusState) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ConsensusState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_ConsensusState.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *ConsensusState) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ConsensusState.Merge(m, src)
+}
+func (m *ConsensusState) XXX_Size() int {
+ return m.Size()
+}
+func (m *ConsensusState) XXX_DiscardUnknown() {
+ xxx_messageInfo_ConsensusState.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ConsensusState proto.InternalMessageInfo
+
+// Header defines a solo machine consensus header
+type Header struct {
+ // sequence to update solo machine public key at
+ Sequence uint64 `protobuf:"varint,1,opt,name=sequence,proto3" json:"sequence,omitempty"`
+ Timestamp uint64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
+ Signature []byte `protobuf:"bytes,3,opt,name=signature,proto3" json:"signature,omitempty"`
+ NewPublicKey *types.Any `protobuf:"bytes,4,opt,name=new_public_key,json=newPublicKey,proto3" json:"new_public_key,omitempty" yaml:"new_public_key"`
+ NewDiversifier string `protobuf:"bytes,5,opt,name=new_diversifier,json=newDiversifier,proto3" json:"new_diversifier,omitempty" yaml:"new_diversifier"`
+}
+
+func (m *Header) Reset() { *m = Header{} }
+func (m *Header) String() string { return proto.CompactTextString(m) }
+func (*Header) ProtoMessage() {}
+func (*Header) Descriptor() ([]byte, []int) {
+ return fileDescriptor_6cc2ee18f7f86d4e, []int{2}
+}
+func (m *Header) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Header) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_Header.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *Header) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Header.Merge(m, src)
+}
+func (m *Header) XXX_Size() int {
+ return m.Size()
+}
+func (m *Header) XXX_DiscardUnknown() {
+ xxx_messageInfo_Header.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Header proto.InternalMessageInfo
+
+// Misbehaviour defines misbehaviour for a solo machine which consists
+// of a sequence and two signatures over different messages at that sequence.
+type Misbehaviour struct {
+ ClientId string `protobuf:"bytes,1,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty" yaml:"client_id"`
+ Sequence uint64 `protobuf:"varint,2,opt,name=sequence,proto3" json:"sequence,omitempty"`
+ SignatureOne *SignatureAndData `protobuf:"bytes,3,opt,name=signature_one,json=signatureOne,proto3" json:"signature_one,omitempty" yaml:"signature_one"`
+ SignatureTwo *SignatureAndData `protobuf:"bytes,4,opt,name=signature_two,json=signatureTwo,proto3" json:"signature_two,omitempty" yaml:"signature_two"`
+}
+
+func (m *Misbehaviour) Reset() { *m = Misbehaviour{} }
+func (m *Misbehaviour) String() string { return proto.CompactTextString(m) }
+func (*Misbehaviour) ProtoMessage() {}
+func (*Misbehaviour) Descriptor() ([]byte, []int) {
+ return fileDescriptor_6cc2ee18f7f86d4e, []int{3}
+}
+func (m *Misbehaviour) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Misbehaviour) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_Misbehaviour.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *Misbehaviour) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Misbehaviour.Merge(m, src)
+}
+func (m *Misbehaviour) XXX_Size() int {
+ return m.Size()
+}
+func (m *Misbehaviour) XXX_DiscardUnknown() {
+ xxx_messageInfo_Misbehaviour.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Misbehaviour proto.InternalMessageInfo
+
+// SignatureAndData contains a signature and the data signed over to create that
+// signature.
+type SignatureAndData struct {
+ Signature []byte `protobuf:"bytes,1,opt,name=signature,proto3" json:"signature,omitempty"`
+ DataType DataType `protobuf:"varint,2,opt,name=data_type,json=dataType,proto3,enum=ibc.lightclients.solomachine.v1.DataType" json:"data_type,omitempty" yaml:"data_type"`
+ Data []byte `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"`
+ Timestamp uint64 `protobuf:"varint,4,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
+}
+
+func (m *SignatureAndData) Reset() { *m = SignatureAndData{} }
+func (m *SignatureAndData) String() string { return proto.CompactTextString(m) }
+func (*SignatureAndData) ProtoMessage() {}
+func (*SignatureAndData) Descriptor() ([]byte, []int) {
+ return fileDescriptor_6cc2ee18f7f86d4e, []int{4}
+}
+func (m *SignatureAndData) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SignatureAndData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_SignatureAndData.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *SignatureAndData) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SignatureAndData.Merge(m, src)
+}
+func (m *SignatureAndData) XXX_Size() int {
+ return m.Size()
+}
+func (m *SignatureAndData) XXX_DiscardUnknown() {
+ xxx_messageInfo_SignatureAndData.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SignatureAndData proto.InternalMessageInfo
+
+// TimestampedSignatureData contains the signature data and the timestamp of the
+// signature.
+type TimestampedSignatureData struct {
+ SignatureData []byte `protobuf:"bytes,1,opt,name=signature_data,json=signatureData,proto3" json:"signature_data,omitempty" yaml:"signature_data"`
+ Timestamp uint64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
+}
+
+func (m *TimestampedSignatureData) Reset() { *m = TimestampedSignatureData{} }
+func (m *TimestampedSignatureData) String() string { return proto.CompactTextString(m) }
+func (*TimestampedSignatureData) ProtoMessage() {}
+func (*TimestampedSignatureData) Descriptor() ([]byte, []int) {
+ return fileDescriptor_6cc2ee18f7f86d4e, []int{5}
+}
+func (m *TimestampedSignatureData) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *TimestampedSignatureData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_TimestampedSignatureData.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *TimestampedSignatureData) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_TimestampedSignatureData.Merge(m, src)
+}
+func (m *TimestampedSignatureData) XXX_Size() int {
+ return m.Size()
+}
+func (m *TimestampedSignatureData) XXX_DiscardUnknown() {
+ xxx_messageInfo_TimestampedSignatureData.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TimestampedSignatureData proto.InternalMessageInfo
+
+// SignBytes defines the signed bytes used for signature verification.
+type SignBytes struct {
+ Sequence uint64 `protobuf:"varint,1,opt,name=sequence,proto3" json:"sequence,omitempty"`
+ Timestamp uint64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
+ Diversifier string `protobuf:"bytes,3,opt,name=diversifier,proto3" json:"diversifier,omitempty"`
+ // type of the data used
+ DataType DataType `protobuf:"varint,4,opt,name=data_type,json=dataType,proto3,enum=ibc.lightclients.solomachine.v1.DataType" json:"data_type,omitempty" yaml:"data_type"`
+ // marshaled data
+ Data []byte `protobuf:"bytes,5,opt,name=data,proto3" json:"data,omitempty"`
+}
+
+func (m *SignBytes) Reset() { *m = SignBytes{} }
+func (m *SignBytes) String() string { return proto.CompactTextString(m) }
+func (*SignBytes) ProtoMessage() {}
+func (*SignBytes) Descriptor() ([]byte, []int) {
+ return fileDescriptor_6cc2ee18f7f86d4e, []int{6}
+}
+func (m *SignBytes) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SignBytes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_SignBytes.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *SignBytes) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SignBytes.Merge(m, src)
+}
+func (m *SignBytes) XXX_Size() int {
+ return m.Size()
+}
+func (m *SignBytes) XXX_DiscardUnknown() {
+ xxx_messageInfo_SignBytes.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SignBytes proto.InternalMessageInfo
+
+// HeaderData returns the SignBytes data for update verification.
+type HeaderData struct {
+ // header public key
+ NewPubKey *types.Any `protobuf:"bytes,1,opt,name=new_pub_key,json=newPubKey,proto3" json:"new_pub_key,omitempty" yaml:"new_pub_key"`
+ // header diversifier
+ NewDiversifier string `protobuf:"bytes,2,opt,name=new_diversifier,json=newDiversifier,proto3" json:"new_diversifier,omitempty" yaml:"new_diversifier"`
+}
+
+func (m *HeaderData) Reset() { *m = HeaderData{} }
+func (m *HeaderData) String() string { return proto.CompactTextString(m) }
+func (*HeaderData) ProtoMessage() {}
+func (*HeaderData) Descriptor() ([]byte, []int) {
+ return fileDescriptor_6cc2ee18f7f86d4e, []int{7}
+}
+func (m *HeaderData) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *HeaderData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_HeaderData.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *HeaderData) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_HeaderData.Merge(m, src)
+}
+func (m *HeaderData) XXX_Size() int {
+ return m.Size()
+}
+func (m *HeaderData) XXX_DiscardUnknown() {
+ xxx_messageInfo_HeaderData.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_HeaderData proto.InternalMessageInfo
+
+// ClientStateData returns the SignBytes data for client state verification.
+type ClientStateData struct {
+ Path []byte `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
+ ClientState *types.Any `protobuf:"bytes,2,opt,name=client_state,json=clientState,proto3" json:"client_state,omitempty" yaml:"client_state"`
+}
+
+func (m *ClientStateData) Reset() { *m = ClientStateData{} }
+func (m *ClientStateData) String() string { return proto.CompactTextString(m) }
+func (*ClientStateData) ProtoMessage() {}
+func (*ClientStateData) Descriptor() ([]byte, []int) {
+ return fileDescriptor_6cc2ee18f7f86d4e, []int{8}
+}
+func (m *ClientStateData) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ClientStateData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_ClientStateData.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *ClientStateData) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ClientStateData.Merge(m, src)
+}
+func (m *ClientStateData) XXX_Size() int {
+ return m.Size()
+}
+func (m *ClientStateData) XXX_DiscardUnknown() {
+ xxx_messageInfo_ClientStateData.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ClientStateData proto.InternalMessageInfo
+
+// ConsensusStateData returns the SignBytes data for consensus state
+// verification.
+type ConsensusStateData struct {
+ Path []byte `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
+ ConsensusState *types.Any `protobuf:"bytes,2,opt,name=consensus_state,json=consensusState,proto3" json:"consensus_state,omitempty" yaml:"consensus_state"`
+}
+
+func (m *ConsensusStateData) Reset() { *m = ConsensusStateData{} }
+func (m *ConsensusStateData) String() string { return proto.CompactTextString(m) }
+func (*ConsensusStateData) ProtoMessage() {}
+func (*ConsensusStateData) Descriptor() ([]byte, []int) {
+ return fileDescriptor_6cc2ee18f7f86d4e, []int{9}
+}
+func (m *ConsensusStateData) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ConsensusStateData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_ConsensusStateData.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *ConsensusStateData) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ConsensusStateData.Merge(m, src)
+}
+func (m *ConsensusStateData) XXX_Size() int {
+ return m.Size()
+}
+func (m *ConsensusStateData) XXX_DiscardUnknown() {
+ xxx_messageInfo_ConsensusStateData.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ConsensusStateData proto.InternalMessageInfo
+
+// ConnectionStateData returns the SignBytes data for connection state
+// verification.
+type ConnectionStateData struct {
+ Path []byte `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
+ Connection *types1.ConnectionEnd `protobuf:"bytes,2,opt,name=connection,proto3" json:"connection,omitempty"`
+}
+
+func (m *ConnectionStateData) Reset() { *m = ConnectionStateData{} }
+func (m *ConnectionStateData) String() string { return proto.CompactTextString(m) }
+func (*ConnectionStateData) ProtoMessage() {}
+func (*ConnectionStateData) Descriptor() ([]byte, []int) {
+ return fileDescriptor_6cc2ee18f7f86d4e, []int{10}
+}
+func (m *ConnectionStateData) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ConnectionStateData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_ConnectionStateData.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *ConnectionStateData) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ConnectionStateData.Merge(m, src)
+}
+func (m *ConnectionStateData) XXX_Size() int {
+ return m.Size()
+}
+func (m *ConnectionStateData) XXX_DiscardUnknown() {
+ xxx_messageInfo_ConnectionStateData.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ConnectionStateData proto.InternalMessageInfo
+
+// ChannelStateData returns the SignBytes data for channel state
+// verification.
+type ChannelStateData struct {
+ Path []byte `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
+ Channel *types2.Channel `protobuf:"bytes,2,opt,name=channel,proto3" json:"channel,omitempty"`
+}
+
+func (m *ChannelStateData) Reset() { *m = ChannelStateData{} }
+func (m *ChannelStateData) String() string { return proto.CompactTextString(m) }
+func (*ChannelStateData) ProtoMessage() {}
+func (*ChannelStateData) Descriptor() ([]byte, []int) {
+ return fileDescriptor_6cc2ee18f7f86d4e, []int{11}
+}
+func (m *ChannelStateData) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ChannelStateData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_ChannelStateData.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *ChannelStateData) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ChannelStateData.Merge(m, src)
+}
+func (m *ChannelStateData) XXX_Size() int {
+ return m.Size()
+}
+func (m *ChannelStateData) XXX_DiscardUnknown() {
+ xxx_messageInfo_ChannelStateData.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ChannelStateData proto.InternalMessageInfo
+
+// PacketCommitmentData returns the SignBytes data for packet commitment
+// verification.
+type PacketCommitmentData struct {
+ Path []byte `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
+ Commitment []byte `protobuf:"bytes,2,opt,name=commitment,proto3" json:"commitment,omitempty"`
+}
+
+func (m *PacketCommitmentData) Reset() { *m = PacketCommitmentData{} }
+func (m *PacketCommitmentData) String() string { return proto.CompactTextString(m) }
+func (*PacketCommitmentData) ProtoMessage() {}
+func (*PacketCommitmentData) Descriptor() ([]byte, []int) {
+ return fileDescriptor_6cc2ee18f7f86d4e, []int{12}
+}
+func (m *PacketCommitmentData) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *PacketCommitmentData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_PacketCommitmentData.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *PacketCommitmentData) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_PacketCommitmentData.Merge(m, src)
+}
+func (m *PacketCommitmentData) XXX_Size() int {
+ return m.Size()
+}
+func (m *PacketCommitmentData) XXX_DiscardUnknown() {
+ xxx_messageInfo_PacketCommitmentData.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PacketCommitmentData proto.InternalMessageInfo
+
+func (m *PacketCommitmentData) GetPath() []byte {
+ if m != nil {
+ return m.Path
+ }
+ return nil
+}
+
+func (m *PacketCommitmentData) GetCommitment() []byte {
+ if m != nil {
+ return m.Commitment
+ }
+ return nil
+}
+
+// PacketAcknowledgementData returns the SignBytes data for acknowledgement
+// verification.
+type PacketAcknowledgementData struct {
+ Path []byte `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
+ Acknowledgement []byte `protobuf:"bytes,2,opt,name=acknowledgement,proto3" json:"acknowledgement,omitempty"`
+}
+
+func (m *PacketAcknowledgementData) Reset() { *m = PacketAcknowledgementData{} }
+func (m *PacketAcknowledgementData) String() string { return proto.CompactTextString(m) }
+func (*PacketAcknowledgementData) ProtoMessage() {}
+func (*PacketAcknowledgementData) Descriptor() ([]byte, []int) {
+ return fileDescriptor_6cc2ee18f7f86d4e, []int{13}
+}
+func (m *PacketAcknowledgementData) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *PacketAcknowledgementData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_PacketAcknowledgementData.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *PacketAcknowledgementData) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_PacketAcknowledgementData.Merge(m, src)
+}
+func (m *PacketAcknowledgementData) XXX_Size() int {
+ return m.Size()
+}
+func (m *PacketAcknowledgementData) XXX_DiscardUnknown() {
+ xxx_messageInfo_PacketAcknowledgementData.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PacketAcknowledgementData proto.InternalMessageInfo
+
+func (m *PacketAcknowledgementData) GetPath() []byte {
+ if m != nil {
+ return m.Path
+ }
+ return nil
+}
+
+func (m *PacketAcknowledgementData) GetAcknowledgement() []byte {
+ if m != nil {
+ return m.Acknowledgement
+ }
+ return nil
+}
+
+// PacketReceiptAbsenceData returns the SignBytes data for
+// packet receipt absence verification.
+type PacketReceiptAbsenceData struct {
+ Path []byte `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
+}
+
+func (m *PacketReceiptAbsenceData) Reset() { *m = PacketReceiptAbsenceData{} }
+func (m *PacketReceiptAbsenceData) String() string { return proto.CompactTextString(m) }
+func (*PacketReceiptAbsenceData) ProtoMessage() {}
+func (*PacketReceiptAbsenceData) Descriptor() ([]byte, []int) {
+ return fileDescriptor_6cc2ee18f7f86d4e, []int{14}
+}
+func (m *PacketReceiptAbsenceData) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *PacketReceiptAbsenceData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_PacketReceiptAbsenceData.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *PacketReceiptAbsenceData) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_PacketReceiptAbsenceData.Merge(m, src)
+}
+func (m *PacketReceiptAbsenceData) XXX_Size() int {
+ return m.Size()
+}
+func (m *PacketReceiptAbsenceData) XXX_DiscardUnknown() {
+ xxx_messageInfo_PacketReceiptAbsenceData.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PacketReceiptAbsenceData proto.InternalMessageInfo
+
+func (m *PacketReceiptAbsenceData) GetPath() []byte {
+ if m != nil {
+ return m.Path
+ }
+ return nil
+}
+
+// NextSequenceRecvData returns the SignBytes data for verification of the next
+// sequence to be received.
+type NextSequenceRecvData struct {
+ Path []byte `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
+ NextSeqRecv uint64 `protobuf:"varint,2,opt,name=next_seq_recv,json=nextSeqRecv,proto3" json:"next_seq_recv,omitempty" yaml:"next_seq_recv"`
+}
+
+func (m *NextSequenceRecvData) Reset() { *m = NextSequenceRecvData{} }
+func (m *NextSequenceRecvData) String() string { return proto.CompactTextString(m) }
+func (*NextSequenceRecvData) ProtoMessage() {}
+func (*NextSequenceRecvData) Descriptor() ([]byte, []int) {
+ return fileDescriptor_6cc2ee18f7f86d4e, []int{15}
+}
+func (m *NextSequenceRecvData) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *NextSequenceRecvData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_NextSequenceRecvData.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *NextSequenceRecvData) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_NextSequenceRecvData.Merge(m, src)
+}
+func (m *NextSequenceRecvData) XXX_Size() int {
+ return m.Size()
+}
+func (m *NextSequenceRecvData) XXX_DiscardUnknown() {
+ xxx_messageInfo_NextSequenceRecvData.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NextSequenceRecvData proto.InternalMessageInfo
+
+func (m *NextSequenceRecvData) GetPath() []byte {
+ if m != nil {
+ return m.Path
+ }
+ return nil
+}
+
+func (m *NextSequenceRecvData) GetNextSeqRecv() uint64 {
+ if m != nil {
+ return m.NextSeqRecv
+ }
+ return 0
+}
+
+func init() {
+ proto.RegisterEnum("ibc.lightclients.solomachine.v1.DataType", DataType_name, DataType_value)
+ proto.RegisterType((*ClientState)(nil), "ibc.lightclients.solomachine.v1.ClientState")
+ proto.RegisterType((*ConsensusState)(nil), "ibc.lightclients.solomachine.v1.ConsensusState")
+ proto.RegisterType((*Header)(nil), "ibc.lightclients.solomachine.v1.Header")
+ proto.RegisterType((*Misbehaviour)(nil), "ibc.lightclients.solomachine.v1.Misbehaviour")
+ proto.RegisterType((*SignatureAndData)(nil), "ibc.lightclients.solomachine.v1.SignatureAndData")
+ proto.RegisterType((*TimestampedSignatureData)(nil), "ibc.lightclients.solomachine.v1.TimestampedSignatureData")
+ proto.RegisterType((*SignBytes)(nil), "ibc.lightclients.solomachine.v1.SignBytes")
+ proto.RegisterType((*HeaderData)(nil), "ibc.lightclients.solomachine.v1.HeaderData")
+ proto.RegisterType((*ClientStateData)(nil), "ibc.lightclients.solomachine.v1.ClientStateData")
+ proto.RegisterType((*ConsensusStateData)(nil), "ibc.lightclients.solomachine.v1.ConsensusStateData")
+ proto.RegisterType((*ConnectionStateData)(nil), "ibc.lightclients.solomachine.v1.ConnectionStateData")
+ proto.RegisterType((*ChannelStateData)(nil), "ibc.lightclients.solomachine.v1.ChannelStateData")
+ proto.RegisterType((*PacketCommitmentData)(nil), "ibc.lightclients.solomachine.v1.PacketCommitmentData")
+ proto.RegisterType((*PacketAcknowledgementData)(nil), "ibc.lightclients.solomachine.v1.PacketAcknowledgementData")
+ proto.RegisterType((*PacketReceiptAbsenceData)(nil), "ibc.lightclients.solomachine.v1.PacketReceiptAbsenceData")
+ proto.RegisterType((*NextSequenceRecvData)(nil), "ibc.lightclients.solomachine.v1.NextSequenceRecvData")
+}
+
+func init() {
+ proto.RegisterFile("ibc/lightclients/solomachine/v1/solomachine.proto", fileDescriptor_6cc2ee18f7f86d4e)
+}
+
+var fileDescriptor_6cc2ee18f7f86d4e = []byte{
+ // 1368 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x57, 0xdf, 0x8e, 0xdb, 0x54,
+ 0x13, 0x5f, 0xa7, 0xe9, 0x76, 0x33, 0xd9, 0xee, 0xe6, 0x73, 0xd3, 0x36, 0xeb, 0x56, 0x89, 0x3f,
+ 0x7f, 0xfa, 0xca, 0x82, 0x68, 0xd2, 0x5d, 0x44, 0x85, 0x0a, 0x02, 0x1c, 0xc7, 0xd0, 0xb4, 0xbb,
+ 0xde, 0xe0, 0x38, 0x40, 0x2b, 0x24, 0xcb, 0x71, 0xce, 0x26, 0x56, 0x13, 0x9f, 0x10, 0x3b, 0x49,
+ 0x83, 0x84, 0x84, 0xb8, 0x2a, 0x11, 0x17, 0xbc, 0x40, 0x24, 0x04, 0xe2, 0x55, 0x80, 0xcb, 0x72,
+ 0xc7, 0x55, 0x40, 0xed, 0x1b, 0xe4, 0x09, 0x90, 0x7d, 0x4e, 0x62, 0x3b, 0xdb, 0xcd, 0x8a, 0x7f,
+ 0x77, 0xe7, 0xcc, 0xfc, 0xe6, 0x37, 0x73, 0x66, 0xc6, 0x73, 0x8e, 0x61, 0xcf, 0xaa, 0x9b, 0x85,
+ 0xb6, 0xd5, 0x6c, 0xb9, 0x66, 0xdb, 0x42, 0xb6, 0xeb, 0x14, 0x1c, 0xdc, 0xc6, 0x1d, 0xc3, 0x6c,
+ 0x59, 0x36, 0x2a, 0x0c, 0xf6, 0xc2, 0xdb, 0x7c, 0xb7, 0x87, 0x5d, 0xcc, 0xe6, 0xac, 0xba, 0x99,
+ 0x0f, 0x9b, 0xe4, 0xc3, 0x98, 0xc1, 0x1e, 0xf7, 0x92, 0xc7, 0x69, 0xe2, 0x1e, 0x2a, 0x98, 0xd8,
+ 0xb6, 0x91, 0xe9, 0x5a, 0xd8, 0xf6, 0xa8, 0x82, 0x1d, 0x61, 0xe2, 0xfe, 0x1b, 0x00, 0x5b, 0x86,
+ 0x6d, 0xa3, 0xb6, 0x8f, 0x22, 0x4b, 0x0a, 0x49, 0x37, 0x71, 0x13, 0xfb, 0xcb, 0x82, 0xb7, 0xa2,
+ 0xd2, 0x9d, 0x26, 0xc6, 0xcd, 0x36, 0x2a, 0xf8, 0xbb, 0x7a, 0xff, 0xb8, 0x60, 0xd8, 0x23, 0xa2,
+ 0x12, 0x7e, 0x89, 0x41, 0x52, 0xf2, 0xe3, 0xaa, 0xba, 0x86, 0x8b, 0x58, 0x0e, 0x36, 0x1c, 0xf4,
+ 0x69, 0x1f, 0xd9, 0x26, 0xca, 0x30, 0x3c, 0xb3, 0x1b, 0x57, 0x17, 0x7b, 0x56, 0x82, 0xed, 0xe3,
+ 0x1e, 0xfe, 0x0c, 0xd9, 0xfa, 0x02, 0x12, 0xf3, 0x20, 0x45, 0x6e, 0x36, 0xcd, 0x5d, 0x19, 0x19,
+ 0x9d, 0xf6, 0x1d, 0x61, 0x09, 0x20, 0xa8, 0x5b, 0x44, 0x52, 0x9d, 0x93, 0xb8, 0xb0, 0x6d, 0x62,
+ 0xdb, 0x41, 0xb6, 0xd3, 0x77, 0x74, 0xc7, 0xf3, 0x99, 0x39, 0xc7, 0x33, 0xbb, 0xc9, 0xfd, 0x42,
+ 0xfe, 0x8c, 0x44, 0xe5, 0xa5, 0xb9, 0x9d, 0x1f, 0x6a, 0xd8, 0xeb, 0x12, 0xa3, 0xa0, 0x6e, 0x99,
+ 0x11, 0x2c, 0x8b, 0xe0, 0x9a, 0xd1, 0x6e, 0xe3, 0xa1, 0xde, 0xef, 0x36, 0x0c, 0x17, 0xe9, 0xc6,
+ 0xb1, 0x8b, 0x7a, 0x7a, 0xb7, 0x87, 0xbb, 0xd8, 0x31, 0xda, 0x99, 0x38, 0xcf, 0xec, 0x6e, 0x14,
+ 0x6f, 0xcc, 0xa6, 0x39, 0x81, 0x10, 0xae, 0x00, 0x0b, 0x6a, 0xc6, 0xd7, 0xd6, 0x7c, 0xa5, 0xe8,
+ 0xe9, 0x2a, 0x54, 0x75, 0x27, 0xfe, 0xe4, 0xdb, 0xdc, 0x9a, 0xf0, 0x1d, 0x03, 0x5b, 0xd1, 0x58,
+ 0xd9, 0x7b, 0x00, 0xdd, 0x7e, 0xbd, 0x6d, 0x99, 0xfa, 0x23, 0x34, 0xf2, 0x13, 0x9b, 0xdc, 0x4f,
+ 0xe7, 0x49, 0x59, 0xf2, 0xf3, 0xb2, 0xe4, 0x45, 0x7b, 0x54, 0xbc, 0x3c, 0x9b, 0xe6, 0xfe, 0x43,
+ 0x82, 0x08, 0x2c, 0x04, 0x35, 0x41, 0x36, 0xf7, 0xd1, 0x88, 0xe5, 0x21, 0xd9, 0xb0, 0x06, 0xa8,
+ 0xe7, 0x58, 0xc7, 0x16, 0xea, 0xf9, 0x25, 0x48, 0xa8, 0x61, 0x11, 0x7b, 0x1d, 0x12, 0xae, 0xd5,
+ 0x41, 0x8e, 0x6b, 0x74, 0xba, 0x7e, 0x76, 0xe3, 0x6a, 0x20, 0xa0, 0x41, 0x7e, 0x19, 0x83, 0xf5,
+ 0xbb, 0xc8, 0x68, 0xa0, 0xde, 0xca, 0x9a, 0x47, 0xa8, 0x62, 0x4b, 0x54, 0x9e, 0xd6, 0xb1, 0x9a,
+ 0xb6, 0xe1, 0xf6, 0x7b, 0xa4, 0x8c, 0x9b, 0x6a, 0x20, 0x60, 0x6b, 0xb0, 0x65, 0xa3, 0xa1, 0x1e,
+ 0x3a, 0x78, 0x7c, 0xc5, 0xc1, 0x77, 0x66, 0xd3, 0xdc, 0x65, 0x72, 0xf0, 0xa8, 0x95, 0xa0, 0x6e,
+ 0xda, 0x68, 0x58, 0x59, 0x9c, 0x5f, 0x82, 0x6d, 0x0f, 0x10, 0xce, 0xc1, 0x79, 0x2f, 0x07, 0xe1,
+ 0x86, 0x58, 0x02, 0x08, 0xaa, 0x17, 0x49, 0x29, 0x10, 0xd0, 0x24, 0xfc, 0x14, 0x83, 0xcd, 0x43,
+ 0xcb, 0xa9, 0xa3, 0x96, 0x31, 0xb0, 0x70, 0xbf, 0xc7, 0xee, 0x41, 0x82, 0x34, 0x9f, 0x6e, 0x35,
+ 0xfc, 0x5c, 0x24, 0x8a, 0xe9, 0xd9, 0x34, 0x97, 0xa2, 0x6d, 0x36, 0x57, 0x09, 0xea, 0x06, 0x59,
+ 0x97, 0x1b, 0x91, 0xec, 0xc5, 0x96, 0xb2, 0xd7, 0x85, 0x8b, 0x8b, 0x74, 0xe8, 0xd8, 0x9e, 0xb7,
+ 0xfa, 0xde, 0x99, 0xad, 0x5e, 0x9d, 0x5b, 0x89, 0x76, 0xa3, 0x64, 0xb8, 0x46, 0x31, 0x33, 0x9b,
+ 0xe6, 0xd2, 0x24, 0x8a, 0x08, 0xa3, 0xa0, 0x6e, 0x2e, 0xf6, 0x47, 0xf6, 0x92, 0x47, 0x77, 0x88,
+ 0x69, 0xca, 0xff, 0x29, 0x8f, 0xee, 0x10, 0x87, 0x3d, 0x6a, 0x43, 0x4c, 0x33, 0xf9, 0x23, 0x03,
+ 0xa9, 0x65, 0x8a, 0x68, 0x7b, 0x30, 0xcb, 0xed, 0xf1, 0x09, 0x24, 0x1a, 0x86, 0x6b, 0xe8, 0xee,
+ 0xa8, 0x4b, 0x32, 0xb7, 0xb5, 0xff, 0xf2, 0x99, 0x61, 0x7a, 0xbc, 0xda, 0xa8, 0x8b, 0xc2, 0x65,
+ 0x59, 0xb0, 0x08, 0xea, 0x46, 0x83, 0xea, 0x59, 0x16, 0xe2, 0xde, 0x9a, 0x76, 0xa5, 0xbf, 0x8e,
+ 0x36, 0x73, 0xfc, 0xc5, 0xdf, 0xc5, 0x17, 0x0c, 0x64, 0xb4, 0xb9, 0x0c, 0x35, 0x16, 0x67, 0xf2,
+ 0x0f, 0xf4, 0x2e, 0x6c, 0x05, 0xb9, 0xf0, 0xe9, 0xfd, 0x53, 0x85, 0x7b, 0x37, 0xaa, 0x17, 0xd4,
+ 0xa0, 0x1c, 0xa5, 0x13, 0x21, 0xc4, 0x5e, 0x1c, 0xc2, 0x6f, 0x0c, 0x24, 0x3c, 0xbf, 0xc5, 0x91,
+ 0x8b, 0x9c, 0xbf, 0xf1, 0x75, 0x2e, 0x0d, 0x8a, 0x73, 0x27, 0x07, 0x45, 0xa4, 0x04, 0xf1, 0x7f,
+ 0xab, 0x04, 0xe7, 0x83, 0x12, 0xd0, 0x13, 0xfe, 0xc0, 0x00, 0x90, 0xe1, 0xe3, 0x27, 0xe5, 0x00,
+ 0x92, 0xf4, 0x93, 0x3f, 0x73, 0x3c, 0x5e, 0x99, 0x4d, 0x73, 0x6c, 0x64, 0x4a, 0xd0, 0xf9, 0x48,
+ 0x46, 0xc4, 0x29, 0xf3, 0x21, 0xf6, 0x17, 0xe7, 0xc3, 0xe7, 0xb0, 0x1d, 0xba, 0x1c, 0xfd, 0x58,
+ 0x59, 0x88, 0x77, 0x0d, 0xb7, 0x45, 0xdb, 0xd9, 0x5f, 0xb3, 0x15, 0xd8, 0xa4, 0xa3, 0x81, 0x5c,
+ 0x68, 0xb1, 0x15, 0x07, 0xb8, 0x3a, 0x9b, 0xe6, 0x2e, 0x45, 0xc6, 0x09, 0xbd, 0xb2, 0x92, 0x66,
+ 0xe0, 0x89, 0xba, 0xff, 0x8a, 0x01, 0x36, 0x7a, 0x91, 0x9c, 0x1a, 0xc2, 0x83, 0x93, 0xd7, 0xea,
+ 0xaa, 0x28, 0xfe, 0xc4, 0xdd, 0x49, 0x63, 0x19, 0xc0, 0x25, 0x69, 0xf1, 0x20, 0x59, 0x1d, 0x8b,
+ 0x0c, 0x10, 0xbc, 0x5d, 0x68, 0x18, 0xff, 0xf7, 0xdb, 0xca, 0x7b, 0xbc, 0xe4, 0x43, 0xef, 0x1a,
+ 0x72, 0xa9, 0xd3, 0x9d, 0x6c, 0x37, 0xd4, 0x90, 0x21, 0xf5, 0xdb, 0x80, 0x94, 0x44, 0x9e, 0x38,
+ 0xab, 0x9d, 0xde, 0x86, 0x0b, 0xf4, 0x29, 0x44, 0x3d, 0x5e, 0x0f, 0x79, 0xa4, 0x6f, 0x24, 0xcf,
+ 0x1d, 0x59, 0xaa, 0x73, 0x30, 0xf5, 0x72, 0x0f, 0xd2, 0x15, 0xc3, 0x7c, 0x84, 0x5c, 0x09, 0x77,
+ 0x3a, 0x96, 0xdb, 0x41, 0xb6, 0x7b, 0xaa, 0xa7, 0xac, 0x77, 0xbc, 0x39, 0xca, 0x77, 0xb6, 0xa9,
+ 0x86, 0x24, 0xc2, 0x03, 0xd8, 0x21, 0x5c, 0xa2, 0xf9, 0xc8, 0xc6, 0xc3, 0x36, 0x6a, 0x34, 0xd1,
+ 0x4a, 0xc2, 0x5d, 0xd8, 0x36, 0xa2, 0x50, 0xca, 0xba, 0x2c, 0x16, 0xf2, 0x90, 0x21, 0xd4, 0x2a,
+ 0x32, 0x91, 0xd5, 0x75, 0xc5, 0xba, 0xe3, 0xcd, 0x81, 0xd3, 0x98, 0x85, 0x16, 0xa4, 0x15, 0xf4,
+ 0xd8, 0x9d, 0x3f, 0xbe, 0x54, 0x64, 0x0e, 0x4e, 0x8d, 0xe2, 0x2d, 0xb8, 0x68, 0xa3, 0xc7, 0xae,
+ 0xf7, 0x74, 0xd3, 0x7b, 0xc8, 0x1c, 0xd0, 0xb7, 0x5d, 0xe8, 0x1a, 0x88, 0xa8, 0x05, 0x35, 0x69,
+ 0x13, 0x6a, 0x8f, 0xf5, 0x95, 0xaf, 0xe3, 0xb0, 0x31, 0x1f, 0x0c, 0xec, 0x1b, 0xf0, 0xbf, 0x92,
+ 0xa8, 0x89, 0xba, 0xf6, 0xa0, 0x22, 0xeb, 0x35, 0xa5, 0xac, 0x94, 0xb5, 0xb2, 0x78, 0x50, 0x7e,
+ 0x28, 0x97, 0xf4, 0x9a, 0x52, 0xad, 0xc8, 0x52, 0xf9, 0xbd, 0xb2, 0x5c, 0x4a, 0xad, 0x71, 0xdb,
+ 0xe3, 0x09, 0x9f, 0x0c, 0x89, 0xd8, 0x1b, 0x70, 0x25, 0xb0, 0x94, 0x0e, 0xca, 0xb2, 0xa2, 0xe9,
+ 0x55, 0x4d, 0xd4, 0xe4, 0x14, 0xc3, 0xc1, 0x78, 0xc2, 0xaf, 0x13, 0x19, 0xfb, 0x2a, 0xec, 0x84,
+ 0x70, 0x47, 0x4a, 0x55, 0x56, 0xaa, 0xb5, 0x2a, 0x85, 0xc6, 0xb8, 0x8b, 0xe3, 0x09, 0x9f, 0x58,
+ 0x88, 0xd9, 0x3c, 0x70, 0x11, 0xb4, 0x22, 0x4b, 0x5a, 0xf9, 0x48, 0xa1, 0xf0, 0x73, 0xdc, 0xd6,
+ 0x78, 0xc2, 0x43, 0x20, 0x67, 0x77, 0xe1, 0x6a, 0x08, 0x7f, 0x57, 0x54, 0x14, 0xf9, 0x80, 0x82,
+ 0xe3, 0x5c, 0x72, 0x3c, 0xe1, 0x2f, 0x50, 0x21, 0xfb, 0x3a, 0x5c, 0x0b, 0x90, 0x15, 0x51, 0xba,
+ 0x2f, 0x6b, 0xba, 0x74, 0x74, 0x78, 0x58, 0xd6, 0x0e, 0x65, 0x45, 0x4b, 0x9d, 0xe7, 0xd2, 0xe3,
+ 0x09, 0x9f, 0x22, 0x8a, 0x40, 0xce, 0xbe, 0x03, 0xfc, 0x09, 0x33, 0x51, 0xba, 0xaf, 0x1c, 0x7d,
+ 0x74, 0x20, 0x97, 0xde, 0x97, 0x7d, 0xdb, 0x75, 0x6e, 0x67, 0x3c, 0xe1, 0x2f, 0x13, 0xed, 0x92,
+ 0x92, 0x7d, 0xfb, 0x05, 0x04, 0xaa, 0x2c, 0xc9, 0xe5, 0x8a, 0xa6, 0x8b, 0xc5, 0xaa, 0xac, 0x48,
+ 0x72, 0xea, 0x02, 0x97, 0x19, 0x4f, 0xf8, 0x34, 0xd1, 0x52, 0x25, 0xd5, 0xb1, 0xb7, 0xe1, 0x7a,
+ 0x60, 0xaf, 0xc8, 0x1f, 0x6b, 0x7a, 0x55, 0xfe, 0xa0, 0xe6, 0xa9, 0x3c, 0x9a, 0x0f, 0x53, 0x1b,
+ 0x24, 0x70, 0x4f, 0x33, 0x57, 0x78, 0x72, 0x96, 0x87, 0x54, 0x60, 0x77, 0x57, 0x16, 0x4b, 0xb2,
+ 0x9a, 0x4a, 0x90, 0xca, 0x90, 0x1d, 0x17, 0x7f, 0xf2, 0x7d, 0x76, 0xad, 0x58, 0xfb, 0xf9, 0x59,
+ 0x96, 0x79, 0xfa, 0x2c, 0xcb, 0xfc, 0xfe, 0x2c, 0xcb, 0x7c, 0xf3, 0x3c, 0xbb, 0xf6, 0xf4, 0x79,
+ 0x76, 0xed, 0xd7, 0xe7, 0xd9, 0xb5, 0x87, 0x6f, 0x36, 0x2d, 0xb7, 0xd5, 0xaf, 0xe7, 0x4d, 0xdc,
+ 0x29, 0x98, 0xd8, 0xe9, 0x60, 0xa7, 0x60, 0xd5, 0xcd, 0x9b, 0x4d, 0x5c, 0xe8, 0xe0, 0x46, 0xbf,
+ 0x8d, 0x1c, 0xf2, 0x87, 0x73, 0x6b, 0xff, 0x26, 0x99, 0x87, 0x85, 0x36, 0x6a, 0x1a, 0xe6, 0xa8,
+ 0x30, 0xd8, 0xbb, 0x75, 0xab, 0xbe, 0xee, 0x0f, 0xb1, 0xd7, 0xfe, 0x08, 0x00, 0x00, 0xff, 0xff,
+ 0xef, 0x59, 0x70, 0x2d, 0x87, 0x0d, 0x00, 0x00,
+}
+
+func (m *ClientState) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ClientState) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
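+// Note on the generated marshalers below: MarshalToSizedBuffer writes the
+// message into the tail of dAtA, encoding fields in reverse field-number
+// order from the end of the buffer toward the front. This lets the length
+// prefix of each nested message be written after its contents without a
+// second sizing pass; callers hand in a buffer of exactly Size() bytes and
+// get back the number of bytes written.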
+func (m *ClientState) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.AllowUpdateAfterProposal {
+ i--
+ if m.AllowUpdateAfterProposal {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x20
+ }
+ if m.ConsensusState != nil {
+ {
+ size, err := m.ConsensusState.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintSolomachine(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.FrozenSequence != 0 {
+ i = encodeVarintSolomachine(dAtA, i, uint64(m.FrozenSequence))
+ i--
+ dAtA[i] = 0x10
+ }
+ if m.Sequence != 0 {
+ i = encodeVarintSolomachine(dAtA, i, uint64(m.Sequence))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ConsensusState) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ConsensusState) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ConsensusState) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Timestamp != 0 {
+ i = encodeVarintSolomachine(dAtA, i, uint64(m.Timestamp))
+ i--
+ dAtA[i] = 0x18
+ }
+ if len(m.Diversifier) > 0 {
+ i -= len(m.Diversifier)
+ copy(dAtA[i:], m.Diversifier)
+ i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Diversifier)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.PublicKey != nil {
+ {
+ size, err := m.PublicKey.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintSolomachine(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Header) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Header) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Header) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.NewDiversifier) > 0 {
+ i -= len(m.NewDiversifier)
+ copy(dAtA[i:], m.NewDiversifier)
+ i = encodeVarintSolomachine(dAtA, i, uint64(len(m.NewDiversifier)))
+ i--
+ dAtA[i] = 0x2a
+ }
+ if m.NewPublicKey != nil {
+ {
+ size, err := m.NewPublicKey.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintSolomachine(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ if len(m.Signature) > 0 {
+ i -= len(m.Signature)
+ copy(dAtA[i:], m.Signature)
+ i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Signature)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.Timestamp != 0 {
+ i = encodeVarintSolomachine(dAtA, i, uint64(m.Timestamp))
+ i--
+ dAtA[i] = 0x10
+ }
+ if m.Sequence != 0 {
+ i = encodeVarintSolomachine(dAtA, i, uint64(m.Sequence))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Misbehaviour) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Misbehaviour) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Misbehaviour) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.SignatureTwo != nil {
+ {
+ size, err := m.SignatureTwo.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintSolomachine(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.SignatureOne != nil {
+ {
+ size, err := m.SignatureOne.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintSolomachine(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.Sequence != 0 {
+ i = encodeVarintSolomachine(dAtA, i, uint64(m.Sequence))
+ i--
+ dAtA[i] = 0x10
+ }
+ if len(m.ClientId) > 0 {
+ i -= len(m.ClientId)
+ copy(dAtA[i:], m.ClientId)
+ i = encodeVarintSolomachine(dAtA, i, uint64(len(m.ClientId)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *SignatureAndData) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SignatureAndData) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SignatureAndData) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Timestamp != 0 {
+ i = encodeVarintSolomachine(dAtA, i, uint64(m.Timestamp))
+ i--
+ dAtA[i] = 0x20
+ }
+ if len(m.Data) > 0 {
+ i -= len(m.Data)
+ copy(dAtA[i:], m.Data)
+ i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Data)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.DataType != 0 {
+ i = encodeVarintSolomachine(dAtA, i, uint64(m.DataType))
+ i--
+ dAtA[i] = 0x10
+ }
+ if len(m.Signature) > 0 {
+ i -= len(m.Signature)
+ copy(dAtA[i:], m.Signature)
+ i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Signature)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *TimestampedSignatureData) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *TimestampedSignatureData) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *TimestampedSignatureData) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Timestamp != 0 {
+ i = encodeVarintSolomachine(dAtA, i, uint64(m.Timestamp))
+ i--
+ dAtA[i] = 0x10
+ }
+ if len(m.SignatureData) > 0 {
+ i -= len(m.SignatureData)
+ copy(dAtA[i:], m.SignatureData)
+ i = encodeVarintSolomachine(dAtA, i, uint64(len(m.SignatureData)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *SignBytes) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SignBytes) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SignBytes) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Data) > 0 {
+ i -= len(m.Data)
+ copy(dAtA[i:], m.Data)
+ i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Data)))
+ i--
+ dAtA[i] = 0x2a
+ }
+ if m.DataType != 0 {
+ i = encodeVarintSolomachine(dAtA, i, uint64(m.DataType))
+ i--
+ dAtA[i] = 0x20
+ }
+ if len(m.Diversifier) > 0 {
+ i -= len(m.Diversifier)
+ copy(dAtA[i:], m.Diversifier)
+ i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Diversifier)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.Timestamp != 0 {
+ i = encodeVarintSolomachine(dAtA, i, uint64(m.Timestamp))
+ i--
+ dAtA[i] = 0x10
+ }
+ if m.Sequence != 0 {
+ i = encodeVarintSolomachine(dAtA, i, uint64(m.Sequence))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *HeaderData) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *HeaderData) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *HeaderData) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.NewDiversifier) > 0 {
+ i -= len(m.NewDiversifier)
+ copy(dAtA[i:], m.NewDiversifier)
+ i = encodeVarintSolomachine(dAtA, i, uint64(len(m.NewDiversifier)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.NewPubKey != nil {
+ {
+ size, err := m.NewPubKey.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintSolomachine(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ClientStateData) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ClientStateData) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ClientStateData) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.ClientState != nil {
+ {
+ size, err := m.ClientState.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintSolomachine(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Path) > 0 {
+ i -= len(m.Path)
+ copy(dAtA[i:], m.Path)
+ i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Path)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ConsensusStateData) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ConsensusStateData) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ConsensusStateData) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.ConsensusState != nil {
+ {
+ size, err := m.ConsensusState.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintSolomachine(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Path) > 0 {
+ i -= len(m.Path)
+ copy(dAtA[i:], m.Path)
+ i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Path)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ConnectionStateData) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ConnectionStateData) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ConnectionStateData) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Connection != nil {
+ {
+ size, err := m.Connection.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintSolomachine(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Path) > 0 {
+ i -= len(m.Path)
+ copy(dAtA[i:], m.Path)
+ i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Path)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ChannelStateData) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ChannelStateData) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ChannelStateData) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Channel != nil {
+ {
+ size, err := m.Channel.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintSolomachine(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Path) > 0 {
+ i -= len(m.Path)
+ copy(dAtA[i:], m.Path)
+ i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Path)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *PacketCommitmentData) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *PacketCommitmentData) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PacketCommitmentData) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Commitment) > 0 {
+ i -= len(m.Commitment)
+ copy(dAtA[i:], m.Commitment)
+ i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Commitment)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Path) > 0 {
+ i -= len(m.Path)
+ copy(dAtA[i:], m.Path)
+ i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Path)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *PacketAcknowledgementData) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *PacketAcknowledgementData) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PacketAcknowledgementData) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Acknowledgement) > 0 {
+ i -= len(m.Acknowledgement)
+ copy(dAtA[i:], m.Acknowledgement)
+ i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Acknowledgement)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Path) > 0 {
+ i -= len(m.Path)
+ copy(dAtA[i:], m.Path)
+ i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Path)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *PacketReceiptAbsenceData) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *PacketReceiptAbsenceData) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PacketReceiptAbsenceData) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Path) > 0 {
+ i -= len(m.Path)
+ copy(dAtA[i:], m.Path)
+ i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Path)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *NextSequenceRecvData) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *NextSequenceRecvData) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *NextSequenceRecvData) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.NextSeqRecv != 0 {
+ i = encodeVarintSolomachine(dAtA, i, uint64(m.NextSeqRecv))
+ i--
+ dAtA[i] = 0x10
+ }
+ if len(m.Path) > 0 {
+ i -= len(m.Path)
+ copy(dAtA[i:], m.Path)
+ i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Path)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
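+// encodeVarintSolomachine prepends v as a base-128 varint immediately before
+// offset in dAtA and returns the index of the varint's first byte. The
+// reverse marshalers above use it to emit field tags and length prefixes.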
+func encodeVarintSolomachine(dAtA []byte, offset int, v uint64) int {
+ offset -= sovSolomachine(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
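+
+// The Size methods below report how many bytes each message occupies on the
+// wire: for every populated field, the tag byte plus either the varint value
+// or the length-prefixed payload. Marshal relies on these totals to allocate
+// an exactly sized buffer.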
+func (m *ClientState) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Sequence != 0 {
+ n += 1 + sovSolomachine(uint64(m.Sequence))
+ }
+ if m.FrozenSequence != 0 {
+ n += 1 + sovSolomachine(uint64(m.FrozenSequence))
+ }
+ if m.ConsensusState != nil {
+ l = m.ConsensusState.Size()
+ n += 1 + l + sovSolomachine(uint64(l))
+ }
+ if m.AllowUpdateAfterProposal {
+ n += 2
+ }
+ return n
+}
+
+func (m *ConsensusState) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.PublicKey != nil {
+ l = m.PublicKey.Size()
+ n += 1 + l + sovSolomachine(uint64(l))
+ }
+ l = len(m.Diversifier)
+ if l > 0 {
+ n += 1 + l + sovSolomachine(uint64(l))
+ }
+ if m.Timestamp != 0 {
+ n += 1 + sovSolomachine(uint64(m.Timestamp))
+ }
+ return n
+}
+
+func (m *Header) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Sequence != 0 {
+ n += 1 + sovSolomachine(uint64(m.Sequence))
+ }
+ if m.Timestamp != 0 {
+ n += 1 + sovSolomachine(uint64(m.Timestamp))
+ }
+ l = len(m.Signature)
+ if l > 0 {
+ n += 1 + l + sovSolomachine(uint64(l))
+ }
+ if m.NewPublicKey != nil {
+ l = m.NewPublicKey.Size()
+ n += 1 + l + sovSolomachine(uint64(l))
+ }
+ l = len(m.NewDiversifier)
+ if l > 0 {
+ n += 1 + l + sovSolomachine(uint64(l))
+ }
+ return n
+}
+
+func (m *Misbehaviour) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.ClientId)
+ if l > 0 {
+ n += 1 + l + sovSolomachine(uint64(l))
+ }
+ if m.Sequence != 0 {
+ n += 1 + sovSolomachine(uint64(m.Sequence))
+ }
+ if m.SignatureOne != nil {
+ l = m.SignatureOne.Size()
+ n += 1 + l + sovSolomachine(uint64(l))
+ }
+ if m.SignatureTwo != nil {
+ l = m.SignatureTwo.Size()
+ n += 1 + l + sovSolomachine(uint64(l))
+ }
+ return n
+}
+
+func (m *SignatureAndData) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Signature)
+ if l > 0 {
+ n += 1 + l + sovSolomachine(uint64(l))
+ }
+ if m.DataType != 0 {
+ n += 1 + sovSolomachine(uint64(m.DataType))
+ }
+ l = len(m.Data)
+ if l > 0 {
+ n += 1 + l + sovSolomachine(uint64(l))
+ }
+ if m.Timestamp != 0 {
+ n += 1 + sovSolomachine(uint64(m.Timestamp))
+ }
+ return n
+}
+
+func (m *TimestampedSignatureData) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.SignatureData)
+ if l > 0 {
+ n += 1 + l + sovSolomachine(uint64(l))
+ }
+ if m.Timestamp != 0 {
+ n += 1 + sovSolomachine(uint64(m.Timestamp))
+ }
+ return n
+}
+
+func (m *SignBytes) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Sequence != 0 {
+ n += 1 + sovSolomachine(uint64(m.Sequence))
+ }
+ if m.Timestamp != 0 {
+ n += 1 + sovSolomachine(uint64(m.Timestamp))
+ }
+ l = len(m.Diversifier)
+ if l > 0 {
+ n += 1 + l + sovSolomachine(uint64(l))
+ }
+ if m.DataType != 0 {
+ n += 1 + sovSolomachine(uint64(m.DataType))
+ }
+ l = len(m.Data)
+ if l > 0 {
+ n += 1 + l + sovSolomachine(uint64(l))
+ }
+ return n
+}
+
+func (m *HeaderData) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.NewPubKey != nil {
+ l = m.NewPubKey.Size()
+ n += 1 + l + sovSolomachine(uint64(l))
+ }
+ l = len(m.NewDiversifier)
+ if l > 0 {
+ n += 1 + l + sovSolomachine(uint64(l))
+ }
+ return n
+}
+
+func (m *ClientStateData) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Path)
+ if l > 0 {
+ n += 1 + l + sovSolomachine(uint64(l))
+ }
+ if m.ClientState != nil {
+ l = m.ClientState.Size()
+ n += 1 + l + sovSolomachine(uint64(l))
+ }
+ return n
+}
+
+func (m *ConsensusStateData) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Path)
+ if l > 0 {
+ n += 1 + l + sovSolomachine(uint64(l))
+ }
+ if m.ConsensusState != nil {
+ l = m.ConsensusState.Size()
+ n += 1 + l + sovSolomachine(uint64(l))
+ }
+ return n
+}
+
+func (m *ConnectionStateData) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Path)
+ if l > 0 {
+ n += 1 + l + sovSolomachine(uint64(l))
+ }
+ if m.Connection != nil {
+ l = m.Connection.Size()
+ n += 1 + l + sovSolomachine(uint64(l))
+ }
+ return n
+}
+
+func (m *ChannelStateData) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Path)
+ if l > 0 {
+ n += 1 + l + sovSolomachine(uint64(l))
+ }
+ if m.Channel != nil {
+ l = m.Channel.Size()
+ n += 1 + l + sovSolomachine(uint64(l))
+ }
+ return n
+}
+
+func (m *PacketCommitmentData) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Path)
+ if l > 0 {
+ n += 1 + l + sovSolomachine(uint64(l))
+ }
+ l = len(m.Commitment)
+ if l > 0 {
+ n += 1 + l + sovSolomachine(uint64(l))
+ }
+ return n
+}
+
+func (m *PacketAcknowledgementData) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Path)
+ if l > 0 {
+ n += 1 + l + sovSolomachine(uint64(l))
+ }
+ l = len(m.Acknowledgement)
+ if l > 0 {
+ n += 1 + l + sovSolomachine(uint64(l))
+ }
+ return n
+}
+
+func (m *PacketReceiptAbsenceData) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Path)
+ if l > 0 {
+ n += 1 + l + sovSolomachine(uint64(l))
+ }
+ return n
+}
+
+func (m *NextSequenceRecvData) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Path)
+ if l > 0 {
+ n += 1 + l + sovSolomachine(uint64(l))
+ }
+ if m.NextSeqRecv != 0 {
+ n += 1 + sovSolomachine(uint64(m.NextSeqRecv))
+ }
+ return n
+}
+
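+// sovSolomachine reports how many bytes the base-128 varint encoding of x
+// occupies; sozSolomachine does the same for the zig-zag (signed) encoding.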
+func sovSolomachine(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozSolomachine(x uint64) (n int) {
+ return sovSolomachine(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
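+
+// The Unmarshal methods below walk the wire format directly: each loop
+// iteration reads a varint key, splits it into a field number (key >> 3) and
+// wire type (key & 0x7), decodes the matching field, and skips unknown fields
+// via skipSolomachine so forward compatibility is preserved.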
+func (m *ClientState) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ClientState: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ClientState: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Sequence", wireType)
+ }
+ m.Sequence = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Sequence |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field FrozenSequence", wireType)
+ }
+ m.FrozenSequence = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.FrozenSequence |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ConsensusState", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ConsensusState == nil {
+ m.ConsensusState = &ConsensusState{}
+ }
+ if err := m.ConsensusState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AllowUpdateAfterProposal", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.AllowUpdateAfterProposal = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipSolomachine(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ConsensusState) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ConsensusState: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ConsensusState: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PublicKey", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.PublicKey == nil {
+ m.PublicKey = &types.Any{}
+ }
+ if err := m.PublicKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Diversifier", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Diversifier = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType)
+ }
+ m.Timestamp = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Timestamp |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipSolomachine(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Header) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Header: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Header: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Sequence", wireType)
+ }
+ m.Sequence = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Sequence |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType)
+ }
+ m.Timestamp = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Timestamp |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Signature", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Signature = append(m.Signature[:0], dAtA[iNdEx:postIndex]...)
+ if m.Signature == nil {
+ m.Signature = []byte{}
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NewPublicKey", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.NewPublicKey == nil {
+ m.NewPublicKey = &types.Any{}
+ }
+ if err := m.NewPublicKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NewDiversifier", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.NewDiversifier = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipSolomachine(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Misbehaviour) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Misbehaviour: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Misbehaviour: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClientId", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ClientId = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Sequence", wireType)
+ }
+ m.Sequence = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Sequence |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SignatureOne", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.SignatureOne == nil {
+ m.SignatureOne = &SignatureAndData{}
+ }
+ if err := m.SignatureOne.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SignatureTwo", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.SignatureTwo == nil {
+ m.SignatureTwo = &SignatureAndData{}
+ }
+ if err := m.SignatureTwo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipSolomachine(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SignatureAndData) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SignatureAndData: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SignatureAndData: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Signature", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Signature = append(m.Signature[:0], dAtA[iNdEx:postIndex]...)
+ if m.Signature == nil {
+ m.Signature = []byte{}
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DataType", wireType)
+ }
+ m.DataType = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.DataType |= DataType(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...)
+ if m.Data == nil {
+ m.Data = []byte{}
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType)
+ }
+ m.Timestamp = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Timestamp |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipSolomachine(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *TimestampedSignatureData) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: TimestampedSignatureData: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: TimestampedSignatureData: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SignatureData", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.SignatureData = append(m.SignatureData[:0], dAtA[iNdEx:postIndex]...)
+ if m.SignatureData == nil {
+ m.SignatureData = []byte{}
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType)
+ }
+ m.Timestamp = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Timestamp |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipSolomachine(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SignBytes) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SignBytes: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SignBytes: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Sequence", wireType)
+ }
+ m.Sequence = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Sequence |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType)
+ }
+ m.Timestamp = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Timestamp |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Diversifier", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Diversifier = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DataType", wireType)
+ }
+ m.DataType = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.DataType |= DataType(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...)
+ if m.Data == nil {
+ m.Data = []byte{}
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipSolomachine(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *HeaderData) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: HeaderData: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: HeaderData: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NewPubKey", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.NewPubKey == nil {
+ m.NewPubKey = &types.Any{}
+ }
+ if err := m.NewPubKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NewDiversifier", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.NewDiversifier = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipSolomachine(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ClientStateData) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ClientStateData: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ClientStateData: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Path = append(m.Path[:0], dAtA[iNdEx:postIndex]...)
+ if m.Path == nil {
+ m.Path = []byte{}
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClientState", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ClientState == nil {
+ m.ClientState = &types.Any{}
+ }
+ if err := m.ClientState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipSolomachine(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ConsensusStateData) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ConsensusStateData: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ConsensusStateData: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Path = append(m.Path[:0], dAtA[iNdEx:postIndex]...)
+ if m.Path == nil {
+ m.Path = []byte{}
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ConsensusState", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ConsensusState == nil {
+ m.ConsensusState = &types.Any{}
+ }
+ if err := m.ConsensusState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipSolomachine(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ConnectionStateData) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ConnectionStateData: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ConnectionStateData: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Path = append(m.Path[:0], dAtA[iNdEx:postIndex]...)
+ if m.Path == nil {
+ m.Path = []byte{}
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Connection", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Connection == nil {
+ m.Connection = &types1.ConnectionEnd{}
+ }
+ if err := m.Connection.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipSolomachine(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ChannelStateData) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ChannelStateData: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ChannelStateData: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Path = append(m.Path[:0], dAtA[iNdEx:postIndex]...)
+ if m.Path == nil {
+ m.Path = []byte{}
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Channel", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Channel == nil {
+ m.Channel = &types2.Channel{}
+ }
+ if err := m.Channel.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipSolomachine(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PacketCommitmentData) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PacketCommitmentData: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PacketCommitmentData: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Path = append(m.Path[:0], dAtA[iNdEx:postIndex]...)
+ if m.Path == nil {
+ m.Path = []byte{}
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Commitment", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Commitment = append(m.Commitment[:0], dAtA[iNdEx:postIndex]...)
+ if m.Commitment == nil {
+ m.Commitment = []byte{}
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipSolomachine(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PacketAcknowledgementData) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PacketAcknowledgementData: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PacketAcknowledgementData: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Path = append(m.Path[:0], dAtA[iNdEx:postIndex]...)
+ if m.Path == nil {
+ m.Path = []byte{}
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Acknowledgement", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Acknowledgement = append(m.Acknowledgement[:0], dAtA[iNdEx:postIndex]...)
+ if m.Acknowledgement == nil {
+ m.Acknowledgement = []byte{}
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipSolomachine(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PacketReceiptAbsenceData) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PacketReceiptAbsenceData: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PacketReceiptAbsenceData: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Path = append(m.Path[:0], dAtA[iNdEx:postIndex]...)
+ if m.Path == nil {
+ m.Path = []byte{}
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipSolomachine(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *NextSequenceRecvData) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: NextSequenceRecvData: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: NextSequenceRecvData: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Path = append(m.Path[:0], dAtA[iNdEx:postIndex]...)
+ if m.Path == nil {
+ m.Path = []byte{}
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NextSeqRecv", wireType)
+ }
+ m.NextSeqRecv = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.NextSeqRecv |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipSolomachine(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthSolomachine
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipSolomachine(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowSolomachine
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthSolomachine
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupSolomachine
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthSolomachine
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthSolomachine = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowSolomachine = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupSolomachine = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/modules/core/02-client/legacy/v100/store.go b/modules/core/02-client/legacy/v100/store.go
new file mode 100644
index 00000000..842086f9
--- /dev/null
+++ b/modules/core/02-client/legacy/v100/store.go
@@ -0,0 +1,180 @@
+package v100
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/cosmos/cosmos-sdk/codec"
+ codectypes "github.com/cosmos/cosmos-sdk/codec/types"
+ "github.com/cosmos/cosmos-sdk/store/prefix"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+
+ "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
+ "github.com/cosmos/ibc-go/modules/core/exported"
+ smtypes "github.com/cosmos/ibc-go/modules/light-clients/06-solomachine/types"
+ ibctmtypes "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types"
+)
+
+// MigrateStore performs in-place store migrations from SDK v0.40 of the IBC module to v1.0.0 of ibc-go.
+// The migration includes:
+//
+// - Migrating solo machine client states from v1 to v2 protobuf definition
+// - Pruning all solo machine consensus states
+// - Pruning expired tendermint consensus states
+// - Adding ProcessedHeight and Iteration keys for unexpired tendermint consensus states
+func MigrateStore(ctx sdk.Context, storeKey sdk.StoreKey, cdc codec.BinaryCodec) (err error) {
+ store := ctx.KVStore(storeKey)
+ iterator := sdk.KVStorePrefixIterator(store, host.KeyClientStorePrefix)
+
+ var clients []string
+
+ // collect all clients
+ defer iterator.Close()
+ for ; iterator.Valid(); iterator.Next() {
+ keySplit := strings.Split(string(iterator.Key()), "/")
+ if keySplit[len(keySplit)-1] != host.KeyClientState {
+ continue
+ }
+
+ // key is clients/{clientid}/clientState
+ // Thus, keySplit[1] is clientID
+ clients = append(clients, keySplit[1])
+ }
+
+ for _, clientID := range clients {
+ clientType, _, err := types.ParseClientIdentifier(clientID)
+ if err != nil {
+ return err
+ }
+
+ clientPrefix := []byte(fmt.Sprintf("%s/%s/", host.KeyClientStorePrefix, clientID))
+ clientStore := prefix.NewStore(ctx.KVStore(storeKey), clientPrefix)
+
+ bz := clientStore.Get(host.ClientStateKey())
+ if bz == nil {
+ return clienttypes.ErrClientNotFound
+ }
+
+ switch clientType {
+ case exported.Solomachine:
+ any := &codectypes.Any{}
+ if err := cdc.Unmarshal(bz, any); err != nil {
+ return sdkerrors.Wrap(err, "failed to unmarshal client state bytes into solo machine client state")
+ }
+
+ clientState := &ClientState{}
+ if err := cdc.Unmarshal(any.Value, clientState); err != nil {
+ return sdkerrors.Wrap(err, "failed to unmarshal client state bytes into solo machine client state")
+ }
+
+ updatedClientState := migrateSolomachine(clientState)
+
+ bz, err := clienttypes.MarshalClientState(cdc, updatedClientState)
+ if err != nil {
+				return sdkerrors.Wrap(err, "failed to marshal solo machine client state")
+ }
+
+ // update solomachine in store
+ clientStore.Set(host.ClientStateKey(), bz)
+
+ pruneSolomachineConsensusStates(clientStore)
+
+ case exported.Tendermint:
+ var clientState exported.ClientState
+ if err := cdc.UnmarshalInterface(bz, &clientState); err != nil {
+ return sdkerrors.Wrap(err, "failed to unmarshal client state bytes into tendermint client state")
+ }
+
+ tmClientState, ok := clientState.(*ibctmtypes.ClientState)
+ if !ok {
+ return sdkerrors.Wrap(types.ErrInvalidClient, "client state is not tendermint even though client id contains 07-tendermint")
+ }
+
+ // add iteration keys so pruning will be successful
+ if err = addConsensusMetadata(ctx, clientStore, cdc, tmClientState); err != nil {
+ return err
+ }
+
+ if err = ibctmtypes.PruneAllExpiredConsensusStates(ctx, clientStore, cdc, tmClientState); err != nil {
+ return err
+ }
+
+ default:
+ continue
+ }
+ }
+
+ return nil
+}
+
+// migrateSolomachine migrates the solomachine from v1 to v2 solo machine protobuf definition.
+func migrateSolomachine(clientState *ClientState) *smtypes.ClientState {
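+	// The v1 definition tracked freezing with a FrozenSequence (0 meaning unfrozen);
+	// the v2 definition replaces it with a boolean IsFrozen flag.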
+ isFrozen := clientState.FrozenSequence != 0
+ consensusState := &smtypes.ConsensusState{
+ PublicKey: clientState.ConsensusState.PublicKey,
+ Diversifier: clientState.ConsensusState.Diversifier,
+ Timestamp: clientState.ConsensusState.Timestamp,
+ }
+
+ return &smtypes.ClientState{
+ Sequence: clientState.Sequence,
+ IsFrozen: isFrozen,
+ ConsensusState: consensusState,
+ AllowUpdateAfterProposal: clientState.AllowUpdateAfterProposal,
+ }
+}
+
+// pruneSolomachineConsensusStates removes all solomachine consensus states from the
+// client store.
+func pruneSolomachineConsensusStates(clientStore sdk.KVStore) {
+ iterator := sdk.KVStorePrefixIterator(clientStore, []byte(host.KeyConsensusStatePrefix))
+ var heights []exported.Height
+
+ defer iterator.Close()
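+	// collect the heights in a first pass and delete in a second pass so the
+	// store is not mutated while the iterator is still open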
+ for ; iterator.Valid(); iterator.Next() {
+ keySplit := strings.Split(string(iterator.Key()), "/")
+		// key is in the format "consensusStates/{height}"
+ if len(keySplit) != 2 || keySplit[0] != string(host.KeyConsensusStatePrefix) {
+ continue
+ }
+
+ // collect consensus states to be pruned
+ heights = append(heights, types.MustParseHeight(keySplit[1]))
+ }
+
+ // delete all consensus states
+ for _, height := range heights {
+ clientStore.Delete(host.ConsensusStateKey(height))
+ }
+}
+
+// addConsensusMetadata adds the iteration key and processed height for all tendermint consensus states.
+// These keys were not included in the previous release of the IBC module. Adding the iteration keys allows
+// consensus states to be iterated over when pruning.
+func addConsensusMetadata(ctx sdk.Context, clientStore sdk.KVStore, cdc codec.BinaryCodec, clientState *ibctmtypes.ClientState) error {
+ var heights []exported.Height
+ iterator := sdk.KVStorePrefixIterator(clientStore, []byte(host.KeyConsensusStatePrefix))
+
+ defer iterator.Close()
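+	// collect all tendermint consensus state heights first; the metadata is written in a second pass below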
+ for ; iterator.Valid(); iterator.Next() {
+ keySplit := strings.Split(string(iterator.Key()), "/")
+		// consensus key is in the format "consensusStates/{height}"
+ if len(keySplit) != 2 {
+ continue
+ }
+
+ heights = append(heights, types.MustParseHeight(keySplit[1]))
+ }
+
+ for _, height := range heights {
+ // set the iteration key and processed height
+ // these keys were not included in the SDK v0.42.0 release
+ ibctmtypes.SetProcessedHeight(clientStore, height, clienttypes.GetSelfHeight(ctx))
+ ibctmtypes.SetIterationKey(clientStore, height)
+ }
+
+ return nil
+}
diff --git a/modules/core/02-client/legacy/v100/store_test.go b/modules/core/02-client/legacy/v100/store_test.go
new file mode 100644
index 00000000..1b9856da
--- /dev/null
+++ b/modules/core/02-client/legacy/v100/store_test.go
@@ -0,0 +1,231 @@
+package v100_test
+
+import (
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/suite"
+
+ "github.com/cosmos/ibc-go/modules/core/02-client/legacy/v100"
+ "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
+ "github.com/cosmos/ibc-go/modules/core/exported"
+ ibctmtypes "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types"
+ ibctesting "github.com/cosmos/ibc-go/testing"
+)
+
+type LegacyTestSuite struct {
+ suite.Suite
+
+ coordinator *ibctesting.Coordinator
+
+ // testing chains used for convenience and readability
+ chainA *ibctesting.TestChain
+ chainB *ibctesting.TestChain
+}
+
+// TestLegacyTestSuite runs all the tests within this package.
+func TestLegacyTestSuite(t *testing.T) {
+ suite.Run(t, new(LegacyTestSuite))
+}
+
+// SetupTest creates a coordinator with 2 test chains.
+func (suite *LegacyTestSuite) SetupTest() {
+ suite.coordinator = ibctesting.NewCoordinator(suite.T(), 2)
+ suite.chainA = suite.coordinator.GetChain(ibctesting.GetChainID(0))
+ suite.chainB = suite.coordinator.GetChain(ibctesting.GetChainID(1))
+ // commit some blocks so that QueryProof returns valid proof (cannot return valid query if height <= 1)
+ suite.coordinator.CommitNBlocks(suite.chainA, 2)
+ suite.coordinator.CommitNBlocks(suite.chainB, 2)
+}
+
+// TestMigrateStoreSolomachine only tests migration for solo machines.
+// It ensures all client states are migrated and all consensus states
+// are removed.
+func (suite *LegacyTestSuite) TestMigrateStoreSolomachine() {
+ path := ibctesting.NewPath(suite.chainA, suite.chainB)
+
+ // create multiple legacy solo machine clients
+ solomachine := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "06-solomachine-0", "testing", 1)
+ solomachineMulti := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "06-solomachine-1", "testing", 4)
+
+ // manually generate old proto buf definitions and set in store
+ // NOTE: we cannot use 'CreateClient' and 'UpdateClient' functions since we are
+ // using client states and consensus states which do not implement the exported.ClientState
+ // and exported.ConsensusState interface
+ for _, sm := range []*ibctesting.Solomachine{solomachine, solomachineMulti} {
+ clientStore := path.EndpointA.Chain.App.GetIBCKeeper().ClientKeeper.ClientStore(path.EndpointA.Chain.GetContext(), sm.ClientID)
+ clientState := sm.ClientState()
+
+ var seq uint64
+ if clientState.IsFrozen {
+ seq = 1
+ }
+
+		// generate old client state proto definition
+ legacyClientState := &v100.ClientState{
+ Sequence: clientState.Sequence,
+ FrozenSequence: seq,
+ ConsensusState: &v100.ConsensusState{
+ PublicKey: clientState.ConsensusState.PublicKey,
+ Diversifier: clientState.ConsensusState.Diversifier,
+ Timestamp: clientState.ConsensusState.Timestamp,
+ },
+ AllowUpdateAfterProposal: clientState.AllowUpdateAfterProposal,
+ }
+
+ // set client state
+ bz, err := path.EndpointA.Chain.App.AppCodec().MarshalInterface(legacyClientState)
+ suite.Require().NoError(err)
+ clientStore.Set(host.ClientStateKey(), bz)
+
+ // set some consensus states
+ height1 := types.NewHeight(0, 1)
+ height2 := types.NewHeight(1, 2)
+ height3 := types.NewHeight(0, 123)
+
+ bz, err = path.EndpointA.Chain.App.AppCodec().MarshalInterface(legacyClientState.ConsensusState)
+ suite.Require().NoError(err)
+ clientStore.Set(host.ConsensusStateKey(height1), bz)
+ clientStore.Set(host.ConsensusStateKey(height2), bz)
+ clientStore.Set(host.ConsensusStateKey(height3), bz)
+ }
+
+ // create tendermint clients
+ suite.coordinator.SetupClients(path)
+
+ err := v100.MigrateStore(path.EndpointA.Chain.GetContext(), path.EndpointA.Chain.GetSimApp().GetKey(host.StoreKey), path.EndpointA.Chain.App.AppCodec())
+ suite.Require().NoError(err)
+
+ // verify client state has been migrated
+ for _, sm := range []*ibctesting.Solomachine{solomachine, solomachineMulti} {
+ clientState, ok := path.EndpointA.Chain.App.GetIBCKeeper().ClientKeeper.GetClientState(path.EndpointA.Chain.GetContext(), sm.ClientID)
+ suite.Require().True(ok)
+ suite.Require().Equal(sm.ClientState(), clientState)
+ }
+
+ // verify consensus states have been removed
+ for _, sm := range []*ibctesting.Solomachine{solomachine, solomachineMulti} {
+ clientConsensusStates := path.EndpointA.Chain.App.GetIBCKeeper().ClientKeeper.GetAllConsensusStates(path.EndpointA.Chain.GetContext())
+ for _, client := range clientConsensusStates {
+ // GetAllConsensusStates should not return consensus states for our solo machine clients
+ suite.Require().NotEqual(sm.ClientID, client.ClientId)
+ }
+ }
+}
+
+// TestMigrateStoreTendermint only tests migration for tendermint clients.
+// It ensures all expired consensus states are removed from tendermint client stores.
+func (suite *LegacyTestSuite) TestMigrateStoreTendermint() {
+ // create path and setup clients
+ path1 := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.SetupClients(path1)
+
+ path2 := ibctesting.NewPath(suite.chainA, suite.chainB)
+ suite.coordinator.SetupClients(path2)
+ pruneHeightMap := make(map[*ibctesting.Path][]exported.Height)
+ unexpiredHeightMap := make(map[*ibctesting.Path][]exported.Height)
+
+ for _, path := range []*ibctesting.Path{path1, path2} {
+ // collect all heights expected to be pruned
+ var pruneHeights []exported.Height
+ pruneHeights = append(pruneHeights, path.EndpointA.GetClientState().GetLatestHeight())
+
+ // these heights will be expired and also pruned
+ for i := 0; i < 3; i++ {
+ path.EndpointA.UpdateClient()
+ pruneHeights = append(pruneHeights, path.EndpointA.GetClientState().GetLatestHeight())
+ }
+
+		// double check that all information is currently stored
+ for _, pruneHeight := range pruneHeights {
+ consState, ok := path.EndpointA.Chain.GetConsensusState(path.EndpointA.ClientID, pruneHeight)
+ suite.Require().True(ok)
+ suite.Require().NotNil(consState)
+
+ ctx := path.EndpointA.Chain.GetContext()
+ clientStore := path.EndpointA.Chain.App.GetIBCKeeper().ClientKeeper.ClientStore(ctx, path.EndpointA.ClientID)
+
+ processedTime, ok := ibctmtypes.GetProcessedTime(clientStore, pruneHeight)
+ suite.Require().True(ok)
+ suite.Require().NotNil(processedTime)
+
+ processedHeight, ok := ibctmtypes.GetProcessedHeight(clientStore, pruneHeight)
+ suite.Require().True(ok)
+ suite.Require().NotNil(processedHeight)
+
+ expectedConsKey := ibctmtypes.GetIterationKey(clientStore, pruneHeight)
+ suite.Require().NotNil(expectedConsKey)
+ }
+ pruneHeightMap[path] = pruneHeights
+ }
+
+ // Increment the time by a week
+ suite.coordinator.IncrementTimeBy(7 * 24 * time.Hour)
+
+ for _, path := range []*ibctesting.Path{path1, path2} {
+ // create the consensus state that can be used as trusted height for next update
+ var unexpiredHeights []exported.Height
+ path.EndpointA.UpdateClient()
+ unexpiredHeights = append(unexpiredHeights, path.EndpointA.GetClientState().GetLatestHeight())
+ path.EndpointA.UpdateClient()
+ unexpiredHeights = append(unexpiredHeights, path.EndpointA.GetClientState().GetLatestHeight())
+
+		// remove processed height and iteration keys since these were missing from the previous version of the IBC module
+ clientStore := path.EndpointA.Chain.App.GetIBCKeeper().ClientKeeper.ClientStore(path.EndpointA.Chain.GetContext(), path.EndpointA.ClientID)
+ for _, height := range unexpiredHeights {
+ clientStore.Delete(ibctmtypes.ProcessedHeightKey(height))
+ clientStore.Delete(ibctmtypes.IterationKey(height))
+ }
+
+ unexpiredHeightMap[path] = unexpiredHeights
+ }
+
+	// Increment the time by another week so that the consensus states created before
+	// the first time increment are expired, then run the migration which should
+	// prune them.
+ suite.coordinator.IncrementTimeBy(7 * 24 * time.Hour)
+ err := v100.MigrateStore(path1.EndpointA.Chain.GetContext(), path1.EndpointA.Chain.GetSimApp().GetKey(host.StoreKey), path1.EndpointA.Chain.App.AppCodec())
+ suite.Require().NoError(err)
+
+ for _, path := range []*ibctesting.Path{path1, path2} {
+ ctx := path.EndpointA.Chain.GetContext()
+ clientStore := path.EndpointA.Chain.App.GetIBCKeeper().ClientKeeper.ClientStore(ctx, path.EndpointA.ClientID)
+
+ // ensure everything has been pruned
+ for i, pruneHeight := range pruneHeightMap[path] {
+ consState, ok := path.EndpointA.Chain.GetConsensusState(path.EndpointA.ClientID, pruneHeight)
+ suite.Require().False(ok, i)
+ suite.Require().Nil(consState, i)
+
+ processedTime, ok := ibctmtypes.GetProcessedTime(clientStore, pruneHeight)
+ suite.Require().False(ok, i)
+ suite.Require().Equal(uint64(0), processedTime, i)
+
+ processedHeight, ok := ibctmtypes.GetProcessedHeight(clientStore, pruneHeight)
+ suite.Require().False(ok, i)
+ suite.Require().Nil(processedHeight, i)
+
+ expectedConsKey := ibctmtypes.GetIterationKey(clientStore, pruneHeight)
+ suite.Require().Nil(expectedConsKey, i)
+ }
+
+ // ensure metadata is set for unexpired consensus state
+ for _, height := range unexpiredHeightMap[path] {
+ consState, ok := path.EndpointA.Chain.GetConsensusState(path.EndpointA.ClientID, height)
+ suite.Require().True(ok)
+ suite.Require().NotNil(consState)
+
+ processedTime, ok := ibctmtypes.GetProcessedTime(clientStore, height)
+ suite.Require().True(ok)
+ suite.Require().NotEqual(uint64(0), processedTime)
+
+ processedHeight, ok := ibctmtypes.GetProcessedHeight(clientStore, height)
+ suite.Require().True(ok)
+ suite.Require().Equal(types.GetSelfHeight(path.EndpointA.Chain.GetContext()), processedHeight)
+
+ consKey := ibctmtypes.GetIterationKey(clientStore, height)
+ suite.Require().Equal(host.ConsensusStateKey(height), consKey)
+ }
+ }
+}
diff --git a/modules/core/03-connection/types/connection.pb.go b/modules/core/03-connection/types/connection.pb.go
index 07577489..6fbe5ba9 100644
--- a/modules/core/03-connection/types/connection.pb.go
+++ b/modules/core/03-connection/types/connection.pb.go
@@ -356,9 +356,9 @@ var xxx_messageInfo_Version proto.InternalMessageInfo
// Params defines the set of Connection parameters.
type Params struct {
- // maximum expected time per block, used to enforce block delay. This parameter should reflect the largest amount of
- // time that the chain might reasonably take to produce the next block under normal operating conditions. A safe
- // choice is 3-5x the expected time per block.
+ // maximum expected time per block (in nanoseconds), used to enforce block delay. This parameter should reflect the
+ // largest amount of time that the chain might reasonably take to produce the next block under normal operating
+ // conditions. A safe choice is 3-5x the expected time per block.
MaxExpectedTimePerBlock uint64 `protobuf:"varint,1,opt,name=max_expected_time_per_block,json=maxExpectedTimePerBlock,proto3" json:"max_expected_time_per_block,omitempty" yaml:"max_expected_time_per_block"`
}
diff --git a/modules/core/03-connection/types/params.go b/modules/core/03-connection/types/params.go
index 904bde60..35677062 100644
--- a/modules/core/03-connection/types/params.go
+++ b/modules/core/03-connection/types/params.go
@@ -7,7 +7,7 @@ import (
paramtypes "github.com/cosmos/cosmos-sdk/x/params/types"
)
-// DefaultTimePerBlock is the default value for maximum expected time per block.
+// DefaultTimePerBlock is the default value for maximum expected time per block (in nanoseconds).
const DefaultTimePerBlock = 30 * time.Second
// KeyMaxExpectedTimePerBlock is store's key for MaxExpectedTimePerBlock parameter
diff --git a/modules/core/exported/client.go b/modules/core/exported/client.go
index 1578900a..de4cbe48 100644
--- a/modules/core/exported/client.go
+++ b/modules/core/exported/client.go
@@ -2,10 +2,9 @@ package exported
import (
ics23 "github.com/confio/ics23/go"
- proto "github.com/gogo/protobuf/proto"
-
"github.com/cosmos/cosmos-sdk/codec"
sdk "github.com/cosmos/cosmos-sdk/types"
+ proto "github.com/gogo/protobuf/proto"
)
// Status represents the status of a client
diff --git a/modules/core/keeper/migrations.go b/modules/core/keeper/migrations.go
new file mode 100644
index 00000000..c6691005
--- /dev/null
+++ b/modules/core/keeper/migrations.go
@@ -0,0 +1,32 @@
+package keeper
+
+import (
+ sdk "github.com/cosmos/cosmos-sdk/types"
+
+ clientkeeper "github.com/cosmos/ibc-go/modules/core/02-client/keeper"
+)
+
+// Migrator is a struct for handling in-place store migrations.
+type Migrator struct {
+ keeper Keeper
+}
+
+// NewMigrator returns a new Migrator.
+func NewMigrator(keeper Keeper) Migrator {
+ return Migrator{keeper: keeper}
+}
+
+// Migrate1to2 migrates from version 1 to 2.
+// This migration:
+// - migrates solo machine client state from protobuf definition v1 to v2
+// - prunes solo machine consensus states
+// - prunes expired tendermint consensus states
+// - adds ProcessedHeight and Iteration keys for unexpired tendermint consensus states
+func (m Migrator) Migrate1to2(ctx sdk.Context) error {
+ clientMigrator := clientkeeper.NewMigrator(m.keeper.ClientKeeper)
+ if err := clientMigrator.Migrate1to2(ctx); err != nil {
+ return err
+ }
+
+ return nil
+}
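+
+// A minimal sketch of how this migration might be wired up, assuming the core IBC
+// AppModule registers it through the SDK's module.Configurator (RegisterServices /
+// RegisterMigration); field and package names below are illustrative, the actual
+// wiring lives in the module, not in this file:
+//
+//	func (am AppModule) RegisterServices(cfg module.Configurator) {
+//		m := NewMigrator(*am.keeper)
+//		if err := cfg.RegisterMigration(host.ModuleName, 1, m.Migrate1to2); err != nil {
+//			panic(err)
+//		}
+//	}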
diff --git a/modules/core/legacy/v100/genesis.go b/modules/core/legacy/v100/genesis.go
new file mode 100644
index 00000000..42932613
--- /dev/null
+++ b/modules/core/legacy/v100/genesis.go
@@ -0,0 +1,54 @@
+package v100
+
+import (
+ "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/codec"
+ genutiltypes "github.com/cosmos/cosmos-sdk/x/genutil/types"
+ tmtypes "github.com/tendermint/tendermint/types"
+
+ clientv100 "github.com/cosmos/ibc-go/modules/core/02-client/legacy/v100"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ connectiontypes "github.com/cosmos/ibc-go/modules/core/03-connection/types"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
+ "github.com/cosmos/ibc-go/modules/core/types"
+)
+
+// MigrateGenesis accepts an exported v1.0.0 IBC client genesis file and migrates it by:
+//
+// - Updating the solo machine client state protobuf definition (v1 to v2)
+// - Removing all solo machine consensus states
+// - Removing all expired tendermint consensus states
+func MigrateGenesis(appState genutiltypes.AppMap, clientCtx client.Context, genDoc tmtypes.GenesisDoc, maxExpectedTimePerBlock uint64) (genutiltypes.AppMap, error) {
+ if appState[host.ModuleName] != nil {
+ // ensure legacy solo machines are registered
+ clientv100.RegisterInterfaces(clientCtx.InterfaceRegistry)
+
+ // unmarshal relative source genesis application state
+ ibcGenState := &types.GenesisState{}
+ clientCtx.JSONCodec.MustUnmarshalJSON(appState[host.ModuleName], ibcGenState)
+
+ clientGenState, err := clientv100.MigrateGenesis(codec.NewProtoCodec(clientCtx.InterfaceRegistry), &ibcGenState.ClientGenesis, genDoc.GenesisTime, clienttypes.NewHeight(clienttypes.ParseChainID(genDoc.ChainID), uint64(genDoc.InitialHeight)))
+ if err != nil {
+ return nil, err
+ }
+
+ ibcGenState.ClientGenesis = *clientGenState
+
+ // set max expected time per block
+ connectionGenesis := connectiontypes.GenesisState{
+ Connections: ibcGenState.ConnectionGenesis.Connections,
+ ClientConnectionPaths: ibcGenState.ConnectionGenesis.ClientConnectionPaths,
+ NextConnectionSequence: ibcGenState.ConnectionGenesis.NextConnectionSequence,
+ Params: connectiontypes.NewParams(maxExpectedTimePerBlock),
+ }
+
+ ibcGenState.ConnectionGenesis = connectionGenesis
+
+ // delete old genesis state
+ delete(appState, host.ModuleName)
+
+ // set new ibc genesis state
+ appState[host.ModuleName] = clientCtx.JSONCodec.MustMarshalJSON(ibcGenState)
+ }
+ return appState, nil
+}
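+
+// A minimal usage sketch, assuming the caller already has the exported appState
+// (genutiltypes.AppMap), a client.Context and a tmtypes.GenesisDoc in scope
+// (the surrounding names below are illustrative):
+//
+//	appState, err := v100.MigrateGenesis(appState, clientCtx, genDoc, uint64(connectiontypes.DefaultTimePerBlock))
+//	if err != nil {
+//		return err
+//	}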
diff --git a/modules/core/legacy/v100/genesis_test.go b/modules/core/legacy/v100/genesis_test.go
new file mode 100644
index 00000000..d4e53d9f
--- /dev/null
+++ b/modules/core/legacy/v100/genesis_test.go
@@ -0,0 +1,178 @@
+package v100_test
+
+import (
+ "testing"
+
+ "github.com/cosmos/cosmos-sdk/client"
+ codectypes "github.com/cosmos/cosmos-sdk/codec/types"
+ genutiltypes "github.com/cosmos/cosmos-sdk/x/genutil/types"
+ "github.com/stretchr/testify/suite"
+ tmtypes "github.com/tendermint/tendermint/types"
+
+ ibcclient "github.com/cosmos/ibc-go/modules/core/02-client"
+ clientv100 "github.com/cosmos/ibc-go/modules/core/02-client/legacy/v100"
+ clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
+ connectiontypes "github.com/cosmos/ibc-go/modules/core/03-connection/types"
+ host "github.com/cosmos/ibc-go/modules/core/24-host"
+ "github.com/cosmos/ibc-go/modules/core/legacy/v100"
+ "github.com/cosmos/ibc-go/modules/core/types"
+ ibctesting "github.com/cosmos/ibc-go/testing"
+ "github.com/cosmos/ibc-go/testing/simapp"
+)
+
+type LegacyTestSuite struct {
+ suite.Suite
+
+ coordinator *ibctesting.Coordinator
+
+ // testing chains used for convenience and readability
+ chainA *ibctesting.TestChain
+ chainB *ibctesting.TestChain
+}
+
+// TestLegacyTestSuite runs all the tests within this package.
+func TestLegacyTestSuite(t *testing.T) {
+ suite.Run(t, new(LegacyTestSuite))
+}
+
+// SetupTest creates a coordinator with 2 test chains.
+func (suite *LegacyTestSuite) SetupTest() {
+ suite.coordinator = ibctesting.NewCoordinator(suite.T(), 2)
+ suite.chainA = suite.coordinator.GetChain(ibctesting.GetChainID(0))
+ suite.chainB = suite.coordinator.GetChain(ibctesting.GetChainID(1))
+ // commit some blocks so that QueryProof returns valid proof (cannot return valid query if height <= 1)
+ suite.coordinator.CommitNBlocks(suite.chainA, 2)
+ suite.coordinator.CommitNBlocks(suite.chainB, 2)
+}
+
+// NOTE: this test is mainly copied from 02-client/legacy/v100
+func (suite *LegacyTestSuite) TestMigrateGenesisSolomachine() {
+ path := ibctesting.NewPath(suite.chainA, suite.chainB)
+ encodingConfig := simapp.MakeTestEncodingConfig()
+ clientCtx := client.Context{}.
+ WithInterfaceRegistry(encodingConfig.InterfaceRegistry).
+ WithTxConfig(encodingConfig.TxConfig).
+ WithJSONCodec(encodingConfig.Marshaler)
+
+ // create multiple legacy solo machine clients
+ solomachine := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "06-solomachine-0", "testing", 1)
+ solomachineMulti := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "06-solomachine-1", "testing", 4)
+
+ // create tendermint clients
+	// NOTE: only 1 set of metadata is created; we aren't testing ordering.
+	// The purpose of this test is to ensure the genesis states can be marshalled/unmarshalled.
+ suite.coordinator.SetupClients(path)
+ clientGenState := ibcclient.ExportGenesis(path.EndpointA.Chain.GetContext(), path.EndpointA.Chain.App.GetIBCKeeper().ClientKeeper)
+
+	// manually generate old protobuf definitions and set them in genesis
+	// NOTE: we cannot use 'ExportGenesis' for the solo machines since we are
+	// using client states and consensus states which do not implement the
+	// exported.ClientState and exported.ConsensusState interfaces
+ var clients []clienttypes.IdentifiedClientState
+ for _, sm := range []*ibctesting.Solomachine{solomachine, solomachineMulti} {
+ clientState := sm.ClientState()
+
+ var seq uint64
+ if clientState.IsFrozen {
+ seq = 1
+ }
+
+		// generate old client state proto definition
+ legacyClientState := &clientv100.ClientState{
+ Sequence: clientState.Sequence,
+ FrozenSequence: seq,
+ ConsensusState: &clientv100.ConsensusState{
+ PublicKey: clientState.ConsensusState.PublicKey,
+ Diversifier: clientState.ConsensusState.Diversifier,
+ Timestamp: clientState.ConsensusState.Timestamp,
+ },
+ AllowUpdateAfterProposal: clientState.AllowUpdateAfterProposal,
+ }
+
+ // set client state
+ any, err := codectypes.NewAnyWithValue(legacyClientState)
+ suite.Require().NoError(err)
+ suite.Require().NotNil(any)
+ client := clienttypes.IdentifiedClientState{
+ ClientId: sm.ClientID,
+ ClientState: any,
+ }
+ clients = append(clients, client)
+
+ // set in store for ease of determining expected genesis
+ clientStore := path.EndpointA.Chain.App.GetIBCKeeper().ClientKeeper.ClientStore(path.EndpointA.Chain.GetContext(), sm.ClientID)
+ bz, err := path.EndpointA.Chain.App.AppCodec().MarshalInterface(legacyClientState)
+ suite.Require().NoError(err)
+ clientStore.Set(host.ClientStateKey(), bz)
+
+ // set some consensus states
+ height1 := clienttypes.NewHeight(0, 1)
+ height2 := clienttypes.NewHeight(1, 2)
+ height3 := clienttypes.NewHeight(0, 123)
+
+ any, err = codectypes.NewAnyWithValue(legacyClientState.ConsensusState)
+ suite.Require().NoError(err)
+ suite.Require().NotNil(any)
+ consensusState1 := clienttypes.ConsensusStateWithHeight{
+ Height: height1,
+ ConsensusState: any,
+ }
+ consensusState2 := clienttypes.ConsensusStateWithHeight{
+ Height: height2,
+ ConsensusState: any,
+ }
+ consensusState3 := clienttypes.ConsensusStateWithHeight{
+ Height: height3,
+ ConsensusState: any,
+ }
+
+ clientConsensusState := clienttypes.ClientConsensusStates{
+ ClientId: sm.ClientID,
+ ConsensusStates: []clienttypes.ConsensusStateWithHeight{consensusState1, consensusState2, consensusState3},
+ }
+
+ clientGenState.ClientsConsensus = append(clientGenState.ClientsConsensus, clientConsensusState)
+
+ // set in store for ease of determining expected genesis
+ bz, err = path.EndpointA.Chain.App.AppCodec().MarshalInterface(legacyClientState.ConsensusState)
+ suite.Require().NoError(err)
+ clientStore.Set(host.ConsensusStateKey(height1), bz)
+ clientStore.Set(host.ConsensusStateKey(height2), bz)
+ clientStore.Set(host.ConsensusStateKey(height3), bz)
+ }
+	// solo machine clients must come before tendermint clients in the expected genesis
+ clientGenState.Clients = append(clients, clientGenState.Clients...)
+
+	// migrate the store to get the expected genesis
+ // store migration and genesis migration should produce identical results
+ err := clientv100.MigrateStore(path.EndpointA.Chain.GetContext(), path.EndpointA.Chain.GetSimApp().GetKey(host.StoreKey), path.EndpointA.Chain.App.AppCodec())
+ suite.Require().NoError(err)
+ expectedClientGenState := ibcclient.ExportGenesis(path.EndpointA.Chain.GetContext(), path.EndpointA.Chain.App.GetIBCKeeper().ClientKeeper)
+
+ // NOTE: these lines are added in comparison to 02-client/legacy/v100
+ // generate appState with old ibc genesis state
+ appState := genutiltypes.AppMap{}
+ ibcGenState := types.DefaultGenesisState()
+ ibcGenState.ClientGenesis = clientGenState
+ clientv100.RegisterInterfaces(clientCtx.InterfaceRegistry)
+ appState[host.ModuleName] = clientCtx.JSONCodec.MustMarshalJSON(ibcGenState)
+ genDoc := tmtypes.GenesisDoc{
+ ChainID: suite.chainA.ChainID,
+ GenesisTime: suite.coordinator.CurrentTime,
+ InitialHeight: suite.chainA.GetContext().BlockHeight(),
+ }
+
+ // NOTE: genesis time isn't updated since we aren't testing for tendermint consensus state pruning
+ migrated, err := v100.MigrateGenesis(appState, clientCtx, genDoc, uint64(connectiontypes.DefaultTimePerBlock))
+ suite.Require().NoError(err)
+
+ expectedAppState := genutiltypes.AppMap{}
+ expectedIBCGenState := types.DefaultGenesisState()
+ expectedIBCGenState.ClientGenesis = expectedClientGenState
+
+ bz, err := clientCtx.JSONCodec.MarshalJSON(expectedIBCGenState)
+ suite.Require().NoError(err)
+ expectedAppState[host.ModuleName] = bz
+
+ suite.Require().Equal(expectedAppState, migrated)
+}
diff --git a/modules/core/module.go b/modules/core/module.go
index 1d338dcb..db7aaba3 100644
--- a/modules/core/module.go
+++ b/modules/core/module.go
@@ -19,6 +19,7 @@ import (
"github.com/cosmos/cosmos-sdk/types/module"
simtypes "github.com/cosmos/cosmos-sdk/types/simulation"
ibcclient "github.com/cosmos/ibc-go/modules/core/02-client"
+ clientkeeper "github.com/cosmos/ibc-go/modules/core/02-client/keeper"
clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
connectiontypes "github.com/cosmos/ibc-go/modules/core/03-connection/types"
channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
@@ -136,6 +137,9 @@ func (am AppModule) RegisterServices(cfg module.Configurator) {
connectiontypes.RegisterMsgServer(cfg.MsgServer(), am.keeper)
channeltypes.RegisterMsgServer(cfg.MsgServer(), am.keeper)
types.RegisterQueryService(cfg.QueryServer(), am.keeper)
+
+ m := clientkeeper.NewMigrator(am.keeper.ClientKeeper)
+ cfg.RegisterMigration(host.ModuleName, 1, m.Migrate1to2)
}
// InitGenesis performs genesis initialization for the ibc module. It returns
@@ -157,7 +161,7 @@ func (am AppModule) ExportGenesis(ctx sdk.Context, cdc codec.JSONCodec) json.Raw
}
// ConsensusVersion implements AppModule/ConsensusVersion.
-func (AppModule) ConsensusVersion() uint64 { return 1 }
+func (AppModule) ConsensusVersion() uint64 { return 2 }
// BeginBlock returns the begin blocker for the ibc module.
func (am AppModule) BeginBlock(ctx sdk.Context, req abci.RequestBeginBlock) {
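Bumping ConsensusVersion to 2 and registering Migrate1to2 only takes effect once a chain actually runs its module migrations. A minimal sketch of that wiring follows; it assumes an SDK v0.43+ upgrade module and a hypothetical upgrade name, and is not part of this patch.

package app

import (
	sdk "github.com/cosmos/cosmos-sdk/types"
	"github.com/cosmos/cosmos-sdk/types/module"
	upgradekeeper "github.com/cosmos/cosmos-sdk/x/upgrade/keeper"
	upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
)

// registerIBCUpgradeHandler schedules the in-place store migration: when the
// "ibc-go-v2" upgrade (a hypothetical name) executes, RunMigrations sees the
// ibc module moving from consensus version 1 to 2 and calls Migrate1to2.
func registerIBCUpgradeHandler(uk upgradekeeper.Keeper, mm *module.Manager, cfg module.Configurator) {
	uk.SetUpgradeHandler("ibc-go-v2", func(ctx sdk.Context, plan upgradetypes.Plan, fromVM module.VersionMap) (module.VersionMap, error) {
		return mm.RunMigrations(ctx, cfg, fromVM)
	})
}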
diff --git a/modules/light-clients/07-tendermint/types/store.go b/modules/light-clients/07-tendermint/types/store.go
index 6e1d63ec..dbb3fede 100644
--- a/modules/light-clients/07-tendermint/types/store.go
+++ b/modules/light-clients/07-tendermint/types/store.go
@@ -281,6 +281,42 @@ func GetPreviousConsensusState(clientStore sdk.KVStore, cdc codec.BinaryCodec, h
return getTmConsensusState(clientStore, cdc, csKey)
}
+// PruneAllExpiredConsensusStates iterates over all consensus states in a given
+// client store. If a consensus state is expired, both the consensus state and
+// its associated metadata are deleted.
+func PruneAllExpiredConsensusStates(
+ ctx sdk.Context, clientStore sdk.KVStore,
+ cdc codec.BinaryCodec, clientState *ClientState,
+) (err error) {
+ var heights []exported.Height
+
+ pruneCb := func(height exported.Height) bool {
+		consState, getErr := GetConsensusState(clientStore, cdc, height)
+		// this error should never occur; assign it to the named return so the
+		// caller sees it instead of iteration silently stopping
+		if getErr != nil {
+			err = getErr
+			return true
+		}
+
+ if clientState.IsExpired(consState.Timestamp, ctx.BlockTime()) {
+ heights = append(heights, height)
+ }
+
+ return false
+ }
+
+ IterateConsensusStateAscending(clientStore, pruneCb)
+ if err != nil {
+ return err
+ }
+
+ for _, height := range heights {
+ deleteConsensusState(clientStore, height)
+ deleteConsensusMetadata(clientStore, height)
+ }
+
+ return nil
+}
+
// Helper function for GetNextConsensusState and GetPreviousConsensusState
func getTmConsensusState(clientStore sdk.KVStore, cdc codec.BinaryCodec, key []byte) (*ConsensusState, bool) {
bz := clientStore.Get(key)
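PruneAllExpiredConsensusStates is the building block the in-place store migration relies on. A hedged sketch of how it could be driven over every stored client follows; the helper name and keeper wiring are assumptions for illustration, not part of this patch.

package migrate

import (
	"github.com/cosmos/cosmos-sdk/codec"
	sdk "github.com/cosmos/cosmos-sdk/types"

	clientkeeper "github.com/cosmos/ibc-go/modules/core/02-client/keeper"
	ibctmtypes "github.com/cosmos/ibc-go/modules/light-clients/07-tendermint/types"
)

// pruneExpiredTendermintStates walks all stored clients and prunes expired
// consensus states (and their metadata) for the tendermint ones.
func pruneExpiredTendermintStates(ctx sdk.Context, k clientkeeper.Keeper, cdc codec.BinaryCodec) error {
	for _, identifiedClient := range k.GetAllGenesisClients(ctx) {
		clientState, found := k.GetClientState(ctx, identifiedClient.ClientId)
		if !found {
			continue
		}

		tmClientState, ok := clientState.(*ibctmtypes.ClientState)
		if !ok {
			// only tendermint clients expire consensus states by time
			continue
		}

		clientStore := k.ClientStore(ctx, identifiedClient.ClientId)
		if err := ibctmtypes.PruneAllExpiredConsensusStates(ctx, clientStore, cdc, tmClientState); err != nil {
			return err
		}
	}

	return nil
}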
diff --git a/modules/light-clients/07-tendermint/types/tendermint.pb.go b/modules/light-clients/07-tendermint/types/tendermint.pb.go
index 84a79b66..9a0645a4 100644
--- a/modules/light-clients/07-tendermint/types/tendermint.pb.go
+++ b/modules/light-clients/07-tendermint/types/tendermint.pb.go
@@ -11,10 +11,10 @@ import (
_ "github.com/gogo/protobuf/gogoproto"
proto "github.com/gogo/protobuf/proto"
github_com_gogo_protobuf_types "github.com/gogo/protobuf/types"
- _ "github.com/golang/protobuf/ptypes/duration"
- _ "github.com/golang/protobuf/ptypes/timestamp"
github_com_tendermint_tendermint_libs_bytes "github.com/tendermint/tendermint/libs/bytes"
types2 "github.com/tendermint/tendermint/proto/tendermint/types"
+ _ "google.golang.org/protobuf/types/known/durationpb"
+ _ "google.golang.org/protobuf/types/known/timestamppb"
io "io"
math "math"
math_bits "math/bits"
diff --git a/modules/light-clients/07-tendermint/types/update.go b/modules/light-clients/07-tendermint/types/update.go
index c70746b4..c2e6788f 100644
--- a/modules/light-clients/07-tendermint/types/update.go
+++ b/modules/light-clients/07-tendermint/types/update.go
@@ -134,7 +134,6 @@ func (cs ClientState) CheckHeaderAndUpdateState(
}
// if pruneHeight is set, delete consensus state and metadata
if pruneHeight != nil {
-
deleteConsensusState(clientStore, pruneHeight)
deleteConsensusMetadata(clientStore, pruneHeight)
}
diff --git a/modules/light-clients/07-tendermint/types/update_test.go b/modules/light-clients/07-tendermint/types/update_test.go
index b93168b5..db074eee 100644
--- a/modules/light-clients/07-tendermint/types/update_test.go
+++ b/modules/light-clients/07-tendermint/types/update_test.go
@@ -400,6 +400,8 @@ func (suite *TendermintTestSuite) TestPruneConsensusState() {
clientStore = path.EndpointA.Chain.App.GetIBCKeeper().ClientKeeper.ClientStore(ctx, path.EndpointA.ClientID)
expectedProcessTime, ok := types.GetProcessedTime(clientStore, expiredHeight)
suite.Require().True(ok)
+ expectedProcessHeight, ok := types.GetProcessedHeight(clientStore, expiredHeight)
+ suite.Require().True(ok)
expectedConsKey := types.GetIterationKey(clientStore, expiredHeight)
suite.Require().NotNil(expectedConsKey)
@@ -425,6 +427,10 @@ func (suite *TendermintTestSuite) TestPruneConsensusState() {
processTime, ok := types.GetProcessedTime(clientStore, pruneHeight)
suite.Require().Equal(uint64(0), processTime, "processed time metadata not pruned")
suite.Require().False(ok)
+ processHeight, ok := types.GetProcessedHeight(clientStore, pruneHeight)
+ suite.Require().Nil(processHeight, "processed height metadata not pruned")
+ suite.Require().False(ok)
+
// check iteration key metadata is pruned
consKey := types.GetIterationKey(clientStore, pruneHeight)
suite.Require().Nil(consKey, "iteration key not pruned")
@@ -438,6 +444,12 @@ func (suite *TendermintTestSuite) TestPruneConsensusState() {
processTime, ok = types.GetProcessedTime(clientStore, expiredHeight)
suite.Require().Equal(expectedProcessTime, processTime, "processed time metadata incorrectly pruned")
suite.Require().True(ok)
+
+ // check processed height metadata is not pruned
+ processHeight, ok = types.GetProcessedHeight(clientStore, expiredHeight)
+ suite.Require().Equal(expectedProcessHeight, processHeight, "processed height metadata incorrectly pruned")
+ suite.Require().True(ok)
+
// check iteration key metadata is not pruned
consKey = types.GetIterationKey(clientStore, expiredHeight)
suite.Require().Equal(expectedConsKey, consKey, "iteration key incorrectly pruned")
diff --git a/proto/ibc/core/connection/v1/connection.proto b/proto/ibc/core/connection/v1/connection.proto
index e09f1529..72c0ff7d 100644
--- a/proto/ibc/core/connection/v1/connection.proto
+++ b/proto/ibc/core/connection/v1/connection.proto
@@ -107,8 +107,8 @@ message Version {
// Params defines the set of Connection parameters.
message Params {
- // maximum expected time per block, used to enforce block delay. This parameter should reflect the largest amount of
- // time that the chain might reasonably take to produce the next block under normal operating conditions. A safe
- // choice is 3-5x the expected time per block.
+ // maximum expected time per block (in nanoseconds), used to enforce block delay. This parameter should reflect the
+ // largest amount of time that the chain might reasonably take to produce the next block under normal operating
+ // conditions. A safe choice is 3-5x the expected time per block.
uint64 max_expected_time_per_block = 1 [(gogoproto.moretags) = "yaml:\"max_expected_time_per_block\""];
}
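Since the parameter is expressed in nanoseconds, a quick sanity check helps when choosing a value. The snippet below is illustrative only: it assumes a 5 second block time and picks a 4x margin, inside the suggested 3-5x range.

package main

import (
	"fmt"
	"time"
)

func main() {
	expectedBlockTime := 5 * time.Second             // assumed chain block time
	maxExpectedTimePerBlock := 4 * expectedBlockTime // 4x margin, within the suggested 3-5x
	fmt.Println(uint64(maxExpectedTimePerBlock))     // 20000000000 nanoseconds
}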
diff --git a/proto/ibc/lightclients/solomachine/v1/solomachine.proto b/proto/ibc/lightclients/solomachine/v1/solomachine.proto
new file mode 100644
index 00000000..4ba0da25
--- /dev/null
+++ b/proto/ibc/lightclients/solomachine/v1/solomachine.proto
@@ -0,0 +1,189 @@
+syntax = "proto3";
+
+package ibc.lightclients.solomachine.v1;
+
+option go_package = "github.com/cosmos/ibc-go/modules/core/02-client/legacy/v100";
+
+import "ibc/core/connection/v1/connection.proto";
+import "ibc/core/channel/v1/channel.proto";
+import "gogoproto/gogo.proto";
+import "google/protobuf/any.proto";
+
+// ClientState defines a solo machine client that tracks the current consensus
+// state and if the client is frozen.
+message ClientState {
+ option (gogoproto.goproto_getters) = false;
+ // latest sequence of the client state
+ uint64 sequence = 1;
+ // frozen sequence of the solo machine
+ uint64 frozen_sequence = 2 [(gogoproto.moretags) = "yaml:\"frozen_sequence\""];
+ ConsensusState consensus_state = 3 [(gogoproto.moretags) = "yaml:\"consensus_state\""];
+ // when set to true, will allow governance to update a solo machine client.
+ // The client will be unfrozen if it is frozen.
+ bool allow_update_after_proposal = 4 [(gogoproto.moretags) = "yaml:\"allow_update_after_proposal\""];
+}
+
+// ConsensusState defines a solo machine consensus state. The sequence of a
+// consensus state is contained in the "height" key used in storing the
+// consensus state.
+message ConsensusState {
+ option (gogoproto.goproto_getters) = false;
+ // public key of the solo machine
+ google.protobuf.Any public_key = 1 [(gogoproto.moretags) = "yaml:\"public_key\""];
+ // diversifier allows the same public key to be re-used across different solo
+ // machine clients (potentially on different chains) without being considered
+ // misbehaviour.
+ string diversifier = 2;
+ uint64 timestamp = 3;
+}
+
+// Header defines a solo machine consensus header
+message Header {
+ option (gogoproto.goproto_getters) = false;
+ // sequence to update solo machine public key at
+ uint64 sequence = 1;
+ uint64 timestamp = 2;
+ bytes signature = 3;
+ google.protobuf.Any new_public_key = 4 [(gogoproto.moretags) = "yaml:\"new_public_key\""];
+ string new_diversifier = 5 [(gogoproto.moretags) = "yaml:\"new_diversifier\""];
+}
+
+// Misbehaviour defines misbehaviour for a solo machine which consists
+// of a sequence and two signatures over different messages at that sequence.
+message Misbehaviour {
+ option (gogoproto.goproto_getters) = false;
+ string client_id = 1 [(gogoproto.moretags) = "yaml:\"client_id\""];
+ uint64 sequence = 2;
+ SignatureAndData signature_one = 3 [(gogoproto.moretags) = "yaml:\"signature_one\""];
+ SignatureAndData signature_two = 4 [(gogoproto.moretags) = "yaml:\"signature_two\""];
+}
+
+// SignatureAndData contains a signature and the data signed over to create that
+// signature.
+message SignatureAndData {
+ option (gogoproto.goproto_getters) = false;
+ bytes signature = 1;
+ DataType data_type = 2 [(gogoproto.moretags) = "yaml:\"data_type\""];
+ bytes data = 3;
+ uint64 timestamp = 4;
+}
+
+// TimestampedSignatureData contains the signature data and the timestamp of the
+// signature.
+message TimestampedSignatureData {
+ option (gogoproto.goproto_getters) = false;
+ bytes signature_data = 1 [(gogoproto.moretags) = "yaml:\"signature_data\""];
+ uint64 timestamp = 2;
+}
+
+// SignBytes defines the signed bytes used for signature verification.
+message SignBytes {
+ option (gogoproto.goproto_getters) = false;
+
+ uint64 sequence = 1;
+ uint64 timestamp = 2;
+ string diversifier = 3;
+ // type of the data used
+ DataType data_type = 4 [(gogoproto.moretags) = "yaml:\"data_type\""];
+ // marshaled data
+ bytes data = 5;
+}
+
+// DataType defines the type of solo machine proof being created. This is done
+// to preserve uniqueness of different data sign byte encodings.
+enum DataType {
+ option (gogoproto.goproto_enum_prefix) = false;
+
+ // Default State
+ DATA_TYPE_UNINITIALIZED_UNSPECIFIED = 0 [(gogoproto.enumvalue_customname) = "UNSPECIFIED"];
+ // Data type for client state verification
+ DATA_TYPE_CLIENT_STATE = 1 [(gogoproto.enumvalue_customname) = "CLIENT"];
+ // Data type for consensus state verification
+ DATA_TYPE_CONSENSUS_STATE = 2 [(gogoproto.enumvalue_customname) = "CONSENSUS"];
+ // Data type for connection state verification
+ DATA_TYPE_CONNECTION_STATE = 3 [(gogoproto.enumvalue_customname) = "CONNECTION"];
+ // Data type for channel state verification
+ DATA_TYPE_CHANNEL_STATE = 4 [(gogoproto.enumvalue_customname) = "CHANNEL"];
+ // Data type for packet commitment verification
+ DATA_TYPE_PACKET_COMMITMENT = 5 [(gogoproto.enumvalue_customname) = "PACKETCOMMITMENT"];
+ // Data type for packet acknowledgement verification
+ DATA_TYPE_PACKET_ACKNOWLEDGEMENT = 6 [(gogoproto.enumvalue_customname) = "PACKETACKNOWLEDGEMENT"];
+ // Data type for packet receipt absence verification
+ DATA_TYPE_PACKET_RECEIPT_ABSENCE = 7 [(gogoproto.enumvalue_customname) = "PACKETRECEIPTABSENCE"];
+ // Data type for next sequence recv verification
+ DATA_TYPE_NEXT_SEQUENCE_RECV = 8 [(gogoproto.enumvalue_customname) = "NEXTSEQUENCERECV"];
+ // Data type for header verification
+ DATA_TYPE_HEADER = 9 [(gogoproto.enumvalue_customname) = "HEADER"];
+}
+
+// HeaderData returns the SignBytes data for update verification.
+message HeaderData {
+ option (gogoproto.goproto_getters) = false;
+
+ // header public key
+ google.protobuf.Any new_pub_key = 1 [(gogoproto.moretags) = "yaml:\"new_pub_key\""];
+ // header diversifier
+ string new_diversifier = 2 [(gogoproto.moretags) = "yaml:\"new_diversifier\""];
+}
+
+// ClientStateData returns the SignBytes data for client state verification.
+message ClientStateData {
+ option (gogoproto.goproto_getters) = false;
+
+ bytes path = 1;
+ google.protobuf.Any client_state = 2 [(gogoproto.moretags) = "yaml:\"client_state\""];
+}
+
+// ConsensusStateData returns the SignBytes data for consensus state
+// verification.
+message ConsensusStateData {
+ option (gogoproto.goproto_getters) = false;
+
+ bytes path = 1;
+ google.protobuf.Any consensus_state = 2 [(gogoproto.moretags) = "yaml:\"consensus_state\""];
+}
+
+// ConnectionStateData returns the SignBytes data for connection state
+// verification.
+message ConnectionStateData {
+ option (gogoproto.goproto_getters) = false;
+
+ bytes path = 1;
+ ibc.core.connection.v1.ConnectionEnd connection = 2;
+}
+
+// ChannelStateData returns the SignBytes data for channel state
+// verification.
+message ChannelStateData {
+ option (gogoproto.goproto_getters) = false;
+
+ bytes path = 1;
+ ibc.core.channel.v1.Channel channel = 2;
+}
+
+// PacketCommitmentData returns the SignBytes data for packet commitment
+// verification.
+message PacketCommitmentData {
+ bytes path = 1;
+ bytes commitment = 2;
+}
+
+// PacketAcknowledgementData returns the SignBytes data for acknowledgement
+// verification.
+message PacketAcknowledgementData {
+ bytes path = 1;
+ bytes acknowledgement = 2;
+}
+
+// PacketReceiptAbsenceData returns the SignBytes data for
+// packet receipt absence verification.
+message PacketReceiptAbsenceData {
+ bytes path = 1;
+}
+
+// NextSequenceRecvData returns the SignBytes data for verification of the next
+// sequence to be received.
+message NextSequenceRecvData {
+ bytes path = 1;
+ uint64 next_seq_recv = 2 [(gogoproto.moretags) = "yaml:\"next_seq_recv\""];
+}
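These legacy definitions exist only so that exported genesis files containing v1 solo machine clients can still be decoded. A minimal sketch of preparing a codec for that purpose, assuming the standalone helper name below, mirrors the RegisterInterfaces calls used in the migration code above.

package migrate

import (
	"github.com/cosmos/cosmos-sdk/codec"
	codectypes "github.com/cosmos/cosmos-sdk/codec/types"

	clientv100 "github.com/cosmos/ibc-go/modules/core/02-client/legacy/v100"
)

// newLegacyAwareCodec returns a proto codec that can resolve the legacy v1
// solo machine Any values found in old exported genesis files.
func newLegacyAwareCodec() *codec.ProtoCodec {
	registry := codectypes.NewInterfaceRegistry()
	clientv100.RegisterInterfaces(registry)
	return codec.NewProtoCodec(registry)
}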
From 38b50b2c58f4c560478da419ca9b857b27fdcd22 Mon Sep 17 00:00:00 2001
From: Sean King
Date: Fri, 18 Jun 2021 15:52:38 +0200
Subject: [PATCH 080/393] Fix/channel open/close events (#220)
* fix: moving event to keeper function instead of rpc handler
* refactor: removing unnecessary handler
* refactor: delete channel handler file
* Apply suggestions from code review
Co-authored-by: Aditya
---
modules/core/04-channel/handler.go | 186 --------------------
modules/core/04-channel/keeper/handshake.go | 67 +++++++
modules/core/keeper/msg_server.go | 75 ++++++--
3 files changed, 129 insertions(+), 199 deletions(-)
delete mode 100644 modules/core/04-channel/handler.go
diff --git a/modules/core/04-channel/handler.go b/modules/core/04-channel/handler.go
deleted file mode 100644
index 2c222a2e..00000000
--- a/modules/core/04-channel/handler.go
+++ /dev/null
@@ -1,186 +0,0 @@
-package channel
-
-import (
- sdk "github.com/cosmos/cosmos-sdk/types"
- sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
- capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"
- "github.com/cosmos/ibc-go/modules/core/04-channel/keeper"
- "github.com/cosmos/ibc-go/modules/core/04-channel/types"
-)
-
-// HandleMsgChannelOpenInit defines the sdk.Handler for MsgChannelOpenInit
-func HandleMsgChannelOpenInit(ctx sdk.Context, k keeper.Keeper, portCap *capabilitytypes.Capability, msg *types.MsgChannelOpenInit) (*sdk.Result, string, *capabilitytypes.Capability, error) {
- channelID, capKey, err := k.ChanOpenInit(
- ctx, msg.Channel.Ordering, msg.Channel.ConnectionHops, msg.PortId,
- portCap, msg.Channel.Counterparty, msg.Channel.Version,
- )
- if err != nil {
- return nil, "", nil, sdkerrors.Wrap(err, "channel handshake open init failed")
- }
-
- ctx.EventManager().EmitEvents(sdk.Events{
- sdk.NewEvent(
- types.EventTypeChannelOpenInit,
- sdk.NewAttribute(types.AttributeKeyPortID, msg.PortId),
- sdk.NewAttribute(types.AttributeKeyChannelID, channelID),
- sdk.NewAttribute(types.AttributeCounterpartyPortID, msg.Channel.Counterparty.PortId),
- sdk.NewAttribute(types.AttributeCounterpartyChannelID, msg.Channel.Counterparty.ChannelId),
- sdk.NewAttribute(types.AttributeKeyConnectionID, msg.Channel.ConnectionHops[0]),
- ),
- sdk.NewEvent(
- sdk.EventTypeMessage,
- sdk.NewAttribute(sdk.AttributeKeyModule, types.AttributeValueCategory),
- ),
- })
-
- return &sdk.Result{
- Events: ctx.EventManager().Events().ToABCIEvents(),
- }, channelID, capKey, nil
-}
-
-// HandleMsgChannelOpenTry defines the sdk.Handler for MsgChannelOpenTry
-func HandleMsgChannelOpenTry(ctx sdk.Context, k keeper.Keeper, portCap *capabilitytypes.Capability, msg *types.MsgChannelOpenTry) (*sdk.Result, string, *capabilitytypes.Capability, error) {
- channelID, capKey, err := k.ChanOpenTry(ctx, msg.Channel.Ordering, msg.Channel.ConnectionHops, msg.PortId, msg.PreviousChannelId,
- portCap, msg.Channel.Counterparty, msg.Channel.Version, msg.CounterpartyVersion, msg.ProofInit, msg.ProofHeight,
- )
- if err != nil {
- return nil, "", nil, sdkerrors.Wrap(err, "channel handshake open try failed")
- }
-
- ctx.EventManager().EmitEvents(sdk.Events{
- sdk.NewEvent(
- types.EventTypeChannelOpenTry,
- sdk.NewAttribute(types.AttributeKeyPortID, msg.PortId),
- sdk.NewAttribute(types.AttributeKeyChannelID, channelID),
- sdk.NewAttribute(types.AttributeCounterpartyPortID, msg.Channel.Counterparty.PortId),
- sdk.NewAttribute(types.AttributeCounterpartyChannelID, msg.Channel.Counterparty.ChannelId),
- sdk.NewAttribute(types.AttributeKeyConnectionID, msg.Channel.ConnectionHops[0]),
- ),
- sdk.NewEvent(
- sdk.EventTypeMessage,
- sdk.NewAttribute(sdk.AttributeKeyModule, types.AttributeValueCategory),
- ),
- })
-
- return &sdk.Result{
- Events: ctx.EventManager().Events().ToABCIEvents(),
- }, channelID, capKey, nil
-}
-
-// HandleMsgChannelOpenAck defines the sdk.Handler for MsgChannelOpenAck
-func HandleMsgChannelOpenAck(ctx sdk.Context, k keeper.Keeper, channelCap *capabilitytypes.Capability, msg *types.MsgChannelOpenAck) (*sdk.Result, error) {
- err := k.ChanOpenAck(
- ctx, msg.PortId, msg.ChannelId, channelCap, msg.CounterpartyVersion, msg.CounterpartyChannelId, msg.ProofTry, msg.ProofHeight,
- )
- if err != nil {
- return nil, sdkerrors.Wrap(err, "channel handshake open ack failed")
- }
-
- channel, _ := k.GetChannel(ctx, msg.PortId, msg.ChannelId)
-
- ctx.EventManager().EmitEvents(sdk.Events{
- sdk.NewEvent(
- types.EventTypeChannelOpenAck,
- sdk.NewAttribute(types.AttributeKeyPortID, msg.PortId),
- sdk.NewAttribute(types.AttributeKeyChannelID, msg.ChannelId),
- sdk.NewAttribute(types.AttributeCounterpartyPortID, channel.Counterparty.PortId),
- sdk.NewAttribute(types.AttributeCounterpartyChannelID, channel.Counterparty.ChannelId),
- sdk.NewAttribute(types.AttributeKeyConnectionID, channel.ConnectionHops[0]),
- ),
- sdk.NewEvent(
- sdk.EventTypeMessage,
- sdk.NewAttribute(sdk.AttributeKeyModule, types.AttributeValueCategory),
- ),
- })
-
- return &sdk.Result{
- Events: ctx.EventManager().Events().ToABCIEvents(),
- }, nil
-}
-
-// HandleMsgChannelOpenConfirm defines the sdk.Handler for MsgChannelOpenConfirm
-func HandleMsgChannelOpenConfirm(ctx sdk.Context, k keeper.Keeper, channelCap *capabilitytypes.Capability, msg *types.MsgChannelOpenConfirm) (*sdk.Result, error) {
- err := k.ChanOpenConfirm(ctx, msg.PortId, msg.ChannelId, channelCap, msg.ProofAck, msg.ProofHeight)
- if err != nil {
- return nil, sdkerrors.Wrap(err, "channel handshake open confirm failed")
- }
-
- channel, _ := k.GetChannel(ctx, msg.PortId, msg.ChannelId)
-
- ctx.EventManager().EmitEvents(sdk.Events{
- sdk.NewEvent(
- types.EventTypeChannelOpenConfirm,
- sdk.NewAttribute(types.AttributeKeyPortID, msg.PortId),
- sdk.NewAttribute(types.AttributeKeyChannelID, msg.ChannelId),
- sdk.NewAttribute(types.AttributeCounterpartyPortID, channel.Counterparty.PortId),
- sdk.NewAttribute(types.AttributeCounterpartyChannelID, channel.Counterparty.ChannelId),
- sdk.NewAttribute(types.AttributeKeyConnectionID, channel.ConnectionHops[0]),
- ),
- sdk.NewEvent(
- sdk.EventTypeMessage,
- sdk.NewAttribute(sdk.AttributeKeyModule, types.AttributeValueCategory),
- ),
- })
-
- return &sdk.Result{
- Events: ctx.EventManager().Events().ToABCIEvents(),
- }, nil
-}
-
-// HandleMsgChannelCloseInit defines the sdk.Handler for MsgChannelCloseInit
-func HandleMsgChannelCloseInit(ctx sdk.Context, k keeper.Keeper, channelCap *capabilitytypes.Capability, msg *types.MsgChannelCloseInit) (*sdk.Result, error) {
- err := k.ChanCloseInit(ctx, msg.PortId, msg.ChannelId, channelCap)
- if err != nil {
- return nil, sdkerrors.Wrap(err, "channel handshake close init failed")
- }
-
- channel, _ := k.GetChannel(ctx, msg.PortId, msg.ChannelId)
-
- ctx.EventManager().EmitEvents(sdk.Events{
- sdk.NewEvent(
- types.EventTypeChannelCloseInit,
- sdk.NewAttribute(types.AttributeKeyPortID, msg.PortId),
- sdk.NewAttribute(types.AttributeKeyChannelID, msg.ChannelId),
- sdk.NewAttribute(types.AttributeCounterpartyPortID, channel.Counterparty.PortId),
- sdk.NewAttribute(types.AttributeCounterpartyChannelID, channel.Counterparty.ChannelId),
- sdk.NewAttribute(types.AttributeKeyConnectionID, channel.ConnectionHops[0]),
- ),
- sdk.NewEvent(
- sdk.EventTypeMessage,
- sdk.NewAttribute(sdk.AttributeKeyModule, types.AttributeValueCategory),
- ),
- })
-
- return &sdk.Result{
- Events: ctx.EventManager().Events().ToABCIEvents(),
- }, nil
-}
-
-// HandleMsgChannelCloseConfirm defines the sdk.Handler for MsgChannelCloseConfirm
-func HandleMsgChannelCloseConfirm(ctx sdk.Context, k keeper.Keeper, channelCap *capabilitytypes.Capability, msg *types.MsgChannelCloseConfirm) (*sdk.Result, error) {
- err := k.ChanCloseConfirm(ctx, msg.PortId, msg.ChannelId, channelCap, msg.ProofInit, msg.ProofHeight)
- if err != nil {
- return nil, sdkerrors.Wrap(err, "channel handshake close confirm failed")
- }
-
- channel, _ := k.GetChannel(ctx, msg.PortId, msg.ChannelId)
-
- ctx.EventManager().EmitEvents(sdk.Events{
- sdk.NewEvent(
- types.EventTypeChannelCloseConfirm,
- sdk.NewAttribute(types.AttributeKeyPortID, msg.PortId),
- sdk.NewAttribute(types.AttributeKeyChannelID, msg.ChannelId),
- sdk.NewAttribute(types.AttributeCounterpartyPortID, channel.Counterparty.PortId),
- sdk.NewAttribute(types.AttributeCounterpartyChannelID, channel.Counterparty.ChannelId),
- sdk.NewAttribute(types.AttributeKeyConnectionID, channel.ConnectionHops[0]),
- ),
- sdk.NewEvent(
- sdk.EventTypeMessage,
- sdk.NewAttribute(sdk.AttributeKeyModule, types.AttributeValueCategory),
- ),
- })
-
- return &sdk.Result{
- Events: ctx.EventManager().Events().ToABCIEvents(),
- }, nil
-}
diff --git a/modules/core/04-channel/keeper/handshake.go b/modules/core/04-channel/keeper/handshake.go
index 2dcfdbed..5f9d70ad 100644
--- a/modules/core/04-channel/keeper/handshake.go
+++ b/modules/core/04-channel/keeper/handshake.go
@@ -91,6 +91,17 @@ func (k Keeper) ChanOpenInit(
telemetry.IncrCounter(1, "ibc", "channel", "open-init")
}()
+ ctx.EventManager().EmitEvents(sdk.Events{
+ sdk.NewEvent(
+ types.EventTypeChannelOpenInit,
+ sdk.NewAttribute(types.AttributeKeyPortID, portID),
+ sdk.NewAttribute(types.AttributeKeyChannelID, channelID),
+ sdk.NewAttribute(types.AttributeCounterpartyPortID, counterparty.PortId),
+ sdk.NewAttribute(types.AttributeCounterpartyChannelID, counterparty.ChannelId),
+ sdk.NewAttribute(types.AttributeKeyConnectionID, connectionHops[0]),
+ ),
+ })
+
return channelID, capKey, nil
}
@@ -232,6 +243,17 @@ func (k Keeper) ChanOpenTry(
telemetry.IncrCounter(1, "ibc", "channel", "open-try")
}()
+ ctx.EventManager().EmitEvents(sdk.Events{
+ sdk.NewEvent(
+ types.EventTypeChannelOpenTry,
+ sdk.NewAttribute(types.AttributeKeyPortID, portID),
+ sdk.NewAttribute(types.AttributeKeyChannelID, channelID),
+ sdk.NewAttribute(types.AttributeCounterpartyPortID, channel.Counterparty.PortId),
+ sdk.NewAttribute(types.AttributeCounterpartyChannelID, channel.Counterparty.ChannelId),
+ sdk.NewAttribute(types.AttributeKeyConnectionID, channel.ConnectionHops[0]),
+ ),
+ })
+
return channelID, capKey, nil
}
@@ -307,6 +329,17 @@ func (k Keeper) ChanOpenAck(
channel.Counterparty.ChannelId = counterpartyChannelID
k.SetChannel(ctx, portID, channelID, channel)
+ ctx.EventManager().EmitEvents(sdk.Events{
+ sdk.NewEvent(
+ types.EventTypeChannelOpenAck,
+ sdk.NewAttribute(types.AttributeKeyPortID, portID),
+ sdk.NewAttribute(types.AttributeKeyChannelID, channelID),
+ sdk.NewAttribute(types.AttributeCounterpartyPortID, channel.Counterparty.PortId),
+ sdk.NewAttribute(types.AttributeCounterpartyChannelID, channel.Counterparty.ChannelId),
+ sdk.NewAttribute(types.AttributeKeyConnectionID, channel.ConnectionHops[0]),
+ ),
+ })
+
return nil
}
@@ -375,6 +408,18 @@ func (k Keeper) ChanOpenConfirm(
defer func() {
telemetry.IncrCounter(1, "ibc", "channel", "open-confirm")
}()
+
+ ctx.EventManager().EmitEvents(sdk.Events{
+ sdk.NewEvent(
+ types.EventTypeChannelOpenConfirm,
+ sdk.NewAttribute(types.AttributeKeyPortID, portID),
+ sdk.NewAttribute(types.AttributeKeyChannelID, channelID),
+ sdk.NewAttribute(types.AttributeCounterpartyPortID, channel.Counterparty.PortId),
+ sdk.NewAttribute(types.AttributeCounterpartyChannelID, channel.Counterparty.ChannelId),
+ sdk.NewAttribute(types.AttributeKeyConnectionID, channel.ConnectionHops[0]),
+ ),
+ })
+
return nil
}
@@ -425,6 +470,17 @@ func (k Keeper) ChanCloseInit(
channel.State = types.CLOSED
k.SetChannel(ctx, portID, channelID, channel)
+ ctx.EventManager().EmitEvents(sdk.Events{
+ sdk.NewEvent(
+ types.EventTypeChannelCloseInit,
+ sdk.NewAttribute(types.AttributeKeyPortID, portID),
+ sdk.NewAttribute(types.AttributeKeyChannelID, channelID),
+ sdk.NewAttribute(types.AttributeCounterpartyPortID, channel.Counterparty.PortId),
+ sdk.NewAttribute(types.AttributeCounterpartyChannelID, channel.Counterparty.ChannelId),
+ sdk.NewAttribute(types.AttributeKeyConnectionID, channel.ConnectionHops[0]),
+ ),
+ })
+
return nil
}
@@ -492,5 +548,16 @@ func (k Keeper) ChanCloseConfirm(
channel.State = types.CLOSED
k.SetChannel(ctx, portID, channelID, channel)
+ ctx.EventManager().EmitEvents(sdk.Events{
+ sdk.NewEvent(
+ types.EventTypeChannelCloseConfirm,
+ sdk.NewAttribute(types.AttributeKeyPortID, portID),
+ sdk.NewAttribute(types.AttributeKeyChannelID, channelID),
+ sdk.NewAttribute(types.AttributeCounterpartyPortID, channel.Counterparty.PortId),
+ sdk.NewAttribute(types.AttributeCounterpartyChannelID, channel.Counterparty.ChannelId),
+ sdk.NewAttribute(types.AttributeKeyConnectionID, channel.ConnectionHops[0]),
+ ),
+ })
+
return nil
}
diff --git a/modules/core/keeper/msg_server.go b/modules/core/keeper/msg_server.go
index e1f20810..626a6fde 100644
--- a/modules/core/keeper/msg_server.go
+++ b/modules/core/keeper/msg_server.go
@@ -10,7 +10,7 @@ import (
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
connectiontypes "github.com/cosmos/ibc-go/modules/core/03-connection/types"
- channel "github.com/cosmos/ibc-go/modules/core/04-channel"
+ "github.com/cosmos/ibc-go/modules/core/04-channel/types"
channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
porttypes "github.com/cosmos/ibc-go/modules/core/05-port/types"
)
@@ -264,9 +264,12 @@ func (k Keeper) ChannelOpenInit(goCtx context.Context, msg *channeltypes.MsgChan
return nil, sdkerrors.Wrap(err, "could not retrieve module from port-id")
}
- _, channelID, cap, err := channel.HandleMsgChannelOpenInit(ctx, k.ChannelKeeper, portCap, msg)
+ channelID, cap, err := k.ChannelKeeper.ChanOpenInit(
+ ctx, msg.Channel.Ordering, msg.Channel.ConnectionHops, msg.PortId,
+ portCap, msg.Channel.Counterparty, msg.Channel.Version,
+ )
if err != nil {
- return nil, err
+ return nil, sdkerrors.Wrap(err, "channel handshake open init failed")
}
// Retrieve callbacks from router
@@ -279,6 +282,13 @@ func (k Keeper) ChannelOpenInit(goCtx context.Context, msg *channeltypes.MsgChan
return nil, sdkerrors.Wrap(err, "channel open init callback failed")
}
+ ctx.EventManager().EmitEvents(sdk.Events{
+ sdk.NewEvent(
+ sdk.EventTypeMessage,
+ sdk.NewAttribute(sdk.AttributeKeyModule, connectiontypes.AttributeValueCategory),
+ ),
+ })
+
return &channeltypes.MsgChannelOpenInitResponse{}, nil
}
@@ -291,9 +301,11 @@ func (k Keeper) ChannelOpenTry(goCtx context.Context, msg *channeltypes.MsgChann
return nil, sdkerrors.Wrap(err, "could not retrieve module from port-id")
}
- _, channelID, cap, err := channel.HandleMsgChannelOpenTry(ctx, k.ChannelKeeper, portCap, msg)
+ channelID, cap, err := k.ChannelKeeper.ChanOpenTry(ctx, msg.Channel.Ordering, msg.Channel.ConnectionHops, msg.PortId, msg.PreviousChannelId,
+ portCap, msg.Channel.Counterparty, msg.Channel.Version, msg.CounterpartyVersion, msg.ProofInit, msg.ProofHeight,
+ )
if err != nil {
- return nil, err
+ return nil, sdkerrors.Wrap(err, "channel handshake open try failed")
}
// Retrieve callbacks from router
@@ -306,6 +318,13 @@ func (k Keeper) ChannelOpenTry(goCtx context.Context, msg *channeltypes.MsgChann
return nil, sdkerrors.Wrap(err, "channel open try callback failed")
}
+ ctx.EventManager().EmitEvents(sdk.Events{
+ sdk.NewEvent(
+ sdk.EventTypeMessage,
+ sdk.NewAttribute(sdk.AttributeKeyModule, types.AttributeValueCategory),
+ ),
+ })
+
return &channeltypes.MsgChannelOpenTryResponse{}, nil
}
@@ -325,15 +344,24 @@ func (k Keeper) ChannelOpenAck(goCtx context.Context, msg *channeltypes.MsgChann
return nil, sdkerrors.Wrapf(porttypes.ErrInvalidRoute, "route not found to module: %s", module)
}
- _, err = channel.HandleMsgChannelOpenAck(ctx, k.ChannelKeeper, cap, msg)
+ err = k.ChannelKeeper.ChanOpenAck(
+ ctx, msg.PortId, msg.ChannelId, cap, msg.CounterpartyVersion, msg.CounterpartyChannelId, msg.ProofTry, msg.ProofHeight,
+ )
if err != nil {
- return nil, err
+ return nil, sdkerrors.Wrap(err, "channel handshake open ack failed")
}
if err = cbs.OnChanOpenAck(ctx, msg.PortId, msg.ChannelId, msg.CounterpartyVersion); err != nil {
return nil, sdkerrors.Wrap(err, "channel open ack callback failed")
}
+ ctx.EventManager().EmitEvents(sdk.Events{
+ sdk.NewEvent(
+ sdk.EventTypeMessage,
+ sdk.NewAttribute(sdk.AttributeKeyModule, types.AttributeValueCategory),
+ ),
+ })
+
return &channeltypes.MsgChannelOpenAckResponse{}, nil
}
@@ -353,15 +381,22 @@ func (k Keeper) ChannelOpenConfirm(goCtx context.Context, msg *channeltypes.MsgC
return nil, sdkerrors.Wrapf(porttypes.ErrInvalidRoute, "route not found to module: %s", module)
}
- _, err = channel.HandleMsgChannelOpenConfirm(ctx, k.ChannelKeeper, cap, msg)
+ err = k.ChannelKeeper.ChanOpenConfirm(ctx, msg.PortId, msg.ChannelId, cap, msg.ProofAck, msg.ProofHeight)
if err != nil {
- return nil, err
+ return nil, sdkerrors.Wrap(err, "channel handshake open confirm failed")
}
if err = cbs.OnChanOpenConfirm(ctx, msg.PortId, msg.ChannelId); err != nil {
return nil, sdkerrors.Wrap(err, "channel open confirm callback failed")
}
+ ctx.EventManager().EmitEvents(sdk.Events{
+ sdk.NewEvent(
+ sdk.EventTypeMessage,
+ sdk.NewAttribute(sdk.AttributeKeyModule, types.AttributeValueCategory),
+ ),
+ })
+
return &channeltypes.MsgChannelOpenConfirmResponse{}, nil
}
@@ -384,11 +419,18 @@ func (k Keeper) ChannelCloseInit(goCtx context.Context, msg *channeltypes.MsgCha
return nil, sdkerrors.Wrap(err, "channel close init callback failed")
}
- _, err = channel.HandleMsgChannelCloseInit(ctx, k.ChannelKeeper, cap, msg)
+ err = k.ChannelKeeper.ChanCloseInit(ctx, msg.PortId, msg.ChannelId, cap)
if err != nil {
- return nil, err
+ return nil, sdkerrors.Wrap(err, "channel handshake close init failed")
}
+ ctx.EventManager().EmitEvents(sdk.Events{
+ sdk.NewEvent(
+ sdk.EventTypeMessage,
+ sdk.NewAttribute(sdk.AttributeKeyModule, types.AttributeValueCategory),
+ ),
+ })
+
return &channeltypes.MsgChannelCloseInitResponse{}, nil
}
@@ -412,11 +454,18 @@ func (k Keeper) ChannelCloseConfirm(goCtx context.Context, msg *channeltypes.Msg
return nil, sdkerrors.Wrap(err, "channel close confirm callback failed")
}
- _, err = channel.HandleMsgChannelCloseConfirm(ctx, k.ChannelKeeper, cap, msg)
+ err = k.ChannelKeeper.ChanCloseConfirm(ctx, msg.PortId, msg.ChannelId, cap, msg.ProofInit, msg.ProofHeight)
if err != nil {
- return nil, err
+ return nil, sdkerrors.Wrap(err, "channel handshake close confirm failed")
}
+ ctx.EventManager().EmitEvents(sdk.Events{
+ sdk.NewEvent(
+ sdk.EventTypeMessage,
+ sdk.NewAttribute(sdk.AttributeKeyModule, types.AttributeValueCategory),
+ ),
+ })
+
return &channeltypes.MsgChannelCloseConfirmResponse{}, nil
}
From 007c6804bd30174bd24938466352ac3812f37f05 Mon Sep 17 00:00:00 2001
From: Aleksandr Bezobchuk
Date: Tue, 22 Jun 2021 10:07:42 -0400
Subject: [PATCH 081/393] ibc: fix metrics (#223)
---
modules/apps/transfer/keeper/relay.go | 23 +++++++-------
modules/core/02-client/keeper/client.go | 22 +++++++-------
modules/core/02-client/keeper/proposal.go | 6 ++--
modules/core/02-client/types/metrics.go | 9 ++++++
modules/core/keeper/msg_server.go | 37 ++++++++++++-----------
modules/core/types/metrics.go | 12 ++++++++
6 files changed, 66 insertions(+), 43 deletions(-)
create mode 100644 modules/core/02-client/types/metrics.go
create mode 100644 modules/core/types/metrics.go
diff --git a/modules/apps/transfer/keeper/relay.go b/modules/apps/transfer/keeper/relay.go
index 7e9bdf37..161295d0 100644
--- a/modules/apps/transfer/keeper/relay.go
+++ b/modules/apps/transfer/keeper/relay.go
@@ -13,6 +13,7 @@ import (
clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
host "github.com/cosmos/ibc-go/modules/core/24-host"
+ coretypes "github.com/cosmos/ibc-go/modules/core/types"
)
// SendTransfer handles transfer sending logic. There are 2 possible cases:
@@ -101,8 +102,8 @@ func (k Keeper) SendTransfer(
}
labels := []metrics.Label{
- telemetry.NewLabel("destination-port", destinationPort),
- telemetry.NewLabel("destination-channel", destinationChannel),
+ telemetry.NewLabel(coretypes.LabelDestinationPort, destinationPort),
+ telemetry.NewLabel(coretypes.LabelDestinationChannel, destinationChannel),
}
// NOTE: SendTransfer simply sends the denomination as it exists on its own
@@ -110,7 +111,7 @@ func (k Keeper) SendTransfer(
// prefixing as necessary.
if types.SenderChainIsSource(sourcePort, sourceChannel, fullDenomPath) {
- labels = append(labels, telemetry.NewLabel("source", "true"))
+ labels = append(labels, telemetry.NewLabel(coretypes.LabelSource, "true"))
// create the escrow address for the tokens
escrowAddress := types.GetEscrowAddress(sourcePort, sourceChannel)
@@ -123,7 +124,7 @@ func (k Keeper) SendTransfer(
}
} else {
- labels = append(labels, telemetry.NewLabel("source", "false"))
+ labels = append(labels, telemetry.NewLabel(coretypes.LabelSource, "false"))
// transfer the coins to the module account and burn them
if err := k.bankKeeper.SendCoinsFromAccountToModule(
@@ -165,7 +166,7 @@ func (k Keeper) SendTransfer(
telemetry.SetGaugeWithLabels(
[]string{"tx", "msg", "ibc", "transfer"},
float32(token.Amount.Int64()),
- []metrics.Label{telemetry.NewLabel("denom", fullDenomPath)},
+ []metrics.Label{telemetry.NewLabel(coretypes.LabelDenom, fullDenomPath)},
)
telemetry.IncrCounterWithLabels(
@@ -200,8 +201,8 @@ func (k Keeper) OnRecvPacket(ctx sdk.Context, packet channeltypes.Packet, data t
}
labels := []metrics.Label{
- telemetry.NewLabel("source-port", packet.GetSourcePort()),
- telemetry.NewLabel("source-channel", packet.GetSourceChannel()),
+ telemetry.NewLabel(coretypes.LabelSourcePort, packet.GetSourcePort()),
+ telemetry.NewLabel(coretypes.LabelSourceChannel, packet.GetSourceChannel()),
}
// This is the prefix that would have been prefixed to the denomination
@@ -244,14 +245,14 @@ func (k Keeper) OnRecvPacket(ctx sdk.Context, packet channeltypes.Packet, data t
telemetry.SetGaugeWithLabels(
[]string{"ibc", types.ModuleName, "packet", "receive"},
float32(data.Amount),
- []metrics.Label{telemetry.NewLabel("denom", unprefixedDenom)},
+ []metrics.Label{telemetry.NewLabel(coretypes.LabelDenom, unprefixedDenom)},
)
telemetry.IncrCounterWithLabels(
[]string{"ibc", types.ModuleName, "receive"},
1,
append(
- labels, telemetry.NewLabel("source", "true"),
+ labels, telemetry.NewLabel(coretypes.LabelSource, "true"),
),
)
}()
@@ -303,14 +304,14 @@ func (k Keeper) OnRecvPacket(ctx sdk.Context, packet channeltypes.Packet, data t
telemetry.SetGaugeWithLabels(
[]string{"ibc", types.ModuleName, "packet", "receive"},
float32(data.Amount),
- []metrics.Label{telemetry.NewLabel("denom", data.Denom)},
+ []metrics.Label{telemetry.NewLabel(coretypes.LabelDenom, data.Denom)},
)
telemetry.IncrCounterWithLabels(
[]string{"ibc", types.ModuleName, "receive"},
1,
append(
- labels, telemetry.NewLabel("source", "false"),
+ labels, telemetry.NewLabel(coretypes.LabelSource, "false"),
),
)
}()
diff --git a/modules/core/02-client/keeper/client.go b/modules/core/02-client/keeper/client.go
index 1ebccead..b16fa819 100644
--- a/modules/core/02-client/keeper/client.go
+++ b/modules/core/02-client/keeper/client.go
@@ -47,7 +47,7 @@ func (k Keeper) CreateClient(
telemetry.IncrCounterWithLabels(
[]string{"ibc", "client", "create"},
1,
- []metrics.Label{telemetry.NewLabel("client-type", clientState.ClientType())},
+ []metrics.Label{telemetry.NewLabel(types.LabelClientType, clientState.ClientType())},
)
}()
@@ -112,9 +112,9 @@ func (k Keeper) UpdateClient(ctx sdk.Context, clientID string, header exported.H
[]string{"ibc", "client", "update"},
1,
[]metrics.Label{
- telemetry.NewLabel("client-type", clientState.ClientType()),
- telemetry.NewLabel("client-id", clientID),
- telemetry.NewLabel("update-type", "msg"),
+ telemetry.NewLabel(types.LabelClientType, clientState.ClientType()),
+ telemetry.NewLabel(types.LabelClientID, clientID),
+ telemetry.NewLabel(types.LabelUpdateType, "msg"),
},
)
}()
@@ -129,9 +129,9 @@ func (k Keeper) UpdateClient(ctx sdk.Context, clientID string, header exported.H
[]string{"ibc", "client", "misbehaviour"},
1,
[]metrics.Label{
- telemetry.NewLabel("client-type", clientState.ClientType()),
- telemetry.NewLabel("client-id", clientID),
- telemetry.NewLabel("msg-type", "update"),
+ telemetry.NewLabel(types.LabelClientType, clientState.ClientType()),
+ telemetry.NewLabel(types.LabelClientID, clientID),
+ telemetry.NewLabel(types.LabelMsgType, "update"),
},
)
}()
@@ -181,8 +181,8 @@ func (k Keeper) UpgradeClient(ctx sdk.Context, clientID string, upgradedClient e
[]string{"ibc", "client", "upgrade"},
1,
[]metrics.Label{
- telemetry.NewLabel("client-type", updatedClientState.ClientType()),
- telemetry.NewLabel("client-id", clientID),
+ telemetry.NewLabel(types.LabelClientType, updatedClientState.ClientType()),
+ telemetry.NewLabel(types.LabelClientID, clientID),
},
)
}()
@@ -231,8 +231,8 @@ func (k Keeper) CheckMisbehaviourAndUpdateState(ctx sdk.Context, misbehaviour ex
[]string{"ibc", "client", "misbehaviour"},
1,
[]metrics.Label{
- telemetry.NewLabel("client-type", misbehaviour.ClientType()),
- telemetry.NewLabel("client-id", misbehaviour.GetClientID()),
+ telemetry.NewLabel(types.LabelClientType, misbehaviour.ClientType()),
+ telemetry.NewLabel(types.LabelClientID, misbehaviour.GetClientID()),
},
)
}()
diff --git a/modules/core/02-client/keeper/proposal.go b/modules/core/02-client/keeper/proposal.go
index 1880e2cd..da2e93ba 100644
--- a/modules/core/02-client/keeper/proposal.go
+++ b/modules/core/02-client/keeper/proposal.go
@@ -62,9 +62,9 @@ func (k Keeper) ClientUpdateProposal(ctx sdk.Context, p *types.ClientUpdatePropo
[]string{"ibc", "client", "update"},
1,
[]metrics.Label{
- telemetry.NewLabel("client-type", clientState.ClientType()),
- telemetry.NewLabel("client-id", p.SubjectClientId),
- telemetry.NewLabel("update-type", "proposal"),
+ telemetry.NewLabel(types.LabelClientType, clientState.ClientType()),
+ telemetry.NewLabel(types.LabelClientID, p.SubjectClientId),
+ telemetry.NewLabel(types.LabelUpdateType, "proposal"),
},
)
}()
diff --git a/modules/core/02-client/types/metrics.go b/modules/core/02-client/types/metrics.go
new file mode 100644
index 00000000..879e79a4
--- /dev/null
+++ b/modules/core/02-client/types/metrics.go
@@ -0,0 +1,9 @@
+package types
+
+// Prometheus metric labels.
+const (
+ LabelClientType = "client_type"
+ LabelClientID = "client_id"
+ LabelUpdateType = "update_type"
+ LabelMsgType = "msg_type"
+)
diff --git a/modules/core/keeper/msg_server.go b/modules/core/keeper/msg_server.go
index 626a6fde..3aeeffdb 100644
--- a/modules/core/keeper/msg_server.go
+++ b/modules/core/keeper/msg_server.go
@@ -13,6 +13,7 @@ import (
"github.com/cosmos/ibc-go/modules/core/04-channel/types"
channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
porttypes "github.com/cosmos/ibc-go/modules/core/05-port/types"
+ coretypes "github.com/cosmos/ibc-go/modules/core/types"
)
var _ clienttypes.MsgServer = Keeper{}
@@ -518,10 +519,10 @@ func (k Keeper) RecvPacket(goCtx context.Context, msg *channeltypes.MsgRecvPacke
[]string{"tx", "msg", "ibc", channeltypes.EventTypeRecvPacket},
1,
[]metrics.Label{
- telemetry.NewLabel("source-port", msg.Packet.SourcePort),
- telemetry.NewLabel("source-channel", msg.Packet.SourceChannel),
- telemetry.NewLabel("destination-port", msg.Packet.DestinationPort),
- telemetry.NewLabel("destination-channel", msg.Packet.DestinationChannel),
+ telemetry.NewLabel(coretypes.LabelSourcePort, msg.Packet.SourcePort),
+ telemetry.NewLabel(coretypes.LabelSourceChannel, msg.Packet.SourceChannel),
+ telemetry.NewLabel(coretypes.LabelDestinationPort, msg.Packet.DestinationPort),
+ telemetry.NewLabel(coretypes.LabelDestinationChannel, msg.Packet.DestinationChannel),
},
)
}()
@@ -571,11 +572,11 @@ func (k Keeper) Timeout(goCtx context.Context, msg *channeltypes.MsgTimeout) (*c
[]string{"ibc", "timeout", "packet"},
1,
[]metrics.Label{
- telemetry.NewLabel("source-port", msg.Packet.SourcePort),
- telemetry.NewLabel("source-channel", msg.Packet.SourceChannel),
- telemetry.NewLabel("destination-port", msg.Packet.DestinationPort),
- telemetry.NewLabel("destination-channel", msg.Packet.DestinationChannel),
- telemetry.NewLabel("timeout-type", "height"),
+ telemetry.NewLabel(coretypes.LabelSourcePort, msg.Packet.SourcePort),
+ telemetry.NewLabel(coretypes.LabelSourceChannel, msg.Packet.SourceChannel),
+ telemetry.NewLabel(coretypes.LabelDestinationPort, msg.Packet.DestinationPort),
+ telemetry.NewLabel(coretypes.LabelDestinationChannel, msg.Packet.DestinationChannel),
+ telemetry.NewLabel(coretypes.LabelTimeoutType, "height"),
},
)
}()
@@ -627,11 +628,11 @@ func (k Keeper) TimeoutOnClose(goCtx context.Context, msg *channeltypes.MsgTimeo
[]string{"ibc", "timeout", "packet"},
1,
[]metrics.Label{
- telemetry.NewLabel("source-port", msg.Packet.SourcePort),
- telemetry.NewLabel("source-channel", msg.Packet.SourceChannel),
- telemetry.NewLabel("destination-port", msg.Packet.DestinationPort),
- telemetry.NewLabel("destination-channel", msg.Packet.DestinationChannel),
- telemetry.NewLabel("timeout-type", "channel-closed"),
+ telemetry.NewLabel(coretypes.LabelSourcePort, msg.Packet.SourcePort),
+ telemetry.NewLabel(coretypes.LabelSourceChannel, msg.Packet.SourceChannel),
+ telemetry.NewLabel(coretypes.LabelDestinationPort, msg.Packet.DestinationPort),
+ telemetry.NewLabel(coretypes.LabelDestinationChannel, msg.Packet.DestinationChannel),
+ telemetry.NewLabel(coretypes.LabelTimeoutType, "channel-closed"),
},
)
}()
@@ -676,10 +677,10 @@ func (k Keeper) Acknowledgement(goCtx context.Context, msg *channeltypes.MsgAckn
[]string{"tx", "msg", "ibc", channeltypes.EventTypeAcknowledgePacket},
1,
[]metrics.Label{
- telemetry.NewLabel("source-port", msg.Packet.SourcePort),
- telemetry.NewLabel("source-channel", msg.Packet.SourceChannel),
- telemetry.NewLabel("destination-port", msg.Packet.DestinationPort),
- telemetry.NewLabel("destination-channel", msg.Packet.DestinationChannel),
+ telemetry.NewLabel(coretypes.LabelSourcePort, msg.Packet.SourcePort),
+ telemetry.NewLabel(coretypes.LabelSourceChannel, msg.Packet.SourceChannel),
+ telemetry.NewLabel(coretypes.LabelDestinationPort, msg.Packet.DestinationPort),
+ telemetry.NewLabel(coretypes.LabelDestinationChannel, msg.Packet.DestinationChannel),
},
)
}()
diff --git a/modules/core/types/metrics.go b/modules/core/types/metrics.go
new file mode 100644
index 00000000..9fcd348a
--- /dev/null
+++ b/modules/core/types/metrics.go
@@ -0,0 +1,12 @@
+package types
+
+// Prometheus metric labels.
+const (
+ LabelSourcePort = "source_port"
+ LabelSourceChannel = "source_channel"
+ LabelDestinationPort = "destination_port"
+ LabelDestinationChannel = "destination_channel"
+ LabelTimeoutType = "timeout_type"
+ LabelDenom = "denom"
+ LabelSource = "source"
+)
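Prometheus label names must match [a-zA-Z_][a-zA-Z0-9_]*, so the hyphenated names used previously were invalid; the constants above standardize on underscores. A hedged usage sketch follows, with a hypothetical helper name.

package metricsexample

import (
	metrics "github.com/armon/go-metrics"
	"github.com/cosmos/cosmos-sdk/telemetry"

	coretypes "github.com/cosmos/ibc-go/modules/core/types"
)

// recordRecvPacket emits a counter using the new underscore-formatted labels.
func recordRecvPacket(sourcePort, sourceChannel string) {
	telemetry.IncrCounterWithLabels(
		[]string{"tx", "msg", "ibc", "recv_packet"},
		1,
		[]metrics.Label{
			telemetry.NewLabel(coretypes.LabelSourcePort, sourcePort),       // "source_port"
			telemetry.NewLabel(coretypes.LabelSourceChannel, sourceChannel), // "source_channel"
		},
	)
}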
From b36f8c60e401e3d59986e6a111072cd0cf04a0f7 Mon Sep 17 00:00:00 2001
From: smadarasmi
Date: Mon, 5 Jul 2021 22:26:28 +0800
Subject: [PATCH 082/393] Remove sdk.Result from application callbacks #215
(#227)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
* Remove sdk.Result from application callbacks #215
* add changelog and migration doc entry
Co-authored-by: Colin Axnér <25233464+colin-axner@users.noreply.github.com>
---
CHANGELOG.md | 5 +++++
docs/migrations/ibc-migration-v100.md | 8 ++++++++
modules/apps/transfer/module.go | 22 +++++++++-------------
modules/core/05-port/types/module.go | 4 ++--
modules/core/keeper/msg_server.go | 6 +++---
testing/mock/mock.go | 8 ++++----
6 files changed, 31 insertions(+), 22 deletions(-)
create mode 100644 docs/migrations/ibc-migration-v100.md
diff --git a/CHANGELOG.md b/CHANGELOG.md
index c4420fb8..39560254 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -34,6 +34,11 @@ Ref: https://keepachangelog.com/en/1.0.0/
# Changelog
+## [Unreleased (2.0)]
+
+* (core) [\#227](https://github.com/cosmos/ibc-go/pull/227) Remove sdk.Result from application callbacks
+
+
## [Unreleased]
### Bug Fixes
diff --git a/docs/migrations/ibc-migration-v100.md b/docs/migrations/ibc-migration-v100.md
new file mode 100644
index 00000000..5f5c814c
--- /dev/null
+++ b/docs/migrations/ibc-migration-v100.md
@@ -0,0 +1,8 @@
+# Migrating from ibc-go v1.x.x to v2.0.0
+
+## Application Callbacks
+
+`sdk.Result` has been removed as a return value from the application callbacks. Previously it was discarded by core IBC and was therefore unused.
+
+
+
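For concreteness, a minimal sketch of the updated callback signatures an application module now implements, using a hypothetical ExampleModule:

package example

import (
	sdk "github.com/cosmos/cosmos-sdk/types"

	channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
)

// ExampleModule is a hypothetical IBC application module, shown only to
// illustrate the new callback signatures.
type ExampleModule struct{}

// OnAcknowledgementPacket now returns only an error; failures propagate
// directly to core IBC instead of being wrapped in *sdk.Result.
func (am ExampleModule) OnAcknowledgementPacket(
	ctx sdk.Context, packet channeltypes.Packet,
	acknowledgement []byte, relayer sdk.AccAddress,
) error {
	// application-specific acknowledgement handling
	return nil
}

// OnTimeoutPacket follows the same pattern.
func (am ExampleModule) OnTimeoutPacket(
	ctx sdk.Context, packet channeltypes.Packet, relayer sdk.AccAddress,
) error {
	// application-specific timeout handling (e.g. refunding escrowed tokens)
	return nil
}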
diff --git a/modules/apps/transfer/module.go b/modules/apps/transfer/module.go
index 1c9afbe9..a9a1aa4f 100644
--- a/modules/apps/transfer/module.go
+++ b/modules/apps/transfer/module.go
@@ -362,18 +362,18 @@ func (am AppModule) OnAcknowledgementPacket(
packet channeltypes.Packet,
acknowledgement []byte,
relayer sdk.AccAddress,
-) (*sdk.Result, error) {
+) error {
var ack channeltypes.Acknowledgement
if err := types.ModuleCdc.UnmarshalJSON(acknowledgement, &ack); err != nil {
- return nil, sdkerrors.Wrapf(sdkerrors.ErrUnknownRequest, "cannot unmarshal ICS-20 transfer packet acknowledgement: %v", err)
+ return sdkerrors.Wrapf(sdkerrors.ErrUnknownRequest, "cannot unmarshal ICS-20 transfer packet acknowledgement: %v", err)
}
var data types.FungibleTokenPacketData
if err := types.ModuleCdc.UnmarshalJSON(packet.GetData(), &data); err != nil {
- return nil, sdkerrors.Wrapf(sdkerrors.ErrUnknownRequest, "cannot unmarshal ICS-20 transfer packet data: %s", err.Error())
+ return sdkerrors.Wrapf(sdkerrors.ErrUnknownRequest, "cannot unmarshal ICS-20 transfer packet data: %s", err.Error())
}
if err := am.keeper.OnAcknowledgementPacket(ctx, packet, data, ack); err != nil {
- return nil, err
+ return err
}
ctx.EventManager().EmitEvent(
@@ -404,9 +404,7 @@ func (am AppModule) OnAcknowledgementPacket(
)
}
- return &sdk.Result{
- Events: ctx.EventManager().Events().ToABCIEvents(),
- }, nil
+ return nil
}
// OnTimeoutPacket implements the IBCModule interface
@@ -414,14 +412,14 @@ func (am AppModule) OnTimeoutPacket(
ctx sdk.Context,
packet channeltypes.Packet,
relayer sdk.AccAddress,
-) (*sdk.Result, error) {
+) error {
var data types.FungibleTokenPacketData
if err := types.ModuleCdc.UnmarshalJSON(packet.GetData(), &data); err != nil {
- return nil, sdkerrors.Wrapf(sdkerrors.ErrUnknownRequest, "cannot unmarshal ICS-20 transfer packet data: %s", err.Error())
+ return sdkerrors.Wrapf(sdkerrors.ErrUnknownRequest, "cannot unmarshal ICS-20 transfer packet data: %s", err.Error())
}
// refund tokens
if err := am.keeper.OnTimeoutPacket(ctx, packet, data); err != nil {
- return nil, err
+ return err
}
ctx.EventManager().EmitEvent(
@@ -434,7 +432,5 @@ func (am AppModule) OnTimeoutPacket(
),
)
- return &sdk.Result{
- Events: ctx.EventManager().Events().ToABCIEvents(),
- }, nil
+ return nil
}
diff --git a/modules/core/05-port/types/module.go b/modules/core/05-port/types/module.go
index 4a1c2596..9c0885c0 100644
--- a/modules/core/05-port/types/module.go
+++ b/modules/core/05-port/types/module.go
@@ -75,11 +75,11 @@ type IBCModule interface {
packet channeltypes.Packet,
acknowledgement []byte,
relayer sdk.AccAddress,
- ) (*sdk.Result, error)
+ ) error
OnTimeoutPacket(
ctx sdk.Context,
packet channeltypes.Packet,
relayer sdk.AccAddress,
- ) (*sdk.Result, error)
+ ) error
}
diff --git a/modules/core/keeper/msg_server.go b/modules/core/keeper/msg_server.go
index 3aeeffdb..fe410471 100644
--- a/modules/core/keeper/msg_server.go
+++ b/modules/core/keeper/msg_server.go
@@ -557,7 +557,7 @@ func (k Keeper) Timeout(goCtx context.Context, msg *channeltypes.MsgTimeout) (*c
}
// Perform application logic callback
- _, err = cbs.OnTimeoutPacket(ctx, msg.Packet, relayer)
+ err = cbs.OnTimeoutPacket(ctx, msg.Packet, relayer)
if err != nil {
return nil, sdkerrors.Wrap(err, "timeout packet callback failed")
}
@@ -613,7 +613,7 @@ func (k Keeper) TimeoutOnClose(goCtx context.Context, msg *channeltypes.MsgTimeo
// Perform application logic callback
// NOTE: MsgTimeout and MsgTimeoutOnClose use the same "OnTimeoutPacket"
// application logic callback.
- _, err = cbs.OnTimeoutPacket(ctx, msg.Packet, relayer)
+ err = cbs.OnTimeoutPacket(ctx, msg.Packet, relayer)
if err != nil {
return nil, sdkerrors.Wrap(err, "timeout packet callback failed")
}
@@ -667,7 +667,7 @@ func (k Keeper) Acknowledgement(goCtx context.Context, msg *channeltypes.MsgAckn
}
// Perform application logic callback
- _, err = cbs.OnAcknowledgementPacket(ctx, msg.Packet, msg.Acknowledgement, relayer)
+ err = cbs.OnAcknowledgementPacket(ctx, msg.Packet, msg.Acknowledgement, relayer)
if err != nil {
return nil, sdkerrors.Wrap(err, "acknowledge packet callback failed")
}
diff --git a/testing/mock/mock.go b/testing/mock/mock.go
index e2062dda..9edf5d35 100644
--- a/testing/mock/mock.go
+++ b/testing/mock/mock.go
@@ -205,11 +205,11 @@ func (am AppModule) OnRecvPacket(ctx sdk.Context, packet channeltypes.Packet, re
}
// OnAcknowledgementPacket implements the IBCModule interface.
-func (am AppModule) OnAcknowledgementPacket(sdk.Context, channeltypes.Packet, []byte, sdk.AccAddress) (*sdk.Result, error) {
- return nil, nil
+func (am AppModule) OnAcknowledgementPacket(sdk.Context, channeltypes.Packet, []byte, sdk.AccAddress) error {
+ return nil
}
// OnTimeoutPacket implements the IBCModule interface.
-func (am AppModule) OnTimeoutPacket(sdk.Context, channeltypes.Packet, sdk.AccAddress) (*sdk.Result, error) {
- return nil, nil
+func (am AppModule) OnTimeoutPacket(sdk.Context, channeltypes.Packet, sdk.AccAddress) error {
+ return nil
}
From 312beecf1f65276023161bcb8b0f3322621408a4 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?colin=20axn=C3=A9r?=
<25233464+colin-axner@users.noreply.github.com>
Date: Tue, 6 Jul 2021 14:47:09 +0200
Subject: [PATCH 083/393] add missing changelog entries (#230)
* add missing changelog entries
* add missing entry
---
CHANGELOG.md | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 39560254..e62693b8 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -43,17 +43,18 @@ Ref: https://keepachangelog.com/en/1.0.0/
### Bug Fixes
+* (modules) [\#223](https://github.com/cosmos/ibc-go/pull/223) Use correct Prometheus format for metric labels.
* (06-solomachine) [\#214](https://github.com/cosmos/ibc-go/pull/214) Disable defensive timestamp check in SendPacket for solo machine clients.
-* (07-tendermint) [#\210](https://github.com/cosmos/ibc-go/pull/210) Export all consensus metadata on genesis restarts for tendermint clients.
+* (07-tendermint) [\#210](https://github.com/cosmos/ibc-go/pull/210) Export all consensus metadata on genesis restarts for tendermint clients.
* (core) [\#200](https://github.com/cosmos/ibc-go/pull/200) Fixes incorrect export of IBC identifier sequences. Previously, the next identifier sequence for clients/connections/channels was not set during genesis export, so identifiers generated on the new chain reused old identifiers (the sequences began again from 0).
* (02-client) [\#192](https://github.com/cosmos/ibc-go/pull/192) Fix IBC `query ibc client header` cli command. Support historical queries for query header/node-state commands.
* (modules/light-clients/06-solomachine) [\#153](https://github.com/cosmos/ibc-go/pull/153) Fix solo machine proof height sequence mismatch bug.
* (modules/light-clients/06-solomachine) [\#122](https://github.com/cosmos/ibc-go/pull/122) Fix solo machine merkle prefix casting bug.
* (modules/light-clients/06-solomachine) [\#120](https://github.com/cosmos/ibc-go/pull/120) Fix solo machine handshake verification bug.
-
### API Breaking
+* (04-channel) [\#220](https://github.com/cosmos/ibc-go/pull/220) Channel legacy handler functions were removed. Please use the MsgServer functions or directly call the channel keeper's handshake function.
* (modules) [\#206](https://github.com/cosmos/ibc-go/pull/206) Expose `relayer sdk.AccAddress` on `OnRecvPacket`, `OnAcknowledgementPacket`, `OnTimeoutPacket` module callbacks to enable incentivization.
* (02-client) [\#181](https://github.com/cosmos/ibc-go/pull/181) Remove 'InitialHeight' from UpdateClient Proposal. Only copy over latest consensus state from substitute client.
* (06-solomachine) [\#169](https://github.com/cosmos/ibc-go/pull/169) Change FrozenSequence to boolean in solomachine ClientState. The solo machine proto package has been bumped from `v1` to `v2`.
@@ -77,6 +78,7 @@ Ref: https://keepachangelog.com/en/1.0.0/
### Improvements
+* (04-channel) [\#220](https://github.com/cosmos/ibc-go/pull/220) Channel handshake events are now emitted with the channel keeper.
* (core/02-client) [\#205](https://github.com/cosmos/ibc-go/pull/205) Add in-place and genesis migrations from SDK v0.42.0 to ibc-go v1.0.0. Solo machine protobuf definitions are migrated from v1 to v2. All solo machine consensus states are pruned. All expired tendermint consensus states are pruned.
* (modules/core) [\#184](https://github.com/cosmos/ibc-go/pull/184) Improve error messages. Uses unique error codes to indicate already relayed packets.
* (07-tendermint) [\#182](https://github.com/cosmos/ibc-go/pull/182) Remove duplicate checks in upgrade logic.
From f4484c942be807380ebf52b81d8ca3fc5a0cf812 Mon Sep 17 00:00:00 2001
From: Jun Kimura
Date: Tue, 6 Jul 2021 21:57:15 +0900
Subject: [PATCH 084/393] Fix missing events in OnRecvPacket (#233)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
* fix to set events to the original context
* Update modules/core/keeper/msg_server.go
Co-authored-by: Aditya
Co-authored-by: colin axnér <25233464+colin-axner@users.noreply.github.com>
---
modules/core/keeper/msg_server.go | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/modules/core/keeper/msg_server.go b/modules/core/keeper/msg_server.go
index fe410471..ab9e5833 100644
--- a/modules/core/keeper/msg_server.go
+++ b/modules/core/keeper/msg_server.go
@@ -500,6 +500,10 @@ func (k Keeper) RecvPacket(goCtx context.Context, msg *channeltypes.MsgRecvPacke
// Cache context so that we may discard state changes from callback if the acknowledgement is unsuccessful.
cacheCtx, writeFn := ctx.CacheContext()
ack := cbs.OnRecvPacket(cacheCtx, msg.Packet, relayer)
+ // NOTE: The context returned by CacheContext() refers to a new EventManager, so events emitted by the callback must be explicitly set on the original context.
+ // Events from the callback are emitted regardless of acknowledgement success.
+ // This does not cause duplicate events to be emitted, since the cached context does not share the original EventManager.
+ ctx.EventManager().EmitEvents(cacheCtx.EventManager().Events())
if ack == nil || ack.Success() {
// write application state changes for asynchronous and successful acknowledgements
writeFn()
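The fix above copies events from the cached context back onto the original context before the acknowledgement outcome is evaluated, so callback events survive even when the branched state is discarded. Below is a standalone sketch of that cache-and-copy pattern, assuming only the SDK `Context` API; the helper name and callback shape are hypothetical.

```go
package ibcexample

import (
	sdk "github.com/cosmos/cosmos-sdk/types"
)

// runWithCachedState sketches the pattern used in RecvPacket: the callback
// runs against a branched context, its events are always copied back to the
// caller, and its state writes are committed only on success.
func runWithCachedState(ctx sdk.Context, callback func(sdk.Context) bool) {
	// cacheCtx branches the store and starts a fresh EventManager.
	cacheCtx, writeFn := ctx.CacheContext()

	ok := callback(cacheCtx) // stand-in for cbs.OnRecvPacket

	// Without this, events emitted on cacheCtx would be lost, because the
	// cached context does not share the caller's EventManager.
	ctx.EventManager().EmitEvents(cacheCtx.EventManager().Events())

	if ok {
		writeFn() // persist the branched state changes
	}
}
```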
From c2ad5503e72efbd12f2552437a59396e3e05c922 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?colin=20axn=C3=A9r?=
<25233464+colin-axner@users.noreply.github.com>
Date: Tue, 6 Jul 2021 16:27:34 +0200
Subject: [PATCH 085/393] bump SDK dependency to v0.43.0-rc0 (#229)
Co-authored-by: Aditya
---
go.mod | 12 +-
go.sum | 351 +++++++++++++++---
modules/core/02-client/types/proposal_test.go | 2 +-
testing/simapp/simd/cmd/root.go | 87 ++++-
4 files changed, 375 insertions(+), 77 deletions(-)
diff --git a/go.mod b/go.mod
index f4100fcb..30f65bae 100644
--- a/go.mod
+++ b/go.mod
@@ -5,9 +5,9 @@ module github.com/cosmos/ibc-go
replace github.com/gogo/protobuf => github.com/regen-network/protobuf v1.3.3-alpha.regen.1
require (
- github.com/armon/go-metrics v0.3.8
+ github.com/armon/go-metrics v0.3.9
github.com/confio/ics23/go v0.6.6
- github.com/cosmos/cosmos-sdk v0.43.0-beta1
+ github.com/cosmos/cosmos-sdk v0.43.0-rc0
github.com/gogo/protobuf v1.3.3
github.com/golang/protobuf v1.5.2
github.com/gorilla/mux v1.8.0
@@ -16,11 +16,11 @@ require (
github.com/rakyll/statik v0.1.7
github.com/spf13/cast v1.3.1
github.com/spf13/cobra v1.1.3
- github.com/spf13/viper v1.7.1
+ github.com/spf13/viper v1.8.0
github.com/stretchr/testify v1.7.0
- github.com/tendermint/tendermint v0.34.10
+ github.com/tendermint/tendermint v0.34.11
github.com/tendermint/tm-db v0.6.4
- google.golang.org/genproto v0.0.0-20210114201628-6edceaf6022f
- google.golang.org/grpc v1.37.0
+ google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c
+ google.golang.org/grpc v1.38.0
google.golang.org/protobuf v1.26.0
)
diff --git a/go.sum b/go.sum
index 3f9f99e4..8b45c8a4 100644
--- a/go.sum
+++ b/go.sum
@@ -5,11 +5,37 @@ cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6A
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
+cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
+cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
+cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
+cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
+cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
+cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
+cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
+cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
+cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
+cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
+cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
+cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
+cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
+cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
+cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
+cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
+cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
+cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
+cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
+cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
+cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
+cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
+cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
+cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
+cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
+cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
filippo.io/edwards25519 v1.0.0-beta.2 h1:/BZRNzm8N4K4eWfK28dL4yescorxtO7YG1yun8fy+pI=
filippo.io/edwards25519 v1.0.0-beta.2/go.mod h1:X+pm78QAUPtFLi1z9PYIlS/bdDnvbCOGKtZ+ACWEf7o=
@@ -47,6 +73,7 @@ github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrd
github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
github.com/Workiva/go-datastructures v1.0.52 h1:PLSK6pwn8mYdaoaCZEMsXBpBotr4HHn9abU0yMQt0NI=
github.com/Workiva/go-datastructures v1.0.52/go.mod h1:Z+F2Rca0qCsVYDS8z7bAGm8f3UkzuWYS/oBZz5a7VVA=
+github.com/Zilliqa/gozilliqa-sdk v1.2.1-0.20201201074141-dd0ecada1be6/go.mod h1:eSYp2T6f0apnuW8TzhV3f6Aff2SE8Dwio++U4ha4yEM=
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
@@ -62,8 +89,8 @@ github.com/aristanetworks/goarista v0.0.0-20170210015632-ea17b1a17847/go.mod h1:
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
-github.com/armon/go-metrics v0.3.8 h1:oOxq3KPj0WhCuy50EhzwiyMyG2ovRQZpZLXQuOh2a/M=
-github.com/armon/go-metrics v0.3.8/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc=
+github.com/armon/go-metrics v0.3.9 h1:O2sNqxBdvq8Eq5xmzljcYzAORli6RWCvEym4cJf9m18=
+github.com/armon/go-metrics v0.3.9/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A=
github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU=
@@ -77,16 +104,21 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r
github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
+github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM=
github.com/btcsuite/btcd v0.0.0-20171128150713-2e60448ffcc6/go.mod h1:Dmm/EzmjnCiweXmzRIAiUWCInVmPgjkzgv5k4tVyXiQ=
github.com/btcsuite/btcd v0.0.0-20190115013929-ed77733ec07d/go.mod h1:d3C0AkH6BRcvO8T0UEPu53cnw4IbV63x1bEjildYhO0=
+github.com/btcsuite/btcd v0.0.0-20190315201642-aa6e0f35703c/go.mod h1:DrZx5ec/dmnfpw9KyYoQyYo7d0KEvTkk/5M/vbZjAr8=
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
-github.com/btcsuite/btcd v0.21.0-beta h1:At9hIZdJW0s9E/fAz28nrz6AmcNlSVucCH796ZteX1M=
github.com/btcsuite/btcd v0.21.0-beta/go.mod h1:ZSWyehm27aAuS9bvkATT+Xte3hjHZ+MRgMY/8NJ7K94=
+github.com/btcsuite/btcd v0.22.0-beta h1:LTDpDKUM5EeOFBPM8IXpinEcmZ6FWfNZbE3lfrfdnWo=
+github.com/btcsuite/btcd v0.22.0-beta/go.mod h1:9n5ntfhhHQBIhUvlhDvD3Qg6fRUj4jkN0VB8L8svzOA=
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
github.com/btcsuite/btcutil v0.0.0-20180706230648-ab6388e0c60a/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
+github.com/btcsuite/btcutil v0.0.0-20190207003914-4c204d697803/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
-github.com/btcsuite/btcutil v1.0.2 h1:9iZ1Terx9fMIOtq1VrwdqfsATL9MC2l8ZrUY6YZ2uts=
github.com/btcsuite/btcutil v1.0.2/go.mod h1:j9HUFwoQRsZL3V4n+qG+CUnEGHOarIxfC3Le2Yhbcts=
+github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce h1:YtWJF7RHm2pYCvA5t0RPmAaLUhREsKuKd+SLhxFbFeQ=
+github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce/go.mod h1:0DVlHczLPewLcPGEIeUEzfOJhqGPQ0mJJRDBtD307+o=
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg=
github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY=
github.com/btcsuite/goleveldb v1.0.0/go.mod h1:QiK9vBlgftBg6rWQIj6wFzbPfRjiykIEhBH4obrXJ/I=
@@ -102,18 +134,21 @@ github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
+github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
+github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cloudflare/cloudflare-go v0.10.2-0.20190916151808-a80f83b9add9/go.mod h1:1MxXX1Ux4x6mqPmjkUgTP1CdXIBXKX7T+Jk9Gxrmx+U=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
+github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
-github.com/coinbase/rosetta-sdk-go v0.5.8/go.mod h1:xd4wYUhV3LkY78SPH8BUhc88rXfn2jYgN9BfiSjbcvM=
-github.com/coinbase/rosetta-sdk-go v0.5.9 h1:CuGQE3HFmYwdEACJnuOtVI9cofqPsGvq6FdFIzaOPKI=
-github.com/coinbase/rosetta-sdk-go v0.5.9/go.mod h1:xd4wYUhV3LkY78SPH8BUhc88rXfn2jYgN9BfiSjbcvM=
+github.com/coinbase/rosetta-sdk-go v0.6.10 h1:rgHD/nHjxLh0lMEdfGDqpTtlvtSBwULqrrZ2qPdNaCM=
+github.com/coinbase/rosetta-sdk-go v0.6.10/go.mod h1:J/JFMsfcePrjJZkwQFLh+hJErkAmdm9Iyy3D5Y0LfXo=
github.com/confio/ics23/go v0.0.0-20200817220745-f173e6211efb/go.mod h1:E45NqnlpxGnpfTWL/xauN7MRwEE28T4Dd4uraToOaKg=
github.com/confio/ics23/go v0.6.3/go.mod h1:E45NqnlpxGnpfTWL/xauN7MRwEE28T4Dd4uraToOaKg=
github.com/confio/ics23/go v0.6.6 h1:pkOy18YxxJ/r0XFDCnrl4Bjv6h4LkBSpLS6F38mrKL8=
@@ -126,10 +161,11 @@ github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3Ee
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-github.com/cosmos/cosmos-sdk v0.43.0-beta1 h1:cfRZY+opamo+zF+MuEbvriZwoSzfCuEh1fqUM8fFHbg=
-github.com/cosmos/cosmos-sdk v0.43.0-beta1/go.mod h1:rpCPaC3MnityU4Io4CDZqZB4GMtPqNeYXxPk8iRqmYM=
+github.com/cosmos/cosmos-sdk v0.43.0-rc0 h1:+WGHEo1N/2zRSpWpKmuquTjDskL4j9K6zTc7CfDpfOM=
+github.com/cosmos/cosmos-sdk v0.43.0-rc0/go.mod h1:ctcrTEAhei9s8O3KSNvL0dxe+fVQGp07QyRb/7H9JYE=
github.com/cosmos/go-bip39 v0.0.0-20180819234021-555e2067c45d/go.mod h1:tSxLoYXyBmiFeKpvmq4dzayMdCjCnu8uqmCysIGBT2Y=
github.com/cosmos/go-bip39 v1.0.0 h1:pcomnQdrdH22njcAatO0yWojsUnCO3y2tNoV1cb6hHY=
github.com/cosmos/go-bip39 v1.0.0/go.mod h1:RNJv0H/pOIVgxw6KS7QeX2a0Uo0aKUlfhZ4xuwvCdJw=
@@ -187,10 +223,11 @@ github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4s
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
+github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/ethereum/go-ethereum v1.9.23/go.mod h1:JIfVb6esrqALTExdz9hRYvrP0xBDf6wCncIu1hNwHpM=
+github.com/ethereum/go-ethereum v1.9.25/go.mod h1:vMkFiYLHI4tgPw4k2j4MHKoovchFE8plZ0M9VMk4/oM=
github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51 h1:0JZ+dUmQeA8IIVUMzysrX4/AKuQwWhV2dYQuPZdvdSQ=
github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51/go.mod h1:Yg+htXGokKKdzcwhuNDwVvN+uBxDGXJ7G/VN1d8fa64=
github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 h1:JWuenKqqX8nojtoVVWjGfOF9635RETekkoH6Cc9SX0A=
@@ -199,7 +236,7 @@ github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870 h1:E2s37DuLxFhQD
github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0=
github.com/fatih/color v1.3.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
-github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
+github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM=
github.com/felixge/httpsnoop v1.0.1 h1:lvB5Jl89CsZtGIWuTcDM1E/vkVs49/Ml7JJe07l8SPQ=
github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/fjl/memsize v0.0.0-20180418122429-ca190fb6ffbc/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
@@ -217,11 +254,13 @@ github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm
github.com/gin-gonic/gin v1.6.3 h1:ahKqKTFpO5KTPHxWZjEdPScmYaGtLo8Y4DMHoEsnp14=
github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.10.0 h1:dXFJfIHVvUcpSgDOV+Ne6t7jXri8Tfv2uOLHUZ2XNuo=
github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o=
+github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4=
@@ -247,6 +286,7 @@ github.com/gobwas/ws v1.0.2 h1:CoAavW/wd/kulfZmSIBt6p24n4j7tHgNVCjsfHVNUbo=
github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM=
github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 h1:ZpnhV/YsD2/4cESfV5+Hoeu/iUR3ruzNvZ+yQfO03a0=
github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
+github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gogo/gateway v1.1.0 h1:u0SuhL9+Il+UbjM9VIE3ntfRujKbvVpFvNB4HbjeVQ0=
github.com/gogo/gateway v1.1.0/go.mod h1:S7rR8FRQyG3QFESeSv4l2WnsyzlCLG0CzBbUUo/mbic=
github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
@@ -254,16 +294,24 @@ github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfU
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
-github.com/golang/mock v1.4.4 h1:l75CXGRSwbaYNpl/Z2X1XIIAMSCquvXgpVZDhwEIJsc=
+github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
+github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
+github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc=
+github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
@@ -274,13 +322,13 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/golang/snappy v0.0.2-0.20200707131729-196ae77b8a26/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/golang/snappy v0.0.2 h1:aeE13tS0IiQgFjYdoL8qN3K1N2bXXtI6Vi51/y7BpMw=
-github.com/golang/snappy v0.0.2/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.3-0.20201103224600-674baa8c7fc3 h1:ur2rms48b3Ep1dxh7aUV2FZEQ8jEVO2F6ILKx8ofkAg=
+github.com/golang/snappy v0.0.3-0.20201103224600-674baa8c7fc3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
@@ -288,8 +336,11 @@ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5a
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
@@ -298,12 +349,24 @@ github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/
github.com/google/gofuzz v1.1.1-0.20200604201612-c04b05f3adfa h1:Q75Upo5UN4JbPFURXZ8nLKYUvF85dyFRop/vQ0Rv+64=
github.com/google/gofuzz v1.1.1-0.20200604201612-c04b05f3adfa/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/orderedcode v0.0.1 h1:UzfcAexk9Vhv8+9pNOgRu41f16lHq725vPwnSeiG/Us=
github.com/google/orderedcode v0.0.1/go.mod h1:iVyU4/qPKHY5h/wSd6rZZCDcLJNxiWO6dvsYES2Sb20=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
@@ -381,6 +444,8 @@ github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpO
github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc=
github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o=
+github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/improbable-eng/grpc-web v0.14.0 h1:GdoK+cXABdB+1keuqsV1drSFO2XLYIxqt/4Rj8SWGBk=
github.com/improbable-eng/grpc-web v0.14.0/go.mod h1:6hRR09jOEG81ADP5wCQju1z71g6OL4eEvELdran/3cs=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
@@ -388,6 +453,7 @@ github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANyt
github.com/influxdata/influxdb v1.2.3-0.20180221223340-01288bdb0883/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY=
github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
+github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e/go.mod h1:G1CVv03EnqU1wYL2dFwXxW2An0az9JTl/ZsqXQeBlkU=
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jhump/protoreflect v1.8.2 h1:k2xE7wcUomeqwY0LDCYA16y4WWfyTcMx5mKhk0d4ua0=
@@ -402,9 +468,11 @@ github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCV
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ=
+github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
+github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.1.1-0.20170430222011-975b5c4c7c21/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
@@ -443,17 +511,15 @@ github.com/magiconair/properties v1.8.5 h1:b6kJs+EmPFMYGkow9GiUyCyOvIwYetYJ3fSaW
github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.0/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
-github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
-github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.5-0.20180830101745-3fb116b82035/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
-github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
-github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
-github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
+github.com/mattn/go-isatty v0.0.13 h1:qdl+GuBjcsKKDco5BsxPJlId98mSWNKqYA+Co0SC1yA=
+github.com/mattn/go-isatty v0.0.13/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
@@ -473,8 +539,9 @@ github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS4
github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/mitchellh/mapstructure v1.3.3 h1:SzB1nHZ2Xi+17FP0zVQBHIZqvwRN9408fJO8h+eeNA8=
github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag=
+github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -495,6 +562,7 @@ github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzE
github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
+github.com/neilotoole/errgroup v0.1.5/go.mod h1:Q2nLGf+594h0CLBs/Mbg6qOr7GtqDK7C2S41udRnToE=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/nishanths/predeclared v0.0.0-20200524104333-86fad755b4d3/go.mod h1:nt3d53pc1VYcphSCIaYAJtnPYnr3Zyn8fMq2wvPGPso=
@@ -540,8 +608,8 @@ github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144T
github.com/pborman/uuid v0.0.0-20170112150404-1b00554d8222/go.mod h1:VyrYX9gd7irzKovcSS6BIIEwPRkP2Wm2m9ufcdFSJ34=
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
-github.com/pelletier/go-toml v1.8.1 h1:1Nf83orprkJyknT6h7zbuEGUEjcyVlCxSUGTENmNCRM=
-github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc=
+github.com/pelletier/go-toml v1.9.3 h1:zeC5b1GviRUyKYd6OJPvBU/mcVDVoL1OhT17FCt5dSQ=
+github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac=
github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0=
github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 h1:q2e307iGHPdTGp0hoxKjt1H5pDo6utceo3dQVK3I5XQ=
@@ -565,8 +633,8 @@ github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeD
github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.8.0/go.mod h1:O9VU6huf47PktckDQfMTX0Y8tY0/7TSWwj+ITvv0TnM=
-github.com/prometheus/client_golang v1.10.0 h1:/o0BDeWzLWXNZ+4q5gXltUvaMpJqckTa+jTNoB+z4cg=
-github.com/prometheus/client_golang v1.10.0/go.mod h1:WJM3cc3yu7XKBKa/I8WeZm+V3eltZnBwfENSU7mdogU=
+github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ=
+github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@@ -582,9 +650,9 @@ github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt2
github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.14.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s=
-github.com/prometheus/common v0.18.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s=
-github.com/prometheus/common v0.23.0 h1:GXWvPYuTUenIa+BhOq/x+L/QZzCqASkVRny5KTlPDGM=
-github.com/prometheus/common v0.23.0/go.mod h1:H6QK/N6XVT42whUeIdI3dp36w49c+/iMDk7UAI2qm7Q=
+github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
+github.com/prometheus/common v0.29.0 h1:3jqPBvKT4OHAbje2Ql7KeaaSicDBCxMYwEJU1zRJceE=
+github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
@@ -614,8 +682,8 @@ github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/rs/xhandler v0.0.0-20160618193221-ed27b6fd6521/go.mod h1:RvLn4FgxWubrpZHtQLnOf6EwhN2hEMusxZOhcW9H3UQ=
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
-github.com/rs/zerolog v1.21.0 h1:Q3vdXlfLNT+OftyBHsU0Y445MD+8m8axjKgf2si0QcM=
-github.com/rs/zerolog v1.21.0/go.mod h1:ZPhntP/xmq1nnND05hhpAh2QMhSsA4UN3MGZ6O2J3hM=
+github.com/rs/zerolog v1.23.0 h1:UskrK+saS9P9Y789yNNulYKdARjPZuS35B8gJF2x60g=
+github.com/rs/zerolog v1.23.0/go.mod h1:6c7hFfxPOy7TacJc4Fcdi24/J0NKYGzjG8FWRI916Qo=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
@@ -624,6 +692,7 @@ github.com/sasha-s/go-deadlock v0.2.0/go.mod h1:StQn567HiB1fF2yJ44N9au7wOhrPS3iZ
github.com/sasha-s/go-deadlock v0.2.1-0.20190427202633-1595213edefa h1:0U2s5loxrTy6/VgfVoLuVLFJcURKLH49ie0zSch7gh4=
github.com/sasha-s/go-deadlock v0.2.1-0.20190427202633-1595213edefa/go.mod h1:F73l+cr82YSh10GxyRI6qZiCgK64VaZjwesgfQ1/iLM=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
+github.com/segmentio/fasthash v1.0.3/go.mod h1:waKX8l2N8yckOgmSsXJi7x1ZfdKZ4x7KRMzBtS3oedY=
github.com/shirou/gopsutil v2.20.5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
@@ -640,8 +709,8 @@ github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasO
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
-github.com/spf13/afero v1.3.4 h1:8q6vk3hthlpb2SouZcnBVKboxWQWMDNF38bwholZrJc=
-github.com/spf13/afero v1.3.4/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
+github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY=
+github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng=
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
@@ -661,8 +730,9 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
-github.com/spf13/viper v1.7.1 h1:pM5oEahlgWv/WnHXpgbKz7iLIxRf65tye2Ci+XFK5sk=
github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
+github.com/spf13/viper v1.8.0 h1:QRwDgoG8xX+kp69di68D+YYTCWfYEckbZRfUlEIAal0=
+github.com/spf13/viper v1.8.0/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns=
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q=
github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570/go.mod h1:8OR4w3TdeIHIh1g6EMY5p0gVNOovcWC+1vpc7naMuAw=
github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3/go.mod h1:hpGUWaI9xL8pRQCTXQgocU38Qw1g0Us7n5PxxTwTCYU=
@@ -688,8 +758,6 @@ github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c h1:g+WoO5jjkqGAzH
github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c/go.mod h1:ahpPrc7HpcfEWDQRZEmnXMzHY03mLDYMCxeDzy46i+8=
github.com/tendermint/btcd v0.1.1 h1:0VcxPfflS2zZ3RiOAHkBiFUcPvbtRj5O7zHmcJWHV7s=
github.com/tendermint/btcd v0.1.1/go.mod h1:DC6/m53jtQzr/NFmMNEu0rxf18/ktVoVtMrnDD5pN+U=
-github.com/tendermint/cosmos-rosetta-gateway v0.3.0-rc2.0.20210304154332-87d6ca4410df h1:hoMLrOS4WyyMM+Y+iWdGu94o0zzp6Q43y7v89Q1/OIw=
-github.com/tendermint/cosmos-rosetta-gateway v0.3.0-rc2.0.20210304154332-87d6ca4410df/go.mod h1:gBPw8WV2Erm4UGHlBRiM3zaEBst4bsuihmMCNQdgP/s=
github.com/tendermint/crypto v0.0.0-20191022145703-50d29ede1e15 h1:hqAk8riJvK4RMWx1aInLzndwxKalgi5rTqgfXxOxbEI=
github.com/tendermint/crypto v0.0.0-20191022145703-50d29ede1e15/go.mod h1:z4YtwM70uOnk8h0pjJYlj3zdYwi9l03By6iAIF5j/Pk=
github.com/tendermint/go-amino v0.16.0 h1:GyhmgQKvqF82e2oZeuMSp9JTN0N09emoSZlb2lyGa2E=
@@ -697,20 +765,22 @@ github.com/tendermint/go-amino v0.16.0/go.mod h1:TQU0M1i/ImAo+tYpZi73AU3V/dKeCoM
github.com/tendermint/tendermint v0.34.0-rc4/go.mod h1:yotsojf2C1QBOw4dZrTcxbyxmPUrT4hNuOQWX9XUwB4=
github.com/tendermint/tendermint v0.34.0-rc6/go.mod h1:ugzyZO5foutZImv0Iyx/gOFCX6mjJTgbLHTwi17VDVg=
github.com/tendermint/tendermint v0.34.0/go.mod h1:Aj3PIipBFSNO21r+Lq3TtzQ+uKESxkbA3yo/INM4QwQ=
-github.com/tendermint/tendermint v0.34.10 h1:wBOc/It8sh/pVH9np2V5fBvRmIyFN/bUrGPx+eAHexs=
github.com/tendermint/tendermint v0.34.10/go.mod h1:aeHL7alPh4uTBIJQ8mgFEE8VwJLXI1VD3rVOmH2Mcy0=
+github.com/tendermint/tendermint v0.34.11 h1:q1Yh76oG4QbS07xhmIJh5iAE0fYpJ8P8YKYtjnWfJRY=
+github.com/tendermint/tendermint v0.34.11/go.mod h1:aeHL7alPh4uTBIJQ8mgFEE8VwJLXI1VD3rVOmH2Mcy0=
github.com/tendermint/tm-db v0.6.2/go.mod h1:GYtQ67SUvATOcoY8/+x6ylk8Qo02BQyLrAs+yAcLvGI=
github.com/tendermint/tm-db v0.6.3/go.mod h1:lfA1dL9/Y/Y8wwyPp2NMLyn5P5Ptr/gvDFNWtrCWSf8=
github.com/tendermint/tm-db v0.6.4 h1:3N2jlnYQkXNQclQwd/eKV/NzlqPlfK21cpRRIx80XXQ=
github.com/tendermint/tm-db v0.6.4/go.mod h1:dptYhIpJ2M5kUuenLr+Yyf3zQOv1SgBZcl8/BmWlMBw=
-github.com/tidwall/gjson v1.6.1/go.mod h1:BaHyNc5bjzYkPqgLq7mdVzeiRtULKULXLgZFKsxEHI0=
-github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E=
+github.com/tidwall/gjson v1.6.7/go.mod h1:zeFuBCIqD4sN/gmqBzZ4j7Jd6UcA2Fc56x7QFsv+8fI=
+github.com/tidwall/match v1.0.3/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
github.com/tidwall/pretty v1.0.2/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
-github.com/tidwall/sjson v1.1.2/go.mod h1:SEzaDwxiPzKzNfUEO4HbYF/m4UCSJDsGgNqsS1LvdoY=
+github.com/tidwall/sjson v1.1.4/go.mod h1:wXpKXu8CtDjKAZ+3DrKY5ROCorDFahq8l0tey/Lx1fg=
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs=
+github.com/tyler-smith/go-bip39 v1.0.2/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs=
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo=
github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
@@ -719,14 +789,17 @@ github.com/ugorji/go/codec v1.1.7 h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs
github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
-github.com/vmihailenco/msgpack/v5 v5.0.0-beta.9/go.mod h1:HVxBVPUK/+fZMonk4bi1islLa8V3cfnBug0+4dykPzo=
+github.com/vmihailenco/msgpack/v5 v5.1.4/go.mod h1:C5gboKD0TJPqWDTVTtrQNfRbiBwHZGo8UTqP/9/XvLI=
github.com/vmihailenco/tagparser v0.1.2/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI=
github.com/wsddn/go-ecdh v0.0.0-20161211032359-48726bab9208/go.mod h1:IotVbo4F+mw0EzQ08zFqg7pK3FebNXpaMsRy2RT+Ees=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
+github.com/ybbus/jsonrpc v2.1.2+incompatible/go.mod h1:XJrh1eMSzdIYFbM08flv0wp5G35eRniyeGut1z+LSiE=
+github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/zondax/hid v0.9.0 h1:eiT3P6vNxAEVxXMw66eZUAAnU2zD33JBkfG/EnfAKl8=
github.com/zondax/hid v0.9.0/go.mod h1:l5wttcP0jwtdLjqjMMWFVEE7d1zO0jvSPA9OPZxWpEM=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
@@ -734,28 +807,40 @@ go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0=
go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
+go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
+go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
+go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ=
go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
+go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
+go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
+go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
+go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190909091759-094676da4a83/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
@@ -774,6 +859,11 @@ golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxT
golang.org/x/exp v0.0.0-20190731235908-ec7cb31e5a56/go.mod h1:JhuoJpWY28nO4Vef9tZUw9qufEGTyX1+7lmHxV5q5G4=
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
+golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
+golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
@@ -784,6 +874,11 @@ golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHl
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
+golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
golang.org/x/mobile v0.0.0-20200801112145-973feb4309de/go.mod h1:skQtrUTUwhdJvXM/2KKJzY8pDgNr9I/FOMqDVRPBUS4=
@@ -794,6 +889,9 @@ golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzB
golang.org/x/mod v0.1.1-0.20191209134235-331c550502dd/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -814,31 +912,62 @@ golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20201021035429-f5854403a974 h1:IX6qOQeG5uLjB/hjjwjedwfjND0hgjPMMyO1RoIXQNI=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
+golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
+golang.org/x/net v0.0.0-20210525063256-abc453219eb5 h1:wjuX4b5yYQnEQHzd+CBcrcC6OVR2J1CN6mUy0oSxIPo=
+golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -850,7 +979,6 @@ golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -867,34 +995,61 @@ golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200824131525-c12d262b63d8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210309074719-68d13333faf2 h1:46ULzRKLh1CwgRq2dC5SlBzEqqNCi8rreOZnNrbqcIY=
-golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/term v0.0.0-20201117132131-f5c789dd3221 h1:/ZHdbVpdR/jk3g30/d4yUL0JU9kksj8+F/bnQUVLGDM=
+golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40 h1:JWgyZ1qgdTaF3N3oxC+MdTV7qvEEgHo3otj+HB5CM7Q=
+golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
-golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -919,16 +1074,45 @@ golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtn
golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200117012304-6edc0a871e69/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
+golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200522201501-cb1345f3a375/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200717024301-6ddee64345a6/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
+golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
+golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -940,11 +1124,31 @@ google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
+google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
+google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
+google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
+google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
+google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
+google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
+google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
+google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
+google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
+google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
@@ -956,15 +1160,43 @@ google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
+google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200324203455-a04cca1dde73/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
+google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
+google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201111145450-ac7456db90a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201119123407-9b1e624d6bc4/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210114201628-6edceaf6022f h1:izedQ6yVIc5mZsRuXzmSreCOlzI0lCU1HpG8yEdMiKw=
-google.golang.org/genproto v0.0.0-20210114201628-6edceaf6022f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
+google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c h1:wtujag7C+4D6KMoulW9YauvK2lgdvCMS260jsqqBXr0=
+google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
@@ -978,16 +1210,22 @@ google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyac
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
+google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.32.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
+google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/grpc v1.37.0 h1:uSZWeQJX5j11bIQ4AJoj+McDBo29cY1MCoC1wO3ts+c=
+google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.38.0 h1:/9BgsAsa5nWe26HqOlvlgJnqBuktYOLCgjCPqsa56W0=
+google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -1014,8 +1252,8 @@ gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o=
gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
-gopkg.in/ini.v1 v1.61.0 h1:LBCdW4FmFYL4s/vDZD1RQYX7oAR6IjujCYgMdbHBR10=
-gopkg.in/ini.v1 v1.61.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/ini.v1 v1.62.0 h1:duBzk771uxoUuOlyRLkHsygud9+5lrlGjdFBb4mSKDU=
+gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c=
gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6/go.mod h1:uAJfkITjFhyEEuUfm7bsmCZRbW5WRq8s9EY8HZ6hCns=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
@@ -1034,8 +1272,8 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclpTYkz2zFM+lzLJFO4gQ=
-gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
+gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
@@ -1043,9 +1281,12 @@ honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
+honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
nhooyr.io/websocket v1.8.6 h1:s+C3xAMLwGmlI31Nyn/eAehUlZPwfYZu2JXM621Q5/k=
nhooyr.io/websocket v1.8.6/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
+rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
+rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU=
diff --git a/modules/core/02-client/types/proposal_test.go b/modules/core/02-client/types/proposal_test.go
index 32521d8c..334204fb 100644
--- a/modules/core/02-client/types/proposal_test.go
+++ b/modules/core/02-client/types/proposal_test.go
@@ -212,7 +212,7 @@ func (suite *TypesTestSuite) TestUpgradeString() {
proposal, err := types.NewUpgradeProposal(ibctesting.Title, ibctesting.Description, plan, &ibctmtypes.ClientState{})
suite.Require().NoError(err)
- expect := fmt.Sprintf("IBC Upgrade Proposal\n Title: title\n Description: description\n Upgrade Plan\n Name: ibc upgrade\n Height: 1000\n Info: https://foo.bar/baz.\n Upgraded IBC Client: %s", &ibctmtypes.ClientState{})
+ expect := fmt.Sprintf("IBC Upgrade Proposal\n Title: title\n Description: description\n Upgrade Plan\n Name: ibc upgrade\n height: 1000\n Info: https://foo.bar/baz.\n Upgraded IBC Client: %s", &ibctmtypes.ClientState{})
suite.Require().Equal(expect, proposal.String())
}
diff --git a/testing/simapp/simd/cmd/root.go b/testing/simapp/simd/cmd/root.go
index e195ba6b..b8b1f5a5 100644
--- a/testing/simapp/simd/cmd/root.go
+++ b/testing/simapp/simd/cmd/root.go
@@ -6,30 +6,30 @@ import (
"os"
"path/filepath"
- "github.com/spf13/cast"
- "github.com/spf13/cobra"
- tmcli "github.com/tendermint/tendermint/libs/cli"
- "github.com/tendermint/tendermint/libs/log"
- dbm "github.com/tendermint/tm-db"
-
"github.com/cosmos/cosmos-sdk/baseapp"
"github.com/cosmos/cosmos-sdk/client"
- config "github.com/cosmos/cosmos-sdk/client/config"
+ "github.com/cosmos/cosmos-sdk/client/config"
"github.com/cosmos/cosmos-sdk/client/debug"
"github.com/cosmos/cosmos-sdk/client/flags"
"github.com/cosmos/cosmos-sdk/client/keys"
"github.com/cosmos/cosmos-sdk/client/rpc"
"github.com/cosmos/cosmos-sdk/server"
+ serverconfig "github.com/cosmos/cosmos-sdk/server/config"
servertypes "github.com/cosmos/cosmos-sdk/server/types"
"github.com/cosmos/cosmos-sdk/snapshots"
"github.com/cosmos/cosmos-sdk/store"
sdk "github.com/cosmos/cosmos-sdk/types"
authcmd "github.com/cosmos/cosmos-sdk/x/auth/client/cli"
"github.com/cosmos/cosmos-sdk/x/auth/types"
- vestingcli "github.com/cosmos/cosmos-sdk/x/auth/vesting/client/cli"
banktypes "github.com/cosmos/cosmos-sdk/x/bank/types"
"github.com/cosmos/cosmos-sdk/x/crisis"
genutilcli "github.com/cosmos/cosmos-sdk/x/genutil/client/cli"
+ "github.com/spf13/cast"
+ "github.com/spf13/cobra"
+ tmcli "github.com/tendermint/tendermint/libs/cli"
+ "github.com/tendermint/tendermint/libs/log"
+ dbm "github.com/tendermint/tm-db"
+
"github.com/cosmos/ibc-go/testing/simapp"
"github.com/cosmos/ibc-go/testing/simapp/params"
)
@@ -39,13 +39,12 @@ import (
func NewRootCmd() (*cobra.Command, params.EncodingConfig) {
encodingConfig := simapp.MakeTestEncodingConfig()
initClientCtx := client.Context{}.
- WithJSONCodec(encodingConfig.Marshaler).
+ WithCodec(encodingConfig.Marshaler).
WithInterfaceRegistry(encodingConfig.InterfaceRegistry).
WithTxConfig(encodingConfig.TxConfig).
WithLegacyAmino(encodingConfig.Amino).
WithInput(os.Stdin).
WithAccountRetriever(types.AccountRetriever{}).
- WithBroadcastMode(flags.BroadcastBlock).
WithHomeDir(simapp.DefaultNodeHome).
WithViper("") // In simapp, we don't use any prefix for env variables.
@@ -68,7 +67,9 @@ func NewRootCmd() (*cobra.Command, params.EncodingConfig) {
return err
}
- return server.InterceptConfigsPreRunHandler(cmd)
+ customAppTemplate, customAppConfig := initAppConfig()
+
+ return server.InterceptConfigsPreRunHandler(cmd, customAppTemplate, customAppConfig)
},
}
@@ -77,7 +78,66 @@ func NewRootCmd() (*cobra.Command, params.EncodingConfig) {
return rootCmd, encodingConfig
}
+// initAppConfig helps to override the default appConfig template and configs.
+// Return "", nil if no custom configuration is required for the application.
+func initAppConfig() (string, interface{}) {
+ // The following code snippet is just for reference.
+
+ // WASMConfig defines configuration for the wasm module.
+ type WASMConfig struct {
+ // This is the maximum sdk gas (wasm and storage) that we allow for any x/wasm "smart" queries
+ QueryGasLimit uint64 `mapstructure:"query_gas_limit"`
+
+		// LruSize defines the number of wasm VM instances kept cached in memory for speed-up
+		LruSize uint64 `mapstructure:"lru_size"`
+ }
+
+ type CustomAppConfig struct {
+ serverconfig.Config
+
+ WASM WASMConfig `mapstructure:"wasm"`
+ }
+
+ // Optionally allow the chain developer to overwrite the SDK's default
+ // server config.
+ srvCfg := serverconfig.DefaultConfig()
+ // The SDK's default minimum gas price is set to "" (empty value) inside
+ // app.toml. If left empty by validators, the node will halt on startup.
+ // However, the chain developer can set a default app.toml value for their
+ // validators here.
+ //
+ // In summary:
+ // - if you leave srvCfg.MinGasPrices = "", all validators MUST tweak their
+ // own app.toml config,
+ // - if you set srvCfg.MinGasPrices non-empty, validators CAN tweak their
+ // own app.toml to override, or use this default value.
+ //
+ // In simapp, we set the min gas prices to 0.
+ srvCfg.MinGasPrices = "0stake"
+
+ customAppConfig := CustomAppConfig{
+ Config: *srvCfg,
+ WASM: WASMConfig{
+ LruSize: 1,
+ QueryGasLimit: 300000,
+ },
+ }
+
+ customAppTemplate := serverconfig.DefaultConfigTemplate + `
+[wasm]
+# This is the maximum sdk gas (wasm and storage) that we allow for any x/wasm "smart" queries
+query_gas_limit = 300000
+# This is the number of wasm vm instances we keep cached in memory for speed-up
+# Warning: this is currently unstable and may lead to crashes, best to keep it at 0 unless testing locally
+lru_size = 0`
+
+ return customAppTemplate, customAppConfig
+}
+
func initRootCmd(rootCmd *cobra.Command, encodingConfig params.EncodingConfig) {
+ cfg := sdk.GetConfig()
+ cfg.Seal()
+
rootCmd.AddCommand(
genutilcli.InitCmd(simapp.ModuleBasics, simapp.DefaultNodeHome),
genutilcli.CollectGenTxsCmd(banktypes.GenesisBalancesIterator{}, simapp.DefaultNodeHome),
@@ -149,12 +209,9 @@ func txCommand() *cobra.Command {
authcmd.GetMultiSignCommand(),
authcmd.GetMultiSignBatchCmd(),
authcmd.GetValidateSignaturesCommand(),
- flags.LineBreak,
authcmd.GetBroadcastCommand(),
authcmd.GetEncodeCommand(),
authcmd.GetDecodeCommand(),
- flags.LineBreak,
- vestingcli.GetTxCmd(),
)
simapp.ModuleBasics.AddTxCommands(cmd)
@@ -167,7 +224,7 @@ type appCreator struct {
encCfg params.EncodingConfig
}
-// newApp is an AppCreator
+// newApp is an appCreator
func (a appCreator) newApp(logger log.Logger, db dbm.DB, traceStore io.Writer, appOpts servertypes.AppOptions) servertypes.Application {
var cache sdk.MultiStorePersistentCache
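The initAppConfig hook added above defines the custom [wasm] section and writes its defaults into the app.toml template; the runtime counterpart is reading those values back out through the SDK's AppOptions. Below is a minimal, self-contained sketch of that read path under the standard AppOptions.Get / spf13/cast pattern; the appOptions map and the wasmQueryGasLimit helper are illustrative stand-ins, not part of this patch.

```go
package main

import (
	"fmt"

	"github.com/spf13/cast"
)

// appOptions mirrors the shape of servertypes.AppOptions (Get(string) interface{}).
type appOptions map[string]interface{}

func (m appOptions) Get(key string) interface{} { return m[key] }

// wasmQueryGasLimit reads wasm.query_gas_limit from the options, falling back
// to the same default used in the custom app.toml template.
func wasmQueryGasLimit(opts appOptions) uint64 {
	const defaultLimit = uint64(300000)
	v := opts.Get("wasm.query_gas_limit")
	if v == nil {
		return defaultLimit
	}
	return cast.ToUint64(v)
}

func main() {
	opts := appOptions{"wasm.query_gas_limit": 400000}
	fmt.Println(wasmQueryGasLimit(opts)) // 400000
}
```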
From 6fed6eeb0699696cb10f18661324d798781fcf79 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?colin=20axn=C3=A9r?=
<25233464+colin-axner@users.noreply.github.com>
Date: Tue, 6 Jul 2021 16:31:31 +0200
Subject: [PATCH 086/393] update Makefile with buf fixes (#213)
* update Makefile with buf fixes
* update makefile with changes from SDK
---
Makefile | 22 +++++++++++++++-------
1 file changed, 15 insertions(+), 7 deletions(-)
diff --git a/Makefile b/Makefile
index 69318533..ef674a5f 100644
--- a/Makefile
+++ b/Makefile
@@ -365,26 +365,34 @@ devdoc-update:
### Protobuf ###
###############################################################################
+containerProtoVer=v0.2
+containerProtoImage=tendermintdev/sdk-proto-gen:$(containerProtoVer)
+containerProtoGen=cosmos-sdk-proto-gen-$(containerProtoVer)
+containerProtoGenSwagger=cosmos-sdk-proto-gen-swagger-$(containerProtoVer)
+containerProtoFmt=cosmos-sdk-proto-fmt-$(containerProtoVer)
+
proto-all: proto-format proto-lint proto-gen
proto-gen:
@echo "Generating Protobuf files"
- $(DOCKER) run --rm -v $(CURDIR):/workspace --workdir /workspace tendermintdev/sdk-proto-gen sh ./scripts/protocgen.sh
+ @if docker ps -a --format '{{.Names}}' | grep -Eq "^${containerProtoGen}$$"; then docker start -a $(containerProtoGen); else docker run --name $(containerProtoGen) -v $(CURDIR):/workspace --workdir /workspace $(containerProtoImage) \
+ sh ./scripts/protocgen.sh; fi
proto-format:
@echo "Formatting Protobuf files"
- $(DOCKER) run --rm -v $(CURDIR):/workspace \
- --workdir /workspace tendermintdev/docker-build-proto \
- find ./ -not -path "./third_party/*" -name *.proto -exec clang-format -i {} \;
+ @if docker ps -a --format '{{.Names}}' | grep -Eq "^${containerProtoFmt}$$"; then docker start -a $(containerProtoFmt); else docker run --name $(containerProtoFmt) -v $(CURDIR):/workspace --workdir /workspace tendermintdev/docker-build-proto \
+ find ./ -not -path "./third_party/*" -name "*.proto" -exec clang-format -i {} \; ; fi
proto-swagger-gen:
- @./scripts/protoc-swagger-gen.sh
+ @echo "Generating Protobuf Swagger"
+ @if docker ps -a --format '{{.Names}}' | grep -Eq "^${containerProtoGenSwagger}$$"; then docker start -a $(containerProtoGenSwagger); else docker run --name $(containerProtoGenSwagger) -v $(CURDIR):/workspace --workdir /workspace $(containerProtoImage) \
+ sh ./scripts/protoc-swagger-gen.sh; fi
proto-lint:
- @$(DOCKER_BUF) check lint --error-format=json
+ @$(DOCKER_BUF) lint --error-format=json
proto-check-breaking:
- @$(DOCKER_BUF) check breaking --against-input $(HTTPS_GIT)#branch=main
+ @$(DOCKER_BUF) breaking --against $(HTTPS_GIT)#branch=main
TM_URL = https://raw.githubusercontent.com/tendermint/tendermint/v0.34.0-rc6/proto/tendermint
GOGO_PROTO_URL = https://raw.githubusercontent.com/regen-network/protobuf/cosmos
From 0f01c678dbede08e1b8c8f744d1e707b477cc37d Mon Sep 17 00:00:00 2001
From: Aditya
Date: Tue, 6 Jul 2021 16:48:41 +0200
Subject: [PATCH 087/393] Sentinel Root Fix (#234)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
* fix sentinel value
* add godoc and test
* fix grammar
* add changelog
Co-authored-by: colin axnér <25233464+colin-axner@users.noreply.github.com>
---
CHANGELOG.md | 1 +
.../light-clients/07-tendermint/types/consensus_state.go | 3 +++
.../07-tendermint/types/consensus_state_test.go | 7 +++++++
modules/light-clients/07-tendermint/types/upgrade.go | 4 ++--
4 files changed, 13 insertions(+), 2 deletions(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index e62693b8..c66ceb49 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -43,6 +43,7 @@ Ref: https://keepachangelog.com/en/1.0.0/
### Bug Fixes
+* (07-tendermint) [\#234](https://github.com/cosmos/ibc-go/pull/234) Use sentinel value for the consensus state root set during a client upgrade. This prevents genesis validation from failing.
* (modules) [\#223](https://github.com/cosmos/ibc-go/pull/223) Use correct Prometheus format for metric labels.
* (06-solomachine) [\#214](https://github.com/cosmos/ibc-go/pull/214) Disable defensive timestamp check in SendPacket for solo machine clients.
* (07-tendermint) [\#210](https://github.com/cosmos/ibc-go/pull/210) Export all consensus metadata on genesis restarts for tendermint clients.
diff --git a/modules/light-clients/07-tendermint/types/consensus_state.go b/modules/light-clients/07-tendermint/types/consensus_state.go
index c4a92fed..046d73ce 100644
--- a/modules/light-clients/07-tendermint/types/consensus_state.go
+++ b/modules/light-clients/07-tendermint/types/consensus_state.go
@@ -12,6 +12,9 @@ import (
"github.com/cosmos/ibc-go/modules/core/exported"
)
+// SentinelRoot is used as a stand-in root value for the consensus state set at the upgrade height
+const SentinelRoot = "sentinel_root"
+
// NewConsensusState creates a new ConsensusState instance.
func NewConsensusState(
timestamp time.Time, root commitmenttypes.MerkleRoot, nextValsHash tmbytes.HexBytes,
diff --git a/modules/light-clients/07-tendermint/types/consensus_state_test.go b/modules/light-clients/07-tendermint/types/consensus_state_test.go
index 2664071d..b4964ef4 100644
--- a/modules/light-clients/07-tendermint/types/consensus_state_test.go
+++ b/modules/light-clients/07-tendermint/types/consensus_state_test.go
@@ -21,6 +21,13 @@ func (suite *TendermintTestSuite) TestConsensusStateValidateBasic() {
NextValidatorsHash: suite.valsHash,
},
true},
+ {"success with sentinel",
+ &types.ConsensusState{
+ Timestamp: suite.now,
+ Root: commitmenttypes.NewMerkleRoot([]byte(types.SentinelRoot)),
+ NextValidatorsHash: suite.valsHash,
+ },
+ true},
{"root is nil",
&types.ConsensusState{
Timestamp: suite.now,
diff --git a/modules/light-clients/07-tendermint/types/upgrade.go b/modules/light-clients/07-tendermint/types/upgrade.go
index a4dead6f..d3801d67 100644
--- a/modules/light-clients/07-tendermint/types/upgrade.go
+++ b/modules/light-clients/07-tendermint/types/upgrade.go
@@ -108,14 +108,14 @@ func (cs ClientState) VerifyUpgradeAndUpdateState(
}
// The new consensus state is merely used as a trusted kernel against which headers on the new
- // chain can be verified. The root is empty as it cannot be known in advance, thus no proof verification will pass.
+ // chain can be verified. The root is just a stand-in sentinel value as it cannot be known in advance, thus no proof verification will pass.
// The timestamp and the NextValidatorsHash of the consensus state is the blocktime and NextValidatorsHash
// of the last block committed by the old chain. This will allow the first block of the new chain to be verified against
// the last validators of the old chain so long as it is submitted within the TrustingPeriod of this client.
// NOTE: We do not set processed time for this consensus state since this consensus state should not be used for packet verification
// as the root is empty. The next consensus state submitted using update will be usable for packet-verification.
newConsState := NewConsensusState(
- tmUpgradeConsState.Timestamp, commitmenttypes.MerkleRoot{}, tmUpgradeConsState.NextValidatorsHash,
+ tmUpgradeConsState.Timestamp, commitmenttypes.NewMerkleRoot([]byte(SentinelRoot)), tmUpgradeConsState.NextValidatorsHash,
)
// set metadata for this consensus state
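For context on the sentinel root set in VerifyUpgradeAndUpdateState above: the value is non-empty, so the upgraded consensus state passes basic and genesis validation, yet it can never equal a real application hash, so no membership proof verifies against it. A minimal sketch of that property follows; MerkleRoot here is a stand-in for commitmenttypes.MerkleRoot, not the actual type.

```go
package main

import (
	"bytes"
	"fmt"
)

// SentinelRoot matches the constant added in consensus_state.go above.
const SentinelRoot = "sentinel_root"

// MerkleRoot is a stand-in for commitmenttypes.MerkleRoot.
type MerkleRoot struct{ Hash []byte }

func NewMerkleRoot(hash []byte) MerkleRoot { return MerkleRoot{Hash: hash} }

// Empty mirrors the emptiness check that previously made genesis validation fail.
func (r MerkleRoot) Empty() bool { return len(r.Hash) == 0 }

func main() {
	upgraded := NewMerkleRoot([]byte(SentinelRoot))
	realAppHash := []byte("app hash committed by the upgraded chain")

	// Non-empty, so ConsensusState.ValidateBasic no longer rejects it on genesis restart.
	fmt.Println("empty root:", upgraded.Empty()) // false
	// Never equal to a real application hash, so packet proofs against it cannot pass.
	fmt.Println("matches a real root:", bytes.Equal(upgraded.Hash, realAppHash)) // false
}
```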
From c1f05145e9724573c5a1b5aa517aae1fcc918d0c Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 6 Jul 2021 15:09:45 +0000
Subject: [PATCH 088/393] Bump technote-space/get-diff-action from 4.1.1 to 4.2
(#232)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Bumps [technote-space/get-diff-action](https://github.com/technote-space/get-diff-action) from 4.1.1 to 4.2.
- [Release notes](https://github.com/technote-space/get-diff-action/releases)
- [Changelog](https://github.com/technote-space/get-diff-action/blob/main/.releasegarc)
- [Commits](https://github.com/technote-space/get-diff-action/compare/v4.1.1...v4.2)
---
updated-dependencies:
- dependency-name: technote-space/get-diff-action
dependency-type: direct:production
update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: colin axnér <25233464+colin-axner@users.noreply.github.com>
---
.github/workflows/test.yml | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 941c697d..6259fb60 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -41,7 +41,7 @@ jobs:
- uses: actions/setup-go@v2.1.3
with:
go-version: 1.15
- - uses: technote-space/get-diff-action@v4.1.1
+ - uses: technote-space/get-diff-action@v4.2
id: git_diff
with:
PATTERNS: |
@@ -89,7 +89,7 @@ jobs:
- uses: actions/setup-go@v2.1.3
with:
go-version: 1.15
- - uses: technote-space/get-diff-action@v4.1.1
+ - uses: technote-space/get-diff-action@v4.2
with:
PATTERNS: |
**/**.go
@@ -113,7 +113,7 @@ jobs:
needs: tests
steps:
- uses: actions/checkout@v2.3.4
- - uses: technote-space/get-diff-action@v4.1.1
+ - uses: technote-space/get-diff-action@v4.2
with:
PATTERNS: |
**/**.go
From 2f0cad2807828a63f6305ea13760a15dc1693332 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 6 Jul 2021 15:26:02 +0000
Subject: [PATCH 089/393] Bump google.golang.org/protobuf from 1.26.0 to 1.27.1
(#236)
Bumps [google.golang.org/protobuf](https://github.com/protocolbuffers/protobuf-go) from 1.26.0 to 1.27.1.
- [Release notes](https://github.com/protocolbuffers/protobuf-go/releases)
- [Changelog](https://github.com/protocolbuffers/protobuf-go/blob/master/release.bash)
- [Commits](https://github.com/protocolbuffers/protobuf-go/compare/v1.26.0...v1.27.1)
---
updated-dependencies:
- dependency-name: google.golang.org/protobuf
dependency-type: direct:production
update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
go.mod | 2 +-
go.sum | 3 ++-
2 files changed, 3 insertions(+), 2 deletions(-)
diff --git a/go.mod b/go.mod
index 30f65bae..c0fa25b0 100644
--- a/go.mod
+++ b/go.mod
@@ -22,5 +22,5 @@ require (
github.com/tendermint/tm-db v0.6.4
google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c
google.golang.org/grpc v1.38.0
- google.golang.org/protobuf v1.26.0
+ google.golang.org/protobuf v1.27.1
)
diff --git a/go.sum b/go.sum
index 8b45c8a4..121ae250 100644
--- a/go.sum
+++ b/go.sum
@@ -1238,8 +1238,9 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.25.1-0.20200805231151-a709e31e5d12/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
-google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ=
+google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
From b80d906b0f31b39c907aca8ff24c91af02b295db Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 6 Jul 2021 15:35:51 +0000
Subject: [PATCH 090/393] Bump github.com/spf13/cobra from 1.1.3 to 1.2.1
(#237)
Bumps [github.com/spf13/cobra](https://github.com/spf13/cobra) from 1.1.3 to 1.2.1.
- [Release notes](https://github.com/spf13/cobra/releases)
- [Changelog](https://github.com/spf13/cobra/blob/master/CHANGELOG.md)
- [Commits](https://github.com/spf13/cobra/compare/v1.1.3...v1.2.1)
---
updated-dependencies:
- dependency-name: github.com/spf13/cobra
dependency-type: direct:production
update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
go.mod | 4 ++--
go.sum | 6 ++++--
2 files changed, 6 insertions(+), 4 deletions(-)
diff --git a/go.mod b/go.mod
index c0fa25b0..fd121a2d 100644
--- a/go.mod
+++ b/go.mod
@@ -15,8 +15,8 @@ require (
github.com/pkg/errors v0.9.1
github.com/rakyll/statik v0.1.7
github.com/spf13/cast v1.3.1
- github.com/spf13/cobra v1.1.3
- github.com/spf13/viper v1.8.0
+ github.com/spf13/cobra v1.2.1
+ github.com/spf13/viper v1.8.1
github.com/stretchr/testify v1.7.0
github.com/tendermint/tendermint v0.34.11
github.com/tendermint/tm-db v0.6.4
diff --git a/go.sum b/go.sum
index 121ae250..a5f564a7 100644
--- a/go.sum
+++ b/go.sum
@@ -718,8 +718,9 @@ github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI=
-github.com/spf13/cobra v1.1.3 h1:xghbfqPkxzxP3C/f3n5DdpAbdKLj4ZE4BWQI362l53M=
github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
+github.com/spf13/cobra v1.2.1 h1:+KmjbUw1hriSNMF55oPrkZcb27aECyrj8V2ytv7kWDw=
+github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
@@ -731,8 +732,9 @@ github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DM
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
-github.com/spf13/viper v1.8.0 h1:QRwDgoG8xX+kp69di68D+YYTCWfYEckbZRfUlEIAal0=
github.com/spf13/viper v1.8.0/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns=
+github.com/spf13/viper v1.8.1 h1:Kq1fyeebqsBfbjZj4EL7gj2IO0mMaiyjYUWcUsl2O44=
+github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns=
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q=
github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570/go.mod h1:8OR4w3TdeIHIh1g6EMY5p0gVNOovcWC+1vpc7naMuAw=
github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3/go.mod h1:hpGUWaI9xL8pRQCTXQgocU38Qw1g0Us7n5PxxTwTCYU=
From 544e9cc04621182fc61ea4d17ffa53861a2aa7af Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 6 Jul 2021 17:58:47 +0200
Subject: [PATCH 091/393] Bump google.golang.org/grpc from 1.38.0 to 1.39.0
(#238)
Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.38.0 to 1.39.0.
- [Release notes](https://github.com/grpc/grpc-go/releases)
- [Commits](https://github.com/grpc/grpc-go/compare/v1.38.0...v1.39.0)
---
updated-dependencies:
- dependency-name: google.golang.org/grpc
dependency-type: direct:production
update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
go.mod | 2 +-
go.sum | 6 +++++-
2 files changed, 6 insertions(+), 2 deletions(-)
diff --git a/go.mod b/go.mod
index fd121a2d..50fe2ef0 100644
--- a/go.mod
+++ b/go.mod
@@ -21,6 +21,6 @@ require (
github.com/tendermint/tendermint v0.34.11
github.com/tendermint/tm-db v0.6.4
google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c
- google.golang.org/grpc v1.38.0
+ google.golang.org/grpc v1.39.0
google.golang.org/protobuf v1.27.1
)
diff --git a/go.sum b/go.sum
index a5f564a7..fdefb042 100644
--- a/go.sum
+++ b/go.sum
@@ -145,6 +145,7 @@ github.com/cloudflare/cloudflare-go v0.10.2-0.20190916151808-a80f83b9add9/go.mod
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
github.com/coinbase/rosetta-sdk-go v0.6.10 h1:rgHD/nHjxLh0lMEdfGDqpTtlvtSBwULqrrZ2qPdNaCM=
@@ -226,6 +227,7 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m
github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/ethereum/go-ethereum v1.9.25/go.mod h1:vMkFiYLHI4tgPw4k2j4MHKoovchFE8plZ0M9VMk4/oM=
github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51 h1:0JZ+dUmQeA8IIVUMzysrX4/AKuQwWhV2dYQuPZdvdSQ=
@@ -821,6 +823,7 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
+go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
@@ -1226,8 +1229,9 @@ google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAG
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
-google.golang.org/grpc v1.38.0 h1:/9BgsAsa5nWe26HqOlvlgJnqBuktYOLCgjCPqsa56W0=
google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.39.0 h1:Klz8I9kdtkIN6EpHHUOMLCYhTn/2WAe5a0s1hcBkdTI=
+google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
From 980e185df6b937c3ad4de329483c11ead9840c3c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?colin=20axn=C3=A9r?=
<25233464+colin-axner@users.noreply.github.com>
Date: Wed, 7 Jul 2021 14:46:14 +0200
Subject: [PATCH 092/393] export connection params (#242)
---
modules/core/03-connection/genesis.go | 1 +
1 file changed, 1 insertion(+)
diff --git a/modules/core/03-connection/genesis.go b/modules/core/03-connection/genesis.go
index 4b139c93..d0f7f372 100644
--- a/modules/core/03-connection/genesis.go
+++ b/modules/core/03-connection/genesis.go
@@ -26,5 +26,6 @@ func ExportGenesis(ctx sdk.Context, k keeper.Keeper) types.GenesisState {
Connections: k.GetAllConnections(ctx),
ClientConnectionPaths: k.GetAllClientConnectionPaths(ctx),
NextConnectionSequence: k.GetNextConnectionSequence(ctx),
+ Params: k.GetParams(ctx),
}
}
From d759eb44e7ba2a7751e104e8526736b91fd86219 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?colin=20axn=C3=A9r?=
<25233464+colin-axner@users.noreply.github.com>
Date: Wed, 7 Jul 2021 15:25:05 +0200
Subject: [PATCH 093/393] ensure latest height revision number matches chain id
revision number (#241)
* ensure latest height revision number matches chain id revision number
fix tests as well
* add changelog
* Update modules/light-clients/07-tendermint/types/client_state_test.go
* Update modules/light-clients/07-tendermint/types/client_state_test.go
* address review suggestions
---
CHANGELOG.md | 1 +
modules/core/02-client/keeper/client_test.go | 2 +-
modules/core/keeper/msg_server_test.go | 5 +++--
.../07-tendermint/types/client_state.go | 8 +++++++-
.../07-tendermint/types/client_state_test.go | 7 ++++++-
.../07-tendermint/types/upgrade_test.go | 13 +++++++++----
6 files changed, 27 insertions(+), 9 deletions(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index c66ceb49..f972b9e4 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -43,6 +43,7 @@ Ref: https://keepachangelog.com/en/1.0.0/
### Bug Fixes
+* (07-tendermint) [\#241](https://github.com/cosmos/ibc-go/pull/241) Ensure tendermint client state latest height revision number matches chain id revision number.
* (07-tendermint) [\#234](https://github.com/cosmos/ibc-go/pull/234) Use sentinel value for the consensus state root set during a client upgrade. This prevents genesis validation from failing.
* (modules) [\#223](https://github.com/cosmos/ibc-go/pull/223) Use correct Prometheus format for metric labels.
* (06-solomachine) [\#214](https://github.com/cosmos/ibc-go/pull/214) Disable defensive timestamp check in SendPacket for solo machine clients.
diff --git a/modules/core/02-client/keeper/client_test.go b/modules/core/02-client/keeper/client_test.go
index 96aed366..17c97a8d 100644
--- a/modules/core/02-client/keeper/client_test.go
+++ b/modules/core/02-client/keeper/client_test.go
@@ -394,7 +394,7 @@ func (suite *KeeperTestSuite) TestUpgradeClient() {
tc := tc
path = ibctesting.NewPath(suite.chainA, suite.chainB)
suite.coordinator.SetupClients(path)
- upgradedClient = ibctmtypes.NewClientState("newChainId", ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod+trustingPeriod, maxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false)
+ upgradedClient = ibctmtypes.NewClientState("newChainId-1", ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod+trustingPeriod, maxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false)
upgradedClient = upgradedClient.ZeroCustomFields()
upgradedClientBz, err = types.MarshalClientState(suite.chainA.App.AppCodec(), upgradedClient)
suite.Require().NoError(err)
diff --git a/modules/core/keeper/msg_server_test.go b/modules/core/keeper/msg_server_test.go
index 5660c32d..a337c1fb 100644
--- a/modules/core/keeper/msg_server_test.go
+++ b/modules/core/keeper/msg_server_test.go
@@ -647,6 +647,7 @@ func (suite *KeeperTestSuite) TestUpgradeClient() {
)
newClientHeight := clienttypes.NewHeight(1, 1)
+ newChainId := "newChainId-1"
cases := []struct {
name string
@@ -657,7 +658,7 @@ func (suite *KeeperTestSuite) TestUpgradeClient() {
name: "successful upgrade",
setup: func() {
- upgradedClient = ibctmtypes.NewClientState("newChainId", ibctmtypes.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod+ibctesting.TrustingPeriod, ibctesting.MaxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false)
+ upgradedClient = ibctmtypes.NewClientState(newChainId, ibctmtypes.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod+ibctesting.TrustingPeriod, ibctesting.MaxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false)
// Call ZeroCustomFields on upgraded clients to clear any client-chosen parameters in test-case upgradedClient
upgradedClient = upgradedClient.ZeroCustomFields()
@@ -698,7 +699,7 @@ func (suite *KeeperTestSuite) TestUpgradeClient() {
name: "VerifyUpgrade fails",
setup: func() {
- upgradedClient = ibctmtypes.NewClientState("newChainId", ibctmtypes.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod+ibctesting.TrustingPeriod, ibctesting.MaxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false)
+ upgradedClient = ibctmtypes.NewClientState(newChainId, ibctmtypes.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod+ibctesting.TrustingPeriod, ibctesting.MaxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false)
// Call ZeroCustomFields on upgraded clients to clear any client-chosen parameters in test-case upgradedClient
upgradedClient = upgradedClient.ZeroCustomFields()
diff --git a/modules/light-clients/07-tendermint/types/client_state.go b/modules/light-clients/07-tendermint/types/client_state.go
index 996a2f3b..4faa3796 100644
--- a/modules/light-clients/07-tendermint/types/client_state.go
+++ b/modules/light-clients/07-tendermint/types/client_state.go
@@ -122,8 +122,14 @@ func (cs ClientState) Validate() error {
if cs.MaxClockDrift == 0 {
return sdkerrors.Wrap(ErrInvalidMaxClockDrift, "max clock drift cannot be zero")
}
+
+ // the latest height revision number must match the chain id revision number
+ if cs.LatestHeight.RevisionNumber != clienttypes.ParseChainID(cs.ChainId) {
+ return sdkerrors.Wrapf(ErrInvalidHeaderHeight,
+ "latest height revision number must match chain id revision number (%d != %d)", cs.LatestHeight.RevisionNumber, clienttypes.ParseChainID(cs.ChainId))
+ }
if cs.LatestHeight.RevisionHeight == 0 {
- return sdkerrors.Wrapf(ErrInvalidHeaderHeight, "tendermint revision height cannot be zero")
+ return sdkerrors.Wrapf(ErrInvalidHeaderHeight, "tendermint client's latest height revision height cannot be zero")
}
if cs.TrustingPeriod >= cs.UnbondingPeriod {
return sdkerrors.Wrapf(
diff --git a/modules/light-clients/07-tendermint/types/client_state_test.go b/modules/light-clients/07-tendermint/types/client_state_test.go
index b6235113..d582af36 100644
--- a/modules/light-clients/07-tendermint/types/client_state_test.go
+++ b/modules/light-clients/07-tendermint/types/client_state_test.go
@@ -129,7 +129,12 @@ func (suite *TendermintTestSuite) TestValidate() {
expPass: false,
},
{
- name: "invalid height",
+ name: "invalid revision number",
+ clientState: types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, clienttypes.NewHeight(1, 1), commitmenttypes.GetSDKSpecs(), upgradePath, false, false),
+ expPass: false,
+ },
+ {
+ name: "invalid revision height",
clientState: types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, clienttypes.ZeroHeight(), commitmenttypes.GetSDKSpecs(), upgradePath, false, false),
expPass: false,
},
diff --git a/modules/light-clients/07-tendermint/types/upgrade_test.go b/modules/light-clients/07-tendermint/types/upgrade_test.go
index 6c1baef6..df974a80 100644
--- a/modules/light-clients/07-tendermint/types/upgrade_test.go
+++ b/modules/light-clients/07-tendermint/types/upgrade_test.go
@@ -10,6 +10,10 @@ import (
ibctesting "github.com/cosmos/ibc-go/testing"
)
+var (
+ newChainId = "newChainId-1"
+)
+
func (suite *TendermintTestSuite) TestVerifyUpgrade() {
var (
upgradedClient exported.ClientState
@@ -54,6 +58,7 @@ func (suite *TendermintTestSuite) TestVerifyUpgrade() {
name: "successful upgrade to same revision",
setup: func() {
upgradedHeight := clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+2))
+ // don't use -1 suffix in chain id
upgradedClient = types.NewClientState("newChainId", types.DefaultTrustLevel, trustingPeriod, ubdPeriod+trustingPeriod, maxClockDrift, upgradedHeight, commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
upgradedClient = upgradedClient.ZeroCustomFields()
upgradedClientBz, err = clienttypes.MarshalClientState(suite.chainA.App.AppCodec(), upgradedClient)
@@ -109,7 +114,7 @@ func (suite *TendermintTestSuite) TestVerifyUpgrade() {
name: "unsuccessful upgrade: committed client does not have zeroed custom fields",
setup: func() {
// non-zeroed upgrade client
- upgradedClient = types.NewClientState("newChainId", types.DefaultTrustLevel, trustingPeriod, ubdPeriod+trustingPeriod, maxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
+ upgradedClient = types.NewClientState(newChainId, types.DefaultTrustLevel, trustingPeriod, ubdPeriod+trustingPeriod, maxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
upgradedClientBz, err = clienttypes.MarshalClientState(suite.chainA.App.AppCodec(), upgradedClient)
suite.Require().NoError(err)
@@ -167,7 +172,7 @@ func (suite *TendermintTestSuite) TestVerifyUpgrade() {
suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz)
// change upgradedClient client-specified parameters
- upgradedClient = types.NewClientState("newChainId", types.DefaultTrustLevel, ubdPeriod, ubdPeriod+trustingPeriod, maxClockDrift+5, lastHeight, commitmenttypes.GetSDKSpecs(), upgradePath, true, false)
+ upgradedClient = types.NewClientState(newChainId, types.DefaultTrustLevel, ubdPeriod, ubdPeriod+trustingPeriod, maxClockDrift+5, lastHeight, commitmenttypes.GetSDKSpecs(), upgradePath, true, false)
suite.coordinator.CommitBlock(suite.chainB)
err := path.EndpointA.UpdateClient()
@@ -398,7 +403,7 @@ func (suite *TendermintTestSuite) TestVerifyUpgrade() {
name: "unsuccessful upgrade: final client is not valid",
setup: func() {
// new client has smaller unbonding period such that old trusting period is no longer valid
- upgradedClient = types.NewClientState("newChainId", types.DefaultTrustLevel, trustingPeriod, trustingPeriod, maxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
+ upgradedClient = types.NewClientState(newChainId, types.DefaultTrustLevel, trustingPeriod, trustingPeriod, maxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
upgradedClientBz, err = clienttypes.MarshalClientState(suite.chainA.App.AppCodec(), upgradedClient)
suite.Require().NoError(err)
@@ -433,7 +438,7 @@ func (suite *TendermintTestSuite) TestVerifyUpgrade() {
path = ibctesting.NewPath(suite.chainA, suite.chainB)
suite.coordinator.SetupClients(path)
- upgradedClient = types.NewClientState("newChainId", types.DefaultTrustLevel, trustingPeriod, ubdPeriod+trustingPeriod, maxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
+ upgradedClient = types.NewClientState(newChainId, types.DefaultTrustLevel, trustingPeriod, ubdPeriod+trustingPeriod, maxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), upgradePath, false, false)
upgradedClient = upgradedClient.ZeroCustomFields()
upgradedClientBz, err = clienttypes.MarshalClientState(suite.chainA.App.AppCodec(), upgradedClient)
suite.Require().NoError(err)
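To illustrate why the test chain IDs above gain a `-1` suffix: the new `Validate` check compares the client state's latest height revision number against the revision number parsed from the chain ID. A small, hedged example of that parsing, assuming the ibc-go v1 import path for the 02-client types:

```go
package main

import (
	"fmt"

	clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types"
)

func main() {
	// A chain ID without a revision suffix parses to revision 0, so a client
	// state whose latest height is at revision 1 (e.g. clienttypes.NewHeight(1, 1))
	// now fails Validate; "newChainId-1" parses to revision 1 and passes.
	fmt.Println(clienttypes.ParseChainID("newChainId"))   // 0
	fmt.Println(clienttypes.ParseChainID("newChainId-1")) // 1
}
```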
From ca7f92d0a173a1a4d3fbb565cdba7aab015a5d83 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?colin=20axn=C3=A9r?=
<25233464+colin-axner@users.noreply.github.com>
Date: Wed, 7 Jul 2021 15:27:17 +0200
Subject: [PATCH 094/393] prep changelog for rc0 (#244)
* prep changelog for rc0
* fix link
Co-authored-by: Aditya
---
CHANGELOG.md | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index f972b9e4..4c2de271 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -34,12 +34,12 @@ Ref: https://keepachangelog.com/en/1.0.0/
# Changelog
-## [Unreleased (2.0)]
+## [Unreleased]
* (core) [\#227](https://github.com/cosmos/ibc-go/pull/227) Remove sdk.Result from application callbacks
-## [Unreleased]
+## [v1.0.0-rc0](https://github.com/cosmos/ibc-go/releases/tag/v1.0.0-rc0) - 2021-07-07
### Bug Fixes
From cccee907bf666492c400e6ddcba13da5ccdacf5d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?colin=20axn=C3=A9r?=
<25233464+colin-axner@users.noreply.github.com>
Date: Tue, 13 Jul 2021 16:11:49 +0200
Subject: [PATCH 095/393] fix link (#250)
---
docs/migrations/ibc-migration-043.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docs/migrations/ibc-migration-043.md b/docs/migrations/ibc-migration-043.md
index 82154645..0e07c448 100644
--- a/docs/migrations/ibc-migration-043.md
+++ b/docs/migrations/ibc-migration-043.md
@@ -164,7 +164,7 @@ The solo machine has replaced the FrozenSequence uint64 field with a IsFrozen bo
Application developers need to update their `OnRecvPacket` callback logic.
-The `OnRecvPacket` callback has been modified to only return the acknowledgement. The acknowledgement returned must implement the `Acknowledgement` interface. The acknowledgement should indicate if it represents a successful processing of a packet by returning true on `Success()` and false in all other cases. A return value of false on `Success()` will result in all state changes which occurred in the callback being discarded. More information can be found in the [documentation](https://github.com/cosmos/ibc-go/blob/main/docs/custom.md#receiving-packets).
+The `OnRecvPacket` callback has been modified to only return the acknowledgement. The acknowledgement returned must implement the `Acknowledgement` interface. The acknowledgement should indicate if it represents a successful processing of a packet by returning true on `Success()` and false in all other cases. A return value of false on `Success()` will result in all state changes which occurred in the callback being discarded. More information can be found in the [documentation](https://github.com/cosmos/ibc-go/blob/main/docs/ibc/apps.md#receiving-packets).
The `OnRecvPacket`, `OnAcknowledgementPacket`, and `OnTimeoutPacket` callbacks are now passed the `sdk.AccAddress` of the relayer who relayed the IBC packet. Applications may use or ignore this information.
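Since the migration note above describes the new `OnRecvPacket` contract only in prose, here is a minimal, hypothetical sketch of the callback shape it implies (the module and helper names are illustrative rather than taken from the transfer module; import paths assume ibc-go v1):

```go
package myapp

import (
	sdk "github.com/cosmos/cosmos-sdk/types"
	channeltypes "github.com/cosmos/ibc-go/modules/core/04-channel/types"
	ibcexported "github.com/cosmos/ibc-go/modules/core/exported"
)

// DemoModule is a stand-in IBC application module used only for this sketch.
type DemoModule struct{}

// OnRecvPacket returns only an acknowledgement, as described above. Returning
// an acknowledgement whose Success() is false causes core IBC to discard any
// state changes made in this callback.
func (DemoModule) OnRecvPacket(
	ctx sdk.Context,
	packet channeltypes.Packet,
	relayer sdk.AccAddress,
) ibcexported.Acknowledgement {
	if err := processPacket(ctx, packet); err != nil {
		// error acknowledgements report Success() == false
		return channeltypes.NewErrorAcknowledgement(err.Error())
	}
	return channeltypes.NewResultAcknowledgement([]byte{byte(1)})
}

// processPacket is a hypothetical helper standing in for application logic.
func processPacket(ctx sdk.Context, packet channeltypes.Packet) error {
	return nil
}
```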
From fa404e5cc2b18d902638e837b914274be9eda38e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?colin=20axn=C3=A9r?=
<25233464+colin-axner@users.noreply.github.com>
Date: Wed, 14 Jul 2021 09:25:45 +0200
Subject: [PATCH 096/393] Update outdated documentation (#252)
* fix #85
* fix #77
---
docs/ibc/integration.md | 40 ++++-------------------
modules/apps/transfer/spec/01_concepts.md | 4 +--
2 files changed, 8 insertions(+), 36 deletions(-)
diff --git a/docs/ibc/integration.md b/docs/ibc/integration.md
index ec48126f..b294b54b 100644
--- a/docs/ibc/integration.md
+++ b/docs/ibc/integration.md
@@ -17,15 +17,15 @@ Integrating the IBC module to your SDK-based application is straightforward. The
- Add required modules to the `module.BasicManager`
- Define additional `Keeper` fields for the new modules on the `App` type
- Add the module's `StoreKeys` and initialize their `Keepers`
-- Set up corresponding routers and routes for the `ibc` and `evidence` modules
+- Set up corresponding routers and routes for the `ibc` module
- Add the modules to the module `Manager`
- Add modules to `Begin/EndBlockers` and `InitGenesis`
- Update the module `SimulationManager` to enable simulations
### Module `BasicManager` and `ModuleAccount` permissions
-The first step is to add the following modules to the `BasicManager`: `x/capability`, `x/ibc`,
-`x/evidence` and `x/ibc-transfer`. After that, we need to grant `Minter` and `Burner` permissions to
+The first step is to add the following modules to the `BasicManager`: `x/capability`, `x/ibc`,
+and `x/ibc-transfer`. After that, we need to grant `Minter` and `Burner` permissions to
the `ibc-transfer` `ModuleAccount` to mint and burn relayed tokens.
```go
@@ -36,7 +36,6 @@ var (
// ...
capability.AppModuleBasic{},
ibc.AppModuleBasic{},
- evidence.AppModuleBasic{},
transfer.AppModuleBasic{}, // i.e ibc-transfer module
)
@@ -60,7 +59,6 @@ type App struct {
// other keepers
// ...
IBCKeeper *ibckeeper.Keeper // IBC Keeper must be a pointer in the app, so we can SetRouter on it correctly
- EvidenceKeeper evidencekeeper.Keeper // required to set up the client misbehaviour route
TransferKeeper ibctransferkeeper.Keeper // for cross-chain fungible token transfers
// make scoped keepers public for test purposes
@@ -105,11 +103,6 @@ func NewApp(...args) *App {
)
transferModule := transfer.NewAppModule(app.TransferKeeper)
- // Create evidence Keeper for to register the IBC light client misbehaviour evidence route
- evidenceKeeper := evidencekeeper.NewKeeper(
- appCodec, keys[evidencetypes.StoreKey], &app.StakingKeeper, app.SlashingKeeper,
- )
-
// .. continues
}
```
@@ -126,12 +119,6 @@ IBC module.
Adding the module routes allows the IBC handler to call the appropriate callback when processing a
channel handshake or a packet.
-The second `Router` that is required is the evidence module router. This router handles genenal
-evidence submission and routes the business logic to each registered evidence handler. In the case
-of IBC, it is required to submit evidence for [light client
-misbehaviour](https://github.com/cosmos/ics/tree/master/spec/ics-002-client-semantics#misbehaviour)
-in order to freeze a client and prevent further data packets from being sent/received.
-
Currently, a `Router` is static so it must be initialized and set correctly on app initialization.
Once the `Router` has been set, no new routes can be added.
@@ -147,19 +134,6 @@ func NewApp(...args) *App {
// No more routes can be added
app.IBCKeeper.SetRouter(ibcRouter)
- // create static Evidence routers
-
- evidenceRouter := evidencetypes.NewRouter().
- // add IBC ClientMisbehaviour evidence handler
- AddRoute(ibcclient.RouterKey, ibcclient.HandlerClientMisbehaviour(app.IBCKeeper.ClientKeeper))
-
- // Setting Router will finalize all routes by sealing router
- // No more routes can be added
- evidenceKeeper.SetRouter(evidenceRouter)
-
- // set the evidence keeper from the section above
- app.EvidenceKeeper = *evidenceKeeper
-
// .. continues
```
@@ -176,7 +150,6 @@ func NewApp(...args) *App {
// other modules
// ...
capability.NewAppModule(appCodec, *app.CapabilityKeeper),
- evidence.NewAppModule(app.EvidenceKeeper),
ibc.NewAppModule(app.IBCKeeper),
transferModule,
)
@@ -187,7 +160,6 @@ func NewApp(...args) *App {
// other modules
// ...
capability.NewAppModule(appCodec, *app.CapabilityKeeper),
- evidence.NewAppModule(app.EvidenceKeeper),
ibc.NewAppModule(app.IBCKeeper),
transferModule,
)
@@ -219,10 +191,10 @@ localhost (_aka_ loopback) client.
func NewApp(...args) *App {
// .. continuation from above
- // add evidence, staking and ibc modules to BeginBlockers
+ // add staking and ibc modules to BeginBlockers
app.mm.SetOrderBeginBlockers(
// other modules ...
- evidencetypes.ModuleName, stakingtypes.ModuleName, ibchost.ModuleName,
+ stakingtypes.ModuleName, ibchost.ModuleName,
)
// ...
@@ -233,7 +205,7 @@ func NewApp(...args) *App {
app.mm.SetOrderInitGenesis(
capabilitytypes.ModuleName,
// other modules ...
- ibchost.ModuleName, evidencetypes.ModuleName, ibctransfertypes.ModuleName,
+ ibchost.ModuleName, ibctransfertypes.ModuleName,
)
// .. continues
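Following the guide's own convention of partial `NewApp` excerpts, a hedged sketch of the remaining router wiring once the evidence router is dropped (the `porttypes` and `ibctransfertypes` aliases are assumed to point at the ibc-go port and transfer type packages):

```go
func NewApp(...args) *App {
  // .. continuation from above

  // Only the IBC port router is needed now; the evidence router is gone.
  ibcRouter := porttypes.NewRouter()
  ibcRouter.AddRoute(ibctransfertypes.ModuleName, transferModule)

  // Setting the Router will finalize all routes by sealing the router.
  // No more routes can be added.
  app.IBCKeeper.SetRouter(ibcRouter)

  // .. continues
}
```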
diff --git a/modules/apps/transfer/spec/01_concepts.md b/modules/apps/transfer/spec/01_concepts.md
index 96f05f12..5b513d1f 100644
--- a/modules/apps/transfer/spec/01_concepts.md
+++ b/modules/apps/transfer/spec/01_concepts.md
@@ -31,7 +31,7 @@ acting as the "source zone". When the token is sent back to the chain it previou
prefix is removed. This is a backwards movement in the token's timeline and the sender chain is
acting as the "sink zone".
-It is strongly recommended to read the full details of [ADR 001: Coin Source Tracing](./../../../../../docs/architecture/adr-001-coin-source-tracing.md) to understand the implications and context of the IBC token representations.
+It is strongly recommended to read the full details of [ADR 001: Coin Source Tracing](https://github.com/cosmos/ibc-go/blob/main/docs/architecture/adr-001-coin-source-tracing.md) to understand the implications and context of the IBC token representations.
### UX suggestions for clients
@@ -96,7 +96,7 @@ The only viable alternative for clients (at the time of writing) to tokens with
## Locked Funds
-In some [exceptional cases](./../../../../../docs/architecture/adr-026-ibc-client-recovery-mechanisms.md#exceptional-cases), a client state associated with a given channel cannot be updated. This causes that funds from fungible tokens in that channel will be permanently locked and thus can no longer be transferred.
+In some [exceptional cases](https://github.com/cosmos/ibc-go/blob/main/docs/architecture/adr-026-ibc-client-recovery-mechanisms.md#exceptional-cases), a client state associated with a given channel cannot be updated. This causes that funds from fungible tokens in that channel will be permanently locked and thus can no longer be transferred.
To mitigate this, a client update governance proposal can be submitted to update the frozen client
with a new valid header. Once the proposal passes the client state will be unfrozen and the funds
From 484cedcac875e8f96457d245d2eff816901976d5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?colin=20axn=C3=A9r?=
<25233464+colin-axner@users.noreply.github.com>
Date: Wed, 14 Jul 2021 12:53:44 +0200
Subject: [PATCH 097/393] add genesis restart docs (#247)
* add genesis restart docs
* add small note
---
docs/ibc/upgrades/genesis-restart.md | 49 ++++++++++++++++++++++++++++
docs/ibc/upgrades/quick-guide.md | 8 ++---
2 files changed, 53 insertions(+), 4 deletions(-)
create mode 100644 docs/ibc/upgrades/genesis-restart.md
diff --git a/docs/ibc/upgrades/genesis-restart.md b/docs/ibc/upgrades/genesis-restart.md
new file mode 100644
index 00000000..1b6ab4e4
--- /dev/null
+++ b/docs/ibc/upgrades/genesis-restart.md
@@ -0,0 +1,49 @@
+
+
+# Genesis Restart Upgrades
+
+Learn how to upgrade your chain and counterparty clients using genesis restarts. {synopsis}
+
+**NOTE**: Regular genesis restarts are currently unsupported by relayers!
+
+### IBC Client Breaking Upgrades
+
+IBC client breaking upgrades are possible using genesis restarts.
+It is highly recommended to use the in-place migrations instead of a genesis restart.
+Genesis restarts should be used sparingly and as backup plans.
+
+Genesis restarts still require the usage of an IBC upgrade proposal in order to correctly upgrade counterparty clients.
+
+#### Step-by-Step Upgrade Process for SDK Chains
+
+If the IBC-connected chain is conducting an upgrade that will break counterparty clients, it must ensure that the upgrade is first supported by IBC using the [IBC Client Breaking Upgrade List](https://github.com/cosmos/ibc-go/blob/main/docs/ibc/upgrades/quick-guide.md#ibc-client-breaking-upgrades) and then execute the upgrade process described below in order to prevent counterparty clients from breaking.
+
+1. Create a 02-client [`UpgradeProposal`](https://github.com/cosmos/ibc-go/blob/main/docs/ibc/proto-docs.md#upgradeproposal) with an `UpgradePlan` and a new IBC ClientState in the `UpgradedClientState` field. Note that the `UpgradePlan` must specify an upgrade height **only** (no upgrade time), and the `ClientState` should only include the fields common to all valid clients and zero out any client-customizable fields (such as TrustingPeriod).
+2. Vote on and pass the `UpgradeProposal`
+3. Halt the node after successful upgrade.
+4. Export the genesis file.
+5. Swap to the new binary.
+6. Run migrations on the genesis file.
+7. Remove the `UpgradeProposal` plan from the genesis file. This may be done by migrations.
+8. Change desired chain-specific fields (chain id, unbonding period, etc). This may be done by migrations.
+9. Reset the node's data.
+10. Start the chain.
+
+Upon the `UpgradeProposal` passing, the upgrade module will commit the UpgradedClient under the key: `upgrade/UpgradedIBCState/{upgradeHeight}/upgradedClient`. On the block right before the upgrade height, the upgrade module will also commit an initial consensus state for the next chain under the key: `upgrade/UpgradedIBCState/{upgradeHeight}/upgradedConsState`.
+
+Once the chain reaches the upgrade height and halts, a relayer can upgrade the counterparty clients to the last block of the old chain. They can then submit the proofs of the `UpgradedClient` and `UpgradedConsensusState` against this last block and upgrade the counterparty client.
+
+#### Step-by-Step Upgrade Process for Relayers Upgrading Counterparty Clients
+
+These steps are identical to the regular [IBC client breaking upgrade process](https://github.com/cosmos/ibc-go/blob/main/docs/ibc/upgrades/quick-guide.md#step-by-step-upgrade-process-for-relayers-upgrading-counterparty-clients).
+
+### Non-IBC Client Breaking Upgrades
+
+While ibc-go supports genesis restarts which do not break IBC clients, relayers do not support this upgrade path.
+Here is a tracking issue on [Hermes](https://github.com/informalsystems/ibc-rs/issues/1152).
+Please do not attempt a regular genesis restart unless you have a tool to update counterparty clients correctly.
+
+
+
diff --git a/docs/ibc/upgrades/quick-guide.md b/docs/ibc/upgrades/quick-guide.md
index 4717e52f..2c82b3a9 100644
--- a/docs/ibc/upgrades/quick-guide.md
+++ b/docs/ibc/upgrades/quick-guide.md
@@ -30,10 +30,10 @@ Note: Since upgrades are only implemented for Tendermint clients, this doc only
If the IBC-connected chain is conducting an upgrade that will break counterparty clients, it must ensure that the upgrade is first supported by IBC using the list above and then execute the upgrade process described below in order to prevent counterparty clients from breaking.
-1. Create a `SoftwareUpgradeProposal` with an `UpgradePlan` that includes the new IBC ClientState in the `UpgradedClientState`. Note that the `UpgradePlan` must specify an upgrade height **only** (no upgrade time), and the `ClientState` should only include the fields common to all valid clients and zero out any client-customizable fields (such as TrustingPeriod).
-2. Vote on and pass the `SoftwareUpgradeProposal`
+1. Create a 02-client [`UpgradeProposal`](https://github.com/cosmos/ibc-go/blob/main/docs/ibc/proto-docs.md#upgradeproposal) with an `UpgradePlan` and a new IBC ClientState in the `UpgradedClientState` field. Note that the `UpgradePlan` must specify an upgrade height **only** (no upgrade time), and the `ClientState` should only include the fields common to all valid clients and zero out any client-customizable fields (such as TrustingPeriod).
+2. Vote on and pass the `UpgradeProposal`
-Upon the `SoftwareUpgradeProposal` passing, the upgrade module will commit the UpgradedClient under the key: `upgrade/UpgradedIBCState/{upgradeHeight}/upgradedClient`. On the block right before the upgrade height, the upgrade module will also commit an initial consensus state for the next chain under the key: `upgrade/UpgradedIBCState/{upgradeHeight}/upgradedConsState`.
+Upon the `UpgradeProposal` passing, the upgrade module will commit the UpgradedClient under the key: `upgrade/UpgradedIBCState/{upgradeHeight}/upgradedClient`. On the block right before the upgrade height, the upgrade module will also commit an initial consensus state for the next chain under the key: `upgrade/UpgradedIBCState/{upgradeHeight}/upgradedConsState`.
Once the chain reaches the upgrade height and halts, a relayer can upgrade the counterparty clients to the last block of the old chain. They can then submit the proofs of the `UpgradedClient` and `UpgradedConsensusState` against this last block and upgrade the counterparty client.
@@ -51,4 +51,4 @@ Thus, the upgrade process for relayers trying to upgrade the counterparty client
The Tendermint client on the counterparty chain will verify that the upgrading chain did indeed commit to the upgraded client and upgraded consensus state at the upgrade height (since the upgrade height is included in the key). If the proofs are verified against the upgrade height, then the client will upgrade to the new client while retaining all of its client-customized fields. Thus, it will retain its old TrustingPeriod, TrustLevel, MaxClockDrift, etc; while adopting the new chain-specified fields such as UnbondingPeriod, ChainId, UpgradePath, etc. Note, this can lead to an invalid client since the old client-chosen fields may no longer be valid given the new chain-chosen fields. Upgrading chains should try to avoid these situations by not altering parameters that can break old clients. For an example, see the UnbondingPeriod example in the supported upgrades section.
-The upgraded consensus state will serve purely as a basis of trust for future `UpdateClientMsgs` and will not contain a consensus root to perform proof verification against. Thus, relayers must submit an `UpdateClientMsg` with a header from the new chain so that the connection can be used for proof verification again.
\ No newline at end of file
+The upgraded consensus state will serve purely as a basis of trust for future `UpdateClientMsgs` and will not contain a consensus root to perform proof verification against. Thus, relayers must submit an `UpdateClientMsg` with a header from the new chain so that the connection can be used for proof verification again.
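For readers who want to inspect the committed upgrade state referenced in both docs above, a small, hedged Go example of how the height-prefixed sub-keys can be reconstructed, assuming the SDK's `x/upgrade` key helpers (the leading `upgrade/` segment in the quoted paths comes from the upgrade module's store key):

```go
package main

import (
	"fmt"

	upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
)

func main() {
	upgradeHeight := int64(100) // illustrative upgrade height

	// These helpers produce the sub-keys under the upgrade module store where
	// the UpgradedClient and UpgradedConsensusState are committed.
	fmt.Printf("%s\n", upgradetypes.UpgradedClientKey(upgradeHeight))    // upgradedIBCState/100/upgradedClient
	fmt.Printf("%s\n", upgradetypes.UpgradedConsStateKey(upgradeHeight)) // upgradedIBCState/100/upgradedConsState
}
```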
From 099d7042a5b95b294b52c8e36a07307a4418773f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?colin=20axn=C3=A9r?=
<25233464+colin-axner@users.noreply.github.com>
Date: Thu, 15 Jul 2021 11:43:56 +0200
Subject: [PATCH 098/393] bump SDK dep to v0.43.0-rc1 (#254)
---
go.mod | 2 +-
go.sum | 4 ++--
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/go.mod b/go.mod
index 50fe2ef0..c0fc5269 100644
--- a/go.mod
+++ b/go.mod
@@ -7,7 +7,7 @@ replace github.com/gogo/protobuf => github.com/regen-network/protobuf v1.3.3-alp
require (
github.com/armon/go-metrics v0.3.9
github.com/confio/ics23/go v0.6.6
- github.com/cosmos/cosmos-sdk v0.43.0-rc0
+ github.com/cosmos/cosmos-sdk v0.43.0-rc1
github.com/gogo/protobuf v1.3.3
github.com/golang/protobuf v1.5.2
github.com/gorilla/mux v1.8.0
diff --git a/go.sum b/go.sum
index fdefb042..d98c7973 100644
--- a/go.sum
+++ b/go.sum
@@ -165,8 +165,8 @@ github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7
github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-github.com/cosmos/cosmos-sdk v0.43.0-rc0 h1:+WGHEo1N/2zRSpWpKmuquTjDskL4j9K6zTc7CfDpfOM=
-github.com/cosmos/cosmos-sdk v0.43.0-rc0/go.mod h1:ctcrTEAhei9s8O3KSNvL0dxe+fVQGp07QyRb/7H9JYE=
+github.com/cosmos/cosmos-sdk v0.43.0-rc1 h1:3QGgMqwLmzW+015P4ZEIQ+wRj7TrVU063D2QpHc2Syw=
+github.com/cosmos/cosmos-sdk v0.43.0-rc1/go.mod h1:ctcrTEAhei9s8O3KSNvL0dxe+fVQGp07QyRb/7H9JYE=
github.com/cosmos/go-bip39 v0.0.0-20180819234021-555e2067c45d/go.mod h1:tSxLoYXyBmiFeKpvmq4dzayMdCjCnu8uqmCysIGBT2Y=
github.com/cosmos/go-bip39 v1.0.0 h1:pcomnQdrdH22njcAatO0yWojsUnCO3y2tNoV1cb6hHY=
github.com/cosmos/go-bip39 v1.0.0/go.mod h1:RNJv0H/pOIVgxw6KS7QeX2a0Uo0aKUlfhZ4xuwvCdJw=
From cde1e0c2b99f60d903d33f1f3c37d9255b8097e9 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 19 Jul 2021 16:42:32 +0200
Subject: [PATCH 099/393] Bump github.com/cosmos/cosmos-sdk from 0.43.0-rc1 to
0.43.0-rc2 (#269)
Bumps [github.com/cosmos/cosmos-sdk](https://github.com/cosmos/cosmos-sdk) from 0.43.0-rc1 to 0.43.0-rc2.
- [Release notes](https://github.com/cosmos/cosmos-sdk/releases)
- [Changelog](https://github.com/cosmos/cosmos-sdk/blob/v0.43.0-rc2/CHANGELOG.md)
- [Commits](https://github.com/cosmos/cosmos-sdk/compare/v0.43.0-rc1...v0.43.0-rc2)
---
updated-dependencies:
- dependency-name: github.com/cosmos/cosmos-sdk
dependency-type: direct:production
update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
go.mod | 2 +-
go.sum | 4 ++--
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/go.mod b/go.mod
index c0fc5269..33a5660f 100644
--- a/go.mod
+++ b/go.mod
@@ -7,7 +7,7 @@ replace github.com/gogo/protobuf => github.com/regen-network/protobuf v1.3.3-alp
require (
github.com/armon/go-metrics v0.3.9
github.com/confio/ics23/go v0.6.6
- github.com/cosmos/cosmos-sdk v0.43.0-rc1
+ github.com/cosmos/cosmos-sdk v0.43.0-rc2
github.com/gogo/protobuf v1.3.3
github.com/golang/protobuf v1.5.2
github.com/gorilla/mux v1.8.0
diff --git a/go.sum b/go.sum
index d98c7973..af3a1b5d 100644
--- a/go.sum
+++ b/go.sum
@@ -165,8 +165,8 @@ github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7
github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-github.com/cosmos/cosmos-sdk v0.43.0-rc1 h1:3QGgMqwLmzW+015P4ZEIQ+wRj7TrVU063D2QpHc2Syw=
-github.com/cosmos/cosmos-sdk v0.43.0-rc1/go.mod h1:ctcrTEAhei9s8O3KSNvL0dxe+fVQGp07QyRb/7H9JYE=
+github.com/cosmos/cosmos-sdk v0.43.0-rc2 h1:9xww4vDnsNyZyF1p9U4zpc8tc5Ctx763WQWLccddP8A=
+github.com/cosmos/cosmos-sdk v0.43.0-rc2/go.mod h1:ctcrTEAhei9s8O3KSNvL0dxe+fVQGp07QyRb/7H9JYE=
github.com/cosmos/go-bip39 v0.0.0-20180819234021-555e2067c45d/go.mod h1:tSxLoYXyBmiFeKpvmq4dzayMdCjCnu8uqmCysIGBT2Y=
github.com/cosmos/go-bip39 v1.0.0 h1:pcomnQdrdH22njcAatO0yWojsUnCO3y2tNoV1cb6hHY=
github.com/cosmos/go-bip39 v1.0.0/go.mod h1:RNJv0H/pOIVgxw6KS7QeX2a0Uo0aKUlfhZ4xuwvCdJw=
From c92e0d6a938e9cb0c8e16d35f09a1998b7dc2f74 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?colin=20axn=C3=A9r?=
<25233464+colin-axner@users.noreply.github.com>
Date: Mon, 19 Jul 2021 17:16:47 +0200
Subject: [PATCH 100/393] generate swagger files (#267)
---
docs/client/config.json | 42 +
docs/client/swagger-ui/swagger.yaml | 13961 ++++++++++++++++++++++++++
scripts/protoc-swagger-gen.sh | 2 +-
3 files changed, 14004 insertions(+), 1 deletion(-)
create mode 100644 docs/client/config.json
create mode 100644 docs/client/swagger-ui/swagger.yaml
diff --git a/docs/client/config.json b/docs/client/config.json
new file mode 100644
index 00000000..cfc6dc7b
--- /dev/null
+++ b/docs/client/config.json
@@ -0,0 +1,42 @@
+{
+ "swagger": "2.0",
+ "info": {
+ "title": "IBC-GO - gRPC Gateway docs",
+ "description": "A REST interface for state queries",
+ "version": "1.0.0"
+ },
+ "apis": [
+ {
+ "url": "./tmp-swagger-gen/ibc/applications/transfer/v1/query.swagger.json",
+ "operationIds": {
+ "rename": {
+ "Params": "TransferParams"
+ }
+ }
+ },
+ {
+ "url": "./tmp-swagger-gen/ibc/core/client/v1/query.swagger.json",
+ "operationIds": {
+ "rename": {
+ "Params": "ClientParams"
+ }
+ }
+ },
+ {
+ "url": "./tmp-swagger-gen/ibc/core/connection/v1/query.swagger.json",
+ "operationIds": {
+ "rename": {
+ "Params": "ConnectionParams"
+ }
+ }
+ },
+ {
+ "url": "./tmp-swagger-gen/ibc/core/channel/v1/query.swagger.json",
+ "operationIds": {
+ "rename": {
+ "Params": "ChannelParams"
+ }
+ }
+    }
+ ]
+}
diff --git a/docs/client/swagger-ui/swagger.yaml b/docs/client/swagger-ui/swagger.yaml
new file mode 100644
index 00000000..835c894c
--- /dev/null
+++ b/docs/client/swagger-ui/swagger.yaml
@@ -0,0 +1,13961 @@
+swagger: '2.0'
+info:
+ title: IBC-GO - gRPC Gateway docs
+ description: A REST interface for state queries
+ version: 1.0.0
+paths:
+ /ibc/apps/transfer/v1/denom_traces:
+ get:
+ summary: DenomTraces queries all denomination traces.
+ operationId: DenomTraces
+ responses:
+ '200':
+ description: A successful response.
+ schema:
+ type: object
+ properties:
+ denom_traces:
+ type: array
+ items:
+ type: object
+ properties:
+ path:
+ type: string
+ description: >-
+ path defines the chain of port/channel identifiers used
+ for tracing the
+
+ source of the fungible token.
+ base_denom:
+ type: string
+ description: base denomination of the relayed fungible token.
+ description: >-
+ DenomTrace contains the base denomination for ICS20 fungible
+ tokens and the
+
+ source tracing information path.
+ description: denom_traces returns all denominations trace information.
+ pagination:
+ description: pagination defines the pagination in the response.
+ type: object
+ properties:
+ next_key:
+ type: string
+ format: byte
+ title: |-
+ next_key is the key to be passed to PageRequest.key to
+ query the next page most efficiently
+ total:
+ type: string
+ format: uint64
+ title: >-
+ total is total number of results available if
+ PageRequest.count_total
+
+ was set, its value is undefined otherwise
+ description: >-
+ QueryConnectionsResponse is the response type for the
+ Query/DenomTraces RPC
+
+ method.
+ default:
+ description: An unexpected error response
+ schema:
+ type: object
+ properties:
+ error:
+ type: string
+ code:
+ type: integer
+ format: int32
+ message:
+ type: string
+ details:
+ type: array
+ items:
+ type: object
+ properties:
+ type_url:
+ type: string
+ value:
+ type: string
+ format: byte
+ parameters:
+ - name: pagination.key
+ description: |-
+ key is a value returned in PageResponse.next_key to begin
+ querying the next page most efficiently. Only one of offset or key
+ should be set.
+ in: query
+ required: false
+ type: string
+ format: byte
+ - name: pagination.offset
+ description: >-
+ offset is a numeric offset that can be used when key is unavailable.
+
+ It is less efficient than using key. Only one of offset or key
+ should
+
+ be set.
+ in: query
+ required: false
+ type: string
+ format: uint64
+ - name: pagination.limit
+ description: >-
+ limit is the total number of results to be returned in the result
+ page.
+
+ If left empty it will default to a value to be set by each app.
+ in: query
+ required: false
+ type: string
+ format: uint64
+ - name: pagination.count_total
+ description: >-
+ count_total is set to true to indicate that the result set should
+ include
+
+ a count of the total number of items available for pagination in
+ UIs.
+
+ count_total is only respected when offset is used. It is ignored
+ when key
+
+ is set.
+ in: query
+ required: false
+ type: boolean
+ format: boolean
+ tags:
+ - Query
+ '/ibc/apps/transfer/v1/denom_traces/{hash}':
+ get:
+ summary: DenomTrace queries a denomination trace information.
+ operationId: DenomTrace
+ responses:
+ '200':
+ description: A successful response.
+ schema:
+ type: object
+ properties:
+ denom_trace:
+ type: object
+ properties:
+ path:
+ type: string
+ description: >-
+ path defines the chain of port/channel identifiers used
+ for tracing the
+
+ source of the fungible token.
+ base_denom:
+ type: string
+ description: base denomination of the relayed fungible token.
+ description: >-
+ DenomTrace contains the base denomination for ICS20 fungible
+ tokens and the
+
+ source tracing information path.
+ description: >-
+ QueryDenomTraceResponse is the response type for the
+ Query/DenomTrace RPC
+
+ method.
+ default:
+ description: An unexpected error response
+ schema:
+ type: object
+ properties:
+ error:
+ type: string
+ code:
+ type: integer
+ format: int32
+ message:
+ type: string
+ details:
+ type: array
+ items:
+ type: object
+ properties:
+ type_url:
+ type: string
+ value:
+ type: string
+ format: byte
+ parameters:
+ - name: hash
+ description: hash (in hex format) of the denomination trace information.
+ in: path
+ required: true
+ type: string
+ tags:
+ - Query
+ /ibc/apps/transfer/v1/params:
+ get:
+ summary: Params queries all parameters of the ibc-transfer module.
+ operationId: TransferParams
+ responses:
+ '200':
+ description: A successful response.
+ schema:
+ type: object
+ properties:
+ params:
+ description: params defines the parameters of the module.
+ type: object
+ properties:
+ send_enabled:
+ type: boolean
+ format: boolean
+ description: >-
+ send_enabled enables or disables all cross-chain token
+ transfers from this
+
+ chain.
+ receive_enabled:
+ type: boolean
+ format: boolean
+ description: >-
+ receive_enabled enables or disables all cross-chain token
+ transfers to this
+
+ chain.
+ description: >-
+ QueryParamsResponse is the response type for the Query/Params RPC
+ method.
+ default:
+ description: An unexpected error response
+ schema:
+ type: object
+ properties:
+ error:
+ type: string
+ code:
+ type: integer
+ format: int32
+ message:
+ type: string
+ details:
+ type: array
+ items:
+ type: object
+ properties:
+ type_url:
+ type: string
+ value:
+ type: string
+ format: byte
+ tags:
+ - Query
+ /ibc/client/v1/params:
+ get:
+ summary: ClientParams queries all parameters of the ibc client.
+ operationId: ClientParams
+ responses:
+ '200':
+ description: A successful response.
+ schema:
+ type: object
+ properties:
+ params:
+ description: params defines the parameters of the module.
+ type: object
+ properties:
+ allowed_clients:
+ type: array
+ items:
+ type: string
+ description: >-
+ allowed_clients defines the list of allowed client state
+ types.
+ description: >-
+ QueryClientParamsResponse is the response type for the
+ Query/ClientParams RPC
+
+ method.
+ default:
+ description: An unexpected error response
+ schema:
+ type: object
+ properties:
+ error:
+ type: string
+ code:
+ type: integer
+ format: int32
+ message:
+ type: string
+ details:
+ type: array
+ items:
+ type: object
+ properties:
+ type_url:
+ type: string
+ description: >-
+ A URL/resource name that uniquely identifies the type of
+ the serialized
+
+ protocol buffer message. This string must contain at
+ least
+
+ one "/" character. The last segment of the URL's path
+ must represent
+
+ the fully qualified name of the type (as in
+
+ `path/google.protobuf.Duration`). The name should be in
+ a canonical form
+
+ (e.g., leading "." is not accepted).
+
+
+ In practice, teams usually precompile into the binary
+ all types that they
+
+ expect it to use in the context of Any. However, for
+ URLs which use the
+
+ scheme `http`, `https`, or no scheme, one can optionally
+ set up a type
+
+ server that maps type URLs to message definitions as
+ follows:
+
+
+ * If no scheme is provided, `https` is assumed.
+
+ * An HTTP GET on the URL must yield a
+ [google.protobuf.Type][]
+ value in binary format, or produce an error.
+ * Applications are allowed to cache lookup results based
+ on the
+ URL, or have them precompiled into a binary to avoid any
+ lookup. Therefore, binary compatibility needs to be preserved
+ on changes to types. (Use versioned type names to manage
+ breaking changes.)
+
+ Note: this functionality is not currently available in
+ the official
+
+ protobuf release, and it is not used for type URLs
+ beginning with
+
+ type.googleapis.com.
+
+
+ Schemes other than `http`, `https` (or the empty scheme)
+ might be
+
+ used with implementation specific semantics.
+ value:
+ type: string
+ format: byte
+ description: >-
+ Must be a valid serialized protocol buffer of the above
+ specified type.
+ description: >-
+ `Any` contains an arbitrary serialized protocol buffer
+ message along with a
+
+ URL that describes the type of the serialized message.
+
+
+ Protobuf library provides support to pack/unpack Any values
+ in the form
+
+ of utility functions or additional generated methods of the
+ Any type.
+
+
+ Example 1: Pack and unpack a message in C++.
+
+ Foo foo = ...;
+ Any any;
+ any.PackFrom(foo);
+ ...
+ if (any.UnpackTo(&foo)) {
+ ...
+ }
+
+ Example 2: Pack and unpack a message in Java.
+
+ Foo foo = ...;
+ Any any = Any.pack(foo);
+ ...
+ if (any.is(Foo.class)) {
+ foo = any.unpack(Foo.class);
+ }
+
+ Example 3: Pack and unpack a message in Python.
+
+ foo = Foo(...)
+ any = Any()
+ any.Pack(foo)
+ ...
+ if any.Is(Foo.DESCRIPTOR):
+ any.Unpack(foo)
+ ...
+
+ Example 4: Pack and unpack a message in Go
+
+ foo := &pb.Foo{...}
+ any, err := ptypes.MarshalAny(foo)
+ ...
+ foo := &pb.Foo{}
+ if err := ptypes.UnmarshalAny(any, foo); err != nil {
+ ...
+ }
+
+ The pack methods provided by protobuf library will by
+ default use
+
+ 'type.googleapis.com/full.type.name' as the type URL and the
+ unpack
+
+ methods only use the fully qualified type name after the
+ last '/'
+
+ in the type URL, for example "foo.bar.com/x/y.z" will yield
+ type
+
+ name "y.z".
+
+
+
+ JSON
+
+ ====
+
+ The JSON representation of an `Any` value uses the regular
+
+ representation of the deserialized, embedded message, with
+ an
+
+ additional field `@type` which contains the type URL.
+ Example:
+
+ package google.profile;
+ message Person {
+ string first_name = 1;
+ string last_name = 2;
+ }
+
+ {
+ "@type": "type.googleapis.com/google.profile.Person",
+ "firstName": ,
+ "lastName":
+ }
+
+ If the embedded message type is well-known and has a custom
+ JSON
+
+ representation, that representation will be embedded adding
+ a field
+
+ `value` which holds the custom JSON in addition to the
+ `@type`
+
+ field. Example (for message [google.protobuf.Duration][]):
+
+ {
+ "@type": "type.googleapis.com/google.protobuf.Duration",
+ "value": "1.212s"
+ }
+ tags:
+ - Query
+ /ibc/core/client/v1/client_states:
+ get:
+ summary: ClientStates queries all the IBC light clients of a chain.
+ operationId: ClientStates
+ responses:
+ '200':
+ description: A successful response.
+ schema:
+ type: object
+ properties:
+ client_states:
+ type: array
+ items:
+ type: object
+ properties:
+ client_id:
+ type: string
+ title: client identifier
+ client_state:
+ type: object
+ properties:
+ type_url:
+ type: string
+ description: >-
+ A URL/resource name that uniquely identifies the
+ type of the serialized
+
+ protocol buffer message. This string must contain at
+ least
+
+ one "/" character. The last segment of the URL's
+ path must represent
+
+ the fully qualified name of the type (as in
+
+ `path/google.protobuf.Duration`). The name should be
+ in a canonical form
+
+ (e.g., leading "." is not accepted).
+
+
+ In practice, teams usually precompile into the
+ binary all types that they
+
+ expect it to use in the context of Any. However, for
+ URLs which use the
+
+ scheme `http`, `https`, or no scheme, one can
+ optionally set up a type
+
+ server that maps type URLs to message definitions as
+ follows:
+
+
+ * If no scheme is provided, `https` is assumed.
+
+ * An HTTP GET on the URL must yield a
+ [google.protobuf.Type][]
+ value in binary format, or produce an error.
+ * Applications are allowed to cache lookup results
+ based on the
+ URL, or have them precompiled into a binary to avoid any
+ lookup. Therefore, binary compatibility needs to be preserved
+ on changes to types. (Use versioned type names to manage
+ breaking changes.)
+
+ Note: this functionality is not currently available
+ in the official
+
+ protobuf release, and it is not used for type URLs
+ beginning with
+
+ type.googleapis.com.
+
+
+ Schemes other than `http`, `https` (or the empty
+ scheme) might be
+
+ used with implementation specific semantics.
+ value:
+ type: string
+ format: byte
+ description: >-
+ Must be a valid serialized protocol buffer of the
+ above specified type.
+ description: >-
+ `Any` contains an arbitrary serialized protocol buffer
+ message along with a
+
+ URL that describes the type of the serialized message.
+
+
+ Protobuf library provides support to pack/unpack Any
+ values in the form
+
+ of utility functions or additional generated methods of
+ the Any type.
+
+
+ Example 1: Pack and unpack a message in C++.
+
+ Foo foo = ...;
+ Any any;
+ any.PackFrom(foo);
+ ...
+ if (any.UnpackTo(&foo)) {
+ ...
+ }
+
+ Example 2: Pack and unpack a message in Java.
+
+ Foo foo = ...;
+ Any any = Any.pack(foo);
+ ...
+ if (any.is(Foo.class)) {
+ foo = any.unpack(Foo.class);
+ }
+
+ Example 3: Pack and unpack a message in Python.
+
+ foo = Foo(...)
+ any = Any()
+ any.Pack(foo)
+ ...
+ if any.Is(Foo.DESCRIPTOR):
+ any.Unpack(foo)
+ ...
+
+ Example 4: Pack and unpack a message in Go
+
+ foo := &pb.Foo{...}
+ any, err := ptypes.MarshalAny(foo)
+ ...
+ foo := &pb.Foo{}
+ if err := ptypes.UnmarshalAny(any, foo); err != nil {
+ ...
+ }
+
+ The pack methods provided by protobuf library will by
+ default use
+
+ 'type.googleapis.com/full.type.name' as the type URL and
+ the unpack
+
+ methods only use the fully qualified type name after the
+ last '/'
+
+ in the type URL, for example "foo.bar.com/x/y.z" will
+ yield type
+
+ name "y.z".
+
+
+
+ JSON
+
+ ====
+
+ The JSON representation of an `Any` value uses the
+ regular
+
+ representation of the deserialized, embedded message,
+ with an
+
+ additional field `@type` which contains the type URL.
+ Example:
+
+ package google.profile;
+ message Person {
+ string first_name = 1;
+ string last_name = 2;
+ }
+
+ {
+ "@type": "type.googleapis.com/google.profile.Person",
+ "firstName": ,
+ "lastName":
+ }
+
+ If the embedded message type is well-known and has a
+ custom JSON
+
+ representation, that representation will be embedded
+ adding a field
+
+ `value` which holds the custom JSON in addition to the
+ `@type`
+
+ field. Example (for message
+ [google.protobuf.Duration][]):
+
+ {
+ "@type": "type.googleapis.com/google.protobuf.Duration",
+ "value": "1.212s"
+ }
+ title: client state
+ description: >-
+ IdentifiedClientState defines a client state with an
+ additional client
+
+ identifier field.
+ description: list of stored ClientStates of the chain.
+ pagination:
+ title: pagination response
+ type: object
+ properties:
+ next_key:
+ type: string
+ format: byte
+ title: |-
+ next_key is the key to be passed to PageRequest.key to
+ query the next page most efficiently
+ total:
+ type: string
+ format: uint64
+ title: >-
+                  total is the total number of results available if
+                  PageRequest.count_total
+
+                  was set; its value is undefined otherwise
+ description: >-
+ PageResponse is to be embedded in gRPC response messages where
+ the
+
+ corresponding request message has used PageRequest.
+
+ message SomeResponse {
+ repeated Bar results = 1;
+ PageResponse page = 2;
+ }
+ description: >-
+ QueryClientStatesResponse is the response type for the
+ Query/ClientStates RPC
+
+ method.
+ default:
+ description: An unexpected error response
+ schema:
+ type: object
+ properties:
+ error:
+ type: string
+ code:
+ type: integer
+ format: int32
+ message:
+ type: string
+ details:
+ type: array
+ items:
+ type: object
+ properties:
+ type_url:
+ type: string
+ description: >-
+ A URL/resource name that uniquely identifies the type of
+ the serialized
+
+ protocol buffer message. This string must contain at
+ least
+
+ one "/" character. The last segment of the URL's path
+ must represent
+
+ the fully qualified name of the type (as in
+
+ `path/google.protobuf.Duration`). The name should be in
+ a canonical form
+
+ (e.g., leading "." is not accepted).
+
+
+ In practice, teams usually precompile into the binary
+ all types that they
+
+ expect it to use in the context of Any. However, for
+ URLs which use the
+
+ scheme `http`, `https`, or no scheme, one can optionally
+ set up a type
+
+ server that maps type URLs to message definitions as
+ follows:
+
+
+ * If no scheme is provided, `https` is assumed.
+
+ * An HTTP GET on the URL must yield a
+ [google.protobuf.Type][]
+ value in binary format, or produce an error.
+ * Applications are allowed to cache lookup results based
+ on the
+ URL, or have them precompiled into a binary to avoid any
+ lookup. Therefore, binary compatibility needs to be preserved
+ on changes to types. (Use versioned type names to manage
+ breaking changes.)
+
+ Note: this functionality is not currently available in
+ the official
+
+ protobuf release, and it is not used for type URLs
+ beginning with
+
+ type.googleapis.com.
+
+
+ Schemes other than `http`, `https` (or the empty scheme)
+ might be
+
+ used with implementation specific semantics.
+ value:
+ type: string
+ format: byte
+ description: >-
+ Must be a valid serialized protocol buffer of the above
+ specified type.
+ description: >-
+ `Any` contains an arbitrary serialized protocol buffer
+ message along with a
+
+ URL that describes the type of the serialized message.
+
+
+ Protobuf library provides support to pack/unpack Any values
+ in the form
+
+ of utility functions or additional generated methods of the
+ Any type.
+
+
+ Example 1: Pack and unpack a message in C++.
+
+ Foo foo = ...;
+ Any any;
+ any.PackFrom(foo);
+ ...
+ if (any.UnpackTo(&foo)) {
+ ...
+ }
+
+ Example 2: Pack and unpack a message in Java.
+
+ Foo foo = ...;
+ Any any = Any.pack(foo);
+ ...
+ if (any.is(Foo.class)) {
+ foo = any.unpack(Foo.class);
+ }
+
+ Example 3: Pack and unpack a message in Python.
+
+ foo = Foo(...)
+ any = Any()
+ any.Pack(foo)
+ ...
+ if any.Is(Foo.DESCRIPTOR):
+ any.Unpack(foo)
+ ...
+
+ Example 4: Pack and unpack a message in Go
+
+ foo := &pb.Foo{...}
+ any, err := ptypes.MarshalAny(foo)
+ ...
+ foo := &pb.Foo{}
+ if err := ptypes.UnmarshalAny(any, foo); err != nil {
+ ...
+ }
+
+ The pack methods provided by protobuf library will by
+ default use
+
+ 'type.googleapis.com/full.type.name' as the type URL and the
+ unpack
+
+ methods only use the fully qualified type name after the
+ last '/'
+
+ in the type URL, for example "foo.bar.com/x/y.z" will yield
+ type
+
+ name "y.z".
+
+
+
+ JSON
+
+ ====
+
+ The JSON representation of an `Any` value uses the regular
+
+ representation of the deserialized, embedded message, with
+ an
+
+ additional field `@type` which contains the type URL.
+ Example:
+
+ package google.profile;
+ message Person {
+ string first_name = 1;
+ string last_name = 2;
+ }
+
+ {
+ "@type": "type.googleapis.com/google.profile.Person",
+ "firstName": ,
+ "lastName":
+ }
+
+ If the embedded message type is well-known and has a custom
+ JSON
+
+ representation, that representation will be embedded adding
+ a field
+
+ `value` which holds the custom JSON in addition to the
+ `@type`
+
+ field. Example (for message [google.protobuf.Duration][]):
+
+ {
+ "@type": "type.googleapis.com/google.protobuf.Duration",
+ "value": "1.212s"
+ }
+ parameters:
+ - name: pagination.key
+ description: |-
+ key is a value returned in PageResponse.next_key to begin
+ querying the next page most efficiently. Only one of offset or key
+ should be set.
+ in: query
+ required: false
+ type: string
+ format: byte
+ - name: pagination.offset
+ description: >-
+ offset is a numeric offset that can be used when key is unavailable.
+
+ It is less efficient than using key. Only one of offset or key
+ should
+
+ be set.
+ in: query
+ required: false
+ type: string
+ format: uint64
+ - name: pagination.limit
+ description: >-
+ limit is the total number of results to be returned in the result
+ page.
+
+ If left empty it will default to a value to be set by each app.
+ in: query
+ required: false
+ type: string
+ format: uint64
+ - name: pagination.count_total
+ description: >-
+ count_total is set to true to indicate that the result set should
+ include
+
+ a count of the total number of items available for pagination in
+ UIs.
+
+ count_total is only respected when offset is used. It is ignored
+ when key
+
+ is set.
+ in: query
+ required: false
+ type: boolean
+ format: boolean
+ tags:
+ - Query
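+  # Illustrative request for the ClientStates endpoint above (an editorial
+  # sketch, not part of the generated specification; host omitted). The query
+  # parameters are the pagination fields defined by this spec:
+  #
+  #   GET /ibc/core/client/v1/client_states?pagination.limit=10&pagination.count_total=true
+  #
+  # The response carries `client_states` plus a `pagination` object whose
+  # `next_key` can be passed back as `pagination.key` to request the next page.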
+ '/ibc/core/client/v1/client_states/{client_id}':
+ get:
+ summary: ClientState queries an IBC light client.
+ operationId: ClientState
+ responses:
+ '200':
+ description: A successful response.
+ schema:
+ type: object
+ properties:
+ client_state:
+ type: object
+ properties:
+ type_url:
+ type: string
+ description: >-
+ A URL/resource name that uniquely identifies the type of
+ the serialized
+
+ protocol buffer message. This string must contain at least
+
+ one "/" character. The last segment of the URL's path must
+ represent
+
+ the fully qualified name of the type (as in
+
+ `path/google.protobuf.Duration`). The name should be in a
+ canonical form
+
+ (e.g., leading "." is not accepted).
+
+
+ In practice, teams usually precompile into the binary all
+ types that they
+
+ expect it to use in the context of Any. However, for URLs
+ which use the
+
+ scheme `http`, `https`, or no scheme, one can optionally
+ set up a type
+
+ server that maps type URLs to message definitions as
+ follows:
+
+
+ * If no scheme is provided, `https` is assumed.
+
+ * An HTTP GET on the URL must yield a
+ [google.protobuf.Type][]
+ value in binary format, or produce an error.
+ * Applications are allowed to cache lookup results based
+ on the
+ URL, or have them precompiled into a binary to avoid any
+ lookup. Therefore, binary compatibility needs to be preserved
+ on changes to types. (Use versioned type names to manage
+ breaking changes.)
+
+ Note: this functionality is not currently available in the
+ official
+
+ protobuf release, and it is not used for type URLs
+ beginning with
+
+ type.googleapis.com.
+
+
+ Schemes other than `http`, `https` (or the empty scheme)
+ might be
+
+ used with implementation specific semantics.
+ value:
+ type: string
+ format: byte
+ description: >-
+ Must be a valid serialized protocol buffer of the above
+ specified type.
+ description: >-
+ `Any` contains an arbitrary serialized protocol buffer message
+ along with a
+
+ URL that describes the type of the serialized message.
+
+
+ Protobuf library provides support to pack/unpack Any values in
+ the form
+
+ of utility functions or additional generated methods of the
+ Any type.
+
+
+ Example 1: Pack and unpack a message in C++.
+
+ Foo foo = ...;
+ Any any;
+ any.PackFrom(foo);
+ ...
+ if (any.UnpackTo(&foo)) {
+ ...
+ }
+
+ Example 2: Pack and unpack a message in Java.
+
+ Foo foo = ...;
+ Any any = Any.pack(foo);
+ ...
+ if (any.is(Foo.class)) {
+ foo = any.unpack(Foo.class);
+ }
+
+ Example 3: Pack and unpack a message in Python.
+
+ foo = Foo(...)
+ any = Any()
+ any.Pack(foo)
+ ...
+ if any.Is(Foo.DESCRIPTOR):
+ any.Unpack(foo)
+ ...
+
+ Example 4: Pack and unpack a message in Go
+
+ foo := &pb.Foo{...}
+ any, err := ptypes.MarshalAny(foo)
+ ...
+ foo := &pb.Foo{}
+ if err := ptypes.UnmarshalAny(any, foo); err != nil {
+ ...
+ }
+
+ The pack methods provided by protobuf library will by default
+ use
+
+ 'type.googleapis.com/full.type.name' as the type URL and the
+ unpack
+
+ methods only use the fully qualified type name after the last
+ '/'
+
+ in the type URL, for example "foo.bar.com/x/y.z" will yield
+ type
+
+ name "y.z".
+
+
+
+ JSON
+
+ ====
+
+ The JSON representation of an `Any` value uses the regular
+
+ representation of the deserialized, embedded message, with an
+
+ additional field `@type` which contains the type URL. Example:
+
+ package google.profile;
+ message Person {
+ string first_name = 1;
+ string last_name = 2;
+ }
+
+ {
+ "@type": "type.googleapis.com/google.profile.Person",
+ "firstName": ,
+ "lastName":
+ }
+
+ If the embedded message type is well-known and has a custom
+ JSON
+
+ representation, that representation will be embedded adding a
+ field
+
+ `value` which holds the custom JSON in addition to the `@type`
+
+ field. Example (for message [google.protobuf.Duration][]):
+
+ {
+ "@type": "type.googleapis.com/google.protobuf.Duration",
+ "value": "1.212s"
+ }
+ title: client state associated with the request identifier
+ proof:
+ type: string
+ format: byte
+ title: merkle proof of existence
+ proof_height:
+ title: height at which the proof was retrieved
+ type: object
+ properties:
+ revision_number:
+ type: string
+ format: uint64
+ title: the revision that the client is currently on
+ revision_height:
+ type: string
+ format: uint64
+ title: the height within the given revision
+ description: >-
+ Normally the RevisionHeight is incremented at each height
+ while keeping
+
+ RevisionNumber the same. However some consensus algorithms may
+ choose to
+
+ reset the height in certain conditions e.g. hard forks,
+ state-machine
+
+                  breaking changes. In these cases, the RevisionNumber is
+ incremented so that
+
+                  height continues to be monotonically increasing even as the
+ RevisionHeight
+
+                  gets reset.
+ description: >-
+ QueryClientStateResponse is the response type for the
+ Query/ClientState RPC
+
+ method. Besides the client state, it includes a proof and the
+ height from
+
+ which the proof was retrieved.
+ default:
+ description: An unexpected error response
+ schema:
+ type: object
+ properties:
+ error:
+ type: string
+ code:
+ type: integer
+ format: int32
+ message:
+ type: string
+ details:
+ type: array
+ items:
+ type: object
+ properties:
+ type_url:
+ type: string
+ description: >-
+ A URL/resource name that uniquely identifies the type of
+ the serialized
+
+ protocol buffer message. This string must contain at
+ least
+
+ one "/" character. The last segment of the URL's path
+ must represent
+
+ the fully qualified name of the type (as in
+
+ `path/google.protobuf.Duration`). The name should be in
+ a canonical form
+
+ (e.g., leading "." is not accepted).
+
+
+ In practice, teams usually precompile into the binary
+ all types that they
+
+ expect it to use in the context of Any. However, for
+ URLs which use the
+
+ scheme `http`, `https`, or no scheme, one can optionally
+ set up a type
+
+ server that maps type URLs to message definitions as
+ follows:
+
+
+ * If no scheme is provided, `https` is assumed.
+
+ * An HTTP GET on the URL must yield a
+ [google.protobuf.Type][]
+ value in binary format, or produce an error.
+ * Applications are allowed to cache lookup results based
+ on the
+ URL, or have them precompiled into a binary to avoid any
+ lookup. Therefore, binary compatibility needs to be preserved
+ on changes to types. (Use versioned type names to manage
+ breaking changes.)
+
+ Note: this functionality is not currently available in
+ the official
+
+ protobuf release, and it is not used for type URLs
+ beginning with
+
+ type.googleapis.com.
+
+
+ Schemes other than `http`, `https` (or the empty scheme)
+ might be
+
+ used with implementation specific semantics.
+ value:
+ type: string
+ format: byte
+ description: >-
+ Must be a valid serialized protocol buffer of the above
+ specified type.
+ description: >-
+ `Any` contains an arbitrary serialized protocol buffer
+ message along with a
+
+ URL that describes the type of the serialized message.
+
+
+ Protobuf library provides support to pack/unpack Any values
+ in the form
+
+ of utility functions or additional generated methods of the
+ Any type.
+
+
+ Example 1: Pack and unpack a message in C++.
+
+ Foo foo = ...;
+ Any any;
+ any.PackFrom(foo);
+ ...
+ if (any.UnpackTo(&foo)) {
+ ...
+ }
+
+ Example 2: Pack and unpack a message in Java.
+
+ Foo foo = ...;
+ Any any = Any.pack(foo);
+ ...
+ if (any.is(Foo.class)) {
+ foo = any.unpack(Foo.class);
+ }
+
+ Example 3: Pack and unpack a message in Python.
+
+ foo = Foo(...)
+ any = Any()
+ any.Pack(foo)
+ ...
+ if any.Is(Foo.DESCRIPTOR):
+ any.Unpack(foo)
+ ...
+
+ Example 4: Pack and unpack a message in Go
+
+ foo := &pb.Foo{...}
+ any, err := ptypes.MarshalAny(foo)
+ ...
+ foo := &pb.Foo{}
+ if err := ptypes.UnmarshalAny(any, foo); err != nil {
+ ...
+ }
+
+ The pack methods provided by protobuf library will by
+ default use
+
+ 'type.googleapis.com/full.type.name' as the type URL and the
+ unpack
+
+ methods only use the fully qualified type name after the
+ last '/'
+
+ in the type URL, for example "foo.bar.com/x/y.z" will yield
+ type
+
+ name "y.z".
+
+
+
+ JSON
+
+ ====
+
+ The JSON representation of an `Any` value uses the regular
+
+ representation of the deserialized, embedded message, with
+ an
+
+ additional field `@type` which contains the type URL.
+ Example:
+
+ package google.profile;
+ message Person {
+ string first_name = 1;
+ string last_name = 2;
+ }
+
+ {
+ "@type": "type.googleapis.com/google.profile.Person",
+ "firstName": ,
+ "lastName":
+ }
+
+ If the embedded message type is well-known and has a custom
+ JSON
+
+ representation, that representation will be embedded adding
+ a field
+
+ `value` which holds the custom JSON in addition to the
+ `@type`
+
+ field. Example (for message [google.protobuf.Duration][]):
+
+ {
+ "@type": "type.googleapis.com/google.protobuf.Duration",
+ "value": "1.212s"
+ }
+ parameters:
+ - name: client_id
+ description: client state unique identifier
+ in: path
+ required: true
+ type: string
+ tags:
+ - Query
+ '/ibc/core/client/v1/client_status/{client_id}':
+ get:
+ summary: Status queries the status of an IBC client.
+ operationId: ClientStatus
+ responses:
+ '200':
+ description: A successful response.
+ schema:
+ type: object
+ properties:
+ status:
+ type: string
+ description: >-
+ QueryClientStatusResponse is the response type for the
+ Query/ClientStatus RPC
+
+ method. It returns the current status of the IBC client.
+ default:
+ description: An unexpected error response
+ schema:
+ type: object
+ properties:
+ error:
+ type: string
+ code:
+ type: integer
+ format: int32
+ message:
+ type: string
+ details:
+ type: array
+ items:
+ type: object
+ properties:
+ type_url:
+ type: string
+ description: >-
+ A URL/resource name that uniquely identifies the type of
+ the serialized
+
+ protocol buffer message. This string must contain at
+ least
+
+ one "/" character. The last segment of the URL's path
+ must represent
+
+ the fully qualified name of the type (as in
+
+ `path/google.protobuf.Duration`). The name should be in
+ a canonical form
+
+ (e.g., leading "." is not accepted).
+
+
+ In practice, teams usually precompile into the binary
+ all types that they
+
+ expect it to use in the context of Any. However, for
+ URLs which use the
+
+ scheme `http`, `https`, or no scheme, one can optionally
+ set up a type
+
+ server that maps type URLs to message definitions as
+ follows:
+
+
+ * If no scheme is provided, `https` is assumed.
+
+ * An HTTP GET on the URL must yield a
+ [google.protobuf.Type][]
+ value in binary format, or produce an error.
+ * Applications are allowed to cache lookup results based
+ on the
+ URL, or have them precompiled into a binary to avoid any
+ lookup. Therefore, binary compatibility needs to be preserved
+ on changes to types. (Use versioned type names to manage
+ breaking changes.)
+
+ Note: this functionality is not currently available in
+ the official
+
+ protobuf release, and it is not used for type URLs
+ beginning with
+
+ type.googleapis.com.
+
+
+ Schemes other than `http`, `https` (or the empty scheme)
+ might be
+
+ used with implementation specific semantics.
+ value:
+ type: string
+ format: byte
+ description: >-
+ Must be a valid serialized protocol buffer of the above
+ specified type.
+ description: >-
+ `Any` contains an arbitrary serialized protocol buffer
+ message along with a
+
+ URL that describes the type of the serialized message.
+
+
+ Protobuf library provides support to pack/unpack Any values
+ in the form
+
+ of utility functions or additional generated methods of the
+ Any type.
+
+
+ Example 1: Pack and unpack a message in C++.
+
+ Foo foo = ...;
+ Any any;
+ any.PackFrom(foo);
+ ...
+ if (any.UnpackTo(&foo)) {
+ ...
+ }
+
+ Example 2: Pack and unpack a message in Java.
+
+ Foo foo = ...;
+ Any any = Any.pack(foo);
+ ...
+ if (any.is(Foo.class)) {
+ foo = any.unpack(Foo.class);
+ }
+
+ Example 3: Pack and unpack a message in Python.
+
+ foo = Foo(...)
+ any = Any()
+ any.Pack(foo)
+ ...
+ if any.Is(Foo.DESCRIPTOR):
+ any.Unpack(foo)
+ ...
+
+ Example 4: Pack and unpack a message in Go
+
+ foo := &pb.Foo{...}
+ any, err := ptypes.MarshalAny(foo)
+ ...
+ foo := &pb.Foo{}
+ if err := ptypes.UnmarshalAny(any, foo); err != nil {
+ ...
+ }
+
+ The pack methods provided by protobuf library will by
+ default use
+
+ 'type.googleapis.com/full.type.name' as the type URL and the
+ unpack
+
+ methods only use the fully qualified type name after the
+ last '/'
+
+ in the type URL, for example "foo.bar.com/x/y.z" will yield
+ type
+
+ name "y.z".
+
+
+
+ JSON
+
+ ====
+
+ The JSON representation of an `Any` value uses the regular
+
+ representation of the deserialized, embedded message, with
+ an
+
+ additional field `@type` which contains the type URL.
+ Example:
+
+ package google.profile;
+ message Person {
+ string first_name = 1;
+ string last_name = 2;
+ }
+
+ {
+ "@type": "type.googleapis.com/google.profile.Person",
+ "firstName": ,
+ "lastName":
+ }
+
+ If the embedded message type is well-known and has a custom
+ JSON
+
+ representation, that representation will be embedded adding
+ a field
+
+ `value` which holds the custom JSON in addition to the
+ `@type`
+
+ field. Example (for message [google.protobuf.Duration][]):
+
+ {
+ "@type": "type.googleapis.com/google.protobuf.Duration",
+ "value": "1.212s"
+ }
+ parameters:
+ - name: client_id
+ description: client unique identifier
+ in: path
+ required: true
+ type: string
+ tags:
+ - Query
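+  # Illustrative request for the ClientStatus endpoint above (an editorial
+  # sketch, not part of the generated specification; the client identifier is
+  # a placeholder):
+  #
+  #   GET /ibc/core/client/v1/client_status/07-tendermint-0
+  #
+  # The response is a single `status` string describing the queried client.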
+ '/ibc/core/client/v1/consensus_states/{client_id}':
+ get:
+ summary: |-
+        ConsensusStates queries all the consensus states associated with a given
+ client.
+ operationId: ConsensusStates
+ responses:
+ '200':
+ description: A successful response.
+ schema:
+ type: object
+ properties:
+ consensus_states:
+ type: array
+ items:
+ type: object
+ properties:
+ height:
+ title: consensus state height
+ type: object
+ properties:
+ revision_number:
+ type: string
+ format: uint64
+ title: the revision that the client is currently on
+ revision_height:
+ type: string
+ format: uint64
+ title: the height within the given revision
+ description: >-
+ Normally the RevisionHeight is incremented at each
+ height while keeping
+
+ RevisionNumber the same. However some consensus
+ algorithms may choose to
+
+ reset the height in certain conditions e.g. hard forks,
+ state-machine
+
+                  breaking changes. In these cases, the RevisionNumber is
+ incremented so that
+
+                  height continues to be monotonically increasing even as
+ the RevisionHeight
+
+                  gets reset.
+ consensus_state:
+ type: object
+ properties:
+ type_url:
+ type: string
+ description: >-
+ A URL/resource name that uniquely identifies the
+ type of the serialized
+
+ protocol buffer message. This string must contain at
+ least
+
+ one "/" character. The last segment of the URL's
+ path must represent
+
+ the fully qualified name of the type (as in
+
+ `path/google.protobuf.Duration`). The name should be
+ in a canonical form
+
+ (e.g., leading "." is not accepted).
+
+
+ In practice, teams usually precompile into the
+ binary all types that they
+
+ expect it to use in the context of Any. However, for
+ URLs which use the
+
+ scheme `http`, `https`, or no scheme, one can
+ optionally set up a type
+
+ server that maps type URLs to message definitions as
+ follows:
+
+
+ * If no scheme is provided, `https` is assumed.
+
+ * An HTTP GET on the URL must yield a
+ [google.protobuf.Type][]
+ value in binary format, or produce an error.
+ * Applications are allowed to cache lookup results
+ based on the
+ URL, or have them precompiled into a binary to avoid any
+ lookup. Therefore, binary compatibility needs to be preserved
+ on changes to types. (Use versioned type names to manage
+ breaking changes.)
+
+ Note: this functionality is not currently available
+ in the official
+
+ protobuf release, and it is not used for type URLs
+ beginning with
+
+ type.googleapis.com.
+
+
+ Schemes other than `http`, `https` (or the empty
+ scheme) might be
+
+ used with implementation specific semantics.
+ value:
+ type: string
+ format: byte
+ description: >-
+ Must be a valid serialized protocol buffer of the
+ above specified type.
+ description: >-
+ `Any` contains an arbitrary serialized protocol buffer
+ message along with a
+
+ URL that describes the type of the serialized message.
+
+
+ Protobuf library provides support to pack/unpack Any
+ values in the form
+
+ of utility functions or additional generated methods of
+ the Any type.
+
+
+ Example 1: Pack and unpack a message in C++.
+
+ Foo foo = ...;
+ Any any;
+ any.PackFrom(foo);
+ ...
+ if (any.UnpackTo(&foo)) {
+ ...
+ }
+
+ Example 2: Pack and unpack a message in Java.
+
+ Foo foo = ...;
+ Any any = Any.pack(foo);
+ ...
+ if (any.is(Foo.class)) {
+ foo = any.unpack(Foo.class);
+ }
+
+ Example 3: Pack and unpack a message in Python.
+
+ foo = Foo(...)
+ any = Any()
+ any.Pack(foo)
+ ...
+ if any.Is(Foo.DESCRIPTOR):
+ any.Unpack(foo)
+ ...
+
+ Example 4: Pack and unpack a message in Go
+
+ foo := &pb.Foo{...}
+ any, err := ptypes.MarshalAny(foo)
+ ...
+ foo := &pb.Foo{}
+ if err := ptypes.UnmarshalAny(any, foo); err != nil {
+ ...
+ }
+
+ The pack methods provided by protobuf library will by
+ default use
+
+ 'type.googleapis.com/full.type.name' as the type URL and
+ the unpack
+
+ methods only use the fully qualified type name after the
+ last '/'
+
+ in the type URL, for example "foo.bar.com/x/y.z" will
+ yield type
+
+ name "y.z".
+
+
+
+ JSON
+
+ ====
+
+ The JSON representation of an `Any` value uses the
+ regular
+
+ representation of the deserialized, embedded message,
+ with an
+
+ additional field `@type` which contains the type URL.
+ Example:
+
+ package google.profile;
+ message Person {
+ string first_name = 1;
+ string last_name = 2;
+ }
+
+ {
+ "@type": "type.googleapis.com/google.profile.Person",
+ "firstName": ,
+ "lastName":
+ }
+
+ If the embedded message type is well-known and has a
+ custom JSON
+
+ representation, that representation will be embedded
+ adding a field
+
+ `value` which holds the custom JSON in addition to the
+ `@type`
+
+ field. Example (for message
+ [google.protobuf.Duration][]):
+
+ {
+ "@type": "type.googleapis.com/google.protobuf.Duration",
+ "value": "1.212s"
+ }
+ title: consensus state
+ description: >-
+ ConsensusStateWithHeight defines a consensus state with an
+ additional height
+
+ field.
+ title: consensus states associated with the identifier
+ pagination:
+ title: pagination response
+ type: object
+ properties:
+ next_key:
+ type: string
+ format: byte
+ title: |-
+ next_key is the key to be passed to PageRequest.key to
+ query the next page most efficiently
+ total:
+ type: string
+ format: uint64
+ title: >-
+                  total is the total number of results available if
+                  PageRequest.count_total
+
+                  was set; its value is undefined otherwise
+ description: >-
+ PageResponse is to be embedded in gRPC response messages where
+ the
+
+ corresponding request message has used PageRequest.
+
+ message SomeResponse {
+ repeated Bar results = 1;
+ PageResponse page = 2;
+ }
+ title: |-
+ QueryConsensusStatesResponse is the response type for the
+ Query/ConsensusStates RPC method
+ default:
+ description: An unexpected error response
+ schema:
+ type: object
+ properties:
+ error:
+ type: string
+ code:
+ type: integer
+ format: int32
+ message:
+ type: string
+ details:
+ type: array
+ items:
+ type: object
+ properties:
+ type_url:
+ type: string
+ description: >-
+ A URL/resource name that uniquely identifies the type of
+ the serialized
+
+ protocol buffer message. This string must contain at
+ least
+
+ one "/" character. The last segment of the URL's path
+ must represent
+
+ the fully qualified name of the type (as in
+
+ `path/google.protobuf.Duration`). The name should be in
+ a canonical form
+
+ (e.g., leading "." is not accepted).
+
+
+ In practice, teams usually precompile into the binary
+ all types that they
+
+ expect it to use in the context of Any. However, for
+ URLs which use the
+
+ scheme `http`, `https`, or no scheme, one can optionally
+ set up a type
+
+ server that maps type URLs to message definitions as
+ follows:
+
+
+ * If no scheme is provided, `https` is assumed.
+
+ * An HTTP GET on the URL must yield a
+ [google.protobuf.Type][]
+ value in binary format, or produce an error.
+ * Applications are allowed to cache lookup results based
+ on the
+ URL, or have them precompiled into a binary to avoid any
+ lookup. Therefore, binary compatibility needs to be preserved
+ on changes to types. (Use versioned type names to manage
+ breaking changes.)
+
+ Note: this functionality is not currently available in
+ the official
+
+ protobuf release, and it is not used for type URLs
+ beginning with
+
+ type.googleapis.com.
+
+
+ Schemes other than `http`, `https` (or the empty scheme)
+ might be
+
+ used with implementation specific semantics.
+ value:
+ type: string
+ format: byte
+ description: >-
+ Must be a valid serialized protocol buffer of the above
+ specified type.
+ description: >-
+ `Any` contains an arbitrary serialized protocol buffer
+ message along with a
+
+ URL that describes the type of the serialized message.
+
+
+ Protobuf library provides support to pack/unpack Any values
+ in the form
+
+ of utility functions or additional generated methods of the
+ Any type.
+
+
+ Example 1: Pack and unpack a message in C++.
+
+ Foo foo = ...;
+ Any any;
+ any.PackFrom(foo);
+ ...
+ if (any.UnpackTo(&foo)) {
+ ...
+ }
+
+ Example 2: Pack and unpack a message in Java.
+
+ Foo foo = ...;
+ Any any = Any.pack(foo);
+ ...
+ if (any.is(Foo.class)) {
+ foo = any.unpack(Foo.class);
+ }
+
+ Example 3: Pack and unpack a message in Python.
+
+ foo = Foo(...)
+ any = Any()
+ any.Pack(foo)
+ ...
+ if any.Is(Foo.DESCRIPTOR):
+ any.Unpack(foo)
+ ...
+
+ Example 4: Pack and unpack a message in Go
+
+ foo := &pb.Foo{...}
+ any, err := ptypes.MarshalAny(foo)
+ ...
+ foo := &pb.Foo{}
+ if err := ptypes.UnmarshalAny(any, foo); err != nil {
+ ...
+ }
+
+ The pack methods provided by protobuf library will by
+ default use
+
+ 'type.googleapis.com/full.type.name' as the type URL and the
+ unpack
+
+ methods only use the fully qualified type name after the
+ last '/'
+
+ in the type URL, for example "foo.bar.com/x/y.z" will yield
+ type
+
+ name "y.z".
+
+
+
+ JSON
+
+ ====
+
+ The JSON representation of an `Any` value uses the regular
+
+ representation of the deserialized, embedded message, with
+ an
+
+ additional field `@type` which contains the type URL.
+ Example:
+
+ package google.profile;
+ message Person {
+ string first_name = 1;
+ string last_name = 2;
+ }
+
+ {
+ "@type": "type.googleapis.com/google.profile.Person",
+ "firstName": ,
+ "lastName":
+ }
+
+ If the embedded message type is well-known and has a custom
+ JSON
+
+ representation, that representation will be embedded adding
+ a field
+
+ `value` which holds the custom JSON in addition to the
+ `@type`
+
+ field. Example (for message [google.protobuf.Duration][]):
+
+ {
+ "@type": "type.googleapis.com/google.protobuf.Duration",
+ "value": "1.212s"
+ }
+ parameters:
+ - name: client_id
+ description: client identifier
+ in: path
+ required: true
+ type: string
+ - name: pagination.key
+ description: |-
+ key is a value returned in PageResponse.next_key to begin
+ querying the next page most efficiently. Only one of offset or key
+ should be set.
+ in: query
+ required: false
+ type: string
+ format: byte
+ - name: pagination.offset
+ description: >-
+ offset is a numeric offset that can be used when key is unavailable.
+
+ It is less efficient than using key. Only one of offset or key
+ should
+
+ be set.
+ in: query
+ required: false
+ type: string
+ format: uint64
+ - name: pagination.limit
+ description: >-
+ limit is the total number of results to be returned in the result
+ page.
+
+ If left empty it will default to a value to be set by each app.
+ in: query
+ required: false
+ type: string
+ format: uint64
+ - name: pagination.count_total
+ description: >-
+ count_total is set to true to indicate that the result set should
+ include
+
+ a count of the total number of items available for pagination in
+ UIs.
+
+ count_total is only respected when offset is used. It is ignored
+ when key
+
+ is set.
+ in: query
+ required: false
+ type: boolean
+ format: boolean
+ tags:
+ - Query
+ '/ibc/core/client/v1/consensus_states/{client_id}/revision/{revision_number}/height/{revision_height}':
+ get:
+ summary: >-
+ ConsensusState queries a consensus state associated with a client state
+ at
+
+ a given height.
+ operationId: ConsensusState
+ responses:
+ '200':
+ description: A successful response.
+ schema:
+ type: object
+ properties:
+ consensus_state:
+ type: object
+ properties:
+ type_url:
+ type: string
+ description: >-
+ A URL/resource name that uniquely identifies the type of
+ the serialized
+
+ protocol buffer message. This string must contain at least
+
+ one "/" character. The last segment of the URL's path must
+ represent
+
+ the fully qualified name of the type (as in
+
+ `path/google.protobuf.Duration`). The name should be in a
+ canonical form
+
+ (e.g., leading "." is not accepted).
+
+
+ In practice, teams usually precompile into the binary all
+ types that they
+
+ expect it to use in the context of Any. However, for URLs
+ which use the
+
+ scheme `http`, `https`, or no scheme, one can optionally
+ set up a type
+
+ server that maps type URLs to message definitions as
+ follows:
+
+
+ * If no scheme is provided, `https` is assumed.
+
+ * An HTTP GET on the URL must yield a
+ [google.protobuf.Type][]
+ value in binary format, or produce an error.
+ * Applications are allowed to cache lookup results based
+ on the
+ URL, or have them precompiled into a binary to avoid any
+ lookup. Therefore, binary compatibility needs to be preserved
+ on changes to types. (Use versioned type names to manage
+ breaking changes.)
+
+ Note: this functionality is not currently available in the
+ official
+
+ protobuf release, and it is not used for type URLs
+ beginning with
+
+ type.googleapis.com.
+
+
+ Schemes other than `http`, `https` (or the empty scheme)
+ might be
+
+ used with implementation specific semantics.
+ value:
+ type: string
+ format: byte
+ description: >-
+ Must be a valid serialized protocol buffer of the above
+ specified type.
+ description: >-
+ `Any` contains an arbitrary serialized protocol buffer message
+ along with a
+
+ URL that describes the type of the serialized message.
+
+
+ Protobuf library provides support to pack/unpack Any values in
+ the form
+
+ of utility functions or additional generated methods of the
+ Any type.
+
+
+ Example 1: Pack and unpack a message in C++.
+
+ Foo foo = ...;
+ Any any;
+ any.PackFrom(foo);
+ ...
+ if (any.UnpackTo(&foo)) {
+ ...
+ }
+
+ Example 2: Pack and unpack a message in Java.
+
+ Foo foo = ...;
+ Any any = Any.pack(foo);
+ ...
+ if (any.is(Foo.class)) {
+ foo = any.unpack(Foo.class);
+ }
+
+ Example 3: Pack and unpack a message in Python.
+
+ foo = Foo(...)
+ any = Any()
+ any.Pack(foo)
+ ...
+ if any.Is(Foo.DESCRIPTOR):
+ any.Unpack(foo)
+ ...
+
+ Example 4: Pack and unpack a message in Go
+
+ foo := &pb.Foo{...}
+ any, err := ptypes.MarshalAny(foo)
+ ...
+ foo := &pb.Foo{}
+ if err := ptypes.UnmarshalAny(any, foo); err != nil {
+ ...
+ }
+
+ The pack methods provided by protobuf library will by default
+ use
+
+ 'type.googleapis.com/full.type.name' as the type URL and the
+ unpack
+
+ methods only use the fully qualified type name after the last
+ '/'
+
+ in the type URL, for example "foo.bar.com/x/y.z" will yield
+ type
+
+ name "y.z".
+
+
+
+ JSON
+
+ ====
+
+ The JSON representation of an `Any` value uses the regular
+
+ representation of the deserialized, embedded message, with an
+
+ additional field `@type` which contains the type URL. Example:
+
+ package google.profile;
+ message Person {
+ string first_name = 1;
+ string last_name = 2;
+ }
+
+ {
+ "@type": "type.googleapis.com/google.profile.Person",
+ "firstName": ,
+ "lastName":
+ }
+
+ If the embedded message type is well-known and has a custom
+ JSON
+
+ representation, that representation will be embedded adding a
+ field
+
+ `value` which holds the custom JSON in addition to the `@type`
+
+ field. Example (for message [google.protobuf.Duration][]):
+
+ {
+ "@type": "type.googleapis.com/google.protobuf.Duration",
+ "value": "1.212s"
+ }
+ title: >-
+ consensus state associated with the client identifier at the
+ given height
+ proof:
+ type: string
+ format: byte
+ title: merkle proof of existence
+ proof_height:
+ title: height at which the proof was retrieved
+ type: object
+ properties:
+ revision_number:
+ type: string
+ format: uint64
+ title: the revision that the client is currently on
+ revision_height:
+ type: string
+ format: uint64
+ title: the height within the given revision
+ description: >-
+ Normally the RevisionHeight is incremented at each height
+ while keeping
+
+ RevisionNumber the same. However some consensus algorithms may
+ choose to
+
+ reset the height in certain conditions e.g. hard forks,
+ state-machine
+
+              breaking changes. In these cases, the RevisionNumber is
+ incremented so that
+
+              height continues to be monotonically increasing even as the
+ RevisionHeight
+
+              gets reset.
+ title: >-
+ QueryConsensusStateResponse is the response type for the
+ Query/ConsensusState
+
+ RPC method
+ default:
+ description: An unexpected error response
+ schema:
+ type: object
+ properties:
+ error:
+ type: string
+ code:
+ type: integer
+ format: int32
+ message:
+ type: string
+ details:
+ type: array
+ items:
+ type: object
+ properties:
+ type_url:
+ type: string
+ description: >-
+ A URL/resource name that uniquely identifies the type of
+ the serialized
+
+ protocol buffer message. This string must contain at
+ least
+
+ one "/" character. The last segment of the URL's path
+ must represent
+
+ the fully qualified name of the type (as in
+
+ `path/google.protobuf.Duration`). The name should be in
+ a canonical form
+
+ (e.g., leading "." is not accepted).
+
+
+ In practice, teams usually precompile into the binary
+ all types that they
+
+ expect it to use in the context of Any. However, for
+ URLs which use the
+
+ scheme `http`, `https`, or no scheme, one can optionally
+ set up a type
+
+ server that maps type URLs to message definitions as
+ follows:
+
+
+ * If no scheme is provided, `https` is assumed.
+
+ * An HTTP GET on the URL must yield a
+ [google.protobuf.Type][]
+ value in binary format, or produce an error.
+ * Applications are allowed to cache lookup results based
+ on the
+ URL, or have them precompiled into a binary to avoid any
+ lookup. Therefore, binary compatibility needs to be preserved
+ on changes to types. (Use versioned type names to manage
+ breaking changes.)
+
+ Note: this functionality is not currently available in
+ the official
+
+ protobuf release, and it is not used for type URLs
+ beginning with
+
+ type.googleapis.com.
+
+
+ Schemes other than `http`, `https` (or the empty scheme)
+ might be
+
+ used with implementation specific semantics.
+ value:
+ type: string
+ format: byte
+ description: >-
+ Must be a valid serialized protocol buffer of the above
+ specified type.
+ description: >-
+ `Any` contains an arbitrary serialized protocol buffer
+ message along with a
+
+ URL that describes the type of the serialized message.
+
+
+ Protobuf library provides support to pack/unpack Any values
+ in the form
+
+ of utility functions or additional generated methods of the
+ Any type.
+
+
+ Example 1: Pack and unpack a message in C++.
+
+ Foo foo = ...;
+ Any any;
+ any.PackFrom(foo);
+ ...
+ if (any.UnpackTo(&foo)) {
+ ...
+ }
+
+ Example 2: Pack and unpack a message in Java.
+
+ Foo foo = ...;
+ Any any = Any.pack(foo);
+ ...
+ if (any.is(Foo.class)) {
+ foo = any.unpack(Foo.class);
+ }
+
+ Example 3: Pack and unpack a message in Python.
+
+ foo = Foo(...)
+ any = Any()
+ any.Pack(foo)
+ ...
+ if any.Is(Foo.DESCRIPTOR):
+ any.Unpack(foo)
+ ...
+
+ Example 4: Pack and unpack a message in Go
+
+ foo := &pb.Foo{...}
+ any, err := ptypes.MarshalAny(foo)
+ ...
+ foo := &pb.Foo{}
+ if err := ptypes.UnmarshalAny(any, foo); err != nil {
+ ...
+ }
+
+ The pack methods provided by protobuf library will by
+ default use
+
+ 'type.googleapis.com/full.type.name' as the type URL and the
+ unpack
+
+ methods only use the fully qualified type name after the
+ last '/'
+
+ in the type URL, for example "foo.bar.com/x/y.z" will yield
+ type
+
+ name "y.z".
+
+
+
+ JSON
+
+ ====
+
+ The JSON representation of an `Any` value uses the regular
+
+ representation of the deserialized, embedded message, with
+ an
+
+ additional field `@type` which contains the type URL.
+ Example:
+
+ package google.profile;
+ message Person {
+ string first_name = 1;
+ string last_name = 2;
+ }
+
+ {
+ "@type": "type.googleapis.com/google.profile.Person",
+ "firstName": ,
+ "lastName":
+ }
+
+ If the embedded message type is well-known and has a custom
+ JSON
+
+ representation, that representation will be embedded adding
+ a field
+
+ `value` which holds the custom JSON in addition to the
+ `@type`
+
+ field. Example (for message [google.protobuf.Duration][]):
+
+ {
+ "@type": "type.googleapis.com/google.protobuf.Duration",
+ "value": "1.212s"
+ }
+ parameters:
+ - name: client_id
+ description: client identifier
+ in: path
+ required: true
+ type: string
+ - name: revision_number
+ description: consensus state revision number
+ in: path
+ required: true
+ type: string
+ format: uint64
+ - name: revision_height
+ description: consensus state revision height
+ in: path
+ required: true
+ type: string
+ format: uint64
+ - name: latest_height
+ description: >-
+        latest_height overrides the height field and queries the latest
+ stored
+
+ ConsensusState.
+ in: query
+ required: false
+ type: boolean
+ format: boolean
+ tags:
+ - Query
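+  # Illustrative requests for the ConsensusState endpoint above (an editorial
+  # sketch, not part of the generated specification; identifier and heights
+  # are placeholders):
+  #
+  #   GET /ibc/core/client/v1/consensus_states/07-tendermint-0/revision/1/height/100
+  #   GET /ibc/core/client/v1/consensus_states/07-tendermint-0/revision/0/height/0?latest_height=true
+  #
+  # With `latest_height=true` the path heights are ignored and the latest
+  # stored ConsensusState is returned, per the parameter description above.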
+ /ibc/core/client/v1/upgraded_client_states:
+ get:
+ summary: UpgradedClientState queries an Upgraded IBC light client.
+ operationId: UpgradedClientState
+ responses:
+ '200':
+ description: A successful response.
+ schema:
+ type: object
+ properties:
+ upgraded_client_state:
+ type: object
+ properties:
+ type_url:
+ type: string
+ description: >-
+ A URL/resource name that uniquely identifies the type of
+ the serialized
+
+ protocol buffer message. This string must contain at least
+
+ one "/" character. The last segment of the URL's path must
+ represent
+
+ the fully qualified name of the type (as in
+
+ `path/google.protobuf.Duration`). The name should be in a
+ canonical form
+
+ (e.g., leading "." is not accepted).
+
+
+ In practice, teams usually precompile into the binary all
+ types that they
+
+ expect it to use in the context of Any. However, for URLs
+ which use the
+
+ scheme `http`, `https`, or no scheme, one can optionally
+ set up a type
+
+ server that maps type URLs to message definitions as
+ follows:
+
+
+ * If no scheme is provided, `https` is assumed.
+
+ * An HTTP GET on the URL must yield a
+ [google.protobuf.Type][]
+ value in binary format, or produce an error.
+ * Applications are allowed to cache lookup results based
+ on the
+ URL, or have them precompiled into a binary to avoid any
+ lookup. Therefore, binary compatibility needs to be preserved
+ on changes to types. (Use versioned type names to manage
+ breaking changes.)
+
+ Note: this functionality is not currently available in the
+ official
+
+ protobuf release, and it is not used for type URLs
+ beginning with
+
+ type.googleapis.com.
+
+
+ Schemes other than `http`, `https` (or the empty scheme)
+ might be
+
+ used with implementation specific semantics.
+ value:
+ type: string
+ format: byte
+ description: >-
+ Must be a valid serialized protocol buffer of the above
+ specified type.
+ description: >-
+ `Any` contains an arbitrary serialized protocol buffer message
+ along with a
+
+ URL that describes the type of the serialized message.
+
+
+ Protobuf library provides support to pack/unpack Any values in
+ the form
+
+ of utility functions or additional generated methods of the
+ Any type.
+
+
+ Example 1: Pack and unpack a message in C++.
+
+ Foo foo = ...;
+ Any any;
+ any.PackFrom(foo);
+ ...
+ if (any.UnpackTo(&foo)) {
+ ...
+ }
+
+ Example 2: Pack and unpack a message in Java.
+
+ Foo foo = ...;
+ Any any = Any.pack(foo);
+ ...
+ if (any.is(Foo.class)) {
+ foo = any.unpack(Foo.class);
+ }
+
+ Example 3: Pack and unpack a message in Python.
+
+ foo = Foo(...)
+ any = Any()
+ any.Pack(foo)
+ ...
+ if any.Is(Foo.DESCRIPTOR):
+ any.Unpack(foo)
+ ...
+
+ Example 4: Pack and unpack a message in Go
+
+ foo := &pb.Foo{...}
+ any, err := ptypes.MarshalAny(foo)
+ ...
+ foo := &pb.Foo{}
+ if err := ptypes.UnmarshalAny(any, foo); err != nil {
+ ...
+ }
+
+ The pack methods provided by protobuf library will by default
+ use
+
+ 'type.googleapis.com/full.type.name' as the type URL and the
+ unpack
+
+ methods only use the fully qualified type name after the last
+ '/'
+
+ in the type URL, for example "foo.bar.com/x/y.z" will yield
+ type
+
+ name "y.z".
+
+
+
+ JSON
+
+ ====
+
+ The JSON representation of an `Any` value uses the regular
+
+ representation of the deserialized, embedded message, with an
+
+ additional field `@type` which contains the type URL. Example:
+
+ package google.profile;
+ message Person {
+ string first_name = 1;
+ string last_name = 2;
+ }
+
+ {
+ "@type": "type.googleapis.com/google.profile.Person",
+ "firstName": ,
+ "lastName":
+ }
+
+ If the embedded message type is well-known and has a custom
+ JSON
+
+ representation, that representation will be embedded adding a
+ field
+
+ `value` which holds the custom JSON in addition to the `@type`
+
+ field. Example (for message [google.protobuf.Duration][]):
+
+ {
+ "@type": "type.googleapis.com/google.protobuf.Duration",
+ "value": "1.212s"
+ }
+ title: client state associated with the request identifier
+ description: |-
+ QueryUpgradedClientStateResponse is the response type for the
+ Query/UpgradedClientState RPC method.
+ default:
+ description: An unexpected error response
+ schema:
+ type: object
+ properties:
+ error:
+ type: string
+ code:
+ type: integer
+ format: int32
+ message:
+ type: string
+ details:
+ type: array
+ items:
+ type: object
+ properties:
+ type_url:
+ type: string
+ description: >-
+ A URL/resource name that uniquely identifies the type of
+ the serialized
+
+ protocol buffer message. This string must contain at
+ least
+
+ one "/" character. The last segment of the URL's path
+ must represent
+
+ the fully qualified name of the type (as in
+
+ `path/google.protobuf.Duration`). The name should be in
+ a canonical form
+
+ (e.g., leading "." is not accepted).
+
+
+ In practice, teams usually precompile into the binary
+ all types that they
+
+ expect it to use in the context of Any. However, for
+ URLs which use the
+
+ scheme `http`, `https`, or no scheme, one can optionally
+ set up a type
+
+ server that maps type URLs to message definitions as
+ follows:
+
+
+ * If no scheme is provided, `https` is assumed.
+
+ * An HTTP GET on the URL must yield a
+ [google.protobuf.Type][]
+ value in binary format, or produce an error.
+ * Applications are allowed to cache lookup results based
+ on the
+ URL, or have them precompiled into a binary to avoid any
+ lookup. Therefore, binary compatibility needs to be preserved
+ on changes to types. (Use versioned type names to manage
+ breaking changes.)
+
+ Note: this functionality is not currently available in
+ the official
+
+ protobuf release, and it is not used for type URLs
+ beginning with
+
+ type.googleapis.com.
+
+
+ Schemes other than `http`, `https` (or the empty scheme)
+ might be
+
+ used with implementation specific semantics.
+ value:
+ type: string
+ format: byte
+ description: >-
+ Must be a valid serialized protocol buffer of the above
+ specified type.
+ description: >-
+ `Any` contains an arbitrary serialized protocol buffer
+ message along with a
+
+ URL that describes the type of the serialized message.
+
+
+ Protobuf library provides support to pack/unpack Any values
+ in the form
+
+ of utility functions or additional generated methods of the
+ Any type.
+
+
+ Example 1: Pack and unpack a message in C++.
+
+ Foo foo = ...;
+ Any any;
+ any.PackFrom(foo);
+ ...
+ if (any.UnpackTo(&foo)) {
+ ...
+ }
+
+ Example 2: Pack and unpack a message in Java.
+
+ Foo foo = ...;
+ Any any = Any.pack(foo);
+ ...
+ if (any.is(Foo.class)) {
+ foo = any.unpack(Foo.class);
+ }
+
+ Example 3: Pack and unpack a message in Python.
+
+ foo = Foo(...)
+ any = Any()
+ any.Pack(foo)
+ ...
+ if any.Is(Foo.DESCRIPTOR):
+ any.Unpack(foo)
+ ...
+
+ Example 4: Pack and unpack a message in Go
+
+ foo := &pb.Foo{...}
+ any, err := ptypes.MarshalAny(foo)
+ ...
+ foo := &pb.Foo{}
+ if err := ptypes.UnmarshalAny(any, foo); err != nil {
+ ...
+ }
+
+ The pack methods provided by protobuf library will by
+ default use
+
+ 'type.googleapis.com/full.type.name' as the type URL and the
+ unpack
+
+ methods only use the fully qualified type name after the
+ last '/'
+
+ in the type URL, for example "foo.bar.com/x/y.z" will yield
+ type
+
+ name "y.z".
+
+
+
+ JSON
+
+ ====
+
+ The JSON representation of an `Any` value uses the regular
+
+ representation of the deserialized, embedded message, with
+ an
+
+ additional field `@type` which contains the type URL.
+ Example:
+
+ package google.profile;
+ message Person {
+ string first_name = 1;
+ string last_name = 2;
+ }
+
+ {
+ "@type": "type.googleapis.com/google.profile.Person",
+ "firstName": ,
+ "lastName":
+ }
+
+ If the embedded message type is well-known and has a custom
+ JSON
+
+ representation, that representation will be embedded adding
+ a field
+
+ `value` which holds the custom JSON in addition to the
+ `@type`
+
+ field. Example (for message [google.protobuf.Duration][]):
+
+ {
+ "@type": "type.googleapis.com/google.protobuf.Duration",
+ "value": "1.212s"
+ }
+ tags:
+ - Query
+ /ibc/core/client/v1/upgraded_consensus_states:
+ get:
+ summary: UpgradedConsensusState queries an Upgraded IBC consensus state.
+ operationId: UpgradedConsensusState
+ responses:
+ '200':
+ description: A successful response.
+ schema:
+ type: object
+ properties:
+ upgraded_consensus_state:
+ type: object
+ properties:
+ type_url:
+ type: string
+ description: >-
+ A URL/resource name that uniquely identifies the type of
+ the serialized
+
+ protocol buffer message. This string must contain at least
+
+ one "/" character. The last segment of the URL's path must
+ represent
+
+ the fully qualified name of the type (as in
+
+ `path/google.protobuf.Duration`). The name should be in a
+ canonical form
+
+ (e.g., leading "." is not accepted).
+
+
+ In practice, teams usually precompile into the binary all
+ types that they
+
+ expect it to use in the context of Any. However, for URLs
+ which use the
+
+ scheme `http`, `https`, or no scheme, one can optionally
+ set up a type
+
+ server that maps type URLs to message definitions as
+ follows:
+
+
+ * If no scheme is provided, `https` is assumed.
+
+ * An HTTP GET on the URL must yield a
+ [google.protobuf.Type][]
+ value in binary format, or produce an error.
+ * Applications are allowed to cache lookup results based
+ on the
+ URL, or have them precompiled into a binary to avoid any
+ lookup. Therefore, binary compatibility needs to be preserved
+ on changes to types. (Use versioned type names to manage
+ breaking changes.)
+
+ Note: this functionality is not currently available in the
+ official
+
+ protobuf release, and it is not used for type URLs
+ beginning with
+
+ type.googleapis.com.
+
+
+ Schemes other than `http`, `https` (or the empty scheme)
+ might be
+
+ used with implementation specific semantics.
+ value:
+ type: string
+ format: byte
+ description: >-
+ Must be a valid serialized protocol buffer of the above
+ specified type.
+ description: >-
+ `Any` contains an arbitrary serialized protocol buffer message
+ along with a
+
+ URL that describes the type of the serialized message.
+
+
+ Protobuf library provides support to pack/unpack Any values in
+ the form
+
+ of utility functions or additional generated methods of the
+ Any type.
+
+
+ Example 1: Pack and unpack a message in C++.
+
+ Foo foo = ...;
+ Any any;
+ any.PackFrom(foo);
+ ...
+ if (any.UnpackTo(&foo)) {
+ ...
+ }
+
+ Example 2: Pack and unpack a message in Java.
+
+ Foo foo = ...;
+ Any any = Any.pack(foo);
+ ...
+ if (any.is(Foo.class)) {
+ foo = any.unpack(Foo.class);
+ }
+
+ Example 3: Pack and unpack a message in Python.
+
+ foo = Foo(...)
+ any = Any()
+ any.Pack(foo)
+ ...
+ if any.Is(Foo.DESCRIPTOR):
+ any.Unpack(foo)
+ ...
+
+ Example 4: Pack and unpack a message in Go
+
+ foo := &pb.Foo{...}
+ any, err := ptypes.MarshalAny(foo)
+ ...
+ foo := &pb.Foo{}
+ if err := ptypes.UnmarshalAny(any, foo); err != nil {
+ ...
+ }
+
+ The pack methods provided by protobuf library will by default
+ use
+
+ 'type.googleapis.com/full.type.name' as the type URL and the
+ unpack
+
+ methods only use the fully qualified type name after the last
+ '/'
+
+ in the type URL, for example "foo.bar.com/x/y.z" will yield
+ type
+
+ name "y.z".
+
+
+
+ JSON
+
+ ====
+
+ The JSON representation of an `Any` value uses the regular
+
+ representation of the deserialized, embedded message, with an
+
+ additional field `@type` which contains the type URL. Example:
+
+ package google.profile;
+ message Person {
+ string first_name = 1;
+ string last_name = 2;
+ }
+
+ {
+ "@type": "type.googleapis.com/google.profile.Person",
+ "firstName": ,
+ "lastName":
+ }
+
+ If the embedded message type is well-known and has a custom
+ JSON
+
+ representation, that representation will be embedded adding a
+ field
+
+ `value` which holds the custom JSON in addition to the `@type`
+
+ field. Example (for message [google.protobuf.Duration][]):
+
+ {
+ "@type": "type.googleapis.com/google.protobuf.Duration",
+ "value": "1.212s"
+ }
+ title: Consensus state associated with the request identifier
+ description: |-
+ QueryUpgradedConsensusStateResponse is the response type for the
+ Query/UpgradedConsensusState RPC method.
+ default:
+ description: An unexpected error response
+ schema:
+ type: object
+ properties:
+ error:
+ type: string
+ code:
+ type: integer
+ format: int32
+ message:
+ type: string
+ details:
+ type: array
+ items:
+ type: object
+ properties:
+ type_url:
+ type: string
+ description: >-
+ A URL/resource name that uniquely identifies the type of
+ the serialized
+
+ protocol buffer message. This string must contain at
+ least
+
+ one "/" character. The last segment of the URL's path
+ must represent
+
+ the fully qualified name of the type (as in
+
+ `path/google.protobuf.Duration`). The name should be in
+ a canonical form
+
+ (e.g., leading "." is not accepted).
+
+
+ In practice, teams usually precompile into the binary
+ all types that they
+
+ expect it to use in the context of Any. However, for
+ URLs which use the
+
+ scheme `http`, `https`, or no scheme, one can optionally
+ set up a type
+
+ server that maps type URLs to message definitions as
+ follows:
+
+
+ * If no scheme is provided, `https` is assumed.
+
+ * An HTTP GET on the URL must yield a
+ [google.protobuf.Type][]
+ value in binary format, or produce an error.
+ * Applications are allowed to cache lookup results based
+ on the
+ URL, or have them precompiled into a binary to avoid any
+ lookup. Therefore, binary compatibility needs to be preserved
+ on changes to types. (Use versioned type names to manage
+ breaking changes.)
+
+ Note: this functionality is not currently available in
+ the official
+
+ protobuf release, and it is not used for type URLs
+ beginning with
+
+ type.googleapis.com.
+
+
+ Schemes other than `http`, `https` (or the empty scheme)
+ might be
+
+ used with implementation specific semantics.
+ value:
+ type: string
+ format: byte
+ description: >-
+ Must be a valid serialized protocol buffer of the above
+ specified type.
+ description: >-
+ `Any` contains an arbitrary serialized protocol buffer
+ message along with a
+
+ URL that describes the type of the serialized message.
+
+
+ Protobuf library provides support to pack/unpack Any values
+ in the form
+
+ of utility functions or additional generated methods of the
+ Any type.
+
+
+ Example 1: Pack and unpack a message in C++.
+
+ Foo foo = ...;
+ Any any;
+ any.PackFrom(foo);
+ ...
+ if (any.UnpackTo(&foo)) {
+ ...
+ }
+
+ Example 2: Pack and unpack a message in Java.
+
+ Foo foo = ...;
+ Any any = Any.pack(foo);
+ ...
+ if (any.is(Foo.class)) {
+ foo = any.unpack(Foo.class);
+ }
+
+ Example 3: Pack and unpack a message in Python.
+
+ foo = Foo(...)
+ any = Any()
+ any.Pack(foo)
+ ...
+ if any.Is(Foo.DESCRIPTOR):
+ any.Unpack(foo)
+ ...
+
+ Example 4: Pack and unpack a message in Go
+
+ foo := &pb.Foo{...}
+ any, err := ptypes.MarshalAny(foo)
+ ...
+ foo := &pb.Foo{}
+ if err := ptypes.UnmarshalAny(any, foo); err != nil {
+ ...
+ }
+
+ The pack methods provided by protobuf library will by
+ default use
+
+ 'type.googleapis.com/full.type.name' as the type URL and the
+ unpack
+
+ methods only use the fully qualified type name after the
+ last '/'
+
+ in the type URL, for example "foo.bar.com/x/y.z" will yield
+ type
+
+ name "y.z".
+
+
+
+ JSON
+
+ ====
+
+ The JSON representation of an `Any` value uses the regular
+
+ representation of the deserialized, embedded message, with
+ an
+
+ additional field `@type` which contains the type URL.
+ Example:
+
+ package google.profile;
+ message Person {
+ string first_name = 1;
+ string last_name = 2;
+ }
+
+ {
+ "@type": "type.googleapis.com/google.profile.Person",
+ "firstName": ,
+ "lastName":
+ }
+
+ If the embedded message type is well-known and has a custom
+ JSON
+
+ representation, that representation will be embedded adding
+ a field
+
+ `value` which holds the custom JSON in addition to the
+ `@type`
+
+ field. Example (for message [google.protobuf.Duration][]):
+
+ {
+ "@type": "type.googleapis.com/google.protobuf.Duration",
+ "value": "1.212s"
+ }
+ tags:
+ - Query
+ '/ibc/core/connection/v1/client_connections/{client_id}':
+ get:
+ summary: |-
+ ClientConnections queries the connection paths associated with a client
+ state.
+ operationId: ClientConnections
+ responses:
+ '200':
+ description: A successful response.
+ schema:
+ type: object
+ properties:
+ connection_paths:
+ type: array
+ items:
+ type: string
+ description: slice of all the connection paths associated with a client.
+ proof:
+ type: string
+ format: byte
+ title: merkle proof of existence
+ proof_height:
+ title: height at which the proof was generated
+ type: object
+ properties:
+ revision_number:
+ type: string
+ format: uint64
+ title: the revision that the client is currently on
+ revision_height:
+ type: string
+ format: uint64
+ title: the height within the given revision
+ description: >-
+ Normally the RevisionHeight is incremented at each height
+ while keeping
+
+ RevisionNumber the same. However some consensus algorithms may
+ choose to
+
+ reset the height in certain conditions e.g. hard forks,
+ state-machine
+
+ breaking changes. In these cases, the RevisionNumber is
+ incremented so that
+
+ height continues to be monotonically increasing even as the
+ RevisionHeight
+
+ gets reset
+ title: |-
+ QueryClientConnectionsResponse is the response type for the
+ Query/ClientConnections RPC method
+ default:
+ description: An unexpected error response
+ schema:
+ type: object
+ properties:
+ error:
+ type: string
+ code:
+ type: integer
+ format: int32
+ message:
+ type: string
+ details:
+ type: array
+ items:
+ type: object
+ properties:
+ type_url:
+ type: string
+ description: >-
+ A URL/resource name that uniquely identifies the type of
+ the serialized
+
+ protocol buffer message. This string must contain at
+ least
+
+ one "/" character. The last segment of the URL's path
+ must represent
+
+ the fully qualified name of the type (as in
+
+ `path/google.protobuf.Duration`). The name should be in
+ a canonical form
+
+ (e.g., leading "." is not accepted).
+
+
+ In practice, teams usually precompile into the binary
+ all types that they
+
+ expect it to use in the context of Any. However, for
+ URLs which use the
+
+ scheme `http`, `https`, or no scheme, one can optionally
+ set up a type
+
+ server that maps type URLs to message definitions as
+ follows:
+
+
+ * If no scheme is provided, `https` is assumed.
+
+ * An HTTP GET on the URL must yield a
+ [google.protobuf.Type][]
+ value in binary format, or produce an error.
+ * Applications are allowed to cache lookup results based
+ on the
+ URL, or have them precompiled into a binary to avoid any
+ lookup. Therefore, binary compatibility needs to be preserved
+ on changes to types. (Use versioned type names to manage
+ breaking changes.)
+
+ Note: this functionality is not currently available in
+ the official
+
+ protobuf release, and it is not used for type URLs
+ beginning with
+
+ type.googleapis.com.
+
+
+ Schemes other than `http`, `https` (or the empty scheme)
+ might be
+
+ used with implementation specific semantics.
+ value:
+ type: string
+ format: byte
+ description: >-
+ Must be a valid serialized protocol buffer of the above
+ specified type.
+ description: >-
+ `Any` contains an arbitrary serialized protocol buffer
+ message along with a
+
+ URL that describes the type of the serialized message.
+
+
+ Protobuf library provides support to pack/unpack Any values
+ in the form
+
+ of utility functions or additional generated methods of the
+ Any type.
+
+
+ Example 1: Pack and unpack a message in C++.
+
+ Foo foo = ...;
+ Any any;
+ any.PackFrom(foo);
+ ...
+ if (any.UnpackTo(&foo)) {
+ ...
+ }
+
+ Example 2: Pack and unpack a message in Java.
+
+ Foo foo = ...;
+ Any any = Any.pack(foo);
+ ...
+ if (any.is(Foo.class)) {
+ foo = any.unpack(Foo.class);
+ }
+
+ Example 3: Pack and unpack a message in Python.
+
+ foo = Foo(...)
+ any = Any()
+ any.Pack(foo)
+ ...
+ if any.Is(Foo.DESCRIPTOR):
+ any.Unpack(foo)
+ ...
+
+ Example 4: Pack and unpack a message in Go
+
+ foo := &pb.Foo{...}
+ any, err := ptypes.MarshalAny(foo)
+ ...
+ foo := &pb.Foo{}
+ if err := ptypes.UnmarshalAny(any, foo); err != nil {
+ ...
+ }
+
+ The pack methods provided by protobuf library will by
+ default use
+
+ 'type.googleapis.com/full.type.name' as the type URL and the
+ unpack
+
+ methods only use the fully qualified type name after the
+ last '/'
+
+ in the type URL, for example "foo.bar.com/x/y.z" will yield
+ type
+
+ name "y.z".
+
+
+
+ JSON
+
+ ====
+
+ The JSON representation of an `Any` value uses the regular
+
+ representation of the deserialized, embedded message, with
+ an
+
+ additional field `@type` which contains the type URL.
+ Example:
+
+ package google.profile;
+ message Person {
+ string first_name = 1;
+ string last_name = 2;
+ }
+
+ {
+ "@type": "type.googleapis.com/google.profile.Person",
+ "firstName": ,
+ "lastName":
+ }
+
+ If the embedded message type is well-known and has a custom
+ JSON
+
+ representation, that representation will be embedded adding
+ a field
+
+ `value` which holds the custom JSON in addition to the
+ `@type`
+
+ field. Example (for message [google.protobuf.Duration][]):
+
+ {
+ "@type": "type.googleapis.com/google.protobuf.Duration",
+ "value": "1.212s"
+ }
+ parameters:
+ - name: client_id
+ description: client identifier associated with a connection
+ in: path
+ required: true
+ type: string
+ tags:
+ - Query
+ /ibc/core/connection/v1/connections:
+ get:
+ summary: Connections queries all the IBC connections of a chain.
+ operationId: Connections
+ responses:
+ '200':
+ description: A successful response.
+ schema:
+ type: object
+ properties:
+ connections:
+ type: array
+ items:
+ type: object
+ properties:
+ id:
+ type: string
+ description: connection identifier.
+ client_id:
+ type: string
+ description: client associated with this connection.
+ versions:
+ type: array
+ items:
+ type: object
+ properties:
+ identifier:
+ type: string
+ title: unique version identifier
+ features:
+ type: array
+ items:
+ type: string
+ title: >-
+ list of features compatible with the specified
+ identifier
+ description: >-
+ Version defines the versioning scheme used to
+ negotiate the IBC version in
+
+ the connection handshake.
+ title: >-
+ IBC version which can be utilised to determine encodings
+ or protocols for
+
+ channels or packets utilising this connection
+ state:
+ description: current state of the connection end.
+ type: string
+ enum:
+ - STATE_UNINITIALIZED_UNSPECIFIED
+ - STATE_INIT
+ - STATE_TRYOPEN
+ - STATE_OPEN
+ default: STATE_UNINITIALIZED_UNSPECIFIED
+ counterparty:
+ description: counterparty chain associated with this connection.
+ type: object
+ properties:
+ client_id:
+ type: string
+ description: >-
+ identifies the client on the counterparty chain
+ associated with a given
+
+ connection.
+ connection_id:
+ type: string
+ description: >-
+ identifies the connection end on the counterparty
+ chain associated with a
+
+ given connection.
+ prefix:
+ description: commitment merkle prefix of the counterparty chain.
+ type: object
+ properties:
+ key_prefix:
+ type: string
+ format: byte
+ title: >-
+ MerklePrefix is merkle path prefixed to the key.
+
+ The constructed key from the Path and the key will
+ be append(Path.KeyPath,
+
+ append(Path.KeyPrefix, key...))
+ delay_period:
+ type: string
+ format: uint64
+ description: delay period associated with this connection.
+ description: >-
+ IdentifiedConnection defines a connection with additional
+ connection
+
+ identifier field.
+ description: list of stored connections of the chain.
+ pagination:
+ title: pagination response
+ type: object
+ properties:
+ next_key:
+ type: string
+ format: byte
+ title: |-
+ next_key is the key to be passed to PageRequest.key to
+ query the next page most efficiently
+ total:
+ type: string
+ format: uint64
+ title: >-
+ total is total number of results available if
+ PageRequest.count_total
+
+ was set, its value is undefined otherwise
+ description: >-
+ PageResponse is to be embedded in gRPC response messages where
+ the
+
+ corresponding request message has used PageRequest.
+
+ message SomeResponse {
+ repeated Bar results = 1;
+ PageResponse page = 2;
+ }
+ height:
+ title: query block height
+ type: object
+ properties:
+ revision_number:
+ type: string
+ format: uint64
+ title: the revision that the client is currently on
+ revision_height:
+ type: string
+ format: uint64
+ title: the height within the given revision
+ description: >-
+ Normally the RevisionHeight is incremented at each height
+ while keeping
+
+ RevisionNumber the same. However some consensus algorithms may
+ choose to
+
+ reset the height in certain conditions e.g. hard forks,
+ state-machine
+
+ breaking changes. In these cases, the RevisionNumber is
+ incremented so that
+
+ height continues to be monotonically increasing even as the
+ RevisionHeight
+
+ gets reset
+ description: >-
+ QueryConnectionsResponse is the response type for the
+ Query/Connections RPC
+
+ method.
+ default:
+ description: An unexpected error response
+ schema:
+ type: object
+ properties:
+ error:
+ type: string
+ code:
+ type: integer
+ format: int32
+ message:
+ type: string
+ details:
+ type: array
+ items:
+ type: object
+ properties:
+ type_url:
+ type: string
+ description: >-
+ A URL/resource name that uniquely identifies the type of
+ the serialized
+
+ protocol buffer message. This string must contain at
+ least
+
+ one "/" character. The last segment of the URL's path
+ must represent
+
+ the fully qualified name of the type (as in
+
+ `path/google.protobuf.Duration`). The name should be in
+ a canonical form
+
+ (e.g., leading "." is not accepted).
+
+
+ In practice, teams usually precompile into the binary
+ all types that they
+
+ expect it to use in the context of Any. However, for
+ URLs which use the
+
+ scheme `http`, `https`, or no scheme, one can optionally
+ set up a type
+
+ server that maps type URLs to message definitions as
+ follows:
+
+
+ * If no scheme is provided, `https` is assumed.
+
+ * An HTTP GET on the URL must yield a
+ [google.protobuf.Type][]
+ value in binary format, or produce an error.
+ * Applications are allowed to cache lookup results based
+ on the
+ URL, or have them precompiled into a binary to avoid any
+ lookup. Therefore, binary compatibility needs to be preserved
+ on changes to types. (Use versioned type names to manage
+ breaking changes.)
+
+ Note: this functionality is not currently available in
+ the official
+
+ protobuf release, and it is not used for type URLs
+ beginning with
+
+ type.googleapis.com.
+
+
+ Schemes other than `http`, `https` (or the empty scheme)
+ might be
+
+ used with implementation specific semantics.
+ value:
+ type: string
+ format: byte
+ description: >-
+ Must be a valid serialized protocol buffer of the above
+ specified type.
+ description: >-
+ `Any` contains an arbitrary serialized protocol buffer
+ message along with a
+
+ URL that describes the type of the serialized message.
+
+
+ Protobuf library provides support to pack/unpack Any values
+ in the form
+
+ of utility functions or additional generated methods of the
+ Any type.
+
+
+ Example 1: Pack and unpack a message in C++.
+
+ Foo foo = ...;
+ Any any;
+ any.PackFrom(foo);
+ ...
+ if (any.UnpackTo(&foo)) {
+ ...
+ }
+
+ Example 2: Pack and unpack a message in Java.
+
+ Foo foo = ...;
+ Any any = Any.pack(foo);
+ ...
+ if (any.is(Foo.class)) {
+ foo = any.unpack(Foo.class);
+ }
+
+ Example 3: Pack and unpack a message in Python.
+
+ foo = Foo(...)
+ any = Any()
+ any.Pack(foo)
+ ...
+ if any.Is(Foo.DESCRIPTOR):
+ any.Unpack(foo)
+ ...
+
+ Example 4: Pack and unpack a message in Go
+
+ foo := &pb.Foo{...}
+ any, err := ptypes.MarshalAny(foo)
+ ...
+ foo := &pb.Foo{}
+ if err := ptypes.UnmarshalAny(any, foo); err != nil {
+ ...
+ }
+
+ The pack methods provided by protobuf library will by
+ default use
+
+ 'type.googleapis.com/full.type.name' as the type URL and the
+ unpack
+
+ methods only use the fully qualified type name after the
+ last '/'
+
+ in the type URL, for example "foo.bar.com/x/y.z" will yield
+ type
+
+ name "y.z".
+
+
+
+ JSON
+
+ ====
+
+ The JSON representation of an `Any` value uses the regular
+
+ representation of the deserialized, embedded message, with
+ an
+
+ additional field `@type` which contains the type URL.
+ Example:
+
+ package google.profile;
+ message Person {
+ string first_name = 1;
+ string last_name = 2;
+ }
+
+ {
+ "@type": "type.googleapis.com/google.profile.Person",
+ "firstName": ,
+ "lastName":
+ }
+
+ If the embedded message type is well-known and has a custom
+ JSON
+
+ representation, that representation will be embedded adding
+ a field
+
+ `value` which holds the custom JSON in addition to the
+ `@type`
+
+ field. Example (for message [google.protobuf.Duration][]):
+
+ {
+ "@type": "type.googleapis.com/google.protobuf.Duration",
+ "value": "1.212s"
+ }
+ parameters:
+ - name: pagination.key
+ description: |-
+ key is a value returned in PageResponse.next_key to begin
+ querying the next page most efficiently. Only one of offset or key
+ should be set.
+ in: query
+ required: false
+ type: string
+ format: byte
+ - name: pagination.offset
+ description: >-
+ offset is a numeric offset that can be used when key is unavailable.
+
+ It is less efficient than using key. Only one of offset or key
+ should
+
+ be set.
+ in: query
+ required: false
+ type: string
+ format: uint64
+ - name: pagination.limit
+ description: >-
+ limit is the total number of results to be returned in the result
+ page.
+
+ If left empty it will default to a value to be set by each app.
+ in: query
+ required: false
+ type: string
+ format: uint64
+ - name: pagination.count_total
+ description: >-
+ count_total is set to true to indicate that the result set should
+ include
+
+ a count of the total number of items available for pagination in
+ UIs.
+
+ count_total is only respected when offset is used. It is ignored
+ when key
+
+ is set.
+ in: query
+ required: false
+ type: boolean
+ format: boolean
+ tags:
+ - Query
+ '/ibc/core/connection/v1/connections/{connection_id}':
+ get:
+ summary: Connection queries an IBC connection end.
+ operationId: Connection
+ responses:
+ '200':
+ description: A successful response.
+ schema:
+ type: object
+ properties:
+ connection:
+ title: connection associated with the request identifier
+ type: object
+ properties:
+ client_id:
+ type: string
+ description: client associated with this connection.
+ versions:
+ type: array
+ items:
+ type: object
+ properties:
+ identifier:
+ type: string
+ title: unique version identifier
+ features:
+ type: array
+ items:
+ type: string
+ title: >-
+ list of features compatible with the specified
+ identifier
+ description: >-
+ Version defines the versioning scheme used to negotiate
+ the IBC version in
+
+ the connection handshake.
+ description: >-
+ IBC version which can be utilised to determine encodings
+ or protocols for
+
+ channels or packets utilising this connection.
+ state:
+ description: current state of the connection end.
+ type: string
+ enum:
+ - STATE_UNINITIALIZED_UNSPECIFIED
+ - STATE_INIT
+ - STATE_TRYOPEN
+ - STATE_OPEN
+ default: STATE_UNINITIALIZED_UNSPECIFIED
+ counterparty:
+ description: counterparty chain associated with this connection.
+ type: object
+ properties:
+ client_id:
+ type: string
+ description: >-
+ identifies the client on the counterparty chain
+ associated with a given
+
+ connection.
+ connection_id:
+ type: string
+ description: >-
+ identifies the connection end on the counterparty
+ chain associated with a
+
+ given connection.
+ prefix:
+ description: commitment merkle prefix of the counterparty chain.
+ type: object
+ properties:
+ key_prefix:
+ type: string
+ format: byte
+ title: >-
+ MerklePrefix is merkle path prefixed to the key.
+
+ The constructed key from the Path and the key will be
+ append(Path.KeyPath,
+
+ append(Path.KeyPrefix, key...))
+ delay_period:
+ type: string
+ format: uint64
+ description: >-
+ delay period that must pass before a consensus state can
+ be used for
+
+ packet-verification. NOTE: delay period logic is only
+ implemented by some
+
+ clients.
+ description: >-
+ ConnectionEnd defines a stateful object on a chain connected
+ to another
+
+ separate one.
+
+ NOTE: there must only be 2 defined ConnectionEnds to establish
+
+ a connection between two chains.
+ proof:
+ type: string
+ format: byte
+ title: merkle proof of existence
+ proof_height:
+ title: height at which the proof was retrieved
+ type: object
+ properties:
+ revision_number:
+ type: string
+ format: uint64
+ title: the revision that the client is currently on
+ revision_height:
+ type: string
+ format: uint64
+ title: the height within the given revision
+ description: >-
+ Normally the RevisionHeight is incremented at each height
+ while keeping
+
+ RevisionNumber the same. However some consensus algorithms may
+ choose to
+
+ reset the height in certain conditions e.g. hard forks,
+ state-machine
+
+ breaking changes. In these cases, the RevisionNumber is
+ incremented so that
+
+ height continues to be monotonically increasing even as the
+ RevisionHeight
+
+ gets reset
+ description: >-
+ QueryConnectionResponse is the response type for the
+ Query/Connection RPC
+
+ method. Besides the connection end, it includes a proof and the
+ height from
+
+ which the proof was retrieved.
+ default:
+ description: An unexpected error response
+ schema:
+ type: object
+ properties:
+ error:
+ type: string
+ code:
+ type: integer
+ format: int32
+ message:
+ type: string
+ details:
+ type: array
+ items:
+ type: object
+ properties:
+ type_url:
+ type: string
+ description: >-
+ A URL/resource name that uniquely identifies the type of
+ the serialized
+
+ protocol buffer message. This string must contain at
+ least
+
+ one "/" character. The last segment of the URL's path
+ must represent
+
+ the fully qualified name of the type (as in
+
+ `path/google.protobuf.Duration`). The name should be in
+ a canonical form
+
+ (e.g., leading "." is not accepted).
+
+
+ In practice, teams usually precompile into the binary
+ all types that they
+
+ expect it to use in the context of Any. However, for
+ URLs which use the
+
+ scheme `http`, `https`, or no scheme, one can optionally
+ set up a type
+
+ server that maps type URLs to message definitions as
+ follows:
+
+
+ * If no scheme is provided, `https` is assumed.
+
+ * An HTTP GET on the URL must yield a
+ [google.protobuf.Type][]
+ value in binary format, or produce an error.
+ * Applications are allowed to cache lookup results based
+ on the
+ URL, or have them precompiled into a binary to avoid any
+ lookup. Therefore, binary compatibility needs to be preserved
+ on changes to types. (Use versioned type names to manage
+ breaking changes.)
+
+ Note: this functionality is not currently available in
+ the official
+
+ protobuf release, and it is not used for type URLs
+ beginning with
+
+ type.googleapis.com.
+
+
+ Schemes other than `http`, `https` (or the empty scheme)
+ might be
+
+ used with implementation specific semantics.
+ value:
+ type: string
+ format: byte
+ description: >-
+ Must be a valid serialized protocol buffer of the above
+ specified type.
+ description: >-
+ `Any` contains an arbitrary serialized protocol buffer
+ message along with a
+
+ URL that describes the type of the serialized message.
+
+
+ Protobuf library provides support to pack/unpack Any values
+ in the form
+
+ of utility functions or additional generated methods of the
+ Any type.
+
+
+ Example 1: Pack and unpack a message in C++.
+
+ Foo foo = ...;
+ Any any;
+ any.PackFrom(foo);
+ ...
+ if (any.UnpackTo(&foo)) {
+ ...
+ }
+
+ Example 2: Pack and unpack a message in Java.
+
+ Foo foo = ...;
+ Any any = Any.pack(foo);
+ ...
+ if (any.is(Foo.class)) {
+ foo = any.unpack(Foo.class);
+ }
+
+ Example 3: Pack and unpack a message in Python.
+
+ foo = Foo(...)
+ any = Any()
+ any.Pack(foo)
+ ...
+ if any.Is(Foo.DESCRIPTOR):
+ any.Unpack(foo)
+ ...
+
+ Example 4: Pack and unpack a message in Go
+
+ foo := &pb.Foo{...}
+ any, err := ptypes.MarshalAny(foo)
+ ...
+ foo := &pb.Foo{}
+ if err := ptypes.UnmarshalAny(any, foo); err != nil {
+ ...
+ }
+
+ The pack methods provided by protobuf library will by
+ default use
+
+ 'type.googleapis.com/full.type.name' as the type URL and the
+ unpack
+
+ methods only use the fully qualified type name after the
+ last '/'
+
+ in the type URL, for example "foo.bar.com/x/y.z" will yield
+ type
+
+ name "y.z".
+
+
+
+ JSON
+
+ ====
+
+ The JSON representation of an `Any` value uses the regular
+
+ representation of the deserialized, embedded message, with
+ an
+
+ additional field `@type` which contains the type URL.
+ Example:
+
+ package google.profile;
+ message Person {
+ string first_name = 1;
+ string last_name = 2;
+ }
+
+ {
+ "@type": "type.googleapis.com/google.profile.Person",
+ "firstName": ,
+ "lastName":
+ }
+
+ If the embedded message type is well-known and has a custom
+ JSON
+
+ representation, that representation will be embedded adding
+ a field
+
+ `value` which holds the custom JSON in addition to the
+ `@type`
+
+ field. Example (for message [google.protobuf.Duration][]):
+
+ {
+ "@type": "type.googleapis.com/google.protobuf.Duration",
+ "value": "1.212s"
+ }
+ parameters:
+ - name: connection_id
+ description: connection unique identifier
+ in: path
+ required: true
+ type: string
+ tags:
+ - Query
+ '/ibc/core/connection/v1/connections/{connection_id}/client_state':
+ get:
+ summary: |-
+ ConnectionClientState queries the client state associated with the
+ connection.
+ operationId: ConnectionClientState
+ responses:
+ '200':
+ description: A successful response.
+ schema:
+ type: object
+ properties:
+ identified_client_state:
+ title: client state associated with the channel
+ type: object
+ properties:
+ client_id:
+ type: string
+ title: client identifier
+ client_state:
+ type: object
+ properties:
+ type_url:
+ type: string
+ description: >-
+ A URL/resource name that uniquely identifies the type
+ of the serialized
+
+ protocol buffer message. This string must contain at
+ least
+
+ one "/" character. The last segment of the URL's path
+ must represent
+
+ the fully qualified name of the type (as in
+
+ `path/google.protobuf.Duration`). The name should be
+ in a canonical form
+
+ (e.g., leading "." is not accepted).
+
+
+ In practice, teams usually precompile into the binary
+ all types that they
+
+ expect it to use in the context of Any. However, for
+ URLs which use the
+
+ scheme `http`, `https`, or no scheme, one can
+ optionally set up a type
+
+ server that maps type URLs to message definitions as
+ follows:
+
+
+ * If no scheme is provided, `https` is assumed.
+
+ * An HTTP GET on the URL must yield a
+ [google.protobuf.Type][]
+ value in binary format, or produce an error.
+ * Applications are allowed to cache lookup results
+ based on the
+ URL, or have them precompiled into a binary to avoid any
+ lookup. Therefore, binary compatibility needs to be preserved
+ on changes to types. (Use versioned type names to manage
+ breaking changes.)
+
+ Note: this functionality is not currently available in
+ the official
+
+ protobuf release, and it is not used for type URLs
+ beginning with
+
+ type.googleapis.com.
+
+
+ Schemes other than `http`, `https` (or the empty
+ scheme) might be
+
+ used with implementation specific semantics.
+ value:
+ type: string
+ format: byte
+ description: >-
+ Must be a valid serialized protocol buffer of the
+ above specified type.
+ description: >-
+ `Any` contains an arbitrary serialized protocol buffer
+ message along with a
+
+ URL that describes the type of the serialized message.
+
+
+ Protobuf library provides support to pack/unpack Any
+ values in the form
+
+ of utility functions or additional generated methods of
+ the Any type.
+
+
+ Example 1: Pack and unpack a message in C++.
+
+ Foo foo = ...;
+ Any any;
+ any.PackFrom(foo);
+ ...
+ if (any.UnpackTo(&foo)) {
+ ...
+ }
+
+ Example 2: Pack and unpack a message in Java.
+
+ Foo foo = ...;
+ Any any = Any.pack(foo);
+ ...
+ if (any.is(Foo.class)) {
+ foo = any.unpack(Foo.class);
+ }
+
+ Example 3: Pack and unpack a message in Python.
+
+ foo = Foo(...)
+ any = Any()
+ any.Pack(foo)
+ ...
+ if any.Is(Foo.DESCRIPTOR):
+ any.Unpack(foo)
+ ...
+
+ Example 4: Pack and unpack a message in Go
+
+ foo := &pb.Foo{...}
+ any, err := ptypes.MarshalAny(foo)
+ ...
+ foo := &pb.Foo{}
+ if err := ptypes.UnmarshalAny(any, foo); err != nil {
+ ...
+ }
+
+ The pack methods provided by protobuf library will by
+ default use
+
+ 'type.googleapis.com/full.type.name' as the type URL and
+ the unpack
+
+ methods only use the fully qualified type name after the
+ last '/'
+
+ in the type URL, for example "foo.bar.com/x/y.z" will
+ yield type
+
+ name "y.z".
+
+
+
+ JSON
+
+ ====
+
+ The JSON representation of an `Any` value uses the regular
+
+ representation of the deserialized, embedded message, with
+ an
+
+ additional field `@type` which contains the type URL.
+ Example:
+
+ package google.profile;
+ message Person {
+ string first_name = 1;
+ string last_name = 2;
+ }
+
+ {
+ "@type": "type.googleapis.com/google.profile.Person",
+ "firstName": ,
+ "lastName":
+ }
+
+ If the embedded message type is well-known and has a
+ custom JSON
+
+ representation, that representation will be embedded
+ adding a field
+
+ `value` which holds the custom JSON in addition to the
+ `@type`
+
+ field. Example (for message [google.protobuf.Duration][]):
+
+ {
+ "@type": "type.googleapis.com/google.protobuf.Duration",
+ "value": "1.212s"
+ }
+ title: client state
+ description: >-
+ IdentifiedClientState defines a client state with an
+ additional client
+
+ identifier field.
+ proof:
+ type: string
+ format: byte
+ title: merkle proof of existence
+ proof_height:
+ title: height at which the proof was retrieved
+ type: object
+ properties:
+ revision_number:
+ type: string
+ format: uint64
+ title: the revision that the client is currently on
+ revision_height:
+ type: string
+ format: uint64
+ title: the height within the given revision
+ description: >-
+ Normally the RevisionHeight is incremented at each height
+ while keeping
+
+ RevisionNumber the same. However some consensus algorithms may
+ choose to
+
+ reset the height in certain conditions e.g. hard forks,
+ state-machine
+
+ breaking changes. In these cases, the RevisionNumber is
+ incremented so that
+
+ height continues to be monotonically increasing even as the
+ RevisionHeight
+
+ gets reset
+ title: |-
+ QueryConnectionClientStateResponse is the response type for the
+ Query/ConnectionClientState RPC method
+ default:
+ description: An unexpected error response
+ schema:
+ type: object
+ properties:
+ error:
+ type: string
+ code:
+ type: integer
+ format: int32
+ message:
+ type: string
+ details:
+ type: array
+ items:
+ type: object
+ properties:
+ type_url:
+ type: string
+ description: >-
+ A URL/resource name that uniquely identifies the type of
+ the serialized
+
+ protocol buffer message. This string must contain at
+ least
+
+ one "/" character. The last segment of the URL's path
+ must represent
+
+ the fully qualified name of the type (as in
+
+ `path/google.protobuf.Duration`). The name should be in
+ a canonical form
+
+ (e.g., leading "." is not accepted).
+
+
+ In practice, teams usually precompile into the binary
+ all types that they
+
+ expect it to use in the context of Any. However, for
+ URLs which use the
+
+ scheme `http`, `https`, or no scheme, one can optionally
+ set up a type
+
+ server that maps type URLs to message definitions as
+ follows:
+
+
+ * If no scheme is provided, `https` is assumed.
+
+ * An HTTP GET on the URL must yield a
+ [google.protobuf.Type][]
+ value in binary format, or produce an error.
+ * Applications are allowed to cache lookup results based
+ on the
+ URL, or have them precompiled into a binary to avoid any
+ lookup. Therefore, binary compatibility needs to be preserved
+ on changes to types. (Use versioned type names to manage
+ breaking changes.)
+
+ Note: this functionality is not currently available in
+ the official
+
+ protobuf release, and it is not used for type URLs
+ beginning with
+
+ type.googleapis.com.
+
+
+ Schemes other than `http`, `https` (or the empty scheme)
+ might be
+
+ used with implementation specific semantics.
+ value:
+ type: string
+ format: byte
+ description: >-
+ Must be a valid serialized protocol buffer of the above
+ specified type.
+ description: >-
+ `Any` contains an arbitrary serialized protocol buffer
+ message along with a
+
+ URL that describes the type of the serialized message.
+
+
+ Protobuf library provides support to pack/unpack Any values
+ in the form
+
+ of utility functions or additional generated methods of the
+ Any type.
+
+
+ Example 1: Pack and unpack a message in C++.
+
+ Foo foo = ...;
+ Any any;
+ any.PackFrom(foo);
+ ...
+ if (any.UnpackTo(&foo)) {
+ ...
+ }
+
+ Example 2: Pack and unpack a message in Java.
+
+ Foo foo = ...;
+ Any any = Any.pack(foo);
+ ...
+ if (any.is(Foo.class)) {
+ foo = any.unpack(Foo.class);
+ }
+
+ Example 3: Pack and unpack a message in Python.
+
+ foo = Foo(...)
+ any = Any()
+ any.Pack(foo)
+ ...
+ if any.Is(Foo.DESCRIPTOR):
+ any.Unpack(foo)
+ ...
+
+ Example 4: Pack and unpack a message in Go
+
+ foo := &pb.Foo{...}
+ any, err := ptypes.MarshalAny(foo)
+ ...
+ foo := &pb.Foo{}
+ if err := ptypes.UnmarshalAny(any, foo); err != nil {
+ ...
+ }
+
+ The pack methods provided by protobuf library will by
+ default use
+
+ 'type.googleapis.com/full.type.name' as the type URL and the
+ unpack
+
+ methods only use the fully qualified type name after the
+ last '/'
+
+ in the type URL, for example "foo.bar.com/x/y.z" will yield
+ type
+
+ name "y.z".
+
+
+
+ JSON
+
+ ====
+
+ The JSON representation of an `Any` value uses the regular
+
+ representation of the deserialized, embedded message, with
+ an
+
+ additional field `@type` which contains the type URL.
+ Example:
+
+ package google.profile;
+ message Person {
+ string first_name = 1;
+ string last_name = 2;
+ }
+
+ {
+ "@type": "type.googleapis.com/google.profile.Person",
+ "firstName": ,
+ "lastName":
+ }
+
+ If the embedded message type is well-known and has a custom
+ JSON
+
+ representation, that representation will be embedded adding
+ a field
+
+ `value` which holds the custom JSON in addition to the
+ `@type`
+
+ field. Example (for message [google.protobuf.Duration][]):
+
+ {
+ "@type": "type.googleapis.com/google.protobuf.Duration",
+ "value": "1.212s"
+ }
+ parameters:
+ - name: connection_id
+ description: connection identifier
+ in: path
+ required: true
+ type: string
+ tags:
+ - Query
+ '/ibc/core/connection/v1/connections/{connection_id}/consensus_state/revision/{revision_number}/height/{revision_height}':
+ get:
+ summary: |-
+ ConnectionConsensusState queries the consensus state associated with the
+ connection.
+ operationId: ConnectionConsensusState
+ responses:
+ '200':
+ description: A successful response.
+ schema:
+ type: object
+ properties:
+ consensus_state:
+ type: object
+ properties:
+ type_url:
+ type: string
+ description: >-
+ A URL/resource name that uniquely identifies the type of
+ the serialized
+
+ protocol buffer message. This string must contain at least
+
+ one "/" character. The last segment of the URL's path must
+ represent
+
+ the fully qualified name of the type (as in
+
+ `path/google.protobuf.Duration`). The name should be in a
+ canonical form
+
+ (e.g., leading "." is not accepted).
+
+
+ In practice, teams usually precompile into the binary all
+ types that they
+
+ expect it to use in the context of Any. However, for URLs
+ which use the
+
+ scheme `http`, `https`, or no scheme, one can optionally
+ set up a type
+
+ server that maps type URLs to message definitions as
+ follows:
+
+
+ * If no scheme is provided, `https` is assumed.
+
+ * An HTTP GET on the URL must yield a
+ [google.protobuf.Type][]
+ value in binary format, or produce an error.
+ * Applications are allowed to cache lookup results based
+ on the
+ URL, or have them precompiled into a binary to avoid any
+ lookup. Therefore, binary compatibility needs to be preserved
+ on changes to types. (Use versioned type names to manage
+ breaking changes.)
+
+ Note: this functionality is not currently available in the
+ official
+
+ protobuf release, and it is not used for type URLs
+ beginning with
+
+ type.googleapis.com.
+
+
+ Schemes other than `http`, `https` (or the empty scheme)
+ might be
+
+ used with implementation specific semantics.
+ value:
+ type: string
+ format: byte
+ description: >-
+ Must be a valid serialized protocol buffer of the above
+ specified type.
+ description: >-
+ `Any` contains an arbitrary serialized protocol buffer message
+ along with a
+
+ URL that describes the type of the serialized message.
+
+
+ Protobuf library provides support to pack/unpack Any values in
+ the form
+
+ of utility functions or additional generated methods of the
+ Any type.
+
+
+ Example 1: Pack and unpack a message in C++.
+
+ Foo foo = ...;
+ Any any;
+ any.PackFrom(foo);
+ ...
+ if (any.UnpackTo(&foo)) {
+ ...
+ }
+
+ Example 2: Pack and unpack a message in Java.
+
+ Foo foo = ...;
+ Any any = Any.pack(foo);
+ ...
+ if (any.is(Foo.class)) {
+ foo = any.unpack(Foo.class);
+ }
+
+ Example 3: Pack and unpack a message in Python.
+
+ foo = Foo(...)
+ any = Any()
+ any.Pack(foo)
+ ...
+ if any.Is(Foo.DESCRIPTOR):
+ any.Unpack(foo)
+ ...
+
+ Example 4: Pack and unpack a message in Go
+
+ foo := &pb.Foo{...}
+ any, err := ptypes.MarshalAny(foo)
+ ...
+ foo := &pb.Foo{}
+ if err := ptypes.UnmarshalAny(any, foo); err != nil {
+ ...
+ }
+
+ The pack methods provided by protobuf library will by default
+ use
+
+ 'type.googleapis.com/full.type.name' as the type URL and the
+ unpack
+
+ methods only use the fully qualified type name after the last
+ '/'
+
+ in the type URL, for example "foo.bar.com/x/y.z" will yield
+ type
+
+ name "y.z".
+
+
+
+ JSON
+
+ ====
+
+ The JSON representation of an `Any` value uses the regular
+
+ representation of the deserialized, embedded message, with an
+
+ additional field `@type` which contains the type URL. Example:
+
+ package google.profile;
+ message Person {
+ string first_name = 1;
+ string last_name = 2;
+ }
+
+ {
+ "@type": "type.googleapis.com/google.profile.Person",
+ "firstName": ,
+ "lastName":
+ }
+
+ If the embedded message type is well-known and has a custom
+ JSON
+
+ representation, that representation will be embedded adding a
+ field
+
+ `value` which holds the custom JSON in addition to the `@type`
+
+ field. Example (for message [google.protobuf.Duration][]):
+
+ {
+ "@type": "type.googleapis.com/google.protobuf.Duration",
+ "value": "1.212s"
+ }
+ title: consensus state associated with the channel
+ client_id:
+ type: string
+ title: client ID associated with the consensus state
+ proof:
+ type: string
+ format: byte
+ title: merkle proof of existence
+ proof_height:
+ title: height at which the proof was retrieved
+ type: object
+ properties:
+ revision_number:
+ type: string
+ format: uint64
+ title: the revision that the client is currently on
+ revision_height:
+ type: string
+ format: uint64
+ title: the height within the given revision
+ description: >-
+ Normally the RevisionHeight is incremented at each height
+ while keeping
+
+ RevisionNumber the same. However some consensus algorithms may
+ choose to
+
+ reset the height in certain conditions e.g. hard forks,
+ state-machine
+
+ breaking changes. In these cases, the RevisionNumber is
+ incremented so that
+
+ height continues to be monotonically increasing even as the
+ RevisionHeight
+
+ gets reset
+ title: |-
+ QueryConnectionConsensusStateResponse is the response type for the
+ Query/ConnectionConsensusState RPC method
+ default:
+ description: An unexpected error response
+ schema:
+ type: object
+ properties:
+ error:
+ type: string
+ code:
+ type: integer
+ format: int32
+ message:
+ type: string
+ details:
+ type: array
+ items:
+ type: object
+ properties:
+ type_url:
+ type: string
+ description: >-
+ A URL/resource name that uniquely identifies the type of
+ the serialized
+
+ protocol buffer message. This string must contain at
+ least
+
+ one "/" character. The last segment of the URL's path
+ must represent
+
+ the fully qualified name of the type (as in
+
+ `path/google.protobuf.Duration`). The name should be in
+ a canonical form
+
+ (e.g., leading "." is not accepted).
+
+
+ In practice, teams usually precompile into the binary
+ all types that they
+
+ expect it to use in the context of Any. However, for
+ URLs which use the
+
+ scheme `http`, `https`, or no scheme, one can optionally
+ set up a type
+
+ server that maps type URLs to message definitions as
+ follows:
+
+
+ * If no scheme is provided, `https` is assumed.
+
+ * An HTTP GET on the URL must yield a
+ [google.protobuf.Type][]
+ value in binary format, or produce an error.
+ * Applications are allowed to cache lookup results based
+ on the
+ URL, or have them precompiled into a binary to avoid any
+ lookup. Therefore, binary compatibility needs to be preserved
+ on changes to types. (Use versioned type names to manage
+ breaking changes.)
+
+ Note: this functionality is not currently available in
+ the official
+
+ protobuf release, and it is not used for type URLs
+ beginning with
+
+ type.googleapis.com.
+
+
+ Schemes other than `http`, `https` (or the empty scheme)
+ might be
+
+ used with implementation specific semantics.
+ value:
+ type: string
+ format: byte
+ description: >-
+ Must be a valid serialized protocol buffer of the above
+ specified type.
+ description: >-
+ `Any` contains an arbitrary serialized protocol buffer
+ message along with a
+
+ URL that describes the type of the serialized message.
+
+
+ Protobuf library provides support to pack/unpack Any values
+ in the form
+
+ of utility functions or additional generated methods of the
+ Any type.
+
+
+ Example 1: Pack and unpack a message in C++.
+
+ Foo foo = ...;
+ Any any;
+ any.PackFrom(foo);
+ ...
+ if (any.UnpackTo(&foo)) {
+ ...
+ }
+
+ Example 2: Pack and unpack a message in Java.
+
+ Foo foo = ...;
+ Any any = Any.pack(foo);
+ ...
+ if (any.is(Foo.class)) {
+ foo = any.unpack(Foo.class);
+ }
+
+ Example 3: Pack and unpack a message in Python.
+
+ foo = Foo(...)
+ any = Any()
+ any.Pack(foo)
+ ...
+ if any.Is(Foo.DESCRIPTOR):
+ any.Unpack(foo)
+ ...
+
+ Example 4: Pack and unpack a message in Go
+
+ foo := &pb.Foo{...}
+ any, err := ptypes.MarshalAny(foo)
+ ...
+ foo := &pb.Foo{}
+ if err := ptypes.UnmarshalAny(any, foo); err != nil {
+ ...
+ }
+
+ The pack methods provided by protobuf library will by
+ default use
+
+ 'type.googleapis.com/full.type.name' as the type URL and the
+ unpack
+
+ methods only use the fully qualified type name after the
+ last '/'
+
+ in the type URL, for example "foo.bar.com/x/y.z" will yield
+ type
+
+ name "y.z".
+
+
+
+ JSON
+
+ ====
+
+ The JSON representation of an `Any` value uses the regular
+
+ representation of the deserialized, embedded message, with
+ an
+
+ additional field `@type` which contains the type URL.
+ Example:
+
+ package google.profile;
+ message Person {
+ string first_name = 1;
+ string last_name = 2;
+ }
+
+ {
+ "@type": "type.googleapis.com/google.profile.Person",
+ "firstName": ,
+ "lastName":
+ }
+
+ If the embedded message type is well-known and has a custom
+ JSON
+
+ representation, that representation will be embedded adding
+ a field
+
+ `value` which holds the custom JSON in addition to the
+ `@type`
+
+ field. Example (for message [google.protobuf.Duration][]):
+
+ {
+ "@type": "type.googleapis.com/google.protobuf.Duration",
+ "value": "1.212s"
+ }
+ parameters:
+ - name: connection_id
+ description: connection identifier
+ in: path
+ required: true
+ type: string
+ - name: revision_number
+ in: path
+ required: true
+ type: string
+ format: uint64
+ - name: revision_height
+ in: path
+ required: true
+ type: string
+ format: uint64
+ tags:
+ - Query
+ /ibc/core/channel/v1/channels:
+ get:
+ summary: Channels queries all the IBC channels of a chain.
+ operationId: Channels
+ responses:
+ '200':
+ description: A successful response.
+ schema:
+ type: object
+ properties:
+ channels:
+ type: array
+ items:
+ type: object
+ properties:
+ state:
+ title: current state of the channel end
+ type: string
+ enum:
+ - STATE_UNINITIALIZED_UNSPECIFIED
+ - STATE_INIT
+ - STATE_TRYOPEN
+ - STATE_OPEN
+ - STATE_CLOSED
+ default: STATE_UNINITIALIZED_UNSPECIFIED
+ description: >-
+ State defines if a channel is in one of the following
+ states:
+
+ CLOSED, INIT, TRYOPEN, OPEN or UNINITIALIZED.
+
+ - STATE_UNINITIALIZED_UNSPECIFIED: Default State
+ - STATE_INIT: A channel has just started the opening handshake.
+ - STATE_TRYOPEN: A channel has acknowledged the handshake step on the counterparty chain.
+ - STATE_OPEN: A channel has completed the handshake. Open channels are
+ ready to send and receive packets.
+ - STATE_CLOSED: A channel has been closed and can no longer be used to send or receive
+ packets.
+ ordering:
+ title: whether the channel is ordered or unordered
+ type: string
+ enum:
+ - ORDER_NONE_UNSPECIFIED
+ - ORDER_UNORDERED
+ - ORDER_ORDERED
+ default: ORDER_NONE_UNSPECIFIED
+ description: >-
+ - ORDER_NONE_UNSPECIFIED: zero-value for channel
+ ordering
+ - ORDER_UNORDERED: packets can be delivered in any order, which may differ from the order in
+ which they were sent.
+ - ORDER_ORDERED: packets are delivered exactly in the order in which they were sent
+ counterparty:
+ title: counterparty channel end
+ type: object
+ properties:
+ port_id:
+ type: string
+ description: >-
+ port on the counterparty chain which owns the other
+ end of the channel.
+ channel_id:
+ type: string
+ title: channel end on the counterparty chain
+ connection_hops:
+ type: array
+ items:
+ type: string
+ title: >-
+ list of connection identifiers, in order, along which
+ packets sent on
+
+ this channel will travel
+ version:
+ type: string
+ title: >-
+ opaque channel version, which is agreed upon during the
+ handshake
+ port_id:
+ type: string
+ title: port identifier
+ channel_id:
+ type: string
+ title: channel identifier
+ description: >-
+ IdentifiedChannel defines a channel with additional port and
+ channel
+
+ identifier fields.
+ description: list of stored channels of the chain.
+ pagination:
+ title: pagination response
+ type: object
+ properties:
+ next_key:
+ type: string
+ format: byte
+ title: |-
+ next_key is the key to be passed to PageRequest.key to
+ query the next page most efficiently
+ total:
+ type: string
+ format: uint64
+ title: >-
+ total is total number of results available if
+ PageRequest.count_total
+
+ was set, its value is undefined otherwise
+ description: >-
+ PageResponse is to be embedded in gRPC response messages where
+ the
+
+ corresponding request message has used PageRequest.
+
+ message SomeResponse {
+ repeated Bar results = 1;
+ PageResponse page = 2;
+ }
+ height:
+ title: query block height
+ type: object
+ properties:
+ revision_number:
+ type: string
+ format: uint64
+ title: the revision that the client is currently on
+ revision_height:
+ type: string
+ format: uint64
+ title: the height within the given revision
+ description: >-
+ Normally the RevisionHeight is incremented at each height
+ while keeping
+
+ RevisionNumber the same. However some consensus algorithms may
+ choose to
+
+ reset the height in certain conditions e.g. hard forks,
+ state-machine
+
+ breaking changes. In these cases, the RevisionNumber is
+ incremented so that
+
+ height continues to be monotonically increasing even as the
+ RevisionHeight
+
+ gets reset
+ description: >-
+ QueryChannelsResponse is the response type for the Query/Channels
+ RPC method.
+ default:
+ description: An unexpected error response
+ schema:
+ type: object
+ properties:
+ error:
+ type: string
+ code:
+ type: integer
+ format: int32
+ message:
+ type: string
+ details:
+ type: array
+ items:
+ type: object
+ properties:
+ type_url:
+ type: string
+ description: >-
+ A URL/resource name that uniquely identifies the type of
+ the serialized
+
+ protocol buffer message. This string must contain at
+ least
+
+ one "/" character. The last segment of the URL's path
+ must represent
+
+ the fully qualified name of the type (as in
+
+ `path/google.protobuf.Duration`). The name should be in
+ a canonical form
+
+ (e.g., leading "." is not accepted).
+
+
+ In practice, teams usually precompile into the binary
+ all types that they
+
+ expect it to use in the context of Any. However, for
+ URLs which use the
+
+ scheme `http`, `https`, or no scheme, one can optionally
+ set up a type
+
+ server that maps type URLs to message definitions as
+ follows:
+
+
+ * If no scheme is provided, `https` is assumed.
+
+ * An HTTP GET on the URL must yield a
+ [google.protobuf.Type][]
+ value in binary format, or produce an error.
+ * Applications are allowed to cache lookup results based
+ on the
+ URL, or have them precompiled into a binary to avoid any
+ lookup. Therefore, binary compatibility needs to be preserved
+ on changes to types. (Use versioned type names to manage
+ breaking changes.)
+
+ Note: this functionality is not currently available in
+ the official
+
+ protobuf release, and it is not used for type URLs
+ beginning with
+
+ type.googleapis.com.
+
+
+ Schemes other than `http`, `https` (or the empty scheme)
+ might be
+
+ used with implementation specific semantics.
+ value:
+ type: string
+ format: byte
+ description: >-
+ Must be a valid serialized protocol buffer of the above
+ specified type.
+ description: >-
+ `Any` contains an arbitrary serialized protocol buffer
+ message along with a
+
+ URL that describes the type of the serialized message.
+
+
+ Protobuf library provides support to pack/unpack Any values
+ in the form
+
+ of utility functions or additional generated methods of the
+ Any type.
+
+
+ Example 1: Pack and unpack a message in C++.
+
+ Foo foo = ...;
+ Any any;
+ any.PackFrom(foo);
+ ...
+ if (any.UnpackTo(&foo)) {
+ ...
+ }
+
+ Example 2: Pack and unpack a message in Java.
+
+ Foo foo = ...;
+ Any any = Any.pack(foo);
+ ...
+ if (any.is(Foo.class)) {
+ foo = any.unpack(Foo.class);
+ }
+
+ Example 3: Pack and unpack a message in Python.
+
+ foo = Foo(...)
+ any = Any()
+ any.Pack(foo)
+ ...
+ if any.Is(Foo.DESCRIPTOR):
+ any.Unpack(foo)
+ ...
+
+ Example 4: Pack and unpack a message in Go
+
+ foo := &pb.Foo{...}
+ any, err := ptypes.MarshalAny(foo)
+ ...
+ foo := &pb.Foo{}
+ if err := ptypes.UnmarshalAny(any, foo); err != nil {
+ ...
+ }
+
+ The pack methods provided by protobuf library will by
+ default use
+
+ 'type.googleapis.com/full.type.name' as the type URL and the
+ unpack
+
+ methods only use the fully qualified type name after the
+ last '/'
+
+ in the type URL, for example "foo.bar.com/x/y.z" will yield
+ type
+
+ name "y.z".
+
+
+
+ JSON
+
+ ====
+
+ The JSON representation of an `Any` value uses the regular
+
+ representation of the deserialized, embedded message, with
+ an
+
+ additional field `@type` which contains the type URL.
+ Example:
+
+ package google.profile;
+ message Person {
+ string first_name = 1;
+ string last_name = 2;
+ }
+
+ {
+ "@type": "type.googleapis.com/google.profile.Person",
+ "firstName": ,
+ "lastName":
+ }
+
+ If the embedded message type is well-known and has a custom
+ JSON
+
+ representation, that representation will be embedded adding
+ a field
+
+ `value` which holds the custom JSON in addition to the
+ `@type`
+
+ field. Example (for message [google.protobuf.Duration][]):
+
+ {
+ "@type": "type.googleapis.com/google.protobuf.Duration",
+ "value": "1.212s"
+ }
+ parameters:
+ - name: pagination.key
+ description: |-
+ key is a value returned in PageResponse.next_key to begin
+ querying the next page most efficiently. Only one of offset or key
+ should be set.
+ in: query
+ required: false
+ type: string
+ format: byte
+ - name: pagination.offset
+ description: >-
+ offset is a numeric offset that can be used when key is unavailable.
+
+ It is less efficient than using key. Only one of offset or key
+ should
+
+ be set.
+ in: query
+ required: false
+ type: string
+ format: uint64
+ - name: pagination.limit
+ description: >-
+ limit is the total number of results to be returned in the result
+ page.
+
+ If left empty it will default to a value to be set by each app.
+ in: query
+ required: false
+ type: string
+ format: uint64
+ - name: pagination.count_total
+ description: >-
+ count_total is set to true to indicate that the result set should
+ include
+
+ a count of the total number of items available for pagination in
+ UIs.
+
+ count_total is only respected when offset is used. It is ignored
+ when key
+
+ is set.
+ in: query
+ required: false
+ type: boolean
+ format: boolean
+ tags:
+ - Query
+ '/ibc/core/channel/v1/channels/{channel_id}/ports/{port_id}':
+ get:
+ summary: Channel queries an IBC Channel.
+ operationId: Channel
+ responses:
+ '200':
+ description: A successful response.
+ schema:
+ type: object
+ properties:
+ channel:
+ title: channel associated with the request identifiers
+ type: object
+ properties:
+ state:
+ title: current state of the channel end
+ type: string
+ enum:
+ - STATE_UNINITIALIZED_UNSPECIFIED
+ - STATE_INIT
+ - STATE_TRYOPEN
+ - STATE_OPEN
+ - STATE_CLOSED
+ default: STATE_UNINITIALIZED_UNSPECIFIED
+ description: >-
+ State defines if a channel is in one of the following
+ states:
+
+ CLOSED, INIT, TRYOPEN, OPEN or UNINITIALIZED.
+
+ - STATE_UNINITIALIZED_UNSPECIFIED: Default State
+ - STATE_INIT: A channel has just started the opening handshake.
+ - STATE_TRYOPEN: A channel has acknowledged the handshake step on the counterparty chain.
+ - STATE_OPEN: A channel has completed the handshake. Open channels are
+ ready to send and receive packets.
+ - STATE_CLOSED: A channel has been closed and can no longer be used to send or receive
+ packets.
+ ordering:
+ title: whether the channel is ordered or unordered
+ type: string
+ enum:
+ - ORDER_NONE_UNSPECIFIED
+ - ORDER_UNORDERED
+ - ORDER_ORDERED
+ default: ORDER_NONE_UNSPECIFIED
+ description: |-
+ - ORDER_NONE_UNSPECIFIED: zero-value for channel ordering
+ - ORDER_UNORDERED: packets can be delivered in any order, which may differ from the order in
+ which they were sent.
+ - ORDER_ORDERED: packets are delivered exactly in the order in which they were sent
+ counterparty:
+ title: counterparty channel end
+ type: object
+ properties:
+ port_id:
+ type: string
+ description: >-
+ port on the counterparty chain which owns the other
+ end of the channel.
+ channel_id:
+ type: string
+ title: channel end on the counterparty chain
+ connection_hops:
+ type: array
+ items:
+ type: string
+ title: >-
+ list of connection identifiers, in order, along which
+ packets sent on
+
+ this channel will travel
+ version:
+ type: string
+ title: >-
+ opaque channel version, which is agreed upon during the
+ handshake
+ description: >-
+ Channel defines a pipeline for exactly-once packet delivery
+ between specific
+
+ modules on separate blockchains, which has at least one end
+ capable of
+
+ sending packets and one end capable of receiving packets.
+ proof:
+ type: string
+ format: byte
+ title: merkle proof of existence
+ proof_height:
+ title: height at which the proof was retrieved
+ type: object
+ properties:
+ revision_number:
+ type: string
+ format: uint64
+ title: the revision that the client is currently on
+ revision_height:
+ type: string
+ format: uint64
+ title: the height within the given revision
+ description: >-
+ Normally the RevisionHeight is incremented at each height
+ while keeping
+
+ RevisionNumber the same. However some consensus algorithms may
+ choose to
+
+ reset the height in certain conditions e.g. hard forks,
+ state-machine
+
+ breaking changes. In these cases, the RevisionNumber is
+ incremented so that
+
+ height continues to be monotonically increasing even as the
+ RevisionHeight
+
+ gets reset
+ description: >-
+ QueryChannelResponse is the response type for the Query/Channel
+ RPC method.
+
+ Besides the Channel end, it includes a proof and the height from
+ which the
+
+ proof was retrieved.
+ default:
+ description: An unexpected error response
+ schema:
+ type: object
+ properties:
+ error:
+ type: string
+ code:
+ type: integer
+ format: int32
+ message:
+ type: string
+ details:
+ type: array
+ items:
+ type: object
+ properties:
+ type_url:
+ type: string
+ description: >-
+ A URL/resource name that uniquely identifies the type of
+ the serialized
+
+ protocol buffer message. This string must contain at
+ least
+
+ one "/" character. The last segment of the URL's path
+ must represent
+
+ the fully qualified name of the type (as in
+
+ `path/google.protobuf.Duration`). The name should be in
+ a canonical form
+
+ (e.g., leading "." is not accepted).
+
+
+ In practice, teams usually precompile into the binary
+ all types that they
+
+ expect it to use in the context of Any. However, for
+ URLs which use the
+
+ scheme `http`, `https`, or no scheme, one can optionally
+ set up a type
+
+ server that maps type URLs to message definitions as
+ follows:
+
+
+ * If no scheme is provided, `https` is assumed.
+
+ * An HTTP GET on the URL must yield a
+ [google.protobuf.Type][]
+ value in binary format, or produce an error.
+ * Applications are allowed to cache lookup results based
+ on the
+ URL, or have them precompiled into a binary to avoid any
+ lookup. Therefore, binary compatibility needs to be preserved
+ on changes to types. (Use versioned type names to manage
+ breaking changes.)
+
+ Note: this functionality is not currently available in
+ the official
+
+ protobuf release, and it is not used for type URLs
+ beginning with
+
+ type.googleapis.com.
+
+
+ Schemes other than `http`, `https` (or the empty scheme)
+ might be
+
+ used with implementation specific semantics.
+ value:
+ type: string
+ format: byte
+ description: >-
+ Must be a valid serialized protocol buffer of the above
+ specified type.
+ description: >-
+ `Any` contains an arbitrary serialized protocol buffer
+ message along with a
+
+ URL that describes the type of the serialized message.
+
+
+ Protobuf library provides support to pack/unpack Any values
+ in the form
+
+ of utility functions or additional generated methods of the
+ Any type.
+
+
+ Example 1: Pack and unpack a message in C++.
+
+ Foo foo = ...;
+ Any any;
+ any.PackFrom(foo);
+ ...
+ if (any.UnpackTo(&foo)) {
+ ...
+ }
+
+ Example 2: Pack and unpack a message in Java.
+
+ Foo foo = ...;
+ Any any = Any.pack(foo);
+ ...
+ if (any.is(Foo.class)) {
+ foo = any.unpack(Foo.class);
+ }
+
+ Example 3: Pack and unpack a message in Python.
+
+ foo = Foo(...)
+ any = Any()
+ any.Pack(foo)
+ ...
+ if any.Is(Foo.DESCRIPTOR):
+ any.Unpack(foo)
+ ...
+
+ Example 4: Pack and unpack a message in Go
+
+ foo := &pb.Foo{...}
+ any, err := ptypes.MarshalAny(foo)
+ ...
+ foo := &pb.Foo{}
+ if err := ptypes.UnmarshalAny(any, foo); err != nil {
+ ...
+ }
+
+ The pack methods provided by protobuf library will by
+ default use
+
+ 'type.googleapis.com/full.type.name' as the type URL and the
+ unpack
+
+ methods only use the fully qualified type name after the
+ last '/'
+
+ in the type URL, for example "foo.bar.com/x/y.z" will yield
+ type
+
+ name "y.z".
+
+
+
+ JSON
+
+ ====
+
+ The JSON representation of an `Any` value uses the regular
+
+ representation of the deserialized, embedded message, with
+ an
+
+ additional field `@type` which contains the type URL.
+ Example:
+
+ package google.profile;
+ message Person {
+ string first_name = 1;
+ string last_name = 2;
+ }
+
+ {
+ "@type": "type.googleapis.com/google.profile.Person",
+ "firstName": ,
+ "lastName":
+ }
+
+ If the embedded message type is well-known and has a custom
+ JSON
+
+ representation, that representation will be embedded adding
+ a field
+
+ `value` which holds the custom JSON in addition to the
+ `@type`
+
+ field. Example (for message [google.protobuf.Duration][]):
+
+ {
+ "@type": "type.googleapis.com/google.protobuf.Duration",
+ "value": "1.212s"
+ }
+ parameters:
+ - name: channel_id
+ description: channel unique identifier
+ in: path
+ required: true
+ type: string
+ - name: port_id
+ description: port unique identifier
+ in: path
+ required: true
+ type: string
+ tags:
+ - Query
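+ # Example: assuming a node exposing the gRPC-gateway REST API on the default
+ # Cosmos SDK port 1317, and the hypothetical identifiers "transfer" (port) and
+ # "channel-0" (channel), the Query/Channel route above can be queried with:
+ #
+ #   curl http://localhost:1317/ibc/core/channel/v1/channels/channel-0/ports/transfer
+ #
+ # The JSON response follows the QueryChannelResponse schema above (channel, proof,
+ # proof_height).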
+ '/ibc/core/channel/v1/channels/{channel_id}/ports/{port_id}/client_state':
+ get:
+ summary: >-
+ ChannelClientState queries for the client state for the channel
+ associated
+
+ with the provided channel identifiers.
+ operationId: ChannelClientState
+ responses:
+ '200':
+ description: A successful response.
+ schema:
+ type: object
+ properties:
+ identified_client_state:
+ title: client state associated with the channel
+ type: object
+ properties:
+ client_id:
+ type: string
+ title: client identifier
+ client_state:
+ type: object
+ properties:
+ type_url:
+ type: string
+ description: >-
+ A URL/resource name that uniquely identifies the type
+ of the serialized
+
+ protocol buffer message. This string must contain at
+ least
+
+ one "/" character. The last segment of the URL's path
+ must represent
+
+ the fully qualified name of the type (as in
+
+ `path/google.protobuf.Duration`). The name should be
+ in a canonical form
+
+ (e.g., leading "." is not accepted).
+
+
+ In practice, teams usually precompile into the binary
+ all types that they
+
+ expect it to use in the context of Any. However, for
+ URLs which use the
+
+ scheme `http`, `https`, or no scheme, one can
+ optionally set up a type
+
+ server that maps type URLs to message definitions as
+ follows:
+
+
+ * If no scheme is provided, `https` is assumed.
+
+ * An HTTP GET on the URL must yield a
+ [google.protobuf.Type][]
+ value in binary format, or produce an error.
+ * Applications are allowed to cache lookup results
+ based on the
+ URL, or have them precompiled into a binary to avoid any
+ lookup. Therefore, binary compatibility needs to be preserved
+ on changes to types. (Use versioned type names to manage
+ breaking changes.)
+
+ Note: this functionality is not currently available in
+ the official
+
+ protobuf release, and it is not used for type URLs
+ beginning with
+
+ type.googleapis.com.
+
+
+ Schemes other than `http`, `https` (or the empty
+ scheme) might be
+
+ used with implementation specific semantics.
+ value:
+ type: string
+ format: byte
+ description: >-
+ Must be a valid serialized protocol buffer of the
+ above specified type.
+ description: >-
+ `Any` contains an arbitrary serialized protocol buffer
+ message along with a
+
+ URL that describes the type of the serialized message.
+
+
+ Protobuf library provides support to pack/unpack Any
+ values in the form
+
+ of utility functions or additional generated methods of
+ the Any type.
+
+
+ Example 1: Pack and unpack a message in C++.
+
+ Foo foo = ...;
+ Any any;
+ any.PackFrom(foo);
+ ...
+ if (any.UnpackTo(&foo)) {
+ ...
+ }
+
+ Example 2: Pack and unpack a message in Java.
+
+ Foo foo = ...;
+ Any any = Any.pack(foo);
+ ...
+ if (any.is(Foo.class)) {
+ foo = any.unpack(Foo.class);
+ }
+
+ Example 3: Pack and unpack a message in Python.
+
+ foo = Foo(...)
+ any = Any()
+ any.Pack(foo)
+ ...
+ if any.Is(Foo.DESCRIPTOR):
+ any.Unpack(foo)
+ ...
+
+ Example 4: Pack and unpack a message in Go
+
+ foo := &pb.Foo{...}
+ any, err := ptypes.MarshalAny(foo)
+ ...
+ foo := &pb.Foo{}
+ if err := ptypes.UnmarshalAny(any, foo); err != nil {
+ ...
+ }
+
+ The pack methods provided by protobuf library will by
+ default use
+
+ 'type.googleapis.com/full.type.name' as the type URL and
+ the unpack
+
+ methods only use the fully qualified type name after the
+ last '/'
+
+ in the type URL, for example "foo.bar.com/x/y.z" will
+ yield type
+
+ name "y.z".
+
+
+
+ JSON
+
+ ====
+
+ The JSON representation of an `Any` value uses the regular
+
+ representation of the deserialized, embedded message, with
+ an
+
+ additional field `@type` which contains the type URL.
+ Example:
+
+ package google.profile;
+ message Person {
+ string first_name = 1;
+ string last_name = 2;
+ }
+
+ {
+ "@type": "type.googleapis.com/google.profile.Person",
+ "firstName": ,
+ "lastName":
+ }
+
+ If the embedded message type is well-known and has a
+ custom JSON
+
+ representation, that representation will be embedded
+ adding a field
+
+ `value` which holds the custom JSON in addition to the
+ `@type`
+
+ field. Example (for message [google.protobuf.Duration][]):
+
+ {
+ "@type": "type.googleapis.com/google.protobuf.Duration",
+ "value": "1.212s"
+ }
+ title: client state
+ description: >-
+ IdentifiedClientState defines a client state with an
+ additional client
+
+ identifier field.
+ proof:
+ type: string
+ format: byte
+ title: merkle proof of existence
+ proof_height:
+ title: height at which the proof was retrieved
+ type: object
+ properties:
+ revision_number:
+ type: string
+ format: uint64
+ title: the revision that the client is currently on
+ revision_height:
+ type: string
+ format: uint64
+ title: the height within the given revision
+ description: >-
+ Normally the RevisionHeight is incremented at each height
+ while keeping
+
+ RevisionNumber the same. However, some consensus algorithms may
+ choose to
+
+ reset the height in certain conditions, e.g. hard forks,
+ state-machine
+
+ breaking changes. In these cases, the RevisionNumber is
+ incremented so that
+
+ height continues to be monotonically increasing even as the
+ RevisionHeight
+
+ gets reset
+ title: |-
+ QueryChannelClientStateResponse is the Response type for the
+ Query/QueryChannelClientState RPC method
+ default:
+ description: An unexpected error response
+ schema:
+ type: object
+ properties:
+ error:
+ type: string
+ code:
+ type: integer
+ format: int32
+ message:
+ type: string
+ details:
+ type: array
+ items:
+ type: object
+ properties:
+ type_url:
+ type: string
+ description: >-
+ A URL/resource name that uniquely identifies the type of
+ the serialized
+
+ protocol buffer message. This string must contain at
+ least
+
+ one "/" character. The last segment of the URL's path
+ must represent
+
+ the fully qualified name of the type (as in
+
+ `path/google.protobuf.Duration`). The name should be in
+ a canonical form
+
+ (e.g., leading "." is not accepted).
+
+
+ In practice, teams usually precompile into the binary
+ all types that they
+
+ expect it to use in the context of Any. However, for
+ URLs which use the
+
+ scheme `http`, `https`, or no scheme, one can optionally
+ set up a type
+
+ server that maps type URLs to message definitions as
+ follows:
+
+
+ * If no scheme is provided, `https` is assumed.
+
+ * An HTTP GET on the URL must yield a
+ [google.protobuf.Type][]
+ value in binary format, or produce an error.
+ * Applications are allowed to cache lookup results based
+ on the
+ URL, or have them precompiled into a binary to avoid any
+ lookup. Therefore, binary compatibility needs to be preserved
+ on changes to types. (Use versioned type names to manage
+ breaking changes.)
+
+ Note: this functionality is not currently available in
+ the official
+
+ protobuf release, and it is not used for type URLs
+ beginning with
+
+ type.googleapis.com.
+
+
+ Schemes other than `http`, `https` (or the empty scheme)
+ might be
+
+ used with implementation specific semantics.
+ value:
+ type: string
+ format: byte
+ description: >-
+ Must be a valid serialized protocol buffer of the above
+ specified type.
+ description: >-
+ `Any` contains an arbitrary serialized protocol buffer
+ message along with a
+
+ URL that describes the type of the serialized message.
+
+
+ Protobuf library provides support to pack/unpack Any values
+ in the form
+
+ of utility functions or additional generated methods of the
+ Any type.
+
+
+ Example 1: Pack and unpack a message in C++.
+
+ Foo foo = ...;
+ Any any;
+ any.PackFrom(foo);
+ ...
+ if (any.UnpackTo(&foo)) {
+ ...
+ }
+
+ Example 2: Pack and unpack a message in Java.
+
+ Foo foo = ...;
+ Any any = Any.pack(foo);
+ ...
+ if (any.is(Foo.class)) {
+ foo = any.unpack(Foo.class);
+ }
+
+ Example 3: Pack and unpack a message in Python.
+
+ foo = Foo(...)
+ any = Any()
+ any.Pack(foo)
+ ...
+ if any.Is(Foo.DESCRIPTOR):
+ any.Unpack(foo)
+ ...
+
+ Example 4: Pack and unpack a message in Go
+
+ foo := &pb.Foo{...}
+ any, err := ptypes.MarshalAny(foo)
+ ...
+ foo := &pb.Foo{}
+ if err := ptypes.UnmarshalAny(any, foo); err != nil {
+ ...
+ }
+
+ The pack methods provided by protobuf library will by
+ default use
+
+ 'type.googleapis.com/full.type.name' as the type URL and the
+ unpack
+
+ methods only use the fully qualified type name after the
+ last '/'
+
+ in the type URL, for example "foo.bar.com/x/y.z" will yield
+ type
+
+ name "y.z".
+
+
+
+ JSON
+
+ ====
+
+ The JSON representation of an `Any` value uses the regular
+
+ representation of the deserialized, embedded message, with
+ an
+
+ additional field `@type` which contains the type URL.
+ Example:
+
+ package google.profile;
+ message Person {
+ string first_name = 1;
+ string last_name = 2;
+ }
+
+ {
+ "@type": "type.googleapis.com/google.profile.Person",
+ "firstName": ,
+ "lastName":
+ }
+
+ If the embedded message type is well-known and has a custom
+ JSON
+
+ representation, that representation will be embedded adding
+ a field
+
+ `value` which holds the custom JSON in addition to the
+ `@type`
+
+ field. Example (for message [google.protobuf.Duration][]):
+
+ {
+ "@type": "type.googleapis.com/google.protobuf.Duration",
+ "value": "1.212s"
+ }
+ parameters:
+ - name: channel_id
+ description: channel unique identifier
+ in: path
+ required: true
+ type: string
+ - name: port_id
+ description: port unique identifier
+ in: path
+ required: true
+ type: string
+ tags:
+ - Query
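+ # Example: under the same assumptions (REST API on localhost:1317, hypothetical
+ # identifiers "transfer" and "channel-0"), the ChannelClientState route can be
+ # queried with:
+ #
+ #   curl http://localhost:1317/ibc/core/channel/v1/channels/channel-0/ports/transfer/client_state
+ #
+ # The identified_client_state.client_state field in the response is a protobuf Any,
+ # so its JSON form carries an "@type" field naming the concrete client state type.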
+ '/ibc/core/channel/v1/channels/{channel_id}/ports/{port_id}/consensus_state/revision/{revision_number}/height/{revision_height}':
+ get:
+ summary: |-
+ ChannelConsensusState queries for the consensus state for the channel
+ associated with the provided channel identifiers.
+ operationId: ChannelConsensusState
+ responses:
+ '200':
+ description: A successful response.
+ schema:
+ type: object
+ properties:
+ consensus_state:
+ type: object
+ properties:
+ type_url:
+ type: string
+ description: >-
+ A URL/resource name that uniquely identifies the type of
+ the serialized
+
+ protocol buffer message. This string must contain at least
+
+ one "/" character. The last segment of the URL's path must
+ represent
+
+ the fully qualified name of the type (as in
+
+ `path/google.protobuf.Duration`). The name should be in a
+ canonical form
+
+ (e.g., leading "." is not accepted).
+
+
+ In practice, teams usually precompile into the binary all
+ types that they
+
+ expect it to use in the context of Any. However, for URLs
+ which use the
+
+ scheme `http`, `https`, or no scheme, one can optionally
+ set up a type
+
+ server that maps type URLs to message definitions as
+ follows:
+
+
+ * If no scheme is provided, `https` is assumed.
+
+ * An HTTP GET on the URL must yield a
+ [google.protobuf.Type][]
+ value in binary format, or produce an error.
+ * Applications are allowed to cache lookup results based
+ on the
+ URL, or have them precompiled into a binary to avoid any
+ lookup. Therefore, binary compatibility needs to be preserved
+ on changes to types. (Use versioned type names to manage
+ breaking changes.)
+
+ Note: this functionality is not currently available in the
+ official
+
+ protobuf release, and it is not used for type URLs
+ beginning with
+
+ type.googleapis.com.
+
+
+ Schemes other than `http`, `https` (or the empty scheme)
+ might be
+
+ used with implementation specific semantics.
+ value:
+ type: string
+ format: byte
+ description: >-
+ Must be a valid serialized protocol buffer of the above
+ specified type.
+ description: >-
+ `Any` contains an arbitrary serialized protocol buffer message
+ along with a
+
+ URL that describes the type of the serialized message.
+
+
+ Protobuf library provides support to pack/unpack Any values in
+ the form
+
+ of utility functions or additional generated methods of the
+ Any type.
+
+
+ Example 1: Pack and unpack a message in C++.
+
+ Foo foo = ...;
+ Any any;
+ any.PackFrom(foo);
+ ...
+ if (any.UnpackTo(&foo)) {
+ ...
+ }
+
+ Example 2: Pack and unpack a message in Java.
+
+ Foo foo = ...;
+ Any any = Any.pack(foo);
+ ...
+ if (any.is(Foo.class)) {
+ foo = any.unpack(Foo.class);
+ }
+
+ Example 3: Pack and unpack a message in Python.
+
+ foo = Foo(...)
+ any = Any()
+ any.Pack(foo)
+ ...
+ if any.Is(Foo.DESCRIPTOR):
+ any.Unpack(foo)
+ ...
+
+ Example 4: Pack and unpack a message in Go
+
+ foo := &pb.Foo{...}
+ any, err := ptypes.MarshalAny(foo)
+ ...
+ foo := &pb.Foo{}
+ if err := ptypes.UnmarshalAny(any, foo); err != nil {
+ ...
+ }
+
+ The pack methods provided by protobuf library will by default
+ use
+
+ 'type.googleapis.com/full.type.name' as the type URL and the
+ unpack
+
+ methods only use the fully qualified type name after the last
+ '/'
+
+ in the type URL, for example "foo.bar.com/x/y.z" will yield
+ type
+
+ name "y.z".
+
+
+
+ JSON
+
+ ====
+
+ The JSON representation of an `Any` value uses the regular
+
+ representation of the deserialized, embedded message, with an
+
+ additional field `@type` which contains the type URL. Example:
+
+ package google.profile;
+ message Person {
+ string first_name = 1;
+ string last_name = 2;
+ }
+
+ {
+ "@type": "type.googleapis.com/google.profile.Person",
+ "firstName": ,
+ "lastName":
+ }
+
+ If the embedded message type is well-known and has a custom
+ JSON
+
+ representation, that representation will be embedded adding a
+ field
+
+ `value` which holds the custom JSON in addition to the `@type`
+
+ field. Example (for message [google.protobuf.Duration][]):
+
+ {
+ "@type": "type.googleapis.com/google.protobuf.Duration",
+ "value": "1.212s"
+ }
+ title: consensus state associated with the channel
+ client_id:
+ type: string
+ title: client ID associated with the consensus state
+ proof:
+ type: string
+ format: byte
+ title: merkle proof of existence
+ proof_height:
+ title: height at which the proof was retrieved
+ type: object
+ properties:
+ revision_number:
+ type: string
+ format: uint64
+ title: the revision that the client is currently on
+ revision_height:
+ type: string
+ format: uint64
+ title: the height within the given revision
+ description: >-
+ Normally the RevisionHeight is incremented at each height
+ while keeping
+
+ RevisionNumber the same. However, some consensus algorithms may
+ choose to
+
+ reset the height in certain conditions, e.g. hard forks,
+ state-machine
+
+ breaking changes. In these cases, the RevisionNumber is
+ incremented so that
+
+ height continues to be monotonically increasing even as the
+ RevisionHeight
+
+ gets reset
+ title: |-
+ QueryChannelConsensusStateResponse is the Response type for the
+ Query/QueryChannelConsensusState RPC method
+ default:
+ description: An unexpected error response
+ schema:
+ type: object
+ properties:
+ error:
+ type: string
+ code:
+ type: integer
+ format: int32
+ message:
+ type: string
+ details:
+ type: array
+ items:
+ type: object
+ properties:
+ type_url:
+ type: string
+ description: >-
+ A URL/resource name that uniquely identifies the type of
+ the serialized
+
+ protocol buffer message. This string must contain at
+ least
+
+ one "/" character. The last segment of the URL's path
+ must represent
+
+ the fully qualified name of the type (as in
+
+ `path/google.protobuf.Duration`). The name should be in
+ a canonical form
+
+ (e.g., leading "." is not accepted).
+
+
+ In practice, teams usually precompile into the binary
+ all types that they
+
+ expect it to use in the context of Any. However, for
+ URLs which use the
+
+ scheme `http`, `https`, or no scheme, one can optionally
+ set up a type
+
+ server that maps type URLs to message definitions as
+ follows:
+
+
+ * If no scheme is provided, `https` is assumed.
+
+ * An HTTP GET on the URL must yield a
+ [google.protobuf.Type][]
+ value in binary format, or produce an error.
+ * Applications are allowed to cache lookup results based
+ on the
+ URL, or have them precompiled into a binary to avoid any
+ lookup. Therefore, binary compatibility needs to be preserved
+ on changes to types. (Use versioned type names to manage
+ breaking changes.)
+
+ Note: this functionality is not currently available in
+ the official
+
+ protobuf release, and it is not used for type URLs
+ beginning with
+
+ type.googleapis.com.
+
+
+ Schemes other than `http`, `https` (or the empty scheme)
+ might be
+
+ used with implementation specific semantics.
+ value:
+ type: string
+ format: byte
+ description: >-
+ Must be a valid serialized protocol buffer of the above
+ specified type.
+ description: >-
+ `Any` contains an arbitrary serialized protocol buffer
+ message along with a
+
+ URL that describes the type of the serialized message.
+
+
+ Protobuf library provides support to pack/unpack Any values
+ in the form
+
+ of utility functions or additional generated methods of the
+ Any type.
+
+
+ Example 1: Pack and unpack a message in C++.
+
+ Foo foo = ...;
+ Any any;
+ any.PackFrom(foo);
+ ...
+ if (any.UnpackTo(&foo)) {
+ ...
+ }
+
+ Example 2: Pack and unpack a message in Java.
+
+ Foo foo = ...;
+ Any any = Any.pack(foo);
+ ...
+ if (any.is(Foo.class)) {
+ foo = any.unpack(Foo.class);
+ }
+
+ Example 3: Pack and unpack a message in Python.
+
+ foo = Foo(...)
+ any = Any()
+ any.Pack(foo)
+ ...
+ if any.Is(Foo.DESCRIPTOR):
+ any.Unpack(foo)
+ ...
+
+ Example 4: Pack and unpack a message in Go
+
+ foo := &pb.Foo{...}
+ any, err := ptypes.MarshalAny(foo)
+ ...
+ foo := &pb.Foo{}
+ if err := ptypes.UnmarshalAny(any, foo); err != nil {
+ ...
+ }
+
+ The pack methods provided by protobuf library will by
+ default use
+
+ 'type.googleapis.com/full.type.name' as the type URL and the
+ unpack
+
+ methods only use the fully qualified type name after the
+ last '/'
+
+ in the type URL, for example "foo.bar.com/x/y.z" will yield
+ type
+
+ name "y.z".
+
+
+
+ JSON
+
+ ====
+
+ The JSON representation of an `Any` value uses the regular
+
+ representation of the deserialized, embedded message, with
+ an
+
+ additional field `@type` which contains the type URL.
+ Example:
+
+ package google.profile;
+ message Person {
+ string first_name = 1;
+ string last_name = 2;
+ }
+
+ {
+ "@type": "type.googleapis.com/google.profile.Person",
+ "firstName": ,
+ "lastName":